aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristian Ehrhardt <christian.ehrhardt@canonical.com>2017-05-16 14:51:32 +0200
committerChristian Ehrhardt <christian.ehrhardt@canonical.com>2017-05-16 16:20:45 +0200
commit7595afa4d30097c1177b69257118d8ad89a539be (patch)
tree4bfeadc905c977e45e54a90c42330553b8942e4e
parentce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6 (diff)
Imported Upstream version 17.05
Change-Id: Id1e419c5a214e4a18739663b91f0f9a549f1fdc6 Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
-rw-r--r--.gitignore13
-rw-r--r--GNUmakefile1
-rw-r--r--MAINTAINERS421
-rw-r--r--app/Makefile8
-rw-r--r--app/cmdline_test/cmdline_test_data.py311
-rw-r--r--app/pdump/Makefile3
-rw-r--r--app/pdump/main.c3
-rw-r--r--app/proc_info/Makefile3
-rw-r--r--app/proc_info/main.c355
-rw-r--r--app/test-crypto-perf/Makefile48
-rw-r--r--app/test-crypto-perf/cperf.h58
-rw-r--r--app/test-crypto-perf/cperf_ops.c515
-rw-r--r--app/test-crypto-perf/cperf_ops.h62
-rw-r--r--app/test-crypto-perf/cperf_options.h115
-rw-r--r--app/test-crypto-perf/cperf_options_parsing.c934
-rw-r--r--app/test-crypto-perf/cperf_test_latency.c552
-rw-r--r--app/test-crypto-perf/cperf_test_latency.h57
-rw-r--r--app/test-crypto-perf/cperf_test_throughput.c518
-rw-r--r--app/test-crypto-perf/cperf_test_throughput.h58
-rw-r--r--app/test-crypto-perf/cperf_test_vector_parsing.c507
-rw-r--r--app/test-crypto-perf/cperf_test_vector_parsing.h73
-rw-r--r--app/test-crypto-perf/cperf_test_vectors.c500
-rw-r--r--app/test-crypto-perf/cperf_test_vectors.h98
-rw-r--r--app/test-crypto-perf/cperf_test_verify.c579
-rw-r--r--app/test-crypto-perf/cperf_test_verify.h58
-rw-r--r--app/test-crypto-perf/data/aes_cbc_128_sha.data502
-rw-r--r--app/test-crypto-perf/data/aes_cbc_192_sha.data504
-rw-r--r--app/test-crypto-perf/data/aes_cbc_256_sha.data504
-rw-r--r--app/test-crypto-perf/main.c444
-rw-r--r--app/test-pmd/Makefile22
-rw-r--r--app/test-pmd/cmdline.c2732
-rw-r--r--app/test-pmd/cmdline_flow.c2812
-rw-r--r--app/test-pmd/config.c815
-rw-r--r--app/test-pmd/csumonly.c53
-rw-r--r--app/test-pmd/flowgen.c1
-rw-r--r--app/test-pmd/icmpecho.c5
-rw-r--r--app/test-pmd/ieee1588fwd.c1
-rw-r--r--app/test-pmd/iofwd.c1
-rw-r--r--app/test-pmd/macfwd.c3
-rw-r--r--app/test-pmd/macswap.c3
-rw-r--r--app/test-pmd/parameters.c158
-rw-r--r--app/test-pmd/rxonly.c6
-rw-r--r--app/test-pmd/testpmd.c389
-rw-r--r--app/test-pmd/testpmd.h67
-rw-r--r--app/test-pmd/txonly.c3
-rw-r--r--app/test/autotest_data.py470
-rw-r--r--app/test/autotest_runner.py422
-rw-r--r--app/test/autotest_test_funcs.py289
-rw-r--r--app/test/test_cryptodev_gcm_test_vectors.h1238
-rw-r--r--app/test/test_cryptodev_zuc_hash_test_vectors.h359
-rw-r--r--app/test/test_pci.c322
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/class1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/configbin64 -> 0 bytes
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/consistent_dma_mask_bits1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/device1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/dma_mask_bits1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/enable1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/irq1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/modalias1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/msi_bus1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/numa_node1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/resource13
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_numvfs1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_totalvfs1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_device1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_vendor1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/uevent6
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/vendor1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/class1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/device1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/resource13
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_device1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_vendor1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/vendor1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/class1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/device1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/resource13
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_device1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_vendor1
-rw-r--r--app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/vendor1
-rw-r--r--app/test/test_ring.c1381
-rwxr-xr-xbuildtools/auto-config-h.sh (renamed from scripts/auto-config-h.sh)0
-rwxr-xr-xbuildtools/gen-build-mk.sh (renamed from scripts/gen-build-mk.sh)0
-rwxr-xr-xbuildtools/gen-config-h.sh (renamed from scripts/gen-config-h.sh)0
-rw-r--r--buildtools/pmdinfogen/Makefile2
-rw-r--r--buildtools/pmdinfogen/pmdinfogen.c1
-rw-r--r--buildtools/pmdinfogen/pmdinfogen.h1
-rwxr-xr-xbuildtools/relpath.sh (renamed from scripts/relpath.sh)2
-rw-r--r--config/common_base188
-rw-r--r--config/common_linuxapp4
-rw-r--r--config/defconfig_arm-armv7a-linuxapp-gcc4
-rw-r--r--config/defconfig_arm64-armv8a-linuxapp-gcc7
-rw-r--r--config/defconfig_arm64-dpaa2-linuxapp-gcc42
-rw-r--r--config/defconfig_arm64-thunderx-linuxapp-gcc10
-rw-r--r--config/defconfig_arm64-xgene1-linuxapp-gcc1
-rw-r--r--config/defconfig_i686-native-linuxapp-gcc10
-rw-r--r--config/defconfig_i686-native-linuxapp-icc10
-rw-r--r--config/defconfig_ppc_64-power8-linuxapp-gcc4
-rw-r--r--config/defconfig_x86_64-native-linuxapp-icc5
-rw-r--r--config/defconfig_x86_x32-native-linuxapp-gcc10
-rwxr-xr-xdevtools/build-tags.sh233
-rwxr-xr-xdevtools/check-git-log.sh (renamed from scripts/check-git-log.sh)38
-rwxr-xr-xdevtools/check-includes.sh (renamed from scripts/check-includes.sh)4
-rwxr-xr-xdevtools/check-maintainers.sh (renamed from scripts/check-maintainers.sh)0
-rwxr-xr-xdevtools/checkpatches.sh (renamed from scripts/checkpatches.sh)7
-rwxr-xr-xdevtools/cocci.sh (renamed from scripts/cocci.sh)2
-rw-r--r--devtools/cocci/mtod-offset.cocci (renamed from scripts/cocci/mtod-offset.cocci)0
-rwxr-xr-xdevtools/git-log-fixes.sh (renamed from scripts/git-log-fixes.sh)17
-rw-r--r--devtools/load-devel-config (renamed from scripts/load-devel-config)0
-rwxr-xr-xdevtools/test-build.sh (renamed from scripts/test-build.sh)18
-rwxr-xr-xdevtools/test-null.sh (renamed from scripts/test-null.sh)0
-rwxr-xr-xdevtools/validate-abi.sh (renamed from scripts/validate-abi.sh)0
-rw-r--r--doc/api/doxy-api-index.md26
-rw-r--r--doc/api/doxy-api.conf12
-rw-r--r--doc/api/examples.dox111
-rw-r--r--doc/build-sdk-quick.txt6
-rw-r--r--doc/guides/conf.py140
-rw-r--r--doc/guides/contributing/coding_style.rst33
-rw-r--r--doc/guides/contributing/design.rst14
-rw-r--r--doc/guides/contributing/documentation.rst11
-rw-r--r--doc/guides/contributing/index.rst1
-rw-r--r--doc/guides/contributing/patches.rst169
-rw-r--r--doc/guides/contributing/stable.rst99
-rw-r--r--doc/guides/contributing/versioning.rst35
-rw-r--r--doc/guides/cryptodevs/aesni_gcm.rst30
-rw-r--r--doc/guides/cryptodevs/aesni_mb.rst45
-rw-r--r--doc/guides/cryptodevs/armv8.rst98
-rw-r--r--doc/guides/cryptodevs/dpaa2_sec.rst232
-rw-r--r--doc/guides/cryptodevs/features/aesni_gcm.ini27
-rw-r--r--doc/guides/cryptodevs/features/aesni_mb.ini41
-rw-r--r--doc/guides/cryptodevs/features/armv8.ini28
-rw-r--r--doc/guides/cryptodevs/features/default.ini70
-rw-r--r--doc/guides/cryptodevs/features/dpaa2_sec.ini34
-rw-r--r--doc/guides/cryptodevs/features/kasumi.ini24
-rw-r--r--doc/guides/cryptodevs/features/null.ini25
-rw-r--r--doc/guides/cryptodevs/features/openssl.ini47
-rw-r--r--doc/guides/cryptodevs/features/qat.ini53
-rw-r--r--doc/guides/cryptodevs/features/snow3g.ini24
-rw-r--r--doc/guides/cryptodevs/features/zuc.ini24
-rw-r--r--doc/guides/cryptodevs/img/scheduler-overview.svg277
-rw-r--r--doc/guides/cryptodevs/index.rst5
-rw-r--r--doc/guides/cryptodevs/kasumi.rst6
-rw-r--r--doc/guides/cryptodevs/null.rst6
-rw-r--r--doc/guides/cryptodevs/openssl.rst6
-rw-r--r--doc/guides/cryptodevs/overview.rst68
-rw-r--r--doc/guides/cryptodevs/qat.rst430
-rw-r--r--doc/guides/cryptodevs/scheduler.rst172
-rw-r--r--doc/guides/cryptodevs/snow3g.rst6
-rw-r--r--doc/guides/cryptodevs/zuc.rst6
-rw-r--r--doc/guides/eventdevs/index.rst41
-rw-r--r--doc/guides/eventdevs/octeontx.rst151
-rw-r--r--doc/guides/eventdevs/sw.rst157
-rw-r--r--doc/guides/faq/faq.rst18
-rw-r--r--doc/guides/freebsd_gsg/build_dpdk.rst2
-rw-r--r--doc/guides/freebsd_gsg/build_sample_apps.rst11
-rw-r--r--doc/guides/freebsd_gsg/install_from_ports.rst2
-rw-r--r--doc/guides/howto/flow_bifurcation.rst4
-rw-r--r--doc/guides/howto/img/pvp_2nics.svg556
-rw-r--r--doc/guides/howto/img/use_models_for_running_dpdk_in_containers.svg398
-rw-r--r--doc/guides/howto/img/vf_daemon_overview.svg440
-rw-r--r--doc/guides/howto/img/virtio_user_as_exceptional_path.svg207
-rw-r--r--doc/guides/howto/img/virtio_user_for_container_networking.svg685
-rw-r--r--doc/guides/howto/index.rst4
-rw-r--r--doc/guides/howto/lm_bond_virtio_sriov.rst10
-rw-r--r--doc/guides/howto/lm_virtio_vhost_user.rst18
-rw-r--r--doc/guides/howto/pvp_reference_benchmark.rst398
-rw-r--r--doc/guides/howto/vfd.rst407
-rw-r--r--doc/guides/howto/virtio_user_as_exceptional_path.rst142
-rw-r--r--doc/guides/howto/virtio_user_for_container_networking.rst144
-rw-r--r--doc/guides/index.rst1
-rw-r--r--doc/guides/linux_gsg/build_dpdk.rst26
-rw-r--r--doc/guides/linux_gsg/build_sample_apps.rst26
-rw-r--r--doc/guides/linux_gsg/nic_perf_intel_platform.rst8
-rw-r--r--doc/guides/linux_gsg/quick_start.rst6
-rw-r--r--doc/guides/linux_gsg/sys_reqs.rst16
-rw-r--r--doc/guides/nics/ark.rst261
-rw-r--r--doc/guides/nics/avp.rst111
-rw-r--r--doc/guides/nics/bnx2x.rst184
-rw-r--r--doc/guides/nics/bnxt.rst4
-rw-r--r--doc/guides/nics/build_and_test.rst179
-rw-r--r--doc/guides/nics/cxgbe.rst88
-rw-r--r--doc/guides/nics/dpaa2.rst594
-rw-r--r--doc/guides/nics/ena.rst69
-rw-r--r--doc/guides/nics/enic.rst128
-rw-r--r--doc/guides/nics/features/ark.ini14
-rw-r--r--doc/guides/nics/features/avp.ini16
-rw-r--r--doc/guides/nics/features/default.ini11
-rw-r--r--doc/guides/nics/features/dpaa2.ini18
-rw-r--r--doc/guides/nics/features/e1000.ini3
-rw-r--r--doc/guides/nics/features/enic.ini2
-rw-r--r--doc/guides/nics/features/i40e.ini5
-rw-r--r--doc/guides/nics/features/i40e_vec.ini3
-rw-r--r--doc/guides/nics/features/i40e_vf.ini2
-rw-r--r--doc/guides/nics/features/i40e_vf_vec.ini2
-rw-r--r--doc/guides/nics/features/igb.ini3
-rw-r--r--doc/guides/nics/features/igb_vf.ini2
-rw-r--r--doc/guides/nics/features/ixgbe.ini5
-rw-r--r--doc/guides/nics/features/ixgbe_vec.ini2
-rw-r--r--doc/guides/nics/features/ixgbe_vf.ini2
-rw-r--r--doc/guides/nics/features/ixgbe_vf_vec.ini2
-rw-r--r--doc/guides/nics/features/kni.ini (renamed from doc/guides/nics/features/mpipe.ini)3
-rw-r--r--doc/guides/nics/features/liquidio.ini28
-rw-r--r--doc/guides/nics/features/mlx4.ini1
-rw-r--r--doc/guides/nics/features/mlx5.ini7
-rw-r--r--doc/guides/nics/features/nfp.ini23
-rw-r--r--doc/guides/nics/features/pcap.ini1
-rw-r--r--doc/guides/nics/features/qede.ini7
-rw-r--r--doc/guides/nics/features/qede_vf.ini2
-rw-r--r--doc/guides/nics/features/sfc_efx.ini34
-rw-r--r--doc/guides/nics/features/tap.ini26
-rw-r--r--doc/guides/nics/features/vhost.ini1
-rw-r--r--doc/guides/nics/features/virtio.ini3
-rw-r--r--doc/guides/nics/features/virtio_vec.ini1
-rw-r--r--doc/guides/nics/i40e.rst150
-rw-r--r--doc/guides/nics/index.rst8
-rw-r--r--doc/guides/nics/intel_vf.rst87
-rw-r--r--doc/guides/nics/ixgbe.rst55
-rw-r--r--doc/guides/nics/kni.rst197
-rw-r--r--doc/guides/nics/liquidio.rst223
-rw-r--r--doc/guides/nics/mlx4.rst17
-rw-r--r--doc/guides/nics/mlx5.rst108
-rw-r--r--doc/guides/nics/nfp.rst151
-rw-r--r--doc/guides/nics/overview.rst22
-rw-r--r--doc/guides/nics/pcap_ring.rst14
-rw-r--r--doc/guides/nics/qede.rst196
-rw-r--r--doc/guides/nics/sfc_efx.rst277
-rw-r--r--doc/guides/nics/szedata2.rst20
-rw-r--r--doc/guides/nics/tap.rst197
-rw-r--r--doc/guides/nics/thunderx.rst207
-rw-r--r--doc/guides/nics/vhost.rst2
-rw-r--r--doc/guides/nics/virtio.rst66
-rw-r--r--doc/guides/prog_guide/cryptodev_lib.rst4
-rw-r--r--doc/guides/prog_guide/dev_kit_build_system.rst2
-rw-r--r--doc/guides/prog_guide/dev_kit_root_make_help.rst28
-rw-r--r--doc/guides/prog_guide/efd_lib.rst455
-rw-r--r--doc/guides/prog_guide/env_abstraction_layer.rst26
-rw-r--r--doc/guides/prog_guide/extend_dpdk.rst18
-rw-r--r--doc/guides/prog_guide/img/efd_i1.svg130
-rw-r--r--doc/guides/prog_guide/img/efd_i10.svg384
-rw-r--r--doc/guides/prog_guide/img/efd_i11.svg319
-rw-r--r--doc/guides/prog_guide/img/efd_i12.svg1008
-rw-r--r--doc/guides/prog_guide/img/efd_i2.svg280
-rw-r--r--doc/guides/prog_guide/img/efd_i3.svg634
-rw-r--r--doc/guides/prog_guide/img/efd_i4.svg203
-rw-r--r--doc/guides/prog_guide/img/efd_i5.svg183
-rw-r--r--doc/guides/prog_guide/img/efd_i6.svg1254
-rw-r--r--doc/guides/prog_guide/img/efd_i7.svg790
-rw-r--r--doc/guides/prog_guide/img/efd_i8.svg182
-rw-r--r--doc/guides/prog_guide/img/efd_i9.svg390
-rw-r--r--doc/guides/prog_guide/index.rst31
-rw-r--r--doc/guides/prog_guide/intro.rst2
-rw-r--r--doc/guides/prog_guide/kernel_nic_interface.rst111
-rw-r--r--doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst14
-rw-r--r--doc/guides/prog_guide/lpm6_lib.rst2
-rw-r--r--doc/guides/prog_guide/mbuf_lib.rst11
-rw-r--r--doc/guides/prog_guide/metrics_lib.rst299
-rw-r--r--doc/guides/prog_guide/multi_proc_support.rst2
-rw-r--r--doc/guides/prog_guide/packet_classif_access_ctrl.rst3
-rw-r--r--doc/guides/prog_guide/packet_distrib_lib.rst4
-rw-r--r--doc/guides/prog_guide/poll_mode_drv.rst207
-rw-r--r--doc/guides/prog_guide/port_hotplug_framework.rst4
-rw-r--r--doc/guides/prog_guide/ring_lib.rst15
-rw-r--r--doc/guides/prog_guide/rte_flow.rst2101
-rw-r--r--doc/guides/prog_guide/source_org.rst1
-rw-r--r--doc/guides/prog_guide/vhost_lib.rst44
-rw-r--r--doc/guides/prog_guide/writing_efficient_code.rst2
-rw-r--r--doc/guides/rel_notes/deprecation.rst155
-rw-r--r--doc/guides/rel_notes/index.rst2
-rw-r--r--doc/guides/rel_notes/known_issues.rst102
-rw-r--r--doc/guides/rel_notes/release_16_11.rst110
-rw-r--r--doc/guides/rel_notes/release_17_02.rst692
-rw-r--r--doc/guides/rel_notes/release_17_05.rst829
-rw-r--r--doc/guides/sample_app_ug/cmd_line.rst2
-rw-r--r--doc/guides/sample_app_ug/dist_app.rst52
-rw-r--r--doc/guides/sample_app_ug/ethtool.rst2
-rw-r--r--doc/guides/sample_app_ug/exception_path.rst4
-rw-r--r--doc/guides/sample_app_ug/hello_world.rst2
-rw-r--r--doc/guides/sample_app_ug/img/dist_app.svg276
-rw-r--r--doc/guides/sample_app_ug/img/server_node_efd.svg1254
-rw-r--r--doc/guides/sample_app_ug/index.rst6
-rw-r--r--doc/guides/sample_app_ug/intel_quickassist.rst221
-rw-r--r--doc/guides/sample_app_ug/ip_frag.rst4
-rw-r--r--doc/guides/sample_app_ug/ip_reassembly.rst4
-rw-r--r--doc/guides/sample_app_ug/ipv4_multicast.rst14
-rw-r--r--doc/guides/sample_app_ug/keep_alive.rst2
-rw-r--r--doc/guides/sample_app_ug/kernel_nic_interface.rst4
-rw-r--r--doc/guides/sample_app_ug/l2_forward_cat.rst6
-rw-r--r--doc/guides/sample_app_ug/l2_forward_crypto.rst19
-rw-r--r--doc/guides/sample_app_ug/l2_forward_job_stats.rst35
-rw-r--r--doc/guides/sample_app_ug/l2_forward_real_virtual.rst32
-rw-r--r--doc/guides/sample_app_ug/l3_forward.rst28
-rw-r--r--doc/guides/sample_app_ug/l3_forward_access_ctrl.rst47
-rw-r--r--doc/guides/sample_app_ug/l3_forward_virtual.rst23
-rw-r--r--doc/guides/sample_app_ug/link_status_intr.rst6
-rw-r--r--doc/guides/sample_app_ug/load_balancer.rst2
-rw-r--r--doc/guides/sample_app_ug/multi_process.rst32
-rw-r--r--doc/guides/sample_app_ug/netmap_compatibility.rst2
-rw-r--r--doc/guides/sample_app_ug/performance_thread.rst24
-rw-r--r--doc/guides/sample_app_ug/ptpclient.rst13
-rw-r--r--doc/guides/sample_app_ug/qos_scheduler.rst6
-rw-r--r--doc/guides/sample_app_ug/quota_watermark.rst182
-rw-r--r--doc/guides/sample_app_ug/rxtx_callbacks.rst2
-rw-r--r--doc/guides/sample_app_ug/server_node_efd.rst494
-rw-r--r--doc/guides/sample_app_ug/skeleton.rst2
-rw-r--r--doc/guides/sample_app_ug/tep_termination.rst16
-rw-r--r--doc/guides/sample_app_ug/test_pipeline.rst2
-rw-r--r--doc/guides/sample_app_ug/timer.rst2
-rw-r--r--doc/guides/sample_app_ug/vhost.rst6
-rw-r--r--doc/guides/sample_app_ug/vm_power_management.rst4
-rw-r--r--doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst2
-rw-r--r--doc/guides/testpmd_app_ug/index.rst2
-rw-r--r--doc/guides/testpmd_app_ug/run_app.rst37
-rw-r--r--doc/guides/testpmd_app_ug/testpmd_funcs.rst928
-rw-r--r--doc/guides/tools/cryptoperf.rst426
-rw-r--r--doc/guides/tools/index.rst1
-rw-r--r--doc/guides/tools/proc_info.rst2
-rw-r--r--doc/guides/xen/pkt_switch.rst12
-rw-r--r--drivers/Makefile6
-rw-r--r--drivers/bus/Makefile (renamed from mk/internal/rte.depdirs-pre.mk)13
-rw-r--r--drivers/bus/fslmc/Makefile75
-rw-r--r--drivers/bus/fslmc/fslmc_bus.c141
-rw-r--r--drivers/bus/fslmc/fslmc_logs.h76
-rw-r--r--drivers/bus/fslmc/fslmc_vfio.c642
-rw-r--r--drivers/bus/fslmc/fslmc_vfio.h82
-rw-r--r--drivers/bus/fslmc/mc/dpbp.c261
-rw-r--r--drivers/bus/fslmc/mc/dpio.c279
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpbp.h241
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpbp_cmd.h88
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpio.h282
-rw-r--r--drivers/bus/fslmc/mc/fsl_dpio_cmd.h122
-rw-r--r--drivers/bus/fslmc/mc/fsl_mc_cmd.h239
-rw-r--r--drivers/bus/fslmc/mc/fsl_mc_sys.h105
-rw-r--r--drivers/bus/fslmc/mc/mc_sys.c114
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c139
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpio.c445
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_dpio.h70
-rw-r--r--drivers/bus/fslmc/portal/dpaa2_hw_pvt.h270
-rw-r--r--drivers/bus/fslmc/qbman/include/compat.h410
-rw-r--r--drivers/bus/fslmc/qbman/include/fsl_qbman_base.h160
-rw-r--r--drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h1093
-rw-r--r--drivers/bus/fslmc/qbman/qbman_portal.c1496
-rw-r--r--drivers/bus/fslmc/qbman/qbman_portal.h277
-rw-r--r--drivers/bus/fslmc/qbman/qbman_private.h174
-rw-r--r--drivers/bus/fslmc/qbman/qbman_sys.h385
-rw-r--r--drivers/bus/fslmc/qbman/qbman_sys_decl.h73
-rw-r--r--drivers/bus/fslmc/rte_bus_fslmc_version.map51
-rw-r--r--drivers/bus/fslmc/rte_fslmc.h148
-rw-r--r--drivers/crypto/Makefile18
-rw-r--r--drivers/crypto/aesni_gcm/Makefile15
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_ops.h95
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_pmd.c403
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c54
-rw-r--r--drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h15
-rw-r--r--drivers/crypto/aesni_mb/Makefile7
-rw-r--r--drivers/crypto/aesni_mb/aesni_mb_ops.h52
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c424
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c56
-rw-r--r--drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h19
-rw-r--r--drivers/crypto/armv8/Makefile65
-rw-r--r--drivers/crypto/armv8/rte_armv8_pmd.c906
-rw-r--r--drivers/crypto/armv8/rte_armv8_pmd_ops.c370
-rw-r--r--drivers/crypto/armv8/rte_armv8_pmd_private.h211
-rw-r--r--drivers/crypto/armv8/rte_armv8_pmd_version.map (renamed from drivers/net/mpipe/rte_pmd_mpipe_version.map)2
-rw-r--r--drivers/crypto/dpaa2_sec/Makefile78
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c1656
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h (renamed from lib/librte_eal/common/include/arch/tile/rte_spinlock.h)90
-rw-r--r--drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h368
-rw-r--r--drivers/crypto/dpaa2_sec/hw/compat.h157
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc.h2601
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc/algo.h465
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc/common.h131
-rw-r--r--drivers/crypto/dpaa2_sec/hw/desc/ipsec.h1547
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta.h954
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h346
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h251
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h207
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h222
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h335
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h402
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h445
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h196
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h599
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h732
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h823
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h208
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h75
-rw-r--r--drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h185
-rw-r--r--drivers/crypto/dpaa2_sec/mc/dpseci.c558
-rw-r--r--drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h746
-rw-r--r--drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h256
-rw-r--r--drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map4
-rw-r--r--drivers/crypto/kasumi/Makefile7
-rw-r--r--drivers/crypto/kasumi/rte_kasumi_pmd.c87
-rw-r--r--drivers/crypto/kasumi/rte_kasumi_pmd_ops.c5
-rw-r--r--drivers/crypto/null/Makefile7
-rw-r--r--drivers/crypto/null/null_crypto_pmd.c69
-rw-r--r--drivers/crypto/null/null_crypto_pmd_ops.c5
-rw-r--r--drivers/crypto/openssl/Makefile7
-rw-r--r--drivers/crypto/openssl/rte_openssl_pmd.c538
-rw-r--r--drivers/crypto/openssl/rte_openssl_pmd_ops.c35
-rw-r--r--drivers/crypto/openssl/rte_openssl_pmd_private.h2
-rw-r--r--drivers/crypto/qat/Makefile7
-rw-r--r--drivers/crypto/qat/qat_adf/adf_transport_access_macros.h11
-rw-r--r--drivers/crypto/qat/qat_adf/qat_algs.h31
-rw-r--r--drivers/crypto/qat/qat_adf/qat_algs_build_desc.c176
-rw-r--r--drivers/crypto/qat/qat_crypto.c968
-rw-r--r--drivers/crypto/qat/qat_crypto.h12
-rw-r--r--drivers/crypto/qat/qat_crypto_capabilities.h574
-rw-r--r--drivers/crypto/qat/qat_qp.c69
-rw-r--r--drivers/crypto/qat/rte_qat_cryptodev.c39
-rw-r--r--drivers/crypto/scheduler/Makefile60
-rw-r--r--drivers/crypto/scheduler/rte_cryptodev_scheduler.c593
-rw-r--r--drivers/crypto/scheduler/rte_cryptodev_scheduler.h334
-rw-r--r--drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h85
-rw-r--r--drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map23
-rw-r--r--drivers/crypto/scheduler/scheduler_failover.c287
-rw-r--r--drivers/crypto/scheduler/scheduler_pkt_size_distr.c464
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd.c456
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd_ops.c571
-rw-r--r--drivers/crypto/scheduler/scheduler_pmd_private.h156
-rw-r--r--drivers/crypto/scheduler/scheduler_roundrobin.c281
-rw-r--r--drivers/crypto/snow3g/Makefile7
-rw-r--r--drivers/crypto/snow3g/rte_snow3g_pmd.c90
-rw-r--r--drivers/crypto/snow3g/rte_snow3g_pmd_ops.c5
-rw-r--r--drivers/crypto/zuc/Makefile7
-rw-r--r--drivers/crypto/zuc/rte_zuc_pmd.c84
-rw-r--r--drivers/crypto/zuc/rte_zuc_pmd_ops.c5
-rw-r--r--drivers/event/Makefile43
-rw-r--r--drivers/event/octeontx/Makefile70
-rw-r--r--drivers/event/octeontx/rte_pmd_octeontx_ssovf.h61
-rw-r--r--drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map9
-rw-r--r--drivers/event/octeontx/ssovf_evdev.c580
-rw-r--r--drivers/event/octeontx/ssovf_evdev.h203
-rw-r--r--drivers/event/octeontx/ssovf_mbox.c232
-rw-r--r--drivers/event/octeontx/ssovf_probe.c288
-rw-r--r--drivers/event/octeontx/ssovf_worker.c252
-rw-r--r--drivers/event/octeontx/ssovf_worker.h139
-rw-r--r--drivers/event/skeleton/Makefile51
-rw-r--r--drivers/event/skeleton/rte_pmd_skeleton_event_version.map4
-rw-r--r--drivers/event/skeleton/skeleton_eventdev.c497
-rw-r--r--drivers/event/skeleton/skeleton_eventdev.h (renamed from lib/librte_eal/common/include/arch/tile/rte_memcpy.h)79
-rw-r--r--drivers/event/sw/Makefile62
-rw-r--r--drivers/event/sw/event_ring.h185
-rw-r--r--drivers/event/sw/iq_ring.h176
-rw-r--r--drivers/event/sw/rte_pmd_evdev_sw_version.map3
-rw-r--r--drivers/event/sw/sw_evdev.c833
-rw-r--r--drivers/event/sw/sw_evdev.h318
-rw-r--r--drivers/event/sw/sw_evdev_scheduler.c601
-rw-r--r--drivers/event/sw/sw_evdev_worker.c185
-rw-r--r--drivers/event/sw/sw_evdev_xstats.c674
-rw-r--r--drivers/mempool/Makefile43
-rw-r--r--drivers/mempool/dpaa2/Makefile63
-rw-r--r--drivers/mempool/dpaa2/dpaa2_hw_mempool.c373
-rw-r--r--drivers/mempool/dpaa2/dpaa2_hw_mempool.h91
-rw-r--r--drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map8
-rw-r--r--drivers/mempool/ring/Makefile48
-rw-r--r--drivers/mempool/ring/rte_mempool_ring.c (renamed from lib/librte_mempool/rte_mempool_ring.c)12
-rw-r--r--drivers/mempool/ring/rte_mempool_ring_version.map4
-rw-r--r--drivers/mempool/stack/Makefile51
-rw-r--r--drivers/mempool/stack/rte_mempool_stack.c (renamed from lib/librte_mempool/rte_mempool_stack.c)0
-rw-r--r--drivers/mempool/stack/rte_mempool_stack_version.map4
-rw-r--r--drivers/net/Makefile50
-rw-r--r--drivers/net/af_packet/Makefile7
-rw-r--r--drivers/net/af_packet/rte_eth_af_packet.c176
-rw-r--r--drivers/net/ark/Makefile66
-rw-r--r--drivers/net/ark/ark_ddm.c151
-rw-r--r--drivers/net/ark/ark_ddm.h177
-rw-r--r--drivers/net/ark/ark_ethdev.c1001
-rw-r--r--drivers/net/ark/ark_ethdev.h41
-rw-r--r--drivers/net/ark/ark_ethdev_rx.c673
-rw-r--r--drivers/net/ark/ark_ethdev_rx.h65
-rw-r--r--drivers/net/ark/ark_ethdev_tx.c468
-rw-r--r--drivers/net/ark/ark_ethdev_tx.h59
-rw-r--r--drivers/net/ark/ark_ext.h115
-rw-r--r--drivers/net/ark/ark_global.h161
-rw-r--r--drivers/net/ark/ark_logs.h119
-rw-r--r--drivers/net/ark/ark_mpu.c181
-rw-r--r--drivers/net/ark/ark_mpu.h154
-rw-r--r--drivers/net/ark/ark_pktchkr.c474
-rw-r--r--drivers/net/ark/ark_pktchkr.h117
-rw-r--r--drivers/net/ark/ark_pktdir.c80
-rw-r--r--drivers/net/ark/ark_pktdir.h70
-rw-r--r--drivers/net/ark/ark_pktgen.c496
-rw-r--r--drivers/net/ark/ark_pktgen.h108
-rw-r--r--drivers/net/ark/ark_rqp.c97
-rw-r--r--drivers/net/ark/ark_rqp.h86
-rw-r--r--drivers/net/ark/ark_udm.c226
-rw-r--r--drivers/net/ark/ark_udm.h192
-rw-r--r--drivers/net/ark/rte_pmd_ark_version.map4
-rw-r--r--drivers/net/avp/Makefile57
-rw-r--r--drivers/net/avp/avp_ethdev.c2312
-rw-r--r--drivers/net/avp/avp_logs.h59
-rw-r--r--drivers/net/avp/rte_avp_common.h432
-rw-r--r--drivers/net/avp/rte_avp_fifo.h169
-rw-r--r--drivers/net/avp/rte_pmd_avp_version.map4
-rw-r--r--drivers/net/bnx2x/Makefile4
-rw-r--r--drivers/net/bnx2x/bnx2x.c4
-rw-r--r--drivers/net/bnx2x/bnx2x.h32
-rw-r--r--drivers/net/bnx2x/bnx2x_ethdev.c93
-rw-r--r--drivers/net/bnx2x/bnx2x_rxtx.c4
-rw-r--r--drivers/net/bnx2x/bnx2x_rxtx.h2
-rw-r--r--drivers/net/bnx2x/ecore_sp.c2
-rw-r--r--drivers/net/bnx2x/elink.c3
-rw-r--r--drivers/net/bnxt/Makefile6
-rw-r--r--drivers/net/bnxt/bnxt_cpr.h13
-rw-r--r--drivers/net/bnxt/bnxt_ethdev.c117
-rw-r--r--drivers/net/bnxt/bnxt_hwrm.c8
-rw-r--r--drivers/net/bnxt/bnxt_irq.c3
-rw-r--r--drivers/net/bnxt/bnxt_ring.c16
-rw-r--r--drivers/net/bnxt/bnxt_txr.c2
-rw-r--r--drivers/net/bnxt/bnxt_txr.h6
-rw-r--r--drivers/net/bonding/Makefile9
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad.c13
-rw-r--r--drivers/net/bonding/rte_eth_bond_api.c185
-rw-r--r--drivers/net/bonding/rte_eth_bond_args.c14
-rw-r--r--drivers/net/bonding/rte_eth_bond_pmd.c240
-rw-r--r--drivers/net/bonding/rte_eth_bond_private.h5
-rw-r--r--drivers/net/cxgbe/Makefile5
-rw-r--r--drivers/net/cxgbe/base/adapter.h44
-rw-r--r--drivers/net/cxgbe/base/t4_hw.c3
-rw-r--r--drivers/net/cxgbe/cxgbe_compat.h8
-rw-r--r--drivers/net/cxgbe/cxgbe_ethdev.c36
-rw-r--r--drivers/net/cxgbe/cxgbe_main.c8
-rw-r--r--drivers/net/cxgbe/sge.c22
-rw-r--r--drivers/net/dpaa2/Makefile (renamed from config/defconfig_tile-tilegx-linuxapp-gcc)73
-rw-r--r--drivers/net/dpaa2/base/dpaa2_hw_dpni.c344
-rw-r--r--drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h257
-rw-r--r--drivers/net/dpaa2/dpaa2_ethdev.c1035
-rw-r--r--drivers/net/dpaa2/dpaa2_ethdev.h83
-rw-r--r--drivers/net/dpaa2/dpaa2_rxtx.c422
-rw-r--r--drivers/net/dpaa2/mc/dpni.c739
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpkg.h184
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpni.h1217
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpni_cmd.h334
-rw-r--r--drivers/net/dpaa2/mc/fsl_net.h487
-rw-r--r--drivers/net/dpaa2/rte_pmd_dpaa2_version.map4
-rw-r--r--drivers/net/e1000/Makefile8
-rw-r--r--drivers/net/e1000/base/README25
-rw-r--r--drivers/net/e1000/base/e1000_82575.c1
-rw-r--r--drivers/net/e1000/base/e1000_82575.h1
-rw-r--r--drivers/net/e1000/base/e1000_api.c19
-rw-r--r--drivers/net/e1000/base/e1000_defines.h9
-rw-r--r--drivers/net/e1000/base/e1000_hw.h21
-rw-r--r--drivers/net/e1000/base/e1000_ich8lan.c865
-rw-r--r--drivers/net/e1000/base/e1000_ich8lan.h21
-rw-r--r--drivers/net/e1000/base/e1000_mbx.c36
-rw-r--r--drivers/net/e1000/base/e1000_nvm.c1
-rw-r--r--drivers/net/e1000/base/e1000_osdep.h18
-rw-r--r--drivers/net/e1000/base/e1000_regs.h7
-rw-r--r--drivers/net/e1000/base/e1000_vf.c3
-rw-r--r--drivers/net/e1000/e1000_ethdev.h21
-rw-r--r--drivers/net/e1000/em_ethdev.c149
-rw-r--r--drivers/net/e1000/em_rxtx.c106
-rw-r--r--drivers/net/e1000/igb_ethdev.c362
-rw-r--r--drivers/net/e1000/igb_pf.c8
-rw-r--r--drivers/net/e1000/igb_rxtx.c239
-rw-r--r--drivers/net/ena/Makefile5
-rw-r--r--drivers/net/ena/base/ena_com.c6
-rw-r--r--drivers/net/ena/base/ena_plat_dpdk.h15
-rw-r--r--drivers/net/ena/ena_ethdev.c162
-rw-r--r--drivers/net/ena/ena_ethdev.h1
-rw-r--r--drivers/net/enic/Makefile6
-rw-r--r--drivers/net/enic/enic.h5
-rw-r--r--drivers/net/enic/enic_compat.h27
-rw-r--r--drivers/net/enic/enic_ethdev.c51
-rw-r--r--drivers/net/enic/enic_main.c33
-rw-r--r--drivers/net/enic/enic_rxtx.c132
-rw-r--r--drivers/net/fm10k/Makefile10
-rw-r--r--drivers/net/fm10k/base/fm10k_common.c11
-rw-r--r--drivers/net/fm10k/base/fm10k_mbx.c10
-rw-r--r--drivers/net/fm10k/base/fm10k_mbx.h2
-rw-r--r--drivers/net/fm10k/base/fm10k_osdep.h34
-rw-r--r--drivers/net/fm10k/base/fm10k_pf.c140
-rw-r--r--drivers/net/fm10k/base/fm10k_pf.h2
-rw-r--r--drivers/net/fm10k/base/fm10k_tlv.c16
-rw-r--r--drivers/net/fm10k/base/fm10k_type.h49
-rw-r--r--drivers/net/fm10k/base/fm10k_vf.c24
-rw-r--r--drivers/net/fm10k/fm10k.h10
-rw-r--r--drivers/net/fm10k/fm10k_ethdev.c194
-rw-r--r--drivers/net/fm10k/fm10k_rxtx.c56
-rw-r--r--drivers/net/fm10k/fm10k_rxtx_vec.c29
-rw-r--r--drivers/net/i40e/Makefile16
-rw-r--r--drivers/net/i40e/base/README59
-rw-r--r--drivers/net/i40e/base/i40e_adminq.c8
-rw-r--r--drivers/net/i40e/base/i40e_adminq.h4
-rw-r--r--drivers/net/i40e/base/i40e_adminq_cmd.h180
-rw-r--r--drivers/net/i40e/base/i40e_common.c802
-rw-r--r--drivers/net/i40e/base/i40e_dcb.c2
-rw-r--r--drivers/net/i40e/base/i40e_devids.h3
-rw-r--r--drivers/net/i40e/base/i40e_lan_hmc.c5
-rw-r--r--drivers/net/i40e/base/i40e_nvm.c52
-rw-r--r--drivers/net/i40e/base/i40e_osdep.h10
-rw-r--r--drivers/net/i40e/base/i40e_prototype.h59
-rw-r--r--drivers/net/i40e/base/i40e_register.h2
-rw-r--r--drivers/net/i40e/base/i40e_type.h174
-rw-r--r--drivers/net/i40e/base/i40e_virtchnl.h7
-rw-r--r--drivers/net/i40e/i40e_ethdev.c1852
-rw-r--r--drivers/net/i40e/i40e_ethdev.h283
-rw-r--r--drivers/net/i40e/i40e_ethdev_vf.c235
-rw-r--r--drivers/net/i40e/i40e_fdir.c138
-rw-r--r--drivers/net/i40e/i40e_flow.c2258
-rw-r--r--drivers/net/i40e/i40e_logs.h17
-rw-r--r--drivers/net/i40e/i40e_pf.c476
-rw-r--r--drivers/net/i40e/i40e_pf.h4
-rw-r--r--drivers/net/i40e/i40e_rxtx.c272
-rw-r--r--drivers/net/i40e/i40e_rxtx.h29
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_altivec.c645
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_common.h23
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_neon.c91
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_sse.c92
-rw-r--r--drivers/net/i40e/rte_pmd_i40e.c1937
-rw-r--r--drivers/net/i40e/rte_pmd_i40e.h590
-rw-r--r--drivers/net/i40e/rte_pmd_i40e_version.map36
-rw-r--r--drivers/net/ixgbe/Makefile11
-rw-r--r--drivers/net/ixgbe/base/README5
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82598.c4
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82598.h2
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82599.c4
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82599.h2
-rw-r--r--drivers/net/ixgbe/base/ixgbe_api.c10
-rw-r--r--drivers/net/ixgbe/base/ixgbe_api.h4
-rw-r--r--drivers/net/ixgbe/base/ixgbe_common.c68
-rw-r--r--drivers/net/ixgbe/base/ixgbe_common.h7
-rw-r--r--drivers/net/ixgbe/base/ixgbe_hv_vf.c240
-rw-r--r--drivers/net/ixgbe/base/ixgbe_hv_vf.h41
-rw-r--r--drivers/net/ixgbe/base/ixgbe_mbx.h8
-rw-r--r--drivers/net/ixgbe/base/ixgbe_osdep.h15
-rw-r--r--drivers/net/ixgbe/base/ixgbe_phy.c207
-rw-r--r--drivers/net/ixgbe/base/ixgbe_phy.h74
-rw-r--r--drivers/net/ixgbe/base/ixgbe_type.h143
-rw-r--r--drivers/net/ixgbe/base/ixgbe_vf.c20
-rw-r--r--drivers/net/ixgbe/base/ixgbe_vf.h3
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x540.c33
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x540.h2
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x550.c1148
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x550.h6
-rw-r--r--drivers/net/ixgbe/ixgbe_ethdev.c1973
-rw-r--r--drivers/net/ixgbe/ixgbe_ethdev.h287
-rw-r--r--drivers/net/ixgbe/ixgbe_fdir.c415
-rw-r--r--drivers/net/ixgbe/ixgbe_flow.c2778
-rw-r--r--drivers/net/ixgbe/ixgbe_pf.c44
-rw-r--r--drivers/net/ixgbe/ixgbe_regs.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.c293
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_common.h13
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c24
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c102
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe.c910
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe.h245
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe_version.map23
-rw-r--r--drivers/net/kni/Makefile56
-rw-r--r--drivers/net/kni/rte_eth_kni.c510
-rw-r--r--drivers/net/kni/rte_pmd_kni_version.map4
-rw-r--r--drivers/net/liquidio/Makefile58
-rw-r--r--drivers/net/liquidio/base/lio_23xx_reg.h194
-rw-r--r--drivers/net/liquidio/base/lio_23xx_vf.c588
-rw-r--r--drivers/net/liquidio/base/lio_23xx_vf.h97
-rw-r--r--drivers/net/liquidio/base/lio_hw_defs.h249
-rw-r--r--drivers/net/liquidio/base/lio_mbox.c275
-rw-r--r--drivers/net/liquidio/base/lio_mbox.h131
-rw-r--r--drivers/net/liquidio/lio_ethdev.c2058
-rw-r--r--drivers/net/liquidio/lio_ethdev.h205
-rw-r--r--drivers/net/liquidio/lio_logs.h91
-rw-r--r--drivers/net/liquidio/lio_rxtx.c1885
-rw-r--r--drivers/net/liquidio/lio_rxtx.h769
-rw-r--r--drivers/net/liquidio/lio_struct.h689
-rw-r--r--drivers/net/liquidio/rte_pmd_lio_version.map4
-rw-r--r--drivers/net/mlx4/Makefile11
-rw-r--r--drivers/net/mlx4/mlx4.c772
-rw-r--r--drivers/net/mlx4/mlx4.h198
-rw-r--r--drivers/net/mlx4/mlx4_flow.c1090
-rw-r--r--drivers/net/mlx4/mlx4_flow.h102
-rw-r--r--drivers/net/mlx5/Makefile15
-rw-r--r--drivers/net/mlx5/mlx5.c223
-rw-r--r--drivers/net/mlx5/mlx5.h57
-rw-r--r--drivers/net/mlx5/mlx5_defs.h14
-rw-r--r--drivers/net/mlx5/mlx5_ethdev.c126
-rw-r--r--drivers/net/mlx5/mlx5_fdir.c15
-rw-r--r--drivers/net/mlx5/mlx5_flow.c1586
-rw-r--r--drivers/net/mlx5/mlx5_mac.c16
-rw-r--r--drivers/net/mlx5/mlx5_prm.h124
-rw-r--r--drivers/net/mlx5/mlx5_rss.c18
-rw-r--r--drivers/net/mlx5/mlx5_rxq.c239
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.c1097
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.h30
-rw-r--r--drivers/net/mlx5/mlx5_stats.c365
-rw-r--r--drivers/net/mlx5/mlx5_trigger.c32
-rw-r--r--drivers/net/mlx5/mlx5_txq.c162
-rw-r--r--drivers/net/mpipe/Makefile47
-rw-r--r--drivers/net/mpipe/mpipe_tilegx.c1655
-rw-r--r--drivers/net/nfp/Makefile5
-rw-r--r--drivers/net/nfp/nfp_net.c471
-rw-r--r--drivers/net/nfp/nfp_net_ctrl.h12
-rw-r--r--drivers/net/nfp/nfp_net_pmd.h16
-rw-r--r--drivers/net/null/Makefile14
-rw-r--r--drivers/net/null/rte_eth_null.c77
-rw-r--r--drivers/net/null/rte_pmd_null_version.map7
-rw-r--r--drivers/net/pcap/Makefile7
-rw-r--r--drivers/net/pcap/rte_eth_pcap.c81
-rw-r--r--drivers/net/qede/Makefile41
-rw-r--r--drivers/net/qede/base/bcm_osal.c4
-rw-r--r--drivers/net/qede/base/bcm_osal.h40
-rw-r--r--drivers/net/qede/base/common_hsi.h196
-rw-r--r--drivers/net/qede/base/ecore.h193
-rw-r--r--drivers/net/qede/base/ecore_chain.h142
-rw-r--r--drivers/net/qede/base/ecore_cxt.c409
-rw-r--r--drivers/net/qede/base/ecore_cxt.h68
-rw-r--r--drivers/net/qede/base/ecore_cxt_api.h24
-rw-r--r--drivers/net/qede/base/ecore_dcbx.c456
-rw-r--r--drivers/net/qede/base/ecore_dcbx.h10
-rw-r--r--drivers/net/qede/base/ecore_dcbx_api.h5
-rw-r--r--drivers/net/qede/base/ecore_dev.c2342
-rw-r--r--drivers/net/qede/base/ecore_dev_api.h127
-rw-r--r--drivers/net/qede/base/ecore_gtt_reg_addr.h20
-rw-r--r--drivers/net/qede/base/ecore_hsi_common.h911
-rw-r--r--drivers/net/qede/base/ecore_hsi_debug_tools.h221
-rw-r--r--drivers/net/qede/base/ecore_hsi_eth.h2079
-rw-r--r--drivers/net/qede/base/ecore_hsi_init_tool.h90
-rw-r--r--drivers/net/qede/base/ecore_hw.c55
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.c1454
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.h172
-rw-r--r--drivers/net/qede/base/ecore_init_ops.c26
-rw-r--r--drivers/net/qede/base/ecore_int.c171
-rw-r--r--drivers/net/qede/base/ecore_int.h10
-rw-r--r--drivers/net/qede/base/ecore_int_api.h51
-rw-r--r--drivers/net/qede/base/ecore_iov_api.h69
-rw-r--r--drivers/net/qede/base/ecore_iro.h8
-rw-r--r--drivers/net/qede/base/ecore_iro_values.h32
-rw-r--r--drivers/net/qede/base/ecore_l2.c892
-rw-r--r--drivers/net/qede/base/ecore_l2.h149
-rw-r--r--drivers/net/qede/base/ecore_l2_api.h137
-rw-r--r--drivers/net/qede/base/ecore_mcp.c1099
-rw-r--r--drivers/net/qede/base/ecore_mcp.h192
-rw-r--r--drivers/net/qede/base/ecore_mcp_api.h349
-rw-r--r--drivers/net/qede/base/ecore_mng_tlv.c1535
-rw-r--r--drivers/net/qede/base/ecore_proto_if.h16
-rw-r--r--drivers/net/qede/base/ecore_rt_defs.h623
-rw-r--r--drivers/net/qede/base/ecore_sp_api.h19
-rw-r--r--drivers/net/qede/base/ecore_sp_commands.c376
-rw-r--r--drivers/net/qede/base/ecore_sp_commands.h23
-rw-r--r--drivers/net/qede/base/ecore_spq.c108
-rw-r--r--drivers/net/qede/base/ecore_spq.h36
-rw-r--r--drivers/net/qede/base/ecore_sriov.c1221
-rw-r--r--drivers/net/qede/base/ecore_sriov.h28
-rw-r--r--drivers/net/qede/base/ecore_status.h1
-rw-r--r--drivers/net/qede/base/ecore_utils.h6
-rw-r--r--drivers/net/qede/base/ecore_vf.c418
-rw-r--r--drivers/net/qede/base/ecore_vf.h85
-rw-r--r--drivers/net/qede/base/ecore_vf_api.h21
-rw-r--r--drivers/net/qede/base/ecore_vfpf_if.h55
-rw-r--r--drivers/net/qede/base/eth_common.h36
-rw-r--r--drivers/net/qede/base/mcp_public.h377
-rw-r--r--drivers/net/qede/base/nvm_cfg.h541
-rw-r--r--drivers/net/qede/base/reg_addr.h59
-rw-r--r--drivers/net/qede/qede_eth_if.c107
-rw-r--r--drivers/net/qede/qede_eth_if.h35
-rw-r--r--drivers/net/qede/qede_ethdev.c1137
-rw-r--r--drivers/net/qede/qede_ethdev.h134
-rw-r--r--drivers/net/qede/qede_fdir.c469
-rw-r--r--drivers/net/qede/qede_if.h80
-rw-r--r--drivers/net/qede/qede_logs.h16
-rw-r--r--drivers/net/qede/qede_main.c181
-rw-r--r--drivers/net/qede/qede_rxtx.c1298
-rw-r--r--drivers/net/qede/qede_rxtx.h102
-rw-r--r--drivers/net/ring/Makefile5
-rw-r--r--drivers/net/ring/rte_eth_ring.c26
-rw-r--r--drivers/net/sfc/Makefile143
-rw-r--r--drivers/net/sfc/base/README36
-rw-r--r--drivers/net/sfc/base/ef10_ev.c1401
-rw-r--r--drivers/net/sfc/base/ef10_filter.c1501
-rw-r--r--drivers/net/sfc/base/ef10_impl.h1183
-rw-r--r--drivers/net/sfc/base/ef10_intr.c197
-rw-r--r--drivers/net/sfc/base/ef10_mac.c897
-rw-r--r--drivers/net/sfc/base/ef10_mcdi.c342
-rw-r--r--drivers/net/sfc/base/ef10_nic.c1780
-rw-r--r--drivers/net/sfc/base/ef10_nvram.c2385
-rw-r--r--drivers/net/sfc/base/ef10_phy.c631
-rw-r--r--drivers/net/sfc/base/ef10_rx.c965
-rw-r--r--drivers/net/sfc/base/ef10_tlv_layout.h941
-rw-r--r--drivers/net/sfc/base/ef10_tx.c710
-rw-r--r--drivers/net/sfc/base/ef10_vpd.c463
-rw-r--r--drivers/net/sfc/base/efx.h2535
-rw-r--r--drivers/net/sfc/base/efx_bootcfg.c563
-rw-r--r--drivers/net/sfc/base/efx_check.h346
-rw-r--r--drivers/net/sfc/base/efx_crc32.c122
-rw-r--r--drivers/net/sfc/base/efx_ev.c1470
-rw-r--r--drivers/net/sfc/base/efx_filter.c1424
-rw-r--r--drivers/net/sfc/base/efx_hash.c328
-rw-r--r--drivers/net/sfc/base/efx_impl.h1208
-rw-r--r--drivers/net/sfc/base/efx_intr.c572
-rw-r--r--drivers/net/sfc/base/efx_lic.c1751
-rw-r--r--drivers/net/sfc/base/efx_mac.c951
-rw-r--r--drivers/net/sfc/base/efx_mcdi.c2346
-rw-r--r--drivers/net/sfc/base/efx_mcdi.h415
-rw-r--r--drivers/net/sfc/base/efx_mon.c255
-rw-r--r--drivers/net/sfc/base/efx_nic.c1110
-rw-r--r--drivers/net/sfc/base/efx_nvram.c1044
-rw-r--r--drivers/net/sfc/base/efx_phy.c561
-rw-r--r--drivers/net/sfc/base/efx_phy_ids.h51
-rw-r--r--drivers/net/sfc/base/efx_port.c252
-rw-r--r--drivers/net/sfc/base/efx_regs.h3870
-rw-r--r--drivers/net/sfc/base/efx_regs_ef10.h571
-rw-r--r--drivers/net/sfc/base/efx_regs_mcdi.h15690
-rw-r--r--drivers/net/sfc/base/efx_regs_pci.h2356
-rw-r--r--drivers/net/sfc/base/efx_rx.c1315
-rw-r--r--drivers/net/sfc/base/efx_sram.c331
-rw-r--r--drivers/net/sfc/base/efx_tx.c1097
-rw-r--r--drivers/net/sfc/base/efx_types.h1647
-rw-r--r--drivers/net/sfc/base/efx_vpd.c1016
-rw-r--r--drivers/net/sfc/base/hunt_impl.h74
-rw-r--r--drivers/net/sfc/base/hunt_nic.c402
-rw-r--r--drivers/net/sfc/base/mcdi_mon.c565
-rw-r--r--drivers/net/sfc/base/mcdi_mon.h74
-rw-r--r--drivers/net/sfc/base/medford_impl.h67
-rw-r--r--drivers/net/sfc/base/medford_nic.c402
-rw-r--r--drivers/net/sfc/base/siena_flash.h215
-rw-r--r--drivers/net/sfc/base/siena_impl.h431
-rw-r--r--drivers/net/sfc/base/siena_mac.c476
-rw-r--r--drivers/net/sfc/base/siena_mcdi.c263
-rw-r--r--drivers/net/sfc/base/siena_nic.c585
-rw-r--r--drivers/net/sfc/base/siena_nvram.c734
-rw-r--r--drivers/net/sfc/base/siena_phy.c797
-rw-r--r--drivers/net/sfc/base/siena_sram.c178
-rw-r--r--drivers/net/sfc/base/siena_vpd.c618
-rw-r--r--drivers/net/sfc/efsys.h780
-rw-r--r--drivers/net/sfc/rte_pmd_sfc_efx_version.map4
-rw-r--r--drivers/net/sfc/sfc.c750
-rw-r--r--drivers/net/sfc/sfc.h322
-rw-r--r--drivers/net/sfc/sfc_debug.h59
-rw-r--r--drivers/net/sfc/sfc_dp.c100
-rw-r--r--drivers/net/sfc/sfc_dp.h125
-rw-r--r--drivers/net/sfc/sfc_dp_rx.h197
-rw-r--r--drivers/net/sfc/sfc_dp_tx.h170
-rw-r--r--drivers/net/sfc/sfc_ef10.h107
-rw-r--r--drivers/net/sfc/sfc_ef10_rx.c712
-rw-r--r--drivers/net/sfc/sfc_ef10_tx.c560
-rw-r--r--drivers/net/sfc/sfc_ethdev.c1642
-rw-r--r--drivers/net/sfc/sfc_ev.c921
-rw-r--r--drivers/net/sfc/sfc_ev.h129
-rw-r--r--drivers/net/sfc/sfc_filter.c137
-rw-r--r--drivers/net/sfc/sfc_filter.h62
-rw-r--r--drivers/net/sfc/sfc_flow.c1175
-rw-r--r--drivers/net/sfc/sfc_flow.h64
-rw-r--r--drivers/net/sfc/sfc_intr.c342
-rw-r--r--drivers/net/sfc/sfc_kvargs.c145
-rw-r--r--drivers/net/sfc/sfc_kvargs.h93
-rw-r--r--drivers/net/sfc/sfc_log.h76
-rw-r--r--drivers/net/sfc/sfc_mcdi.c331
-rw-r--r--drivers/net/sfc/sfc_port.c475
-rw-r--r--drivers/net/sfc/sfc_rx.c1327
-rw-r--r--drivers/net/sfc/sfc_rx.h180
-rw-r--r--drivers/net/sfc/sfc_tso.c201
-rw-r--r--drivers/net/sfc/sfc_tweak.h56
-rw-r--r--drivers/net/sfc/sfc_tx.c992
-rw-r--r--drivers/net/sfc/sfc_tx.h164
-rw-r--r--drivers/net/szedata2/Makefile7
-rw-r--r--drivers/net/szedata2/rte_eth_szedata2.c104
-rw-r--r--drivers/net/szedata2/rte_eth_szedata2.h60
-rw-r--r--[-rwxr-xr-x]drivers/net/tap/Makefile (renamed from scripts/depdirs-rule.sh)120
-rw-r--r--drivers/net/tap/rte_eth_tap.c1394
-rw-r--r--drivers/net/tap/rte_eth_tap.h100
-rw-r--r--drivers/net/tap/rte_pmd_tap_version.map4
-rw-r--r--drivers/net/tap/tap_flow.c1507
-rw-r--r--drivers/net/tap/tap_flow.h82
-rw-r--r--drivers/net/tap/tap_netlink.c367
-rw-r--r--drivers/net/tap/tap_netlink.h69
-rw-r--r--drivers/net/tap/tap_tcmsgs.c323
-rw-r--r--drivers/net/tap/tap_tcmsgs.h61
-rw-r--r--drivers/net/thunderx/Makefile4
-rw-r--r--drivers/net/thunderx/base/nicvf_bsvf.c12
-rw-r--r--drivers/net/thunderx/base/nicvf_bsvf.h2
-rw-r--r--drivers/net/thunderx/base/nicvf_hw_defs.h58
-rw-r--r--drivers/net/thunderx/base/nicvf_mbox.c9
-rw-r--r--drivers/net/thunderx/base/nicvf_mbox.h11
-rw-r--r--drivers/net/thunderx/base/nicvf_plat.h40
-rw-r--r--drivers/net/thunderx/nicvf_ethdev.c99
-rw-r--r--drivers/net/thunderx/nicvf_rxtx.c46
-rw-r--r--drivers/net/thunderx/nicvf_rxtx.h27
-rw-r--r--drivers/net/thunderx/nicvf_struct.h23
-rw-r--r--drivers/net/vhost/Makefile8
-rw-r--r--drivers/net/vhost/rte_eth_vhost.c314
-rw-r--r--drivers/net/vhost/rte_eth_vhost.h32
-rw-r--r--drivers/net/vhost/rte_pmd_vhost_version.map3
-rw-r--r--drivers/net/virtio/Makefile8
-rw-r--r--drivers/net/virtio/virtio_ethdev.c364
-rw-r--r--drivers/net/virtio/virtio_ethdev.h8
-rw-r--r--drivers/net/virtio/virtio_pci.c251
-rw-r--r--drivers/net/virtio/virtio_pci.h16
-rw-r--r--drivers/net/virtio/virtio_rxtx.c41
-rw-r--r--drivers/net/virtio/virtio_rxtx_simple.c5
-rw-r--r--drivers/net/virtio/virtio_rxtx_simple.h6
-rw-r--r--drivers/net/virtio/virtio_rxtx_simple_neon.c6
-rw-r--r--drivers/net/virtio/virtio_rxtx_simple_sse.c6
-rw-r--r--drivers/net/virtio/virtio_user/vhost.h51
-rw-r--r--drivers/net/virtio/virtio_user/vhost_kernel.c403
-rw-r--r--drivers/net/virtio/virtio_user/vhost_kernel_tap.c133
-rw-r--r--drivers/net/virtio/virtio_user/vhost_kernel_tap.h67
-rw-r--r--drivers/net/virtio/virtio_user/vhost_user.c98
-rw-r--r--drivers/net/virtio/virtio_user/virtio_user_dev.c246
-rw-r--r--drivers/net/virtio/virtio_user/virtio_user_dev.h20
-rw-r--r--drivers/net/virtio/virtio_user_ethdev.c175
-rw-r--r--drivers/net/virtio/virtqueue.c11
-rw-r--r--drivers/net/virtio/virtqueue.h24
-rw-r--r--drivers/net/vmxnet3/Makefile5
-rw-r--r--drivers/net/vmxnet3/base/vmxnet3_defs.h85
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.c203
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.h38
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ring.h16
-rw-r--r--drivers/net/vmxnet3/vmxnet3_rxtx.c148
-rw-r--r--drivers/net/xenvirt/Makefile6
-rw-r--r--drivers/net/xenvirt/rte_eth_xenvirt.c19
-rw-r--r--examples/Makefile7
-rw-r--r--examples/bond/Makefile4
-rw-r--r--examples/bond/main.c2
-rw-r--r--examples/distributor/main.c402
-rw-r--r--examples/dpdk_qat/Makefile93
-rw-r--r--examples/dpdk_qat/config_files/coleto/dh895xcc_qa_dev0.conf65
-rw-r--r--examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev0.conf293
-rw-r--r--examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev1.conf292
-rw-r--r--examples/dpdk_qat/config_files/stargo/dh89xxcc_qa_dev0.conf235
-rw-r--r--examples/dpdk_qat/crypto.c943
-rw-r--r--examples/dpdk_qat/main.c821
-rw-r--r--examples/ethtool/Makefile3
-rw-r--r--examples/ethtool/ethtool-app/Makefile5
-rw-r--r--examples/ethtool/ethtool-app/ethapp.c2
-rw-r--r--examples/ethtool/ethtool-app/main.c1
-rw-r--r--examples/ethtool/lib/Makefile8
-rw-r--r--examples/ethtool/lib/rte_ethtool.c21
-rw-r--r--examples/exception_path/Makefile1
-rw-r--r--examples/exception_path/main.c2
-rw-r--r--examples/ip_fragmentation/main.c103
-rw-r--r--examples/ip_pipeline/Makefile2
-rwxr-xr-xexamples/ip_pipeline/config/diagram-generator.py13
-rwxr-xr-xexamples/ip_pipeline/config/pipeline-to-core-mapping.py11
-rw-r--r--examples/ip_pipeline/config_parse.c6
-rw-r--r--examples/ip_pipeline/init.c27
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_firewall_be.c6
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_routing.c20
-rw-r--r--examples/ip_reassembly/main.c27
-rw-r--r--examples/ipsec-secgw/esp.c4
-rw-r--r--examples/ipsec-secgw/ipsec-secgw.c6
-rw-r--r--examples/ipsec-secgw/ipsec.c17
-rw-r--r--examples/ipsec-secgw/ipsec.h2
-rw-r--r--examples/ipsec-secgw/sa.c6
-rw-r--r--examples/ipv4_multicast/main.c12
-rw-r--r--examples/kni/main.c2
-rw-r--r--examples/l2fwd-cat/cat.c2
-rw-r--r--examples/l2fwd-crypto/main.c180
-rw-r--r--examples/l2fwd-jobstats/main.c4
-rw-r--r--examples/l2fwd-keepalive/main.c35
-rw-r--r--examples/l2fwd-keepalive/shm.c10
-rw-r--r--examples/l2fwd-keepalive/shm.h9
-rw-r--r--examples/l2fwd/main.c34
-rw-r--r--examples/l3fwd-acl/main.c4
-rw-r--r--examples/l3fwd-power/main.c131
-rw-r--r--examples/l3fwd-vf/main.c4
-rw-r--r--examples/l3fwd/l3fwd_lpm.h2
-rw-r--r--examples/l3fwd/l3fwd_lpm_sse.h24
-rw-r--r--examples/l3fwd/main.c173
-rw-r--r--examples/link_status_interrupt/main.c4
-rw-r--r--examples/load_balancer/config.c2
-rw-r--r--examples/load_balancer/init.c2
-rw-r--r--examples/load_balancer/runtime.c38
-rw-r--r--examples/multi_process/client_server_mp/mp_client/client.c9
-rw-r--r--examples/multi_process/client_server_mp/mp_server/main.c2
-rw-r--r--examples/multi_process/l2fwd_fork/main.c19
-rw-r--r--examples/multi_process/symmetric_mp/main.c4
-rw-r--r--examples/netmap_compat/bridge/Makefile1
-rw-r--r--examples/netmap_compat/bridge/bridge.c2
-rw-r--r--examples/packet_ordering/main.c21
-rw-r--r--examples/performance-thread/common/arch/x86/ctx.h8
-rw-r--r--examples/performance-thread/common/common.mk20
-rw-r--r--examples/performance-thread/common/lthread.h8
-rw-r--r--examples/performance-thread/common/lthread_api.h8
-rw-r--r--examples/performance-thread/common/lthread_cond.h8
-rw-r--r--examples/performance-thread/common/lthread_diag.h9
-rw-r--r--examples/performance-thread/common/lthread_diag_api.h8
-rw-r--r--examples/performance-thread/common/lthread_int.h8
-rw-r--r--examples/performance-thread/common/lthread_mutex.h8
-rw-r--r--examples/performance-thread/common/lthread_objcache.h7
-rw-r--r--examples/performance-thread/common/lthread_pool.h7
-rw-r--r--examples/performance-thread/common/lthread_queue.h7
-rw-r--r--examples/performance-thread/common/lthread_sched.h7
-rw-r--r--examples/performance-thread/common/lthread_timer.h24
-rw-r--r--examples/performance-thread/common/lthread_tls.c1
-rw-r--r--examples/performance-thread/common/lthread_tls.h7
-rw-r--r--examples/performance-thread/l3fwd-thread/main.c127
-rw-r--r--examples/performance-thread/pthread_shim/main.c8
-rw-r--r--examples/performance-thread/pthread_shim/pthread_shim.c38
-rw-r--r--examples/performance-thread/pthread_shim/pthread_shim.h3
-rw-r--r--examples/ptpclient/ptpclient.c2
-rw-r--r--examples/qos_meter/main.c4
-rw-r--r--examples/qos_sched/Makefile1
-rw-r--r--examples/qos_sched/app_thread.c14
-rw-r--r--examples/qos_sched/init.c2
-rw-r--r--examples/quota_watermark/qw/args.c63
-rw-r--r--examples/quota_watermark/qw/init.c178
-rw-r--r--examples/quota_watermark/qw/main.c382
-rw-r--r--examples/quota_watermark/qw/main.h7
-rw-r--r--examples/quota_watermark/qwctl/commands.c208
-rw-r--r--examples/quota_watermark/qwctl/qwctl.c40
-rw-r--r--examples/quota_watermark/qwctl/qwctl.h1
-rw-r--r--examples/server_node_efd/Makefile44
-rw-r--r--examples/server_node_efd/node/Makefile48
-rw-r--r--examples/server_node_efd/node/node.c417
-rw-r--r--examples/server_node_efd/server/Makefile57
-rw-r--r--examples/server_node_efd/server/args.c200
-rw-r--r--examples/server_node_efd/server/args.h (renamed from drivers/net/null/rte_eth_null.h)11
-rw-r--r--examples/server_node_efd/server/init.c371
-rw-r--r--examples/server_node_efd/server/init.h (renamed from lib/librte_eal/common/include/rte_warnings.h)80
-rw-r--r--examples/server_node_efd/server/main.c362
-rw-r--r--examples/server_node_efd/shared/common.h99
-rw-r--r--examples/tep_termination/main.c43
-rw-r--r--examples/tep_termination/main.h2
-rw-r--r--examples/tep_termination/vxlan_setup.c4
-rw-r--r--examples/vhost/Makefile2
-rw-r--r--examples/vhost/main.c118
-rw-r--r--examples/vhost/main.h32
-rw-r--r--examples/vhost/virtio_net.c403
-rw-r--r--examples/vhost_xen/main.c22
-rw-r--r--lib/Makefile51
-rw-r--r--lib/librte_acl/Makefile3
-rw-r--r--lib/librte_acl/acl_bld.c8
-rw-r--r--lib/librte_acl/acl_run.h4
-rw-r--r--lib/librte_acl/rte_acl.c3
-rw-r--r--lib/librte_acl/rte_acl.h2
-rw-r--r--lib/librte_bitratestats/Makefile49
-rw-r--r--lib/librte_bitratestats/rte_bitrate.c153
-rw-r--r--lib/librte_bitratestats/rte_bitrate.h (renamed from lib/librte_eal/common/include/arch/tile/rte_atomic.h)84
-rw-r--r--lib/librte_bitratestats/rte_bitratestats_version.map9
-rw-r--r--lib/librte_cfgfile/Makefile3
-rw-r--r--lib/librte_cfgfile/rte_cfgfile.c106
-rw-r--r--lib/librte_cfgfile/rte_cfgfile.h44
-rw-r--r--lib/librte_cfgfile/rte_cfgfile_version.map7
-rw-r--r--lib/librte_cmdline/Makefile3
-rw-r--r--lib/librte_cmdline/cmdline_parse.c69
-rw-r--r--lib/librte_cmdline/cmdline_parse.h21
-rw-r--r--lib/librte_cmdline/cmdline_parse_num.c4
-rw-r--r--lib/librte_cryptodev/Makefile7
-rw-r--r--lib/librte_cryptodev/rte_crypto_sym.h66
-rw-r--r--lib/librte_cryptodev/rte_cryptodev.c390
-rw-r--r--lib/librte_cryptodev/rte_cryptodev.h238
-rw-r--r--lib/librte_cryptodev/rte_cryptodev_pmd.h92
-rw-r--r--lib/librte_cryptodev/rte_cryptodev_version.map28
-rw-r--r--lib/librte_distributor/Makefile17
-rw-r--r--lib/librte_distributor/rte_distributor.c786
-rw-r--r--lib/librte_distributor/rte_distributor.h76
-rw-r--r--lib/librte_distributor/rte_distributor_match_generic.c43
-rw-r--r--lib/librte_distributor/rte_distributor_match_sse.c114
-rw-r--r--lib/librte_distributor/rte_distributor_private.h202
-rw-r--r--lib/librte_distributor/rte_distributor_v1705.h89
-rw-r--r--lib/librte_distributor/rte_distributor_v20.c427
-rw-r--r--lib/librte_distributor/rte_distributor_v20.h247
-rw-r--r--lib/librte_distributor/rte_distributor_version.map14
-rw-r--r--lib/librte_eal/Makefile2
-rw-r--r--lib/librte_eal/bsdapp/eal/Makefile6
-rw-r--r--lib/librte_eal/bsdapp/eal/eal.c114
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_debug.c4
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_interrupts.c24
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_lcore.c8
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_pci.c104
-rw-r--r--lib/librte_eal/bsdapp/eal/rte_eal_version.map73
-rw-r--r--lib/librte_eal/bsdapp/nic_uio/nic_uio.c44
-rw-r--r--lib/librte_eal/common/Makefile8
-rw-r--r--lib/librte_eal/common/eal_common_bus.c147
-rw-r--r--lib/librte_eal/common/eal_common_cpuflags.c13
-rw-r--r--lib/librte_eal/common/eal_common_dev.c67
-rw-r--r--lib/librte_eal/common/eal_common_lcore.c7
-rw-r--r--lib/librte_eal/common/eal_common_log.c247
-rw-r--r--lib/librte_eal/common/eal_common_options.c83
-rw-r--r--lib/librte_eal/common/eal_common_pci.c301
-rw-r--r--lib/librte_eal/common/eal_common_tailqs.c3
-rw-r--r--lib/librte_eal/common/eal_common_vdev.c302
-rw-r--r--lib/librte_eal/common/eal_internal_cfg.h1
-rw-r--r--lib/librte_eal/common/eal_private.h53
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_atomic.h6
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_atomic_32.h12
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_atomic_64.h51
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_io.h (renamed from lib/librte_eal/common/include/arch/tile/rte_cpuflags.h)26
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_io_64.h199
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_vect.h1
-rw-r--r--lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h6
-rw-r--r--lib/librte_eal/common/include/arch/ppc_64/rte_io.h (renamed from lib/librte_eal/common/arch/tile/rte_cpuflags.c)28
-rw-r--r--lib/librte_eal/common/include/arch/ppc_64/rte_vect.h1
-rw-r--r--lib/librte_eal/common/include/arch/tile/rte_byteorder.h91
-rw-r--r--lib/librte_eal/common/include/arch/tile/rte_prefetch.h67
-rw-r--r--lib/librte_eal/common/include/arch/tile/rte_rwlock.h70
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_atomic.h6
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_io.h (renamed from lib/librte_eal/common/include/arch/tile/rte_cycles.h)39
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_memcpy.h81
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_vect.h7
-rw-r--r--lib/librte_eal/common/include/generic/rte_atomic.h27
-rw-r--r--lib/librte_eal/common/include/generic/rte_cpuflags.h11
-rw-r--r--lib/librte_eal/common/include/generic/rte_cycles.h12
-rw-r--r--lib/librte_eal/common/include/generic/rte_io.h381
-rw-r--r--lib/librte_eal/common/include/generic/rte_vect.h214
-rw-r--r--lib/librte_eal/common/include/rte_bus.h158
-rw-r--r--lib/librte_eal/common/include/rte_common.h23
-rw-r--r--lib/librte_eal/common/include/rte_dev.h105
-rw-r--r--lib/librte_eal/common/include/rte_eal.h40
-rw-r--r--lib/librte_eal/common/include/rte_interrupts.h11
-rw-r--r--lib/librte_eal/common/include/rte_log.h181
-rw-r--r--lib/librte_eal/common/include/rte_pci.h106
-rw-r--r--lib/librte_eal/common/include/rte_vdev.h32
-rw-r--r--lib/librte_eal/common/include/rte_version.h6
-rw-r--r--lib/librte_eal/linuxapp/Makefile2
-rw-r--r--lib/librte_eal/linuxapp/eal/Makefile6
-rw-r--r--lib/librte_eal/linuxapp/eal/eal.c128
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_alarm.c5
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_debug.c4
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_hugepage_info.c9
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_interrupts.c117
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_memory.c103
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_pci.c149
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_pci_init.h3
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_pci_uio.c3
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_pci_vfio.c88
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_vfio.c386
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_vfio.h67
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c23
-rw-r--r--lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h14
-rw-r--r--lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h5
-rw-r--r--lib/librte_eal/linuxapp/eal/rte_eal_version.map74
-rw-r--r--lib/librte_eal/linuxapp/igb_uio/compat.h4
-rw-r--r--lib/librte_eal/linuxapp/igb_uio/igb_uio.c39
-rw-r--r--lib/librte_eal/linuxapp/kni/Makefile58
-rw-r--r--lib/librte_eal/linuxapp/kni/compat.h6
-rw-r--r--lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c2
-rw-r--r--lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c15
-rw-r--r--lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h12
-rw-r--r--lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c4
-rw-r--r--lib/librte_eal/linuxapp/kni/kni_dev.h39
-rw-r--r--lib/librte_eal/linuxapp/kni/kni_fifo.h14
-rw-r--r--lib/librte_eal/linuxapp/kni/kni_misc.c47
-rw-r--r--lib/librte_eal/linuxapp/kni/kni_net.c13
-rw-r--r--lib/librte_eal/linuxapp/kni/kni_vhost.c842
-rw-r--r--lib/librte_eal/linuxapp/xen_dom0/Makefile3
-rw-r--r--lib/librte_efd/Makefile50
-rw-r--r--lib/librte_efd/rte_efd.c1343
-rw-r--r--lib/librte_efd/rte_efd.h308
-rw-r--r--lib/librte_efd/rte_efd_version.map13
-rw-r--r--lib/librte_efd/rte_efd_x86.h86
-rw-r--r--lib/librte_ether/Makefile12
-rw-r--r--lib/librte_ether/rte_eth_ctrl.h1
-rw-r--r--lib/librte_ether/rte_ethdev.c769
-rw-r--r--lib/librte_ether/rte_ethdev.h1036
-rw-r--r--lib/librte_ether/rte_ethdev_pci.h193
-rw-r--r--lib/librte_ether/rte_ethdev_vdev.h84
-rw-r--r--lib/librte_ether/rte_ether_version.map28
-rw-r--r--lib/librte_ether/rte_flow.c159
-rw-r--r--lib/librte_ether/rte_flow.h1198
-rw-r--r--lib/librte_ether/rte_flow_driver.h182
-rw-r--r--lib/librte_eventdev/Makefile (renamed from mk/arch/tile/rte.vars.mk)30
-rw-r--r--lib/librte_eventdev/rte_eventdev.c1345
-rw-r--r--lib/librte_eventdev/rte_eventdev.h1588
-rw-r--r--lib/librte_eventdev/rte_eventdev_pmd.h599
-rw-r--r--lib/librte_eventdev/rte_eventdev_version.map44
-rw-r--r--lib/librte_hash/Makefile3
-rw-r--r--lib/librte_hash/rte_crc_arm64.h2
-rw-r--r--lib/librte_hash/rte_cuckoo_hash.c5
-rw-r--r--lib/librte_hash/rte_hash_crc.h6
-rw-r--r--lib/librte_ip_frag/Makefile6
-rw-r--r--lib/librte_jobstats/Makefile3
-rw-r--r--lib/librte_kni/Makefile5
-rw-r--r--lib/librte_kni/rte_kni.c46
-rw-r--r--lib/librte_kni/rte_kni_fifo.h9
-rw-r--r--lib/librte_kvargs/Makefile3
-rw-r--r--lib/librte_kvargs/rte_kvargs.c8
-rw-r--r--lib/librte_kvargs/rte_kvargs.h3
-rw-r--r--lib/librte_latencystats/Makefile50
-rw-r--r--lib/librte_latencystats/rte_latencystats.c360
-rw-r--r--lib/librte_latencystats/rte_latencystats.h156
-rw-r--r--lib/librte_latencystats/rte_latencystats_version.map11
-rw-r--r--lib/librte_lpm/Makefile3
-rw-r--r--lib/librte_lpm/rte_lpm6.c134
-rw-r--r--lib/librte_lpm/rte_lpm6.h32
-rw-r--r--lib/librte_lpm/rte_lpm_version.map10
-rw-r--r--lib/librte_mbuf/Makefile5
-rw-r--r--lib/librte_mbuf/rte_mbuf.c13
-rw-r--r--lib/librte_mbuf/rte_mbuf.h322
-rw-r--r--lib/librte_mbuf/rte_mbuf_ptype.h3
-rw-r--r--lib/librte_mempool/Makefile4
-rw-r--r--lib/librte_mempool/rte_mempool.c21
-rw-r--r--lib/librte_mempool/rte_mempool.h275
-rw-r--r--lib/librte_mempool/rte_mempool_version.map1
-rw-r--r--lib/librte_meter/Makefile3
-rw-r--r--lib/librte_metrics/Makefile49
-rw-r--r--lib/librte_metrics/rte_metrics.c302
-rw-r--r--lib/librte_metrics/rte_metrics.h250
-rw-r--r--lib/librte_metrics/rte_metrics_version.map13
-rw-r--r--lib/librte_net/Makefile4
-rw-r--r--lib/librte_net/net_crc_sse.h363
-rw-r--r--lib/librte_net/rte_ether.h5
-rw-r--r--lib/librte_net/rte_net.h111
-rw-r--r--lib/librte_net/rte_net_crc.c207
-rw-r--r--lib/librte_net/rte_net_crc.h (renamed from examples/dpdk_qat/crypto.h)104
-rw-r--r--lib/librte_net/rte_net_version.map8
-rw-r--r--lib/librte_pdump/Makefile6
-rw-r--r--lib/librte_pdump/rte_pdump.c6
-rw-r--r--lib/librte_pdump/rte_pdump.h2
-rw-r--r--lib/librte_pipeline/Makefile7
-rw-r--r--lib/librte_port/Makefile11
-rw-r--r--lib/librte_port/rte_port_ethdev.c30
-rw-r--r--lib/librte_port/rte_port_fd.c41
-rw-r--r--lib/librte_port/rte_port_frag.c9
-rw-r--r--lib/librte_port/rte_port_kni.c26
-rw-r--r--lib/librte_port/rte_port_ras.c14
-rw-r--r--lib/librte_port/rte_port_ring.c86
-rw-r--r--lib/librte_port/rte_port_sched.c10
-rw-r--r--lib/librte_port/rte_port_source_sink.c27
-rw-r--r--lib/librte_power/Makefile3
-rw-r--r--lib/librte_reorder/Makefile5
-rw-r--r--lib/librte_ring/Makefile2
-rw-r--r--lib/librte_ring/rte_ring.c76
-rw-r--r--lib/librte_ring/rte_ring.h809
-rw-r--r--lib/librte_sched/Makefile5
-rw-r--r--lib/librte_table/Makefile11
-rw-r--r--lib/librte_table/rte_table_acl.c26
-rw-r--r--lib/librte_table/rte_table_array.c11
-rw-r--r--lib/librte_table/rte_table_hash_cuckoo.c8
-rw-r--r--lib/librte_table/rte_table_hash_ext.c28
-rw-r--r--lib/librte_table/rte_table_hash_key16.c14
-rw-r--r--lib/librte_table/rte_table_hash_key32.c16
-rw-r--r--lib/librte_table/rte_table_hash_key8.c14
-rw-r--r--lib/librte_table/rte_table_hash_lru.c10
-rw-r--r--lib/librte_table/rte_table_lpm.c14
-rw-r--r--lib/librte_table/rte_table_lpm_ipv6.c23
-rw-r--r--lib/librte_table/rte_table_stub.c2
-rw-r--r--lib/librte_timer/Makefile3
-rw-r--r--lib/librte_vhost/Makefile11
-rw-r--r--lib/librte_vhost/fd_man.c33
-rw-r--r--lib/librte_vhost/fd_man.h2
-rw-r--r--lib/librte_vhost/rte_vhost.h439
-rw-r--r--lib/librte_vhost/rte_vhost_version.map23
-rw-r--r--lib/librte_vhost/rte_virtio_net.h193
-rw-r--r--lib/librte_vhost/socket.c277
-rw-r--r--lib/librte_vhost/vhost.c248
-rw-r--r--lib/librte_vhost/vhost.h136
-rw-r--r--lib/librte_vhost/vhost_user.c207
-rw-r--r--lib/librte_vhost/vhost_user.h14
-rw-r--r--lib/librte_vhost/virtio_net.c134
-rw-r--r--mk/internal/rte.install-post.mk2
-rw-r--r--mk/machine/armv7a/rte.vars.mk2
-rw-r--r--mk/machine/dpaa2/rte.vars.mk7
-rw-r--r--mk/machine/thunderx/rte.vars.mk2
-rw-r--r--mk/rte.app.mk87
-rw-r--r--mk/rte.bsdmodule.mk2
-rw-r--r--mk/rte.cpuflags.mk6
-rw-r--r--mk/rte.extsubdir.mk18
-rw-r--r--mk/rte.gnuconfigure.mk2
-rw-r--r--mk/rte.hostapp.mk2
-rw-r--r--mk/rte.hostlib.mk2
-rw-r--r--mk/rte.install.mk2
-rw-r--r--mk/rte.lib.mk26
-rw-r--r--mk/rte.module.mk2
-rw-r--r--mk/rte.obj.mk2
-rw-r--r--mk/rte.sdkbuild.mk29
-rw-r--r--mk/rte.sdkconfig.mk10
-rw-r--r--mk/rte.sdkdepdirs.mk27
-rw-r--r--mk/rte.sdkdoc.mk13
-rw-r--r--mk/rte.sdkinstall.mk14
-rw-r--r--mk/rte.sdkroot.mk14
-rw-r--r--mk/rte.sdktest.mk18
-rw-r--r--mk/rte.shared.mk2
-rw-r--r--mk/rte.subdir.mk40
-rw-r--r--mk/target/generic/rte.vars.mk6
-rw-r--r--mk/toolchain/clang/rte.toolchain-compat.mk3
-rw-r--r--mk/toolchain/clang/rte.vars.mk5
-rw-r--r--mk/toolchain/gcc/rte.toolchain-compat.mk5
-rw-r--r--mk/toolchain/gcc/rte.vars.mk11
-rw-r--r--mk/toolchain/icc/rte.toolchain-compat.mk5
-rw-r--r--mk/toolchain/icc/rte.vars.mk7
-rw-r--r--pkg/dpdk.spec21
-rw-r--r--test/Makefile (renamed from mk/internal/rte.depdirs-post.mk)19
-rw-r--r--test/cmdline_test/Makefile (renamed from app/cmdline_test/Makefile)3
-rw-r--r--test/cmdline_test/cmdline_test.c (renamed from app/cmdline_test/cmdline_test.c)0
-rw-r--r--test/cmdline_test/cmdline_test.h (renamed from app/cmdline_test/cmdline_test.h)0
-rwxr-xr-xtest/cmdline_test/cmdline_test.py (renamed from app/cmdline_test/cmdline_test.py)87
-rw-r--r--test/cmdline_test/cmdline_test_data.py310
-rw-r--r--test/cmdline_test/commands.c (renamed from app/cmdline_test/commands.c)0
-rw-r--r--test/test-acl/Makefile (renamed from app/test-acl/Makefile)3
-rw-r--r--test/test-acl/main.c (renamed from app/test-acl/main.c)0
-rw-r--r--test/test-pipeline/Makefile (renamed from app/test-pipeline/Makefile)3
-rw-r--r--test/test-pipeline/config.c (renamed from app/test-pipeline/config.c)2
-rw-r--r--test/test-pipeline/init.c (renamed from app/test-pipeline/init.c)2
-rw-r--r--test/test-pipeline/main.c (renamed from app/test-pipeline/main.c)0
-rw-r--r--test/test-pipeline/main.h (renamed from app/test-pipeline/main.h)0
-rw-r--r--test/test-pipeline/pipeline_acl.c (renamed from app/test-pipeline/pipeline_acl.c)0
-rw-r--r--test/test-pipeline/pipeline_hash.c (renamed from app/test-pipeline/pipeline_hash.c)7
-rw-r--r--test/test-pipeline/pipeline_lpm.c (renamed from app/test-pipeline/pipeline_lpm.c)0
-rw-r--r--test/test-pipeline/pipeline_lpm_ipv6.c (renamed from app/test-pipeline/pipeline_lpm_ipv6.c)0
-rw-r--r--test/test-pipeline/pipeline_stub.c (renamed from app/test-pipeline/pipeline_stub.c)0
-rw-r--r--test/test-pipeline/runtime.c (renamed from app/test-pipeline/runtime.c)19
-rw-r--r--test/test/Makefile (renamed from app/test/Makefile)46
-rw-r--r--test/test/autotest.py (renamed from app/test/autotest.py)46
-rw-r--r--test/test/autotest_data.py495
-rw-r--r--test/test/autotest_runner.py428
-rw-r--r--test/test/autotest_test_funcs.py295
-rw-r--r--test/test/commands.c (renamed from app/test/commands.c)56
-rw-r--r--test/test/packet_burst_generator.c (renamed from app/test/packet_burst_generator.c)0
-rw-r--r--test/test/packet_burst_generator.h (renamed from app/test/packet_burst_generator.h)0
-rw-r--r--test/test/process.h (renamed from app/test/process.h)0
-rw-r--r--test/test/resource.c (renamed from app/test/resource.c)0
-rw-r--r--test/test/resource.h (renamed from app/test/resource.h)0
-rw-r--r--test/test/test.c (renamed from app/test/test.c)24
-rw-r--r--test/test/test.h (renamed from app/test/test.h)26
-rw-r--r--test/test/test_acl.c (renamed from app/test/test_acl.c)33
-rw-r--r--test/test/test_acl.h (renamed from app/test/test_acl.h)0
-rw-r--r--test/test/test_alarm.c (renamed from app/test/test_alarm.c)0
-rw-r--r--test/test/test_atomic.c (renamed from app/test/test_atomic.c)0
-rw-r--r--test/test/test_byteorder.c (renamed from app/test/test_byteorder.c)0
-rw-r--r--test/test/test_cfgfile.c322
-rw-r--r--test/test/test_cfgfiles/etc/empty.ini0
-rw-r--r--test/test/test_cfgfiles/etc/empty_key_value.ini3
-rw-r--r--test/test/test_cfgfiles/etc/invalid_section.ini3
-rw-r--r--test/test/test_cfgfiles/etc/line_too_long.ini3
-rw-r--r--test/test/test_cfgfiles/etc/missing_section.ini2
-rw-r--r--test/test/test_cfgfiles/etc/sample1.ini12
-rw-r--r--test/test/test_cfgfiles/etc/sample2.ini12
-rw-r--r--test/test/test_cmdline.c (renamed from app/test/test_cmdline.c)0
-rw-r--r--test/test/test_cmdline.h (renamed from app/test/test_cmdline.h)0
-rw-r--r--test/test/test_cmdline_cirbuf.c (renamed from app/test/test_cmdline_cirbuf.c)0
-rw-r--r--test/test/test_cmdline_etheraddr.c (renamed from app/test/test_cmdline_etheraddr.c)0
-rw-r--r--test/test/test_cmdline_ipaddr.c (renamed from app/test/test_cmdline_ipaddr.c)0
-rw-r--r--test/test/test_cmdline_lib.c (renamed from app/test/test_cmdline_lib.c)0
-rw-r--r--test/test/test_cmdline_num.c (renamed from app/test/test_cmdline_num.c)1
-rw-r--r--test/test/test_cmdline_portlist.c (renamed from app/test/test_cmdline_portlist.c)0
-rw-r--r--test/test/test_cmdline_string.c (renamed from app/test/test_cmdline_string.c)0
-rw-r--r--test/test/test_common.c (renamed from app/test/test_common.c)0
-rw-r--r--test/test/test_cpuflags.c (renamed from app/test/test_cpuflags.c)0
-rw-r--r--test/test/test_crc.c184
-rw-r--r--test/test/test_cryptodev.c (renamed from app/test/test_cryptodev.c)2482
-rw-r--r--test/test/test_cryptodev.h (renamed from app/test/test_cryptodev.h)138
-rw-r--r--test/test/test_cryptodev_aes_test_vectors.h (renamed from app/test/test_cryptodev_aes_test_vectors.h)516
-rw-r--r--test/test/test_cryptodev_blockcipher.c (renamed from app/test/test_cryptodev_blockcipher.c)254
-rw-r--r--test/test/test_cryptodev_blockcipher.h (renamed from app/test/test_cryptodev_blockcipher.h)11
-rw-r--r--test/test/test_cryptodev_des_test_vectors.h (renamed from app/test/test_cryptodev_des_test_vectors.h)300
-rw-r--r--test/test/test_cryptodev_gcm_test_vectors.h3051
-rw-r--r--test/test/test_cryptodev_hash_test_vectors.h (renamed from app/test/test_cryptodev_hash_test_vectors.h)68
-rw-r--r--test/test/test_cryptodev_hmac_test_vectors.h (renamed from app/test/test_cryptodev_hmac_test_vectors.h)0
-rw-r--r--test/test/test_cryptodev_kasumi_hash_test_vectors.h (renamed from app/test/test_cryptodev_kasumi_hash_test_vectors.h)0
-rw-r--r--test/test/test_cryptodev_kasumi_test_vectors.h (renamed from app/test/test_cryptodev_kasumi_test_vectors.h)0
-rw-r--r--test/test/test_cryptodev_perf.c (renamed from app/test/test_cryptodev_perf.c)677
-rw-r--r--test/test/test_cryptodev_snow3g_hash_test_vectors.h (renamed from app/test/test_cryptodev_snow3g_hash_test_vectors.h)0
-rw-r--r--test/test/test_cryptodev_snow3g_test_vectors.h (renamed from app/test/test_cryptodev_snow3g_test_vectors.h)0
-rw-r--r--test/test/test_cryptodev_zuc_test_vectors.h (renamed from app/test/test_cryptodev_zuc_test_vectors.h)598
-rw-r--r--test/test/test_cycles.c (renamed from app/test/test_cycles.c)0
-rw-r--r--test/test/test_debug.c (renamed from app/test/test_debug.c)0
-rw-r--r--test/test/test_devargs.c (renamed from app/test/test_devargs.c)0
-rw-r--r--test/test/test_distributor.c (renamed from app/test/test_distributor.c)369
-rw-r--r--test/test/test_distributor_perf.c (renamed from app/test/test_distributor_perf.c)101
-rw-r--r--test/test/test_eal_flags.c (renamed from app/test/test_eal_flags.c)0
-rw-r--r--test/test/test_eal_fs.c (renamed from app/test/test_eal_fs.c)0
-rw-r--r--test/test/test_efd.c500
-rw-r--r--test/test/test_efd_perf.c414
-rw-r--r--test/test/test_errno.c (renamed from app/test/test_errno.c)0
-rw-r--r--test/test/test_eventdev.c787
-rw-r--r--test/test/test_eventdev_octeontx.c1399
-rw-r--r--test/test/test_eventdev_sw.c3188
-rw-r--r--test/test/test_func_reentrancy.c (renamed from app/test/test_func_reentrancy.c)0
-rw-r--r--test/test/test_hash.c (renamed from app/test/test_hash.c)0
-rw-r--r--test/test/test_hash_functions.c (renamed from app/test/test_hash_functions.c)0
-rw-r--r--test/test/test_hash_multiwriter.c (renamed from app/test/test_hash_multiwriter.c)0
-rw-r--r--test/test/test_hash_perf.c (renamed from app/test/test_hash_perf.c)0
-rw-r--r--test/test/test_hash_scaling.c (renamed from app/test/test_hash_scaling.c)0
-rw-r--r--test/test/test_interrupts.c (renamed from app/test/test_interrupts.c)23
-rw-r--r--test/test/test_kni.c (renamed from app/test/test_kni.c)2
-rw-r--r--test/test/test_kvargs.c (renamed from app/test/test_kvargs.c)0
-rw-r--r--test/test/test_link_bonding.c (renamed from app/test/test_link_bonding.c)4
-rw-r--r--test/test/test_link_bonding_mode4.c (renamed from app/test/test_link_bonding_mode4.c)8
-rw-r--r--test/test/test_link_bonding_rssconf.c (renamed from app/test/test_link_bonding_rssconf.c)19
-rw-r--r--test/test/test_logs.c (renamed from app/test/test_logs.c)12
-rw-r--r--test/test/test_lpm.c (renamed from app/test/test_lpm.c)0
-rw-r--r--test/test/test_lpm6.c (renamed from app/test/test_lpm6.c)115
-rw-r--r--test/test/test_lpm6_data.h (renamed from app/test/test_lpm6_data.h)0
-rw-r--r--test/test/test_lpm6_perf.c (renamed from app/test/test_lpm6_perf.c)4
-rw-r--r--test/test/test_lpm_perf.c (renamed from app/test/test_lpm_perf.c)0
-rw-r--r--test/test/test_malloc.c (renamed from app/test/test_malloc.c)0
-rw-r--r--test/test/test_mbuf.c (renamed from app/test/test_mbuf.c)125
-rw-r--r--test/test/test_memcpy.c (renamed from app/test/test_memcpy.c)0
-rw-r--r--test/test/test_memcpy_perf.c (renamed from app/test/test_memcpy_perf.c)0
-rw-r--r--test/test/test_memory.c (renamed from app/test/test_memory.c)0
-rw-r--r--test/test/test_mempool.c (renamed from app/test/test_mempool.c)37
-rw-r--r--test/test/test_mempool_perf.c (renamed from app/test/test_mempool_perf.c)136
-rw-r--r--test/test/test_memzone.c (renamed from app/test/test_memzone.c)0
-rw-r--r--test/test/test_meter.c (renamed from app/test/test_meter.c)0
-rw-r--r--test/test/test_mp_secondary.c (renamed from app/test/test_mp_secondary.c)17
-rw-r--r--test/test/test_per_lcore.c (renamed from app/test/test_per_lcore.c)0
-rw-r--r--test/test/test_pmd_perf.c (renamed from app/test/test_pmd_perf.c)2
-rw-r--r--test/test/test_pmd_ring.c (renamed from app/test/test_pmd_ring.c)0
-rw-r--r--test/test/test_pmd_ring_perf.c (renamed from app/test/test_pmd_ring_perf.c)12
-rw-r--r--test/test/test_power.c (renamed from app/test/test_power.c)0
-rw-r--r--test/test/test_power_acpi_cpufreq.c (renamed from app/test/test_power_acpi_cpufreq.c)0
-rw-r--r--test/test/test_power_kvm_vm.c (renamed from app/test/test_power_kvm_vm.c)0
-rw-r--r--test/test/test_prefetch.c (renamed from app/test/test_prefetch.c)0
-rw-r--r--test/test/test_red.c (renamed from app/test/test_red.c)0
-rw-r--r--test/test/test_reorder.c (renamed from app/test/test_reorder.c)0
-rw-r--r--test/test/test_resource.c (renamed from app/test/test_resource.c)0
-rw-r--r--test/test/test_ring.c829
-rw-r--r--test/test/test_ring_perf.c (renamed from app/test/test_ring_perf.c)36
-rw-r--r--test/test/test_rwlock.c (renamed from app/test/test_rwlock.c)0
-rw-r--r--test/test/test_sched.c (renamed from app/test/test_sched.c)0
-rw-r--r--test/test/test_spinlock.c (renamed from app/test/test_spinlock.c)0
-rw-r--r--test/test/test_string_fns.c (renamed from app/test/test_string_fns.c)0
-rw-r--r--test/test/test_table.c (renamed from app/test/test_table.c)2
-rw-r--r--test/test/test_table.h (renamed from app/test/test_table.h)0
-rw-r--r--test/test/test_table_acl.c (renamed from app/test/test_table_acl.c)4
-rw-r--r--test/test/test_table_acl.h (renamed from app/test/test_table_acl.h)0
-rw-r--r--test/test/test_table_combined.c (renamed from app/test/test_table_combined.c)0
-rw-r--r--test/test/test_table_combined.h (renamed from app/test/test_table_combined.h)0
-rw-r--r--test/test/test_table_pipeline.c (renamed from app/test/test_table_pipeline.c)4
-rw-r--r--test/test/test_table_pipeline.h (renamed from app/test/test_table_pipeline.h)0
-rw-r--r--test/test/test_table_ports.c (renamed from app/test/test_table_ports.c)12
-rw-r--r--test/test/test_table_ports.h (renamed from app/test/test_table_ports.h)0
-rw-r--r--test/test/test_table_tables.c (renamed from app/test/test_table_tables.c)0
-rw-r--r--test/test/test_table_tables.h (renamed from app/test/test_table_tables.h)0
-rw-r--r--test/test/test_tailq.c (renamed from app/test/test_tailq.c)0
-rw-r--r--test/test/test_thash.c (renamed from app/test/test_thash.c)0
-rw-r--r--test/test/test_timer.c (renamed from app/test/test_timer.c)0
-rw-r--r--test/test/test_timer_perf.c (renamed from app/test/test_timer_perf.c)0
-rw-r--r--test/test/test_timer_racecond.c (renamed from app/test/test_timer_racecond.c)0
-rw-r--r--test/test/test_version.c (renamed from app/test/test_version.c)0
-rw-r--r--test/test/test_xmmt_ops.h (renamed from app/test/test_xmmt_ops.h)0
-rw-r--r--test/test/virtual_pmd.c (renamed from app/test/virtual_pmd.c)29
-rw-r--r--test/test/virtual_pmd.h (renamed from app/test/virtual_pmd.h)0
-rwxr-xr-xusertools/cpu_layout.py (renamed from tools/cpu_layout.py)104
-rwxr-xr-xusertools/dpdk-devbind.py (renamed from tools/dpdk-devbind.py)397
-rwxr-xr-xusertools/dpdk-pmdinfo.py (renamed from tools/dpdk-pmdinfo.py)80
-rwxr-xr-xusertools/dpdk-setup.sh (renamed from tools/dpdk-setup.sh)14
1429 files changed, 260265 insertions, 34350 deletions
diff --git a/.gitignore b/.gitignore
index a722abea..6df5ba06 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,14 @@
doc/guides/nics/overview_table.txt
+doc/guides/cryptodevs/overview_feature_table.txt
+doc/guides/cryptodevs/overview_cipher_table.txt
+doc/guides/cryptodevs/overview_auth_table.txt
+doc/guides/cryptodevs/overview_aead_table.txt
+cscope.out.po
+cscope.out.in
+cscope.out
+cscope.files
+GTAGS
+GPATH
+GRTAGS
+tags
+TAGS
diff --git a/GNUmakefile b/GNUmakefile
index 00fe0db7..45b7fbbe 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -41,5 +41,6 @@ export RTE_SDK
#
ROOTDIRS-y := buildtools lib drivers app
+ROOTDIRS- := test
include $(RTE_SDK)/mk/rte.sdkroot.mk
diff --git a/MAINTAINERS b/MAINTAINERS
index 065397bf..afb4cab7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -21,16 +21,16 @@ Descriptions of section entries:
General Project Administration
------------------------------
-M: Thomas Monjalon <thomas.monjalon@6wind.com>
+M: Thomas Monjalon <thomas@monjalon.net>
T: git://dpdk.org/dpdk
F: MAINTAINERS
-F: scripts/check-maintainers.sh
-F: scripts/check-git-log.sh
-F: scripts/check-includes.sh
-F: scripts/checkpatches.sh
-F: scripts/git-log-fixes.sh
-F: scripts/load-devel-config
-F: scripts/test-build.sh
+F: devtools/check-maintainers.sh
+F: devtools/check-git-log.sh
+F: devtools/check-includes.sh
+F: devtools/checkpatches.sh
+F: devtools/git-log-fixes.sh
+F: devtools/load-devel-config
+F: devtools/test-build.sh
Stable Branches
---------------
@@ -50,17 +50,16 @@ F: doc/
Build System
------------
-M: Thomas Monjalon <thomas.monjalon@6wind.com>
+M: Thomas Monjalon <thomas@monjalon.net>
F: GNUmakefile
F: Makefile
F: config/
F: mk/
F: pkg/
-F: scripts/auto-config-h.sh
-F: scripts/depdirs-rule.sh
-F: scripts/gen-build-mk.sh
-F: scripts/gen-config-h.sh
-F: scripts/relpath.sh
+F: buildtools/auto-config-h.sh
+F: buildtools/gen-build-mk.sh
+F: buildtools/gen-config-h.sh
+F: buildtools/relpath.sh
F: doc/build-sdk-quick.txt
F: doc/guides/prog_guide/build_app.rst
F: doc/guides/prog_guide/dev_kit_*
@@ -70,11 +69,11 @@ ABI versioning
M: Neil Horman <nhorman@tuxdriver.com>
F: lib/librte_compat/
F: doc/guides/rel_notes/deprecation.rst
-F: scripts/validate-abi.sh
+F: devtools/validate-abi.sh
Driver information
F: buildtools/pmdinfogen/
-F: tools/dpdk-pmdinfo.py
+F: usertools/dpdk-pmdinfo.py
F: doc/guides/tools/pmdinfo.rst
@@ -82,33 +81,30 @@ Environment Abstraction Layer
-----------------------------
EAL API and common code
-M: David Marchand <david.marchand@6wind.com>
F: lib/librte_eal/common/*
F: lib/librte_eal/common/include/*
F: lib/librte_eal/common/include/generic/
F: doc/guides/prog_guide/env_abstraction_layer.rst
-F: app/test/test_alarm.c
-F: app/test/test_atomic.c
-F: app/test/test_byteorder.c
-F: app/test/test_common.c
-F: app/test/test_cpuflags.c
-F: app/test/test_cycles.c
-F: app/test/test_debug.c
-F: app/test/test_devargs.c
-F: app/test/test_eal*
-F: app/test/test_errno.c
-F: app/test/test_interrupts.c
-F: app/test/test_logs.c
-F: app/test/test_memcpy*
-F: app/test/test_pci.c
-F: app/test/test_pci_sysfs/
-F: app/test/test_per_lcore.c
-F: app/test/test_prefetch.c
-F: app/test/test_rwlock.c
-F: app/test/test_spinlock.c
-F: app/test/test_string_fns.c
-F: app/test/test_tailq.c
-F: app/test/test_version.c
+F: test/test/test_alarm.c
+F: test/test/test_atomic.c
+F: test/test/test_byteorder.c
+F: test/test/test_common.c
+F: test/test/test_cpuflags.c
+F: test/test/test_cycles.c
+F: test/test/test_debug.c
+F: test/test/test_devargs.c
+F: test/test/test_eal*
+F: test/test/test_errno.c
+F: test/test/test_interrupts.c
+F: test/test/test_logs.c
+F: test/test/test_memcpy*
+F: test/test/test_per_lcore.c
+F: test/test/test_prefetch.c
+F: test/test/test_rwlock.c
+F: test/test/test_spinlock.c
+F: test/test/test_string_fns.c
+F: test/test/test_tailq.c
+F: test/test/test_version.c
Memory Allocation
M: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
@@ -118,10 +114,10 @@ F: lib/librte_eal/common/*malloc*
F: lib/librte_eal/common/eal_common_mem*
F: lib/librte_eal/common/eal_hugepages.h
F: doc/guides/prog_guide/env_abstraction_layer.rst
-F: app/test/test_func_reentrancy.c
-F: app/test/test_malloc.c
-F: app/test/test_memory.c
-F: app/test/test_memzone.c
+F: test/test/test_func_reentrancy.c
+F: test/test/test_malloc.c
+F: test/test/test_memory.c
+F: test/test/test_memzone.c
Keep alive
M: Remy Horton <remy.horton@intel.com>
@@ -134,7 +130,7 @@ Secondary process
M: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
K: RTE_PROC_
F: doc/guides/prog_guide/multi_proc_support.rst
-F: app/test/test_mp_secondary.c
+F: test/test/test_mp_secondary.c
F: examples/multi_process/
F: doc/guides/sample_app_ug/multi_process.rst
@@ -155,17 +151,11 @@ F: drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
F: drivers/net/i40e/i40e_rxtx_vec_neon.c
F: drivers/net/virtio/virtio_rxtx_simple_neon.c
-EZchip TILE-Gx
-M: Zhigang Lu <zlu@ezchip.com>
-M: Liming Sun <lsun@ezchip.com>
-F: lib/librte_eal/common/arch/tile/
-F: lib/librte_eal/common/include/arch/tile/
-F: drivers/net/mpipe/
-
IBM POWER
M: Chao Zhu <chaozhu@linux.vnet.ibm.com>
F: lib/librte_eal/common/arch/ppc_64/
F: lib/librte_eal/common/include/arch/ppc_64/
+F: drivers/net/i40e/i40e_rxtx_vec_altivec.c
Intel x86
M: Bruce Richardson <bruce.richardson@intel.com>
@@ -174,7 +164,6 @@ F: lib/librte_eal/common/arch/x86/
F: lib/librte_eal/common/include/arch/x86/
Linux EAL (with overlaps)
-M: David Marchand <david.marchand@6wind.com>
F: lib/librte_eal/linuxapp/Makefile
F: lib/librte_eal/linuxapp/eal/
F: doc/guides/linux_gsg/
@@ -196,6 +185,7 @@ F: lib/librte_eal/linuxapp/eal/include/exec-env/rte_dom0_common.h
F: drivers/net/xenvirt/
F: doc/guides/xen/
F: examples/vhost_xen/
+F: doc/guides/nics/features/xenvirt.ini
FreeBSD EAL (with overlaps)
M: Bruce Richardson <bruce.richardson@intel.com>
@@ -221,106 +211,143 @@ Core Libraries
Memory pool
M: Olivier Matz <olivier.matz@6wind.com>
F: lib/librte_mempool/
+F: drivers/mempool/Makefile
+F: drivers/mempool/ring/
+F: drivers/mempool/stack/
F: doc/guides/prog_guide/mempool_lib.rst
-F: app/test/test_mempool*
-F: app/test/test_func_reentrancy.c
+F: test/test/test_mempool*
+F: test/test/test_func_reentrancy.c
Ring queue
M: Olivier Matz <olivier.matz@6wind.com>
F: lib/librte_ring/
F: doc/guides/prog_guide/ring_lib.rst
-F: app/test/test_ring*
-F: app/test/test_func_reentrancy.c
+F: test/test/test_ring*
+F: test/test/test_func_reentrancy.c
Packet buffer
M: Olivier Matz <olivier.matz@6wind.com>
F: lib/librte_mbuf/
F: doc/guides/prog_guide/mbuf_lib.rst
-F: app/test/test_mbuf.c
+F: test/test/test_mbuf.c
Ethernet API
-M: Thomas Monjalon <thomas.monjalon@6wind.com>
+M: Thomas Monjalon <thomas@monjalon.net>
F: lib/librte_ether/
-F: scripts/test-null.sh
+F: devtools/test-null.sh
+
+Flow API
+M: Adrien Mazarguil <adrien.mazarguil@6wind.com>
+F: lib/librte_ether/rte_flow*
Crypto API
M: Declan Doherty <declan.doherty@intel.com>
F: lib/librte_cryptodev/
-F: app/test/test_cryptodev*
+F: test/test/test_cryptodev*
F: examples/l2fwd-crypto/
+Eventdev API - EXPERIMENTAL
+M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+T: git://dpdk.org/next/dpdk-next-eventdev
+F: lib/librte_eventdev/
+F: drivers/event/skeleton/
+F: test/test/test_eventdev.c
+
Networking Drivers
------------------
+M: Ferruh Yigit <ferruh.yigit@intel.com>
+T: git://dpdk.org/next/dpdk-next-net
+F: doc/guides/nics/features/default.ini
Link bonding
M: Declan Doherty <declan.doherty@intel.com>
F: drivers/net/bonding/
F: doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst
-F: app/test/test_link_bonding*
+F: test/test/test_link_bonding*
F: examples/bond/
+F: doc/guides/nics/features/bonding.ini
Linux KNI
M: Ferruh Yigit <ferruh.yigit@intel.com>
F: lib/librte_eal/linuxapp/kni/
F: lib/librte_kni/
F: doc/guides/prog_guide/kernel_nic_interface.rst
-F: app/test/test_kni.c
+F: test/test/test_kni.c
F: examples/kni/
F: doc/guides/sample_app_ug/kernel_nic_interface.rst
Linux AF_PACKET
M: John W. Linville <linville@tuxdriver.com>
F: drivers/net/af_packet/
+F: doc/guides/nics/features/afpacket.ini
Amazon ENA
-M: Jan Medala <jan@semihalf.com>
-M: Jakub Palider <jpa@semihalf.com>
-M: Netanel Belgazal <netanel@amazon.com>
+M: Marcin Wojtas <mw@semihalf.com>
+M: Michal Krawczyk <mk@semihalf.com>
+M: Guy Tzalik <gtzalik@amazon.com>
M: Evgeny Schemeilin <evgenys@amazon.com>
F: drivers/net/ena/
F: doc/guides/nics/ena.rst
+F: doc/guides/nics/features/ena.ini
+
+Atomic Rules ARK
+M: Shepard Siegel <shepard.siegel@atomicrules.com>
+M: Ed Czeck <ed.czeck@atomicrules.com>
+M: John Miller <john.miller@atomicrules.com>
+F: drivers/net/ark/
+F: doc/guides/nics/ark.rst
+F: doc/guides/nics/features/ark.ini
Broadcom bnxt
M: Stephen Hurd <stephen.hurd@broadcom.com>
M: Ajit Khaparde <ajit.khaparde@broadcom.com>
F: drivers/net/bnxt/
F: doc/guides/nics/bnxt.rst
+F: doc/guides/nics/features/bnxt.ini
Cavium ThunderX nicvf
M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
M: Maciej Czekaj <maciej.czekaj@caviumnetworks.com>
F: drivers/net/thunderx/
F: doc/guides/nics/thunderx.rst
+F: doc/guides/nics/features/thunderx.ini
+
+Cavium LiquidIO
+M: Shijith Thotton <shijith.thotton@cavium.com>
+M: Srisivasubramanian Srinivasan <ssrinivasan@cavium.com>
+F: drivers/net/liquidio/
+F: doc/guides/nics/liquidio.rst
+F: doc/guides/nics/features/liquidio.ini
Chelsio cxgbe
M: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
F: drivers/net/cxgbe/
F: doc/guides/nics/cxgbe.rst
+F: doc/guides/nics/features/cxgbe.ini
Cisco enic
M: John Daley <johndale@cisco.com>
M: Nelson Escobar <neescoba@cisco.com>
F: drivers/net/enic/
F: doc/guides/nics/enic.rst
-
-Combo szedata2
-M: Matej Vido <matejvido@gmail.com>
-F: drivers/net/szedata2/
-F: doc/guides/nics/szedata2.rst
+F: doc/guides/nics/features/enic.ini
Intel e1000
M: Wenzhuo Lu <wenzhuo.lu@intel.com>
F: drivers/net/e1000/
F: doc/guides/nics/e1000em.rst
F: doc/guides/nics/intel_vf.rst
+F: doc/guides/nics/features/e1000.ini
+F: doc/guides/nics/features/igb*.ini
Intel ixgbe
-M: Helin Zhang <helin.zhang@intel.com>
+M: Wenzhuo Lu <wenzhuo.lu@intel.com>
M: Konstantin Ananyev <konstantin.ananyev@intel.com>
F: drivers/net/ixgbe/
F: doc/guides/nics/ixgbe.rst
F: doc/guides/nics/intel_vf.rst
+F: doc/guides/nics/features/ixgbe*.ini
Intel i40e
M: Helin Zhang <helin.zhang@intel.com>
@@ -328,94 +355,162 @@ M: Jingjing Wu <jingjing.wu@intel.com>
F: drivers/net/i40e/
F: doc/guides/nics/i40e.rst
F: doc/guides/nics/intel_vf.rst
+F: doc/guides/nics/features/i40e*.ini
Intel fm10k
M: Jing Chen <jing.d.chen@intel.com>
F: drivers/net/fm10k/
+F: doc/guides/nics/features/fm10k*.ini
Mellanox mlx4
M: Adrien Mazarguil <adrien.mazarguil@6wind.com>
+M: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
F: drivers/net/mlx4/
F: doc/guides/nics/mlx4.rst
+F: doc/guides/nics/features/mlx4.ini
Mellanox mlx5
M: Adrien Mazarguil <adrien.mazarguil@6wind.com>
+M: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
F: drivers/net/mlx5/
F: doc/guides/nics/mlx5.rst
+F: doc/guides/nics/features/mlx5.ini
+
+Netcope szedata2
+M: Matej Vido <vido@cesnet.cz>
+F: drivers/net/szedata2/
+F: doc/guides/nics/szedata2.rst
+F: doc/guides/nics/features/szedata2.ini
Netronome nfp
M: Alejandro Lucero <alejandro.lucero@netronome.com>
F: drivers/net/nfp/
F: doc/guides/nics/nfp.rst
+F: doc/guides/nics/features/nfp.ini
+
+NXP dpaa2
+M: Hemant Agrawal <hemant.agrawal@nxp.com>
+M: Shreyansh Jain <shreyansh.jain@nxp.com>
+F: drivers/bus/fslmc/
+F: drivers/mempool/dpaa2/
+F: drivers/net/dpaa2/
+F: doc/guides/nics/dpaa2.rst
+F: doc/guides/nics/features/dpaa2.ini
QLogic bnx2x
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Harish Patil <harish.patil@qlogic.com>
-M: Rasesh Mody <rasesh.mody@qlogic.com>
+M: Harish Patil <harish.patil@cavium.com>
+M: Rasesh Mody <rasesh.mody@cavium.com>
F: drivers/net/bnx2x/
F: doc/guides/nics/bnx2x.rst
+F: doc/guides/nics/features/bnx2x*.ini
QLogic qede PMD
-M: Harish Patil <harish.patil@qlogic.com>
-M: Rasesh Mody <rasesh.mody@qlogic.com>
-M: Sony Chacko <sony.chacko@qlogic.com>
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Harish Patil <harish.patil@cavium.com>
F: drivers/net/qede/
F: doc/guides/nics/qede.rst
+F: doc/guides/nics/features/qede*.ini
-RedHat virtio
-M: Huawei Xie <huawei.xie@intel.com>
+Solarflare sfc_efx
+M: Andrew Rybchenko <arybchenko@solarflare.com>
+F: drivers/net/sfc/
+F: doc/guides/nics/sfc_efx.rst
+F: doc/guides/nics/features/sfc_efx.ini
+
+VMware vmxnet3
+M: Shrikrishna Khare <skhare@vmware.com>
+F: drivers/net/vmxnet3/
+F: doc/guides/nics/vmxnet3.rst
+F: doc/guides/nics/features/vmxnet3.ini
+
+Vhost-user
M: Yuanhan Liu <yuanhan.liu@linux.intel.com>
+M: Maxime Coquelin <maxime.coquelin@redhat.com>
T: git://dpdk.org/next/dpdk-next-virtio
-F: drivers/net/virtio/
-F: doc/guides/nics/virtio.rst
F: lib/librte_vhost/
F: doc/guides/prog_guide/vhost_lib.rst
F: examples/vhost/
F: doc/guides/sample_app_ug/vhost.rst
-VMware vmxnet3
-M: Yong Wang <yongwang@vmware.com>
-F: drivers/net/vmxnet3/
-F: doc/guides/nics/vmxnet3.rst
-
Vhost PMD
M: Tetsuya Mukawa <mtetsuyah@gmail.com>
M: Yuanhan Liu <yuanhan.liu@linux.intel.com>
+M: Maxime Coquelin <maxime.coquelin@redhat.com>
T: git://dpdk.org/next/dpdk-next-virtio
F: drivers/net/vhost/
+F: doc/guides/nics/features/vhost.ini
+
+Virtio PMD
+M: Yuanhan Liu <yuanhan.liu@linux.intel.com>
+M: Maxime Coquelin <maxime.coquelin@redhat.com>
+T: git://dpdk.org/next/dpdk-next-virtio
+F: drivers/net/virtio/
+F: doc/guides/nics/virtio.rst
+F: doc/guides/nics/features/virtio*.ini
+
+Wind River AVP
+M: Allain Legacy <allain.legacy@windriver.com>
+M: Matt Peters <matt.peters@windriver.com>
+F: drivers/net/avp/
+F: doc/guides/nics/avp.rst
+F: doc/guides/nics/features/avp.ini
PCAP PMD
-M: Nicolás Pernas Maradei <nicolas.pernas.maradei@emutex.com>
M: Ferruh Yigit <ferruh.yigit@intel.com>
F: drivers/net/pcap/
F: doc/guides/nics/pcap_ring.rst
+F: doc/guides/nics/features/pcap.ini
+
+Tap PMD
+M: Pascal Mazon <pascal.mazon@6wind.com>
+F: drivers/net/tap/
+F: doc/guides/nics/tap.rst
+F: doc/guides/nics/features/tap.ini
+
+KNI PMD
+M: Ferruh Yigit <ferruh.yigit@intel.com>
+F: drivers/net/kni/
+F: doc/guides/nics/kni.rst
+F: doc/guides/nics/features/kni.ini
Ring PMD
M: Bruce Richardson <bruce.richardson@intel.com>
F: drivers/net/ring/
F: doc/guides/nics/pcap_ring.rst
-F: app/test/test_pmd_ring.c
-F: app/test/test_pmd_ring_perf.c
+F: test/test/test_pmd_ring.c
+F: test/test/test_pmd_ring_perf.c
+F: doc/guides/nics/features/ring.ini
Null Networking PMD
M: Tetsuya Mukawa <mtetsuyah@gmail.com>
F: drivers/net/null/
+F: doc/guides/nics/features/null.ini
Crypto Drivers
--------------
M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
T: git://dpdk.org/next/dpdk-next-crypto
+F: doc/guides/cryptodevs/features/default.ini
+
+ARMv8 Crypto PMD
+M: Zbigniew Bodek <zbigniew.bodek@caviumnetworks.com>
+M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+F: drivers/crypto/armv8/
+F: doc/guides/cryptodevs/armv8.rst
+F: doc/guides/cryptodevs/features/armv8.ini
Intel AES-NI GCM PMD
M: Declan Doherty <declan.doherty@intel.com>
F: drivers/crypto/aesni_gcm/
F: doc/guides/cryptodevs/aesni_gcm.rst
+F: doc/guides/cryptodevs/features/aesni_gcm.ini
Intel AES-NI Multi-Buffer
M: Declan Doherty <declan.doherty@intel.com>
F: drivers/crypto/aesni_mb/
F: doc/guides/cryptodevs/aesni_mb.rst
+F: doc/guides/cryptodevs/features/aesni_mb.ini
Intel QuickAssist
M: John Griffin <john.griffin@intel.com>
@@ -423,31 +518,67 @@ M: Fiona Trahe <fiona.trahe@intel.com>
M: Deepak Kumar Jain <deepak.k.jain@intel.com>
F: drivers/crypto/qat/
F: doc/guides/cryptodevs/qat.rst
+F: doc/guides/cryptodevs/features/qat.ini
+
+NXP DPAA2_SEC
+M: Akhil Goyal <akhil.goyal@nxp.com>
+M: Hemant Agrawal <hemant.agrawal@nxp.com>
+F: drivers/crypto/dpaa2_sec/
+F: doc/guides/cryptodevs/dpaa2_sec.rst
+F: doc/guides/cryptodevs/features/dpaa2_sec.ini
SNOW 3G PMD
M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
F: drivers/crypto/snow3g/
F: doc/guides/cryptodevs/snow3g.rst
+F: doc/guides/cryptodevs/features/snow3g.ini
KASUMI PMD
M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
F: drivers/crypto/kasumi/
F: doc/guides/cryptodevs/kasumi.rst
+F: doc/guides/cryptodevs/features/kasumi.ini
ZUC PMD
M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
F: drivers/crypto/zuc/
F: doc/guides/cryptodevs/zuc.rst
+F: doc/guides/cryptodevs/features/zuc.ini
OpenSSL PMD
M: Declan Doherty <declan.doherty@intel.com>
F: drivers/crypto/openssl/
F: doc/guides/cryptodevs/openssl.rst
+F: doc/guides/cryptodevs/features/openssl.ini
Null Crypto PMD
M: Declan Doherty <declan.doherty@intel.com>
F: drivers/crypto/null/
F: doc/guides/cryptodevs/null.rst
+F: doc/guides/cryptodevs/features/null.ini
+
+Crypto Scheduler PMD
+M: Fan Zhang <roy.fan.zhang@intel.com>
+F: drivers/crypto/scheduler/
+F: doc/guides/cryptodevs/scheduler.rst
+
+Eventdev Drivers
+----------------
+M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+T: git://dpdk.org/next/dpdk-next-eventdev
+
+Cavium OCTEONTX ssovf
+M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+M: Santosh Shukla <santosh.shukla@caviumnetworks.com>
+F: drivers/event/octeontx/
+F: test/test/test_eventdev_octeontx.c
+F: doc/guides/eventdevs/octeontx.rst
+
+Software Eventdev PMD
+M: Harry van Haaren <harry.van.haaren@intel.com>
+F: drivers/event/sw/
+F: test/test/test_eventdev_sw.c
+F: doc/guides/eventdevs/sw.rst
Packet processing
@@ -457,6 +588,12 @@ Network headers
M: Olivier Matz <olivier.matz@6wind.com>
F: lib/librte_net/
+Packet CRC
+M: Jasvinder Singh <jasvinder.singh@intel.com>
+F: lib/librte_net/rte_net_crc*
+F: lib/librte_net/net_crc_sse.h
+F: test/test/test_crc.c
+
IP fragmentation & reassembly
M: Konstantin Ananyev <konstantin.ananyev@intel.com>
F: lib/librte_ip_frag/
@@ -468,9 +605,10 @@ F: doc/guides/sample_app_ug/ip_reassembly.rst
Distributor
M: Bruce Richardson <bruce.richardson@intel.com>
+M: David Hunt <david.hunt@intel.com>
F: lib/librte_distributor/
F: doc/guides/prog_guide/packet_distrib_lib.rst
-F: app/test/test_distributor*
+F: test/test/test_distributor*
F: examples/distributor/
F: doc/guides/sample_app_ug/dist_app.rst
@@ -478,7 +616,7 @@ Reorder
M: Reshma Pattan <reshma.pattan@intel.com>
F: lib/librte_reorder/
F: doc/guides/prog_guide/reorder_lib.rst
-F: app/test/test_reorder*
+F: test/test/test_reorder*
F: examples/packet_ordering/
F: doc/guides/sample_app_ug/packet_ordering.rst
@@ -486,8 +624,8 @@ Hierarchical scheduler
M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
F: lib/librte_sched/
F: doc/guides/prog_guide/qos_framework.rst
-F: app/test/test_red.c
-F: app/test/test_sched.c
+F: test/test/test_red.c
+F: test/test/test_sched.c
F: examples/qos_sched/
F: doc/guides/sample_app_ug/qos_scheduler.rst
@@ -506,8 +644,8 @@ F: lib/librte_pipeline/
F: lib/librte_port/
F: lib/librte_table/
F: doc/guides/prog_guide/packet_framework.rst
-F: app/test/test_table*
-F: app/test-pipeline/
+F: test/test/test_table*
+F: test/test-pipeline/
F: doc/guides/sample_app_ug/test_pipeline.rst
F: examples/ip_pipeline/
F: doc/guides/sample_app_ug/ip_pipeline.rst
@@ -520,32 +658,41 @@ ACL
M: Konstantin Ananyev <konstantin.ananyev@intel.com>
F: lib/librte_acl/
F: doc/guides/prog_guide/packet_classif_access_ctrl.rst
-F: app/test-acl/
-F: app/test/test_acl.*
+F: test/test-acl/
+F: test/test/test_acl.*
F: examples/l3fwd-acl/
F: doc/guides/sample_app_ug/l3_forward_access_ctrl.rst
+EFD
+M: Byron Marohn <byron.marohn@intel.com>
+M: Pablo de Lara Guarch <pablo.de.lara.guarch@intel.com>
+F: lib/librte_efd/
+F: doc/guides/prog_guide/efd_lib.rst
+F: test/test/test_efd*
+F: examples/server_node_efd/
+F: doc/guides/sample_app_ug/server_node_efd.rst
+
Hashes
M: Bruce Richardson <bruce.richardson@intel.com>
M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
F: lib/librte_hash/
F: doc/guides/prog_guide/hash_lib.rst
-F: app/test/test_*hash*
-F: app/test/test_func_reentrancy.c
+F: test/test/test_*hash*
+F: test/test/test_func_reentrancy.c
LPM
M: Bruce Richardson <bruce.richardson@intel.com>
F: lib/librte_lpm/
F: doc/guides/prog_guide/lpm*
-F: app/test/test_lpm*
-F: app/test/test_func_reentrancy.c
-F: app/test/test_xmmt_ops.h
+F: test/test/test_lpm*
+F: test/test/test_func_reentrancy.c
+F: test/test/test_xmmt_ops.h
Traffic metering
M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
F: lib/librte_meter/
F: doc/guides/sample_app_ug/qos_scheduler.rst
-F: app/test/test_meter.c
+F: test/test/test_meter.c
F: examples/qos_meter/
F: doc/guides/sample_app_ug/qos_metering.rst
@@ -556,24 +703,26 @@ Other libraries
Configuration file
M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
F: lib/librte_cfgfile/
+F: test/test/test_cfgfile.c
+F: test/test/test_cfgfiles/
Interactive command line
M: Olivier Matz <olivier.matz@6wind.com>
F: lib/librte_cmdline/
-F: app/cmdline_test/
-F: app/test/test_cmdline*
+F: test/cmdline_test/
+F: test/test/test_cmdline*
F: examples/cmdline/
F: doc/guides/sample_app_ug/cmd_line.rst
Key/Value parsing
M: Olivier Matz <olivier.matz@6wind.com>
F: lib/librte_kvargs/
-F: app/test/test_kvargs.c
+F: test/test/test_kvargs.c
Power management
F: lib/librte_power/
F: doc/guides/prog_guide/power_man.rst
-F: app/test/test_power*
+F: test/test/test_power*
F: examples/l3fwd-power/
F: doc/guides/sample_app_ug/l3_forward_power_man.rst
F: examples/vm_power_manager/
@@ -583,7 +732,7 @@ Timers
M: Robert Sanford <rsanford@akamai.com>
F: lib/librte_timer/
F: doc/guides/prog_guide/timer_lib.rst
-F: app/test/test_timer*
+F: test/test/test_timer*
F: examples/timer/
F: doc/guides/sample_app_ug/timer.rst
@@ -593,32 +742,51 @@ F: lib/librte_jobstats/
F: examples/l2fwd-jobstats/
F: doc/guides/sample_app_ug/l2_forward_job_stats.rst
+Metrics
+M: Remy Horton <remy.horton@intel.com>
+F: lib/librte_metrics/
+
+Bit-rate statistics
+M: Remy Horton <remy.horton@intel.com>
+F: lib/librte_bitratestats/
+
+Latency statistics
+M: Reshma Pattan <reshma.pattan@intel.com>
+F: lib/librte_latencystats/
+
Test Applications
-----------------
Unit tests framework
-F: app/test/autotest*
-F: app/test/commands.c
-F: app/test/packet_burst_generator.c
-F: app/test/packet_burst_generator.h
-F: app/test/process.h
-F: app/test/resource.*
-F: app/test/test.c
-F: app/test/test.h
-F: app/test/test_pmd_perf.c
-F: app/test/test_resource.c
-F: app/test/virtual_pmd.c
-F: app/test/virtual_pmd.h
+F: test/Makefile
+F: test/test/Makefile
+F: test/test/autotest*
+F: test/test/commands.c
+F: test/test/packet_burst_generator.c
+F: test/test/packet_burst_generator.h
+F: test/test/process.h
+F: test/test/resource.*
+F: test/test/test.c
+F: test/test/test.h
+F: test/test/test_pmd_perf.c
+F: test/test/test_resource.c
+F: test/test/virtual_pmd.c
+F: test/test/virtual_pmd.h
Driver testing tool
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
+M: Jingjing Wu <jingjing.wu@intel.com>
F: app/test-pmd/
F: doc/guides/testpmd_app_ug/
-Dump tool
+Crypto performance test application
+M: Declan Doherty <declan.doherty@intel.com>
+F: app/test-crypto-perf/
+F: doc/guides/tools/cryptoperf.rst
+
+Procinfo tool
M: Maryam Tahhan <maryam.tahhan@intel.com>
-M: John McNamara <john.mcnamara@intel.com>
+M: Reshma Pattan <reshma.pattan@intel.com>
F: app/proc_info/
F: doc/guides/tools/proc_info.rst
@@ -626,11 +794,6 @@ F: doc/guides/tools/proc_info.rst
Other Example Applications
--------------------------
-M: Bruce Richardson <bruce.richardson@intel.com>
-M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
-F: examples/dpdk_qat/
-F: doc/guides/sample_app_ug/intel_quickassist.rst
-
M: Remy Horton <remy.horton@intel.com>
F: examples/ethtool/
F: doc/guides/sample_app_ug/ethtool.rst
@@ -675,7 +838,7 @@ F: examples/netmap_compat/
F: doc/guides/sample_app_ug/netmap_compatibility.rst
L-threads - EXPERIMENTAL
-M: Ian Betts <ian.betts@intel.com>
+M: John McNamara <john.mcnamara@intel.com>
F: examples/performance-thread/
F: doc/guides/sample_app_ug/performance_thread.rst
diff --git a/app/Makefile b/app/Makefile
index 30ec292a..c3aeebf6 100644
--- a/app/Makefile
+++ b/app/Makefile
@@ -31,12 +31,12 @@
include $(RTE_SDK)/mk/rte.vars.mk
-DIRS-$(CONFIG_RTE_APP_TEST) += test
-DIRS-$(CONFIG_RTE_LIBRTE_ACL) += test-acl
-DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += test-pipeline
DIRS-$(CONFIG_RTE_TEST_PMD) += test-pmd
-DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_test
DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += proc_info
DIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += pdump
+ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
+DIRS-$(CONFIG_RTE_APP_CRYPTO_PERF) += test-crypto-perf
+endif
+
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/app/cmdline_test/cmdline_test_data.py b/app/cmdline_test/cmdline_test_data.py
deleted file mode 100644
index b1945a57..00000000
--- a/app/cmdline_test/cmdline_test_data.py
+++ /dev/null
@@ -1,311 +0,0 @@
-#!/usr/bin/python
-
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# collection of static data
-
-import sys
-
-# keycode constants
-CTRL_A = chr(1)
-CTRL_B = chr(2)
-CTRL_C = chr(3)
-CTRL_D = chr(4)
-CTRL_E = chr(5)
-CTRL_F = chr(6)
-CTRL_K = chr(11)
-CTRL_L = chr(12)
-CTRL_N = chr(14)
-CTRL_P = chr(16)
-CTRL_W = chr(23)
-CTRL_Y = chr(25)
-ALT_B = chr(27) + chr(98)
-ALT_D = chr(27) + chr(100)
-ALT_F = chr(27) + chr(102)
-ALT_BKSPACE = chr(27) + chr(127)
-DEL = chr(27) + chr(91) + chr(51) + chr(126)
-TAB = chr(9)
-HELP = chr(63)
-BKSPACE = chr(127)
-RIGHT = chr(27) + chr(91) + chr(67)
-DOWN = chr(27) + chr(91) + chr(66)
-LEFT = chr(27) + chr(91) + chr(68)
-UP = chr(27) + chr(91) + chr(65)
-ENTER2 = '\r'
-ENTER = '\n'
-
-# expected result constants
-NOT_FOUND = "Command not found"
-BAD_ARG = "Bad arguments"
-AMBIG = "Ambiguous command"
-CMD1 = "Command 1 parsed!"
-CMD2 = "Command 2 parsed!"
-SINGLE = "Single word command parsed!"
-SINGLE_LONG = "Single long word command parsed!"
-AUTO1 = "Autocomplete command 1 parsed!"
-AUTO2 = "Autocomplete command 2 parsed!"
-
-# misc defines
-CMD_QUIT = "quit"
-CMD_GET_BUFSIZE = "get_history_bufsize"
-BUFSIZE_TEMPLATE = "History buffer size: "
-PROMPT = "CMDLINE_TEST>>"
-
-# test defines
-# each test tests progressively diverse set of keys. this way for example
-# if we want to use some key sequence in the test, we first need to test
-# that it itself does what it is expected to do. Most of the tests are
-# designed that way.
-#
-# example: "arrows & delete test 1". we enter a partially valid command,
-# then move 3 chars left and use delete three times. this way we get to
-# know that "delete", "left" and "ctrl+B" all work (because if any of
-# them fails, the whole test will fail and next tests won't be run).
-#
-# each test consists of name, character sequence to send to child,
-# and expected output (if any).
-
-tests = [
-# test basic commands
- {"Name" : "command test 1",
- "Sequence" : "ambiguous first" + ENTER,
- "Result" : CMD1},
- {"Name" : "command test 2",
- "Sequence" : "ambiguous second" + ENTER,
- "Result" : CMD2},
- {"Name" : "command test 3",
- "Sequence" : "ambiguous ambiguous" + ENTER,
- "Result" : AMBIG},
- {"Name" : "command test 4",
- "Sequence" : "ambiguous ambiguous2" + ENTER,
- "Result" : AMBIG},
-
- {"Name" : "invalid command test 1",
- "Sequence" : "ambiguous invalid" + ENTER,
- "Result" : BAD_ARG},
-# test invalid commands
- {"Name" : "invalid command test 2",
- "Sequence" : "invalid" + ENTER,
- "Result" : NOT_FOUND},
- {"Name" : "invalid command test 3",
- "Sequence" : "ambiguousinvalid" + ENTER2,
- "Result" : NOT_FOUND},
-
-# test arrows and deletes
- {"Name" : "arrows & delete test 1",
- "Sequence" : "singlebad" + LEFT*2 + CTRL_B + DEL*3 + ENTER,
- "Result" : SINGLE},
- {"Name" : "arrows & delete test 2",
- "Sequence" : "singlebad" + LEFT*5 + RIGHT + CTRL_F + DEL*3 + ENTER,
- "Result" : SINGLE},
-
-# test backspace
- {"Name" : "backspace test",
- "Sequence" : "singlebad" + BKSPACE*3 + ENTER,
- "Result" : SINGLE},
-
-# test goto left and goto right
- {"Name" : "goto left test",
- "Sequence" : "biguous first" + CTRL_A + "am" + ENTER,
- "Result" : CMD1},
- {"Name" : "goto right test",
- "Sequence" : "biguous fir" + CTRL_A + "am" + CTRL_E + "st" + ENTER,
- "Result" : CMD1},
-
-# test goto words
- {"Name" : "goto left word test",
- "Sequence" : "ambiguous st" + ALT_B + "fir" + ENTER,
- "Result" : CMD1},
- {"Name" : "goto right word test",
- "Sequence" : "ambig first" + CTRL_A + ALT_F + "uous" + ENTER,
- "Result" : CMD1},
-
-# test removing words
- {"Name" : "remove left word 1",
- "Sequence" : "single invalid" + CTRL_W + ENTER,
- "Result" : SINGLE},
- {"Name" : "remove left word 2",
- "Sequence" : "single invalid" + ALT_BKSPACE + ENTER,
- "Result" : SINGLE},
- {"Name" : "remove right word",
- "Sequence" : "single invalid" + ALT_B + ALT_D + ENTER,
- "Result" : SINGLE},
-
-# test kill buffer (copy and paste)
- {"Name" : "killbuffer test 1",
- "Sequence" : "ambiguous" + CTRL_A + CTRL_K + " first" + CTRL_A + CTRL_Y + ENTER,
- "Result" : CMD1},
- {"Name" : "killbuffer test 2",
- "Sequence" : "ambiguous" + CTRL_A + CTRL_K + CTRL_Y*26 + ENTER,
- "Result" : NOT_FOUND},
-
-# test newline
- {"Name" : "newline test",
- "Sequence" : "invalid" + CTRL_C + "single" + ENTER,
- "Result" : SINGLE},
-
-# test redisplay (nothing should really happen)
- {"Name" : "redisplay test",
- "Sequence" : "single" + CTRL_L + ENTER,
- "Result" : SINGLE},
-
-# test autocomplete
- {"Name" : "autocomplete test 1",
- "Sequence" : "si" + TAB + ENTER,
- "Result" : SINGLE},
- {"Name" : "autocomplete test 2",
- "Sequence" : "si" + TAB + "_" + TAB + ENTER,
- "Result" : SINGLE_LONG},
- {"Name" : "autocomplete test 3",
- "Sequence" : "in" + TAB + ENTER,
- "Result" : NOT_FOUND},
- {"Name" : "autocomplete test 4",
- "Sequence" : "am" + TAB + ENTER,
- "Result" : BAD_ARG},
- {"Name" : "autocomplete test 5",
- "Sequence" : "am" + TAB + "fir" + TAB + ENTER,
- "Result" : CMD1},
- {"Name" : "autocomplete test 6",
- "Sequence" : "am" + TAB + "fir" + TAB + TAB + ENTER,
- "Result" : CMD1},
- {"Name" : "autocomplete test 7",
- "Sequence" : "am" + TAB + "fir" + TAB + " " + TAB + ENTER,
- "Result" : CMD1},
- {"Name" : "autocomplete test 8",
- "Sequence" : "am" + TAB + " am" + TAB + " " + ENTER,
- "Result" : AMBIG},
- {"Name" : "autocomplete test 9",
- "Sequence" : "am" + TAB + "inv" + TAB + ENTER,
- "Result" : BAD_ARG},
- {"Name" : "autocomplete test 10",
- "Sequence" : "au" + TAB + ENTER,
- "Result" : NOT_FOUND},
- {"Name" : "autocomplete test 11",
- "Sequence" : "au" + TAB + "1" + ENTER,
- "Result" : AUTO1},
- {"Name" : "autocomplete test 12",
- "Sequence" : "au" + TAB + "2" + ENTER,
- "Result" : AUTO2},
- {"Name" : "autocomplete test 13",
- "Sequence" : "au" + TAB + "2" + TAB + ENTER,
- "Result" : AUTO2},
- {"Name" : "autocomplete test 14",
- "Sequence" : "au" + TAB + "2 " + TAB + ENTER,
- "Result" : AUTO2},
- {"Name" : "autocomplete test 15",
- "Sequence" : "24" + TAB + ENTER,
- "Result" : "24"},
-
-# test history
- {"Name" : "history test 1",
- "Sequence" : "invalid" + ENTER + "single" + ENTER + "invalid" + ENTER + UP + CTRL_P + ENTER,
- "Result" : SINGLE},
- {"Name" : "history test 2",
- "Sequence" : "invalid" + ENTER + "ambiguous first" + ENTER + "invalid" + ENTER + "single" + ENTER + UP * 3 + CTRL_N + DOWN + ENTER,
- "Result" : SINGLE},
-
-#
-# tests that improve coverage
-#
-
-# empty space tests
- {"Name" : "empty space test 1",
- "Sequence" : RIGHT + LEFT + CTRL_B + CTRL_F + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 2",
- "Sequence" : BKSPACE + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 3",
- "Sequence" : CTRL_E*2 + CTRL_A*2 + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 4",
- "Sequence" : ALT_F*2 + ALT_B*2 + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 5",
- "Sequence" : " " + CTRL_E*2 + CTRL_A*2 + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 6",
- "Sequence" : " " + CTRL_A + ALT_F*2 + ALT_B*2 + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 7",
- "Sequence" : " " + CTRL_A + CTRL_D + CTRL_E + CTRL_D + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 8",
- "Sequence" : " space" + CTRL_W*2 + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 9",
- "Sequence" : " space" + ALT_BKSPACE*2 + ENTER,
- "Result" : PROMPT},
- {"Name" : "empty space test 10",
- "Sequence" : " space " + CTRL_A + ALT_D*3 + ENTER,
- "Result" : PROMPT},
-
-# non-printable char tests
- {"Name" : "non-printable test 1",
- "Sequence" : chr(27) + chr(47) + ENTER,
- "Result" : PROMPT},
- {"Name" : "non-printable test 2",
- "Sequence" : chr(27) + chr(128) + ENTER*7,
- "Result" : PROMPT},
- {"Name" : "non-printable test 3",
- "Sequence" : chr(27) + chr(91) + chr(127) + ENTER*6,
- "Result" : PROMPT},
-
-# miscellaneous tests
- {"Name" : "misc test 1",
- "Sequence" : ENTER,
- "Result" : PROMPT},
- {"Name" : "misc test 2",
- "Sequence" : "single #comment" + ENTER,
- "Result" : SINGLE},
- {"Name" : "misc test 3",
- "Sequence" : "#empty line" + ENTER,
- "Result" : PROMPT},
- {"Name" : "misc test 4",
- "Sequence" : " single " + ENTER,
- "Result" : SINGLE},
- {"Name" : "misc test 5",
- "Sequence" : "single#" + ENTER,
- "Result" : SINGLE},
- {"Name" : "misc test 6",
- "Sequence" : 'a' * 257 + ENTER,
- "Result" : NOT_FOUND},
- {"Name" : "misc test 7",
- "Sequence" : "clear_history" + UP*5 + DOWN*5 + ENTER,
- "Result" : PROMPT},
- {"Name" : "misc test 8",
- "Sequence" : "a" + HELP + CTRL_C,
- "Result" : PROMPT},
- {"Name" : "misc test 9",
- "Sequence" : CTRL_D*3,
- "Result" : None},
-]
diff --git a/app/pdump/Makefile b/app/pdump/Makefile
index 536198fa..38ac3e9a 100644
--- a/app/pdump/Makefile
+++ b/app/pdump/Makefile
@@ -41,9 +41,6 @@ CFLAGS += $(WERROR_FLAGS)
SRCS-y := main.c
-# this application needs libraries first
-DEPDIRS-y += lib
-
include $(RTE_SDK)/mk/rte.app.mk
endif
diff --git a/app/pdump/main.c b/app/pdump/main.c
index f3ef181f..3b13753d 100644
--- a/app/pdump/main.c
+++ b/app/pdump/main.c
@@ -92,7 +92,6 @@
#define BURST_SIZE 32
#define NUM_VDEVS 2
-#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */
/* true if x is a power of 2 */
#define POWEROF2(x) ((((x)-1) & (x)) == 0)
@@ -497,7 +496,7 @@ pdump_rxtx(struct rte_ring *ring, uint8_t vdev_id, struct pdump_stats *stats)
/* first dequeue packets from ring of primary process */
const uint16_t nb_in_deq = rte_ring_dequeue_burst(ring,
- (void *)rxtx_bufs, BURST_SIZE);
+ (void *)rxtx_bufs, BURST_SIZE, NULL);
stats->dequeue_pkts += nb_in_deq;
if (nb_in_deq) {
diff --git a/app/proc_info/Makefile b/app/proc_info/Makefile
index e051e032..9e90438e 100644
--- a/app/proc_info/Makefile
+++ b/app/proc_info/Makefile
@@ -39,7 +39,4 @@ CFLAGS += $(WERROR_FLAGS)
SRCS-y := main.c
-# this application needs libraries first
-DEPDIRS-y += lib
-
include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/proc_info/main.c b/app/proc_info/main.c
index 2c56d106..d4f6a823 100644
--- a/app/proc_info/main.c
+++ b/app/proc_info/main.c
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -40,6 +40,7 @@
#include <sys/queue.h>
#include <stdlib.h>
#include <getopt.h>
+#include <unistd.h>
#include <rte_eal.h>
#include <rte_common.h>
@@ -57,23 +58,42 @@
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
+#include <rte_metrics.h>
/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
+#define MAX_STRING_LEN 256
+
/**< mask of enabled ports */
static uint32_t enabled_port_mask;
/**< Enable stats. */
static uint32_t enable_stats;
/**< Enable xstats. */
static uint32_t enable_xstats;
+/**< Enable collectd format*/
+static uint32_t enable_collectd_format;
+/**< FD to send collectd format messages to STDOUT*/
+static int stdout_fd;
+/**< Host id process is running on */
+static char host_id[MAX_LONG_OPT_SZ];
+/**< Enable metrics. */
+static uint32_t enable_metrics;
/**< Enable stats reset. */
static uint32_t reset_stats;
/**< Enable xstats reset. */
static uint32_t reset_xstats;
/**< Enable memory info. */
static uint32_t mem_info;
+/**< Enable displaying xstat name. */
+static uint32_t enable_xstats_name;
+static char *xstats_name;
+
+/**< Enable xstats by ids. */
+#define MAX_NB_XSTATS_IDS 1024
+static uint32_t nb_xstats_ids;
+static uint64_t xstats_ids[MAX_NB_XSTATS_IDS];
/**< display usage */
static void
@@ -85,8 +105,15 @@ proc_info_usage(const char *prgname)
" --stats: to display port statistics, enabled by default\n"
" --xstats: to display extended port statistics, disabled by "
"default\n"
+ " --metrics: to display derived metrics of the ports, disabled by "
+ "default\n"
+ " --xstats-name NAME: to display single xstat id by NAME\n"
+ " --xstats-ids IDLIST: to display xstat values by id. "
+ "The argument is comma-separated list of xstat ids to print out.\n"
" --stats-reset: to reset port statistics\n"
- " --xstats-reset: to reset port extended statistics\n",
+ " --xstats-reset: to reset port extended statistics\n"
+ " --collectd-format: to print statistics to STDOUT in expected by collectd format\n"
+ " --host-id STRING: host id used to identify the system process is running on\n",
prgname);
}
@@ -116,6 +143,66 @@ parse_portmask(const char *portmask)
}
+/*
+ * Parse ids value list into array
+ */
+static int
+parse_xstats_ids(char *list, uint64_t *ids, int limit) {
+ int length;
+ char *token;
+ char *ctx = NULL;
+ char *endptr;
+
+ length = 0;
+ token = strtok_r(list, ",", &ctx);
+ while (token != NULL) {
+ ids[length] = strtoull(token, &endptr, 10);
+ if (*endptr != '\0')
+ return -EINVAL;
+
+ length++;
+ if (length >= limit)
+ return -E2BIG;
+
+ token = strtok_r(NULL, ",", &ctx);
+ }
+
+ return length;
+}
+
+static int
+proc_info_preparse_args(int argc, char **argv)
+{
+ char *prgname = argv[0];
+ int i;
+
+ for (i = 0; i < argc; i++) {
+ /* Print stats or xstats to STDOUT in collectd format */
+ if (!strncmp(argv[i], "--collectd-format", MAX_LONG_OPT_SZ)) {
+ enable_collectd_format = 1;
+ stdout_fd = dup(STDOUT_FILENO);
+ close(STDOUT_FILENO);
+ }
+ if (!strncmp(argv[i], "--host-id", MAX_LONG_OPT_SZ)) {
+ if ((i + 1) == argc) {
+ printf("Invalid host id or not specified\n");
+ proc_info_usage(prgname);
+ return -1;
+ }
+ strncpy(host_id, argv[i+1], sizeof(host_id));
+ }
+ }
+
+ if (!strlen(host_id)) {
+ int err = gethostname(host_id, MAX_LONG_OPT_SZ-1);
+
+ if (err)
+ strcpy(host_id, "unknown");
+ }
+
+ return 0;
+}
+
/* Parse the argument given in the command line of the application */
static int
proc_info_parse_args(int argc, char **argv)
@@ -127,7 +214,12 @@ proc_info_parse_args(int argc, char **argv)
{"stats", 0, NULL, 0},
{"stats-reset", 0, NULL, 0},
{"xstats", 0, NULL, 0},
+ {"metrics", 0, NULL, 0},
{"xstats-reset", 0, NULL, 0},
+ {"xstats-name", required_argument, NULL, 1},
+ {"collectd-format", 0, NULL, 0},
+ {"xstats-ids", 1, NULL, 1},
+ {"host-id", 0, NULL, 0},
{NULL, 0, 0, 0}
};
@@ -159,6 +251,10 @@ proc_info_parse_args(int argc, char **argv)
else if (!strncmp(long_option[option_index].name, "xstats",
MAX_LONG_OPT_SZ))
enable_xstats = 1;
+ else if (!strncmp(long_option[option_index].name,
+ "metrics",
+ MAX_LONG_OPT_SZ))
+ enable_metrics = 1;
/* Reset stats */
if (!strncmp(long_option[option_index].name, "stats-reset",
MAX_LONG_OPT_SZ))
@@ -168,7 +264,28 @@ proc_info_parse_args(int argc, char **argv)
MAX_LONG_OPT_SZ))
reset_xstats = 1;
break;
+ case 1:
+ /* Print xstat single value given by name*/
+ if (!strncmp(long_option[option_index].name,
+ "xstats-name", MAX_LONG_OPT_SZ)) {
+ enable_xstats_name = 1;
+ xstats_name = optarg;
+ printf("name:%s:%s\n",
+ long_option[option_index].name,
+ optarg);
+ } else if (!strncmp(long_option[option_index].name,
+ "xstats-ids",
+ MAX_LONG_OPT_SZ)) {
+ nb_xstats_ids = parse_xstats_ids(optarg,
+ xstats_ids, MAX_NB_XSTATS_IDS);
+
+ if (nb_xstats_ids <= 0) {
+ printf("xstats-id list parse error.\n");
+ return -1;
+ }
+ }
+ break;
default:
proc_info_usage(prgname);
return -1;
@@ -240,21 +357,137 @@ nic_stats_clear(uint8_t port_id)
printf("\n NIC statistics for port %d cleared\n", port_id);
}
+static void collectd_resolve_cnt_type(char *cnt_type, size_t cnt_type_len,
+ const char *cnt_name) {
+ char *type_end = strrchr(cnt_name, '_');
+
+ if ((type_end != NULL) &&
+ (strncmp(cnt_name, "rx_", strlen("rx_")) == 0)) {
+ if (strncmp(type_end, "_errors", strlen("_errors")) == 0)
+ strncpy(cnt_type, "if_rx_errors", cnt_type_len);
+ else if (strncmp(type_end, "_dropped", strlen("_dropped")) == 0)
+ strncpy(cnt_type, "if_rx_dropped", cnt_type_len);
+ else if (strncmp(type_end, "_bytes", strlen("_bytes")) == 0)
+ strncpy(cnt_type, "if_rx_octets", cnt_type_len);
+ else if (strncmp(type_end, "_packets", strlen("_packets")) == 0)
+ strncpy(cnt_type, "if_rx_packets", cnt_type_len);
+ else if (strncmp(type_end, "_placement",
+ strlen("_placement")) == 0)
+ strncpy(cnt_type, "if_rx_errors", cnt_type_len);
+ else if (strncmp(type_end, "_buff", strlen("_buff")) == 0)
+ strncpy(cnt_type, "if_rx_errors", cnt_type_len);
+ else
+ /* Does not fit obvious type: use a more generic one */
+ strncpy(cnt_type, "derive", cnt_type_len);
+ } else if ((type_end != NULL) &&
+ (strncmp(cnt_name, "tx_", strlen("tx_"))) == 0) {
+ if (strncmp(type_end, "_errors", strlen("_errors")) == 0)
+ strncpy(cnt_type, "if_tx_errors", cnt_type_len);
+ else if (strncmp(type_end, "_dropped", strlen("_dropped")) == 0)
+ strncpy(cnt_type, "if_tx_dropped", cnt_type_len);
+ else if (strncmp(type_end, "_bytes", strlen("_bytes")) == 0)
+ strncpy(cnt_type, "if_tx_octets", cnt_type_len);
+ else if (strncmp(type_end, "_packets", strlen("_packets")) == 0)
+ strncpy(cnt_type, "if_tx_packets", cnt_type_len);
+ else
+ /* Does not fit obvious type: use a more generic one */
+ strncpy(cnt_type, "derive", cnt_type_len);
+ } else if ((type_end != NULL) &&
+ (strncmp(cnt_name, "flow_", strlen("flow_"))) == 0) {
+ if (strncmp(type_end, "_filters", strlen("_filters")) == 0)
+ strncpy(cnt_type, "operations", cnt_type_len);
+ else if (strncmp(type_end, "_errors", strlen("_errors")) == 0)
+ strncpy(cnt_type, "errors", cnt_type_len);
+ else if (strncmp(type_end, "_filters", strlen("_filters")) == 0)
+ strncpy(cnt_type, "filter_result", cnt_type_len);
+ } else if ((type_end != NULL) &&
+ (strncmp(cnt_name, "mac_", strlen("mac_"))) == 0) {
+ if (strncmp(type_end, "_errors", strlen("_errors")) == 0)
+ strncpy(cnt_type, "errors", cnt_type_len);
+ } else {
+ /* Does not fit obvious type, or strrchr error: */
+ /* use a more generic type */
+ strncpy(cnt_type, "derive", cnt_type_len);
+ }
+}
+
+static void
+nic_xstats_by_name_display(uint8_t port_id, char *name)
+{
+ uint64_t id;
+
+ printf("###### NIC statistics for port %-2d, statistic name '%s':\n",
+ port_id, name);
+
+ if (rte_eth_xstats_get_id_by_name(port_id, name, &id) == 0)
+ printf("%s: %"PRIu64"\n", name, id);
+ else
+ printf("Statistic not found...\n");
+
+}
+
+static void
+nic_xstats_by_ids_display(uint8_t port_id, uint64_t *ids, int len)
+{
+ struct rte_eth_xstat_name *xstats_names;
+ uint64_t *values;
+ int ret, i;
+ static const char *nic_stats_border = "########################";
+
+ values = malloc(sizeof(*values) * len);
+ if (values == NULL) {
+ printf("Cannot allocate memory for xstats\n");
+ return;
+ }
+
+ xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * len);
+ if (xstats_names == NULL) {
+ printf("Cannot allocate memory for xstat names\n");
+ free(values);
+ return;
+ }
+
+ if (len != rte_eth_xstats_get_names_by_id(
+ port_id, xstats_names, len, ids)) {
+ printf("Cannot get xstat names\n");
+ goto err;
+ }
+
+ printf("###### NIC extended statistics for port %-2d #########\n",
+ port_id);
+ printf("%s############################\n", nic_stats_border);
+ ret = rte_eth_xstats_get_by_id(port_id, ids, values, len);
+ if (ret < 0 || ret > len) {
+ printf("Cannot get xstats\n");
+ goto err;
+ }
+
+ for (i = 0; i < len; i++)
+ printf("%s: %"PRIu64"\n",
+ xstats_names[i].name,
+ values[i]);
+
+ printf("%s############################\n", nic_stats_border);
+err:
+ free(values);
+ free(xstats_names);
+}
+
static void
nic_xstats_display(uint8_t port_id)
{
struct rte_eth_xstat_name *xstats_names;
- struct rte_eth_xstat *xstats;
+ uint64_t *values;
int len, ret, i;
static const char *nic_stats_border = "########################";
- len = rte_eth_xstats_get_names(port_id, NULL, 0);
+ len = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
if (len < 0) {
printf("Cannot get xstats count\n");
return;
}
- xstats = malloc(sizeof(xstats[0]) * len);
- if (xstats == NULL) {
+ values = malloc(sizeof(*values) * len);
+ if (values == NULL) {
printf("Cannot allocate memory for xstats\n");
return;
}
@@ -262,11 +495,11 @@ nic_xstats_display(uint8_t port_id)
xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * len);
if (xstats_names == NULL) {
printf("Cannot allocate memory for xstat names\n");
- free(xstats);
+ free(values);
return;
}
- if (len != rte_eth_xstats_get_names(
- port_id, xstats_names, len)) {
+ if (len != rte_eth_xstats_get_names_by_id(
+ port_id, xstats_names, len, NULL)) {
printf("Cannot get xstat names\n");
goto err;
}
@@ -275,21 +508,34 @@ nic_xstats_display(uint8_t port_id)
port_id);
printf("%s############################\n",
nic_stats_border);
- ret = rte_eth_xstats_get(port_id, xstats, len);
+ ret = rte_eth_xstats_get_by_id(port_id, NULL, values, len);
if (ret < 0 || ret > len) {
printf("Cannot get xstats\n");
goto err;
}
- for (i = 0; i < len; i++)
- printf("%s: %"PRIu64"\n",
- xstats_names[i].name,
- xstats[i].value);
+ for (i = 0; i < len; i++) {
+ if (enable_collectd_format) {
+ char counter_type[MAX_STRING_LEN];
+ char buf[MAX_STRING_LEN];
+
+ collectd_resolve_cnt_type(counter_type,
+ sizeof(counter_type),
+ xstats_names[i].name);
+ sprintf(buf, "PUTVAL %s/dpdkstat-port.%u/%s-%s N:%"
+ PRIu64"\n", host_id, port_id, counter_type,
+ xstats_names[i].name, values[i]);
+ write(stdout_fd, buf, strlen(buf));
+ } else {
+ printf("%s: %"PRIu64"\n", xstats_names[i].name,
+ values[i]);
+ }
+ }
printf("%s############################\n",
nic_stats_border);
err:
- free(xstats);
+ free(values);
free(xstats_names);
}
@@ -301,6 +547,67 @@ nic_xstats_clear(uint8_t port_id)
printf("\n NIC extended statistics for port %d cleared\n", port_id);
}
+static void
+metrics_display(int port_id)
+{
+ struct rte_metric_value *metrics;
+ struct rte_metric_name *names;
+ int len, ret;
+ static const char *nic_stats_border = "########################";
+
+ len = rte_metrics_get_names(NULL, 0);
+ if (len < 0) {
+ printf("Cannot get metrics count\n");
+ return;
+ }
+ if (len == 0) {
+ printf("No metrics to display (none have been registered)\n");
+ return;
+ }
+
+ metrics = rte_malloc("proc_info_metrics",
+ sizeof(struct rte_metric_value) * len, 0);
+ if (metrics == NULL) {
+ printf("Cannot allocate memory for metrics\n");
+ return;
+ }
+
+ names = rte_malloc(NULL, sizeof(struct rte_metric_name) * len, 0);
+ if (names == NULL) {
+ printf("Cannot allocate memory for metrcis names\n");
+ rte_free(metrics);
+ return;
+ }
+
+ if (len != rte_metrics_get_names(names, len)) {
+ printf("Cannot get metrics names\n");
+ rte_free(metrics);
+ rte_free(names);
+ return;
+ }
+
+ if (port_id == RTE_METRICS_GLOBAL)
+ printf("###### Non port specific metrics #########\n");
+ else
+ printf("###### metrics for port %-2d #########\n", port_id);
+ printf("%s############################\n", nic_stats_border);
+ ret = rte_metrics_get_values(port_id, metrics, len);
+ if (ret < 0 || ret > len) {
+ printf("Cannot get metrics values\n");
+ rte_free(metrics);
+ rte_free(names);
+ return;
+ }
+
+ int i;
+ for (i = 0; i < len; i++)
+ printf("%s: %"PRIu64"\n", names[i].name, metrics[i].value);
+
+ printf("%s############################\n", nic_stats_border);
+ rte_free(metrics);
+ rte_free(names);
+}
+
int
main(int argc, char **argv)
{
@@ -312,6 +619,13 @@ main(int argc, char **argv)
char *argp[argc + 3];
uint8_t nb_ports;
+ /* preparse app arguments */
+ ret = proc_info_preparse_args(argc, argv);
+ if (ret < 0) {
+ printf("Failed to parse arguments\n");
+ return -1;
+ }
+
argp[0] = argv[0];
argp[1] = c_flag;
argp[2] = n_flag;
@@ -360,8 +674,19 @@ main(int argc, char **argv)
nic_stats_clear(i);
else if (reset_xstats)
nic_xstats_clear(i);
+ else if (enable_xstats_name)
+ nic_xstats_by_name_display(i, xstats_name);
+ else if (nb_xstats_ids > 0)
+ nic_xstats_by_ids_display(i, xstats_ids,
+ nb_xstats_ids);
+ else if (enable_metrics)
+ metrics_display(i);
}
}
+ /* print port independent stats */
+ if (enable_metrics)
+ metrics_display(RTE_METRICS_GLOBAL);
+
return 0;
}
diff --git a/app/test-crypto-perf/Makefile b/app/test-crypto-perf/Makefile
new file mode 100644
index 00000000..e4a989fe
--- /dev/null
+++ b/app/test-crypto-perf/Makefile
@@ -0,0 +1,48 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+
+APP = dpdk-test-crypto-perf
+
+CFLAGS += $(WERROR_FLAGS)
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+SRCS-y += cperf_ops.c
+SRCS-y += cperf_options_parsing.c
+SRCS-y += cperf_test_vectors.c
+SRCS-y += cperf_test_throughput.c
+SRCS-y += cperf_test_latency.c
+SRCS-y += cperf_test_verify.c
+SRCS-y += cperf_test_vector_parsing.c
+
+include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/test-crypto-perf/cperf.h b/app/test-crypto-perf/cperf.h
new file mode 100644
index 00000000..293ba940
--- /dev/null
+++ b/app/test-crypto-perf/cperf.h
@@ -0,0 +1,58 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_
+#define _CPERF_
+
+#include <rte_crypto.h>
+
+#include "cperf_ops.h"
+
+struct cperf_options;
+struct cperf_test_vector;
+struct cperf_op_fns;
+
+typedef void *(*cperf_constructor_t)(uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *t_vec,
+ const struct cperf_op_fns *op_fns);
+
+typedef int (*cperf_runner_t)(void *test_ctx);
+typedef void (*cperf_destructor_t)(void *test_ctx);
+
+struct cperf_test {
+ cperf_constructor_t constructor;
+ cperf_runner_t runner;
+ cperf_destructor_t destructor;
+};
+
+#endif /* _CPERF_ */
diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
new file mode 100644
index 00000000..c2c3db57
--- /dev/null
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -0,0 +1,515 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cryptodev.h>
+
+#include "cperf_ops.h"
+#include "cperf_test_vectors.h"
+
+static int
+cperf_set_ops_null_cipher(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector __rte_unused)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+
+ rte_crypto_op_attach_sym_session(ops[i], sess);
+
+ sym_op->m_src = bufs_in[i];
+ sym_op->m_dst = bufs_out[i];
+
+ /* cipher parameters */
+ sym_op->cipher.data.length = options->test_buffer_size;
+ sym_op->cipher.data.offset = 0;
+ }
+
+ return 0;
+}
+
+static int
+cperf_set_ops_null_auth(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector __rte_unused)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+
+ rte_crypto_op_attach_sym_session(ops[i], sess);
+
+ sym_op->m_src = bufs_in[i];
+ sym_op->m_dst = bufs_out[i];
+
+ /* auth parameters */
+ sym_op->auth.data.length = options->test_buffer_size;
+ sym_op->auth.data.offset = 0;
+ }
+
+ return 0;
+}
+
+static int
+cperf_set_ops_cipher(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+
+ rte_crypto_op_attach_sym_session(ops[i], sess);
+
+ sym_op->m_src = bufs_in[i];
+ sym_op->m_dst = bufs_out[i];
+
+ /* cipher parameters */
+ sym_op->cipher.iv.data = test_vector->iv.data;
+ sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
+ sym_op->cipher.iv.length = test_vector->iv.length;
+
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
+ sym_op->cipher.data.length = options->test_buffer_size << 3;
+ else
+ sym_op->cipher.data.length = options->test_buffer_size;
+
+ sym_op->cipher.data.offset = 0;
+ }
+
+ return 0;
+}
+
+static int
+cperf_set_ops_auth(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+
+ rte_crypto_op_attach_sym_session(ops[i], sess);
+
+ sym_op->m_src = bufs_in[i];
+ sym_op->m_dst = bufs_out[i];
+
+ /* authentication parameters */
+ if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ sym_op->auth.digest.data = test_vector->digest.data;
+ sym_op->auth.digest.phys_addr =
+ test_vector->digest.phys_addr;
+ sym_op->auth.digest.length = options->auth_digest_sz;
+ } else {
+
+ uint32_t offset = options->test_buffer_size;
+ struct rte_mbuf *buf, *tbuf;
+
+ if (options->out_of_place) {
+ buf = bufs_out[i];
+ } else {
+ buf = bufs_in[i];
+
+ tbuf = buf;
+ while ((tbuf->next != NULL) &&
+ (offset >= tbuf->data_len)) {
+ offset -= tbuf->data_len;
+ tbuf = tbuf->next;
+ }
+ }
+
+ sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
+ uint8_t *, offset);
+ sym_op->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(buf, offset);
+ sym_op->auth.digest.length = options->auth_digest_sz;
+ sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
+ sym_op->auth.aad.data = test_vector->aad.data;
+ sym_op->auth.aad.length = options->auth_aad_sz;
+
+ }
+
+ if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
+ options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
+ options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
+ sym_op->auth.data.length = options->test_buffer_size << 3;
+ else
+ sym_op->auth.data.length = options->test_buffer_size;
+
+ sym_op->auth.data.offset = 0;
+ }
+
+ return 0;
+}
+
+static int
+cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+
+ rte_crypto_op_attach_sym_session(ops[i], sess);
+
+ sym_op->m_src = bufs_in[i];
+ sym_op->m_dst = bufs_out[i];
+
+ /* cipher parameters */
+ sym_op->cipher.iv.data = test_vector->iv.data;
+ sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
+ sym_op->cipher.iv.length = test_vector->iv.length;
+
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3)
+ sym_op->cipher.data.length = options->test_buffer_size << 3;
+ else
+ sym_op->cipher.data.length = options->test_buffer_size;
+
+ sym_op->cipher.data.offset = 0;
+
+ /* authentication parameters */
+ if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ sym_op->auth.digest.data = test_vector->digest.data;
+ sym_op->auth.digest.phys_addr =
+ test_vector->digest.phys_addr;
+ sym_op->auth.digest.length = options->auth_digest_sz;
+ } else {
+
+ uint32_t offset = options->test_buffer_size;
+ struct rte_mbuf *buf, *tbuf;
+
+ if (options->out_of_place) {
+ buf = bufs_out[i];
+ } else {
+ buf = bufs_in[i];
+
+ tbuf = buf;
+ while ((tbuf->next != NULL) &&
+ (offset >= tbuf->data_len)) {
+ offset -= tbuf->data_len;
+ tbuf = tbuf->next;
+ }
+ }
+
+ sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
+ uint8_t *, offset);
+ sym_op->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(buf, offset);
+ sym_op->auth.digest.length = options->auth_digest_sz;
+ sym_op->auth.aad.phys_addr = test_vector->aad.phys_addr;
+ sym_op->auth.aad.data = test_vector->aad.data;
+ sym_op->auth.aad.length = options->auth_aad_sz;
+ }
+
+ if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
+ options->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
+ options->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3)
+ sym_op->auth.data.length = options->test_buffer_size << 3;
+ else
+ sym_op->auth.data.length = options->test_buffer_size;
+
+ sym_op->auth.data.offset = 0;
+ }
+
+ return 0;
+}
+
+static int
+cperf_set_ops_aead(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_ops; i++) {
+ struct rte_crypto_sym_op *sym_op = ops[i]->sym;
+
+ rte_crypto_op_attach_sym_session(ops[i], sess);
+
+ sym_op->m_src = bufs_in[i];
+ sym_op->m_dst = bufs_out[i];
+
+ /* cipher parameters */
+ sym_op->cipher.iv.data = test_vector->iv.data;
+ sym_op->cipher.iv.phys_addr = test_vector->iv.phys_addr;
+ sym_op->cipher.iv.length = test_vector->iv.length;
+
+ sym_op->cipher.data.length = options->test_buffer_size;
+ sym_op->cipher.data.offset =
+ RTE_ALIGN_CEIL(options->auth_aad_sz, 16);
+
+ sym_op->auth.aad.data = rte_pktmbuf_mtod(bufs_in[i], uint8_t *);
+ sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(bufs_in[i]);
+ sym_op->auth.aad.length = options->auth_aad_sz;
+
+ /* authentication parameters */
+ if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ sym_op->auth.digest.data = test_vector->digest.data;
+ sym_op->auth.digest.phys_addr =
+ test_vector->digest.phys_addr;
+ sym_op->auth.digest.length = options->auth_digest_sz;
+ } else {
+
+ uint32_t offset = sym_op->cipher.data.length +
+ sym_op->cipher.data.offset;
+ struct rte_mbuf *buf, *tbuf;
+
+ if (options->out_of_place) {
+ buf = bufs_out[i];
+ } else {
+ buf = bufs_in[i];
+
+ tbuf = buf;
+ while ((tbuf->next != NULL) &&
+ (offset >= tbuf->data_len)) {
+ offset -= tbuf->data_len;
+ tbuf = tbuf->next;
+ }
+ }
+
+ sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
+ uint8_t *, offset);
+ sym_op->auth.digest.phys_addr =
+ rte_pktmbuf_mtophys_offset(buf, offset);
+
+ sym_op->auth.digest.length = options->auth_digest_sz;
+ }
+
+ sym_op->auth.data.length = options->test_buffer_size;
+ sym_op->auth.data.offset = options->auth_aad_sz;
+ }
+
+ return 0;
+}
+
+static struct rte_cryptodev_sym_session *
+cperf_create_session(uint8_t dev_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
+ struct rte_cryptodev_sym_session *sess = NULL;
+
+ /*
+ * cipher only
+ */
+ if (options->op_type == CPERF_CIPHER_ONLY) {
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.next = NULL;
+ cipher_xform.cipher.algo = options->cipher_algo;
+ cipher_xform.cipher.op = options->cipher_op;
+
+ /* cipher different than null */
+ if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
+ cipher_xform.cipher.key.data =
+ test_vector->cipher_key.data;
+ cipher_xform.cipher.key.length =
+ test_vector->cipher_key.length;
+ } else {
+ cipher_xform.cipher.key.data = NULL;
+ cipher_xform.cipher.key.length = 0;
+ }
+ /* create crypto session */
+ sess = rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ /*
+ * auth only
+ */
+ } else if (options->op_type == CPERF_AUTH_ONLY) {
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.next = NULL;
+ auth_xform.auth.algo = options->auth_algo;
+ auth_xform.auth.op = options->auth_op;
+
+ /* auth different than null */
+ if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
+ auth_xform.auth.digest_length =
+ options->auth_digest_sz;
+ auth_xform.auth.add_auth_data_length =
+ options->auth_aad_sz;
+ auth_xform.auth.key.length =
+ test_vector->auth_key.length;
+ auth_xform.auth.key.data = test_vector->auth_key.data;
+ } else {
+ auth_xform.auth.digest_length = 0;
+ auth_xform.auth.add_auth_data_length = 0;
+ auth_xform.auth.key.length = 0;
+ auth_xform.auth.key.data = NULL;
+ }
+ /* create crypto session */
+ sess = rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ /*
+ * cipher and auth
+ */
+ } else if (options->op_type == CPERF_CIPHER_THEN_AUTH
+ || options->op_type == CPERF_AUTH_THEN_CIPHER
+ || options->op_type == CPERF_AEAD) {
+
+ /*
+ * cipher
+ */
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.next = NULL;
+ cipher_xform.cipher.algo = options->cipher_algo;
+ cipher_xform.cipher.op = options->cipher_op;
+
+ /* cipher different than null */
+ if (options->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
+ cipher_xform.cipher.key.data =
+ test_vector->cipher_key.data;
+ cipher_xform.cipher.key.length =
+ test_vector->cipher_key.length;
+ } else {
+ cipher_xform.cipher.key.data = NULL;
+ cipher_xform.cipher.key.length = 0;
+ }
+
+ /*
+ * auth
+ */
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.next = NULL;
+ auth_xform.auth.algo = options->auth_algo;
+ auth_xform.auth.op = options->auth_op;
+
+ /* auth different than null */
+ if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
+ auth_xform.auth.digest_length = options->auth_digest_sz;
+ auth_xform.auth.add_auth_data_length =
+ options->auth_aad_sz;
+ /* auth options for aes gcm */
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
+ options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM) {
+ auth_xform.auth.key.length = 0;
+ auth_xform.auth.key.data = NULL;
+ } else { /* auth options for others */
+ auth_xform.auth.key.length =
+ test_vector->auth_key.length;
+ auth_xform.auth.key.data =
+ test_vector->auth_key.data;
+ }
+ } else {
+ auth_xform.auth.digest_length = 0;
+ auth_xform.auth.add_auth_data_length = 0;
+ auth_xform.auth.key.length = 0;
+ auth_xform.auth.key.data = NULL;
+ }
+
+ /* create crypto session for aes gcm */
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM) {
+ if (options->cipher_op ==
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ cipher_xform.next = &auth_xform;
+ /* create crypto session */
+ sess = rte_cryptodev_sym_session_create(dev_id,
+ &cipher_xform);
+ } else { /* decrypt */
+ auth_xform.next = &cipher_xform;
+ /* create crypto session */
+ sess = rte_cryptodev_sym_session_create(dev_id,
+ &auth_xform);
+ }
+ } else { /* create crypto session for other */
+ /* cipher then auth */
+ if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
+ cipher_xform.next = &auth_xform;
+ /* create crypto session */
+ sess = rte_cryptodev_sym_session_create(dev_id,
+ &cipher_xform);
+ } else { /* auth then cipher */
+ auth_xform.next = &cipher_xform;
+ /* create crypto session */
+ sess = rte_cryptodev_sym_session_create(dev_id,
+ &auth_xform);
+ }
+ }
+ }
+ return sess;
+}
+
+int
+cperf_get_op_functions(const struct cperf_options *options,
+ struct cperf_op_fns *op_fns)
+{
+ memset(op_fns, 0, sizeof(struct cperf_op_fns));
+
+ op_fns->sess_create = cperf_create_session;
+
+ if (options->op_type == CPERF_AEAD
+ || options->op_type == CPERF_AUTH_THEN_CIPHER
+ || options->op_type == CPERF_CIPHER_THEN_AUTH) {
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM &&
+ options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM)
+ op_fns->populate_ops = cperf_set_ops_aead;
+ else
+ op_fns->populate_ops = cperf_set_ops_cipher_auth;
+ return 0;
+ }
+ if (options->op_type == CPERF_AUTH_ONLY) {
+ if (options->auth_algo == RTE_CRYPTO_AUTH_NULL)
+ op_fns->populate_ops = cperf_set_ops_null_auth;
+ else
+ op_fns->populate_ops = cperf_set_ops_auth;
+ return 0;
+ }
+ if (options->op_type == CPERF_CIPHER_ONLY) {
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL)
+ op_fns->populate_ops = cperf_set_ops_null_cipher;
+ else
+ op_fns->populate_ops = cperf_set_ops_cipher;
+ return 0;
+ }
+
+ return -1;
+}
diff --git a/app/test-crypto-perf/cperf_ops.h b/app/test-crypto-perf/cperf_ops.h
new file mode 100644
index 00000000..1b748daf
--- /dev/null
+++ b/app/test-crypto-perf/cperf_ops.h
@@ -0,0 +1,62 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_OPS_
+#define _CPERF_OPS_
+
+#include <rte_crypto.h>
+
+#include "cperf.h"
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+
+
+typedef struct rte_cryptodev_sym_session *(*cperf_sessions_create_t)(
+ uint8_t dev_id, const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector);
+
+typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops,
+ struct rte_mbuf **bufs_in, struct rte_mbuf **bufs_out,
+ uint16_t nb_ops, struct rte_cryptodev_sym_session *sess,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector);
+
+struct cperf_op_fns {
+ cperf_sessions_create_t sess_create;
+ cperf_populate_ops_t populate_ops;
+};
+
+int
+cperf_get_op_functions(const struct cperf_options *options,
+ struct cperf_op_fns *op_fns);
+
+#endif /* _CPERF_OPS_ */
diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h
new file mode 100644
index 00000000..b928c584
--- /dev/null
+++ b/app/test-crypto-perf/cperf_options.h
@@ -0,0 +1,115 @@
+
+#ifndef _CPERF_OPTIONS_
+#define _CPERF_OPTIONS_
+
+#include <rte_crypto.h>
+
+#define CPERF_PTEST_TYPE ("ptest")
+#define CPERF_SILENT ("silent")
+
+#define CPERF_POOL_SIZE ("pool-sz")
+#define CPERF_TOTAL_OPS ("total-ops")
+#define CPERF_BURST_SIZE ("burst-sz")
+#define CPERF_BUFFER_SIZE ("buffer-sz")
+#define CPERF_SEGMENTS_NB ("segments-nb")
+
+#define CPERF_DEVTYPE ("devtype")
+#define CPERF_OPTYPE ("optype")
+#define CPERF_SESSIONLESS ("sessionless")
+#define CPERF_OUT_OF_PLACE ("out-of-place")
+#define CPERF_TEST_FILE ("test-file")
+#define CPERF_TEST_NAME ("test-name")
+
+#define CPERF_CIPHER_ALGO ("cipher-algo")
+#define CPERF_CIPHER_OP ("cipher-op")
+#define CPERF_CIPHER_KEY_SZ ("cipher-key-sz")
+#define CPERF_CIPHER_IV_SZ ("cipher-iv-sz")
+
+#define CPERF_AUTH_ALGO ("auth-algo")
+#define CPERF_AUTH_OP ("auth-op")
+#define CPERF_AUTH_KEY_SZ ("auth-key-sz")
+#define CPERF_AUTH_DIGEST_SZ ("auth-digest-sz")
+#define CPERF_AUTH_AAD_SZ ("auth-aad-sz")
+#define CPERF_CSV ("csv-friendly")
+
+#define MAX_LIST 32
+
+enum cperf_perf_test_type {
+ CPERF_TEST_TYPE_THROUGHPUT,
+ CPERF_TEST_TYPE_LATENCY,
+ CPERF_TEST_TYPE_VERIFY
+};
+
+
+extern const char *cperf_test_type_strs[];
+
+enum cperf_op_type {
+ CPERF_CIPHER_ONLY = 1,
+ CPERF_AUTH_ONLY,
+ CPERF_CIPHER_THEN_AUTH,
+ CPERF_AUTH_THEN_CIPHER,
+ CPERF_AEAD
+};
+
+extern const char *cperf_op_type_strs[];
+
+struct cperf_options {
+ enum cperf_perf_test_type test;
+
+ uint32_t pool_sz;
+ uint32_t total_ops;
+ uint32_t segments_nb;
+ uint32_t test_buffer_size;
+
+ uint32_t sessionless:1;
+ uint32_t out_of_place:1;
+ uint32_t silent:1;
+ uint32_t csv:1;
+
+ enum rte_crypto_cipher_algorithm cipher_algo;
+ enum rte_crypto_cipher_operation cipher_op;
+
+ uint16_t cipher_key_sz;
+ uint16_t cipher_iv_sz;
+
+ enum rte_crypto_auth_algorithm auth_algo;
+ enum rte_crypto_auth_operation auth_op;
+
+ uint16_t auth_key_sz;
+ uint16_t auth_digest_sz;
+ uint16_t auth_aad_sz;
+
+ char device_type[RTE_CRYPTODEV_NAME_LEN];
+ enum cperf_op_type op_type;
+
+ char *test_file;
+ char *test_name;
+
+ uint32_t buffer_size_list[MAX_LIST];
+ uint8_t buffer_size_count;
+ uint32_t max_buffer_size;
+ uint32_t min_buffer_size;
+ uint32_t inc_buffer_size;
+
+ uint32_t burst_size_list[MAX_LIST];
+ uint8_t burst_size_count;
+ uint32_t max_burst_size;
+ uint32_t min_burst_size;
+ uint32_t inc_burst_size;
+
+};
+
+void
+cperf_options_default(struct cperf_options *options);
+
+int
+cperf_options_parse(struct cperf_options *options,
+ int argc, char **argv);
+
+int
+cperf_options_check(struct cperf_options *options);
+
+void
+cperf_options_dump(struct cperf_options *options);
+
+#endif
diff --git a/app/test-crypto-perf/cperf_options_parsing.c b/app/test-crypto-perf/cperf_options_parsing.c
new file mode 100644
index 00000000..d172671f
--- /dev/null
+++ b/app/test-crypto-perf/cperf_options_parsing.c
@@ -0,0 +1,934 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <getopt.h>
+#include <unistd.h>
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "cperf_options.h"
+
+#define AES_BLOCK_SIZE 16
+#define DES_BLOCK_SIZE 8
+
+struct name_id_map {
+ const char *name;
+ uint32_t id;
+};
+
+static int
+get_str_key_id_mapping(struct name_id_map *map, unsigned int map_len,
+ const char *str_key)
+{
+ unsigned int i;
+
+ for (i = 0; i < map_len; i++) {
+
+ if (strcmp(str_key, map[i].name) == 0)
+ return map[i].id;
+ }
+
+ return -1;
+}
+
+static int
+parse_cperf_test_type(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map cperftest_namemap[] = {
+ {
+ cperf_test_type_strs[CPERF_TEST_TYPE_THROUGHPUT],
+ CPERF_TEST_TYPE_THROUGHPUT
+ },
+ {
+ cperf_test_type_strs[CPERF_TEST_TYPE_VERIFY],
+ CPERF_TEST_TYPE_VERIFY
+ },
+ {
+ cperf_test_type_strs[CPERF_TEST_TYPE_LATENCY],
+ CPERF_TEST_TYPE_LATENCY
+ }
+ };
+
+ int id = get_str_key_id_mapping(
+ (struct name_id_map *)cperftest_namemap,
+ RTE_DIM(cperftest_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "failed to parse test type");
+ return -1;
+ }
+
+ opts->test = (enum cperf_perf_test_type)id;
+
+ return 0;
+}
+
+static int
+parse_uint32_t(uint32_t *value, const char *arg)
+{
+ char *end = NULL;
+ unsigned long n = strtoul(arg, &end, 10);
+
+ if ((optarg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+
+ if (n > UINT32_MAX)
+ return -ERANGE;
+
+ *value = (uint32_t) n;
+
+ return 0;
+}
+
+static int
+parse_uint16_t(uint16_t *value, const char *arg)
+{
+ uint32_t val = 0;
+ int ret = parse_uint32_t(&val, arg);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = (uint16_t) val;
+
+ return 0;
+}
+
+static int
+parse_range(const char *arg, uint32_t *min, uint32_t *max, uint32_t *inc)
+{
+ char *token;
+ uint32_t number;
+
+ char *copy_arg = strdup(arg);
+
+ if (copy_arg == NULL)
+ return -1;
+
+ token = strtok(copy_arg, ":");
+
+ /* Parse minimum value */
+ if (token != NULL) {
+ number = strtoul(token, NULL, 10);
+
+ if (errno == EINVAL || errno == ERANGE ||
+ number == 0)
+ goto err_range;
+
+ *min = number;
+ } else
+ goto err_range;
+
+ token = strtok(NULL, ":");
+
+ /* Parse increment value */
+ if (token != NULL) {
+ number = strtoul(token, NULL, 10);
+
+ if (errno == EINVAL || errno == ERANGE ||
+ number == 0)
+ goto err_range;
+
+ *inc = number;
+ } else
+ goto err_range;
+
+ token = strtok(NULL, ":");
+
+ /* Parse maximum value */
+ if (token != NULL) {
+ number = strtoul(token, NULL, 10);
+
+ if (errno == EINVAL || errno == ERANGE ||
+ number == 0 ||
+ number < *min)
+ goto err_range;
+
+ *max = number;
+ } else
+ goto err_range;
+
+ if (strtok(NULL, ":") != NULL)
+ goto err_range;
+
+ free(copy_arg);
+ return 0;
+
+err_range:
+ free(copy_arg);
+ return -1;
+}
+
+static int
+parse_list(const char *arg, uint32_t *list, uint32_t *min, uint32_t *max)
+{
+ char *token;
+ uint32_t number;
+ uint8_t count = 0;
+
+ char *copy_arg = strdup(arg);
+
+ if (copy_arg == NULL)
+ return -1;
+
+ token = strtok(copy_arg, ",");
+
+ /* Parse first value */
+ if (token != NULL) {
+ number = strtoul(token, NULL, 10);
+
+ if (errno == EINVAL || errno == ERANGE ||
+ number == 0)
+ goto err_list;
+
+ list[count++] = number;
+ *min = number;
+ *max = number;
+ } else
+ goto err_list;
+
+ token = strtok(NULL, ",");
+
+ while (token != NULL) {
+ if (count == MAX_LIST) {
+ RTE_LOG(WARNING, USER1, "Using only the first %u sizes\n",
+ MAX_LIST);
+ break;
+ }
+
+ number = strtoul(token, NULL, 10);
+
+ if (errno == EINVAL || errno == ERANGE ||
+ number == 0)
+ goto err_list;
+
+ list[count++] = number;
+
+ if (number < *min)
+ *min = number;
+ if (number > *max)
+ *max = number;
+
+ token = strtok(NULL, ",");
+ }
+
+ free(copy_arg);
+ return count;
+
+err_list:
+ free(copy_arg);
+ return -1;
+}
+
+static int
+parse_total_ops(struct cperf_options *opts, const char *arg)
+{
+ int ret = parse_uint32_t(&opts->total_ops, arg);
+
+ if (ret)
+ RTE_LOG(ERR, USER1, "failed to parse total operations count\n");
+
+ if (opts->total_ops == 0) {
+ RTE_LOG(ERR, USER1,
+ "invalid total operations count number specified\n");
+ return -1;
+ }
+
+ return ret;
+}
+
+static int
+parse_pool_sz(struct cperf_options *opts, const char *arg)
+{
+ int ret = parse_uint32_t(&opts->pool_sz, arg);
+
+ if (ret)
+ RTE_LOG(ERR, USER1, "failed to parse pool size");
+ return ret;
+}
+
+static int
+parse_burst_sz(struct cperf_options *opts, const char *arg)
+{
+ int ret;
+
+ /* Try parsing the argument as a range, if it fails, parse it as a list */
+ if (parse_range(arg, &opts->min_burst_size, &opts->max_burst_size,
+ &opts->inc_burst_size) < 0) {
+ ret = parse_list(arg, opts->burst_size_list,
+ &opts->min_burst_size,
+ &opts->max_burst_size);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1, "failed to parse burst size/s\n");
+ return -1;
+ }
+ opts->burst_size_count = ret;
+ }
+
+ return 0;
+}
+
+static int
+parse_buffer_sz(struct cperf_options *opts, const char *arg)
+{
+ int ret;
+
+ /* Try parsing the argument as a range, if it fails, parse it as a list */
+ if (parse_range(arg, &opts->min_buffer_size, &opts->max_buffer_size,
+ &opts->inc_buffer_size) < 0) {
+ ret = parse_list(arg, opts->buffer_size_list,
+ &opts->min_buffer_size,
+ &opts->max_buffer_size);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1, "failed to parse burst size/s\n");
+ return -1;
+ }
+ opts->buffer_size_count = ret;
+ }
+
+ return 0;
+}
+
+static int
+parse_segments_nb(struct cperf_options *opts, const char *arg)
+{
+ int ret = parse_uint32_t(&opts->segments_nb, arg);
+
+ if (ret) {
+ RTE_LOG(ERR, USER1, "failed to parse segments number\n");
+ return -1;
+ }
+
+ if ((opts->segments_nb == 0) || (opts->segments_nb > 255)) {
+ RTE_LOG(ERR, USER1, "invalid segments number specified\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+parse_device_type(struct cperf_options *opts, const char *arg)
+{
+ if (strlen(arg) > (sizeof(opts->device_type) - 1))
+ return -1;
+
+ strncpy(opts->device_type, arg, sizeof(opts->device_type) - 1);
+ *(opts->device_type + sizeof(opts->device_type) - 1) = '\0';
+
+ return 0;
+}
+
+static int
+parse_op_type(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map optype_namemap[] = {
+ {
+ cperf_op_type_strs[CPERF_CIPHER_ONLY],
+ CPERF_CIPHER_ONLY
+ },
+ {
+ cperf_op_type_strs[CPERF_AUTH_ONLY],
+ CPERF_AUTH_ONLY
+ },
+ {
+ cperf_op_type_strs[CPERF_CIPHER_THEN_AUTH],
+ CPERF_CIPHER_THEN_AUTH
+ },
+ {
+ cperf_op_type_strs[CPERF_AUTH_THEN_CIPHER],
+ CPERF_AUTH_THEN_CIPHER
+ },
+ {
+ cperf_op_type_strs[CPERF_AEAD],
+ CPERF_AEAD
+ }
+ };
+
+ int id = get_str_key_id_mapping(optype_namemap,
+ RTE_DIM(optype_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "invalid opt type specified\n");
+ return -1;
+ }
+
+ opts->op_type = (enum cperf_op_type)id;
+
+ return 0;
+}
+
+static int
+parse_sessionless(struct cperf_options *opts,
+ const char *arg __rte_unused)
+{
+ opts->sessionless = 1;
+ return 0;
+}
+
+static int
+parse_out_of_place(struct cperf_options *opts,
+ const char *arg __rte_unused)
+{
+ opts->out_of_place = 1;
+ return 0;
+}
+
+static int
+parse_test_file(struct cperf_options *opts,
+ const char *arg)
+{
+ opts->test_file = strdup(arg);
+ if (access(opts->test_file, F_OK) != -1)
+ return 0;
+ RTE_LOG(ERR, USER1, "Test vector file doesn't exist\n");
+
+ return -1;
+}
+
+static int
+parse_test_name(struct cperf_options *opts,
+ const char *arg)
+{
+ char *test_name = (char *) rte_zmalloc(NULL,
+ sizeof(char) * (strlen(arg) + 3), 0);
+ snprintf(test_name, strlen(arg) + 3, "[%s]", arg);
+ opts->test_name = test_name;
+
+ return 0;
+}
+
+static int
+parse_silent(struct cperf_options *opts,
+ const char *arg __rte_unused)
+{
+ opts->silent = 1;
+
+ return 0;
+}
+
+static int
+parse_cipher_algo(struct cperf_options *opts, const char *arg)
+{
+
+ enum rte_crypto_cipher_algorithm cipher_algo;
+
+ if (rte_cryptodev_get_cipher_algo_enum(&cipher_algo, arg) < 0) {
+ RTE_LOG(ERR, USER1, "Invalid cipher algorithm specified\n");
+ return -1;
+ }
+
+ opts->cipher_algo = cipher_algo;
+
+ return 0;
+}
+
+static int
+parse_cipher_op(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map cipher_op_namemap[] = {
+ {
+ rte_crypto_cipher_operation_strings
+ [RTE_CRYPTO_CIPHER_OP_ENCRYPT],
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT },
+ {
+ rte_crypto_cipher_operation_strings
+ [RTE_CRYPTO_CIPHER_OP_DECRYPT],
+ RTE_CRYPTO_CIPHER_OP_DECRYPT
+ }
+ };
+
+ int id = get_str_key_id_mapping(cipher_op_namemap,
+ RTE_DIM(cipher_op_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "Invalid cipher operation specified\n");
+ return -1;
+ }
+
+ opts->cipher_op = (enum rte_crypto_cipher_operation)id;
+
+ return 0;
+}
+
+static int
+parse_cipher_key_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->cipher_key_sz, arg);
+}
+
+static int
+parse_cipher_iv_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->cipher_iv_sz, arg);
+}
+
+static int
+parse_auth_algo(struct cperf_options *opts, const char *arg)
+{
+ enum rte_crypto_auth_algorithm auth_algo;
+
+ if (rte_cryptodev_get_auth_algo_enum(&auth_algo, arg) < 0) {
+ RTE_LOG(ERR, USER1, "Invalid authentication algorithm specified\n");
+ return -1;
+ }
+
+ opts->auth_algo = auth_algo;
+
+ return 0;
+}
+
+static int
+parse_auth_op(struct cperf_options *opts, const char *arg)
+{
+ struct name_id_map auth_op_namemap[] = {
+ {
+ rte_crypto_auth_operation_strings
+ [RTE_CRYPTO_AUTH_OP_GENERATE],
+ RTE_CRYPTO_AUTH_OP_GENERATE },
+ {
+ rte_crypto_auth_operation_strings
+ [RTE_CRYPTO_AUTH_OP_VERIFY],
+ RTE_CRYPTO_AUTH_OP_VERIFY
+ }
+ };
+
+ int id = get_str_key_id_mapping(auth_op_namemap,
+ RTE_DIM(auth_op_namemap), arg);
+ if (id < 0) {
+ RTE_LOG(ERR, USER1, "invalid authentication operation specified"
+ "\n");
+ return -1;
+ }
+
+ opts->auth_op = (enum rte_crypto_auth_operation)id;
+
+ return 0;
+}
+
+static int
+parse_auth_key_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->auth_key_sz, arg);
+}
+
+static int
+parse_auth_digest_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->auth_digest_sz, arg);
+}
+
+static int
+parse_auth_aad_sz(struct cperf_options *opts, const char *arg)
+{
+ return parse_uint16_t(&opts->auth_aad_sz, arg);
+}
+
+static int
+parse_csv_friendly(struct cperf_options *opts, const char *arg __rte_unused)
+{
+ opts->csv = 1;
+ opts->silent = 1;
+ return 0;
+}
+
+typedef int (*option_parser_t)(struct cperf_options *opts,
+ const char *arg);
+
+struct long_opt_parser {
+ const char *lgopt_name;
+ option_parser_t parser_fn;
+
+};
+
+static struct option lgopts[] = {
+
+ { CPERF_PTEST_TYPE, required_argument, 0, 0 },
+
+ { CPERF_POOL_SIZE, required_argument, 0, 0 },
+ { CPERF_TOTAL_OPS, required_argument, 0, 0 },
+ { CPERF_BURST_SIZE, required_argument, 0, 0 },
+ { CPERF_BUFFER_SIZE, required_argument, 0, 0 },
+ { CPERF_SEGMENTS_NB, required_argument, 0, 0 },
+
+ { CPERF_DEVTYPE, required_argument, 0, 0 },
+ { CPERF_OPTYPE, required_argument, 0, 0 },
+
+ { CPERF_SILENT, no_argument, 0, 0 },
+ { CPERF_SESSIONLESS, no_argument, 0, 0 },
+ { CPERF_OUT_OF_PLACE, no_argument, 0, 0 },
+ { CPERF_TEST_FILE, required_argument, 0, 0 },
+ { CPERF_TEST_NAME, required_argument, 0, 0 },
+
+ { CPERF_CIPHER_ALGO, required_argument, 0, 0 },
+ { CPERF_CIPHER_OP, required_argument, 0, 0 },
+
+ { CPERF_CIPHER_KEY_SZ, required_argument, 0, 0 },
+ { CPERF_CIPHER_IV_SZ, required_argument, 0, 0 },
+
+ { CPERF_AUTH_ALGO, required_argument, 0, 0 },
+ { CPERF_AUTH_OP, required_argument, 0, 0 },
+
+ { CPERF_AUTH_KEY_SZ, required_argument, 0, 0 },
+ { CPERF_AUTH_DIGEST_SZ, required_argument, 0, 0 },
+ { CPERF_AUTH_AAD_SZ, required_argument, 0, 0 },
+ { CPERF_CSV, no_argument, 0, 0},
+
+ { NULL, 0, 0, 0 }
+};
+
+void
+cperf_options_default(struct cperf_options *opts)
+{
+ opts->test = CPERF_TEST_TYPE_THROUGHPUT;
+
+ opts->pool_sz = 8192;
+ opts->total_ops = 10000000;
+
+ opts->buffer_size_list[0] = 64;
+ opts->buffer_size_count = 1;
+ opts->max_buffer_size = 64;
+ opts->min_buffer_size = 64;
+ opts->inc_buffer_size = 0;
+
+ opts->burst_size_list[0] = 32;
+ opts->burst_size_count = 1;
+ opts->max_burst_size = 32;
+ opts->min_burst_size = 32;
+ opts->inc_burst_size = 0;
+
+ opts->segments_nb = 1;
+
+ strncpy(opts->device_type, "crypto_aesni_mb",
+ sizeof(opts->device_type));
+
+ opts->op_type = CPERF_CIPHER_THEN_AUTH;
+
+ opts->silent = 0;
+ opts->test_file = NULL;
+ opts->test_name = NULL;
+ opts->sessionless = 0;
+ opts->out_of_place = 0;
+ opts->csv = 0;
+
+ opts->cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ opts->cipher_op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ opts->cipher_key_sz = 16;
+ opts->cipher_iv_sz = 16;
+
+ opts->auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ opts->auth_op = RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ opts->auth_key_sz = 64;
+ opts->auth_digest_sz = 12;
+ opts->auth_aad_sz = 0;
+}
+
+static int
+cperf_opts_parse_long(int opt_idx, struct cperf_options *opts)
+{
+ struct long_opt_parser parsermap[] = {
+ { CPERF_PTEST_TYPE, parse_cperf_test_type },
+ { CPERF_SILENT, parse_silent },
+ { CPERF_POOL_SIZE, parse_pool_sz },
+ { CPERF_TOTAL_OPS, parse_total_ops },
+ { CPERF_BURST_SIZE, parse_burst_sz },
+ { CPERF_BUFFER_SIZE, parse_buffer_sz },
+ { CPERF_SEGMENTS_NB, parse_segments_nb },
+ { CPERF_DEVTYPE, parse_device_type },
+ { CPERF_OPTYPE, parse_op_type },
+ { CPERF_SESSIONLESS, parse_sessionless },
+ { CPERF_OUT_OF_PLACE, parse_out_of_place },
+ { CPERF_TEST_FILE, parse_test_file },
+ { CPERF_TEST_NAME, parse_test_name },
+ { CPERF_CIPHER_ALGO, parse_cipher_algo },
+ { CPERF_CIPHER_OP, parse_cipher_op },
+ { CPERF_CIPHER_KEY_SZ, parse_cipher_key_sz },
+ { CPERF_CIPHER_IV_SZ, parse_cipher_iv_sz },
+ { CPERF_AUTH_ALGO, parse_auth_algo },
+ { CPERF_AUTH_OP, parse_auth_op },
+ { CPERF_AUTH_KEY_SZ, parse_auth_key_sz },
+ { CPERF_AUTH_DIGEST_SZ, parse_auth_digest_sz },
+ { CPERF_AUTH_AAD_SZ, parse_auth_aad_sz },
+ { CPERF_CSV, parse_csv_friendly},
+ };
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(parsermap); i++) {
+ if (strncmp(lgopts[opt_idx].name, parsermap[i].lgopt_name,
+ strlen(lgopts[opt_idx].name)) == 0)
+ return parsermap[i].parser_fn(opts, optarg);
+ }
+
+ return -EINVAL;
+}
+
+int
+cperf_options_parse(struct cperf_options *options, int argc, char **argv)
+{
+ int opt, retval, opt_idx;
+
+ while ((opt = getopt_long(argc, argv, "", lgopts, &opt_idx)) != EOF) {
+ switch (opt) {
+ /* long options */
+ case 0:
+
+ retval = cperf_opts_parse_long(opt_idx, options);
+ if (retval != 0)
+ return retval;
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int
+cperf_options_check(struct cperf_options *options)
+{
+ uint32_t buffer_size, buffer_size_idx = 0;
+
+ if (options->segments_nb > options->min_buffer_size) {
+ RTE_LOG(ERR, USER1,
+ "Segments number greater than buffer size.\n");
+ return -EINVAL;
+ }
+
+ if (options->test == CPERF_TEST_TYPE_VERIFY &&
+ options->test_file == NULL) {
+ RTE_LOG(ERR, USER1, "Define path to the file with test"
+ " vectors.\n");
+ return -EINVAL;
+ }
+
+ if (options->test == CPERF_TEST_TYPE_VERIFY &&
+ options->op_type != CPERF_CIPHER_ONLY &&
+ options->test_name == NULL) {
+ RTE_LOG(ERR, USER1, "Define test name to get the correct digest"
+ " from the test vectors.\n");
+ return -EINVAL;
+ }
+
+ if (options->test_name != NULL && options->test_file == NULL) {
+ RTE_LOG(ERR, USER1, "Define path to the file with test"
+ " vectors.\n");
+ return -EINVAL;
+ }
+
+ if (options->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY &&
+ options->test_file == NULL) {
+ RTE_LOG(ERR, USER1, "Define path to the file with test"
+ " vectors.\n");
+ return -EINVAL;
+ }
+
+ if (options->test == CPERF_TEST_TYPE_VERIFY &&
+ options->total_ops > options->pool_sz) {
+ RTE_LOG(ERR, USER1, "Total number of ops must be less than or"
+ " equal to the pool size.\n");
+ return -EINVAL;
+ }
+
+ if (options->test == CPERF_TEST_TYPE_VERIFY &&
+ (options->inc_buffer_size != 0 ||
+ options->buffer_size_count > 1)) {
+ RTE_LOG(ERR, USER1, "Only one buffer size is allowed when "
+ "using the verify test.\n");
+ return -EINVAL;
+ }
+
+ if (options->test == CPERF_TEST_TYPE_VERIFY &&
+ (options->inc_burst_size != 0 ||
+ options->burst_size_count > 1)) {
+ RTE_LOG(ERR, USER1, "Only one burst size is allowed when "
+ "using the verify test.\n");
+ return -EINVAL;
+ }
+
+ if (options->op_type == CPERF_CIPHER_THEN_AUTH) {
+ if (options->cipher_op != RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ options->auth_op !=
+ RTE_CRYPTO_AUTH_OP_GENERATE) {
+ RTE_LOG(ERR, USER1, "Option cipher then auth must use"
+ " options: encrypt and generate.\n");
+ return -EINVAL;
+ }
+ } else if (options->op_type == CPERF_AUTH_THEN_CIPHER) {
+ if (options->cipher_op != RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ options->auth_op !=
+ RTE_CRYPTO_AUTH_OP_VERIFY) {
+ RTE_LOG(ERR, USER1, "Option auth then cipher must use"
+ " options: decrypt and verify.\n");
+ return -EINVAL;
+ }
+ } else if (options->op_type == CPERF_AEAD) {
+ if (!(options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ options->auth_op ==
+ RTE_CRYPTO_AUTH_OP_GENERATE) &&
+ !(options->cipher_op ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ options->auth_op ==
+ RTE_CRYPTO_AUTH_OP_VERIFY)) {
+ RTE_LOG(ERR, USER1, "Use together options: encrypt and"
+ " generate or decrypt and verify.\n");
+ return -EINVAL;
+ }
+ }
+
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_GCM ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_AES_CCM ||
+ options->auth_algo == RTE_CRYPTO_AUTH_AES_GCM ||
+ options->auth_algo == RTE_CRYPTO_AUTH_AES_CCM ||
+ options->auth_algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ if (options->op_type != CPERF_AEAD) {
+ RTE_LOG(ERR, USER1, "Use --optype aead\n");
+ return -EINVAL;
+ }
+ }
+
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_AES_ECB) {
+ if (options->inc_buffer_size != 0)
+ buffer_size = options->min_buffer_size;
+ else
+ buffer_size = options->buffer_size_list[0];
+
+ while (buffer_size <= options->max_buffer_size) {
+ if ((buffer_size % AES_BLOCK_SIZE) != 0) {
+ RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
+ "not suitable for the algorithm selected\n");
+ return -EINVAL;
+ }
+
+ if (options->inc_buffer_size != 0)
+ buffer_size += options->inc_buffer_size;
+ else {
+ if (++buffer_size_idx == options->buffer_size_count)
+ break;
+ buffer_size = options->buffer_size_list[buffer_size_idx];
+ }
+
+ }
+ }
+
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_DES_CBC ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_CBC ||
+ options->cipher_algo == RTE_CRYPTO_CIPHER_3DES_ECB) {
+ for (buffer_size = options->min_buffer_size;
+ buffer_size < options->max_buffer_size;
+ buffer_size += options->inc_buffer_size) {
+ if ((buffer_size % DES_BLOCK_SIZE) != 0) {
+ RTE_LOG(ERR, USER1, "Some of the buffer sizes are "
+ "not suitable for the algorithm selected\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void
+cperf_options_dump(struct cperf_options *opts)
+{
+ uint8_t size_idx;
+
+ printf("# Crypto Performance Application Options:\n");
+ printf("#\n");
+ printf("# cperf test: %s\n", cperf_test_type_strs[opts->test]);
+ printf("#\n");
+ printf("# size of crypto op / mbuf pool: %u\n", opts->pool_sz);
+ printf("# total number of ops: %u\n", opts->total_ops);
+ if (opts->inc_buffer_size != 0) {
+ printf("# buffer size:\n");
+ printf("#\t min: %u\n", opts->min_buffer_size);
+ printf("#\t max: %u\n", opts->max_buffer_size);
+ printf("#\t inc: %u\n", opts->inc_buffer_size);
+ } else {
+ printf("# buffer sizes: ");
+ for (size_idx = 0; size_idx < opts->buffer_size_count; size_idx++)
+ printf("%u ", opts->buffer_size_list[size_idx]);
+ printf("\n");
+ }
+ if (opts->inc_burst_size != 0) {
+ printf("# burst size:\n");
+ printf("#\t min: %u\n", opts->min_burst_size);
+ printf("#\t max: %u\n", opts->max_burst_size);
+ printf("#\t inc: %u\n", opts->inc_burst_size);
+ } else {
+ printf("# burst sizes: ");
+ for (size_idx = 0; size_idx < opts->burst_size_count; size_idx++)
+ printf("%u ", opts->burst_size_list[size_idx]);
+ printf("\n");
+ }
+ printf("\n# segments per buffer: %u\n", opts->segments_nb);
+ printf("#\n");
+ printf("# cryptodev type: %s\n", opts->device_type);
+ printf("#\n");
+ printf("# crypto operation: %s\n", cperf_op_type_strs[opts->op_type]);
+ printf("# sessionless: %s\n", opts->sessionless ? "yes" : "no");
+ printf("# out of place: %s\n", opts->out_of_place ? "yes" : "no");
+
+ printf("#\n");
+
+ if (opts->op_type == CPERF_AUTH_ONLY ||
+ opts->op_type == CPERF_CIPHER_THEN_AUTH ||
+ opts->op_type == CPERF_AUTH_THEN_CIPHER ||
+ opts->op_type == CPERF_AEAD) {
+ printf("# auth algorithm: %s\n",
+ rte_crypto_auth_algorithm_strings[opts->auth_algo]);
+ printf("# auth operation: %s\n",
+ rte_crypto_auth_operation_strings[opts->auth_op]);
+ printf("# auth key size: %u\n", opts->auth_key_sz);
+ printf("# auth digest size: %u\n", opts->auth_digest_sz);
+ printf("# auth aad size: %u\n", opts->auth_aad_sz);
+ printf("#\n");
+ }
+
+ if (opts->op_type == CPERF_CIPHER_ONLY ||
+ opts->op_type == CPERF_CIPHER_THEN_AUTH ||
+ opts->op_type == CPERF_AUTH_THEN_CIPHER ||
+ opts->op_type == CPERF_AEAD) {
+ printf("# cipher algorithm: %s\n",
+ rte_crypto_cipher_algorithm_strings[opts->cipher_algo]);
+ printf("# cipher operation: %s\n",
+ rte_crypto_cipher_operation_strings[opts->cipher_op]);
+ printf("# cipher key size: %u\n", opts->cipher_key_sz);
+ printf("# cipher iv size: %u\n", opts->cipher_iv_sz);
+ printf("#\n");
+ }
+}
diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
new file mode 100644
index 00000000..e61ac972
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -0,0 +1,552 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+
+#include "cperf_test_latency.h"
+#include "cperf_ops.h"
+
+
+struct cperf_op_result {
+ uint64_t tsc_start;
+ uint64_t tsc_end;
+ enum rte_crypto_op_status status;
+};
+
+struct cperf_latency_ctx {
+ uint8_t dev_id;
+ uint16_t qp_id;
+ uint8_t lcore_id;
+
+ struct rte_mempool *pkt_mbuf_pool_in;
+ struct rte_mempool *pkt_mbuf_pool_out;
+ struct rte_mbuf **mbufs_in;
+ struct rte_mbuf **mbufs_out;
+
+ struct rte_mempool *crypto_op_pool;
+
+ struct rte_cryptodev_sym_session *sess;
+
+ cperf_populate_ops_t populate_ops;
+
+ const struct cperf_options *options;
+ const struct cperf_test_vector *test_vector;
+ struct cperf_op_result *res;
+};
+
+#define max(a, b) (a > b ? (uint64_t)a : (uint64_t)b)
+#define min(a, b) (a < b ? (uint64_t)a : (uint64_t)b)
+
+static void
+cperf_latency_test_free(struct cperf_latency_ctx *ctx, uint32_t mbuf_nb)
+{
+ uint32_t i;
+
+ if (ctx) {
+ if (ctx->sess)
+ rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+
+ if (ctx->mbufs_in) {
+ for (i = 0; i < mbuf_nb; i++)
+ rte_pktmbuf_free(ctx->mbufs_in[i]);
+
+ rte_free(ctx->mbufs_in);
+ }
+
+ if (ctx->mbufs_out) {
+ for (i = 0; i < mbuf_nb; i++) {
+ if (ctx->mbufs_out[i] != NULL)
+ rte_pktmbuf_free(ctx->mbufs_out[i]);
+ }
+
+ rte_free(ctx->mbufs_out);
+ }
+
+ if (ctx->pkt_mbuf_pool_in)
+ rte_mempool_free(ctx->pkt_mbuf_pool_in);
+
+ if (ctx->pkt_mbuf_pool_out)
+ rte_mempool_free(ctx->pkt_mbuf_pool_out);
+
+ if (ctx->crypto_op_pool)
+ rte_mempool_free(ctx->crypto_op_pool);
+
+ rte_free(ctx->res);
+ rte_free(ctx);
+ }
+}
+
+static struct rte_mbuf *
+cperf_mbuf_create(struct rte_mempool *mempool,
+ uint32_t segments_nb,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ struct rte_mbuf *mbuf;
+ uint32_t segment_sz = options->max_buffer_size / segments_nb;
+ uint32_t last_sz = options->max_buffer_size % segments_nb;
+ uint8_t *mbuf_data;
+ uint8_t *test_data =
+ (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ test_vector->plaintext.data :
+ test_vector->ciphertext.data;
+
+ mbuf = rte_pktmbuf_alloc(mempool);
+ if (mbuf == NULL)
+ goto error;
+
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, segment_sz);
+ test_data += segment_sz;
+ segments_nb--;
+
+ while (segments_nb) {
+ struct rte_mbuf *m;
+
+ m = rte_pktmbuf_alloc(mempool);
+ if (m == NULL)
+ goto error;
+
+ rte_pktmbuf_chain(mbuf, m);
+
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, segment_sz);
+ test_data += segment_sz;
+ segments_nb--;
+ }
+
+ if (last_sz) {
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, last_sz);
+ }
+
+ if (options->op_type != CPERF_CIPHER_ONLY) {
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
+ options->auth_digest_sz);
+ if (mbuf_data == NULL)
+ goto error;
+ }
+
+ if (options->op_type == CPERF_AEAD) {
+ uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
+ RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+
+ if (aead == NULL)
+ goto error;
+
+ memcpy(aead, test_vector->aad.data, test_vector->aad.length);
+ }
+
+ return mbuf;
+error:
+ if (mbuf != NULL)
+ rte_pktmbuf_free(mbuf);
+
+ return NULL;
+}
+
+void *
+cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *op_fns)
+{
+ struct cperf_latency_ctx *ctx = NULL;
+ unsigned int mbuf_idx = 0;
+ char pool_name[32] = "";
+
+ ctx = rte_malloc(NULL, sizeof(struct cperf_latency_ctx), 0);
+ if (ctx == NULL)
+ goto err;
+
+ ctx->dev_id = dev_id;
+ ctx->qp_id = qp_id;
+
+ ctx->populate_ops = op_fns->populate_ops;
+ ctx->options = options;
+ ctx->test_vector = test_vector;
+
+ ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ if (ctx->sess == NULL)
+ goto err;
+
+ snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
+ dev_id);
+
+ ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
+ options->pool_sz * options->segments_nb, 0, 0,
+ RTE_PKTMBUF_HEADROOM +
+ RTE_CACHE_LINE_ROUNDUP(
+ (options->max_buffer_size / options->segments_nb) +
+ (options->max_buffer_size % options->segments_nb) +
+ options->auth_digest_sz),
+ rte_socket_id());
+
+ if (ctx->pkt_mbuf_pool_in == NULL)
+ goto err;
+
+ /* Generate mbufs_in with plaintext populated for test */
+ ctx->mbufs_in = rte_malloc(NULL,
+ (sizeof(struct rte_mbuf *) *
+ ctx->options->pool_sz), 0);
+
+ for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
+ ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
+ ctx->pkt_mbuf_pool_in, options->segments_nb,
+ options, test_vector);
+ if (ctx->mbufs_in[mbuf_idx] == NULL)
+ goto err;
+ }
+
+ if (options->out_of_place == 1) {
+
+ snprintf(pool_name, sizeof(pool_name),
+ "cperf_pool_out_cdev_%d",
+ dev_id);
+
+ ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
+ pool_name, options->pool_sz, 0, 0,
+ RTE_PKTMBUF_HEADROOM +
+ RTE_CACHE_LINE_ROUNDUP(
+ options->max_buffer_size +
+ options->auth_digest_sz),
+ rte_socket_id());
+
+ if (ctx->pkt_mbuf_pool_out == NULL)
+ goto err;
+ }
+
+ ctx->mbufs_out = rte_malloc(NULL,
+ (sizeof(struct rte_mbuf *) *
+ ctx->options->pool_sz), 0);
+
+ for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
+ if (options->out_of_place == 1) {
+ ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
+ ctx->pkt_mbuf_pool_out, 1,
+ options, test_vector);
+ if (ctx->mbufs_out[mbuf_idx] == NULL)
+ goto err;
+ } else {
+ ctx->mbufs_out[mbuf_idx] = NULL;
+ }
+ }
+
+ snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
+ dev_id);
+
+ ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
+ rte_socket_id());
+ if (ctx->crypto_op_pool == NULL)
+ goto err;
+
+ ctx->res = rte_malloc(NULL, sizeof(struct cperf_op_result) *
+ ctx->options->total_ops, 0);
+
+ if (ctx->res == NULL)
+ goto err;
+
+ return ctx;
+err:
+ cperf_latency_test_free(ctx, mbuf_idx);
+
+ return NULL;
+}
+
+int
+cperf_latency_test_runner(void *arg)
+{
+ struct cperf_latency_ctx *ctx = arg;
+ struct cperf_op_result *pres;
+ uint16_t test_burst_size;
+ uint8_t burst_size_idx = 0;
+
+ static int only_once;
+
+ if (ctx == NULL)
+ return 0;
+
+ struct rte_crypto_op *ops[ctx->options->max_burst_size];
+ struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
+ uint64_t i;
+
+ uint32_t lcore = rte_lcore_id();
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ struct rte_cryptodev_info dev_info;
+ int linearize = 0;
+
+ /* Check if source mbufs require coalescing */
+ if (ctx->options->segments_nb > 1) {
+ rte_cryptodev_info_get(ctx->dev_id, &dev_info);
+ if ((dev_info.feature_flags &
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
+ linearize = 1;
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+
+ ctx->lcore_id = lcore;
+
+ /* Warm up the host CPU before starting the test */
+ for (i = 0; i < ctx->options->total_ops; i++)
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+
+ /* Get first size from range or list */
+ if (ctx->options->inc_burst_size != 0)
+ test_burst_size = ctx->options->min_burst_size;
+ else
+ test_burst_size = ctx->options->burst_size_list[0];
+
+ while (test_burst_size <= ctx->options->max_burst_size) {
+ uint64_t ops_enqd = 0, ops_deqd = 0;
+ uint64_t m_idx = 0, b_idx = 0;
+
+ uint64_t tsc_val, tsc_end, tsc_start;
+ uint64_t tsc_max = 0, tsc_min = ~0UL, tsc_tot = 0, tsc_idx = 0;
+ uint64_t enqd_max = 0, enqd_min = ~0UL, enqd_tot = 0;
+ uint64_t deqd_max = 0, deqd_min = ~0UL, deqd_tot = 0;
+
+ while (enqd_tot < ctx->options->total_ops) {
+
+ uint16_t burst_size = ((enqd_tot + test_burst_size)
+ <= ctx->options->total_ops) ?
+ test_burst_size :
+ ctx->options->total_ops -
+ enqd_tot;
+
+ /* Allocate crypto ops from pool */
+ if (burst_size != rte_crypto_op_bulk_alloc(
+ ctx->crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops, burst_size))
+ return -1;
+
+ /* Setup crypto op, attach mbuf etc */
+ (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
+ &ctx->mbufs_out[m_idx],
+ burst_size, ctx->sess, ctx->options,
+ ctx->test_vector);
+
+ tsc_start = rte_rdtsc_precise();
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ if (linearize) {
+ /* PMD doesn't support scatter-gather and source buffer
+ * is segmented.
+ * We need to linearize it before enqueuing.
+ */
+ for (i = 0; i < burst_size; i++)
+ rte_pktmbuf_linearize(ops[i]->sym->m_src);
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+
+ /* Enqueue burst of ops on crypto device */
+ ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+ ops, burst_size);
+
+ /* Dequeue processed burst of ops from crypto device */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, test_burst_size);
+
+ tsc_end = rte_rdtsc_precise();
+
+ /* Free memory for not enqueued operations */
+ for (i = ops_enqd; i < burst_size; i++)
+ rte_crypto_op_free(ops[i]);
+
+ for (i = 0; i < ops_enqd; i++) {
+ ctx->res[tsc_idx].tsc_start = tsc_start;
+ ops[i]->opaque_data = (void *)&ctx->res[tsc_idx];
+ tsc_idx++;
+ }
+
+ if (likely(ops_deqd)) {
+ /*
+ * free crypto ops so they can be reused. We don't free
+ * the mbufs here as we don't want to reuse them as
+ * the crypto operation will change the data and cause
+ * failures.
+ */
+ for (i = 0; i < ops_deqd; i++) {
+ pres = (struct cperf_op_result *)
+ (ops_processed[i]->opaque_data);
+ pres->status = ops_processed[i]->status;
+ pres->tsc_end = tsc_end;
+
+ rte_crypto_op_free(ops_processed[i]);
+ }
+
+ deqd_tot += ops_deqd;
+ deqd_max = max(ops_deqd, deqd_max);
+ deqd_min = min(ops_deqd, deqd_min);
+ }
+
+ enqd_tot += ops_enqd;
+ enqd_max = max(ops_enqd, enqd_max);
+ enqd_min = min(ops_enqd, enqd_min);
+
+ m_idx += ops_enqd;
+ m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
+ 0 : m_idx;
+ b_idx++;
+ }
+
+ /* Dequeue any operations still in the crypto device */
+ while (deqd_tot < ctx->options->total_ops) {
+ /* Sending 0 length burst to flush sw crypto device */
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+
+ /* dequeue burst */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, test_burst_size);
+
+ tsc_end = rte_rdtsc_precise();
+
+ if (ops_deqd != 0) {
+ for (i = 0; i < ops_deqd; i++) {
+ pres = (struct cperf_op_result *)
+ (ops_processed[i]->opaque_data);
+ pres->status = ops_processed[i]->status;
+ pres->tsc_end = tsc_end;
+
+ rte_crypto_op_free(ops_processed[i]);
+ }
+
+ deqd_tot += ops_deqd;
+ deqd_max = max(ops_deqd, deqd_max);
+ deqd_min = min(ops_deqd, deqd_min);
+ }
+ }
+
+ for (i = 0; i < tsc_idx; i++) {
+ tsc_val = ctx->res[i].tsc_end - ctx->res[i].tsc_start;
+ tsc_max = max(tsc_val, tsc_max);
+ tsc_min = min(tsc_val, tsc_min);
+ tsc_tot += tsc_val;
+ }
+
+ double time_tot, time_avg, time_max, time_min;
+
+ const uint64_t tunit = 1000000; /* us */
+ const uint64_t tsc_hz = rte_get_tsc_hz();
+
+ uint64_t enqd_avg = enqd_tot / b_idx;
+ uint64_t deqd_avg = deqd_tot / b_idx;
+ uint64_t tsc_avg = tsc_tot / tsc_idx;
+
+ time_tot = tunit*(double)(tsc_tot) / tsc_hz;
+ time_avg = tunit*(double)(tsc_avg) / tsc_hz;
+ time_max = tunit*(double)(tsc_max) / tsc_hz;
+ time_min = tunit*(double)(tsc_min) / tsc_hz;
+
+ if (ctx->options->csv) {
+ if (!only_once)
+ printf("\n# lcore, Buffer Size, Burst Size, Pakt Seq #, "
+ "Packet Size, cycles, time (us)");
+
+ for (i = 0; i < ctx->options->total_ops; i++) {
+
+ printf("\n%u;%u;%u;%"PRIu64";%"PRIu64";%.3f",
+ ctx->lcore_id, ctx->options->test_buffer_size,
+ test_burst_size, i + 1,
+ ctx->res[i].tsc_end - ctx->res[i].tsc_start,
+ tunit * (double) (ctx->res[i].tsc_end
+ - ctx->res[i].tsc_start)
+ / tsc_hz);
+
+ }
+ only_once = 1;
+ } else {
+ printf("\n# Device %d on lcore %u\n", ctx->dev_id,
+ ctx->lcore_id);
+ printf("\n# total operations: %u", ctx->options->total_ops);
+ printf("\n# Buffer size: %u", ctx->options->test_buffer_size);
+ printf("\n# Burst size: %u", test_burst_size);
+ printf("\n# Number of bursts: %"PRIu64,
+ b_idx);
+
+ printf("\n#");
+ printf("\n# \t Total\t Average\t "
+ "Maximum\t Minimum");
+ printf("\n# enqueued\t%12"PRIu64"\t%10"PRIu64"\t"
+ "%10"PRIu64"\t%10"PRIu64, enqd_tot,
+ enqd_avg, enqd_max, enqd_min);
+ printf("\n# dequeued\t%12"PRIu64"\t%10"PRIu64"\t"
+ "%10"PRIu64"\t%10"PRIu64, deqd_tot,
+ deqd_avg, deqd_max, deqd_min);
+ printf("\n# cycles\t%12"PRIu64"\t%10"PRIu64"\t"
+ "%10"PRIu64"\t%10"PRIu64, tsc_tot,
+ tsc_avg, tsc_max, tsc_min);
+ printf("\n# time [us]\t%12.0f\t%10.3f\t%10.3f\t%10.3f",
+ time_tot, time_avg, time_max, time_min);
+ printf("\n\n");
+
+ }
+
+ /* Get next size from range or list */
+ if (ctx->options->inc_burst_size != 0)
+ test_burst_size += ctx->options->inc_burst_size;
+ else {
+ if (++burst_size_idx == ctx->options->burst_size_count)
+ break;
+ test_burst_size =
+ ctx->options->burst_size_list[burst_size_idx];
+ }
+ }
+
+ return 0;
+}
+
+void
+cperf_latency_test_destructor(void *arg)
+{
+ struct cperf_latency_ctx *ctx = arg;
+
+ if (ctx == NULL)
+ return;
+
+ cperf_latency_test_free(ctx, ctx->options->pool_sz);
+
+}
diff --git a/app/test-crypto-perf/cperf_test_latency.h b/app/test-crypto-perf/cperf_test_latency.h
new file mode 100644
index 00000000..6a2cf610
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_latency.h
@@ -0,0 +1,57 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_LATENCY_
+#define _CPERF_LATENCY_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+#include "cperf.h"
+#include "cperf_ops.h"
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+
+void *
+cperf_latency_test_constructor(uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *ops_fn);
+
+int
+cperf_latency_test_runner(void *test_ctx);
+
+void
+cperf_latency_test_destructor(void *test_ctx);
+
+#endif /* _CPERF_LATENCY_ */
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
new file mode 100644
index 00000000..61b27ea5
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -0,0 +1,518 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+
+#include "cperf_test_throughput.h"
+#include "cperf_ops.h"
+
+struct cperf_throughput_ctx {
+ uint8_t dev_id;
+ uint16_t qp_id;
+ uint8_t lcore_id;
+
+ struct rte_mempool *pkt_mbuf_pool_in;
+ struct rte_mempool *pkt_mbuf_pool_out;
+ struct rte_mbuf **mbufs_in;
+ struct rte_mbuf **mbufs_out;
+
+ struct rte_mempool *crypto_op_pool;
+
+ struct rte_cryptodev_sym_session *sess;
+
+ cperf_populate_ops_t populate_ops;
+
+ const struct cperf_options *options;
+ const struct cperf_test_vector *test_vector;
+};
+
+static void
+cperf_throughput_test_free(struct cperf_throughput_ctx *ctx, uint32_t mbuf_nb)
+{
+ uint32_t i;
+
+ if (ctx) {
+ if (ctx->sess)
+ rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+
+ if (ctx->mbufs_in) {
+ for (i = 0; i < mbuf_nb; i++)
+ rte_pktmbuf_free(ctx->mbufs_in[i]);
+
+ rte_free(ctx->mbufs_in);
+ }
+
+ if (ctx->mbufs_out) {
+ for (i = 0; i < mbuf_nb; i++) {
+ if (ctx->mbufs_out[i] != NULL)
+ rte_pktmbuf_free(ctx->mbufs_out[i]);
+ }
+
+ rte_free(ctx->mbufs_out);
+ }
+
+ if (ctx->pkt_mbuf_pool_in)
+ rte_mempool_free(ctx->pkt_mbuf_pool_in);
+
+ if (ctx->pkt_mbuf_pool_out)
+ rte_mempool_free(ctx->pkt_mbuf_pool_out);
+
+ if (ctx->crypto_op_pool)
+ rte_mempool_free(ctx->crypto_op_pool);
+
+ rte_free(ctx);
+ }
+}
+
+static struct rte_mbuf *
+cperf_mbuf_create(struct rte_mempool *mempool,
+ uint32_t segments_nb,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ struct rte_mbuf *mbuf;
+ uint32_t segment_sz = options->max_buffer_size / segments_nb;
+ uint32_t last_sz = options->max_buffer_size % segments_nb;
+ uint8_t *mbuf_data;
+ uint8_t *test_data =
+ (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ test_vector->plaintext.data :
+ test_vector->ciphertext.data;
+
+ mbuf = rte_pktmbuf_alloc(mempool);
+ if (mbuf == NULL)
+ goto error;
+
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, segment_sz);
+ test_data += segment_sz;
+ segments_nb--;
+
+ while (segments_nb) {
+ struct rte_mbuf *m;
+
+ m = rte_pktmbuf_alloc(mempool);
+ if (m == NULL)
+ goto error;
+
+ rte_pktmbuf_chain(mbuf, m);
+
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, segment_sz);
+ test_data += segment_sz;
+ segments_nb--;
+ }
+
+ if (last_sz) {
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, last_sz);
+ }
+
+ if (options->op_type != CPERF_CIPHER_ONLY) {
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
+ options->auth_digest_sz);
+ if (mbuf_data == NULL)
+ goto error;
+ }
+
+ if (options->op_type == CPERF_AEAD) {
+ uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
+ RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+
+ if (aead == NULL)
+ goto error;
+
+ memcpy(aead, test_vector->aad.data, test_vector->aad.length);
+ }
+
+ return mbuf;
+error:
+ if (mbuf != NULL)
+ rte_pktmbuf_free(mbuf);
+
+ return NULL;
+}
+
+void *
+cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *op_fns)
+{
+ struct cperf_throughput_ctx *ctx = NULL;
+ unsigned int mbuf_idx = 0;
+ char pool_name[32] = "";
+
+ ctx = rte_malloc(NULL, sizeof(struct cperf_throughput_ctx), 0);
+ if (ctx == NULL)
+ goto err;
+
+ ctx->dev_id = dev_id;
+ ctx->qp_id = qp_id;
+
+ ctx->populate_ops = op_fns->populate_ops;
+ ctx->options = options;
+ ctx->test_vector = test_vector;
+
+ ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ if (ctx->sess == NULL)
+ goto err;
+
+ snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
+ dev_id);
+
+ ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
+ options->pool_sz * options->segments_nb, 0, 0,
+ RTE_PKTMBUF_HEADROOM +
+ RTE_CACHE_LINE_ROUNDUP(
+ (options->max_buffer_size / options->segments_nb) +
+ (options->max_buffer_size % options->segments_nb) +
+ options->auth_digest_sz),
+ rte_socket_id());
+
+ if (ctx->pkt_mbuf_pool_in == NULL)
+ goto err;
+
+ /* Generate mbufs_in with plaintext populated for test */
+ ctx->mbufs_in = rte_malloc(NULL,
+ (sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);
+
+ for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
+ ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
+ ctx->pkt_mbuf_pool_in, options->segments_nb,
+ options, test_vector);
+ if (ctx->mbufs_in[mbuf_idx] == NULL)
+ goto err;
+ }
+
+ if (options->out_of_place == 1) {
+
+ snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%d",
+ dev_id);
+
+ ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
+ pool_name, options->pool_sz, 0, 0,
+ RTE_PKTMBUF_HEADROOM +
+ RTE_CACHE_LINE_ROUNDUP(
+ options->max_buffer_size +
+ options->auth_digest_sz),
+ rte_socket_id());
+
+ if (ctx->pkt_mbuf_pool_out == NULL)
+ goto err;
+ }
+
+ ctx->mbufs_out = rte_malloc(NULL,
+ (sizeof(struct rte_mbuf *) *
+ ctx->options->pool_sz), 0);
+
+ for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
+ if (options->out_of_place == 1) {
+ ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
+ ctx->pkt_mbuf_pool_out, 1,
+ options, test_vector);
+ if (ctx->mbufs_out[mbuf_idx] == NULL)
+ goto err;
+ } else {
+ ctx->mbufs_out[mbuf_idx] = NULL;
+ }
+ }
+
+ snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
+ dev_id);
+
+ ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
+ rte_socket_id());
+ if (ctx->crypto_op_pool == NULL)
+ goto err;
+
+ return ctx;
+err:
+ cperf_throughput_test_free(ctx, mbuf_idx);
+
+ return NULL;
+}
+
+int
+cperf_throughput_test_runner(void *test_ctx)
+{
+ struct cperf_throughput_ctx *ctx = test_ctx;
+ uint16_t test_burst_size;
+ uint8_t burst_size_idx = 0;
+
+ static int only_once;
+
+ struct rte_crypto_op *ops[ctx->options->max_burst_size];
+ struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
+ uint64_t i;
+
+ uint32_t lcore = rte_lcore_id();
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ struct rte_cryptodev_info dev_info;
+ int linearize = 0;
+
+ /* Check if source mbufs require coalescing */
+ if (ctx->options->segments_nb > 1) {
+ rte_cryptodev_info_get(ctx->dev_id, &dev_info);
+ if ((dev_info.feature_flags &
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
+ linearize = 1;
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+
+ ctx->lcore_id = lcore;
+
+ /* Warm up the host CPU before starting the test */
+ for (i = 0; i < ctx->options->total_ops; i++)
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+
+ /* Get first size from range or list */
+ if (ctx->options->inc_burst_size != 0)
+ test_burst_size = ctx->options->min_burst_size;
+ else
+ test_burst_size = ctx->options->burst_size_list[0];
+
+ while (test_burst_size <= ctx->options->max_burst_size) {
+ uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
+ uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
+
+ uint64_t m_idx = 0, tsc_start, tsc_end, tsc_duration;
+
+ uint16_t ops_unused = 0;
+
+ tsc_start = rte_rdtsc_precise();
+
+ while (ops_enqd_total < ctx->options->total_ops) {
+
+ uint16_t burst_size = ((ops_enqd_total + test_burst_size)
+ <= ctx->options->total_ops) ?
+ test_burst_size :
+ ctx->options->total_ops -
+ ops_enqd_total;
+
+ uint16_t ops_needed = burst_size - ops_unused;
+
+ /* Allocate crypto ops from pool */
+ if (ops_needed != rte_crypto_op_bulk_alloc(
+ ctx->crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops, ops_needed))
+ return -1;
+
+ /* Setup crypto op, attach mbuf etc */
+ (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
+ &ctx->mbufs_out[m_idx],
+ ops_needed, ctx->sess, ctx->options,
+ ctx->test_vector);
+
+ /**
+ * When ops_needed is smaller than ops_enqd, the
+ * unused ops need to be moved to the front for
+ * next round use.
+ */
+ if (unlikely(ops_enqd > ops_needed)) {
+ size_t nb_b_to_mov = ops_unused * sizeof(
+ struct rte_crypto_op *);
+
+ memmove(&ops[ops_needed], &ops[ops_enqd],
+ nb_b_to_mov);
+ }
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ if (linearize) {
+ /* PMD doesn't support scatter-gather and source buffer
+ * is segmented.
+ * We need to linearize it before enqueuing.
+ */
+ for (i = 0; i < burst_size; i++)
+ rte_pktmbuf_linearize(ops[i]->sym->m_src);
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+
+ /* Enqueue burst of ops on crypto device */
+ ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+ ops, burst_size);
+ if (ops_enqd < burst_size)
+ ops_enqd_failed++;
+
+ /**
+ * Calculate number of ops not enqueued (mainly for hw
+ * accelerators whose ingress queue can fill up).
+ */
+ ops_unused = burst_size - ops_enqd;
+ ops_enqd_total += ops_enqd;
+
+
+ /* Dequeue processed burst of ops from crypto device */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, test_burst_size);
+
+ if (likely(ops_deqd)) {
+ /* free crypto ops so they can be reused. We don't free
+ * the mbufs here as we don't want to reuse them as
+ * the crypto operation will change the data and cause
+ * failures.
+ */
+ for (i = 0; i < ops_deqd; i++)
+ rte_crypto_op_free(ops_processed[i]);
+
+ ops_deqd_total += ops_deqd;
+ } else {
+ /**
+ * Count dequeue polls which didn't return any
+ * processed operations. This statistic is mainly
+ * relevant to hw accelerators.
+ */
+ ops_deqd_failed++;
+ }
+
+ m_idx += ops_needed;
+ m_idx = m_idx + test_burst_size > ctx->options->pool_sz ?
+ 0 : m_idx;
+ }
+
+ /* Dequeue any operations still in the crypto device */
+
+ while (ops_deqd_total < ctx->options->total_ops) {
+ /* Sending 0 length burst to flush sw crypto device */
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+
+ /* dequeue burst */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, test_burst_size);
+ if (ops_deqd == 0)
+ ops_deqd_failed++;
+ else {
+ for (i = 0; i < ops_deqd; i++)
+ rte_crypto_op_free(ops_processed[i]);
+
+ ops_deqd_total += ops_deqd;
+ }
+ }
+
+ tsc_end = rte_rdtsc_precise();
+ tsc_duration = (tsc_end - tsc_start);
+
+ /* Calculate average operations processed per second */
+ double ops_per_second = ((double)ctx->options->total_ops /
+ tsc_duration) * rte_get_tsc_hz();
+
+ /* Calculate average throughput (Gbps) in bits per second */
+ double throughput_gbps = ((ops_per_second *
+ ctx->options->test_buffer_size * 8) / 1000000000);
+
+ /* Calculate average cycles per packet */
+ double cycles_per_packet = ((double)tsc_duration /
+ ctx->options->total_ops);
+
+ if (!ctx->options->csv) {
+ if (!only_once)
+ printf("%12s%12s%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
+ "lcore id", "Buf Size", "Burst Size",
+ "Enqueued", "Dequeued", "Failed Enq",
+ "Failed Deq", "MOps", "Gbps",
+ "Cycles/Buf");
+ only_once = 1;
+
+ printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
+ "%12"PRIu64"%12.4f%12.4f%12.2f\n",
+ ctx->lcore_id,
+ ctx->options->test_buffer_size,
+ test_burst_size,
+ ops_enqd_total,
+ ops_deqd_total,
+ ops_enqd_failed,
+ ops_deqd_failed,
+ ops_per_second/1000000,
+ throughput_gbps,
+ cycles_per_packet);
+ } else {
+ if (!only_once)
+ printf("# lcore id, Buffer Size(B),"
+ "Burst Size,Enqueued,Dequeued,Failed Enq,"
+ "Failed Deq,Ops(Millions),Throughput(Gbps),"
+ "Cycles/Buf\n\n");
+ only_once = 1;
+
+ printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
+ "%.f3;%.f3;%.f3\n",
+ ctx->lcore_id,
+ ctx->options->test_buffer_size,
+ test_burst_size,
+ ops_enqd_total,
+ ops_deqd_total,
+ ops_enqd_failed,
+ ops_deqd_failed,
+ ops_per_second/1000000,
+ throughput_gbps,
+ cycles_per_packet);
+ }
+
+ /* Get next size from range or list */
+ if (ctx->options->inc_burst_size != 0)
+ test_burst_size += ctx->options->inc_burst_size;
+ else {
+ if (++burst_size_idx == ctx->options->burst_size_count)
+ break;
+ test_burst_size = ctx->options->burst_size_list[burst_size_idx];
+ }
+
+ }
+
+ return 0;
+}
+
+
+void
+cperf_throughput_test_destructor(void *arg)
+{
+ struct cperf_throughput_ctx *ctx = arg;
+
+ if (ctx == NULL)
+ return;
+
+ cperf_throughput_test_free(ctx, ctx->options->pool_sz);
+}
diff --git a/app/test-crypto-perf/cperf_test_throughput.h b/app/test-crypto-perf/cperf_test_throughput.h
new file mode 100644
index 00000000..f1b5766c
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_throughput.h
@@ -0,0 +1,58 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_THROUGHPUT_
+#define _CPERF_THROUGHPUT_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+#include "cperf.h"
+#include "cperf_ops.h"
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+
+
+void *
+cperf_throughput_test_constructor(uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *ops_fn);
+
+int
+cperf_throughput_test_runner(void *test_ctx);
+
+void
+cperf_throughput_test_destructor(void *test_ctx);
+
+#endif /* _CPERF_THROUGHPUT_ */
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
new file mode 100644
index 00000000..f384e3d9
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -0,0 +1,507 @@
+#ifdef RTE_EXEC_ENV_BSDAPP
+ #define _WITH_GETLINE
+#endif
+#include <stdio.h>
+
+#include <rte_malloc.h>
+
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+#include "cperf_test_vector_parsing.h"
+
+int
+free_test_vector(struct cperf_test_vector *vector, struct cperf_options *opts)
+{
+ if (vector == NULL || opts == NULL)
+ return -1;
+
+ rte_free(vector->iv.data);
+ rte_free(vector->aad.data);
+ rte_free(vector->digest.data);
+
+ if (opts->test_file != NULL) {
+ rte_free(vector->plaintext.data);
+ rte_free(vector->cipher_key.data);
+ rte_free(vector->auth_key.data);
+ rte_free(vector->ciphertext.data);
+ }
+
+ rte_free(vector);
+
+ return 0;
+}
+
+void
+show_test_vector(struct cperf_test_vector *test_vector)
+{
+ const uint8_t wrap = 32;
+ uint32_t i;
+
+ if (test_vector == NULL)
+ return;
+
+ if (test_vector->plaintext.data) {
+ printf("\nplaintext =\n");
+ for (i = 0; i < test_vector->plaintext.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == test_vector->plaintext.length - 1)
+ printf("0x%02x",
+ test_vector->plaintext.data[i]);
+ else
+ printf("0x%02x, ",
+ test_vector->plaintext.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->cipher_key.data) {
+ printf("\ncipher_key =\n");
+ for (i = 0; i < test_vector->cipher_key.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->cipher_key.length - 1))
+ printf("0x%02x",
+ test_vector->cipher_key.data[i]);
+ else
+ printf("0x%02x, ",
+ test_vector->cipher_key.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->auth_key.data) {
+ printf("\nauth_key =\n");
+ for (i = 0; i < test_vector->auth_key.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->auth_key.length - 1))
+ printf("0x%02x", test_vector->auth_key.data[i]);
+ else
+ printf("0x%02x, ",
+ test_vector->auth_key.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->iv.data) {
+ printf("\niv =\n");
+ for (i = 0; i < test_vector->iv.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->iv.length - 1))
+ printf("0x%02x", test_vector->iv.data[i]);
+ else
+ printf("0x%02x, ", test_vector->iv.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->ciphertext.data) {
+ printf("\nciphertext =\n");
+ for (i = 0; i < test_vector->ciphertext.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == test_vector->ciphertext.length - 1)
+ printf("0x%02x",
+ test_vector->ciphertext.data[i]);
+ else
+ printf("0x%02x, ",
+ test_vector->ciphertext.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->aad.data) {
+ printf("\naad =\n");
+ for (i = 0; i < test_vector->aad.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->aad.length - 1))
+ printf("0x%02x", test_vector->aad.data[i]);
+ else
+ printf("0x%02x, ", test_vector->aad.data[i]);
+ }
+ printf("\n");
+ }
+
+ if (test_vector->digest.data) {
+ printf("\ndigest =\n");
+ for (i = 0; i < test_vector->digest.length; ++i) {
+ if ((i % wrap == 0) && (i != 0))
+ printf("\n");
+ if (i == (uint32_t)(test_vector->digest.length - 1))
+ printf("0x%02x", test_vector->digest.data[i]);
+ else
+ printf("0x%02x, ", test_vector->digest.data[i]);
+ }
+ printf("\n");
+ }
+}
+
+/* trim leading and trailing spaces */
+static char *
+trim_space(char *str)
+{
+ char *start, *end;
+
+ for (start = str; *start; start++) {
+ if (!isspace((unsigned char) start[0]))
+ break;
+ }
+
+ for (end = start + strlen(start); end > start + 1; end--) {
+ if (!isspace((unsigned char) end[-1]))
+ break;
+ }
+
+ *end = 0;
+
+ /* Shift from "start" to the beginning of the string */
+ if (start > str)
+ memmove(str, start, (end - start) + 1);
+
+ return str;
+}
+
+/* tokenization test values separated by a comma */
+static int
+parse_values(char *tokens, uint8_t **data, uint32_t *data_length)
+{
+ uint32_t n_tokens;
+ uint32_t data_size = 32;
+
+ uint8_t *values, *values_resized;
+ char *tok, *error = NULL;
+
+ tok = strtok(tokens, CPERF_VALUE_DELIMITER);
+ if (tok == NULL)
+ return -1;
+
+ values = (uint8_t *) rte_zmalloc(NULL, sizeof(uint8_t) * data_size, 0);
+ if (values == NULL)
+ return -1;
+
+ n_tokens = 0;
+ while (tok != NULL) {
+ values_resized = NULL;
+
+ if (n_tokens >= data_size) {
+ data_size *= 2;
+
+ values_resized = (uint8_t *) rte_realloc(values,
+ sizeof(uint8_t) * data_size, 0);
+ if (values_resized == NULL) {
+ rte_free(values);
+ return -1;
+ }
+ values = values_resized;
+ }
+
+ values[n_tokens] = (uint8_t) strtoul(tok, &error, 0);
+ if ((error == NULL) || (*error != '\0')) {
+ printf("Failed with convert '%s'\n", tok);
+ rte_free(values);
+ return -1;
+ }
+
+ tok = strtok(NULL, CPERF_VALUE_DELIMITER);
+ if (tok == NULL)
+ break;
+
+ n_tokens++;
+ }
+
+ values_resized = (uint8_t *) rte_realloc(values,
+ sizeof(uint8_t) * (n_tokens + 1), 0);
+
+ if (values_resized == NULL) {
+ rte_free(values);
+ return -1;
+ }
+
+ *data = values_resized;
+ *data_length = n_tokens + 1;
+
+ return 0;
+}
+
+/* checks the type of key and assigns data */
+static int
+parse_entry(char *entry, struct cperf_test_vector *vector,
+ struct cperf_options *opts, uint8_t tc_found)
+{
+ int status;
+ uint32_t data_length;
+
+ uint8_t *data = NULL;
+ char *token, *key_token;
+
+ if (entry == NULL) {
+ printf("Expected entry value\n");
+ return -1;
+ }
+
+ /* get key */
+ token = strtok(entry, CPERF_ENTRY_DELIMITER);
+ key_token = token;
+ /* get values for key */
+ token = strtok(NULL, CPERF_ENTRY_DELIMITER);
+
+ if (key_token == NULL || token == NULL) {
+ printf("Expected 'key = values' but was '%.40s'..\n", entry);
+ return -1;
+ }
+
+ status = parse_values(token, &data, &data_length);
+ if (status)
+ return -1;
+
+ /* compare keys */
+ if (strstr(key_token, "plaintext")) {
+ rte_free(vector->plaintext.data);
+ vector->plaintext.data = data;
+ if (tc_found)
+ vector->plaintext.length = data_length;
+ else {
+ if (opts->max_buffer_size > data_length) {
+ printf("Global plaintext shorter than "
+ "buffer_sz\n");
+ return -1;
+ }
+ vector->plaintext.length = opts->max_buffer_size;
+ }
+
+ } else if (strstr(key_token, "cipher_key")) {
+ rte_free(vector->cipher_key.data);
+ vector->cipher_key.data = data;
+ if (tc_found)
+ vector->cipher_key.length = data_length;
+ else {
+ if (opts->cipher_key_sz > data_length) {
+ printf("Global cipher_key shorter than "
+ "cipher_key_sz\n");
+ return -1;
+ }
+ vector->cipher_key.length = opts->cipher_key_sz;
+ }
+
+ } else if (strstr(key_token, "auth_key")) {
+ rte_free(vector->auth_key.data);
+ vector->auth_key.data = data;
+ if (tc_found)
+ vector->auth_key.length = data_length;
+ else {
+ if (opts->auth_key_sz > data_length) {
+ printf("Global auth_key shorter than "
+ "auth_key_sz\n");
+ return -1;
+ }
+ vector->auth_key.length = opts->auth_key_sz;
+ }
+
+ } else if (strstr(key_token, "iv")) {
+ rte_free(vector->iv.data);
+ vector->iv.data = data;
+ vector->iv.phys_addr = rte_malloc_virt2phy(vector->iv.data);
+ if (tc_found)
+ vector->iv.length = data_length;
+ else {
+ if (opts->cipher_iv_sz > data_length) {
+ printf("Global iv shorter than "
+ "cipher_iv_sz\n");
+ return -1;
+ }
+ vector->iv.length = opts->cipher_iv_sz;
+ }
+
+ } else if (strstr(key_token, "ciphertext")) {
+ rte_free(vector->ciphertext.data);
+ vector->ciphertext.data = data;
+ if (tc_found)
+ vector->ciphertext.length = data_length;
+ else {
+ if (opts->max_buffer_size > data_length) {
+ printf("Global ciphertext shorter than "
+ "buffer_sz\n");
+ return -1;
+ }
+ vector->ciphertext.length = opts->max_buffer_size;
+ }
+
+ } else if (strstr(key_token, "aad")) {
+ rte_free(vector->aad.data);
+ vector->aad.data = data;
+ vector->aad.phys_addr = rte_malloc_virt2phy(vector->aad.data);
+ if (tc_found)
+ vector->aad.length = data_length;
+ else {
+ if (opts->auth_aad_sz > data_length) {
+ printf("Global aad shorter than "
+ "auth_aad_sz\n");
+ return -1;
+ }
+ vector->aad.length = opts->auth_aad_sz;
+ }
+
+ } else if (strstr(key_token, "digest")) {
+ rte_free(vector->digest.data);
+ vector->digest.data = data;
+ vector->digest.phys_addr = rte_malloc_virt2phy(
+ vector->digest.data);
+ if (tc_found)
+ vector->digest.length = data_length;
+ else {
+ if (opts->auth_digest_sz > data_length) {
+ printf("Global digest shorter than "
+ "auth_digest_sz\n");
+ return -1;
+ }
+ vector->digest.length = opts->auth_digest_sz;
+ }
+ } else {
+ printf("Not valid key: '%s'\n", trim_space(key_token));
+ return -1;
+ }
+
+ return 0;
+}
+
+/* searches in the file for test keys and values */
+static int
+parse_file(struct cperf_test_vector *vector, struct cperf_options *opts)
+{
+ uint8_t tc_found = 0;
+ uint8_t tc_data_start = 0;
+ ssize_t read;
+ size_t len = 0;
+ int status = 0;
+
+ FILE *fp;
+ char *line = NULL;
+ char *entry = NULL;
+
+ fp = fopen(opts->test_file, "r");
+ if (fp == NULL) {
+ printf("File %s does not exists\n", opts->test_file);
+ return -1;
+ }
+
+ while ((read = getline(&line, &len, fp)) != -1) {
+
+ /* ignore comments and new lines */
+ if (line[0] == '#' || line[0] == '/' || line[0] == '\n'
+ || line[0] == '\r' || line[0] == ' ')
+ continue;
+
+ trim_space(line);
+
+ /* next test case is started */
+ if (line[0] == '[' && line[strlen(line) - 1] == ']' && tc_found)
+ break;
+ /* test case section started, end of global data */
+ else if (line[0] == '[' && line[strlen(line) - 1] == ']')
+ tc_data_start = 1;
+
+ /* test name unspecified, end after global data */
+ if (tc_data_start && opts->test_name == NULL)
+ break;
+ /* searching for a suitable test */
+ else if (tc_data_start && tc_found == 0) {
+ if (!strcmp(line, opts->test_name)) {
+ tc_found = 1;
+ continue;
+ } else
+ continue;
+ }
+
+ /* buffer for multiline */
+ entry = (char *) rte_realloc(entry,
+ sizeof(char) * strlen(line) + 1, 0);
+ if (entry == NULL)
+ return -1;
+
+ memset(entry, 0, strlen(line) + 1);
+ strncpy(entry, line, strlen(line));
+
+ /* check if entry ends with , or = */
+ if (entry[strlen(entry) - 1] == ','
+ || entry[strlen(entry) - 1] == '=') {
+ while ((read = getline(&line, &len, fp)) != -1) {
+ trim_space(line);
+
+ /* extend entry about length of new line */
+ char *entry_extended = (char *) rte_realloc(
+ entry, sizeof(char)
+ * (strlen(line) + strlen(entry))
+ + 1, 0);
+
+ if (entry_extended == NULL)
+ goto err;
+ entry = entry_extended;
+
+ strncat(entry, line, strlen(line));
+
+ if (entry[strlen(entry) - 1] != ',')
+ break;
+ }
+ }
+ status = parse_entry(entry, vector, opts, tc_found);
+ if (status) {
+ printf("An error occurred while parsing!\n");
+ goto err;
+ }
+ }
+
+ if (tc_found == 0 && opts->test_name != NULL) {
+ printf("Not found '%s' case in test file\n", opts->test_name);
+ goto err;
+ }
+
+ fclose(fp);
+ free(line);
+ rte_free(entry);
+
+ return 0;
+
+err:
+ if (fp)
+ fclose(fp);
+ if (line)
+ free(line);
+ if (entry)
+ rte_free(entry);
+
+ return -1;
+}
+
+struct cperf_test_vector*
+cperf_test_vector_get_from_file(struct cperf_options *opts)
+{
+ int status;
+ struct cperf_test_vector *test_vector = NULL;
+
+ if (opts == NULL || opts->test_file == NULL)
+ return test_vector;
+
+ test_vector = (struct cperf_test_vector *) rte_zmalloc(NULL,
+ sizeof(struct cperf_test_vector), 0);
+ if (test_vector == NULL)
+ return test_vector;
+
+ /* filling the vector with data from a file */
+ status = parse_file(test_vector, opts);
+ if (status) {
+ free_test_vector(test_vector, opts);
+ return NULL;
+ }
+
+ /* other values not included in the file */
+ test_vector->data.cipher_offset = 0;
+ test_vector->data.cipher_length = opts->max_buffer_size;
+
+ test_vector->data.auth_offset = 0;
+ test_vector->data.auth_length = opts->max_buffer_size;
+
+ return test_vector;
+}
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.h b/app/test-crypto-perf/cperf_test_vector_parsing.h
new file mode 100644
index 00000000..e3df98bd
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.h
@@ -0,0 +1,73 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef APP_CRYPTO_PERF_CPERF_TEST_VECTOR_PARSING_H_
+#define APP_CRYPTO_PERF_CPERF_TEST_VECTOR_PARSING_H_
+
+#define CPERF_VALUE_DELIMITER ","
+#define CPERF_ENTRY_DELIMITER "="
+
+/**
+ * Frees the allocated memory for test vector
+ *
+ * @param vector
+ * Destination vector test to release
+ * @param opts
+ * Test options
+ * @return
+ * 0 on success, (-1) on error.
+ */
+int
+free_test_vector(struct cperf_test_vector *vector, struct cperf_options *opts);
+
+/**
+ * Displays data in test vector
+ *
+ * @param vector
+ * Vector to display
+ */
+void
+show_test_vector(struct cperf_test_vector *test_vector);
+
+/**
+ * Completes test vector with data from file
+ *
+ * @param opts
+ * Test options
+ * @return
+ * NULL on error.
+ * Test vector pointer on successful.
+ */
+struct cperf_test_vector*
+cperf_test_vector_get_from_file(struct cperf_options *opts);
+
+#endif /* APP_CRYPTO_PERF_CPERF_TEST_VECTOR_PARSING_H_ */
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
new file mode 100644
index 00000000..757957f7
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -0,0 +1,500 @@
+#include <rte_crypto.h>
+#include <rte_malloc.h>
+
+#include "cperf_test_vectors.h"
+
+uint8_t plaintext[2048] = {
+ 0x71, 0x75, 0x83, 0x98, 0x75, 0x42, 0x51, 0x09, 0x94, 0x02, 0x13, 0x20,
+ 0x15, 0x64, 0x46, 0x32, 0x08, 0x18, 0x91, 0x82, 0x86, 0x52, 0x23, 0x93,
+ 0x44, 0x54, 0x28, 0x68, 0x78, 0x78, 0x70, 0x06, 0x42, 0x74, 0x41, 0x27,
+ 0x73, 0x38, 0x53, 0x77, 0x51, 0x96, 0x53, 0x24, 0x03, 0x88, 0x74, 0x14,
+ 0x70, 0x23, 0x88, 0x30, 0x85, 0x18, 0x89, 0x27, 0x41, 0x71, 0x61, 0x23,
+ 0x04, 0x83, 0x30, 0x57, 0x26, 0x47, 0x23, 0x75, 0x25, 0x62, 0x53, 0x80,
+ 0x38, 0x34, 0x21, 0x33, 0x34, 0x51, 0x46, 0x29, 0x94, 0x64, 0x22, 0x67,
+ 0x25, 0x45, 0x70, 0x26, 0x74, 0x39, 0x46, 0x71, 0x08, 0x85, 0x27, 0x18,
+ 0x93, 0x39, 0x72, 0x11, 0x57, 0x26, 0x88, 0x46, 0x47, 0x49, 0x86, 0x92,
+ 0x03, 0x37, 0x96, 0x40, 0x84, 0x53, 0x67, 0x47, 0x60, 0x60, 0x37, 0x67,
+ 0x02, 0x68, 0x76, 0x62, 0x42, 0x01, 0x59, 0x11, 0x01, 0x89, 0x40, 0x87,
+ 0x58, 0x20, 0x51, 0x21, 0x66, 0x26, 0x26, 0x73, 0x03, 0x06, 0x14, 0x25,
+ 0x98, 0x42, 0x44, 0x67, 0x24, 0x78, 0x71, 0x45, 0x32, 0x61, 0x20, 0x26,
+ 0x08, 0x88, 0x44, 0x26, 0x40, 0x63, 0x76, 0x23, 0x78, 0x55, 0x81, 0x97,
+ 0x95, 0x89, 0x39, 0x07, 0x14, 0x50, 0x50, 0x73, 0x07, 0x20, 0x86, 0x83,
+ 0x74, 0x57, 0x72, 0x36, 0x68, 0x61, 0x14, 0x41, 0x56, 0x49, 0x64, 0x72,
+ 0x75, 0x81, 0x47, 0x91, 0x08, 0x76, 0x47, 0x06, 0x55, 0x77, 0x61, 0x45,
+ 0x50, 0x10, 0x07, 0x46, 0x46, 0x89, 0x80, 0x07, 0x24, 0x95, 0x39, 0x43,
+ 0x03, 0x75, 0x24, 0x35, 0x57, 0x82, 0x09, 0x64, 0x29, 0x24, 0x26, 0x66,
+ 0x67, 0x29, 0x05, 0x90, 0x82, 0x02, 0x45, 0x71, 0x21, 0x34, 0x25, 0x48,
+ 0x68, 0x26, 0x01, 0x18, 0x73, 0x18, 0x46, 0x15, 0x14, 0x33, 0x28, 0x44,
+ 0x24, 0x82, 0x20, 0x12, 0x99, 0x43, 0x68, 0x43, 0x25, 0x14, 0x34, 0x33,
+ 0x31, 0x13, 0x77, 0x44, 0x95, 0x22, 0x99, 0x02, 0x30, 0x50, 0x74, 0x43,
+ 0x81, 0x78, 0x32, 0x17, 0x09, 0x85, 0x04, 0x37, 0x31, 0x98, 0x76, 0x79,
+ 0x64, 0x10, 0x39, 0x89, 0x59, 0x90, 0x50, 0x15, 0x77, 0x39, 0x28, 0x14,
+ 0x30, 0x19, 0x68, 0x77, 0x89, 0x48, 0x86, 0x16, 0x11, 0x33, 0x84, 0x56,
+ 0x10, 0x20, 0x94, 0x72, 0x41, 0x69, 0x13, 0x00, 0x56, 0x27, 0x01, 0x57,
+ 0x46, 0x65, 0x65, 0x19, 0x33, 0x07, 0x62, 0x19, 0x91, 0x60, 0x29, 0x11,
+ 0x41, 0x25, 0x88, 0x21, 0x93, 0x85, 0x87, 0x40, 0x91, 0x25, 0x32, 0x86,
+ 0x76, 0x54, 0x92, 0x52, 0x72, 0x46, 0x61, 0x84, 0x20, 0x14, 0x65, 0x83,
+ 0x69, 0x90, 0x80, 0x11, 0x35, 0x70, 0x42, 0x64, 0x74, 0x85, 0x15, 0x23,
+ 0x06, 0x55, 0x67, 0x49, 0x76, 0x47, 0x11, 0x95, 0x00, 0x85, 0x05, 0x12,
+ 0x58, 0x53, 0x25, 0x73, 0x62, 0x81, 0x63, 0x82, 0x32, 0x75, 0x16, 0x48,
+ 0x04, 0x96, 0x75, 0x16, 0x43, 0x83, 0x41, 0x85, 0x95, 0x67, 0x27, 0x83,
+ 0x22, 0x43, 0x02, 0x27, 0x69, 0x62, 0x78, 0x50, 0x57, 0x66, 0x99, 0x89,
+ 0x05, 0x06, 0x35, 0x86, 0x37, 0x27, 0x48, 0x46, 0x50, 0x80, 0x96, 0x40,
+ 0x42, 0x36, 0x21, 0x54, 0x49, 0x18, 0x63, 0x38, 0x45, 0x76, 0x23, 0x20,
+ 0x28, 0x06, 0x17, 0x32, 0x58, 0x50, 0x49, 0x54, 0x29, 0x46, 0x18, 0x12,
+ 0x17, 0x50, 0x02, 0x80, 0x99, 0x53, 0x15, 0x02, 0x07, 0x14, 0x19, 0x60,
+ 0x56, 0x43, 0x76, 0x71, 0x49, 0x99, 0x54, 0x83, 0x28, 0x94, 0x30, 0x30,
+ 0x57, 0x05, 0x89, 0x80, 0x11, 0x03, 0x78, 0x35, 0x73, 0x52, 0x67, 0x39,
+ 0x67, 0x07, 0x04, 0x49, 0x23, 0x83, 0x86, 0x89, 0x57, 0x71, 0x08, 0x41,
+ 0x15, 0x97, 0x19, 0x72, 0x03, 0x27, 0x72, 0x52, 0x66, 0x67, 0x99, 0x15,
+ 0x33, 0x64, 0x69, 0x78, 0x07, 0x83, 0x53, 0x71, 0x21, 0x50, 0x05, 0x48,
+ 0x59, 0x85, 0x01, 0x36, 0x65, 0x02, 0x52, 0x01, 0x09, 0x49, 0x28, 0x77,
+ 0x25, 0x35, 0x67, 0x77, 0x81, 0x64, 0x24, 0x29, 0x42, 0x32, 0x59, 0x22,
+ 0x93, 0x48, 0x59, 0x03, 0x85, 0x87, 0x15, 0x55, 0x23, 0x42, 0x58, 0x17,
+ 0x18, 0x37, 0x70, 0x83, 0x80, 0x12, 0x44, 0x83, 0x45, 0x70, 0x55, 0x86,
+ 0x03, 0x23, 0x01, 0x56, 0x94, 0x12, 0x41, 0x34, 0x82, 0x90, 0x83, 0x46,
+ 0x17, 0x56, 0x66, 0x96, 0x75, 0x80, 0x59, 0x07, 0x15, 0x84, 0x19, 0x52,
+ 0x37, 0x44, 0x44, 0x83, 0x72, 0x43, 0x25, 0x42, 0x26, 0x86, 0x87, 0x86,
+ 0x91, 0x62, 0x14, 0x90, 0x34, 0x26, 0x14, 0x33, 0x59, 0x70, 0x73, 0x15,
+ 0x49, 0x40, 0x66, 0x88, 0x42, 0x66, 0x16, 0x42, 0x55, 0x92, 0x82, 0x06,
+ 0x20, 0x96, 0x36, 0x96, 0x13, 0x07, 0x84, 0x94, 0x37, 0x66, 0x62, 0x78,
+ 0x60, 0x58, 0x80, 0x50, 0x69, 0x03, 0x97, 0x16, 0x64, 0x45, 0x21, 0x39,
+ 0x79, 0x28, 0x52, 0x17, 0x14, 0x77, 0x31, 0x60, 0x86, 0x70, 0x09, 0x53,
+ 0x39, 0x32, 0x52, 0x31, 0x35, 0x79, 0x24, 0x70, 0x25, 0x48, 0x23, 0x49,
+ 0x10, 0x64, 0x54, 0x30, 0x82, 0x34, 0x51, 0x20, 0x46, 0x04, 0x29, 0x25,
+ 0x65, 0x09, 0x55, 0x30, 0x30, 0x52, 0x85, 0x32, 0x79, 0x19, 0x59, 0x07,
+ 0x05, 0x12, 0x11, 0x03, 0x21, 0x90, 0x36, 0x62, 0x23, 0x67, 0x36, 0x67,
+ 0x47, 0x39, 0x92, 0x88, 0x45, 0x43, 0x71, 0x16, 0x48, 0x27, 0x68, 0x39,
+ 0x98, 0x38, 0x03, 0x31, 0x85, 0x10, 0x06, 0x95, 0x54, 0x79, 0x28, 0x79,
+ 0x56, 0x16, 0x65, 0x69, 0x00, 0x54, 0x09, 0x91, 0x06, 0x10, 0x10, 0x86,
+ 0x75, 0x01, 0x02, 0x71, 0x01, 0x09, 0x32, 0x94, 0x66, 0x43, 0x68, 0x36,
+ 0x19, 0x52, 0x02, 0x04, 0x45, 0x49, 0x40, 0x94, 0x07, 0x87, 0x86, 0x79,
+ 0x84, 0x07, 0x75, 0x30, 0x73, 0x02, 0x57, 0x81, 0x65, 0x02, 0x28, 0x96,
+ 0x57, 0x07, 0x70, 0x34, 0x39, 0x35, 0x75, 0x19, 0x47, 0x57, 0x08, 0x75,
+ 0x86, 0x57, 0x11, 0x32, 0x09, 0x47, 0x83, 0x93, 0x20, 0x94, 0x90, 0x88,
+ 0x39, 0x63, 0x22, 0x88, 0x54, 0x54, 0x95, 0x75, 0x67, 0x26, 0x02, 0x49,
+ 0x26, 0x17, 0x35, 0x16, 0x27, 0x65, 0x64, 0x26, 0x93, 0x92, 0x77, 0x85,
+ 0x84, 0x40, 0x59, 0x29, 0x49, 0x69, 0x94, 0x71, 0x72, 0x21, 0x55, 0x03,
+ 0x19, 0x74, 0x09, 0x40, 0x57, 0x68, 0x41, 0x19, 0x11, 0x21, 0x63, 0x56,
+ 0x29, 0x77, 0x57, 0x81, 0x44, 0x40, 0x76, 0x77, 0x02, 0x71, 0x66, 0x35,
+ 0x89, 0x02, 0x64, 0x51, 0x61, 0x02, 0x46, 0x91, 0x38, 0x93, 0x62, 0x57,
+ 0x18, 0x98, 0x12, 0x87, 0x29, 0x48, 0x65, 0x39, 0x99, 0x45, 0x54, 0x69,
+ 0x51, 0x16, 0x25, 0x75, 0x60, 0x70, 0x33, 0x72, 0x01, 0x60, 0x26, 0x51,
+ 0x44, 0x14, 0x39, 0x12, 0x95, 0x48, 0x87, 0x33, 0x90, 0x16, 0x42, 0x78,
+ 0x48, 0x58, 0x96, 0x93, 0x75, 0x23, 0x07, 0x13, 0x86, 0x07, 0x96, 0x30,
+ 0x22, 0x82, 0x91, 0x36, 0x72, 0x16, 0x48, 0x77, 0x64, 0x99, 0x07, 0x34,
+ 0x78, 0x60, 0x61, 0x13, 0x48, 0x93, 0x46, 0x62, 0x48, 0x38, 0x37, 0x96,
+ 0x58, 0x64, 0x39, 0x90, 0x69, 0x46, 0x81, 0x98, 0x61, 0x89, 0x15, 0x59,
+ 0x78, 0x98, 0x21, 0x34, 0x00, 0x69, 0x97, 0x80, 0x28, 0x81, 0x53, 0x49,
+ 0x79, 0x53, 0x92, 0x20, 0x29, 0x40, 0x70, 0x06, 0x09, 0x55, 0x99, 0x41,
+ 0x51, 0x35, 0x55, 0x27, 0x39, 0x06, 0x29, 0x83, 0x66, 0x03, 0x68, 0x14,
+ 0x11, 0x69, 0x95, 0x51, 0x71, 0x55, 0x24, 0x60, 0x52, 0x58, 0x88, 0x11,
+ 0x88, 0x25, 0x37, 0x86, 0x01, 0x52, 0x93, 0x52, 0x02, 0x24, 0x91, 0x58,
+ 0x56, 0x37, 0x50, 0x88, 0x39, 0x09, 0x61, 0x19, 0x08, 0x86, 0x29, 0x51,
+ 0x63, 0x38, 0x81, 0x14, 0x75, 0x75, 0x39, 0x99, 0x22, 0x04, 0x32, 0x63,
+ 0x14, 0x68, 0x41, 0x79, 0x09, 0x57, 0x87, 0x29, 0x26, 0x94, 0x05, 0x71,
+ 0x82, 0x41, 0x26, 0x98, 0x68, 0x18, 0x55, 0x42, 0x78, 0x05, 0x74, 0x17,
+ 0x34, 0x34, 0x07, 0x62, 0x94, 0x72, 0x21, 0x08, 0x54, 0x72, 0x21, 0x08,
+ 0x31, 0x53, 0x82, 0x35, 0x27, 0x40, 0x85, 0x77, 0x08, 0x52, 0x58, 0x48,
+ 0x03, 0x86, 0x65, 0x51, 0x96, 0x43, 0x89, 0x19, 0x15, 0x08, 0x49, 0x62,
+ 0x57, 0x46, 0x17, 0x68, 0x56, 0x04, 0x70, 0x63, 0x75, 0x88, 0x13, 0x27,
+ 0x87, 0x44, 0x46, 0x27, 0x02, 0x97, 0x71, 0x07, 0x40, 0x17, 0x24, 0x61,
+ 0x16, 0x94, 0x86, 0x85, 0x67, 0x58, 0x87, 0x92, 0x02, 0x84, 0x75, 0x19,
+ 0x43, 0x60, 0x68, 0x03, 0x54, 0x75, 0x33, 0x17, 0x97, 0x75, 0x12, 0x62,
+ 0x43, 0x08, 0x35, 0x75, 0x32, 0x21, 0x08, 0x82, 0x78, 0x04, 0x74, 0x09,
+ 0x13, 0x48, 0x63, 0x68, 0x67, 0x09, 0x08, 0x50, 0x11, 0x71, 0x64, 0x72,
+ 0x63, 0x76, 0x21, 0x62, 0x80, 0x57, 0x19, 0x15, 0x26, 0x88, 0x02, 0x26,
+ 0x83, 0x17, 0x61, 0x76, 0x28, 0x10, 0x22, 0x37, 0x56, 0x71, 0x51, 0x60,
+ 0x12, 0x79, 0x24, 0x83, 0x78, 0x47, 0x78, 0x20, 0x52, 0x27, 0x19, 0x88,
+ 0x81, 0x04, 0x70, 0x20, 0x25, 0x10, 0x04, 0x01, 0x72, 0x57, 0x30, 0x93,
+ 0x96, 0x23, 0x02, 0x94, 0x61, 0x44, 0x17, 0x65, 0x77, 0x60, 0x27, 0x43,
+ 0x24, 0x59, 0x46, 0x76, 0x00, 0x11, 0x31, 0x99, 0x41, 0x48, 0x75, 0x32,
+ 0x05, 0x15, 0x45, 0x31, 0x57, 0x89, 0x10, 0x47, 0x53, 0x14, 0x66, 0x54,
+ 0x60, 0x55, 0x36, 0x93, 0x30, 0x03, 0x63, 0x80, 0x65, 0x43, 0x17, 0x36,
+ 0x18, 0x64, 0x21, 0x38, 0x16, 0x19, 0x19, 0x51, 0x73, 0x80, 0x38, 0x27,
+ 0x30, 0x89, 0x13, 0x43, 0x54, 0x11, 0x78, 0x05, 0x24, 0x38, 0x83, 0x56,
+ 0x50, 0x59, 0x12, 0x47, 0x69, 0x70, 0x70, 0x91, 0x28, 0x02, 0x08, 0x91,
+ 0x66, 0x09, 0x31, 0x65, 0x46, 0x20, 0x04, 0x85, 0x89, 0x53, 0x91, 0x42,
+ 0x34, 0x09, 0x36, 0x92, 0x42, 0x06, 0x87, 0x88, 0x23, 0x54, 0x87, 0x85,
+ 0x52, 0x98, 0x95, 0x76, 0x13, 0x50, 0x59, 0x89, 0x18, 0x14, 0x17, 0x47,
+ 0x10, 0x97, 0x39, 0x14, 0x33, 0x79, 0x83, 0x62, 0x55, 0x18, 0x30, 0x83,
+ 0x03, 0x45, 0x38, 0x37, 0x35, 0x20, 0x94, 0x84, 0x89, 0x80, 0x89, 0x10,
+ 0x48, 0x77, 0x33, 0x36, 0x50, 0x07, 0x93, 0x02, 0x45, 0x42, 0x91, 0x12,
+ 0x98, 0x09, 0x77, 0x20, 0x31, 0x95, 0x10, 0x29, 0x89, 0x02, 0x38, 0x92,
+ 0x90, 0x19, 0x51, 0x10, 0x19, 0x82, 0x23, 0x68, 0x06, 0x00, 0x67, 0x50,
+ 0x25, 0x03, 0x41, 0x69, 0x53, 0x42, 0x23, 0x99, 0x29, 0x21, 0x63, 0x22,
+ 0x72, 0x54, 0x72, 0x40, 0x23, 0x39, 0x74, 0x92, 0x53, 0x28, 0x67, 0x56,
+ 0x46, 0x84, 0x59, 0x85, 0x10, 0x92, 0x31, 0x20, 0x39, 0x95, 0x65, 0x15,
+ 0x76, 0x35, 0x37, 0x21, 0x98, 0x41, 0x68, 0x74, 0x94, 0x94, 0x86, 0x90,
+ 0x35, 0x07, 0x06, 0x38, 0x78, 0x32, 0x00, 0x60, 0x86, 0x12, 0x34, 0x65,
+ 0x67, 0x35, 0x76, 0x94, 0x78, 0x22, 0x99, 0x42, 0x82, 0x40, 0x05, 0x74,
+ 0x18, 0x59, 0x03, 0x83, 0x89, 0x05, 0x19, 0x28, 0x88, 0x35, 0x59, 0x10,
+ 0x12, 0x96, 0x48, 0x67, 0x59, 0x87, 0x26, 0x85, 0x74, 0x64, 0x78, 0x56,
+ 0x91, 0x81, 0x45, 0x90, 0x21, 0x80, 0x32, 0x19, 0x61, 0x38, 0x61, 0x70,
+ 0x35, 0x08, 0x93, 0x53, 0x21, 0x95, 0x08, 0x27, 0x90, 0x28, 0x94, 0x27,
+ 0x35, 0x78, 0x03, 0x57, 0x74, 0x84, 0x73, 0x63, 0x27, 0x98, 0x14, 0x21,
+ 0x22, 0x36, 0x75, 0x31, 0x81, 0x65, 0x85, 0x51, 0x02, 0x45, 0x18, 0x06,
+ 0x39, 0x13, 0x29, 0x29, 0x73, 0x26, 0x99, 0x51, 0x38, 0x43, 0x35, 0x58,
+ 0x70, 0x92, 0x32, 0x13, 0x80, 0x16, 0x26, 0x44, 0x22, 0x28, 0x05, 0x45,
+ 0x86, 0x90, 0x38, 0x19, 0x40, 0x06, 0x30, 0x56, 0x94, 0x09, 0x02, 0x02,
+ 0x96, 0x29, 0x22, 0x44, 0x87, 0x38, 0x09, 0x95, 0x58, 0x46, 0x42, 0x78,
+ 0x72, 0x77, 0x86, 0x31, 0x97, 0x19, 0x86, 0x51, 0x73, 0x76, 0x63, 0x98,
+ 0x39, 0x40, 0x20, 0x20, 0x67, 0x42, 0x55, 0x50, 0x63, 0x76, 0x81, 0x87,
+ 0x13, 0x81, 0x19, 0x54, 0x11, 0x77, 0x90, 0x26, 0x47, 0x25, 0x92, 0x88,
+ 0x18, 0x56, 0x23, 0x73, 0x91, 0x52, 0x39, 0x08, 0x59, 0x51, 0x81, 0x57,
+ 0x78, 0x17, 0x13, 0x90, 0x90, 0x50, 0x65, 0x59, 0x99, 0x77, 0x42, 0x28,
+ 0x21, 0x59, 0x97, 0x64, 0x25, 0x17, 0x92, 0x24, 0x50, 0x00, 0x28, 0x40,
+ 0x85, 0x33, 0x78, 0x86, 0x79, 0x40, 0x28, 0x30, 0x14, 0x12, 0x01, 0x72,
+ 0x41, 0x43, 0x06, 0x87, 0x67, 0x31, 0x66, 0x77, 0x07, 0x50, 0x55, 0x50,
+ 0x22, 0x80, 0x42, 0x06, 0x38, 0x01, 0x63, 0x66, 0x70, 0x12, 0x52, 0x91,
+ 0x90, 0x97, 0x21, 0x28, 0x22, 0x65, 0x02, 0x80, 0x72, 0x31, 0x17, 0x76,
+ 0x35, 0x16, 0x03, 0x56, 0x59, 0x93, 0x36, 0x37, 0x67, 0x54, 0x46, 0x87,
+ 0x29, 0x01, 0x30, 0x80, 0x47, 0x47, 0x31, 0x98, 0x34, 0x30, 0x23, 0x86,
+ 0x86, 0x14, 0x05, 0x75, 0x09, 0x88, 0x77, 0x92, 0x59, 0x43, 0x98, 0x72,
+ 0x55, 0x54, 0x25, 0x59, 0x22, 0x27, 0x21, 0x62, 0x97, 0x10, 0x61, 0x73,
+ 0x86, 0x95, 0x99, 0x10, 0x62, 0x35, 0x25, 0x16, 0x62, 0x60, 0x51, 0x48,
+ 0x69, 0x69, 0x92, 0x27, 0x19, 0x43, 0x40, 0x52, 0x70, 0x23, 0x37, 0x28,
+ 0x73, 0x10, 0x32, 0x55, 0x85, 0x46, 0x97, 0x59, 0x88, 0x48, 0x54, 0x06,
+ 0x58, 0x04, 0x82, 0x98, 0x88, 0x34, 0x05, 0x41, 0x94, 0x44, 0x35, 0x10,
+ 0x96, 0x48, 0x21, 0x17, 0x24, 0x40, 0x26, 0x15, 0x49, 0x28, 0x12, 0x17,
+ 0x10, 0x17, 0x91, 0x42, 0x84, 0x15, 0x83, 0x36, 0x29, 0x49, 0x92, 0x77,
+ 0x74, 0x11, 0x72, 0x97, 0x64, 0x53, 0x23, 0x29, 0x16, 0x35, 0x22, 0x10,
+ 0x87, 0x07, 0x44, 0x78, 0x18, 0x19, 0x79, 0x03, 0x58, 0x24, 0x15, 0x63,
+ 0x55, 0x75, 0x56, 0x14, 0x63, 0x65, 0x86, 0x61, 0x92, 0x94, 0x30, 0x92,
+ 0x69, 0x78, 0x40, 0x95, 0x19, 0x81, 0x41, 0x66, 0x97, 0x00, 0x17, 0x37,
+ 0x20, 0x82, 0x14, 0x26, 0x42, 0x63, 0x84, 0x20, 0x96, 0x11, 0x68, 0x37,
+ 0x60, 0x28, 0x69, 0x85, 0x45, 0x04, 0x62, 0x20, 0x49, 0x39, 0x74, 0x84,
+ 0x60, 0x23, 0x38, 0x33, 0x42, 0x49, 0x38, 0x82, 0x30, 0x63, 0x21, 0x51,
+ 0x69, 0x09, 0x05, 0x55, 0x78, 0x90, 0x68, 0x69, 0x22, 0x20, 0x17, 0x26,
+ 0x54, 0x01, 0x10, 0x04, 0x68, 0x19, 0x88, 0x40, 0x91, 0x74, 0x81, 0x29,
+ 0x07, 0x45, 0x33, 0x77, 0x12, 0x47, 0x08, 0x60, 0x09, 0x42, 0x84, 0x15,
+ 0x63, 0x92, 0x64, 0x77, 0x07, 0x44, 0x11, 0x07, 0x79, 0x81, 0x24, 0x05,
+ 0x21, 0x60, 0x81, 0x70, 0x66, 0x36, 0x69, 0x68, 0x45, 0x01, 0x11, 0x95,
+ 0x67, 0x95, 0x55, 0x07, 0x96, 0x63, 0x84, 0x04, 0x74, 0x72, 0x61, 0x91,
+ 0x60, 0x09, 0x90, 0x14, 0x34, 0x94, 0x06, 0x12, 0x01, 0x94, 0x40, 0x14,
+ 0x12, 0x53, 0x64, 0x81, 0x75, 0x99, 0x36, 0x99, 0x11, 0x69, 0x95, 0x51,
+ 0x71, 0x55, 0x24, 0x60, 0x52, 0x58, 0x88, 0x11, 0x88, 0x25, 0x37, 0x86,
+ 0x66, 0x36, 0x69, 0x68, 0x45, 0x01, 0x11, 0x95
+};
+
+/* cipher text */
+uint8_t ciphertext[2048] = {
+ 0xE2, 0x19, 0x24, 0x56, 0x13, 0x59, 0xA6, 0x5D, 0xDF, 0xD0, 0x72, 0xAA,
+ 0x23, 0xC7, 0x36, 0x3A, 0xBB, 0x3E, 0x8B, 0x64, 0xD5, 0xBF, 0xDE, 0x65,
+ 0xA2, 0x75, 0xD9, 0x45, 0x6C, 0x3C, 0xD2, 0x6A, 0xC7, 0xD0, 0x9A, 0xD0,
+ 0x87, 0xB8, 0xE4, 0x94, 0x11, 0x62, 0x5A, 0xC3, 0xC3, 0x01, 0xA3, 0x86,
+ 0xBC, 0xBC, 0x9C, 0xC0, 0x81, 0x9F, 0xBF, 0x5C, 0x6F, 0x3F, 0x13, 0xF1,
+ 0xAE, 0xCF, 0x26, 0xB3, 0xBC, 0x49, 0xD6, 0x3B, 0x7A, 0x2E, 0x99, 0x9E,
+ 0x1B, 0x04, 0x50, 0x6C, 0x48, 0x6B, 0x4E, 0x72, 0xFC, 0xC8, 0xA7, 0x0C,
+ 0x2C, 0xD9, 0xED, 0xE4, 0x82, 0xC4, 0x81, 0xA6, 0xB4, 0xCC, 0xAD, 0x10,
+ 0xF3, 0x1C, 0x39, 0x05, 0x41, 0x2D, 0x57, 0x32, 0xE7, 0x16, 0xF8, 0x4D,
+ 0xF0, 0xDE, 0x40, 0x5B, 0x5F, 0x80, 0xDC, 0xA7, 0xC3, 0x2D, 0x3D, 0x9E,
+ 0x27, 0xD4, 0xE8, 0x10, 0x8E, 0xEB, 0xA5, 0x68, 0x6F, 0x3D, 0xC0, 0x44,
+ 0xE7, 0x77, 0x73, 0xB9, 0x92, 0x8E, 0xA2, 0x26, 0x5C, 0x6F, 0x33, 0x4B,
+ 0x0B, 0xEF, 0x37, 0x55, 0xBE, 0xEC, 0x98, 0x83, 0x1E, 0xDF, 0xB2, 0x9E,
+ 0x5D, 0x1D, 0x78, 0x14, 0xD7, 0x85, 0x0E, 0xF8, 0x12, 0x30, 0x8E, 0x5D,
+ 0x08, 0x77, 0x0B, 0x2E, 0x9B, 0xF9, 0xA6, 0x72, 0xD2, 0x41, 0xC1, 0x8E,
+ 0x6B, 0x5E, 0x11, 0x85, 0x22, 0x6E, 0xE4, 0xA3, 0xEA, 0x4C, 0x91, 0xE1,
+ 0x7D, 0xD0, 0xEB, 0x9F, 0xD9, 0xD7, 0x05, 0x77, 0xD9, 0xA1, 0xC2, 0xFD,
+ 0x41, 0x63, 0x51, 0xB4, 0x7A, 0x1F, 0x21, 0xF0, 0xBF, 0x11, 0x4D, 0x9B,
+ 0x97, 0xAB, 0xB4, 0x94, 0x36, 0x34, 0xC9, 0x2D, 0x8B, 0xE2, 0x61, 0xCF,
+ 0xAF, 0x69, 0xD5, 0x5C, 0xE9, 0xED, 0xE3, 0xA0, 0x69, 0xD3, 0xE5, 0xAE,
+ 0x67, 0x6C, 0xC7, 0x11, 0xB1, 0x21, 0x96, 0xD6, 0xDB, 0xA8, 0x1D, 0xC9,
+ 0x83, 0x0B, 0xE2, 0xC6, 0x6E, 0x94, 0xE9, 0x50, 0x12, 0x9B, 0x01, 0x72,
+ 0xAA, 0xFD, 0x8B, 0x7C, 0xEC, 0x0D, 0x01, 0xA4, 0x5D, 0x00, 0xE9, 0x79,
+ 0x58, 0xF5, 0x67, 0xF9, 0x61, 0xC3, 0x11, 0xB4, 0x7E, 0x76, 0x0A, 0x4C,
+ 0x60, 0xD6, 0xBD, 0xC8, 0x31, 0xD3, 0x0C, 0xD0, 0x5B, 0xDF, 0x7B, 0x05,
+ 0x9A, 0xBB, 0xC6, 0x2E, 0x9F, 0xF8, 0x18, 0x80, 0x6D, 0x1B, 0x21, 0xE5,
+ 0xAC, 0x75, 0xBC, 0x0D, 0x72, 0x51, 0x61, 0xD7, 0xEA, 0xA2, 0xAC, 0x0E,
+ 0xC1, 0xE7, 0x49, 0x37, 0xE7, 0x7C, 0xDE, 0xBD, 0x56, 0x00, 0x44, 0x6D,
+ 0xAB, 0x81, 0x2B, 0x26, 0x4A, 0xAA, 0x60, 0xE6, 0x43, 0x8D, 0x88, 0x1C,
+ 0x48, 0x55, 0x53, 0x25, 0xE8, 0x3C, 0x46, 0xF0, 0xA6, 0x33, 0x2D, 0xA2,
+ 0xDC, 0x99, 0x57, 0x38, 0x59, 0xCF, 0x53, 0xFA, 0x3E, 0x78, 0x46, 0xA0,
+ 0xA9, 0x50, 0x12, 0x72, 0xAC, 0x15, 0xC6, 0xA7, 0x42, 0x0F, 0x59, 0x6E,
+ 0xEA, 0xB0, 0x3D, 0xB8, 0x94, 0x32, 0xD1, 0xB6, 0xE8, 0x90, 0x06, 0x66,
+ 0x0C, 0xDE, 0xA9, 0x35, 0xC7, 0xDD, 0x72, 0x42, 0x38, 0x33, 0x32, 0x2F,
+ 0x2C, 0x3F, 0xBD, 0x01, 0xD6, 0x47, 0xFC, 0x89, 0x31, 0x38, 0x2E, 0xB9,
+ 0x6B, 0xED, 0xDB, 0x85, 0x38, 0xB1, 0xA5, 0x50, 0xFA, 0xFB, 0xA7, 0x31,
+ 0xEC, 0xB6, 0xBB, 0x82, 0x50, 0xB4, 0x88, 0x5C, 0xED, 0xE5, 0x4B, 0x5B,
+ 0xBF, 0xB3, 0x18, 0xFB, 0xAD, 0x24, 0x41, 0x55, 0x80, 0xCD, 0xA3, 0xA1,
+ 0xD6, 0xD5, 0xB6, 0x06, 0xE9, 0x85, 0x12, 0x33, 0x52, 0x56, 0xF1, 0xB7,
+ 0xDC, 0x57, 0x9E, 0xB4, 0x00, 0x1E, 0xCB, 0x62, 0x13, 0x4C, 0x90, 0x9A,
+ 0x9D, 0x64, 0x80, 0xD1, 0x5E, 0xB3, 0xCB, 0x8A, 0x73, 0x4E, 0x7B, 0xBE,
+ 0x4D, 0xA7, 0xF7, 0xB7, 0x9C, 0x1C, 0x7F, 0x27, 0x1E, 0x7F, 0x58, 0xB2,
+ 0x74, 0xAF, 0x94, 0x0E, 0x19, 0x23, 0xE1, 0x6B, 0xD8, 0x20, 0x4F, 0x2C,
+ 0x13, 0xE8, 0x8C, 0x37, 0x46, 0x27, 0x55, 0x68, 0xDA, 0x3F, 0x7A, 0xC6,
+ 0xEF, 0x87, 0x1D, 0x3B, 0x95, 0x43, 0x5E, 0x75, 0xE0, 0x02, 0x22, 0x0E,
+ 0x11, 0x60, 0xAB, 0x1A, 0x91, 0x94, 0xC4, 0xFA, 0xD9, 0x92, 0x2B, 0xE5,
+ 0x03, 0xE0, 0x7A, 0x17, 0x5C, 0x67, 0x22, 0xB3, 0xCB, 0x77, 0x9E, 0x22,
+ 0x01, 0x5F, 0x5D, 0x64, 0xE4, 0x2F, 0xC4, 0x61, 0xCA, 0xC7, 0xFD, 0x20,
+ 0x24, 0x30, 0xAB, 0x3F, 0x1A, 0x08, 0x85, 0x08, 0x39, 0xDE, 0x19, 0x1C,
+ 0x1A, 0xEA, 0xB8, 0x7E, 0xE5, 0xBC, 0xD9, 0xB2, 0x59, 0xC8, 0x81, 0x02,
+ 0x1D, 0x5C, 0xC0, 0xDD, 0x8D, 0x56, 0xB6, 0x2E, 0x85, 0x26, 0xA8, 0x34,
+ 0x92, 0x36, 0x9A, 0x84, 0xBD, 0x27, 0xC1, 0x9D, 0x5E, 0x14, 0xC4, 0xB7,
+ 0x02, 0xA8, 0xC9, 0xC2, 0xAD, 0xDC, 0x98, 0x42, 0x51, 0xDE, 0x94, 0x28,
+ 0x39, 0xEF, 0xE9, 0x7F, 0x05, 0x3F, 0x1D, 0x67, 0x72, 0x04, 0xCF, 0x7D,
+ 0x38, 0x49, 0xC4, 0x59, 0xA5, 0xF6, 0xB6, 0x02, 0x31, 0xD0, 0x05, 0x74,
+ 0x4B, 0xD0, 0x89, 0xD1, 0x7F, 0xC6, 0xDB, 0x7E, 0x75, 0x62, 0xA3, 0xC2,
+ 0x2E, 0xB0, 0xCC, 0x9A, 0xD3, 0xA4, 0x14, 0xB6, 0xF2, 0x91, 0x44, 0x3F,
+ 0x84, 0xE0, 0x90, 0x4A, 0x6A, 0x34, 0x8C, 0x35, 0x3C, 0xB2, 0xA9, 0x35,
+ 0x88, 0xB0, 0x88, 0xF8, 0x7E, 0x5C, 0xD2, 0x08, 0x5E, 0x08, 0x15, 0x03,
+ 0xBC, 0xF5, 0x42, 0x6B, 0x28, 0xED, 0xDD, 0xAA, 0x4D, 0x78, 0x10, 0x31,
+ 0x32, 0xA2, 0xC5, 0xCA, 0xEE, 0x9A, 0x62, 0x52, 0x3E, 0x48, 0x83, 0xA4,
+ 0xCA, 0xD4, 0xC7, 0xA7, 0xA5, 0x3F, 0x44, 0x1C, 0x86, 0xAD, 0x52, 0x7D,
+ 0x80, 0x1D, 0x9E, 0x32, 0x3F, 0x2A, 0x2E, 0xD8, 0x89, 0xC1, 0xA4, 0xD6,
+ 0xC1, 0x90, 0x2E, 0x1A, 0x20, 0x4B, 0x87, 0x32, 0x35, 0x25, 0xD8, 0xB8,
+ 0x57, 0x15, 0x85, 0x1E, 0x3C, 0x8A, 0xDC, 0x1A, 0x49, 0x3D, 0x70, 0x35,
+ 0x99, 0xAA, 0xDE, 0x2C, 0xD4, 0xAF, 0x79, 0x72, 0xAB, 0x97, 0x84, 0x20,
+ 0xB6, 0x4F, 0x34, 0x3F, 0xEA, 0xAE, 0x5F, 0x8F, 0x3A, 0x42, 0xDB, 0x68,
+ 0xE5, 0x84, 0x63, 0x2E, 0x7A, 0x0E, 0xBD, 0x28, 0x6A, 0x24, 0xB6, 0xAB,
+ 0xE4, 0xAC, 0x20, 0x7C, 0x81, 0xD0, 0x69, 0x89, 0xF8, 0xDE, 0xA9, 0x02,
+ 0xFD, 0x1F, 0x08, 0xDA, 0x26, 0xC2, 0x24, 0xCA, 0xEB, 0x44, 0x16, 0x8D,
+ 0x55, 0x5F, 0xB9, 0xA9, 0x5A, 0x18, 0x50, 0xB1, 0x54, 0xF1, 0xBF, 0x06,
+ 0xC2, 0xB0, 0x95, 0xC2, 0xAE, 0xE5, 0xBF, 0xB3, 0xFD, 0xC9, 0xBF, 0x75,
+ 0x42, 0x7D, 0xA0, 0xA8, 0x95, 0xF9, 0x62, 0x3B, 0x9C, 0x0D, 0x81, 0xF3,
+ 0x9C, 0xFC, 0x19, 0x5B, 0xF7, 0xD1, 0x9C, 0xF0, 0xAA, 0xFE, 0xEF, 0x35,
+ 0x1E, 0x81, 0x9E, 0x02, 0x46, 0x52, 0x9B, 0x99, 0x0D, 0x12, 0x8B, 0x71,
+ 0x6C, 0x32, 0xB5, 0x23, 0x17, 0x03, 0xC5, 0xB0, 0xA1, 0xC3, 0x4B, 0x10,
+ 0x01, 0x4D, 0x4C, 0x4A, 0x46, 0x8F, 0xD9, 0x79, 0xBB, 0x10, 0x44, 0xB0,
+ 0x3C, 0x7D, 0x46, 0xFD, 0x38, 0xDF, 0xAF, 0x6E, 0x58, 0x7D, 0xE1, 0xEB,
+ 0xBB, 0x8C, 0xDC, 0x79, 0xDA, 0x41, 0xD1, 0x8B, 0x0B, 0x11, 0x4F, 0xE5,
+ 0x1C, 0xC1, 0x59, 0xA7, 0x1E, 0x5A, 0xC1, 0xEE, 0x27, 0x33, 0xC8, 0x55,
+ 0xA9, 0x32, 0xEA, 0xF7, 0x45, 0xB0, 0x08, 0xE9, 0x32, 0xDF, 0x70, 0x24,
+ 0x82, 0xD3, 0x2A, 0x3E, 0x4F, 0x42, 0xB9, 0x25, 0x10, 0xD1, 0x73, 0xFA,
+ 0xFD, 0xC1, 0x84, 0xF2, 0xF7, 0x0E, 0xBC, 0x9D, 0x90, 0x39, 0xD7, 0xFD,
+ 0x45, 0x77, 0xBA, 0x29, 0xF9, 0x87, 0x45, 0xC1, 0x32, 0x44, 0xB0, 0x27,
+ 0x6B, 0xFC, 0x8A, 0xFE, 0x00, 0x6F, 0x61, 0x98, 0xD0, 0x60, 0xC8, 0x10,
+ 0xE5, 0xBC, 0x88, 0x13, 0x45, 0x44, 0xA5, 0xEB, 0x6E, 0xCB, 0x11, 0xAF,
+ 0x30, 0xDC, 0x8B, 0xF8, 0x30, 0x46, 0xDA, 0x76, 0xF1, 0xE5, 0x14, 0x51,
+ 0x8A, 0x02, 0x5A, 0x5A, 0xAA, 0x7B, 0x2D, 0x57, 0x0A, 0x5C, 0x73, 0xD1,
+ 0x88, 0xCE, 0xBE, 0x3D, 0x06, 0x3F, 0x48, 0x1D, 0x44, 0x24, 0x6F, 0x4F,
+ 0x7F, 0x6A, 0xF2, 0x16, 0x34, 0x35, 0x38, 0x73, 0x8A, 0xE5, 0x25, 0xF4,
+ 0x34, 0x9E, 0x5B, 0x40, 0x90, 0x04, 0x57, 0x1B, 0x57, 0x75, 0x8F, 0xEA,
+ 0x1C, 0xF8, 0x7A, 0x68, 0x01, 0x1C, 0x8D, 0xBA, 0xF4, 0xE3, 0xD3, 0x8F,
+ 0x7F, 0xE4, 0x50, 0x35, 0x6B, 0x6B, 0xF6, 0xFC, 0x5F, 0x9B, 0x98, 0x78,
+ 0x16, 0x68, 0x72, 0x74, 0x71, 0x78, 0x25, 0x68, 0xE5, 0x1E, 0x66, 0xE2,
+ 0x4E, 0xC8, 0xDB, 0x92, 0x8E, 0x88, 0x64, 0x74, 0xDE, 0xDB, 0x85, 0x56,
+ 0x9F, 0xF9, 0xC4, 0x29, 0x54, 0xA8, 0xFB, 0xBA, 0xEA, 0xAB, 0xC7, 0x49,
+ 0x5C, 0x6C, 0xD7, 0x61, 0x8C, 0xE2, 0x2B, 0xF5, 0xA0, 0xA8, 0xD2, 0x41,
+ 0xC0, 0x54, 0xAB, 0xA7, 0x56, 0x5C, 0xE7, 0xA5, 0xEA, 0xBC, 0x47, 0xD1,
+ 0x0D, 0xD9, 0xC0, 0xA9, 0xC4, 0xA7, 0x3E, 0xD1, 0x2B, 0x1E, 0x34, 0x31,
+ 0x36, 0x9D, 0xB9, 0x51, 0xD3, 0xAD, 0x29, 0xE6, 0x9B, 0xD8, 0x4B, 0x93,
+ 0x33, 0x2F, 0x30, 0xEF, 0x18, 0x90, 0x69, 0x11, 0x09, 0xEA, 0xBA, 0xE0,
+ 0x10, 0x93, 0x63, 0x71, 0xA8, 0x83, 0x59, 0xDB, 0xFC, 0x12, 0x22, 0x84,
+ 0xC7, 0x01, 0x20, 0x99, 0xEC, 0x59, 0xA9, 0xE6, 0x9B, 0x5B, 0x8B, 0xB8,
+ 0x68, 0x52, 0x61, 0x8B, 0x4E, 0xF3, 0x50, 0x69, 0xF1, 0x49, 0x9B, 0xAF,
+ 0x53, 0xAD, 0xA0, 0x9D, 0x23, 0xE0, 0xE0, 0xC4, 0x31, 0xE4, 0x8E, 0x1C,
+ 0x51, 0x14, 0xFC, 0x95, 0x9C, 0xA6, 0x34, 0x85, 0xB0, 0x36, 0xFC, 0x7A,
+ 0x53, 0x03, 0x31, 0x0E, 0xCB, 0x34, 0x3E, 0xDF, 0xD1, 0x71, 0xBC, 0xDB,
+ 0xA1, 0xAF, 0x59, 0x4A, 0x03, 0x19, 0xA7, 0x8E, 0xB5, 0x82, 0x15, 0x24,
+ 0x69, 0x68, 0xBD, 0x9C, 0x2E, 0xFA, 0x06, 0xB5, 0x70, 0xC5, 0x70, 0xC4,
+ 0x14, 0x99, 0x01, 0x49, 0xBD, 0x6E, 0xAE, 0x10, 0xA1, 0xE4, 0xEF, 0xDD,
+ 0xE5, 0x51, 0x22, 0x9D, 0xF7, 0x93, 0xAB, 0x41, 0xBD, 0x86, 0x7A, 0xCC,
+ 0x51, 0x94, 0xEC, 0x22, 0xBE, 0x0D, 0x67, 0xFD, 0xA3, 0xFD, 0xCF, 0xF8,
+ 0x74, 0x0A, 0x5E, 0x1C, 0x71, 0xAD, 0xB6, 0xD0, 0xD7, 0xF8, 0x71, 0x34,
+ 0xAB, 0x62, 0xE7, 0xA8, 0x6B, 0x8F, 0x1E, 0x43, 0x46, 0xA5, 0xE4, 0xB4,
+ 0x52, 0x81, 0x66, 0xB3, 0xE5, 0x10, 0x23, 0x21, 0x2B, 0x31, 0x0F, 0xB8,
+ 0xB6, 0xC5, 0xA5, 0xC9, 0x90, 0x07, 0x83, 0xD0, 0xC3, 0x10, 0x7A, 0x04,
+ 0xBD, 0x8A, 0x3C, 0x7B, 0xF9, 0x0E, 0x51, 0x81, 0x96, 0xC8, 0xAE, 0xF9,
+ 0x27, 0xDE, 0x62, 0x7A, 0x41, 0x60, 0x35, 0x8F, 0x77, 0xBC, 0x95, 0x11,
+ 0x2C, 0xC4, 0x6C, 0x47, 0x7A, 0xEB, 0x29, 0xE5, 0x8E, 0xB5, 0xD6, 0xA5,
+ 0x54, 0x1B, 0xD0, 0xE0, 0x0F, 0x7D, 0x5C, 0x51, 0xD8, 0x6C, 0x92, 0x2F,
+ 0x13, 0x4E, 0x90, 0x77, 0xF8, 0x8D, 0x69, 0x78, 0x96, 0x96, 0x49, 0x9F,
+ 0x3C, 0x2E, 0x5C, 0xA6, 0x73, 0x27, 0x7D, 0xAD, 0x8D, 0xE3, 0x9B, 0x4A,
+ 0x2F, 0x50, 0x0A, 0x42, 0x7E, 0xF2, 0x3B, 0x50, 0x5C, 0x81, 0xC9, 0x49,
+ 0x01, 0x96, 0x83, 0x0A, 0xEC, 0x7F, 0xED, 0x1C, 0xA5, 0x7D, 0xF1, 0xE6,
+ 0xC4, 0xB3, 0x8F, 0xF9, 0x0F, 0xDB, 0x7B, 0xC1, 0x35, 0xF7, 0x63, 0x4A,
+ 0x39, 0xD4, 0x0E, 0x9E, 0x05, 0xD9, 0x42, 0xAA, 0xAB, 0x52, 0xCA, 0x4E,
+ 0x98, 0x3B, 0x43, 0x1A, 0x91, 0x25, 0xA9, 0x34, 0xD5, 0x66, 0xB2, 0xF4,
+ 0xFF, 0xDE, 0x64, 0x91, 0x90, 0xB9, 0x17, 0x70, 0xA0, 0xD6, 0xEA, 0xB6,
+ 0x36, 0xF4, 0x44, 0xCE, 0x86, 0x7B, 0x18, 0x74, 0x9C, 0x18, 0xAD, 0xB6,
+ 0xE0, 0x74, 0xC1, 0x0E, 0x29, 0x5D, 0x6A, 0x36, 0xD1, 0x3E, 0xB8, 0x2A,
+ 0xE4, 0x23, 0x1D, 0xB2, 0xAE, 0xF5, 0x5B, 0x8E, 0x2C, 0xD9, 0xD1, 0xE1,
+ 0x4F, 0x58, 0xA6, 0xE3, 0x88, 0x2E, 0xF9, 0xCF, 0x32, 0x3E, 0x8E, 0x37,
+ 0x95, 0xFF, 0xAD, 0x68, 0x11, 0x5E, 0x7F, 0x3D, 0x38, 0x06, 0x7C, 0x33,
+ 0x32, 0x78, 0x09, 0xEC, 0xCA, 0x3E, 0x08, 0xF1, 0xD0, 0x95, 0x19, 0xC9,
+ 0x7E, 0x62, 0xB2, 0x02, 0xA3, 0x5D, 0xF8, 0x3F, 0xA2, 0xB0, 0x8B, 0x38,
+ 0xB1, 0x8C, 0xEA, 0xB3, 0xE4, 0xBF, 0xD3, 0x6C, 0x6D, 0x3D, 0xD1, 0xC6,
+ 0xDA, 0x6B, 0x7A, 0xBA, 0x05, 0xEA, 0x9E, 0xA5, 0xE9, 0x00, 0xCC, 0x80,
+ 0x57, 0xAB, 0xD9, 0x0A, 0xD1, 0x00, 0x82, 0x2A, 0x51, 0x4B, 0xA2, 0x96,
+ 0xEB, 0x96, 0x14, 0xA8, 0x46, 0xDF, 0x1D, 0x48, 0xAE, 0xFA, 0x12, 0xA8,
+ 0x89, 0x8E, 0xEF, 0xBC, 0x3C, 0xA1, 0x6E, 0xDD, 0x90, 0x66, 0x2E, 0x56,
+ 0x6B, 0xF7, 0x1D, 0xF0, 0x46, 0x11, 0x4A, 0xA6, 0x07, 0x73, 0xC4, 0xE3,
+ 0x97, 0xFE, 0x7E, 0x22, 0x6F, 0x22, 0xB4, 0x6F, 0xB0, 0x32, 0x0A, 0x5E,
+ 0x85, 0x7E, 0x54, 0xB4, 0x24, 0xBD, 0x36, 0xA7, 0x94, 0xE7, 0x37, 0xFD,
+ 0x1A, 0xAF, 0xF4, 0x44, 0xB4, 0x35, 0x4F, 0xE0, 0x41, 0x0E, 0x7D, 0x73,
+ 0x29, 0x28, 0xDA, 0xAF, 0x69, 0xB2, 0xC5, 0xA7, 0x2A, 0x0A, 0xB5, 0x9C,
+ 0xC2, 0xAC, 0x5F, 0x59, 0x5C, 0xEE, 0x44, 0x49, 0x6F, 0x4F, 0x64, 0x43,
+ 0x6F, 0x43, 0x44, 0xAA, 0xA0, 0x4E, 0x94, 0x7C, 0x26, 0x5A, 0xF1, 0xD9,
+ 0xE6, 0x09, 0x80, 0x7A, 0x7D, 0x2E, 0xA2, 0xB9, 0x1A, 0x7A, 0x8F, 0x2A,
+ 0x97, 0x77, 0x23, 0xB4, 0x10, 0xAD, 0x20, 0x7B, 0xA3, 0x0F, 0xFD, 0x44,
+ 0x38, 0xAD, 0x94, 0x39, 0x88, 0x1C, 0xC4, 0xC8, 0xDF, 0xF1, 0x04, 0xA6,
+ 0x51, 0x5D, 0x54, 0x53, 0x60, 0xE4, 0x8A, 0x89, 0x4A, 0x9C, 0xE1, 0x68,
+ 0x4D, 0xFE, 0x69, 0x94, 0x0B, 0x8E, 0xED, 0x6C, 0xFE, 0x11, 0xA7, 0x77,
+ 0xBF, 0x08, 0x41, 0x67, 0x22, 0x59, 0x51, 0x48, 0xEE, 0x59, 0x02, 0x0E,
+ 0x60, 0x6D, 0xAE, 0x8C, 0xC6, 0x39, 0xB7, 0x55, 0xC5, 0x3B, 0x87, 0xA9,
+ 0xBD, 0xD8, 0xEA, 0x48, 0x21, 0xE4, 0x57, 0x51, 0x56, 0x03, 0xF4, 0xBE,
+ 0xBD, 0xBD, 0xC5, 0x26, 0x9B, 0x27, 0xE3, 0xAE, 0xD5, 0x1E, 0x30, 0xE9,
+ 0x7C, 0x9D, 0xDB, 0xE1, 0x09, 0x9D, 0x82, 0x49, 0x15, 0x38, 0x69, 0xFC,
+ 0x1D, 0x52, 0x1A, 0x75, 0xE6, 0xDD, 0x1D, 0xBE, 0x06, 0xC4, 0x9F, 0x14,
+ 0x4C, 0x12, 0xDE, 0xDF, 0x4A, 0xE1, 0x3B, 0xE7, 0xD1, 0xE3, 0x71, 0xD1,
+ 0xFA, 0xD8, 0x0E, 0x63, 0x27, 0xA9, 0xC7, 0x9D, 0xC0, 0x01, 0xC2, 0xDD,
+ 0xFC, 0xA6, 0x1F, 0x59, 0x87, 0xC5, 0x56, 0x99, 0x80, 0xEB, 0xF0, 0xB8,
+ 0xB3, 0x00, 0x9A, 0x61, 0xDB, 0x50, 0x79, 0x48, 0x37, 0x35, 0xDA, 0xD8,
+ 0xF2, 0x37, 0xA7, 0x43, 0xA7, 0xEB, 0x88, 0x2C, 0x68, 0xB4, 0xBB, 0x14,
+ 0x45, 0x31, 0x6B, 0x87, 0x65, 0xE7, 0x82, 0xB4, 0x74, 0xD2, 0xFF, 0x7F,
+ 0x60, 0x15, 0x94, 0x75, 0xEE, 0x30, 0x3C, 0x4E, 0xFC, 0x41, 0xD1, 0x5B,
+ 0xDD, 0x84, 0x6E, 0x13, 0x6C, 0xF8, 0x12, 0xE6, 0xB7, 0xA4, 0xB9, 0xC8,
+ 0x13, 0x89, 0x0C, 0x34, 0xA6, 0xAF, 0x09, 0xEB, 0xF2, 0xB3, 0x79, 0x77,
+ 0x80, 0xD8, 0x77, 0x64, 0xAD, 0x32, 0x3D, 0xD2, 0x06, 0xDF, 0x72, 0x11,
+ 0x4A, 0xA7, 0x70, 0xCE, 0xF9, 0xE6, 0x81, 0x35, 0xA4, 0xA7, 0x52, 0xB5,
+ 0x13, 0x68, 0x5C, 0x69, 0x45, 0xE2, 0x77, 0x2D, 0xBE, 0x2C, 0xE9, 0x38,
+ 0x25, 0x28, 0x7B, 0x63, 0x2C, 0x19, 0x8F, 0x59
+};
+
+/* aad */
+uint8_t aad[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B
+};
+
+/* iv */
+uint8_t iv[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F
+};
+
+/* cipher key */
+uint8_t cipher_key[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F
+};
+
+/* auth key */
+uint8_t auth_key[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23,
+ 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B,
+ 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50, 0x51, 0x52, 0x53,
+ 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B,
+ 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F
+};
+
+/* Digests */
+uint8_t digest[2048] = { 0x00 };
+
+struct cperf_test_vector*
+cperf_test_vector_get_dummy(struct cperf_options *options)
+{
+ struct cperf_test_vector *t_vec;
+
+ t_vec = (struct cperf_test_vector *)rte_malloc(NULL,
+ sizeof(struct cperf_test_vector), 0);
+ if (t_vec == NULL)
+ return t_vec;
+
+ t_vec->plaintext.data = plaintext;
+ t_vec->plaintext.length = options->max_buffer_size;
+
+ if (options->op_type == CPERF_CIPHER_ONLY ||
+ options->op_type == CPERF_CIPHER_THEN_AUTH ||
+ options->op_type == CPERF_AUTH_THEN_CIPHER ||
+ options->op_type == CPERF_AEAD) {
+ if (options->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
+ t_vec->cipher_key.length = 0;
+ t_vec->ciphertext.data = plaintext;
+ t_vec->cipher_key.data = NULL;
+ t_vec->iv.data = NULL;
+ } else {
+ t_vec->cipher_key.length = options->cipher_key_sz;
+ t_vec->ciphertext.data = ciphertext;
+ t_vec->cipher_key.data = cipher_key;
+ t_vec->iv.data = rte_malloc(NULL, options->cipher_iv_sz,
+ 16);
+ if (t_vec->iv.data == NULL) {
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->iv.data, iv, options->cipher_iv_sz);
+ }
+ t_vec->ciphertext.length = options->max_buffer_size;
+ t_vec->iv.phys_addr = rte_malloc_virt2phy(t_vec->iv.data);
+ t_vec->iv.length = options->cipher_iv_sz;
+ t_vec->data.cipher_offset = 0;
+ t_vec->data.cipher_length = options->max_buffer_size;
+ }
+
+ if (options->op_type == CPERF_AUTH_ONLY ||
+ options->op_type == CPERF_CIPHER_THEN_AUTH ||
+ options->op_type == CPERF_AUTH_THEN_CIPHER ||
+ options->op_type == CPERF_AEAD) {
+ uint8_t aad_alloc = 0;
+
+ t_vec->auth_key.length = options->auth_key_sz;
+
+ switch (options->auth_algo) {
+ case RTE_CRYPTO_AUTH_NULL:
+ t_vec->auth_key.data = NULL;
+ aad_alloc = 0;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GCM:
+ t_vec->auth_key.data = NULL;
+ aad_alloc = 1;
+ break;
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ t_vec->auth_key.data = auth_key;
+ aad_alloc = 1;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ /* auth key should be the same as cipher key */
+ t_vec->auth_key.data = cipher_key;
+ aad_alloc = 1;
+ break;
+ default:
+ t_vec->auth_key.data = auth_key;
+ aad_alloc = 0;
+ break;
+ }
+
+ if (aad_alloc && options->auth_aad_sz) {
+ t_vec->aad.data = rte_malloc(NULL,
+ options->auth_aad_sz, 16);
+ if (t_vec->aad.data == NULL) {
+ if (options->op_type != CPERF_AUTH_ONLY)
+ rte_free(t_vec->iv.data);
+ rte_free(t_vec);
+ return NULL;
+ }
+ memcpy(t_vec->aad.data, aad, options->auth_aad_sz);
+ } else {
+ t_vec->aad.data = NULL;
+ }
+
+ t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);
+ t_vec->aad.length = options->auth_aad_sz;
+ t_vec->digest.data = rte_malloc(NULL, options->auth_digest_sz,
+ 16);
+ if (t_vec->digest.data == NULL) {
+ if (options->op_type != CPERF_AUTH_ONLY)
+ rte_free(t_vec->iv.data);
+ rte_free(t_vec->aad.data);
+ rte_free(t_vec);
+ return NULL;
+ }
+ t_vec->digest.phys_addr =
+ rte_malloc_virt2phy(t_vec->digest.data);
+ t_vec->digest.length = options->auth_digest_sz;
+ memcpy(t_vec->digest.data, digest, options->auth_digest_sz);
+ t_vec->data.auth_offset = 0;
+ t_vec->data.auth_length = options->max_buffer_size;
+ }
+
+ return t_vec;
+}
diff --git a/app/test-crypto-perf/cperf_test_vectors.h b/app/test-crypto-perf/cperf_test_vectors.h
new file mode 100644
index 00000000..e64f1168
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_vectors.h
@@ -0,0 +1,98 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_TEST_VECTRORS_
+#define _CPERF_TEST_VECTRORS_
+
+#include "cperf_options.h"
+
+struct cperf_test_vector {
+ struct {
+ uint8_t *data;
+ uint32_t length;
+ } plaintext;
+
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } cipher_key;
+
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } auth_key;
+
+ struct {
+ uint8_t *data;
+ phys_addr_t phys_addr;
+ uint16_t length;
+ } iv;
+
+ struct {
+ uint8_t *data;
+ uint32_t length;
+ } ciphertext;
+
+ struct {
+ uint8_t *data;
+ phys_addr_t phys_addr;
+ uint16_t length;
+ } aad;
+
+ struct {
+ uint8_t *data;
+ phys_addr_t phys_addr;
+ uint16_t length;
+ } digest;
+
+ struct {
+ uint32_t auth_offset;
+ uint32_t auth_length;
+ uint32_t cipher_offset;
+ uint32_t cipher_length;
+ } data;
+};
+
+struct cperf_test_vector*
+cperf_test_vector_get_dummy(struct cperf_options *options);
+
+extern uint8_t ciphertext[2048];
+
+extern uint8_t cipher_key[];
+extern uint8_t auth_key[];
+
+extern uint8_t iv[];
+extern uint8_t aad[];
+
+extern uint8_t digest[2048];
+
+#endif
diff --git a/app/test-crypto-perf/cperf_test_verify.c b/app/test-crypto-perf/cperf_test_verify.c
new file mode 100644
index 00000000..454221e6
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_verify.c
@@ -0,0 +1,579 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+
+#include "cperf_test_verify.h"
+#include "cperf_ops.h"
+
+struct cperf_verify_ctx {
+ uint8_t dev_id;
+ uint16_t qp_id;
+ uint8_t lcore_id;
+
+ struct rte_mempool *pkt_mbuf_pool_in;
+ struct rte_mempool *pkt_mbuf_pool_out;
+ struct rte_mbuf **mbufs_in;
+ struct rte_mbuf **mbufs_out;
+
+ struct rte_mempool *crypto_op_pool;
+
+ struct rte_cryptodev_sym_session *sess;
+
+ cperf_populate_ops_t populate_ops;
+
+ const struct cperf_options *options;
+ const struct cperf_test_vector *test_vector;
+};
+
+struct cperf_op_result {
+ enum rte_crypto_op_status status;
+};
+
+static void
+cperf_verify_test_free(struct cperf_verify_ctx *ctx, uint32_t mbuf_nb)
+{
+ uint32_t i;
+
+ if (ctx) {
+ if (ctx->sess)
+ rte_cryptodev_sym_session_free(ctx->dev_id, ctx->sess);
+
+ if (ctx->mbufs_in) {
+ for (i = 0; i < mbuf_nb; i++)
+ rte_pktmbuf_free(ctx->mbufs_in[i]);
+
+ rte_free(ctx->mbufs_in);
+ }
+
+ if (ctx->mbufs_out) {
+ for (i = 0; i < mbuf_nb; i++) {
+ if (ctx->mbufs_out[i] != NULL)
+ rte_pktmbuf_free(ctx->mbufs_out[i]);
+ }
+
+ rte_free(ctx->mbufs_out);
+ }
+
+ if (ctx->pkt_mbuf_pool_in)
+ rte_mempool_free(ctx->pkt_mbuf_pool_in);
+
+ if (ctx->pkt_mbuf_pool_out)
+ rte_mempool_free(ctx->pkt_mbuf_pool_out);
+
+ if (ctx->crypto_op_pool)
+ rte_mempool_free(ctx->crypto_op_pool);
+
+ rte_free(ctx);
+ }
+}
+
+static struct rte_mbuf *
+cperf_mbuf_create(struct rte_mempool *mempool,
+ uint32_t segments_nb,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector)
+{
+ struct rte_mbuf *mbuf;
+ uint32_t segment_sz = options->max_buffer_size / segments_nb;
+ uint32_t last_sz = options->max_buffer_size % segments_nb;
+ uint8_t *mbuf_data;
+ uint8_t *test_data =
+ (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ test_vector->plaintext.data :
+ test_vector->ciphertext.data;
+
+ mbuf = rte_pktmbuf_alloc(mempool);
+ if (mbuf == NULL)
+ goto error;
+
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, segment_sz);
+ test_data += segment_sz;
+ segments_nb--;
+
+ while (segments_nb) {
+ struct rte_mbuf *m;
+
+ m = rte_pktmbuf_alloc(mempool);
+ if (m == NULL)
+ goto error;
+
+ rte_pktmbuf_chain(mbuf, m);
+
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, segment_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, segment_sz);
+ test_data += segment_sz;
+ segments_nb--;
+ }
+
+ if (last_sz) {
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf, last_sz);
+ if (mbuf_data == NULL)
+ goto error;
+
+ memcpy(mbuf_data, test_data, last_sz);
+ }
+
+ if (options->op_type != CPERF_CIPHER_ONLY) {
+ mbuf_data = (uint8_t *)rte_pktmbuf_append(mbuf,
+ options->auth_digest_sz);
+ if (mbuf_data == NULL)
+ goto error;
+ }
+
+ if (options->op_type == CPERF_AEAD) {
+ uint8_t *aead = (uint8_t *)rte_pktmbuf_prepend(mbuf,
+ RTE_ALIGN_CEIL(options->auth_aad_sz, 16));
+
+ if (aead == NULL)
+ goto error;
+
+ memcpy(aead, test_vector->aad.data, test_vector->aad.length);
+ }
+
+ return mbuf;
+error:
+ if (mbuf != NULL)
+ rte_pktmbuf_free(mbuf);
+
+ return NULL;
+}
+
+void *
+cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *op_fns)
+{
+ struct cperf_verify_ctx *ctx = NULL;
+ unsigned int mbuf_idx = 0;
+ char pool_name[32] = "";
+
+ ctx = rte_malloc(NULL, sizeof(struct cperf_verify_ctx), 0);
+ if (ctx == NULL)
+ goto err;
+
+ ctx->dev_id = dev_id;
+ ctx->qp_id = qp_id;
+
+ ctx->populate_ops = op_fns->populate_ops;
+ ctx->options = options;
+ ctx->test_vector = test_vector;
+
+ ctx->sess = op_fns->sess_create(dev_id, options, test_vector);
+ if (ctx->sess == NULL)
+ goto err;
+
+ snprintf(pool_name, sizeof(pool_name), "cperf_pool_in_cdev_%d",
+ dev_id);
+
+ ctx->pkt_mbuf_pool_in = rte_pktmbuf_pool_create(pool_name,
+ options->pool_sz * options->segments_nb, 0, 0,
+ RTE_PKTMBUF_HEADROOM +
+ RTE_CACHE_LINE_ROUNDUP(
+ (options->max_buffer_size / options->segments_nb) +
+ (options->max_buffer_size % options->segments_nb) +
+ options->auth_digest_sz),
+ rte_socket_id());
+
+ if (ctx->pkt_mbuf_pool_in == NULL)
+ goto err;
+
+ /* Generate mbufs_in with plaintext populated for test */
+ ctx->mbufs_in = rte_malloc(NULL,
+ (sizeof(struct rte_mbuf *) * ctx->options->pool_sz), 0);
+
+ for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
+ ctx->mbufs_in[mbuf_idx] = cperf_mbuf_create(
+ ctx->pkt_mbuf_pool_in, options->segments_nb,
+ options, test_vector);
+ if (ctx->mbufs_in[mbuf_idx] == NULL)
+ goto err;
+ }
+
+ if (options->out_of_place == 1) {
+
+ snprintf(pool_name, sizeof(pool_name), "cperf_pool_out_cdev_%d",
+ dev_id);
+
+ ctx->pkt_mbuf_pool_out = rte_pktmbuf_pool_create(
+ pool_name, options->pool_sz, 0, 0,
+ RTE_PKTMBUF_HEADROOM +
+ RTE_CACHE_LINE_ROUNDUP(
+ options->max_buffer_size +
+ options->auth_digest_sz),
+ rte_socket_id());
+
+ if (ctx->pkt_mbuf_pool_out == NULL)
+ goto err;
+ }
+
+ ctx->mbufs_out = rte_malloc(NULL,
+ (sizeof(struct rte_mbuf *) *
+ ctx->options->pool_sz), 0);
+
+ for (mbuf_idx = 0; mbuf_idx < options->pool_sz; mbuf_idx++) {
+ if (options->out_of_place == 1) {
+ ctx->mbufs_out[mbuf_idx] = cperf_mbuf_create(
+ ctx->pkt_mbuf_pool_out, 1,
+ options, test_vector);
+ if (ctx->mbufs_out[mbuf_idx] == NULL)
+ goto err;
+ } else {
+ ctx->mbufs_out[mbuf_idx] = NULL;
+ }
+ }
+
+ snprintf(pool_name, sizeof(pool_name), "cperf_op_pool_cdev_%d",
+ dev_id);
+
+ ctx->crypto_op_pool = rte_crypto_op_pool_create(pool_name,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, options->pool_sz, 0, 0,
+ rte_socket_id());
+ if (ctx->crypto_op_pool == NULL)
+ goto err;
+
+ return ctx;
+err:
+ cperf_verify_test_free(ctx, mbuf_idx);
+
+ return NULL;
+}
+
+static int
+cperf_verify_op(struct rte_crypto_op *op,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *vector)
+{
+ const struct rte_mbuf *m;
+ uint32_t len;
+ uint16_t nb_segs;
+ uint8_t *data;
+ uint32_t cipher_offset, auth_offset;
+ uint8_t cipher, auth;
+ int res = 0;
+
+ if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+ return 1;
+
+ if (op->sym->m_dst)
+ m = op->sym->m_dst;
+ else
+ m = op->sym->m_src;
+ nb_segs = m->nb_segs;
+ len = 0;
+ while (m && nb_segs != 0) {
+ len += m->data_len;
+ m = m->next;
+ nb_segs--;
+ }
+
+ data = rte_malloc(NULL, len, 0);
+ if (data == NULL)
+ return 1;
+
+ if (op->sym->m_dst)
+ m = op->sym->m_dst;
+ else
+ m = op->sym->m_src;
+ nb_segs = m->nb_segs;
+ len = 0;
+ while (m && nb_segs != 0) {
+ memcpy(data + len, rte_pktmbuf_mtod(m, uint8_t *),
+ m->data_len);
+ len += m->data_len;
+ m = m->next;
+ nb_segs--;
+ }
+
+ switch (options->op_type) {
+ case CPERF_CIPHER_ONLY:
+ cipher = 1;
+ cipher_offset = 0;
+ auth = 0;
+ auth_offset = 0;
+ break;
+ case CPERF_CIPHER_THEN_AUTH:
+ cipher = 1;
+ cipher_offset = 0;
+ auth = 1;
+ auth_offset = options->test_buffer_size;
+ break;
+ case CPERF_AUTH_ONLY:
+ cipher = 0;
+ cipher_offset = 0;
+ auth = 1;
+ auth_offset = options->test_buffer_size;
+ break;
+ case CPERF_AUTH_THEN_CIPHER:
+ cipher = 1;
+ cipher_offset = 0;
+ auth = 1;
+ auth_offset = options->test_buffer_size;
+ break;
+ case CPERF_AEAD:
+ cipher = 1;
+ cipher_offset = vector->aad.length;
+ auth = 1;
+ auth_offset = vector->aad.length + options->test_buffer_size;
+ break;
+ }
+
+ if (cipher == 1) {
+ if (options->cipher_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ res += memcmp(data + cipher_offset,
+ vector->ciphertext.data,
+ options->test_buffer_size);
+ else
+ res += memcmp(data + cipher_offset,
+ vector->plaintext.data,
+ options->test_buffer_size);
+ }
+
+ if (auth == 1) {
+ if (options->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ res += memcmp(data + auth_offset,
+ vector->digest.data,
+ options->auth_digest_sz);
+ }
+
+ return !!res;
+}
+
+int
+cperf_verify_test_runner(void *test_ctx)
+{
+ struct cperf_verify_ctx *ctx = test_ctx;
+
+ uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
+ uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
+ uint64_t ops_failed = 0;
+
+ static int only_once;
+
+ uint64_t i, m_idx = 0;
+ uint16_t ops_unused = 0;
+
+ struct rte_crypto_op *ops[ctx->options->max_burst_size];
+ struct rte_crypto_op *ops_processed[ctx->options->max_burst_size];
+
+ uint32_t lcore = rte_lcore_id();
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ struct rte_cryptodev_info dev_info;
+ int linearize = 0;
+
+ /* Check if source mbufs require coalescing */
+ if (ctx->options->segments_nb > 1) {
+ rte_cryptodev_info_get(ctx->dev_id, &dev_info);
+ if ((dev_info.feature_flags &
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER) == 0)
+ linearize = 1;
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+
+ ctx->lcore_id = lcore;
+
+ if (!ctx->options->csv)
+ printf("\n# Running verify test on device: %u, lcore: %u\n",
+ ctx->dev_id, lcore);
+
+ while (ops_enqd_total < ctx->options->total_ops) {
+
+ uint16_t burst_size = ((ops_enqd_total + ctx->options->max_burst_size)
+ <= ctx->options->total_ops) ?
+ ctx->options->max_burst_size :
+ ctx->options->total_ops -
+ ops_enqd_total;
+
+ uint16_t ops_needed = burst_size - ops_unused;
+
+ /* Allocate crypto ops from pool */
+ if (ops_needed != rte_crypto_op_bulk_alloc(
+ ctx->crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops, ops_needed))
+ return -1;
+
+ /* Setup crypto op, attach mbuf etc */
+ (ctx->populate_ops)(ops, &ctx->mbufs_in[m_idx],
+ &ctx->mbufs_out[m_idx],
+ ops_needed, ctx->sess, ctx->options,
+ ctx->test_vector);
+
+#ifdef CPERF_LINEARIZATION_ENABLE
+ if (linearize) {
+ /* PMD doesn't support scatter-gather and source buffer
+ * is segmented.
+ * We need to linearize it before enqueuing.
+ */
+ for (i = 0; i < burst_size; i++)
+ rte_pktmbuf_linearize(ops[i]->sym->m_src);
+ }
+#endif /* CPERF_LINEARIZATION_ENABLE */
+
+ /* Enqueue burst of ops on crypto device */
+ ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id,
+ ops, burst_size);
+ if (ops_enqd < burst_size)
+ ops_enqd_failed++;
+
+ /**
+ * Calculate number of ops not enqueued (mainly for hw
+ * accelerators whose ingress queue can fill up).
+ */
+ ops_unused = burst_size - ops_enqd;
+ ops_enqd_total += ops_enqd;
+
+
+ /* Dequeue processed burst of ops from crypto device */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, ctx->options->max_burst_size);
+
+ m_idx += ops_needed;
+ if (m_idx + ctx->options->max_burst_size > ctx->options->pool_sz)
+ m_idx = 0;
+
+ if (ops_deqd == 0) {
+ /**
+ * Count dequeue polls which didn't return any
+ * processed operations. This statistic is mainly
+ * relevant to hw accelerators.
+ */
+ ops_deqd_failed++;
+ continue;
+ }
+
+ for (i = 0; i < ops_deqd; i++) {
+ if (cperf_verify_op(ops_processed[i], ctx->options,
+ ctx->test_vector))
+ ops_failed++;
+ /* free crypto ops so they can be reused. We don't free
+ * the mbufs here as we don't want to reuse them as
+ * the crypto operation will change the data and cause
+ * failures.
+ */
+ rte_crypto_op_free(ops_processed[i]);
+ }
+ ops_deqd_total += ops_deqd;
+ }
+
+ /* Dequeue any operations still in the crypto device */
+
+ while (ops_deqd_total < ctx->options->total_ops) {
+ /* Sending 0 length burst to flush sw crypto device */
+ rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, NULL, 0);
+
+ /* dequeue burst */
+ ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+ ops_processed, ctx->options->max_burst_size);
+ if (ops_deqd == 0) {
+ ops_deqd_failed++;
+ continue;
+ }
+
+ for (i = 0; i < ops_deqd; i++) {
+ if (cperf_verify_op(ops_processed[i], ctx->options,
+ ctx->test_vector))
+ ops_failed++;
+ /* free crypto ops so they can be reused. We don't free
+ * the mbufs here as we don't want to reuse them as
+ * the crypto operation will change the data and cause
+ * failures.
+ */
+ rte_crypto_op_free(ops_processed[i]);
+ }
+ ops_deqd_total += ops_deqd;
+ }
+
+ if (!ctx->options->csv) {
+ if (!only_once)
+ printf("%12s%12s%12s%12s%12s%12s%12s%12s\n\n",
+ "lcore id", "Buf Size", "Burst size",
+ "Enqueued", "Dequeued", "Failed Enq",
+ "Failed Deq", "Failed Ops");
+ only_once = 1;
+
+ printf("%12u%12u%12u%12"PRIu64"%12"PRIu64"%12"PRIu64
+ "%12"PRIu64"%12"PRIu64"\n",
+ ctx->lcore_id,
+ ctx->options->max_buffer_size,
+ ctx->options->max_burst_size,
+ ops_enqd_total,
+ ops_deqd_total,
+ ops_enqd_failed,
+ ops_deqd_failed,
+ ops_failed);
+ } else {
+ if (!only_once)
+ printf("\n# lcore id, Buffer Size(B), "
+ "Burst Size,Enqueued,Dequeued,Failed Enq,"
+ "Failed Deq,Failed Ops\n");
+ only_once = 1;
+
+ printf("%10u;%10u;%u;%"PRIu64";%"PRIu64";%"PRIu64";%"PRIu64";"
+ "%"PRIu64"\n",
+ ctx->lcore_id,
+ ctx->options->max_buffer_size,
+ ctx->options->max_burst_size,
+ ops_enqd_total,
+ ops_deqd_total,
+ ops_enqd_failed,
+ ops_deqd_failed,
+ ops_failed);
+ }
+
+ return 0;
+}
+
+
+
+void
+cperf_verify_test_destructor(void *arg)
+{
+ struct cperf_verify_ctx *ctx = arg;
+
+ if (ctx == NULL)
+ return;
+
+ cperf_verify_test_free(ctx, ctx->options->pool_sz);
+}
diff --git a/app/test-crypto-perf/cperf_test_verify.h b/app/test-crypto-perf/cperf_test_verify.h
new file mode 100644
index 00000000..3fa78ee6
--- /dev/null
+++ b/app/test-crypto-perf/cperf_test_verify.h
@@ -0,0 +1,58 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CPERF_VERIFY_
+#define _CPERF_VERIFY_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+#include "cperf.h"
+#include "cperf_ops.h"
+#include "cperf_options.h"
+#include "cperf_test_vectors.h"
+
+
+void *
+cperf_verify_test_constructor(uint8_t dev_id, uint16_t qp_id,
+ const struct cperf_options *options,
+ const struct cperf_test_vector *test_vector,
+ const struct cperf_op_fns *ops_fn);
+
+int
+cperf_verify_test_runner(void *test_ctx);
+
+void
+cperf_verify_test_destructor(void *test_ctx);
+
+#endif /* _CPERF_VERIFY_ */
diff --git a/app/test-crypto-perf/data/aes_cbc_128_sha.data b/app/test-crypto-perf/data/aes_cbc_128_sha.data
new file mode 100644
index 00000000..0b054f5a
--- /dev/null
+++ b/app/test-crypto-perf/data/aes_cbc_128_sha.data
@@ -0,0 +1,502 @@
+# List of tests for AES-128 CBC:
+# 1) [sha1_hmac_buff_x]
+# 2) [sha224_hmac_buff_x]
+# 3) [sha256_hmac_buff_x]
+# 4) [sha384_hmac_buff_x]
+# 5) [sha512_hmac_buff_x]
+# where x is one of the values: 32, 64, 128, 256, 512, 1024, 2048
+
+##########
+# GLOBAL #
+##########
+plaintext =
+0xff, 0xca, 0xfb, 0xf1, 0x38, 0x20, 0x2f, 0x7b, 0x24, 0x98, 0x26, 0x7d, 0x1d, 0x9f, 0xb3, 0x93,
+0xd9, 0xef, 0xbd, 0xad, 0x4e, 0x40, 0xbd, 0x60, 0xe9, 0x48, 0x59, 0x90, 0x67, 0xd7, 0x2b, 0x7b,
+0x8a, 0xe0, 0x4d, 0xb0, 0x70, 0x38, 0xcc, 0x48, 0x61, 0x7d, 0xee, 0xd6, 0x35, 0x49, 0xae, 0xb4,
+0xaf, 0x6b, 0xdd, 0xe6, 0x21, 0xc0, 0x60, 0xce, 0x0a, 0xf4, 0x1c, 0x2e, 0x1c, 0x8d, 0xe8, 0x7b,
+0x59, 0xda, 0x19, 0x4f, 0xec, 0x07, 0x8e, 0xe2, 0xf0, 0x61, 0xf9, 0x27, 0x61, 0x6f, 0xf8, 0xdf,
+0x62, 0x4d, 0xaf, 0x06, 0xfe, 0x41, 0xa6, 0xa6, 0xf9, 0xa2, 0x06, 0x40, 0xb3, 0x04, 0xbd, 0xe6,
+0xc8, 0x17, 0xfb, 0x56, 0x6f, 0xa9, 0x3b, 0x8e, 0xa6, 0x58, 0xdc, 0x91, 0x17, 0x58, 0x42, 0x95,
+0xa3, 0x7c, 0x81, 0x78, 0xa6, 0x3d, 0x3f, 0x75, 0x74, 0x17, 0x1a, 0xd3, 0x6c, 0x2f, 0x48, 0x39,
+0x20, 0x20, 0xc1, 0x9a, 0x29, 0x84, 0x7d, 0x2d, 0x52, 0xa1, 0xf9, 0x5c, 0xf3, 0x4f, 0x91, 0xfc,
+0x75, 0xcf, 0xd6, 0x2d, 0xe7, 0x9a, 0x59, 0x6e, 0x00, 0x0e, 0x8d, 0x22, 0x17, 0xbd, 0xa0, 0xdd,
+0x79, 0x1f, 0x71, 0xe6, 0xcd, 0x2f, 0xb1, 0xb6, 0xbc, 0xc3, 0xdb, 0x02, 0x91, 0x41, 0x9b, 0x09,
+0xa9, 0xd2, 0x7e, 0xbd, 0x2c, 0x18, 0xae, 0xc0, 0x93, 0x0c, 0x02, 0x9a, 0xdb, 0x4e, 0xaa, 0xeb,
+0x84, 0x4b, 0x43, 0x5e, 0xf0, 0x98, 0xf2, 0x5f, 0x86, 0x70, 0x96, 0x90, 0x15, 0x30, 0xcf, 0x3a,
+0xc9, 0x33, 0x21, 0xec, 0x59, 0x86, 0xfc, 0x65, 0x7d, 0xbe, 0xb9, 0xf8, 0x97, 0xf9, 0x30, 0xa9,
+0x6d, 0xfc, 0x0c, 0x6e, 0x36, 0x67, 0xd5, 0xa6, 0x67, 0xd9, 0xbd, 0x9b, 0x34, 0x5d, 0xa7, 0xdd,
+0xda, 0x46, 0x33, 0x25, 0x60, 0x4a, 0x18, 0xf1, 0x55, 0x07, 0xb2, 0xb7, 0x26, 0x7b, 0xa6, 0x1e,
+0x77, 0xbe, 0x7f, 0x35, 0x46, 0xdf, 0x56, 0x9c, 0x22, 0x19, 0xc8, 0x85, 0xa2, 0x45, 0xb2, 0xad,
+0xf9, 0x26, 0x66, 0xab, 0xfc, 0x97, 0x4b, 0x51, 0x32, 0x36, 0xbc, 0xad, 0xcf, 0x54, 0x3a, 0x4f,
+0x94, 0xdb, 0xd2, 0xf9, 0x67, 0x1b, 0x3b, 0xe5, 0xb2, 0x1d, 0xc5, 0x52, 0x64, 0x2c, 0x06, 0x44,
+0xcf, 0x18, 0x83, 0xe0, 0xd8, 0x04, 0x92, 0xa9, 0xc4, 0x3c, 0x8b, 0xa3, 0x2b, 0xbc, 0x88, 0x7e,
+0xc0, 0x76, 0xa7, 0xe2, 0x7b, 0x47, 0x90, 0xf2, 0xaa, 0x0a, 0x34, 0x1b, 0x91, 0x12, 0xd2, 0xd0,
+0x82, 0x45, 0xf4, 0x57, 0xf1, 0xbd, 0x91, 0x5e, 0xab, 0x41, 0x4c, 0xdf, 0x91, 0x4c, 0xdd, 0x67,
+0x04, 0xa0, 0x98, 0x23, 0x8c, 0x24, 0xbe, 0xd6, 0x80, 0xb3, 0x6d, 0x04, 0xa1, 0x77, 0x43, 0xa5,
+0xee, 0xb7, 0xce, 0xb1, 0x48, 0x43, 0x94, 0x61, 0x15, 0x20, 0x9d, 0xce, 0xd0, 0x14, 0x95, 0x37,
+0xc8, 0x64, 0xa3, 0x2d, 0x3d, 0xe3, 0xff, 0xb4, 0x55, 0x83, 0x84, 0x41, 0x50, 0x57, 0xbd, 0x5a,
+0x0c, 0xe4, 0xda, 0x3b, 0x36, 0x4d, 0x21, 0xb5, 0x6f, 0x73, 0x2a, 0x8c, 0x78, 0x4f, 0x9b, 0x83,
+0xda, 0x11, 0x3c, 0xf0, 0xc9, 0x7e, 0xa6, 0x48, 0x34, 0x53, 0x62, 0xd3, 0x0c, 0xff, 0xb1, 0x74,
+0xd6, 0xea, 0xa5, 0xfc, 0x13, 0x1c, 0x05, 0xa8, 0xc0, 0xbc, 0x95, 0x9c, 0x8c, 0xf6, 0x8c, 0xc3,
+0xf3, 0x69, 0xab, 0x93, 0x65, 0xc0, 0xb7, 0x7e, 0xb0, 0x16, 0x7c, 0xb5, 0x5f, 0x05, 0x28, 0xc9,
+0x09, 0x4e, 0x2a, 0x32, 0x87, 0xb3, 0xab, 0xf8, 0x4c, 0xab, 0xeb, 0x3b, 0x6a, 0xa0, 0x1d, 0x7f,
+0xef, 0xe5, 0x9b, 0xa4, 0xb7, 0xd7, 0xc2, 0x5e, 0x03, 0x0f, 0x99, 0xeb, 0xb1, 0xb1, 0xa6, 0x9d,
+0x1c, 0x7c, 0x5c, 0x94, 0x8b, 0x6e, 0x11, 0x7a, 0xb3, 0x6d, 0x1e, 0x61, 0x64, 0xc3, 0x7d, 0x1c,
+0xb3, 0x54, 0x65, 0x08, 0x3b, 0xda, 0x97, 0xb9, 0x75, 0xc1, 0x2b, 0x3e, 0xa8, 0x5c, 0x3c, 0x2d,
+0x81, 0x5b, 0xbf, 0x5a, 0x13, 0x0e, 0xeb, 0x66, 0xc0, 0x0b, 0x8f, 0x04, 0x68, 0x68, 0x9b, 0xe3,
+0x0d, 0x84, 0xe0, 0xcf, 0x83, 0xd7, 0x62, 0x48, 0xc1, 0x31, 0xa5, 0xd5, 0xbc, 0xe3, 0xa3, 0xa5,
+0xb6, 0xd1, 0xfd, 0x81, 0x91, 0x4d, 0xbd, 0xc4, 0x62, 0x4f, 0xe3, 0xd5, 0x99, 0x14, 0xf1, 0xcd,
+0xf4, 0x7d, 0x13, 0xda, 0x68, 0x0a, 0xca, 0xd6, 0x82, 0x0b, 0xf6, 0xea, 0xad, 0x78, 0xa4, 0xc8,
+0x14, 0x7a, 0xec, 0x11, 0xd3, 0x16, 0x86, 0x9f, 0x17, 0x37, 0x6a, 0x06, 0x56, 0xaa, 0x1b, 0xd1,
+0xaf, 0x85, 0x95, 0x21, 0x36, 0x69, 0xec, 0x1b, 0x56, 0x84, 0x01, 0x3f, 0x4d, 0x34, 0x3d, 0x2d,
+0x38, 0x57, 0x2d, 0x7e, 0xd9, 0x7b, 0x2d, 0x81, 0x86, 0xd4, 0x7c, 0x83, 0x12, 0x1d, 0x9d, 0x27,
+0x72, 0x1b, 0x5e, 0xf4, 0x15, 0xa5, 0xcd, 0xb7, 0x5f, 0xbb, 0x49, 0xa1, 0xd9, 0xdd, 0x8d, 0xad,
+0xa9, 0x2c, 0x65, 0x18, 0x91, 0xfd, 0xd2, 0xd4, 0x09, 0x60, 0x0c, 0xfd, 0xa4, 0xe1, 0x25, 0x87,
+0x32, 0x64, 0x7b, 0x99, 0xd7, 0x61, 0x2f, 0xd4, 0x73, 0xdd, 0x85, 0x26, 0x08, 0x92, 0xc0, 0xe1,
+0x4f, 0x0c, 0x76, 0x5b, 0x26, 0x69, 0xdb, 0x78, 0x35, 0x65, 0xb9, 0x58, 0x1f, 0x9c, 0x0f, 0x18,
+0x95, 0xfe, 0x40, 0xfc, 0xf7, 0x93, 0x71, 0x70, 0x8b, 0x73, 0xdc, 0xb0, 0x88, 0x72, 0x19, 0x26,
+0x94, 0x26, 0xa7, 0xaa, 0x00, 0x72, 0x61, 0x53, 0xd2, 0x5d, 0x8f, 0x5e, 0x51, 0x88, 0x2d, 0xa4,
+0x28, 0xd5, 0xaf, 0x2d, 0xd2, 0x84, 0x39, 0x75, 0x1e, 0xe7, 0xf0, 0x23, 0xc0, 0x4f, 0x8d, 0xdd,
+0x5c, 0x90, 0xef, 0x6e, 0x53, 0xe0, 0x54, 0x67, 0xe1, 0x5b, 0x10, 0xf1, 0xf5, 0xf8, 0x64, 0x34,
+0x94, 0xeb, 0x37, 0xf7, 0xe9, 0xaa, 0x6c, 0xa4, 0xd8, 0x74, 0x6d, 0xca, 0x8d, 0x1a, 0x31, 0x73,
+0xca, 0xb4, 0xc7, 0x47, 0x34, 0x7f, 0xf8, 0x24, 0x9b, 0xfa, 0xc9, 0xcc, 0xa8, 0x61, 0xb4, 0x0e,
+0x4d, 0x68, 0xc7, 0xa0, 0xcb, 0xea, 0xf0, 0xcc, 0x0a, 0x6c, 0xf2, 0x33, 0x42, 0x99, 0x6c, 0xd8,
+0x74, 0x7f, 0x1e, 0x8a, 0xa3, 0x0a, 0x48, 0x4b, 0x7e, 0xbe, 0xdb, 0x7f, 0x56, 0x69, 0x43, 0xe8,
+0xbf, 0x12, 0xc4, 0x7b, 0xc2, 0xd9, 0xfa, 0x5c, 0xeb, 0x45, 0xca, 0x07, 0x3d, 0xc0, 0xcd, 0x68,
+0x8b, 0xd0, 0x79, 0xea, 0x0a, 0x78, 0x06, 0xdc, 0x81, 0xd7, 0x32, 0x18, 0xb3, 0x65, 0xbe, 0x47,
+0xbb, 0xfa, 0x17, 0x09, 0xe9, 0x31, 0x95, 0x30, 0xef, 0x07, 0x44, 0xec, 0xd0, 0x98, 0x98, 0xc0,
+0x6b, 0x71, 0x5b, 0x23, 0xb8, 0xb6, 0xd2, 0x21, 0xff, 0x51, 0xdd, 0xae, 0x48, 0x29, 0x75, 0x0c,
+0xc3, 0x3d, 0x91, 0xfe, 0x9d, 0xa8, 0x5e, 0xb2, 0x34, 0xb2, 0xd3, 0x81, 0xf6, 0x27, 0x9c, 0xac,
+0x6b, 0x20, 0x56, 0x86, 0xa5, 0x4f, 0x7a, 0xdb, 0xf9, 0xac, 0xa9, 0x8e, 0xe3, 0x73, 0x21, 0x99,
+0x71, 0x2d, 0xaf, 0x27, 0x92, 0x0c, 0xc7, 0xd3, 0x85, 0xb3, 0x40, 0xda, 0x13, 0x4a, 0x04, 0x41,
+0x54, 0xf8, 0xf2, 0x55, 0xb7, 0x80, 0xdd, 0x77, 0xba, 0x01, 0x7a, 0x31, 0xbd, 0x6b, 0xdc, 0x5c,
+0x59, 0xf4, 0x2b, 0xca, 0x25, 0xbb, 0x50, 0xba, 0xfa, 0x42, 0x38, 0xd2, 0x28, 0x10, 0x8b, 0x7b,
+0x96, 0x45, 0x30, 0xbb, 0x7f, 0xf4, 0x5a, 0xf7, 0x28, 0x6f, 0x47, 0xdc, 0xd2, 0x82, 0xf2, 0xf7,
+0xdd, 0x20, 0xb5, 0x0c, 0x7e, 0x53, 0x85, 0xa7, 0xfc, 0x3b, 0x1a, 0xc0, 0x07, 0x7b, 0xa1, 0x43,
+0x05, 0x18, 0x19, 0xd3, 0xfc, 0x41, 0xc2, 0xce, 0xd9, 0x5b, 0x4b, 0x63, 0xe2, 0x8f, 0x86, 0x3a,
+0xd1, 0xd0, 0x1d, 0x74, 0x2e, 0xbc, 0xd3, 0xce, 0x08, 0x0c, 0x10, 0x7a, 0x42, 0x60, 0xc5, 0x3a,
+0xa6, 0xd8, 0xb0, 0x52, 0xcf, 0x53, 0x28, 0x70, 0x45, 0xb7, 0x72, 0x7d, 0x77, 0x66, 0x54, 0x3d,
+0x38, 0x26, 0xcf, 0xd5, 0xbf, 0xe4, 0x80, 0x10, 0xba, 0x2b, 0xe8, 0xdc, 0xc3, 0xfe, 0x28, 0xa3,
+0x52, 0x58, 0x70, 0x4a, 0xde, 0x84, 0x33, 0x5e, 0x93, 0x04, 0xa4, 0x7c, 0xe7, 0xea, 0x8e, 0xba,
+0xeb, 0x8a, 0x19, 0x26, 0x6a, 0x7f, 0x7c, 0x4a, 0x5b, 0xb4, 0x0d, 0xfc, 0xc8, 0x11, 0x1b, 0x41,
+0x68, 0x5d, 0x2a, 0x25, 0x04, 0x4f, 0xc8, 0xf4, 0x65, 0xfc, 0xb9, 0x58, 0xeb, 0xb4, 0x67, 0x50,
+0x24, 0xf5, 0x43, 0xf6, 0x91, 0x4a, 0xb0, 0x0f, 0x32, 0xe0, 0x07, 0x75, 0x69, 0x1b, 0x3c, 0xeb,
+0xb2, 0x65, 0x26, 0x6f, 0xb8, 0x79, 0xe0, 0x78, 0x8c, 0xdc, 0x39, 0x24, 0x48, 0x76, 0x11, 0xd4,
+0x3a, 0xc5, 0xd2, 0x2b, 0xaa, 0x55, 0xfb, 0x92, 0x12, 0x2d, 0x88, 0x05, 0xd1, 0xb1, 0x31, 0x36,
+0x1f, 0xc2, 0x44, 0x1c, 0xab, 0x2e, 0xcd, 0x1c, 0x72, 0x86, 0xf6, 0x83, 0x87, 0x2e, 0x8b, 0xdb,
+0xaa, 0x16, 0x0e, 0x1b, 0xe6, 0x5c, 0x4d, 0x2f, 0x82, 0xbd, 0x49, 0x11, 0x60, 0x22, 0x0f, 0xde,
+0x3b, 0x2b, 0x20, 0x1d, 0x56, 0xb7, 0x21, 0xae, 0x0b, 0x26, 0x4f, 0xde, 0x3d, 0xa6, 0x3f, 0x61,
+0x81, 0xe2, 0x76, 0x60, 0x08, 0xc5, 0x4b, 0x18, 0x0b, 0xd1, 0xf5, 0xff, 0x8d, 0x1a, 0x96, 0x76,
+0x51, 0x15, 0x05, 0x4d, 0x8c, 0x6b, 0x12, 0x90, 0x47, 0xd4, 0xa4, 0x38, 0xb9, 0x48, 0xe4, 0x4c,
+0x05, 0x69, 0x6a, 0x8b, 0x9d, 0x7c, 0xa1, 0xbc, 0x77, 0xeb, 0x86, 0x93, 0x0a, 0x15, 0x84, 0xba,
+0x8f, 0xf5, 0x7c, 0x44, 0x75, 0x31, 0x79, 0x16, 0xc1, 0x81, 0x1a, 0xb6, 0xe6, 0x6c, 0x3d, 0xb8,
+0x15, 0x46, 0xf5, 0xbe, 0x46, 0x04, 0xa6, 0xec, 0xec, 0xd1, 0x74, 0x8b, 0x87, 0x2b, 0xdb, 0xd0,
+0x9f, 0xb3, 0x99, 0x9d, 0x87, 0x8c, 0xc6, 0xaa, 0xd4, 0x64, 0x45, 0xbd, 0xe8, 0xed, 0xa3, 0xc1,
+0x2a, 0x41, 0x1e, 0x26, 0xaf, 0x86, 0x16, 0xed, 0x80, 0x08, 0xca, 0x64, 0x21, 0x3a, 0xce, 0x21,
+0x4c, 0x41, 0xb9, 0x13, 0x2d, 0xf7, 0x1b, 0xdf, 0x2b, 0x33, 0x69, 0xe7, 0x5c, 0x8c, 0x7b, 0xfb,
+0xe3, 0x41, 0xe9, 0xce, 0xd7, 0xff, 0x0e, 0x54, 0xfe, 0xb0, 0x71, 0x78, 0xdc, 0xde, 0x7e, 0xdd,
+0x1f, 0x1c, 0x4a, 0x8f, 0x3e, 0x16, 0xfd, 0x91, 0x82, 0x94, 0xd4, 0xc2, 0xf7, 0xb2, 0x77, 0x89,
+0x16, 0x2c, 0xba, 0xb6, 0xbd, 0xed, 0x95, 0x43, 0x05, 0x9b, 0xf2, 0xc4, 0xbe, 0x46, 0x43, 0x90,
+0x1d, 0xd8, 0x24, 0x02, 0xd2, 0xea, 0xf4, 0x08, 0xd9, 0xf7, 0x84, 0x0e, 0xc6, 0xe7, 0x44, 0xdb,
+0xb8, 0xac, 0x0a, 0x53, 0x39, 0x61, 0x43, 0xdc, 0x22, 0x28, 0x8f, 0x22, 0x2f, 0x73, 0xbf, 0x59,
+0x2d, 0x3c, 0x8c, 0x0b, 0xcc, 0x2a, 0x67, 0xe0, 0x5b, 0x5c, 0x65, 0x5e, 0x6d, 0x98, 0x99, 0xaa,
+0x3b, 0x89, 0x12, 0xe2, 0x99, 0xf6, 0x15, 0xa7, 0xd2, 0x6a, 0x79, 0xb4, 0xf6, 0x0b, 0xf5, 0x0d,
+0x2d, 0x4c, 0xcb, 0x1b, 0x35, 0x93, 0x61, 0x32, 0xa1, 0x8a, 0xa8, 0x27, 0xe8, 0x95, 0x5a, 0x56,
+0x59, 0x04, 0xfe, 0xce, 0xc2, 0xd8, 0x92, 0x97, 0xb2, 0x54, 0x63, 0xd0, 0x3b, 0xde, 0x10, 0x34,
+0x32, 0x16, 0x05, 0x51, 0x1d, 0xfc, 0x96, 0x8e, 0xf1, 0xf6, 0x4b, 0xd7, 0x48, 0x22, 0xce, 0xca,
+0x1c, 0x6b, 0xab, 0x1f, 0x59, 0xa2, 0x74, 0xd6, 0xcd, 0x15, 0x07, 0xab, 0xa2, 0xd5, 0x22, 0x81,
+0xec, 0x20, 0x14, 0x36, 0xac, 0xe4, 0x25, 0x7d, 0xe6, 0x09, 0x00, 0x2c, 0x92, 0x4d, 0x4e, 0xbf,
+0xbf, 0xa1, 0xd4, 0xbe, 0x6b, 0xd4, 0x1f, 0x95, 0x9b, 0xf3, 0xda, 0x99, 0xad, 0xa4, 0x6c, 0x73,
+0x55, 0xd1, 0x9d, 0x4b, 0x16, 0xd4, 0x06, 0xec, 0x46, 0x3d, 0xb7, 0xe7, 0xce, 0xd0, 0x1d, 0x94,
+0x65, 0xde, 0x61, 0xb3, 0xc1, 0x10, 0x65, 0xe5, 0x68, 0x9b, 0xb0, 0xb4, 0x43, 0x0b, 0x92, 0xaf,
+0xb7, 0x40, 0xa2, 0xe5, 0x06, 0x3d, 0x72, 0x00, 0xc5, 0x39, 0xab, 0x35, 0x29, 0x22, 0x4c, 0xa5,
+0xa5, 0x3f, 0x22, 0x90, 0x53, 0xd2, 0x36, 0x63, 0x1e, 0xd3, 0x33, 0xa5, 0xbb, 0x3d, 0xa3, 0x0c,
+0x14, 0x9c, 0x2e, 0x6d, 0x9a, 0x7a, 0xf7, 0xf1, 0x56, 0x66, 0xe5, 0x8d, 0x53, 0x83, 0x34, 0x3f,
+0xa9, 0x83, 0x84, 0x68, 0x90, 0xc9, 0x51, 0xc2, 0xd4, 0x8e, 0x6c, 0xc7, 0x6d, 0xa7, 0x19, 0x61,
+0xa7, 0x2e, 0x36, 0xbc, 0xd2, 0x0f, 0x17, 0x49, 0xd4, 0x6b, 0x36, 0x63, 0xfb, 0x1d, 0xf4, 0xb0,
+0x6b, 0xcf, 0x34, 0x5f, 0xd2, 0x77, 0xae, 0x12, 0xaf, 0xb3, 0xdf, 0x52, 0xf7, 0xc2, 0xc8, 0xf2,
+0x63, 0x61, 0xb6, 0x3e, 0x39, 0xf2, 0xa7, 0x1a, 0x89, 0x9d, 0x0e, 0x8f, 0xaf, 0xe1, 0x01, 0x24,
+0xa6, 0x3a, 0xd5, 0x9a, 0x62, 0x67, 0xa3, 0x66, 0xee, 0xbc, 0xc5, 0x94, 0x4b, 0xc3, 0x15, 0xa1,
+0x7e, 0x07, 0x07, 0x2b, 0xb7, 0x43, 0x2a, 0xb4, 0xb8, 0x25, 0x88, 0x86, 0x23, 0xab, 0xdf, 0x05,
+0xbe, 0x46, 0x56, 0xd7, 0xda, 0xd6, 0x75, 0x53, 0xd9, 0xc8, 0x26, 0x8f, 0x39, 0x67, 0xed, 0x21,
+0x53, 0x1c, 0x9c, 0x89, 0x46, 0xd3, 0xfe, 0x54, 0xe6, 0x1d, 0x02, 0xb9, 0x25, 0x82, 0x66, 0xe6,
+0xf9, 0x45, 0xd9, 0x3f, 0xa5, 0x71, 0xc1, 0x46, 0x66, 0x7a, 0x27, 0x8a, 0x82, 0xc9, 0x21, 0xe9,
+0x17, 0xab, 0x6c, 0xef, 0x45, 0xe5, 0x88, 0x93, 0x87, 0x80, 0xb3, 0x85, 0x25, 0x96, 0x19, 0x41,
+0xab, 0xd6, 0xba, 0x92, 0x76, 0x21, 0x8a, 0x58, 0xbd, 0xe2, 0x4b, 0xec, 0x45, 0x59, 0x2c, 0x13,
+0x1a, 0xb5, 0x13, 0x25, 0x44, 0xe7, 0x71, 0x26, 0x0a, 0x34, 0x33, 0xb9, 0x57, 0x15, 0xa4, 0x90,
+0x60, 0x11, 0x05, 0x8e, 0xc8, 0x8e, 0x74, 0x52, 0x4b, 0x31, 0x71, 0xeb, 0x66, 0x7e, 0xee, 0xb1,
+0x0a, 0x21, 0x52, 0xc0, 0x1a, 0xe9, 0xa1, 0x5a, 0xe3, 0x3a, 0x24, 0xfb, 0xf3, 0x1e, 0xd6, 0x83,
+0x1d, 0xfb, 0x81, 0xa8, 0x91, 0x60, 0x9e, 0xbc, 0x59, 0x20, 0xc9, 0x9e, 0x71, 0x19, 0x83, 0x2b,
+0x6a, 0x48, 0x4e, 0x6b, 0x46, 0x82, 0x89, 0xda, 0x60, 0xff, 0x1a, 0x46, 0x94, 0x55, 0xda, 0xe5,
+0x99, 0xfa, 0x84, 0xd7, 0x3b, 0xb9, 0xa5, 0x34, 0x87, 0x86, 0x5e, 0x6d, 0x75, 0x9a, 0xe7, 0x09,
+0xb8, 0xe6, 0x71, 0x15, 0x10, 0x56, 0xd7, 0xc1, 0xc8, 0xb2, 0x62, 0xbc, 0xec, 0xe0, 0x94, 0xa0,
+0xcd, 0xb4, 0x04, 0xa9, 0xc3, 0x51, 0xee, 0xf8, 0x2e, 0x42, 0x9a, 0xaa, 0x34, 0xd3, 0xb9, 0xb0,
+0x36, 0xf9, 0x47, 0xc1, 0x07, 0x49, 0xde, 0xb8, 0x32, 0x8a, 0x87, 0x68, 0x56, 0x9a, 0x35, 0x79,
+0xd1, 0xac, 0x49, 0x38, 0xc6, 0xfe, 0xfd, 0xdf, 0x6f, 0x3c, 0xda, 0x48, 0xbd, 0x23, 0xfd, 0x85,
+0xf0, 0x96, 0xee, 0x1c, 0x27, 0x18, 0x86, 0xa6, 0xf0, 0x7b, 0xd8, 0x3c, 0xc7, 0x22, 0x3e, 0x2f,
+0xac, 0xb1, 0x37, 0xbd, 0x84, 0x4b, 0xe3, 0x92, 0x82, 0xd0, 0x25, 0x14, 0x22, 0x65, 0xed, 0xeb,
+0xef, 0xb9, 0xb6, 0xe4, 0x95, 0x18, 0x0d, 0x2b, 0x8d, 0x4f, 0xaf, 0xc0, 0xa0, 0x05, 0x8b, 0x35,
+0x5b, 0x94, 0xb2, 0x68, 0x26, 0x4f, 0x4a, 0x9e, 0x85, 0x0e, 0x46, 0xe0, 0x4f, 0x60, 0x66, 0x01,
+0xa4, 0x39, 0xe8, 0x8b, 0x2a, 0x50, 0xf5, 0x18, 0x70, 0xe2, 0xfc, 0xd6, 0xbe, 0xd3, 0x46, 0x4b
+
+ciphertext =
+0x75, 0x95, 0xb3, 0x48, 0x38, 0xf9, 0xe4, 0x88, 0xec, 0xf8, 0x3b, 0x09, 0x40, 0xd4, 0xd6, 0xea,
+0xf1, 0x80, 0x6d, 0xfb, 0xba, 0x9e, 0xee, 0xac, 0x6a, 0xf9, 0x8f, 0xb6, 0xe1, 0xff, 0xea, 0x19,
+0x17, 0xc2, 0x77, 0x8d, 0xc2, 0x8d, 0x6c, 0x89, 0xd1, 0x5f, 0xa6, 0xf3, 0x2c, 0xa7, 0x6a, 0x7f,
+0x50, 0x1b, 0xc9, 0x4d, 0xb4, 0x36, 0x64, 0x6e, 0xa6, 0xd9, 0x39, 0x8b, 0xcf, 0x8e, 0x0c, 0x55,
+0x4d, 0xb8, 0xe8, 0xf5, 0xb6, 0x5a, 0xd4, 0x49, 0x63, 0x24, 0xa2, 0xdf, 0xf0, 0x7a, 0x1c, 0x3b,
+0x74, 0x0c, 0x1d, 0x9b, 0xe2, 0x2b, 0x31, 0xb5, 0xe3, 0xca, 0x9f, 0xe4, 0x23, 0xe8, 0x64, 0x83,
+0x1a, 0xf1, 0xaa, 0xbf, 0xd0, 0x6a, 0xd6, 0x43, 0xd1, 0x2d, 0x2b, 0x7f, 0x38, 0x8d, 0x55, 0xd8,
+0xb2, 0xa9, 0x96, 0xec, 0xf5, 0xf9, 0x56, 0x50, 0x65, 0xc8, 0x63, 0x35, 0x22, 0xb0, 0xf4, 0x11,
+0xf3, 0x96, 0x16, 0xc3, 0x55, 0x90, 0xd0, 0x42, 0x3a, 0x5b, 0xc7, 0xfa, 0x67, 0x9b, 0x9f, 0xbd,
+0xca, 0xa5, 0x89, 0xd1, 0xe6, 0xfb, 0xbe, 0x3c, 0x7a, 0x29, 0xcb, 0xd7, 0xd2, 0xaf, 0xfc, 0x2d,
+0x1e, 0xb2, 0x6c, 0x4f, 0x5e, 0x31, 0x67, 0x6f, 0xa9, 0x8e, 0x54, 0x1f, 0xdb, 0x87, 0xd4, 0x11,
+0xa4, 0x99, 0x50, 0xcc, 0x52, 0xd0, 0xfa, 0x43, 0x70, 0x03, 0xa6, 0xff, 0xe0, 0xcb, 0x22, 0xbd,
+0xf3, 0x66, 0xf2, 0x4d, 0x82, 0x13, 0x94, 0x28, 0x89, 0xae, 0x82, 0xdc, 0xa5, 0x3e, 0xf2, 0xd2,
+0x01, 0xda, 0xef, 0xb4, 0x81, 0x77, 0x86, 0x29, 0x35, 0xad, 0xca, 0x14, 0x84, 0xdb, 0x86, 0xbd,
+0x8c, 0xf0, 0x8b, 0xc4, 0x2a, 0xb0, 0x87, 0xd8, 0x0b, 0xcd, 0x2c, 0x32, 0xe1, 0xce, 0xca, 0x15,
+0x82, 0x6e, 0xf5, 0xc8, 0x20, 0x38, 0xa2, 0x60, 0xaf, 0xe1, 0xdc, 0xe7, 0x7b, 0xa4, 0x75, 0x4b,
+0xf2, 0xd1, 0xfb, 0x25, 0x36, 0xbe, 0x84, 0x67, 0x94, 0x06, 0x20, 0xc0, 0x21, 0x30, 0x29, 0xdf,
+0x63, 0xf5, 0x56, 0x3a, 0x07, 0xef, 0x8d, 0xad, 0x62, 0x0f, 0x60, 0xbe, 0xb0, 0x8e, 0x10, 0x27,
+0xc6, 0x46, 0x90, 0xe5, 0x59, 0xaa, 0x16, 0x55, 0xca, 0x68, 0x6c, 0xbf, 0x19, 0x4e, 0xcd, 0xe8,
+0xb4, 0xf8, 0x94, 0xc4, 0x55, 0x9d, 0x7c, 0x3c, 0x06, 0x4e, 0x1a, 0x34, 0x1b, 0x16, 0x80, 0xe4,
+0x56, 0x98, 0xd4, 0xaa, 0xee, 0x66, 0xea, 0x91, 0x71, 0x44, 0xbd, 0xcb, 0xb1, 0xc5, 0x57, 0x49,
+0xa8, 0x4e, 0xe2, 0x03, 0xc6, 0xd5, 0x39, 0xd8, 0x69, 0xa8, 0xa0, 0xad, 0xad, 0x0d, 0x8d, 0xa7,
+0x80, 0xd3, 0xba, 0xc1, 0xae, 0xfe, 0xf5, 0xa2, 0x55, 0x8d, 0xa6, 0x2f, 0xb1, 0x4c, 0x67, 0x3e,
+0xb4, 0xaa, 0xed, 0xab, 0x89, 0xbc, 0xa5, 0x6c, 0x96, 0xe1, 0xc2, 0x27, 0x80, 0x55, 0xe3, 0x1b,
+0x5c, 0x20, 0xad, 0x02, 0x83, 0xa1, 0xc9, 0x51, 0xbd, 0x63, 0xf6, 0x08, 0x64, 0x38, 0x75, 0x7c,
+0x50, 0xd9, 0xae, 0xbb, 0x1b, 0xab, 0x33, 0xd5, 0x61, 0xf1, 0xda, 0xe4, 0x52, 0x6d, 0x97, 0x3c,
+0xdb, 0xec, 0x62, 0x37, 0x5b, 0xe9, 0x84, 0xda, 0x26, 0x26, 0xa2, 0xc2, 0x00, 0x67, 0x50, 0x82,
+0x0c, 0xa2, 0xd4, 0xb0, 0xc9, 0xe7, 0x6e, 0x2a, 0x2a, 0xaa, 0x5a, 0x8a, 0x3c, 0xfa, 0xb8, 0xd6,
+0x95, 0xdf, 0xb5, 0x20, 0x08, 0x65, 0xf4, 0x2f, 0x49, 0x2a, 0xeb, 0x06, 0xd4, 0x1a, 0x3a, 0x3d,
+0xc7, 0xfe, 0xd2, 0x5c, 0xb5, 0x00, 0x14, 0x67, 0x32, 0xb2, 0x17, 0xe2, 0x17, 0x50, 0x97, 0xf0,
+0x11, 0xb5, 0x1a, 0xe4, 0xa9, 0xe1, 0x40, 0x21, 0xbb, 0x75, 0x96, 0xb2, 0xc1, 0x90, 0x8a, 0x66,
+0xfd, 0x2c, 0x1a, 0xa6, 0xc0, 0x4a, 0x53, 0xcb, 0x3e, 0x10, 0x0f, 0x0a, 0x73, 0x3c, 0x6b, 0x4f,
+0xba, 0x14, 0x57, 0xce, 0xc8, 0xb7, 0xd8, 0x33, 0xf1, 0xc4, 0xba, 0x02, 0x13, 0xaa, 0xc6, 0x15,
+0x9e, 0xd6, 0xfd, 0x77, 0x05, 0x81, 0x92, 0x61, 0x3b, 0x35, 0x3f, 0xbd, 0x38, 0x22, 0x2a, 0x5f,
+0xc3, 0x09, 0xc5, 0x73, 0x22, 0x2d, 0x27, 0x8a, 0x42, 0xac, 0x06, 0xe2, 0x8b, 0x9e, 0x3d, 0x73,
+0xfb, 0xf2, 0x71, 0x06, 0x07, 0x26, 0xc7, 0x25, 0xdd, 0x19, 0x7a, 0x54, 0xd8, 0xb8, 0x66, 0x6b,
+0x73, 0xad, 0xc8, 0xa2, 0x24, 0x39, 0x4a, 0xab, 0xdc, 0x5a, 0x6e, 0x32, 0xd9, 0x4a, 0x12, 0xe4,
+0xbd, 0x39, 0xf8, 0x72, 0x6a, 0xdc, 0x46, 0xfb, 0x18, 0x08, 0x01, 0xc5, 0xd3, 0xe7, 0xa2, 0xe9,
+0xf4, 0xe4, 0xcc, 0xaf, 0x91, 0x1c, 0xc7, 0x57, 0xdc, 0x18, 0x53, 0x2a, 0x66, 0xeb, 0x29, 0xf9,
+0xc5, 0x0e, 0x5a, 0x1c, 0x0d, 0xcc, 0xca, 0xb1, 0x67, 0x75, 0xff, 0x91, 0x58, 0x71, 0xff, 0x01,
+0x56, 0xaa, 0x51, 0x75, 0xfc, 0x61, 0x8a, 0x2a, 0x1c, 0xb3, 0x0a, 0x4b, 0x9a, 0xea, 0xe3, 0xc4,
+0x2a, 0x07, 0xd2, 0xce, 0x6d, 0xfc, 0x34, 0xf8, 0xb0, 0xe9, 0xe3, 0x4b, 0x71, 0x1f, 0x5f, 0x0e,
+0xb9, 0x87, 0x25, 0x1c, 0xad, 0x7b, 0x52, 0xa0, 0x56, 0xcf, 0x90, 0xbe, 0x7d, 0xc0, 0x6c, 0x34,
+0x28, 0x49, 0x77, 0xd4, 0x66, 0x12, 0x40, 0xa9, 0xd4, 0x32, 0xbe, 0x10, 0xad, 0x11, 0x73, 0xed,
+0x10, 0x60, 0xc5, 0x76, 0x63, 0xe8, 0x1a, 0x12, 0x26, 0x94, 0xa4, 0xa7, 0xee, 0xc3, 0xd3, 0x47,
+0xb6, 0x2f, 0xa1, 0x18, 0xe5, 0x7a, 0xf4, 0x85, 0x97, 0x1e, 0x09, 0xdf, 0xd6, 0x92, 0xc5, 0x2e,
+0x3e, 0xe5, 0xa9, 0x70, 0x7b, 0x89, 0x91, 0x5b, 0x72, 0x9a, 0x53, 0x5c, 0xdd, 0xb9, 0xd5, 0xe0,
+0xab, 0xb3, 0xc5, 0x14, 0x74, 0xcb, 0x67, 0xdc, 0xbb, 0x7c, 0x98, 0x31, 0xde, 0x2a, 0x61, 0x79,
+0x48, 0xdf, 0xb5, 0x1f, 0xb6, 0x3f, 0xbd, 0x15, 0xc8, 0xdf, 0x69, 0xc1, 0x11, 0xfc, 0xd2, 0xcf,
+0x33, 0xac, 0xe3, 0xdf, 0xc9, 0x26, 0xc7, 0x3c, 0x3d, 0xa8, 0x2b, 0xf1, 0xb7, 0x34, 0x01, 0x9e,
+0x53, 0x5a, 0x98, 0xe7, 0x45, 0x3a, 0x46, 0x90, 0xe1, 0xa3, 0x5f, 0xd3, 0xc4, 0xbc, 0x64, 0xea,
+0x9d, 0x90, 0xcc, 0xfc, 0x35, 0xa3, 0xd1, 0x8b, 0xc1, 0x9b, 0x6f, 0xce, 0xdb, 0xe7, 0x43, 0x3b,
+0x3d, 0x2e, 0xff, 0xc5, 0x81, 0x27, 0x2f, 0xd2, 0x66, 0x85, 0x7c, 0x8c, 0x3d, 0x4f, 0x3d, 0xca,
+0xce, 0x3a, 0xdf, 0xbc, 0xa2, 0x76, 0x3b, 0xe9, 0xc0, 0xf0, 0x22, 0xd8, 0x3c, 0xb8, 0x67, 0x7b,
+0x9b, 0xf1, 0x8d, 0x30, 0x0c, 0x1a, 0xd9, 0xe1, 0xff, 0x85, 0x26, 0xf1, 0xe8, 0x99, 0x92, 0x24,
+0xec, 0xb0, 0x7a, 0x20, 0xe3, 0x36, 0xe0, 0xe7, 0xc6, 0x9d, 0xfe, 0x7e, 0x52, 0x08, 0x00, 0x8c,
+0xc3, 0x8b, 0x8d, 0x3b, 0xf8, 0x07, 0x8c, 0x1e, 0x26, 0xbd, 0x1b, 0x82, 0x80, 0xe4, 0xec, 0x7a,
+0xf5, 0x3b, 0xb6, 0x2a, 0x59, 0xf0, 0x0c, 0x3d, 0x36, 0xaf, 0x8a, 0x59, 0xc1, 0x57, 0xc1, 0x9c,
+0xf1, 0x6e, 0x81, 0x98, 0xc2, 0x18, 0x0a, 0xb8, 0xe3, 0xe8, 0xa6, 0xd6, 0x54, 0x3a, 0xfd, 0xb2,
+0x3e, 0x13, 0x3e, 0xfb, 0xf9, 0x34, 0xc4, 0x8c, 0x6f, 0xbe, 0x11, 0x5b, 0x2d, 0x81, 0x7b, 0x20,
+0xc9, 0xd3, 0xe6, 0x71, 0x3e, 0xae, 0xbf, 0x23, 0x09, 0xa0, 0x87, 0xe7, 0x49, 0x2d, 0xc9, 0x6a,
+0x8d, 0xa3, 0x5e, 0x8e, 0xeb, 0x18, 0x33, 0x3a, 0xf8, 0x00, 0x3d, 0x91, 0xf0, 0x6c, 0x80, 0x38,
+0x3b, 0x0c, 0xa1, 0xb1, 0x17, 0xb1, 0xe0, 0x6d, 0x63, 0x7c, 0xa4, 0xf5, 0x9a, 0x65, 0xc8, 0x3e,
+0x09, 0xc5, 0x57, 0x79, 0x7a, 0x2a, 0x17, 0x8b, 0xbb, 0xe2, 0x75, 0xb8, 0x87, 0x14, 0x7b, 0xc6,
+0x21, 0xa8, 0x9e, 0x31, 0xdc, 0x15, 0xee, 0x43, 0xf6, 0xc0, 0x11, 0x30, 0xa8, 0x45, 0xd3, 0x4b,
+0x61, 0xfe, 0x9a, 0x19, 0xae, 0x01, 0x40, 0xf7, 0x56, 0xcc, 0xc6, 0xa9, 0x35, 0x10, 0xe7, 0x58,
+0xbb, 0x13, 0x79, 0x19, 0x11, 0x47, 0x90, 0xf4, 0xa3, 0x40, 0xf2, 0xa1, 0xe0, 0xd0, 0xb0, 0xe4,
+0xca, 0xf3, 0x03, 0x3a, 0xd5, 0xd9, 0x67, 0xbc, 0x35, 0x6d, 0x74, 0xa0, 0xd2, 0x10, 0x9a, 0x5e,
+0x14, 0x7e, 0xb9, 0x10, 0x17, 0x1c, 0x1d, 0x44, 0x31, 0xe4, 0xcc, 0xa6, 0x95, 0xd2, 0x45, 0x1a,
+0xfc, 0x9a, 0x7c, 0x62, 0xf2, 0xd8, 0xc4, 0x4b, 0x4c, 0x87, 0x13, 0xb7, 0x61, 0xe5, 0x7e, 0xa7,
+0x47, 0xac, 0x97, 0xf0, 0x86, 0x2b, 0xe6, 0x1e, 0x8c, 0xd0, 0x66, 0x86, 0xfa, 0x18, 0x1f, 0x12,
+0xa7, 0x84, 0x6f, 0x0d, 0x66, 0x1e, 0xe5, 0xf3, 0xb8, 0x1b, 0x37, 0xe4, 0x9a, 0x12, 0x81, 0x10,
+0x18, 0xad, 0xdd, 0x9d, 0x5a, 0x4b, 0xce, 0xf5, 0xcb, 0x31, 0x6d, 0x2e, 0xa5, 0x82, 0x40, 0x87,
+0x5c, 0x08, 0x62, 0xc2, 0xc2, 0x5d, 0xea, 0x78, 0x0a, 0xc1, 0x96, 0x99, 0xe5, 0xf4, 0x12, 0x5c,
+0xf1, 0xee, 0x70, 0x59, 0xc6, 0x5e, 0xc5, 0xfa, 0xb3, 0xa3, 0x62, 0x71, 0xd8, 0x22, 0x6a, 0x99,
+0xf9, 0xb7, 0xbe, 0x58, 0x45, 0x9a, 0x5a, 0xc1, 0xa9, 0x3f, 0x99, 0x7a, 0x16, 0x46, 0x52, 0x21,
+0x4b, 0x0c, 0x52, 0xce, 0xa6, 0x6c, 0x44, 0xf7, 0x77, 0xc2, 0x10, 0x11, 0x13, 0xe2, 0x19, 0x2e,
+0x5e, 0xb5, 0x4a, 0x5b, 0xfc, 0x66, 0x9d, 0xe1, 0xd0, 0x9d, 0xde, 0x46, 0xf2, 0xad, 0x35, 0x97,
+0x64, 0xa9, 0x05, 0x0e, 0x3b, 0x0f, 0xf9, 0xc7, 0xe0, 0xcd, 0x3b, 0x8c, 0xff, 0x6b, 0xde, 0xb0,
+0x7f, 0x3e, 0x1f, 0x3f, 0x7b, 0x66, 0xbd, 0x52, 0x40, 0x18, 0xde, 0x91, 0x61, 0xca, 0xae, 0x40,
+0x56, 0x9b, 0x46, 0x5f, 0xd9, 0x2f, 0x13, 0x62, 0x7e, 0x22, 0xec, 0x4b, 0x64, 0x8d, 0x21, 0xa2,
+0xe9, 0x83, 0xbb, 0xec, 0x7f, 0xd9, 0xb4, 0xfb, 0x4f, 0x21, 0x9e, 0xb4, 0x66, 0x15, 0x13, 0x95,
+0x0f, 0x50, 0xb4, 0x9f, 0x77, 0xe8, 0xad, 0x24, 0x0e, 0x00, 0xb3, 0x73, 0x29, 0xd0, 0xc4, 0x25,
+0xf7, 0x91, 0xe6, 0xac, 0xf4, 0x5f, 0x7f, 0xac, 0xd7, 0x68, 0x6b, 0x94, 0xd8, 0x7a, 0xcb, 0xb8,
+0xd8, 0xcb, 0x24, 0x06, 0x88, 0x2e, 0x8e, 0x91, 0xaf, 0xce, 0x6f, 0x36, 0x2f, 0x2d, 0x1a, 0xac,
+0xcc, 0x06, 0xb4, 0x0e, 0x66, 0x6e, 0x79, 0x15, 0xe5, 0xaa, 0x33, 0xeb, 0xb1, 0xe5, 0xa3, 0x62,
+0x7a, 0x76, 0xfc, 0x4a, 0xbd, 0xa2, 0xbe, 0x85, 0x44, 0x6c, 0x31, 0xae, 0x5b, 0xd9, 0x85, 0x5e,
+0xb7, 0x88, 0xdb, 0x29, 0xa1, 0x1e, 0x78, 0x98, 0x56, 0xbf, 0xfb, 0x4c, 0x63, 0xac, 0x96, 0xfb,
+0xa1, 0x18, 0x91, 0xc2, 0x21, 0x90, 0x7c, 0xfa, 0x9d, 0x6d, 0x09, 0xb9, 0xae, 0x9e, 0x90, 0xf3,
+0x33, 0x31, 0x95, 0xa3, 0xf4, 0xc1, 0xfa, 0x89, 0xad, 0x6d, 0x30, 0x1c, 0x42, 0x5b, 0x56, 0x6f,
+0x85, 0x26, 0x6a, 0xf6, 0x95, 0xf6, 0x3c, 0xbc, 0x9b, 0xb1, 0x70, 0x50, 0xeb, 0x9e, 0x40, 0xa2,
+0x97, 0x50, 0x2b, 0x90, 0x7b, 0x38, 0x64, 0xf1, 0xae, 0xa1, 0x23, 0xeb, 0x34, 0x22, 0x1a, 0x97,
+0x9d, 0xdb, 0x48, 0x44, 0x7d, 0x1a, 0x56, 0xfa, 0xdd, 0x18, 0xc9, 0xac, 0xe9, 0x2a, 0x98, 0x97,
+0x48, 0xff, 0x79, 0x66, 0x44, 0xfd, 0x2e, 0xa6, 0x15, 0x0b, 0x51, 0x3e, 0x0a, 0xaf, 0x62, 0x16,
+0x1a, 0x37, 0xab, 0x72, 0xa5, 0xf1, 0x0b, 0xa7, 0x8c, 0x00, 0xf2, 0xaa, 0xd3, 0x34, 0x01, 0xb1,
+0xd0, 0x2c, 0x88, 0xb8, 0x25, 0xd6, 0x62, 0x02, 0x52, 0xa4, 0x4a, 0xa2, 0xb1, 0xe3, 0x07, 0x91,
+0x41, 0x30, 0x55, 0x2f, 0x14, 0x61, 0x29, 0xd0, 0x94, 0x1e, 0x4e, 0xe3, 0x02, 0x39, 0xc9, 0xb1,
+0xfc, 0x43, 0xec, 0x83, 0x28, 0xf1, 0x98, 0x0e, 0xe9, 0x26, 0x79, 0x1c, 0x48, 0xa9, 0x22, 0x21,
+0x4a, 0x82, 0xaf, 0x43, 0x35, 0xf4, 0x9c, 0x39, 0x08, 0x8b, 0x93, 0xb8, 0x42, 0x40, 0x7b, 0x99,
+0x4c, 0xfa, 0x63, 0x90, 0x4e, 0x31, 0x5f, 0x9f, 0x60, 0x60, 0xa7, 0x1b, 0xb8, 0x38, 0x83, 0x63,
+0xb3, 0xe7, 0x2e, 0xcc, 0x1a, 0x21, 0xdd, 0x4b, 0xfb, 0x62, 0x2c, 0x30, 0xae, 0x15, 0x6b, 0xe2,
+0x37, 0x63, 0xc8, 0xa1, 0x16, 0x57, 0x83, 0x14, 0xcc, 0xae, 0xe4, 0x31, 0x1b, 0x06, 0xf7, 0xbe,
+0xf8, 0x56, 0xef, 0xd4, 0x60, 0x9e, 0x68, 0x0c, 0xa0, 0x82, 0x7e, 0x71, 0x87, 0x9e, 0xd2, 0xa7,
+0x5d, 0x86, 0xc6, 0x3d, 0x88, 0x4a, 0xd9, 0x01, 0x1e, 0x44, 0xa1, 0xc0, 0x91, 0x42, 0xd2, 0xfc,
+0xab, 0xf2, 0x7a, 0x94, 0x16, 0xf1, 0x39, 0x50, 0x83, 0x1c, 0x65, 0x9d, 0xc3, 0x26, 0x93, 0xdf,
+0x65, 0x0c, 0xe3, 0x83, 0xb5, 0x7f, 0x72, 0x73, 0xef, 0xd7, 0x62, 0xe9, 0x7f, 0xe2, 0xd1, 0xcc,
+0x9e, 0x77, 0x9f, 0xab, 0x30, 0x26, 0x2a, 0x2c, 0x18, 0xb1, 0x3c, 0x64, 0xcf, 0x54, 0x49, 0x75,
+0xe1, 0xbe, 0x51, 0xdc, 0xaa, 0xdf, 0xeb, 0xc0, 0x41, 0xc7, 0x24, 0x4b, 0xe3, 0xe0, 0xe7, 0xbc,
+0xed, 0x32, 0x15, 0x9f, 0x4b, 0x2f, 0x17, 0xc1, 0xce, 0x39, 0x38, 0x83, 0xcb, 0x97, 0x30, 0x7b,
+0x82, 0x46, 0x65, 0x55, 0x2d, 0xd8, 0x16, 0xa7, 0xd3, 0x33, 0x73, 0x5a, 0xb2, 0xe0, 0xae, 0xfc,
+0x12, 0x8a, 0xf4, 0x56, 0xd9, 0x7b, 0xd2, 0x02, 0xcf, 0x99, 0x37, 0x04, 0x56, 0x90, 0xab, 0x10,
+0x82, 0x3e, 0xcc, 0x2c, 0x8d, 0x53, 0x67, 0x9b, 0x43, 0x59, 0xc0, 0x80, 0xec, 0x18, 0x5e, 0x03,
+0x04, 0x5d, 0x1d, 0x5f, 0xb4, 0x03, 0x8f, 0xc7, 0x38, 0x10, 0x6c, 0xd7, 0xfe, 0x8f, 0x2c, 0xd4,
+0x0a, 0x1e, 0x47, 0x5f, 0x2a, 0x26, 0xd3, 0x4b, 0x3e, 0x46, 0x87, 0xd4, 0x94, 0xba, 0xe8, 0x19,
+0x89, 0x90, 0x70, 0x90, 0xa0, 0xee, 0x8d, 0x74, 0x87, 0x1c, 0x35, 0x6b, 0x48, 0x94, 0x3d, 0x80,
+0x4c, 0x8c, 0x84, 0x35, 0x86, 0x97, 0xb4, 0xe2, 0xae, 0x4c, 0xae, 0x30, 0xcf, 0x6e, 0x34, 0xa5,
+0xbb, 0xa5, 0xf5, 0xdd, 0x7e, 0xe8, 0xea, 0x37, 0x54, 0xe2, 0xc3, 0x91, 0x03, 0xcb, 0x8c, 0x4b,
+0x23, 0x73, 0x63, 0x5b, 0x35, 0x63, 0x5b, 0x89, 0xbb, 0x01, 0xce, 0x8d, 0x73, 0xa3, 0x4f, 0x89,
+0x76, 0x15, 0x5d, 0x50, 0x26, 0x01, 0x8c, 0x7b, 0x23, 0x6b, 0x84, 0xa6, 0x60, 0x44, 0x2a, 0x0b,
+0x33, 0x8f, 0x00, 0xad, 0x0e, 0x05, 0x75, 0x41, 0xae, 0x96, 0x1b, 0x2d, 0x0b, 0xe9, 0xdb, 0xba,
+0xbe, 0xe0, 0xc5, 0x65, 0x35, 0x02, 0xf2, 0x04, 0x6c, 0x3f, 0x81, 0xe0, 0x0c, 0x2c, 0xd7, 0xde,
+0xc8, 0xb2, 0x6c, 0x5d, 0x1e, 0x9b, 0xe0, 0x65, 0x1e, 0x13, 0xd8, 0x6a, 0x92, 0xa7, 0x59, 0x14,
+0x78, 0x92, 0xb7, 0x11, 0x06, 0xea, 0xc2, 0x8d, 0x61, 0x82, 0x5d, 0xfe, 0x18, 0x66, 0x02, 0x8e,
+0x7a, 0x09, 0x7f, 0xdc, 0x7e, 0xca, 0xa7, 0x76, 0x99, 0x50, 0x25, 0xf6, 0x7e, 0x30, 0xaa, 0xf7,
+0x82, 0xae, 0xfa, 0xe3, 0xdf, 0x56, 0xa9, 0xab, 0xa4, 0xa3, 0x2c, 0x4d, 0x02, 0x4c, 0x38, 0x02,
+0x2d, 0x7f, 0x37, 0x54, 0xca, 0x3f, 0x6e, 0x7b, 0x0e, 0xb2, 0xa5, 0x68, 0x76, 0x98, 0x85, 0xd4,
+0x83, 0x5b, 0x26, 0xb3, 0xcd, 0xe9, 0x0c, 0xdf, 0xa5, 0x35, 0x4d, 0xd8, 0x5c, 0x59, 0x53, 0xe8,
+0x81, 0xf0, 0x33, 0xc9, 0xc9, 0xef, 0x84, 0xf6, 0x5c, 0xf8, 0x6e, 0x32, 0xe7, 0x20, 0x94, 0x79
+
+cipher_key =
+0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2, 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A
+
+auth_key =
+0xaf, 0x96, 0x42, 0xf1, 0x8c, 0x50, 0xdc, 0x67, 0x1a, 0x43, 0x47, 0x62, 0xc7, 0x04, 0xab, 0x05,
+0xf5, 0x0c, 0xe7, 0xa2, 0xa6, 0x23, 0xd5, 0x3d, 0x95, 0xd8, 0xcd, 0x86, 0x79, 0xf5, 0x01, 0x47,
+0x4f, 0xf9, 0x1d, 0x9d, 0x36, 0xf7, 0x68, 0x1a, 0x64, 0x44, 0x58, 0x5d, 0xe5, 0x81, 0x15, 0x2a,
+0x41, 0xe4, 0x0e, 0xaa, 0x1f, 0x04, 0x21, 0xff, 0x2c, 0xf3, 0x73, 0x2b, 0x48, 0x1e, 0xd2, 0xf7,
+0xf6, 0xd9, 0xaf, 0xbf, 0x08, 0x3b, 0xbb, 0x19, 0x5f, 0xf6, 0x7d, 0x25, 0x85, 0xdf, 0x6b, 0x54,
+0xd0, 0xe7, 0x4b, 0x9e, 0xc7, 0xef, 0xca, 0x48, 0x6f, 0x21, 0xd7, 0x51, 0xc8, 0x21, 0xc1, 0x15,
+0xe8, 0x38, 0x36, 0x58, 0x39, 0xd9, 0x9a, 0xc5, 0xe7, 0x3b, 0xc4, 0x47, 0xe2, 0xbd, 0x80, 0x73,
+0xf8, 0xd1, 0x9a, 0x5e, 0x4b, 0xfb, 0x52, 0x6b, 0x50, 0xaf, 0x8b, 0xb7, 0xb5, 0x2c, 0x52, 0x84
+
+iv =
+0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+
+####################
+# sha_hmac_buff_32 #
+####################
+[sha1_hmac_buff_32]
+digest =
+0xAD, 0x0F, 0xE8, 0xAD, 0x30, 0xD6, 0x0A, 0x2B, 0x6B, 0x3D, 0x24, 0x79, 0x6D, 0xB6, 0x80, 0x43,
+0x1D, 0xD1, 0x8A, 0xA8
+
+[sha224_hmac_buff_32]
+digest =
+0xB4, 0x1C, 0xF4, 0x73, 0x04, 0x62, 0x2D, 0xAF, 0x59, 0x05, 0xE8, 0x55, 0xE4, 0x8B, 0x24, 0xB4,
+0x0F, 0x4C, 0x3D, 0xBE, 0xFF, 0xFF, 0x8D, 0x19, 0x0D, 0x38, 0xA1, 0xFC
+
+[sha256_hmac_buff_32]
+digest =
+0x77, 0x54, 0x8D, 0x73, 0x7D, 0xB9, 0x78, 0x2F, 0x3D, 0xEE, 0xA2, 0xE7, 0xC9, 0x03, 0xF3, 0xB0,
+0x17, 0x8E, 0x71, 0x81, 0xB7, 0xA7, 0x5F, 0x9F, 0xF3, 0xED, 0x55, 0x1A, 0x69, 0x68, 0x52, 0xE5
+
+[sha384_hmac_buff_32]
+digest =
+0x1A, 0x3D, 0x1A, 0xD8, 0x0C, 0x04, 0x4B, 0x14, 0x9F, 0xF6, 0x9B, 0x80, 0x5C, 0xA2, 0x78, 0xFC,
+0xE6, 0xA3, 0xBA, 0x32, 0x09, 0x04, 0x52, 0x24, 0x76, 0xC9, 0xFC, 0x70, 0xA6, 0x00, 0xB4, 0x41,
+0xA1, 0x48, 0x1D, 0xCC, 0x54, 0x59, 0xE6, 0x94, 0x35, 0x36, 0xCC, 0xAB, 0x4B, 0xF5, 0xE4, 0xCA
+
+[sha512_hmac_buff_32]
+digest =
+0xED, 0xD7, 0x96, 0x0B, 0xC4, 0x8F, 0xBF, 0xF3, 0xEA, 0x7D, 0x5D, 0x57, 0x2A, 0x50, 0x50, 0xAC,
+0x33, 0xF2, 0xED, 0x56, 0x0A, 0xF7, 0x97, 0x4A, 0x36, 0x8D, 0x3B, 0xA5, 0x9F, 0x7A, 0x6D, 0x57,
+0xE0, 0x94, 0x10, 0xB9, 0x15, 0xC6, 0x1B, 0x7F, 0x17, 0xB3, 0x48, 0xB5, 0xF9, 0x93, 0xD8, 0xCA,
+0x74, 0x56, 0xED, 0xBD, 0x55, 0x14, 0xD2, 0xB6, 0x36, 0x07, 0x06, 0x73, 0x66, 0x41, 0x50, 0x84
+
+####################
+# sha_hmac_buff_64 #
+####################
+[sha1_hmac_buff_64]
+digest =
+0xC9, 0xC3, 0x65, 0x23, 0xCE, 0x9D, 0x6E, 0x35, 0xDC, 0x65, 0x3B, 0x21, 0x33, 0xFF, 0x1E, 0x74,
+0xAF, 0x48, 0x24, 0xD9
+
+[sha224_hmac_buff_64]
+digest =
+0x9F, 0xC3, 0x34, 0x0F, 0xB0, 0xA5, 0x4D, 0x89, 0xA4, 0x20, 0x01, 0xDF, 0x40, 0xAE, 0xA2, 0x41,
+0x4E, 0x97, 0x38, 0xBA, 0xA7, 0xF9, 0x94, 0x1F, 0x56, 0x4E, 0x00, 0x0A
+
+[sha256_hmac_buff_64]
+digest =
+0x66, 0x52, 0xF3, 0xEB, 0xC7, 0x06, 0xEF, 0x21, 0x82, 0x7C, 0xCF, 0x7F, 0x5B, 0x6B, 0x77, 0x2F,
+0x28, 0x61, 0x06, 0x2B, 0x67, 0x4B, 0x2D, 0x62, 0x71, 0x53, 0xBE, 0x12, 0xF9, 0x5B, 0xD8, 0x64
+
+[sha384_hmac_buff_64]
+digest =
+0x63, 0x50, 0x09, 0x45, 0x87, 0x02, 0x7F, 0x85, 0xD3, 0xC8, 0xF2, 0x26, 0x01, 0xE8, 0x2C, 0x28,
+0xB4, 0x86, 0x5A, 0x8E, 0x8E, 0x95, 0x27, 0x6A, 0x74, 0xF0, 0x29, 0xC2, 0x2D, 0x13, 0x91, 0xD5,
+0x38, 0xDB, 0x06, 0x7E, 0xB4, 0x8C, 0xD6, 0x30, 0x54, 0x86, 0xE7, 0xBB, 0x5C, 0xB6, 0xDD, 0x90
+
+[sha512_hmac_buff_64]
+digest =
+0xBE, 0x52, 0xBF, 0x69, 0x2B, 0x7F, 0xD5, 0x37, 0x48, 0x76, 0xCF, 0xBD, 0x23, 0x18, 0x45, 0x90,
+0x74, 0x25, 0x07, 0x91, 0x13, 0x37, 0xF3, 0x26, 0xEE, 0x68, 0xEC, 0xFC, 0xCB, 0x60, 0x53, 0x96,
+0x54, 0xF6, 0xE8, 0xAC, 0xF7, 0xB1, 0x52, 0x31, 0x7E, 0x5D, 0x99, 0xF3, 0x86, 0xF2, 0x98, 0x7D,
+0xD4, 0x38, 0xD9, 0xF1, 0x4C, 0x08, 0x87, 0x7F, 0xB9, 0x17, 0x97, 0x39, 0xDB, 0x68, 0x39, 0x19
+#####################
+# sha_hmac_buff_128 #
+#####################
+[sha1_hmac_buff_128]
+digest =
+0xB0, 0x6F, 0x7D, 0xB3, 0x29, 0xC3, 0x2B, 0x5D, 0xB7, 0xF1, 0x13, 0xFB, 0x9E, 0x90, 0x5D, 0xF1,
+0x48, 0xBC, 0x71, 0xA4
+
+[sha224_hmac_buff_128]
+digest =
+0xFC, 0x59, 0xAF, 0xF2, 0x83, 0x2E, 0x07, 0x08, 0xBF, 0xB4, 0x3C, 0x24, 0xA8, 0x52, 0x7B, 0x9E,
+0x92, 0x83, 0xCE, 0x96, 0xEE, 0x8B, 0x65, 0x72, 0x00, 0x12, 0xC6, 0x98
+
+[sha256_hmac_buff_128]
+digest =
+0x7E, 0xCA, 0x95, 0xD5, 0x63, 0xA8, 0xCA, 0xA6, 0xC5, 0x41, 0x75, 0x12, 0x60, 0xDF, 0xFE, 0x16,
+0x70, 0xEB, 0xCC, 0x4E, 0xEB, 0x00, 0x86, 0xF0, 0xEC, 0x45, 0x44, 0x76, 0x62, 0x55, 0x48, 0x56
+
+[sha384_hmac_buff_128]
+digest =
+0x1B, 0x2A, 0xAA, 0x7F, 0x2E, 0x58, 0x1F, 0x64, 0xB4, 0xE6, 0x29, 0xE4, 0x74, 0x78, 0x09, 0xD7,
+0xBA, 0xDD, 0x18, 0xB6, 0xE4, 0x21, 0xF5, 0x8F, 0x40, 0x45, 0x65, 0xD1, 0xBE, 0x4F, 0x7B, 0x27,
+0xF4, 0x64, 0x72, 0x55, 0x53, 0xAB, 0x39, 0x05, 0x7A, 0x6D, 0xAA, 0x12, 0x75, 0x03, 0x67, 0x4E
+
+[sha512_hmac_buff_128]
+digest =
+0x5F, 0x8F, 0xA8, 0xFA, 0xEA, 0x05, 0x29, 0xBD, 0x3B, 0xBA, 0xF6, 0xA7, 0x93, 0x9E, 0x16, 0xF1,
+0x8B, 0x10, 0x2F, 0x6D, 0x08, 0x18, 0x54, 0xD2, 0x39, 0xEB, 0xF9, 0x70, 0xCC, 0x55, 0xA0, 0xC3,
+0x08, 0x9B, 0x8E, 0x55, 0x81, 0x1A, 0xCE, 0x0D, 0x09, 0x97, 0x4E, 0x34, 0xD1, 0xE6, 0x25, 0x05,
+0x94, 0xC7, 0x05, 0x30, 0xAF, 0x2F, 0x7F, 0x54, 0xAA, 0xB8, 0xC5, 0x8E, 0x3D, 0xBB, 0xF2, 0x12
+
+#####################
+# sha_hmac_buff_256 #
+#####################
+[sha1_hmac_buff_256]
+digest =
+0xF9, 0xCA, 0x12, 0x7D, 0x60, 0x68, 0xB7, 0xAF, 0xDC, 0xAC, 0x41, 0x13, 0x1C, 0xA8, 0xC1, 0x85,
+0x65, 0x11, 0x31, 0x4C
+
+[sha224_hmac_buff_256]
+digest =
+0x49, 0xED, 0xA4, 0x27, 0x51, 0x6A, 0x46, 0xE4, 0x31, 0x12, 0x72, 0x92, 0xB8, 0x81, 0x16, 0x97,
+0x19, 0x4F, 0x3B, 0xAC, 0xD1, 0xCE, 0x06, 0x40, 0xD4, 0xEA, 0x8E, 0xC3
+
+[sha256_hmac_buff_256]
+digest =
+0xB9, 0xFB, 0x21, 0x16, 0x0C, 0x08, 0xD1, 0xE0, 0x49, 0xB8, 0xC8, 0x7E, 0xCC, 0xF0, 0xBA, 0x29,
+0x32, 0xCE, 0x53, 0x03, 0xE8, 0xFB, 0xD2, 0x44, 0xB7, 0xB9, 0xFE, 0xE8, 0x03, 0x86, 0xE2, 0x68
+
+[sha384_hmac_buff_256]
+digest =
+0x47, 0xEA, 0x51, 0xA7, 0xAD, 0xA2, 0x34, 0x3D, 0x4A, 0x3A, 0x86, 0x89, 0x78, 0x56, 0xCF, 0x21,
+0x94, 0xBF, 0x80, 0x33, 0x6B, 0x42, 0x73, 0x01, 0xAD, 0x6B, 0xE0, 0xEC, 0x10, 0xEE, 0x6E, 0xEC,
+0xED, 0x54, 0x50, 0x5E, 0x96, 0x3B, 0xE8, 0x2A, 0x8C, 0x33, 0x67, 0x9B, 0x17, 0x6C, 0xBB, 0xF8
+
+[sha512_hmac_buff_256]
+digest =
+0x01, 0xAE, 0xE7, 0x74, 0xCD, 0x86, 0x43, 0xBC, 0x8A, 0xF6, 0xAF, 0x6C, 0xDE, 0x9E, 0x9A, 0xB7,
+0x6B, 0xCF, 0x98, 0x95, 0x31, 0xE8, 0x37, 0x3B, 0x3F, 0xF3, 0xC1, 0x00, 0xA0, 0xA6, 0xE5, 0x15,
+0x60, 0x36, 0x7E, 0x7C, 0x96, 0xAB, 0x17, 0xB9, 0x79, 0x3D, 0x3E, 0x43, 0xBC, 0xA0, 0xA0, 0x8B,
+0x14, 0x14, 0x22, 0x86, 0xE9, 0xF6, 0x96, 0x38, 0x9F, 0x24, 0x45, 0x9C, 0xE8, 0x63, 0x2A, 0x22
+
+#####################
+# sha_hmac_buff_512 #
+#####################
+[sha1_hmac_buff_512]
+digest =
+0x45, 0x8D, 0x5B, 0x40, 0x0D, 0x34, 0x3A, 0x7B, 0xB2, 0xB1, 0xE7, 0x62, 0xDE, 0x2B, 0xD0, 0x46,
+0xCD, 0x4B, 0x55, 0x95
+
+[sha224_hmac_buff_512]
+digest =
+0xE1, 0x82, 0x07, 0x4F, 0x6B, 0x24, 0x4A, 0x57, 0xE9, 0x04, 0x14, 0xB1, 0x7F, 0xD2, 0x4C, 0xA0,
+0x89, 0x8B, 0xB2, 0xA2, 0x28, 0x9F, 0xFE, 0x7C, 0xD1, 0x7F, 0x35, 0x07
+
+[sha256_hmac_buff_512]
+digest =
+0xB9, 0x75, 0x4F, 0x70, 0xC7, 0x8C, 0xF2, 0x62, 0x89, 0x3C, 0x41, 0x4D, 0x1D, 0x15, 0x81, 0x2A,
+0x5A, 0xCB, 0x56, 0x62, 0xF8, 0xE9, 0x38, 0x13, 0xC9, 0x4D, 0xC3, 0x9D, 0xF0, 0x82, 0xAC, 0xD2
+
+[sha384_hmac_buff_512]
+digest =
+0x9C, 0xAE, 0x77, 0x8D, 0x7E, 0x26, 0x01, 0xA6, 0x46, 0x47, 0xDF, 0xB7, 0x23, 0x6F, 0x17, 0x6B,
+0x9F, 0x4D, 0x94, 0xBB, 0x78, 0xD8, 0x2D, 0x90, 0xB1, 0xC1, 0x65, 0x6D, 0x92, 0x4E, 0x54, 0x7A,
+0xA5, 0xF6, 0x80, 0x29, 0x82, 0x77, 0xAC, 0xC3, 0x58, 0xE5, 0x14, 0x75, 0x64, 0x9D, 0x02, 0x6E
+
+[sha512_hmac_buff_512]
+digest =
+0x33, 0xB6, 0xD1, 0xC4, 0x5F, 0xDB, 0xEF, 0xF4, 0x14, 0xE8, 0xDA, 0x07, 0x30, 0xB6, 0xC6, 0xC9,
+0x4F, 0xCF, 0x64, 0x48, 0x08, 0xA2, 0xC1, 0x9D, 0x03, 0xAD, 0x93, 0x62, 0x41, 0xB6, 0xB9, 0xEC,
+0x1B, 0xD1, 0xAC, 0xA1, 0xC5, 0x94, 0x67, 0x19, 0xA3, 0x4B, 0x53, 0xCE, 0x0C, 0x8A, 0x27, 0x07,
+0x37, 0x75, 0x93, 0xC3, 0xC6, 0x60, 0x19, 0x39, 0x9E, 0x02, 0x23, 0x9A, 0xE6, 0xA9, 0x34, 0x1A
+
+######################
+# sha_hmac_buff_1024 #
+######################
+[sha1_hmac_buff_1024]
+digest =
+0x0B, 0x26, 0x34, 0xAD, 0x56, 0x44, 0x39, 0x4A, 0x0D, 0x2F, 0x14, 0xFB, 0x60, 0x77, 0xDD, 0xFC,
+0x0B, 0x5F, 0x9F, 0x99
+
+[sha224_hmac_buff_1024]
+digest =
+0x56, 0x41, 0xC2, 0xF0, 0x73, 0x5C, 0x21, 0x13, 0x7E, 0x47, 0xCC, 0xAB, 0x21, 0x3D, 0x5E, 0xA7,
+0xC6, 0x1E, 0xFF, 0x26, 0x59, 0x0C, 0x71, 0x95, 0x72, 0x76, 0x0D, 0x00
+
+[sha256_hmac_buff_1024]
+digest =
+0x08, 0x91, 0x23, 0x89, 0x0F, 0xB0, 0xE4, 0x25, 0x9F, 0xC7, 0x46, 0x6B, 0xC3, 0x39, 0xE0, 0x9C,
+0xE2, 0xAE, 0xA3, 0xCF, 0xB8, 0xA0, 0x0A, 0xCF, 0x29, 0xEE, 0x0D, 0x83, 0x8A, 0xE5, 0xE4, 0x85
+
+[sha384_hmac_buff_1024]
+digest =
+0x38, 0xC7, 0x19, 0xA4, 0x46, 0x14, 0x79, 0xA4, 0xAB, 0x40, 0x61, 0xBC, 0xFB, 0x87, 0x16, 0xE2,
+0x08, 0x90, 0xAD, 0x33, 0x5D, 0x37, 0xB6, 0xCA, 0x80, 0xEE, 0x59, 0x9C, 0xBF, 0xA8, 0xEB, 0x78,
+0xC2, 0xE2, 0x2D, 0x6E, 0x2E, 0x98, 0x98, 0x6F, 0x07, 0x6A, 0x39, 0x57, 0x8F, 0xCE, 0xEE, 0x64
+
+[sha512_hmac_buff_1024]
+digest =
+0x17, 0x60, 0x08, 0x57, 0x43, 0x20, 0xF6, 0xB5, 0x6D, 0x0D, 0x7F, 0x7C, 0xB9, 0x09, 0x3F, 0x6D,
+0x3E, 0x75, 0x2F, 0x17, 0xDA, 0x19, 0x58, 0xF0, 0xEC, 0xED, 0x96, 0xA9, 0x57, 0x05, 0xCD, 0x23,
+0x0F, 0x1E, 0x38, 0x55, 0x2D, 0x8E, 0x36, 0x14, 0xF4, 0x99, 0x5E, 0x3C, 0x33, 0xBB, 0x99, 0xC9,
+0xCD, 0x7A, 0xF4, 0x87, 0x10, 0xB8, 0x6C, 0xB1, 0x14, 0x2D, 0xA8, 0xCE, 0xFE, 0xF8, 0x6F, 0xD9
+
+######################
+# sha_hmac_buff_2048 #
+######################
+[sha1_hmac_buff_2048]
+digest =
+0x58, 0xE4, 0xBB, 0x8D, 0x63, 0x5D, 0x23, 0xF1, 0xAB, 0xB5, 0xBD, 0xD8, 0x71, 0xC9, 0x05, 0x0A,
+0x65, 0x5D, 0x2D, 0x2D
+
+[sha224_hmac_buff_2048]
+digest =
+0xA3, 0x9A, 0x47, 0x68, 0x32, 0x3A, 0xA8, 0xE4, 0xBE, 0x23, 0xEA, 0xEE, 0x2D, 0x5E, 0x3C, 0x2E,
+0xD3, 0x99, 0xBE, 0x87, 0x19, 0x17, 0xC5, 0x13, 0x6D, 0xB7, 0x05, 0x97
+
+[sha256_hmac_buff_2048]
+digest =
+0x5B, 0x36, 0x1A, 0xF0, 0x55, 0xAC, 0xC3, 0xEA, 0x1B, 0x01, 0xCF, 0xCE, 0x89, 0x0D, 0x6A, 0xC3,
+0x5F, 0x9A, 0xD3, 0x49, 0xCC, 0xA4, 0xDF, 0xDD, 0x44, 0x1F, 0x9D, 0x6C, 0xB1, 0x92, 0xDF, 0xB9
+
+[sha384_hmac_buff_2048]
+digest =
+0x24, 0x17, 0xA2, 0x61, 0xFF, 0x46, 0xA2, 0x2E, 0xE5, 0xC3, 0xB4, 0x47, 0x10, 0x8C, 0x54, 0xD2,
+0xC2, 0x4D, 0x15, 0xA2, 0x8D, 0xEF, 0x98, 0x6E, 0xE0, 0xB1, 0x31, 0x3B, 0x7D, 0xDE, 0x41, 0x8E,
+0x98, 0xB9, 0xE9, 0xD2, 0xD8, 0xE5, 0x75, 0x6D, 0xC5, 0xF0, 0x1A, 0xC4, 0x1B, 0x8B, 0xC1, 0xA4
+
+[sha512_hmac_buff_2048]
+digest =
+0xD8, 0x77, 0x7A, 0x0F, 0x63, 0x1E, 0x92, 0x7B, 0x87, 0xCE, 0x07, 0x24, 0x7E, 0xE4, 0x36, 0x30,
+0x16, 0x76, 0x0D, 0xEC, 0xEF, 0x01, 0xF5, 0xD5, 0x44, 0xB7, 0xF3, 0x51, 0x31, 0x6A, 0xC2, 0x80,
+0xCD, 0x4C, 0x7F, 0xD4, 0xA6, 0x90, 0x85, 0xAE, 0x49, 0xB1, 0xF1, 0xB0, 0xC4, 0x16, 0x79, 0xC3,
+0xE3, 0x8B, 0x67, 0xC3, 0xAA, 0xC1, 0x9C, 0x8D, 0xE0, 0x22, 0xB3, 0xFD, 0x09, 0xD5, 0x40, 0xAC
diff --git a/app/test-crypto-perf/data/aes_cbc_192_sha.data b/app/test-crypto-perf/data/aes_cbc_192_sha.data
new file mode 100644
index 00000000..7bfe3da7
--- /dev/null
+++ b/app/test-crypto-perf/data/aes_cbc_192_sha.data
@@ -0,0 +1,504 @@
+# List of tests for AES-192 CBC:
+# 1) [sha1_hmac_buff_x]
+# 2) [sha224_hmac_buff_x]
+# 3) [sha256_hmac_buff_x]
+# 4) [sha384_hmac_buff_x]
+# 5) [sha512_hmac_buff_x]
+# where x is one of the values: 32, 64, 128, 256, 512, 1024, 2048
+
+##########
+# GLOBAL #
+##########
+plaintext =
+0xff, 0xca, 0xfb, 0xf1, 0x38, 0x20, 0x2f, 0x7b, 0x24, 0x98, 0x26, 0x7d, 0x1d, 0x9f, 0xb3, 0x93,
+0xd9, 0xef, 0xbd, 0xad, 0x4e, 0x40, 0xbd, 0x60, 0xe9, 0x48, 0x59, 0x90, 0x67, 0xd7, 0x2b, 0x7b,
+0x8a, 0xe0, 0x4d, 0xb0, 0x70, 0x38, 0xcc, 0x48, 0x61, 0x7d, 0xee, 0xd6, 0x35, 0x49, 0xae, 0xb4,
+0xaf, 0x6b, 0xdd, 0xe6, 0x21, 0xc0, 0x60, 0xce, 0x0a, 0xf4, 0x1c, 0x2e, 0x1c, 0x8d, 0xe8, 0x7b,
+0x59, 0xda, 0x19, 0x4f, 0xec, 0x07, 0x8e, 0xe2, 0xf0, 0x61, 0xf9, 0x27, 0x61, 0x6f, 0xf8, 0xdf,
+0x62, 0x4d, 0xaf, 0x06, 0xfe, 0x41, 0xa6, 0xa6, 0xf9, 0xa2, 0x06, 0x40, 0xb3, 0x04, 0xbd, 0xe6,
+0xc8, 0x17, 0xfb, 0x56, 0x6f, 0xa9, 0x3b, 0x8e, 0xa6, 0x58, 0xdc, 0x91, 0x17, 0x58, 0x42, 0x95,
+0xa3, 0x7c, 0x81, 0x78, 0xa6, 0x3d, 0x3f, 0x75, 0x74, 0x17, 0x1a, 0xd3, 0x6c, 0x2f, 0x48, 0x39,
+0x20, 0x20, 0xc1, 0x9a, 0x29, 0x84, 0x7d, 0x2d, 0x52, 0xa1, 0xf9, 0x5c, 0xf3, 0x4f, 0x91, 0xfc,
+0x75, 0xcf, 0xd6, 0x2d, 0xe7, 0x9a, 0x59, 0x6e, 0x00, 0x0e, 0x8d, 0x22, 0x17, 0xbd, 0xa0, 0xdd,
+0x79, 0x1f, 0x71, 0xe6, 0xcd, 0x2f, 0xb1, 0xb6, 0xbc, 0xc3, 0xdb, 0x02, 0x91, 0x41, 0x9b, 0x09,
+0xa9, 0xd2, 0x7e, 0xbd, 0x2c, 0x18, 0xae, 0xc0, 0x93, 0x0c, 0x02, 0x9a, 0xdb, 0x4e, 0xaa, 0xeb,
+0x84, 0x4b, 0x43, 0x5e, 0xf0, 0x98, 0xf2, 0x5f, 0x86, 0x70, 0x96, 0x90, 0x15, 0x30, 0xcf, 0x3a,
+0xc9, 0x33, 0x21, 0xec, 0x59, 0x86, 0xfc, 0x65, 0x7d, 0xbe, 0xb9, 0xf8, 0x97, 0xf9, 0x30, 0xa9,
+0x6d, 0xfc, 0x0c, 0x6e, 0x36, 0x67, 0xd5, 0xa6, 0x67, 0xd9, 0xbd, 0x9b, 0x34, 0x5d, 0xa7, 0xdd,
+0xda, 0x46, 0x33, 0x25, 0x60, 0x4a, 0x18, 0xf1, 0x55, 0x07, 0xb2, 0xb7, 0x26, 0x7b, 0xa6, 0x1e,
+0x77, 0xbe, 0x7f, 0x35, 0x46, 0xdf, 0x56, 0x9c, 0x22, 0x19, 0xc8, 0x85, 0xa2, 0x45, 0xb2, 0xad,
+0xf9, 0x26, 0x66, 0xab, 0xfc, 0x97, 0x4b, 0x51, 0x32, 0x36, 0xbc, 0xad, 0xcf, 0x54, 0x3a, 0x4f,
+0x94, 0xdb, 0xd2, 0xf9, 0x67, 0x1b, 0x3b, 0xe5, 0xb2, 0x1d, 0xc5, 0x52, 0x64, 0x2c, 0x06, 0x44,
+0xcf, 0x18, 0x83, 0xe0, 0xd8, 0x04, 0x92, 0xa9, 0xc4, 0x3c, 0x8b, 0xa3, 0x2b, 0xbc, 0x88, 0x7e,
+0xc0, 0x76, 0xa7, 0xe2, 0x7b, 0x47, 0x90, 0xf2, 0xaa, 0x0a, 0x34, 0x1b, 0x91, 0x12, 0xd2, 0xd0,
+0x82, 0x45, 0xf4, 0x57, 0xf1, 0xbd, 0x91, 0x5e, 0xab, 0x41, 0x4c, 0xdf, 0x91, 0x4c, 0xdd, 0x67,
+0x04, 0xa0, 0x98, 0x23, 0x8c, 0x24, 0xbe, 0xd6, 0x80, 0xb3, 0x6d, 0x04, 0xa1, 0x77, 0x43, 0xa5,
+0xee, 0xb7, 0xce, 0xb1, 0x48, 0x43, 0x94, 0x61, 0x15, 0x20, 0x9d, 0xce, 0xd0, 0x14, 0x95, 0x37,
+0xc8, 0x64, 0xa3, 0x2d, 0x3d, 0xe3, 0xff, 0xb4, 0x55, 0x83, 0x84, 0x41, 0x50, 0x57, 0xbd, 0x5a,
+0x0c, 0xe4, 0xda, 0x3b, 0x36, 0x4d, 0x21, 0xb5, 0x6f, 0x73, 0x2a, 0x8c, 0x78, 0x4f, 0x9b, 0x83,
+0xda, 0x11, 0x3c, 0xf0, 0xc9, 0x7e, 0xa6, 0x48, 0x34, 0x53, 0x62, 0xd3, 0x0c, 0xff, 0xb1, 0x74,
+0xd6, 0xea, 0xa5, 0xfc, 0x13, 0x1c, 0x05, 0xa8, 0xc0, 0xbc, 0x95, 0x9c, 0x8c, 0xf6, 0x8c, 0xc3,
+0xf3, 0x69, 0xab, 0x93, 0x65, 0xc0, 0xb7, 0x7e, 0xb0, 0x16, 0x7c, 0xb5, 0x5f, 0x05, 0x28, 0xc9,
+0x09, 0x4e, 0x2a, 0x32, 0x87, 0xb3, 0xab, 0xf8, 0x4c, 0xab, 0xeb, 0x3b, 0x6a, 0xa0, 0x1d, 0x7f,
+0xef, 0xe5, 0x9b, 0xa4, 0xb7, 0xd7, 0xc2, 0x5e, 0x03, 0x0f, 0x99, 0xeb, 0xb1, 0xb1, 0xa6, 0x9d,
+0x1c, 0x7c, 0x5c, 0x94, 0x8b, 0x6e, 0x11, 0x7a, 0xb3, 0x6d, 0x1e, 0x61, 0x64, 0xc3, 0x7d, 0x1c,
+0xb3, 0x54, 0x65, 0x08, 0x3b, 0xda, 0x97, 0xb9, 0x75, 0xc1, 0x2b, 0x3e, 0xa8, 0x5c, 0x3c, 0x2d,
+0x81, 0x5b, 0xbf, 0x5a, 0x13, 0x0e, 0xeb, 0x66, 0xc0, 0x0b, 0x8f, 0x04, 0x68, 0x68, 0x9b, 0xe3,
+0x0d, 0x84, 0xe0, 0xcf, 0x83, 0xd7, 0x62, 0x48, 0xc1, 0x31, 0xa5, 0xd5, 0xbc, 0xe3, 0xa3, 0xa5,
+0xb6, 0xd1, 0xfd, 0x81, 0x91, 0x4d, 0xbd, 0xc4, 0x62, 0x4f, 0xe3, 0xd5, 0x99, 0x14, 0xf1, 0xcd,
+0xf4, 0x7d, 0x13, 0xda, 0x68, 0x0a, 0xca, 0xd6, 0x82, 0x0b, 0xf6, 0xea, 0xad, 0x78, 0xa4, 0xc8,
+0x14, 0x7a, 0xec, 0x11, 0xd3, 0x16, 0x86, 0x9f, 0x17, 0x37, 0x6a, 0x06, 0x56, 0xaa, 0x1b, 0xd1,
+0xaf, 0x85, 0x95, 0x21, 0x36, 0x69, 0xec, 0x1b, 0x56, 0x84, 0x01, 0x3f, 0x4d, 0x34, 0x3d, 0x2d,
+0x38, 0x57, 0x2d, 0x7e, 0xd9, 0x7b, 0x2d, 0x81, 0x86, 0xd4, 0x7c, 0x83, 0x12, 0x1d, 0x9d, 0x27,
+0x72, 0x1b, 0x5e, 0xf4, 0x15, 0xa5, 0xcd, 0xb7, 0x5f, 0xbb, 0x49, 0xa1, 0xd9, 0xdd, 0x8d, 0xad,
+0xa9, 0x2c, 0x65, 0x18, 0x91, 0xfd, 0xd2, 0xd4, 0x09, 0x60, 0x0c, 0xfd, 0xa4, 0xe1, 0x25, 0x87,
+0x32, 0x64, 0x7b, 0x99, 0xd7, 0x61, 0x2f, 0xd4, 0x73, 0xdd, 0x85, 0x26, 0x08, 0x92, 0xc0, 0xe1,
+0x4f, 0x0c, 0x76, 0x5b, 0x26, 0x69, 0xdb, 0x78, 0x35, 0x65, 0xb9, 0x58, 0x1f, 0x9c, 0x0f, 0x18,
+0x95, 0xfe, 0x40, 0xfc, 0xf7, 0x93, 0x71, 0x70, 0x8b, 0x73, 0xdc, 0xb0, 0x88, 0x72, 0x19, 0x26,
+0x94, 0x26, 0xa7, 0xaa, 0x00, 0x72, 0x61, 0x53, 0xd2, 0x5d, 0x8f, 0x5e, 0x51, 0x88, 0x2d, 0xa4,
+0x28, 0xd5, 0xaf, 0x2d, 0xd2, 0x84, 0x39, 0x75, 0x1e, 0xe7, 0xf0, 0x23, 0xc0, 0x4f, 0x8d, 0xdd,
+0x5c, 0x90, 0xef, 0x6e, 0x53, 0xe0, 0x54, 0x67, 0xe1, 0x5b, 0x10, 0xf1, 0xf5, 0xf8, 0x64, 0x34,
+0x94, 0xeb, 0x37, 0xf7, 0xe9, 0xaa, 0x6c, 0xa4, 0xd8, 0x74, 0x6d, 0xca, 0x8d, 0x1a, 0x31, 0x73,
+0xca, 0xb4, 0xc7, 0x47, 0x34, 0x7f, 0xf8, 0x24, 0x9b, 0xfa, 0xc9, 0xcc, 0xa8, 0x61, 0xb4, 0x0e,
+0x4d, 0x68, 0xc7, 0xa0, 0xcb, 0xea, 0xf0, 0xcc, 0x0a, 0x6c, 0xf2, 0x33, 0x42, 0x99, 0x6c, 0xd8,
+0x74, 0x7f, 0x1e, 0x8a, 0xa3, 0x0a, 0x48, 0x4b, 0x7e, 0xbe, 0xdb, 0x7f, 0x56, 0x69, 0x43, 0xe8,
+0xbf, 0x12, 0xc4, 0x7b, 0xc2, 0xd9, 0xfa, 0x5c, 0xeb, 0x45, 0xca, 0x07, 0x3d, 0xc0, 0xcd, 0x68,
+0x8b, 0xd0, 0x79, 0xea, 0x0a, 0x78, 0x06, 0xdc, 0x81, 0xd7, 0x32, 0x18, 0xb3, 0x65, 0xbe, 0x47,
+0xbb, 0xfa, 0x17, 0x09, 0xe9, 0x31, 0x95, 0x30, 0xef, 0x07, 0x44, 0xec, 0xd0, 0x98, 0x98, 0xc0,
+0x6b, 0x71, 0x5b, 0x23, 0xb8, 0xb6, 0xd2, 0x21, 0xff, 0x51, 0xdd, 0xae, 0x48, 0x29, 0x75, 0x0c,
+0xc3, 0x3d, 0x91, 0xfe, 0x9d, 0xa8, 0x5e, 0xb2, 0x34, 0xb2, 0xd3, 0x81, 0xf6, 0x27, 0x9c, 0xac,
+0x6b, 0x20, 0x56, 0x86, 0xa5, 0x4f, 0x7a, 0xdb, 0xf9, 0xac, 0xa9, 0x8e, 0xe3, 0x73, 0x21, 0x99,
+0x71, 0x2d, 0xaf, 0x27, 0x92, 0x0c, 0xc7, 0xd3, 0x85, 0xb3, 0x40, 0xda, 0x13, 0x4a, 0x04, 0x41,
+0x54, 0xf8, 0xf2, 0x55, 0xb7, 0x80, 0xdd, 0x77, 0xba, 0x01, 0x7a, 0x31, 0xbd, 0x6b, 0xdc, 0x5c,
+0x59, 0xf4, 0x2b, 0xca, 0x25, 0xbb, 0x50, 0xba, 0xfa, 0x42, 0x38, 0xd2, 0x28, 0x10, 0x8b, 0x7b,
+0x96, 0x45, 0x30, 0xbb, 0x7f, 0xf4, 0x5a, 0xf7, 0x28, 0x6f, 0x47, 0xdc, 0xd2, 0x82, 0xf2, 0xf7,
+0xdd, 0x20, 0xb5, 0x0c, 0x7e, 0x53, 0x85, 0xa7, 0xfc, 0x3b, 0x1a, 0xc0, 0x07, 0x7b, 0xa1, 0x43,
+0x05, 0x18, 0x19, 0xd3, 0xfc, 0x41, 0xc2, 0xce, 0xd9, 0x5b, 0x4b, 0x63, 0xe2, 0x8f, 0x86, 0x3a,
+0xd1, 0xd0, 0x1d, 0x74, 0x2e, 0xbc, 0xd3, 0xce, 0x08, 0x0c, 0x10, 0x7a, 0x42, 0x60, 0xc5, 0x3a,
+0xa6, 0xd8, 0xb0, 0x52, 0xcf, 0x53, 0x28, 0x70, 0x45, 0xb7, 0x72, 0x7d, 0x77, 0x66, 0x54, 0x3d,
+0x38, 0x26, 0xcf, 0xd5, 0xbf, 0xe4, 0x80, 0x10, 0xba, 0x2b, 0xe8, 0xdc, 0xc3, 0xfe, 0x28, 0xa3,
+0x52, 0x58, 0x70, 0x4a, 0xde, 0x84, 0x33, 0x5e, 0x93, 0x04, 0xa4, 0x7c, 0xe7, 0xea, 0x8e, 0xba,
+0xeb, 0x8a, 0x19, 0x26, 0x6a, 0x7f, 0x7c, 0x4a, 0x5b, 0xb4, 0x0d, 0xfc, 0xc8, 0x11, 0x1b, 0x41,
+0x68, 0x5d, 0x2a, 0x25, 0x04, 0x4f, 0xc8, 0xf4, 0x65, 0xfc, 0xb9, 0x58, 0xeb, 0xb4, 0x67, 0x50,
+0x24, 0xf5, 0x43, 0xf6, 0x91, 0x4a, 0xb0, 0x0f, 0x32, 0xe0, 0x07, 0x75, 0x69, 0x1b, 0x3c, 0xeb,
+0xb2, 0x65, 0x26, 0x6f, 0xb8, 0x79, 0xe0, 0x78, 0x8c, 0xdc, 0x39, 0x24, 0x48, 0x76, 0x11, 0xd4,
+0x3a, 0xc5, 0xd2, 0x2b, 0xaa, 0x55, 0xfb, 0x92, 0x12, 0x2d, 0x88, 0x05, 0xd1, 0xb1, 0x31, 0x36,
+0x1f, 0xc2, 0x44, 0x1c, 0xab, 0x2e, 0xcd, 0x1c, 0x72, 0x86, 0xf6, 0x83, 0x87, 0x2e, 0x8b, 0xdb,
+0xaa, 0x16, 0x0e, 0x1b, 0xe6, 0x5c, 0x4d, 0x2f, 0x82, 0xbd, 0x49, 0x11, 0x60, 0x22, 0x0f, 0xde,
+0x3b, 0x2b, 0x20, 0x1d, 0x56, 0xb7, 0x21, 0xae, 0x0b, 0x26, 0x4f, 0xde, 0x3d, 0xa6, 0x3f, 0x61,
+0x81, 0xe2, 0x76, 0x60, 0x08, 0xc5, 0x4b, 0x18, 0x0b, 0xd1, 0xf5, 0xff, 0x8d, 0x1a, 0x96, 0x76,
+0x51, 0x15, 0x05, 0x4d, 0x8c, 0x6b, 0x12, 0x90, 0x47, 0xd4, 0xa4, 0x38, 0xb9, 0x48, 0xe4, 0x4c,
+0x05, 0x69, 0x6a, 0x8b, 0x9d, 0x7c, 0xa1, 0xbc, 0x77, 0xeb, 0x86, 0x93, 0x0a, 0x15, 0x84, 0xba,
+0x8f, 0xf5, 0x7c, 0x44, 0x75, 0x31, 0x79, 0x16, 0xc1, 0x81, 0x1a, 0xb6, 0xe6, 0x6c, 0x3d, 0xb8,
+0x15, 0x46, 0xf5, 0xbe, 0x46, 0x04, 0xa6, 0xec, 0xec, 0xd1, 0x74, 0x8b, 0x87, 0x2b, 0xdb, 0xd0,
+0x9f, 0xb3, 0x99, 0x9d, 0x87, 0x8c, 0xc6, 0xaa, 0xd4, 0x64, 0x45, 0xbd, 0xe8, 0xed, 0xa3, 0xc1,
+0x2a, 0x41, 0x1e, 0x26, 0xaf, 0x86, 0x16, 0xed, 0x80, 0x08, 0xca, 0x64, 0x21, 0x3a, 0xce, 0x21,
+0x4c, 0x41, 0xb9, 0x13, 0x2d, 0xf7, 0x1b, 0xdf, 0x2b, 0x33, 0x69, 0xe7, 0x5c, 0x8c, 0x7b, 0xfb,
+0xe3, 0x41, 0xe9, 0xce, 0xd7, 0xff, 0x0e, 0x54, 0xfe, 0xb0, 0x71, 0x78, 0xdc, 0xde, 0x7e, 0xdd,
+0x1f, 0x1c, 0x4a, 0x8f, 0x3e, 0x16, 0xfd, 0x91, 0x82, 0x94, 0xd4, 0xc2, 0xf7, 0xb2, 0x77, 0x89,
+0x16, 0x2c, 0xba, 0xb6, 0xbd, 0xed, 0x95, 0x43, 0x05, 0x9b, 0xf2, 0xc4, 0xbe, 0x46, 0x43, 0x90,
+0x1d, 0xd8, 0x24, 0x02, 0xd2, 0xea, 0xf4, 0x08, 0xd9, 0xf7, 0x84, 0x0e, 0xc6, 0xe7, 0x44, 0xdb,
+0xb8, 0xac, 0x0a, 0x53, 0x39, 0x61, 0x43, 0xdc, 0x22, 0x28, 0x8f, 0x22, 0x2f, 0x73, 0xbf, 0x59,
+0x2d, 0x3c, 0x8c, 0x0b, 0xcc, 0x2a, 0x67, 0xe0, 0x5b, 0x5c, 0x65, 0x5e, 0x6d, 0x98, 0x99, 0xaa,
+0x3b, 0x89, 0x12, 0xe2, 0x99, 0xf6, 0x15, 0xa7, 0xd2, 0x6a, 0x79, 0xb4, 0xf6, 0x0b, 0xf5, 0x0d,
+0x2d, 0x4c, 0xcb, 0x1b, 0x35, 0x93, 0x61, 0x32, 0xa1, 0x8a, 0xa8, 0x27, 0xe8, 0x95, 0x5a, 0x56,
+0x59, 0x04, 0xfe, 0xce, 0xc2, 0xd8, 0x92, 0x97, 0xb2, 0x54, 0x63, 0xd0, 0x3b, 0xde, 0x10, 0x34,
+0x32, 0x16, 0x05, 0x51, 0x1d, 0xfc, 0x96, 0x8e, 0xf1, 0xf6, 0x4b, 0xd7, 0x48, 0x22, 0xce, 0xca,
+0x1c, 0x6b, 0xab, 0x1f, 0x59, 0xa2, 0x74, 0xd6, 0xcd, 0x15, 0x07, 0xab, 0xa2, 0xd5, 0x22, 0x81,
+0xec, 0x20, 0x14, 0x36, 0xac, 0xe4, 0x25, 0x7d, 0xe6, 0x09, 0x00, 0x2c, 0x92, 0x4d, 0x4e, 0xbf,
+0xbf, 0xa1, 0xd4, 0xbe, 0x6b, 0xd4, 0x1f, 0x95, 0x9b, 0xf3, 0xda, 0x99, 0xad, 0xa4, 0x6c, 0x73,
+0x55, 0xd1, 0x9d, 0x4b, 0x16, 0xd4, 0x06, 0xec, 0x46, 0x3d, 0xb7, 0xe7, 0xce, 0xd0, 0x1d, 0x94,
+0x65, 0xde, 0x61, 0xb3, 0xc1, 0x10, 0x65, 0xe5, 0x68, 0x9b, 0xb0, 0xb4, 0x43, 0x0b, 0x92, 0xaf,
+0xb7, 0x40, 0xa2, 0xe5, 0x06, 0x3d, 0x72, 0x00, 0xc5, 0x39, 0xab, 0x35, 0x29, 0x22, 0x4c, 0xa5,
+0xa5, 0x3f, 0x22, 0x90, 0x53, 0xd2, 0x36, 0x63, 0x1e, 0xd3, 0x33, 0xa5, 0xbb, 0x3d, 0xa3, 0x0c,
+0x14, 0x9c, 0x2e, 0x6d, 0x9a, 0x7a, 0xf7, 0xf1, 0x56, 0x66, 0xe5, 0x8d, 0x53, 0x83, 0x34, 0x3f,
+0xa9, 0x83, 0x84, 0x68, 0x90, 0xc9, 0x51, 0xc2, 0xd4, 0x8e, 0x6c, 0xc7, 0x6d, 0xa7, 0x19, 0x61,
+0xa7, 0x2e, 0x36, 0xbc, 0xd2, 0x0f, 0x17, 0x49, 0xd4, 0x6b, 0x36, 0x63, 0xfb, 0x1d, 0xf4, 0xb0,
+0x6b, 0xcf, 0x34, 0x5f, 0xd2, 0x77, 0xae, 0x12, 0xaf, 0xb3, 0xdf, 0x52, 0xf7, 0xc2, 0xc8, 0xf2,
+0x63, 0x61, 0xb6, 0x3e, 0x39, 0xf2, 0xa7, 0x1a, 0x89, 0x9d, 0x0e, 0x8f, 0xaf, 0xe1, 0x01, 0x24,
+0xa6, 0x3a, 0xd5, 0x9a, 0x62, 0x67, 0xa3, 0x66, 0xee, 0xbc, 0xc5, 0x94, 0x4b, 0xc3, 0x15, 0xa1,
+0x7e, 0x07, 0x07, 0x2b, 0xb7, 0x43, 0x2a, 0xb4, 0xb8, 0x25, 0x88, 0x86, 0x23, 0xab, 0xdf, 0x05,
+0xbe, 0x46, 0x56, 0xd7, 0xda, 0xd6, 0x75, 0x53, 0xd9, 0xc8, 0x26, 0x8f, 0x39, 0x67, 0xed, 0x21,
+0x53, 0x1c, 0x9c, 0x89, 0x46, 0xd3, 0xfe, 0x54, 0xe6, 0x1d, 0x02, 0xb9, 0x25, 0x82, 0x66, 0xe6,
+0xf9, 0x45, 0xd9, 0x3f, 0xa5, 0x71, 0xc1, 0x46, 0x66, 0x7a, 0x27, 0x8a, 0x82, 0xc9, 0x21, 0xe9,
+0x17, 0xab, 0x6c, 0xef, 0x45, 0xe5, 0x88, 0x93, 0x87, 0x80, 0xb3, 0x85, 0x25, 0x96, 0x19, 0x41,
+0xab, 0xd6, 0xba, 0x92, 0x76, 0x21, 0x8a, 0x58, 0xbd, 0xe2, 0x4b, 0xec, 0x45, 0x59, 0x2c, 0x13,
+0x1a, 0xb5, 0x13, 0x25, 0x44, 0xe7, 0x71, 0x26, 0x0a, 0x34, 0x33, 0xb9, 0x57, 0x15, 0xa4, 0x90,
+0x60, 0x11, 0x05, 0x8e, 0xc8, 0x8e, 0x74, 0x52, 0x4b, 0x31, 0x71, 0xeb, 0x66, 0x7e, 0xee, 0xb1,
+0x0a, 0x21, 0x52, 0xc0, 0x1a, 0xe9, 0xa1, 0x5a, 0xe3, 0x3a, 0x24, 0xfb, 0xf3, 0x1e, 0xd6, 0x83,
+0x1d, 0xfb, 0x81, 0xa8, 0x91, 0x60, 0x9e, 0xbc, 0x59, 0x20, 0xc9, 0x9e, 0x71, 0x19, 0x83, 0x2b,
+0x6a, 0x48, 0x4e, 0x6b, 0x46, 0x82, 0x89, 0xda, 0x60, 0xff, 0x1a, 0x46, 0x94, 0x55, 0xda, 0xe5,
+0x99, 0xfa, 0x84, 0xd7, 0x3b, 0xb9, 0xa5, 0x34, 0x87, 0x86, 0x5e, 0x6d, 0x75, 0x9a, 0xe7, 0x09,
+0xb8, 0xe6, 0x71, 0x15, 0x10, 0x56, 0xd7, 0xc1, 0xc8, 0xb2, 0x62, 0xbc, 0xec, 0xe0, 0x94, 0xa0,
+0xcd, 0xb4, 0x04, 0xa9, 0xc3, 0x51, 0xee, 0xf8, 0x2e, 0x42, 0x9a, 0xaa, 0x34, 0xd3, 0xb9, 0xb0,
+0x36, 0xf9, 0x47, 0xc1, 0x07, 0x49, 0xde, 0xb8, 0x32, 0x8a, 0x87, 0x68, 0x56, 0x9a, 0x35, 0x79,
+0xd1, 0xac, 0x49, 0x38, 0xc6, 0xfe, 0xfd, 0xdf, 0x6f, 0x3c, 0xda, 0x48, 0xbd, 0x23, 0xfd, 0x85,
+0xf0, 0x96, 0xee, 0x1c, 0x27, 0x18, 0x86, 0xa6, 0xf0, 0x7b, 0xd8, 0x3c, 0xc7, 0x22, 0x3e, 0x2f,
+0xac, 0xb1, 0x37, 0xbd, 0x84, 0x4b, 0xe3, 0x92, 0x82, 0xd0, 0x25, 0x14, 0x22, 0x65, 0xed, 0xeb,
+0xef, 0xb9, 0xb6, 0xe4, 0x95, 0x18, 0x0d, 0x2b, 0x8d, 0x4f, 0xaf, 0xc0, 0xa0, 0x05, 0x8b, 0x35,
+0x5b, 0x94, 0xb2, 0x68, 0x26, 0x4f, 0x4a, 0x9e, 0x85, 0x0e, 0x46, 0xe0, 0x4f, 0x60, 0x66, 0x01,
+0xa4, 0x39, 0xe8, 0x8b, 0x2a, 0x50, 0xf5, 0x18, 0x70, 0xe2, 0xfc, 0xd6, 0xbe, 0xd3, 0x46, 0x4b
+
+ciphertext =
+0x0F, 0x9B, 0xF5, 0x9F, 0xE2, 0xB3, 0xD9, 0x12, 0x27, 0x51, 0x18, 0xD7, 0x4E, 0x1F, 0x40, 0x4C,
+0xC1, 0xDF, 0x66, 0xAB, 0x1A, 0xFA, 0xF8, 0xEE, 0xA4, 0x40, 0x63, 0x72, 0x58, 0xAE, 0x7E, 0x98,
+0x76, 0x63, 0x56, 0x1F, 0x71, 0xDB, 0x80, 0x07, 0xFE, 0x34, 0x23, 0x43, 0x1E, 0x3D, 0xDE, 0x7E,
+0xC0, 0x72, 0xEF, 0xAD, 0xF4, 0x30, 0xDF, 0x4E, 0x3B, 0x9F, 0xCA, 0x90, 0xC3, 0x95, 0x8A, 0x66,
+0x5C, 0xD6, 0xCA, 0xBD, 0x3C, 0xC9, 0xD2, 0xFE, 0x30, 0x02, 0xA9, 0x8E, 0xAA, 0x80, 0xF6, 0xFF,
+0xCD, 0x40, 0xBC, 0x99, 0xD2, 0x25, 0x7F, 0xBF, 0xC5, 0xF3, 0x50, 0x31, 0x69, 0xE1, 0xC8, 0x64,
+0xC5, 0x5F, 0x30, 0x30, 0xD9, 0xD7, 0xF9, 0xF0, 0xD3, 0x77, 0xE0, 0xD0, 0x11, 0xB8, 0xC9, 0x54,
+0xD9, 0x9C, 0x10, 0x74, 0xCA, 0x4A, 0xFE, 0xD2, 0x16, 0xA5, 0xD8, 0x2D, 0xC0, 0xDA, 0x39, 0x24,
+0xAF, 0x5E, 0xF2, 0xEB, 0xC7, 0x9D, 0xBC, 0xEF, 0x94, 0xA0, 0x49, 0x56, 0x39, 0xCE, 0x8A, 0x38,
+0x3B, 0x70, 0xC7, 0xB2, 0xE0, 0xD4, 0x43, 0xD7, 0xAC, 0xB4, 0xB3, 0xDB, 0xA2, 0x2B, 0x75, 0xE2,
+0x0E, 0x38, 0x2B, 0xE6, 0x42, 0x1A, 0x11, 0x08, 0x79, 0x9A, 0x32, 0xD2, 0x41, 0xCC, 0x28, 0xC3,
+0x4B, 0x3E, 0xD4, 0xB0, 0x10, 0x89, 0x7B, 0x0D, 0xB7, 0x95, 0xBE, 0x22, 0x01, 0xD0, 0x86, 0xA8,
+0xC6, 0xD0, 0xDD, 0xDF, 0x18, 0x2C, 0x1B, 0x49, 0xE3, 0x2B, 0x84, 0x53, 0x54, 0x14, 0xE6, 0x04,
+0xE1, 0xD6, 0x98, 0x91, 0x17, 0xE0, 0xD9, 0x39, 0xAF, 0xF9, 0x71, 0x35, 0x90, 0xCE, 0x4B, 0xD2,
+0x45, 0xB2, 0x4B, 0x68, 0x26, 0xBB, 0x8C, 0xBD, 0xA3, 0xF7, 0x60, 0xD4, 0x38, 0xAA, 0xDF, 0x5B,
+0x3B, 0x53, 0xF6, 0xA4, 0x45, 0x49, 0x4A, 0xEF, 0x6F, 0x04, 0x00, 0xFF, 0xE3, 0x3F, 0x7C, 0x7D,
+0xDC, 0xB0, 0x62, 0x9C, 0x6A, 0x99, 0x07, 0x85, 0xB3, 0x13, 0x2B, 0x40, 0x06, 0xAF, 0xE3, 0xA0,
+0x17, 0x97, 0x0D, 0x4E, 0xD7, 0xB0, 0x6B, 0xF8, 0x3C, 0x91, 0x0A, 0xF3, 0x17, 0x51, 0x30, 0xC8,
+0x58, 0x20, 0x20, 0xE2, 0xA3, 0xE6, 0x3B, 0x2F, 0x77, 0x7C, 0x52, 0x31, 0x4F, 0x4C, 0xA8, 0xD8,
+0x84, 0xB1, 0xE9, 0xB4, 0x86, 0xD8, 0x93, 0xBF, 0x2D, 0x6A, 0xDA, 0x5D, 0x39, 0x62, 0x5B, 0x52,
+0xFB, 0xBA, 0x9F, 0x83, 0x82, 0x3C, 0x40, 0x57, 0x02, 0x92, 0x6A, 0x97, 0x06, 0x39, 0x17, 0x0B,
+0xA7, 0xF5, 0x6A, 0x1A, 0x8E, 0x64, 0x74, 0x90, 0x33, 0xA6, 0xA3, 0x1E, 0x30, 0x1E, 0x67, 0x49,
+0x5A, 0x76, 0x43, 0x97, 0x71, 0xE0, 0x4E, 0xCC, 0x5A, 0xFD, 0x44, 0xFD, 0x5C, 0x41, 0x3A, 0x09,
+0x8E, 0x4E, 0xD2, 0xF0, 0x9A, 0x52, 0x39, 0x5B, 0x0E, 0xC4, 0xF2, 0xFE, 0xB4, 0x66, 0x6C, 0x60,
+0x47, 0x96, 0x80, 0x91, 0xBE, 0x6C, 0xA8, 0x33, 0x66, 0x4D, 0x08, 0x70, 0x27, 0x0C, 0x33, 0x50,
+0x8F, 0xEF, 0x05, 0xC9, 0x93, 0x21, 0x8D, 0xA4, 0x94, 0xF3, 0xBC, 0x4D, 0x96, 0x9A, 0x51, 0x29,
+0xDA, 0x8E, 0x32, 0xB4, 0xB3, 0xD8, 0x75, 0x20, 0x37, 0x9F, 0x33, 0xE2, 0xF9, 0xEB, 0xFA, 0xF2,
+0x6E, 0x3F, 0x71, 0x0C, 0x29, 0x8D, 0xFE, 0xE1, 0xF9, 0xC6, 0x49, 0xB6, 0x6E, 0x53, 0xBC, 0x24,
+0x1D, 0x0B, 0x24, 0x75, 0x54, 0x51, 0x0B, 0xEB, 0xDD, 0x67, 0x40, 0x61, 0xCA, 0x3F, 0xD2, 0x85,
+0x71, 0x16, 0xFC, 0x77, 0x9C, 0x56, 0xE5, 0xCA, 0x43, 0xC6, 0xC3, 0x2A, 0x47, 0xA8, 0x98, 0x40,
+0x02, 0x1D, 0x64, 0x47, 0x85, 0x99, 0x2F, 0x8F, 0x2D, 0xC2, 0x29, 0x7B, 0x8D, 0x64, 0xD9, 0x8F,
+0xF4, 0x91, 0x6F, 0x2A, 0xB0, 0x5C, 0xDC, 0xB0, 0xBE, 0xDE, 0x34, 0xA8, 0x99, 0x40, 0x23, 0x9F,
+0x8A, 0xF5, 0x0C, 0x32, 0xDE, 0x53, 0xA5, 0x55, 0xFE, 0x4C, 0xF8, 0x87, 0x83, 0xB6, 0xA1, 0x31,
+0x2C, 0xB4, 0xE9, 0x78, 0xB8, 0x45, 0xAA, 0x33, 0x6E, 0x8A, 0xBE, 0xDB, 0x76, 0x35, 0xDD, 0xDF,
+0xA1, 0x98, 0x21, 0x2B, 0x42, 0xF3, 0xA4, 0x3E, 0x2C, 0x38, 0xA9, 0xB1, 0x07, 0x38, 0xA1, 0x1D,
+0xA5, 0x85, 0x61, 0x87, 0xF1, 0xA1, 0x9D, 0x3D, 0x2C, 0xA6, 0x2F, 0x26, 0xFD, 0xE8, 0x46, 0x0D,
+0xBD, 0xDA, 0x44, 0xC4, 0xB5, 0xFF, 0x6F, 0xDB, 0xF7, 0xF4, 0xDB, 0x0A, 0x80, 0x7C, 0x81, 0x27,
+0xF4, 0x27, 0x41, 0x15, 0x9F, 0xEC, 0xA5, 0xAA, 0x79, 0x30, 0x9B, 0x0D, 0x84, 0xAC, 0x4D, 0x50,
+0x68, 0x56, 0x55, 0x32, 0xF9, 0x28, 0x06, 0xC3, 0x96, 0xD6, 0x57, 0x61, 0x04, 0xCF, 0xD8, 0xB9,
+0x36, 0x0D, 0x33, 0x11, 0xEE, 0x8A, 0x88, 0x5A, 0x11, 0x6C, 0x11, 0x71, 0x41, 0xFC, 0xD5, 0xF1,
+0xB7, 0xC2, 0x94, 0x98, 0x6F, 0xAB, 0x12, 0x5F, 0x34, 0x46, 0xDD, 0xBC, 0x65, 0x5C, 0x76, 0x3A,
+0x81, 0x42, 0xF8, 0x6D, 0xC2, 0x08, 0x93, 0x58, 0x30, 0x1B, 0x82, 0x28, 0xD7, 0xFB, 0x90, 0x61,
+0x24, 0x38, 0x12, 0xDC, 0xFB, 0x88, 0xFB, 0xC8, 0xB9, 0xB8, 0x23, 0xA5, 0xEB, 0x85, 0xB1, 0x92,
+0x55, 0x34, 0xA7, 0x8E, 0x2C, 0x3D, 0xD7, 0x07, 0xF6, 0x2C, 0xF5, 0x5A, 0x72, 0x38, 0x25, 0x5A,
+0x3F, 0x1F, 0xF5, 0x7B, 0x02, 0xFC, 0x70, 0xB1, 0x31, 0x01, 0xCA, 0xD6, 0x18, 0x7C, 0xF4, 0x0A,
+0x3A, 0xAE, 0x7B, 0x89, 0x0B, 0xE8, 0x5E, 0x7D, 0x64, 0x7A, 0x10, 0xE0, 0x8C, 0x8C, 0x02, 0xF7,
+0x2F, 0x52, 0x06, 0xE7, 0xD7, 0xFA, 0x56, 0xBB, 0xC4, 0x5A, 0x82, 0x14, 0x13, 0x7D, 0x41, 0xD0,
+0xCE, 0x57, 0xE6, 0x41, 0x3A, 0x08, 0xAD, 0x7C, 0x20, 0xDC, 0x15, 0xA8, 0xBA, 0xE6, 0xB8, 0x58,
+0xA6, 0xF4, 0x82, 0x70, 0xDE, 0x09, 0xE2, 0x8B, 0xFC, 0xAA, 0x7D, 0xD9, 0xFD, 0xCE, 0xBC, 0xAC,
+0xB8, 0xE8, 0xB2, 0x46, 0xC5, 0xC4, 0xF9, 0xFC, 0xE5, 0xFF, 0xF5, 0xCA, 0xB3, 0x51, 0x3C, 0xB8,
+0x25, 0xD7, 0x83, 0x3C, 0x91, 0xCD, 0xFB, 0x86, 0x94, 0x5E, 0x36, 0xCF, 0xDE, 0x8E, 0x37, 0x4F,
+0x9D, 0x54, 0xBB, 0x8D, 0x52, 0xE6, 0x86, 0x5C, 0x8B, 0x72, 0x23, 0xAF, 0x93, 0xF5, 0xFF, 0xB5,
+0xC5, 0xFE, 0xC1, 0x31, 0xCE, 0x72, 0xD7, 0x3A, 0x2E, 0x25, 0xBF, 0xC1, 0xEE, 0xD5, 0x28, 0xDB,
+0xB7, 0xD2, 0x4E, 0xC6, 0x2B, 0xE3, 0x6F, 0x87, 0x2D, 0xA5, 0xFB, 0xDB, 0x03, 0xB0, 0x13, 0x58,
+0x3F, 0x3B, 0xBD, 0x2E, 0x0B, 0x78, 0xC2, 0xDC, 0xF8, 0x30, 0x2F, 0x08, 0xA6, 0x6A, 0x00, 0x57,
+0x82, 0x79, 0x06, 0x11, 0xC2, 0x20, 0x49, 0xD1, 0xDD, 0x53, 0xF6, 0xA8, 0xB1, 0x0A, 0x1C, 0x97,
+0x11, 0x5C, 0xA4, 0x98, 0xBC, 0xE7, 0x10, 0x27, 0x73, 0x54, 0x3C, 0x91, 0x98, 0x58, 0x2F, 0xA8,
+0x96, 0xB2, 0xE8, 0x54, 0xC0, 0x6B, 0x4B, 0x69, 0x0E, 0x92, 0xFB, 0xF0, 0x5E, 0xDC, 0x45, 0xAE,
+0x6B, 0x12, 0x29, 0xBE, 0x59, 0xA2, 0x0B, 0xD6, 0xD2, 0x09, 0xE0, 0xBA, 0xE2, 0x1D, 0xD9, 0x8A,
+0x6D, 0x70, 0xD0, 0x62, 0x0C, 0x9D, 0xC4, 0x38, 0x3A, 0x11, 0xBA, 0xF8, 0x8B, 0x0A, 0xDF, 0xDF,
+0xD1, 0xBF, 0x87, 0x98, 0xC8, 0xFF, 0x42, 0x85, 0x13, 0xE7, 0x6C, 0xDE, 0xFE, 0x45, 0xFE, 0xF8,
+0x67, 0x28, 0x56, 0xF7, 0x3C, 0x12, 0x4E, 0x88, 0x05, 0x8B, 0x56, 0x53, 0x2E, 0xE8, 0x6F, 0x3B,
+0xEE, 0x40, 0x4D, 0x3C, 0x1B, 0x0C, 0xE7, 0xCA, 0xD1, 0x3A, 0xA3, 0x2A, 0xDB, 0xC0, 0xDB, 0x9D,
+0xA4, 0xC1, 0xDA, 0x50, 0x81, 0x59, 0x52, 0x87, 0x33, 0x10, 0xE6, 0x58, 0xC3, 0x3E, 0x08, 0x69,
+0xFF, 0x3B, 0x4C, 0x82, 0xBA, 0xC3, 0x81, 0x82, 0xB5, 0xF2, 0x9F, 0x0A, 0x20, 0xCF, 0x70, 0xFB,
+0x81, 0x63, 0xB7, 0x04, 0x95, 0xFE, 0x08, 0x33, 0x1A, 0xED, 0xB0, 0x0A, 0x86, 0xA6, 0x42, 0x8F,
+0xEB, 0x48, 0xD8, 0xF4, 0xFE, 0x50, 0xA2, 0xC6, 0x5E, 0xD8, 0x33, 0xAE, 0x44, 0x83, 0xE6, 0x7C,
+0x88, 0xA3, 0xEC, 0x74, 0xA4, 0x26, 0x58, 0xE6, 0x3C, 0xD4, 0x04, 0xAC, 0x71, 0xBD, 0xBE, 0xCC,
+0xC6, 0xE8, 0x55, 0xDE, 0x98, 0x7F, 0x6B, 0x4F, 0x62, 0xF6, 0x4E, 0x1C, 0x95, 0x44, 0xBE, 0xD7,
+0x77, 0x9C, 0xD2, 0x13, 0xD7, 0xF5, 0x2C, 0x8D, 0xC1, 0xC7, 0xCA, 0xF8, 0x68, 0x91, 0x06, 0x24,
+0x01, 0x5D, 0xD0, 0x9E, 0xC9, 0x24, 0x12, 0x83, 0xA1, 0xF4, 0x05, 0xE0, 0xB2, 0xA9, 0xAD, 0xFC,
+0xC3, 0x64, 0x08, 0x17, 0x58, 0xAB, 0xB7, 0x39, 0xA4, 0xB6, 0x77, 0x26, 0x37, 0x13, 0x24, 0x20,
+0xEB, 0xEE, 0x61, 0x28, 0x0E, 0x33, 0x7C, 0x4D, 0xD5, 0xEA, 0x42, 0x39, 0xB7, 0x20, 0xCB, 0x97,
+0x44, 0xBE, 0x6D, 0x67, 0x28, 0xF9, 0x99, 0xE1, 0xE7, 0x3A, 0x5D, 0x7D, 0xAA, 0xBB, 0x1D, 0xEC,
+0xA4, 0xF6, 0xAA, 0xED, 0xC5, 0x1F, 0xAD, 0xDF, 0x0E, 0xD3, 0x94, 0xA9, 0xD9, 0xFF, 0x29, 0x56,
+0x3C, 0x43, 0x5E, 0xC2, 0x3B, 0xB3, 0x2E, 0x4A, 0xB7, 0x9F, 0xF4, 0xA0, 0xF1, 0xD6, 0x93, 0xFF,
+0x4A, 0xC8, 0xF2, 0xDE, 0x1A, 0x96, 0xB1, 0x83, 0x37, 0x45, 0x0C, 0x79, 0x40, 0x51, 0xD1, 0x08,
+0xF2, 0xDC, 0xFA, 0xBF, 0xB4, 0x15, 0x9A, 0x60, 0x37, 0x81, 0x5F, 0x76, 0xB2, 0x87, 0x82, 0x2D,
+0xA8, 0x0A, 0x70, 0x1B, 0xA2, 0xD6, 0x7E, 0x65, 0xAC, 0x02, 0x36, 0xDA, 0x0F, 0xB4, 0xD5, 0x6E,
+0xEE, 0x60, 0x0B, 0xC8, 0xD6, 0x1B, 0x9C, 0xF1, 0xCF, 0x66, 0xCA, 0x1D, 0x9D, 0xE5, 0xC7, 0x0B,
+0x1A, 0xE1, 0x9E, 0x68, 0xFF, 0xCE, 0xCE, 0x1B, 0x0A, 0x1E, 0x8D, 0x25, 0x37, 0x40, 0x05, 0x20,
+0xE4, 0xA1, 0x4F, 0x4C, 0x33, 0x29, 0xB3, 0x7B, 0x47, 0xFA, 0x7E, 0xEF, 0x87, 0x89, 0x13, 0x10,
+0x1C, 0x0F, 0x15, 0x03, 0xFD, 0xD8, 0x81, 0xD7, 0xB9, 0xA7, 0xBA, 0xD0, 0x6F, 0x6F, 0x76, 0xF6,
+0xD1, 0xF3, 0xCE, 0x57, 0x21, 0x02, 0x12, 0xAA, 0x5E, 0x61, 0xD4, 0xCA, 0xF2, 0x84, 0x80, 0xF0,
+0x90, 0x37, 0x0B, 0xE5, 0xC9, 0xE4, 0xAD, 0xD5, 0x8C, 0x47, 0x6D, 0xFA, 0xE1, 0xE5, 0xC6, 0x3A,
+0xE4, 0x0A, 0x82, 0xCE, 0x05, 0xD6, 0x46, 0x39, 0xAE, 0xE9, 0xE4, 0x6A, 0xD7, 0xE5, 0x9E, 0x85,
+0x4A, 0x31, 0xFA, 0xA0, 0x86, 0x01, 0x4B, 0xC8, 0x77, 0xC2, 0x10, 0xED, 0x70, 0xD5, 0x7D, 0x0F,
+0xF4, 0xDC, 0x97, 0x93, 0xB2, 0x7D, 0x45, 0x7A, 0x24, 0x37, 0x2D, 0x39, 0xAC, 0xB6, 0x53, 0x6B,
+0xA5, 0x9B, 0xDC, 0xC1, 0xAC, 0x95, 0x01, 0x3E, 0x52, 0x53, 0x11, 0x0A, 0xB7, 0x1E, 0x1F, 0xCD,
+0x29, 0xC0, 0xE9, 0x0A, 0xDF, 0xE5, 0xF5, 0x54, 0xF7, 0xF7, 0x23, 0x7C, 0xC6, 0xB3, 0x18, 0xEA,
+0x0E, 0x08, 0xE3, 0x76, 0x8D, 0xDC, 0x01, 0x45, 0xB7, 0x45, 0xAF, 0x5C, 0x88, 0xEA, 0x74, 0xFE,
+0xE0, 0xC5, 0xA3, 0x73, 0x2D, 0x2E, 0x47, 0xF1, 0x2E, 0xC0, 0x01, 0x97, 0xE5, 0x64, 0x84, 0x59,
+0xC6, 0x83, 0xF5, 0xFC, 0x0D, 0x2D, 0x70, 0x46, 0x0E, 0x17, 0xE1, 0xC9, 0xE2, 0xBF, 0xF2, 0xF4,
+0x7B, 0x5C, 0xF1, 0xBE, 0xC3, 0xA7, 0x81, 0x96, 0xA1, 0x24, 0x67, 0x85, 0xF5, 0x6C, 0xD4, 0xA3,
+0x2A, 0xBE, 0xF5, 0x05, 0x2A, 0xBB, 0xF2, 0x18, 0xAF, 0xDC, 0x21, 0x8A, 0x76, 0x2E, 0xAC, 0x31,
+0xB7, 0x56, 0x55, 0xFD, 0x09, 0x48, 0x3A, 0xA6, 0x66, 0x1C, 0x2F, 0x92, 0x2C, 0x07, 0xDF, 0x09,
+0x3A, 0xD1, 0xEA, 0x7A, 0xFA, 0x87, 0xE3, 0xF6, 0x5D, 0x03, 0x45, 0xC9, 0x3F, 0x97, 0x60, 0x4F,
+0x9C, 0x6C, 0xCC, 0x55, 0x1F, 0xE1, 0xBD, 0x5E, 0xE5, 0x5D, 0x76, 0x0C, 0x00, 0xE0, 0xDA, 0xB9,
+0x60, 0xFC, 0xF4, 0xC5, 0xFC, 0x14, 0x3E, 0x2C, 0xF2, 0x18, 0xEC, 0x7D, 0x2B, 0x56, 0xFC, 0x6D,
+0xCE, 0x3D, 0x94, 0x5E, 0xA1, 0x00, 0xDC, 0x00, 0xA6, 0x73, 0xCE, 0xDE, 0x57, 0x87, 0xB3, 0x90,
+0xE5, 0x16, 0x82, 0x24, 0x62, 0xAD, 0x27, 0x8B, 0xFF, 0xDD, 0xD6, 0x7C, 0xFB, 0x45, 0x8A, 0x09,
+0xB0, 0x80, 0x15, 0x86, 0x90, 0xB9, 0xE9, 0x0F, 0x59, 0x86, 0x9E, 0xBA, 0xA2, 0x6A, 0xBA, 0x33,
+0xDD, 0xE5, 0xE8, 0x08, 0x6F, 0x3F, 0xF4, 0x2E, 0xF7, 0x25, 0x28, 0x23, 0x60, 0x51, 0x88, 0x50,
+0x66, 0x31, 0xEA, 0xEE, 0x83, 0x98, 0x62, 0x04, 0xBD, 0x48, 0x17, 0x34, 0x15, 0x2E, 0x9F, 0x33,
+0x8C, 0x96, 0x83, 0x71, 0x1C, 0x0F, 0x72, 0xEB, 0x5F, 0x1C, 0x1A, 0x79, 0x7C, 0x22, 0x4C, 0x84,
+0xC6, 0xA6, 0xD6, 0xDD, 0x29, 0x67, 0x78, 0xC5, 0xEC, 0x37, 0x3E, 0x00, 0xCF, 0xEF, 0x7E, 0x4B,
+0x6D, 0xB2, 0xBD, 0xD3, 0x70, 0x41, 0x2A, 0x89, 0x00, 0x0E, 0xCA, 0x89, 0xC6, 0x31, 0x9F, 0xCB,
+0x59, 0x0C, 0x9B, 0x47, 0x6A, 0x0C, 0x56, 0x6D, 0x03, 0xF7, 0xD4, 0x81, 0x15, 0x3E, 0x9F, 0x6F,
+0x15, 0x22, 0x10, 0x52, 0xCC, 0xD9, 0x2C, 0xA3, 0x85, 0xA5, 0x42, 0x04, 0x7E, 0xD2, 0xE2, 0x16,
+0x98, 0xB1, 0x40, 0x57, 0x8A, 0x5A, 0x54, 0xA1, 0xF8, 0x9B, 0x8C, 0x77, 0x7A, 0x5F, 0x26, 0x4B,
+0x85, 0xE0, 0x3C, 0xB2, 0xC2, 0xC2, 0x90, 0xDD, 0x8E, 0xD9, 0xF5, 0x94, 0xAC, 0x3E, 0x56, 0x20,
+0x63, 0x13, 0x08, 0x48, 0x69, 0x9C, 0xD7, 0xD7, 0x05, 0x1B, 0xB0, 0xF3, 0x8A, 0x81, 0x78, 0x43,
+0x62, 0x04, 0xAB, 0x8D, 0xA8, 0xD7, 0x51, 0x2C, 0xFE, 0x66, 0x80, 0x06, 0xC0, 0x10, 0x92, 0xF3,
+0xBA, 0xBF, 0x42, 0x95, 0x96, 0xB7, 0xDB, 0xA5, 0x73, 0x06, 0xB8, 0x0D, 0xAE, 0xDA, 0x00, 0xA7,
+0xD2, 0xF8, 0x63, 0xF0, 0x35, 0x15, 0x7D, 0x9B, 0x1C, 0x3D, 0x3F, 0x83, 0x9C, 0xAE, 0xC1, 0xFA,
+0xE3, 0x62, 0x9A, 0x8A, 0xF3, 0x86, 0x2F, 0xE4, 0xDB, 0x49, 0xDF, 0x2C, 0x17, 0xFE, 0x2C, 0x30,
+0xD1, 0x76, 0x88, 0x92, 0x60, 0x60, 0xB9, 0xF9, 0x35, 0x0F, 0xE2, 0xE8, 0x8E, 0x73, 0x9E, 0x7C,
+0xF6, 0xD0, 0x5F, 0x81, 0x22, 0x2A, 0x5D, 0x5F, 0x10, 0xFE, 0x7B, 0x06, 0xB5, 0x5F, 0xF3, 0x42,
+0x94, 0xC4, 0x3E, 0xE0, 0x0E, 0x11, 0xCC, 0x3B, 0x4F, 0xBD, 0x06, 0xD1, 0x15, 0xF5, 0x32, 0x76,
+0xE4, 0x97, 0xBF, 0xA7, 0xC5, 0xD0, 0x5E, 0x5A, 0x1C, 0x35, 0x18, 0x43, 0x6B, 0x10, 0xD2, 0xAB,
+0x8B, 0xF6, 0xE1, 0xBB, 0x8C, 0xE9, 0x9C, 0x6C, 0x27, 0x96, 0xF8, 0x19, 0x6F, 0x6E, 0x73, 0x28,
+0x8F, 0x51, 0x61, 0x0B, 0x4D, 0x4F, 0x22, 0x15, 0x1A, 0xCD, 0x88, 0x2C, 0x6F, 0x9C, 0xEC, 0x79,
+0x51, 0x70, 0x27, 0x8E, 0x58, 0xEF, 0xDE, 0x0B, 0xAA, 0x4D, 0xD2, 0x8F, 0x96, 0xE0, 0x71, 0x6A,
+0x8C, 0x41, 0xDB, 0x98, 0xF7, 0x2A, 0x09, 0x59, 0xD9, 0x48, 0x7B, 0x14, 0x87, 0x9B, 0x4A, 0xBB,
+0x88, 0xF2, 0x9D, 0x9D, 0x47, 0xF2, 0x80, 0x4B, 0xD9, 0x0E, 0xF3, 0xA9, 0x7E, 0xEF, 0xA1, 0x80,
+0x9D, 0x6F, 0x67, 0x6C, 0x14, 0x09, 0x2C, 0xB9, 0x03, 0xF3, 0x58, 0x37, 0xAE, 0x6B, 0xF7, 0x01,
+0x86, 0xDF, 0x43, 0xD5, 0xE4, 0xE2, 0x28, 0x8E, 0x8D, 0xA4, 0xF2, 0xC5, 0x7A, 0xA7, 0x89, 0x3A,
+0xE2, 0x7E, 0x77, 0xFD, 0x3A, 0x9A, 0x65, 0x5D, 0x87, 0xDC, 0x85, 0x70, 0xAB, 0xF3, 0x18, 0x0A
+
+cipher_key =
+0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2, 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A,
+0xd0, 0xe7, 0x4b, 0xfb, 0x5d, 0xe5, 0x0c, 0xe7
+
+auth_key =
+0xaf, 0x96, 0x42, 0xf1, 0x8c, 0x50, 0xdc, 0x67, 0x1a, 0x43, 0x47, 0x62, 0xc7, 0x04, 0xab, 0x05,
+0xf5, 0x0c, 0xe7, 0xa2, 0xa6, 0x23, 0xd5, 0x3d, 0x95, 0xd8, 0xcd, 0x86, 0x79, 0xf5, 0x01, 0x47,
+0x4f, 0xf9, 0x1d, 0x9d, 0x36, 0xf7, 0x68, 0x1a, 0x64, 0x44, 0x58, 0x5d, 0xe5, 0x81, 0x15, 0x2a,
+0x41, 0xe4, 0x0e, 0xaa, 0x1f, 0x04, 0x21, 0xff, 0x2c, 0xf3, 0x73, 0x2b, 0x48, 0x1e, 0xd2, 0xf7,
+0xf6, 0xd9, 0xaf, 0xbf, 0x08, 0x3b, 0xbb, 0x19, 0x5f, 0xf6, 0x7d, 0x25, 0x85, 0xdf, 0x6b, 0x54,
+0xd0, 0xe7, 0x4b, 0x9e, 0xc7, 0xef, 0xca, 0x48, 0x6f, 0x21, 0xd7, 0x51, 0xc8, 0x21, 0xc1, 0x15,
+0xe8, 0x38, 0x36, 0x58, 0x39, 0xd9, 0x9a, 0xc5, 0xe7, 0x3b, 0xc4, 0x47, 0xe2, 0xbd, 0x80, 0x73,
+0xf8, 0xd1, 0x9a, 0x5e, 0x4b, 0xfb, 0x52, 0x6b, 0x50, 0xaf, 0x8b, 0xb7, 0xb5, 0x2c, 0x52, 0x84
+
+iv =
+0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+
+####################
+# sha_hmac_buff_32 #
+####################
+[sha1_hmac_buff_32]
+digest =
+0x34, 0xCB, 0xFA, 0xE6, 0xBF, 0x60, 0x88, 0x82, 0x2C, 0x9F, 0x96, 0x4A, 0x5A, 0xD8, 0xAE, 0x48,
+0x1D, 0x2B, 0x0A, 0x22
+
+[sha224_hmac_buff_32]
+digest =
+0x17, 0xDD, 0xF3, 0xC0, 0xFC, 0xBC, 0x89, 0xE1, 0x02, 0x94, 0x51, 0x2B, 0xA0, 0x63, 0x4D, 0x1B,
+0x78, 0xCA, 0x0D, 0xD5, 0x99, 0xC0, 0xB8, 0x8E, 0x7F, 0x8A, 0xCB, 0x92
+
+[sha256_hmac_buff_32]
+digest =
+0xF7, 0xC0, 0xFD, 0x09, 0x94, 0x8F, 0x88, 0xF3, 0x26, 0xE1, 0xB1, 0xA6, 0x70, 0xFD, 0x57, 0xA2,
+0xB7, 0x63, 0x9C, 0x35, 0x97, 0x60, 0x27, 0x6A, 0xCC, 0xD8, 0x4A, 0xAE, 0xFD, 0xEC, 0x14, 0x56
+
+[sha384_hmac_buff_32]
+digest =
+0x01, 0x90, 0x96, 0x95, 0x8B, 0xE8, 0xF8, 0x30, 0xE0, 0xFE, 0xD4, 0x83, 0xE4, 0xE1, 0x48, 0xEB,
+0xEB, 0x3E, 0x6D, 0xBC, 0x72, 0xD8, 0xBF, 0x00, 0x86, 0x0B, 0x80, 0xCB, 0xCF, 0x0E, 0x83, 0x7E,
+0x77, 0x29, 0xA0, 0x71, 0xA9, 0x15, 0x8F, 0xE5, 0xC4, 0x32, 0xC8, 0xDB, 0x0A, 0xD1, 0xE3, 0x79
+
+[sha512_hmac_buff_32]
+digest =
+0x45, 0x72, 0x2B, 0xF4, 0x10, 0x82, 0xB6, 0xBC, 0xDB, 0x44, 0x34, 0x47, 0x55, 0xA7, 0x34, 0x4C,
+0x1C, 0x5D, 0x4D, 0x88, 0x58, 0x0B, 0xC2, 0x3E, 0xE7, 0x49, 0xF4, 0x6A, 0x99, 0x35, 0xE5, 0x2B,
+0xF4, 0x18, 0xD7, 0xD1, 0xAF, 0xC9, 0x81, 0xC5, 0x97, 0xE7, 0x94, 0xC4, 0xFD, 0x95, 0x7E, 0x1D,
+0x4E, 0xF4, 0x88, 0xC0, 0x10, 0x31, 0xB5, 0x1E, 0x39, 0x91, 0x1A, 0x48, 0xC2, 0x2F, 0xFE, 0xF6
+
+####################
+# sha_hmac_buff_64 #
+####################
+[sha1_hmac_buff_64]
+digest =
+0xD5, 0x0D, 0x9A, 0x30, 0xED, 0x71, 0x88, 0xF3, 0x72, 0x5D, 0x1C, 0xEF, 0x44, 0x36, 0xC1, 0x0C,
+0x66, 0x32, 0x15, 0xB5
+
+[sha224_hmac_buff_64]
+digest =
+0x59, 0xF0, 0x38, 0xA5, 0x7F, 0xDC, 0x55, 0xAF, 0x0A, 0x18, 0x0B, 0x34, 0xC1, 0x48, 0xFC, 0xB2,
+0xF8, 0x3B, 0xC2, 0x4A, 0x36, 0x0F, 0xEA, 0x4F, 0xD9, 0x46, 0x25, 0x1F
+
+[sha256_hmac_buff_64]
+digest =
+0x00, 0xD2, 0x5F, 0xAC, 0x89, 0x4B, 0x16, 0x08, 0x89, 0x8D, 0x4F, 0x8F, 0x56, 0xF5, 0xA2, 0x9B,
+0xDB, 0x91, 0x2F, 0xCE, 0x90, 0x0E, 0x4B, 0x17, 0x74, 0x8B, 0xC4, 0x8A, 0x47, 0xF0, 0x7C, 0x7A
+
+[sha384_hmac_buff_64]
+digest =
+0xFE, 0x79, 0x5C, 0x3D, 0xEA, 0x2C, 0x04, 0xB9, 0xE9, 0x37, 0x4E, 0x02, 0xE8, 0x56, 0xE8, 0x7E,
+0xB4, 0xA7, 0x3D, 0x37, 0xDD, 0x05, 0x58, 0x6A, 0x3D, 0x44, 0x0E, 0x84, 0xA9, 0xB4, 0x1F, 0x26,
+0xED, 0xAF, 0xA3, 0x4D, 0xA3, 0x6E, 0x30, 0x8F, 0xE3, 0x79, 0xB9, 0x58, 0x4E, 0xB3, 0x36, 0x1D
+
+[sha512_hmac_buff_64]
+digest =
+0x3A, 0xFE, 0x1E, 0xC8, 0x75, 0x0D, 0xF3, 0x15, 0x03, 0xCC, 0x7B, 0x50, 0xD1, 0x3E, 0x35, 0x97,
+0x4D, 0x80, 0xB8, 0x0B, 0x5E, 0x22, 0x4D, 0xB9, 0xD2, 0xC9, 0x0E, 0x24, 0xC4, 0xD9, 0xDD, 0x95,
+0xA1, 0x7D, 0xED, 0xE4, 0x28, 0x17, 0x7C, 0x50, 0x0B, 0x40, 0x81, 0x18, 0xEA, 0xFF, 0xBA, 0x1C,
+0x8C, 0x5D, 0x17, 0xB3, 0x5B, 0x39, 0x70, 0x93, 0x9A, 0xA0, 0x47, 0x59, 0x06, 0x01, 0xE5, 0xD0
+
+#####################
+# sha_hmac_buff_128 #
+#####################
+[sha1_hmac_buff_128]
+digest =
+0xF6, 0x21, 0x56, 0x15, 0xEC, 0xB6, 0xDE, 0x2B, 0x68, 0x79, 0x30, 0x69, 0x69, 0x82, 0x4B, 0x97,
+0x57, 0x61, 0xED, 0x01
+
+[sha224_hmac_buff_128]
+digest =
+0xC4, 0x5A, 0x48, 0x07, 0xCC, 0xFD, 0x68, 0x28, 0xCC, 0xDA, 0x13, 0x9C, 0xFE, 0x02, 0x22, 0x1C,
+0xD5, 0x2E, 0x9F, 0x0D, 0xED, 0x0B, 0x9B, 0x6D, 0xF3, 0x81, 0xD6, 0xDF
+
+[sha256_hmac_buff_128]
+digest =
+0x44, 0xA6, 0x70, 0x6C, 0xD2, 0xA6, 0x3F, 0xFB, 0x98, 0xB9, 0x6A, 0x71, 0x40, 0xCF, 0xD2, 0x40,
+0xA3, 0xC0, 0xC6, 0x4E, 0xF6, 0xD8, 0x4D, 0x87, 0xF5, 0x9B, 0xAC, 0xB0, 0x7B, 0xB7, 0x35, 0x5B
+
+[sha384_hmac_buff_128]
+digest =
+0xD7, 0xFA, 0xF1, 0xC5, 0xD6, 0xCE, 0xB9, 0x77, 0xD5, 0x6B, 0x4D, 0x7F, 0xFE, 0xAF, 0xDF, 0xCE,
+0x68, 0x1A, 0xB7, 0x3E, 0xA5, 0x9A, 0xF5, 0x79, 0x91, 0x96, 0x7C, 0xED, 0xC9, 0x02, 0x31, 0x67,
+0xC4, 0xD9, 0xD7, 0x5A, 0xD7, 0xF0, 0x82, 0x2C, 0xBD, 0x4A, 0xFF, 0x03, 0x25, 0xB6, 0x50, 0x17
+
+[sha512_hmac_buff_128]
+digest =
+0x78, 0xE1, 0x86, 0x74, 0xC1, 0x83, 0xDB, 0x3C, 0xAC, 0x2B, 0x17, 0xAC, 0x12, 0xAA, 0x30, 0xDE,
+0x2C, 0x86, 0xC4, 0x53, 0x4A, 0xC2, 0x78, 0x86, 0x3A, 0x8A, 0x96, 0x36, 0x2B, 0x09, 0xB1, 0x62,
+0xCA, 0xD0, 0x25, 0xB2, 0x5B, 0x3A, 0x76, 0xFA, 0xED, 0x5B, 0xFB, 0xF0, 0x8F, 0xF2, 0x06, 0x76,
+0x9B, 0xE0, 0x82, 0x25, 0x10, 0x5E, 0x8A, 0x13, 0xA1, 0x25, 0x3B, 0xB7, 0xFC, 0xC3, 0x15, 0xED
+
+#####################
+# sha_hmac_buff_256 #
+#####################
+[sha1_hmac_buff_256]
+digest =
+0x31, 0x36, 0xC8, 0xC7, 0x95, 0xDB, 0xF7, 0xF3, 0xA0, 0xFF, 0x89, 0x94, 0x8D, 0xB2, 0x3D, 0x5F,
+0x7A, 0xC1, 0x38, 0x97
+
+[sha224_hmac_buff_256]
+digest =
+0xAE, 0x1B, 0xA4, 0x42, 0xE6, 0x3C, 0x3D, 0xEE, 0xEE, 0xD1, 0x24, 0xD2, 0xFF, 0xD2, 0x1A, 0xF3,
+0x28, 0x87, 0x8F, 0x00, 0xE3, 0x74, 0xA1, 0xA2, 0xED, 0x70, 0x2D, 0x9D
+
+[sha256_hmac_buff_256]
+digest =
+0x73, 0x89, 0x54, 0x06, 0x65, 0x71, 0x1D, 0xA1, 0xAB, 0x4A, 0x0A, 0x36, 0x63, 0x92, 0xB8, 0x94,
+0x98, 0x98, 0xB5, 0x27, 0x9D, 0x9C, 0xF2, 0x91, 0x0C, 0x56, 0xC4, 0xEE, 0x99, 0xC6, 0xDE, 0xC7
+
+[sha384_hmac_buff_256]
+digest =
+0xFE, 0x6E, 0xA5, 0xDC, 0x82, 0x57, 0xBB, 0x4D, 0xA8, 0xF1, 0x2F, 0xD1, 0xA2, 0x05, 0x0A, 0xFE,
+0xF2, 0x64, 0x8A, 0xB3, 0x96, 0xBA, 0x06, 0x47, 0xA4, 0x40, 0x76, 0x8E, 0xB8, 0xE3, 0xAD, 0xD1,
+0xB2, 0x90, 0x9A, 0x02, 0xD1, 0x13, 0x4F, 0x74, 0x9B, 0xEB, 0x09, 0xFC, 0x7F, 0x99, 0xAC, 0xCD
+
+[sha512_hmac_buff_256]
+digest =
+0x9F, 0x71, 0xE0, 0x86, 0xF9, 0x76, 0x3F, 0xAB, 0x16, 0x4D, 0x9C, 0x28, 0x72, 0x3A, 0x17, 0x8F,
+0x35, 0xD1, 0x44, 0x18, 0xE0, 0x4A, 0xBD, 0x8B, 0x25, 0x9F, 0x6E, 0x5B, 0xE3, 0xF4, 0x1C, 0x40,
+0x2B, 0xF4, 0x61, 0x59, 0x60, 0x8D, 0x55, 0xD8, 0x18, 0x9B, 0x65, 0x8D, 0x9F, 0xAA, 0xB3, 0x71,
+0x3D, 0xB5, 0x70, 0xD2, 0x26, 0xF1, 0x55, 0xDC, 0xCE, 0x49, 0x40, 0xD7, 0x23, 0x6B, 0x20, 0x4A
+
+#####################
+# sha_hmac_buff_512 #
+#####################
+[sha1_hmac_buff_512]
+digest =
+0x0F, 0xF6, 0x33, 0x61, 0x16, 0x5C, 0xD1, 0x9E, 0xE0, 0x3D, 0x95, 0x93, 0xD2, 0x82, 0xDE, 0x2E,
+0xFA, 0x25, 0x56, 0xE4
+
+[sha224_hmac_buff_512]
+digest =
+0x85, 0x61, 0x57, 0x9F, 0x3E, 0x6A, 0xE1, 0x5C, 0x1D, 0xA3, 0x98, 0x9C, 0x28, 0x2C, 0x96, 0x8E,
+0xD0, 0x7D, 0x70, 0x7D, 0xF5, 0x98, 0xA4, 0x7C, 0x88, 0x1C, 0xA4, 0x5C
+
+[sha256_hmac_buff_512]
+digest =
+0xF4, 0x77, 0xB2, 0x2E, 0xAD, 0xBC, 0xA2, 0xCD, 0x49, 0xE0, 0xD2, 0xB0, 0xB3, 0xDE, 0x51, 0xD0,
+0xBC, 0xEF, 0x33, 0x50, 0x4F, 0x39, 0xBC, 0x94, 0x18, 0xF3, 0xDD, 0xFC, 0xB5, 0x8E, 0x44, 0x18
+
+[sha384_hmac_buff_512]
+digest =
+0x03, 0x4F, 0x86, 0xA0, 0xBC, 0x00, 0x44, 0xEB, 0x06, 0x75, 0x61, 0x13, 0x92, 0x60, 0x74, 0x44,
+0x1D, 0xCB, 0x2C, 0x8D, 0xEC, 0xE5, 0x5C, 0xBE, 0xA3, 0xAE, 0x5F, 0xE2, 0x71, 0xED, 0xAC, 0x69,
+0x9C, 0x6C, 0xE3, 0x20, 0x5C, 0x90, 0xC3, 0xF4, 0x36, 0x76, 0x70, 0xAE, 0x2A, 0x9C, 0xF5, 0x0B
+
+[sha512_hmac_buff_512]
+digest =
+0x3B, 0x83, 0x4B, 0x7F, 0x2A, 0x62, 0x9A, 0xF6, 0x42, 0x29, 0x7A, 0xF0, 0xCA, 0xE7, 0xBE, 0x1F,
+0x92, 0x5C, 0x66, 0x88, 0x58, 0xFA, 0xA4, 0xC7, 0xE7, 0xF0, 0xEA, 0x83, 0xB2, 0x0A, 0x2C, 0x3B,
+0xA7, 0xD4, 0xA4, 0x3E, 0x87, 0x00, 0x44, 0x2B, 0x3F, 0xB2, 0x4B, 0xFF, 0xAD, 0x9B, 0x07, 0x7D,
+0xA1, 0x09, 0x09, 0x60, 0x68, 0x2F, 0x7B, 0x16, 0x43, 0x0E, 0x22, 0xAF, 0x78, 0x8D, 0xC7, 0x16
+
+######################
+# sha_hmac_buff_1024 #
+######################
+[sha1_hmac_buff_1024]
+digest =
+0xCD, 0x04, 0x78, 0x4A, 0xD5, 0xFE, 0x95, 0xFB, 0x04, 0x85, 0xD5, 0x25, 0x58, 0xE6, 0x6C, 0x81,
+0xFA, 0x4B, 0xE7, 0x75
+
+[sha224_hmac_buff_1024]
+digest =
+0x10, 0xA5, 0x18, 0x56, 0x66, 0xDE, 0xE6, 0xF6, 0x71, 0xAF, 0x65, 0xEC, 0xE3, 0x77, 0x08, 0x58,
+0xD5, 0x45, 0xE6, 0x21, 0xF5, 0xCC, 0x2B, 0xE2, 0x76, 0x91, 0x51, 0xD9
+
+[sha256_hmac_buff_1024]
+digest =
+0xB4, 0x09, 0xF9, 0xA0, 0x5B, 0x80, 0xFF, 0xBA, 0x21, 0x5F, 0x57, 0xAB, 0x8B, 0x67, 0x89, 0x6D,
+0xB4, 0xE9, 0xEA, 0x5D, 0x77, 0x57, 0xBD, 0x0A, 0x60, 0xA4, 0x6B, 0xE8, 0xAA, 0x8A, 0x9B, 0xC7
+
+[sha384_hmac_buff_1024]
+digest =
+0x9D, 0xAE, 0x37, 0x87, 0x2C, 0x36, 0xFD, 0x51, 0xF1, 0xF2, 0x4C, 0x82, 0x27, 0xB5, 0x99, 0x63,
+0xAB, 0xD7, 0x62, 0x4A, 0x4E, 0xF1, 0xBF, 0xFB, 0xCA, 0x30, 0x55, 0x3F, 0x43, 0x85, 0xDE, 0x6B,
+0xA2, 0xB2, 0x8B, 0x45, 0x2A, 0x9D, 0x39, 0x29, 0x63, 0x1C, 0x04, 0x47, 0x58, 0x94, 0x5C, 0x91
+
+[sha512_hmac_buff_1024]
+digest =
+0xBF, 0x06, 0x94, 0xBB, 0x2E, 0x9C, 0x0A, 0xA4, 0xF3, 0x5F, 0x52, 0x4B, 0x42, 0x6E, 0xE1, 0x6C,
+0xA8, 0xAB, 0xB7, 0xE9, 0x6F, 0xAB, 0x77, 0xF8, 0x94, 0xD0, 0xA1, 0x81, 0xB8, 0x17, 0x21, 0xAC,
+0xB2, 0x3C, 0x73, 0x71, 0xDD, 0x76, 0xF8, 0x58, 0x84, 0xE7, 0xBB, 0xD4, 0x4A, 0x4F, 0x51, 0xF5,
+0x8C, 0x87, 0xAA, 0xAC, 0xCE, 0x5E, 0xFB, 0xD3, 0x1F, 0xA2, 0x49, 0xF2, 0x40, 0xAB, 0xB8, 0x76
+
+######################
+# sha_hmac_buff_2048 #
+######################
+[sha1_hmac_buff_2048]
+digest =
+0x56, 0xE7, 0x86, 0x23, 0x2C, 0x77, 0xBE, 0x2B, 0xE6, 0x76, 0xA1, 0xE9, 0xB1, 0x0F, 0x25, 0x15,
+0x1D, 0x59, 0x7F, 0x4A
+
+[sha224_hmac_buff_2048]
+digest =
+0xFD, 0xEB, 0x9E, 0x04, 0x53, 0xC7, 0xEA, 0x56, 0x21, 0x91, 0x6D, 0x8B, 0xDC, 0xA1, 0x0A, 0x2F,
+0x73, 0x4D, 0x05, 0x36, 0x43, 0x58, 0x71, 0xB2, 0x74, 0x6E, 0x7B, 0xAF
+
+[sha256_hmac_buff_2048]
+digest =
+0x25, 0x76, 0xA5, 0x64, 0x00, 0x13, 0xF7, 0xE7, 0x2D, 0x6D, 0x17, 0x36, 0x13, 0xF3, 0xC7, 0x57,
+0x70, 0x30, 0xB2, 0x76, 0x7A, 0xF4, 0x8F, 0xAF, 0x6B, 0x8C, 0x26, 0x88, 0x73, 0x2E, 0x49, 0xC0
+
+[sha384_hmac_buff_2048]
+digest =
+0xF0, 0x69, 0x78, 0x9A, 0x34, 0x88, 0x25, 0x3C, 0x3D, 0xF5, 0x42, 0x65, 0x25, 0x79, 0xB2, 0xFC,
+0x3B, 0x92, 0x46, 0xF9, 0x0D, 0x6D, 0xC1, 0x8E, 0x45, 0xBE, 0x8B, 0x70, 0x7D, 0x1B, 0x7E, 0xDE,
+0x93, 0x04, 0xC8, 0x59, 0x4B, 0x37, 0x2C, 0x20, 0x51, 0xB1, 0x91, 0x4F, 0xA4, 0x30, 0x09, 0xED
+
+[sha512_hmac_buff_2048]
+digest =
+0xAE, 0x8A, 0x42, 0x03, 0x11, 0x25, 0xE8, 0xC3, 0x4B, 0x4B, 0xC0, 0x29, 0x27, 0xE0, 0x0D, 0x27,
+0x13, 0x8F, 0x7D, 0x82, 0x72, 0x94, 0x4B, 0xF8, 0xC3, 0x1A, 0xE1, 0x5A, 0xF3, 0x8E, 0x23, 0x27,
+0x76, 0xE4, 0xF5, 0x3E, 0x24, 0x5B, 0xA3, 0xFA, 0x49, 0x8E, 0x57, 0xE3, 0x88, 0x15, 0x1D, 0xF6,
+0x27, 0xA5, 0xC8, 0xFB, 0x34, 0x44, 0x18, 0x6A, 0x64, 0xBE, 0xFB, 0x1E, 0x87, 0xA8, 0x36, 0x1E
diff --git a/app/test-crypto-perf/data/aes_cbc_256_sha.data b/app/test-crypto-perf/data/aes_cbc_256_sha.data
new file mode 100644
index 00000000..52dafb93
--- /dev/null
+++ b/app/test-crypto-perf/data/aes_cbc_256_sha.data
@@ -0,0 +1,504 @@
+# List of tests for AES-256 CBC:
+# 1) [sha1_hmac_buff_x]
+# 2) [sha224_hmac_buff_x]
+# 3) [sha256_hmac_buff_x]
+# 4) [sha384_hmac_buff_x]
+# 5) [sha512_hmac_buff_x]
+# where x is one of the values: 32, 64, 128, 256, 512, 1024, 2048
+
+##########
+# GLOBAL #
+##########
+plaintext =
+0xff, 0xca, 0xfb, 0xf1, 0x38, 0x20, 0x2f, 0x7b, 0x24, 0x98, 0x26, 0x7d, 0x1d, 0x9f, 0xb3, 0x93,
+0xd9, 0xef, 0xbd, 0xad, 0x4e, 0x40, 0xbd, 0x60, 0xe9, 0x48, 0x59, 0x90, 0x67, 0xd7, 0x2b, 0x7b,
+0x8a, 0xe0, 0x4d, 0xb0, 0x70, 0x38, 0xcc, 0x48, 0x61, 0x7d, 0xee, 0xd6, 0x35, 0x49, 0xae, 0xb4,
+0xaf, 0x6b, 0xdd, 0xe6, 0x21, 0xc0, 0x60, 0xce, 0x0a, 0xf4, 0x1c, 0x2e, 0x1c, 0x8d, 0xe8, 0x7b,
+0x59, 0xda, 0x19, 0x4f, 0xec, 0x07, 0x8e, 0xe2, 0xf0, 0x61, 0xf9, 0x27, 0x61, 0x6f, 0xf8, 0xdf,
+0x62, 0x4d, 0xaf, 0x06, 0xfe, 0x41, 0xa6, 0xa6, 0xf9, 0xa2, 0x06, 0x40, 0xb3, 0x04, 0xbd, 0xe6,
+0xc8, 0x17, 0xfb, 0x56, 0x6f, 0xa9, 0x3b, 0x8e, 0xa6, 0x58, 0xdc, 0x91, 0x17, 0x58, 0x42, 0x95,
+0xa3, 0x7c, 0x81, 0x78, 0xa6, 0x3d, 0x3f, 0x75, 0x74, 0x17, 0x1a, 0xd3, 0x6c, 0x2f, 0x48, 0x39,
+0x20, 0x20, 0xc1, 0x9a, 0x29, 0x84, 0x7d, 0x2d, 0x52, 0xa1, 0xf9, 0x5c, 0xf3, 0x4f, 0x91, 0xfc,
+0x75, 0xcf, 0xd6, 0x2d, 0xe7, 0x9a, 0x59, 0x6e, 0x00, 0x0e, 0x8d, 0x22, 0x17, 0xbd, 0xa0, 0xdd,
+0x79, 0x1f, 0x71, 0xe6, 0xcd, 0x2f, 0xb1, 0xb6, 0xbc, 0xc3, 0xdb, 0x02, 0x91, 0x41, 0x9b, 0x09,
+0xa9, 0xd2, 0x7e, 0xbd, 0x2c, 0x18, 0xae, 0xc0, 0x93, 0x0c, 0x02, 0x9a, 0xdb, 0x4e, 0xaa, 0xeb,
+0x84, 0x4b, 0x43, 0x5e, 0xf0, 0x98, 0xf2, 0x5f, 0x86, 0x70, 0x96, 0x90, 0x15, 0x30, 0xcf, 0x3a,
+0xc9, 0x33, 0x21, 0xec, 0x59, 0x86, 0xfc, 0x65, 0x7d, 0xbe, 0xb9, 0xf8, 0x97, 0xf9, 0x30, 0xa9,
+0x6d, 0xfc, 0x0c, 0x6e, 0x36, 0x67, 0xd5, 0xa6, 0x67, 0xd9, 0xbd, 0x9b, 0x34, 0x5d, 0xa7, 0xdd,
+0xda, 0x46, 0x33, 0x25, 0x60, 0x4a, 0x18, 0xf1, 0x55, 0x07, 0xb2, 0xb7, 0x26, 0x7b, 0xa6, 0x1e,
+0x77, 0xbe, 0x7f, 0x35, 0x46, 0xdf, 0x56, 0x9c, 0x22, 0x19, 0xc8, 0x85, 0xa2, 0x45, 0xb2, 0xad,
+0xf9, 0x26, 0x66, 0xab, 0xfc, 0x97, 0x4b, 0x51, 0x32, 0x36, 0xbc, 0xad, 0xcf, 0x54, 0x3a, 0x4f,
+0x94, 0xdb, 0xd2, 0xf9, 0x67, 0x1b, 0x3b, 0xe5, 0xb2, 0x1d, 0xc5, 0x52, 0x64, 0x2c, 0x06, 0x44,
+0xcf, 0x18, 0x83, 0xe0, 0xd8, 0x04, 0x92, 0xa9, 0xc4, 0x3c, 0x8b, 0xa3, 0x2b, 0xbc, 0x88, 0x7e,
+0xc0, 0x76, 0xa7, 0xe2, 0x7b, 0x47, 0x90, 0xf2, 0xaa, 0x0a, 0x34, 0x1b, 0x91, 0x12, 0xd2, 0xd0,
+0x82, 0x45, 0xf4, 0x57, 0xf1, 0xbd, 0x91, 0x5e, 0xab, 0x41, 0x4c, 0xdf, 0x91, 0x4c, 0xdd, 0x67,
+0x04, 0xa0, 0x98, 0x23, 0x8c, 0x24, 0xbe, 0xd6, 0x80, 0xb3, 0x6d, 0x04, 0xa1, 0x77, 0x43, 0xa5,
+0xee, 0xb7, 0xce, 0xb1, 0x48, 0x43, 0x94, 0x61, 0x15, 0x20, 0x9d, 0xce, 0xd0, 0x14, 0x95, 0x37,
+0xc8, 0x64, 0xa3, 0x2d, 0x3d, 0xe3, 0xff, 0xb4, 0x55, 0x83, 0x84, 0x41, 0x50, 0x57, 0xbd, 0x5a,
+0x0c, 0xe4, 0xda, 0x3b, 0x36, 0x4d, 0x21, 0xb5, 0x6f, 0x73, 0x2a, 0x8c, 0x78, 0x4f, 0x9b, 0x83,
+0xda, 0x11, 0x3c, 0xf0, 0xc9, 0x7e, 0xa6, 0x48, 0x34, 0x53, 0x62, 0xd3, 0x0c, 0xff, 0xb1, 0x74,
+0xd6, 0xea, 0xa5, 0xfc, 0x13, 0x1c, 0x05, 0xa8, 0xc0, 0xbc, 0x95, 0x9c, 0x8c, 0xf6, 0x8c, 0xc3,
+0xf3, 0x69, 0xab, 0x93, 0x65, 0xc0, 0xb7, 0x7e, 0xb0, 0x16, 0x7c, 0xb5, 0x5f, 0x05, 0x28, 0xc9,
+0x09, 0x4e, 0x2a, 0x32, 0x87, 0xb3, 0xab, 0xf8, 0x4c, 0xab, 0xeb, 0x3b, 0x6a, 0xa0, 0x1d, 0x7f,
+0xef, 0xe5, 0x9b, 0xa4, 0xb7, 0xd7, 0xc2, 0x5e, 0x03, 0x0f, 0x99, 0xeb, 0xb1, 0xb1, 0xa6, 0x9d,
+0x1c, 0x7c, 0x5c, 0x94, 0x8b, 0x6e, 0x11, 0x7a, 0xb3, 0x6d, 0x1e, 0x61, 0x64, 0xc3, 0x7d, 0x1c,
+0xb3, 0x54, 0x65, 0x08, 0x3b, 0xda, 0x97, 0xb9, 0x75, 0xc1, 0x2b, 0x3e, 0xa8, 0x5c, 0x3c, 0x2d,
+0x81, 0x5b, 0xbf, 0x5a, 0x13, 0x0e, 0xeb, 0x66, 0xc0, 0x0b, 0x8f, 0x04, 0x68, 0x68, 0x9b, 0xe3,
+0x0d, 0x84, 0xe0, 0xcf, 0x83, 0xd7, 0x62, 0x48, 0xc1, 0x31, 0xa5, 0xd5, 0xbc, 0xe3, 0xa3, 0xa5,
+0xb6, 0xd1, 0xfd, 0x81, 0x91, 0x4d, 0xbd, 0xc4, 0x62, 0x4f, 0xe3, 0xd5, 0x99, 0x14, 0xf1, 0xcd,
+0xf4, 0x7d, 0x13, 0xda, 0x68, 0x0a, 0xca, 0xd6, 0x82, 0x0b, 0xf6, 0xea, 0xad, 0x78, 0xa4, 0xc8,
+0x14, 0x7a, 0xec, 0x11, 0xd3, 0x16, 0x86, 0x9f, 0x17, 0x37, 0x6a, 0x06, 0x56, 0xaa, 0x1b, 0xd1,
+0xaf, 0x85, 0x95, 0x21, 0x36, 0x69, 0xec, 0x1b, 0x56, 0x84, 0x01, 0x3f, 0x4d, 0x34, 0x3d, 0x2d,
+0x38, 0x57, 0x2d, 0x7e, 0xd9, 0x7b, 0x2d, 0x81, 0x86, 0xd4, 0x7c, 0x83, 0x12, 0x1d, 0x9d, 0x27,
+0x72, 0x1b, 0x5e, 0xf4, 0x15, 0xa5, 0xcd, 0xb7, 0x5f, 0xbb, 0x49, 0xa1, 0xd9, 0xdd, 0x8d, 0xad,
+0xa9, 0x2c, 0x65, 0x18, 0x91, 0xfd, 0xd2, 0xd4, 0x09, 0x60, 0x0c, 0xfd, 0xa4, 0xe1, 0x25, 0x87,
+0x32, 0x64, 0x7b, 0x99, 0xd7, 0x61, 0x2f, 0xd4, 0x73, 0xdd, 0x85, 0x26, 0x08, 0x92, 0xc0, 0xe1,
+0x4f, 0x0c, 0x76, 0x5b, 0x26, 0x69, 0xdb, 0x78, 0x35, 0x65, 0xb9, 0x58, 0x1f, 0x9c, 0x0f, 0x18,
+0x95, 0xfe, 0x40, 0xfc, 0xf7, 0x93, 0x71, 0x70, 0x8b, 0x73, 0xdc, 0xb0, 0x88, 0x72, 0x19, 0x26,
+0x94, 0x26, 0xa7, 0xaa, 0x00, 0x72, 0x61, 0x53, 0xd2, 0x5d, 0x8f, 0x5e, 0x51, 0x88, 0x2d, 0xa4,
+0x28, 0xd5, 0xaf, 0x2d, 0xd2, 0x84, 0x39, 0x75, 0x1e, 0xe7, 0xf0, 0x23, 0xc0, 0x4f, 0x8d, 0xdd,
+0x5c, 0x90, 0xef, 0x6e, 0x53, 0xe0, 0x54, 0x67, 0xe1, 0x5b, 0x10, 0xf1, 0xf5, 0xf8, 0x64, 0x34,
+0x94, 0xeb, 0x37, 0xf7, 0xe9, 0xaa, 0x6c, 0xa4, 0xd8, 0x74, 0x6d, 0xca, 0x8d, 0x1a, 0x31, 0x73,
+0xca, 0xb4, 0xc7, 0x47, 0x34, 0x7f, 0xf8, 0x24, 0x9b, 0xfa, 0xc9, 0xcc, 0xa8, 0x61, 0xb4, 0x0e,
+0x4d, 0x68, 0xc7, 0xa0, 0xcb, 0xea, 0xf0, 0xcc, 0x0a, 0x6c, 0xf2, 0x33, 0x42, 0x99, 0x6c, 0xd8,
+0x74, 0x7f, 0x1e, 0x8a, 0xa3, 0x0a, 0x48, 0x4b, 0x7e, 0xbe, 0xdb, 0x7f, 0x56, 0x69, 0x43, 0xe8,
+0xbf, 0x12, 0xc4, 0x7b, 0xc2, 0xd9, 0xfa, 0x5c, 0xeb, 0x45, 0xca, 0x07, 0x3d, 0xc0, 0xcd, 0x68,
+0x8b, 0xd0, 0x79, 0xea, 0x0a, 0x78, 0x06, 0xdc, 0x81, 0xd7, 0x32, 0x18, 0xb3, 0x65, 0xbe, 0x47,
+0xbb, 0xfa, 0x17, 0x09, 0xe9, 0x31, 0x95, 0x30, 0xef, 0x07, 0x44, 0xec, 0xd0, 0x98, 0x98, 0xc0,
+0x6b, 0x71, 0x5b, 0x23, 0xb8, 0xb6, 0xd2, 0x21, 0xff, 0x51, 0xdd, 0xae, 0x48, 0x29, 0x75, 0x0c,
+0xc3, 0x3d, 0x91, 0xfe, 0x9d, 0xa8, 0x5e, 0xb2, 0x34, 0xb2, 0xd3, 0x81, 0xf6, 0x27, 0x9c, 0xac,
+0x6b, 0x20, 0x56, 0x86, 0xa5, 0x4f, 0x7a, 0xdb, 0xf9, 0xac, 0xa9, 0x8e, 0xe3, 0x73, 0x21, 0x99,
+0x71, 0x2d, 0xaf, 0x27, 0x92, 0x0c, 0xc7, 0xd3, 0x85, 0xb3, 0x40, 0xda, 0x13, 0x4a, 0x04, 0x41,
+0x54, 0xf8, 0xf2, 0x55, 0xb7, 0x80, 0xdd, 0x77, 0xba, 0x01, 0x7a, 0x31, 0xbd, 0x6b, 0xdc, 0x5c,
+0x59, 0xf4, 0x2b, 0xca, 0x25, 0xbb, 0x50, 0xba, 0xfa, 0x42, 0x38, 0xd2, 0x28, 0x10, 0x8b, 0x7b,
+0x96, 0x45, 0x30, 0xbb, 0x7f, 0xf4, 0x5a, 0xf7, 0x28, 0x6f, 0x47, 0xdc, 0xd2, 0x82, 0xf2, 0xf7,
+0xdd, 0x20, 0xb5, 0x0c, 0x7e, 0x53, 0x85, 0xa7, 0xfc, 0x3b, 0x1a, 0xc0, 0x07, 0x7b, 0xa1, 0x43,
+0x05, 0x18, 0x19, 0xd3, 0xfc, 0x41, 0xc2, 0xce, 0xd9, 0x5b, 0x4b, 0x63, 0xe2, 0x8f, 0x86, 0x3a,
+0xd1, 0xd0, 0x1d, 0x74, 0x2e, 0xbc, 0xd3, 0xce, 0x08, 0x0c, 0x10, 0x7a, 0x42, 0x60, 0xc5, 0x3a,
+0xa6, 0xd8, 0xb0, 0x52, 0xcf, 0x53, 0x28, 0x70, 0x45, 0xb7, 0x72, 0x7d, 0x77, 0x66, 0x54, 0x3d,
+0x38, 0x26, 0xcf, 0xd5, 0xbf, 0xe4, 0x80, 0x10, 0xba, 0x2b, 0xe8, 0xdc, 0xc3, 0xfe, 0x28, 0xa3,
+0x52, 0x58, 0x70, 0x4a, 0xde, 0x84, 0x33, 0x5e, 0x93, 0x04, 0xa4, 0x7c, 0xe7, 0xea, 0x8e, 0xba,
+0xeb, 0x8a, 0x19, 0x26, 0x6a, 0x7f, 0x7c, 0x4a, 0x5b, 0xb4, 0x0d, 0xfc, 0xc8, 0x11, 0x1b, 0x41,
+0x68, 0x5d, 0x2a, 0x25, 0x04, 0x4f, 0xc8, 0xf4, 0x65, 0xfc, 0xb9, 0x58, 0xeb, 0xb4, 0x67, 0x50,
+0x24, 0xf5, 0x43, 0xf6, 0x91, 0x4a, 0xb0, 0x0f, 0x32, 0xe0, 0x07, 0x75, 0x69, 0x1b, 0x3c, 0xeb,
+0xb2, 0x65, 0x26, 0x6f, 0xb8, 0x79, 0xe0, 0x78, 0x8c, 0xdc, 0x39, 0x24, 0x48, 0x76, 0x11, 0xd4,
+0x3a, 0xc5, 0xd2, 0x2b, 0xaa, 0x55, 0xfb, 0x92, 0x12, 0x2d, 0x88, 0x05, 0xd1, 0xb1, 0x31, 0x36,
+0x1f, 0xc2, 0x44, 0x1c, 0xab, 0x2e, 0xcd, 0x1c, 0x72, 0x86, 0xf6, 0x83, 0x87, 0x2e, 0x8b, 0xdb,
+0xaa, 0x16, 0x0e, 0x1b, 0xe6, 0x5c, 0x4d, 0x2f, 0x82, 0xbd, 0x49, 0x11, 0x60, 0x22, 0x0f, 0xde,
+0x3b, 0x2b, 0x20, 0x1d, 0x56, 0xb7, 0x21, 0xae, 0x0b, 0x26, 0x4f, 0xde, 0x3d, 0xa6, 0x3f, 0x61,
+0x81, 0xe2, 0x76, 0x60, 0x08, 0xc5, 0x4b, 0x18, 0x0b, 0xd1, 0xf5, 0xff, 0x8d, 0x1a, 0x96, 0x76,
+0x51, 0x15, 0x05, 0x4d, 0x8c, 0x6b, 0x12, 0x90, 0x47, 0xd4, 0xa4, 0x38, 0xb9, 0x48, 0xe4, 0x4c,
+0x05, 0x69, 0x6a, 0x8b, 0x9d, 0x7c, 0xa1, 0xbc, 0x77, 0xeb, 0x86, 0x93, 0x0a, 0x15, 0x84, 0xba,
+0x8f, 0xf5, 0x7c, 0x44, 0x75, 0x31, 0x79, 0x16, 0xc1, 0x81, 0x1a, 0xb6, 0xe6, 0x6c, 0x3d, 0xb8,
+0x15, 0x46, 0xf5, 0xbe, 0x46, 0x04, 0xa6, 0xec, 0xec, 0xd1, 0x74, 0x8b, 0x87, 0x2b, 0xdb, 0xd0,
+0x9f, 0xb3, 0x99, 0x9d, 0x87, 0x8c, 0xc6, 0xaa, 0xd4, 0x64, 0x45, 0xbd, 0xe8, 0xed, 0xa3, 0xc1,
+0x2a, 0x41, 0x1e, 0x26, 0xaf, 0x86, 0x16, 0xed, 0x80, 0x08, 0xca, 0x64, 0x21, 0x3a, 0xce, 0x21,
+0x4c, 0x41, 0xb9, 0x13, 0x2d, 0xf7, 0x1b, 0xdf, 0x2b, 0x33, 0x69, 0xe7, 0x5c, 0x8c, 0x7b, 0xfb,
+0xe3, 0x41, 0xe9, 0xce, 0xd7, 0xff, 0x0e, 0x54, 0xfe, 0xb0, 0x71, 0x78, 0xdc, 0xde, 0x7e, 0xdd,
+0x1f, 0x1c, 0x4a, 0x8f, 0x3e, 0x16, 0xfd, 0x91, 0x82, 0x94, 0xd4, 0xc2, 0xf7, 0xb2, 0x77, 0x89,
+0x16, 0x2c, 0xba, 0xb6, 0xbd, 0xed, 0x95, 0x43, 0x05, 0x9b, 0xf2, 0xc4, 0xbe, 0x46, 0x43, 0x90,
+0x1d, 0xd8, 0x24, 0x02, 0xd2, 0xea, 0xf4, 0x08, 0xd9, 0xf7, 0x84, 0x0e, 0xc6, 0xe7, 0x44, 0xdb,
+0xb8, 0xac, 0x0a, 0x53, 0x39, 0x61, 0x43, 0xdc, 0x22, 0x28, 0x8f, 0x22, 0x2f, 0x73, 0xbf, 0x59,
+0x2d, 0x3c, 0x8c, 0x0b, 0xcc, 0x2a, 0x67, 0xe0, 0x5b, 0x5c, 0x65, 0x5e, 0x6d, 0x98, 0x99, 0xaa,
+0x3b, 0x89, 0x12, 0xe2, 0x99, 0xf6, 0x15, 0xa7, 0xd2, 0x6a, 0x79, 0xb4, 0xf6, 0x0b, 0xf5, 0x0d,
+0x2d, 0x4c, 0xcb, 0x1b, 0x35, 0x93, 0x61, 0x32, 0xa1, 0x8a, 0xa8, 0x27, 0xe8, 0x95, 0x5a, 0x56,
+0x59, 0x04, 0xfe, 0xce, 0xc2, 0xd8, 0x92, 0x97, 0xb2, 0x54, 0x63, 0xd0, 0x3b, 0xde, 0x10, 0x34,
+0x32, 0x16, 0x05, 0x51, 0x1d, 0xfc, 0x96, 0x8e, 0xf1, 0xf6, 0x4b, 0xd7, 0x48, 0x22, 0xce, 0xca,
+0x1c, 0x6b, 0xab, 0x1f, 0x59, 0xa2, 0x74, 0xd6, 0xcd, 0x15, 0x07, 0xab, 0xa2, 0xd5, 0x22, 0x81,
+0xec, 0x20, 0x14, 0x36, 0xac, 0xe4, 0x25, 0x7d, 0xe6, 0x09, 0x00, 0x2c, 0x92, 0x4d, 0x4e, 0xbf,
+0xbf, 0xa1, 0xd4, 0xbe, 0x6b, 0xd4, 0x1f, 0x95, 0x9b, 0xf3, 0xda, 0x99, 0xad, 0xa4, 0x6c, 0x73,
+0x55, 0xd1, 0x9d, 0x4b, 0x16, 0xd4, 0x06, 0xec, 0x46, 0x3d, 0xb7, 0xe7, 0xce, 0xd0, 0x1d, 0x94,
+0x65, 0xde, 0x61, 0xb3, 0xc1, 0x10, 0x65, 0xe5, 0x68, 0x9b, 0xb0, 0xb4, 0x43, 0x0b, 0x92, 0xaf,
+0xb7, 0x40, 0xa2, 0xe5, 0x06, 0x3d, 0x72, 0x00, 0xc5, 0x39, 0xab, 0x35, 0x29, 0x22, 0x4c, 0xa5,
+0xa5, 0x3f, 0x22, 0x90, 0x53, 0xd2, 0x36, 0x63, 0x1e, 0xd3, 0x33, 0xa5, 0xbb, 0x3d, 0xa3, 0x0c,
+0x14, 0x9c, 0x2e, 0x6d, 0x9a, 0x7a, 0xf7, 0xf1, 0x56, 0x66, 0xe5, 0x8d, 0x53, 0x83, 0x34, 0x3f,
+0xa9, 0x83, 0x84, 0x68, 0x90, 0xc9, 0x51, 0xc2, 0xd4, 0x8e, 0x6c, 0xc7, 0x6d, 0xa7, 0x19, 0x61,
+0xa7, 0x2e, 0x36, 0xbc, 0xd2, 0x0f, 0x17, 0x49, 0xd4, 0x6b, 0x36, 0x63, 0xfb, 0x1d, 0xf4, 0xb0,
+0x6b, 0xcf, 0x34, 0x5f, 0xd2, 0x77, 0xae, 0x12, 0xaf, 0xb3, 0xdf, 0x52, 0xf7, 0xc2, 0xc8, 0xf2,
+0x63, 0x61, 0xb6, 0x3e, 0x39, 0xf2, 0xa7, 0x1a, 0x89, 0x9d, 0x0e, 0x8f, 0xaf, 0xe1, 0x01, 0x24,
+0xa6, 0x3a, 0xd5, 0x9a, 0x62, 0x67, 0xa3, 0x66, 0xee, 0xbc, 0xc5, 0x94, 0x4b, 0xc3, 0x15, 0xa1,
+0x7e, 0x07, 0x07, 0x2b, 0xb7, 0x43, 0x2a, 0xb4, 0xb8, 0x25, 0x88, 0x86, 0x23, 0xab, 0xdf, 0x05,
+0xbe, 0x46, 0x56, 0xd7, 0xda, 0xd6, 0x75, 0x53, 0xd9, 0xc8, 0x26, 0x8f, 0x39, 0x67, 0xed, 0x21,
+0x53, 0x1c, 0x9c, 0x89, 0x46, 0xd3, 0xfe, 0x54, 0xe6, 0x1d, 0x02, 0xb9, 0x25, 0x82, 0x66, 0xe6,
+0xf9, 0x45, 0xd9, 0x3f, 0xa5, 0x71, 0xc1, 0x46, 0x66, 0x7a, 0x27, 0x8a, 0x82, 0xc9, 0x21, 0xe9,
+0x17, 0xab, 0x6c, 0xef, 0x45, 0xe5, 0x88, 0x93, 0x87, 0x80, 0xb3, 0x85, 0x25, 0x96, 0x19, 0x41,
+0xab, 0xd6, 0xba, 0x92, 0x76, 0x21, 0x8a, 0x58, 0xbd, 0xe2, 0x4b, 0xec, 0x45, 0x59, 0x2c, 0x13,
+0x1a, 0xb5, 0x13, 0x25, 0x44, 0xe7, 0x71, 0x26, 0x0a, 0x34, 0x33, 0xb9, 0x57, 0x15, 0xa4, 0x90,
+0x60, 0x11, 0x05, 0x8e, 0xc8, 0x8e, 0x74, 0x52, 0x4b, 0x31, 0x71, 0xeb, 0x66, 0x7e, 0xee, 0xb1,
+0x0a, 0x21, 0x52, 0xc0, 0x1a, 0xe9, 0xa1, 0x5a, 0xe3, 0x3a, 0x24, 0xfb, 0xf3, 0x1e, 0xd6, 0x83,
+0x1d, 0xfb, 0x81, 0xa8, 0x91, 0x60, 0x9e, 0xbc, 0x59, 0x20, 0xc9, 0x9e, 0x71, 0x19, 0x83, 0x2b,
+0x6a, 0x48, 0x4e, 0x6b, 0x46, 0x82, 0x89, 0xda, 0x60, 0xff, 0x1a, 0x46, 0x94, 0x55, 0xda, 0xe5,
+0x99, 0xfa, 0x84, 0xd7, 0x3b, 0xb9, 0xa5, 0x34, 0x87, 0x86, 0x5e, 0x6d, 0x75, 0x9a, 0xe7, 0x09,
+0xb8, 0xe6, 0x71, 0x15, 0x10, 0x56, 0xd7, 0xc1, 0xc8, 0xb2, 0x62, 0xbc, 0xec, 0xe0, 0x94, 0xa0,
+0xcd, 0xb4, 0x04, 0xa9, 0xc3, 0x51, 0xee, 0xf8, 0x2e, 0x42, 0x9a, 0xaa, 0x34, 0xd3, 0xb9, 0xb0,
+0x36, 0xf9, 0x47, 0xc1, 0x07, 0x49, 0xde, 0xb8, 0x32, 0x8a, 0x87, 0x68, 0x56, 0x9a, 0x35, 0x79,
+0xd1, 0xac, 0x49, 0x38, 0xc6, 0xfe, 0xfd, 0xdf, 0x6f, 0x3c, 0xda, 0x48, 0xbd, 0x23, 0xfd, 0x85,
+0xf0, 0x96, 0xee, 0x1c, 0x27, 0x18, 0x86, 0xa6, 0xf0, 0x7b, 0xd8, 0x3c, 0xc7, 0x22, 0x3e, 0x2f,
+0xac, 0xb1, 0x37, 0xbd, 0x84, 0x4b, 0xe3, 0x92, 0x82, 0xd0, 0x25, 0x14, 0x22, 0x65, 0xed, 0xeb,
+0xef, 0xb9, 0xb6, 0xe4, 0x95, 0x18, 0x0d, 0x2b, 0x8d, 0x4f, 0xaf, 0xc0, 0xa0, 0x05, 0x8b, 0x35,
+0x5b, 0x94, 0xb2, 0x68, 0x26, 0x4f, 0x4a, 0x9e, 0x85, 0x0e, 0x46, 0xe0, 0x4f, 0x60, 0x66, 0x01,
+0xa4, 0x39, 0xe8, 0x8b, 0x2a, 0x50, 0xf5, 0x18, 0x70, 0xe2, 0xfc, 0xd6, 0xbe, 0xd3, 0x46, 0x4b
+
+ciphertext =
+0x77, 0xF9, 0xF7, 0x7A, 0xA3, 0xCB, 0x68, 0x1A, 0x11, 0x70, 0xD8, 0x7A, 0xB6, 0xE2, 0x37, 0x7E,
+0xD1, 0x57, 0x1C, 0x8E, 0x85, 0xD8, 0x08, 0xBF, 0x57, 0x1F, 0x21, 0x6C, 0xAD, 0xAD, 0x47, 0x1E,
+0x0D, 0x6B, 0x79, 0x39, 0x15, 0x4E, 0x5B, 0x59, 0x2D, 0x76, 0x87, 0xA6, 0xD6, 0x47, 0x8F, 0x82,
+0xB8, 0x51, 0x91, 0x32, 0x60, 0xCB, 0x97, 0xDE, 0xBE, 0xF0, 0xAD, 0xFC, 0x23, 0x2E, 0x22, 0x02,
+0x46, 0x17, 0x3F, 0x8F, 0x24, 0x0E, 0x31, 0x80, 0xEA, 0xD6, 0x85, 0x50, 0xA5, 0xEE, 0xB7, 0x15,
+0xD0, 0x2F, 0xA6, 0x92, 0xEF, 0xCD, 0x8B, 0x91, 0x4A, 0xEA, 0x03, 0x92, 0xB4, 0x65, 0x19, 0x66,
+0x9E, 0x73, 0x79, 0xCE, 0xA7, 0x4D, 0x8B, 0x77, 0x74, 0x44, 0x1C, 0x9F, 0xEE, 0x15, 0x91, 0xF2,
+0xF9, 0x50, 0x7D, 0x2A, 0xC2, 0x6B, 0x58, 0x36, 0xF7, 0x62, 0x25, 0x9C, 0x71, 0x34, 0x43, 0x14,
+0x9E, 0x17, 0xF7, 0xB7, 0x56, 0x1D, 0x91, 0x4C, 0xF6, 0x6C, 0xF2, 0x19, 0x39, 0xA2, 0x29, 0xA3,
+0x22, 0x4F, 0x14, 0x18, 0x76, 0x8A, 0x59, 0xAD, 0x3E, 0x5F, 0xDC, 0xC9, 0x80, 0x07, 0x51, 0xB2,
+0x90, 0x6A, 0xB9, 0x0C, 0xA4, 0x3F, 0x42, 0xBD, 0x40, 0xB7, 0xA7, 0xF5, 0x85, 0xBF, 0xEA, 0xD8,
+0x89, 0xA9, 0xE9, 0xC7, 0x25, 0xEC, 0xFF, 0x80, 0xE8, 0x94, 0x3B, 0xE7, 0xD1, 0x68, 0xDA, 0x1C,
+0xFA, 0x5D, 0xCD, 0x68, 0x09, 0x72, 0x63, 0xBA, 0x34, 0x56, 0xD4, 0x5F, 0xB0, 0xA7, 0xAE, 0xCF,
+0xFB, 0xA8, 0xBD, 0x52, 0x79, 0x98, 0xF6, 0x39, 0x52, 0xD3, 0xA7, 0xE1, 0xFB, 0x75, 0x76, 0x87,
+0xBC, 0x11, 0x18, 0x17, 0x47, 0x65, 0xDA, 0xE3, 0x25, 0x3E, 0x17, 0x43, 0x7B, 0x0D, 0x8B, 0x7F,
+0x20, 0xFF, 0x03, 0xFA, 0x28, 0xC7, 0xD3, 0xF8, 0xC2, 0xA8, 0xC1, 0xE5, 0xDA, 0x77, 0x41, 0x28,
+0x06, 0xC4, 0x20, 0xFC, 0x1B, 0xAA, 0x99, 0x78, 0x5C, 0x28, 0xDA, 0x9A, 0x2B, 0x6C, 0x56, 0x7E,
+0x63, 0x34, 0xCF, 0xCD, 0x5D, 0xB6, 0x13, 0xF5, 0x98, 0x08, 0x2E, 0x02, 0x2C, 0x63, 0xEC, 0xE3,
+0x43, 0xE8, 0x3B, 0xE6, 0x59, 0x8C, 0x61, 0x60, 0xDD, 0x33, 0x3F, 0x29, 0xA4, 0xA5, 0xD5, 0x33,
+0xEF, 0xAA, 0x7E, 0x8C, 0xAE, 0x9C, 0x1B, 0x0D, 0x74, 0xF6, 0x01, 0x8C, 0xF1, 0x04, 0xEB, 0x62,
+0x75, 0xC0, 0x98, 0x24, 0xB2, 0xDF, 0xB1, 0xBA, 0x50, 0xC3, 0x01, 0x5B, 0x13, 0x3A, 0xF9, 0x7A,
+0xF6, 0xF4, 0x75, 0xAF, 0x55, 0x54, 0x10, 0xBE, 0x11, 0x91, 0x7D, 0xF6, 0x66, 0x79, 0xE6, 0x4D,
+0x0E, 0x0B, 0x70, 0x3C, 0x00, 0x68, 0x2E, 0xA3, 0x84, 0xCE, 0xE1, 0x0A, 0xDC, 0xFE, 0xF9, 0xD2,
+0x59, 0x23, 0x05, 0xCA, 0x79, 0xF0, 0x89, 0xB9, 0x76, 0xD9, 0xAA, 0x04, 0x43, 0x30, 0x97, 0x15,
+0x59, 0x0B, 0x7C, 0x22, 0x0E, 0x72, 0xC6, 0x61, 0x19, 0x35, 0xC3, 0x6A, 0xF2, 0x6B, 0x39, 0xB6,
+0x1B, 0xD3, 0xE3, 0xF7, 0xCB, 0x46, 0x26, 0x97, 0x39, 0xBA, 0x41, 0xD8, 0xA4, 0x48, 0x96, 0xBC,
+0x22, 0x38, 0xCF, 0xE2, 0xCF, 0xD6, 0x36, 0x30, 0xD9, 0x96, 0x73, 0xAF, 0xA4, 0x0F, 0x52, 0x9D,
+0x64, 0x28, 0xAB, 0x3D, 0xF7, 0x1B, 0xA6, 0xDB, 0x47, 0x09, 0x45, 0x48, 0x30, 0x27, 0x4B, 0x37,
+0x38, 0x5B, 0xC5, 0x90, 0x8C, 0xCC, 0x82, 0x48, 0x7A, 0x98, 0x1C, 0x46, 0x24, 0xB1, 0xDA, 0xB9,
+0x6C, 0x30, 0x48, 0xF3, 0x6C, 0x84, 0xBD, 0x3F, 0x95, 0x3E, 0xC1, 0x27, 0x8B, 0x3C, 0x2F, 0xDA,
+0xD9, 0xF6, 0x54, 0x73, 0x04, 0x38, 0xD6, 0x45, 0xC5, 0x5C, 0x92, 0xDE, 0xB2, 0xE3, 0x62, 0x31,
+0xCE, 0x70, 0xD7, 0x11, 0x5E, 0x7A, 0x63, 0x0F, 0xA1, 0xD4, 0x8A, 0x2B, 0xDE, 0x38, 0xAA, 0x9F,
+0x33, 0x71, 0x67, 0x05, 0xDB, 0x48, 0xE4, 0x09, 0x73, 0x3A, 0x35, 0x1F, 0xC2, 0x0F, 0x44, 0x99,
+0x35, 0xBD, 0xBD, 0x7E, 0x85, 0x77, 0x46, 0x3A, 0x41, 0x79, 0xAB, 0x67, 0xA5, 0x87, 0xBD, 0x96,
+0xAE, 0xC2, 0x99, 0x35, 0xC3, 0xCA, 0x90, 0x36, 0xB1, 0x15, 0x9C, 0x37, 0x62, 0x54, 0xCA, 0x72,
+0x10, 0x07, 0x07, 0x6E, 0x1D, 0xD0, 0xFE, 0x4C, 0xE8, 0x48, 0x92, 0x7A, 0xA1, 0x7B, 0xA5, 0xAC,
+0xF7, 0xE1, 0x99, 0xC0, 0x99, 0x20, 0xD5, 0x07, 0x77, 0x1D, 0xE5, 0x14, 0x36, 0xA6, 0xF3, 0x77,
+0x9B, 0x61, 0x87, 0x98, 0xD6, 0xD6, 0xF8, 0xE6, 0x34, 0x37, 0x6F, 0x58, 0x29, 0x97, 0x2D, 0xE6,
+0xD1, 0x56, 0xB1, 0xBB, 0x35, 0xBD, 0x2B, 0x44, 0xAD, 0x30, 0x0F, 0x1D, 0x48, 0x5F, 0xDD, 0x58,
+0x7C, 0xB8, 0x2C, 0x2E, 0x26, 0x9B, 0xDA, 0x55, 0x01, 0x06, 0x66, 0xB0, 0x3C, 0xAB, 0xA0, 0x60,
+0x98, 0xF4, 0x72, 0xAF, 0xBC, 0x00, 0xAA, 0x57, 0x6A, 0xD8, 0x47, 0xC7, 0xC1, 0xCE, 0xB1, 0x05,
+0x45, 0x84, 0x63, 0x1E, 0x9C, 0x47, 0x11, 0xB4, 0xE6, 0x80, 0x8D, 0x3E, 0xFF, 0xE9, 0xD9, 0x3A,
+0x56, 0x3D, 0x41, 0x68, 0x2C, 0x6C, 0xA2, 0x23, 0x4C, 0xD6, 0x08, 0x91, 0x93, 0xCD, 0xAA, 0xF7,
+0xAA, 0x2B, 0x55, 0xEC, 0x53, 0xE8, 0xD9, 0x2E, 0xCB, 0xE0, 0x67, 0x1D, 0x9F, 0xFF, 0x94, 0xB8,
+0xBA, 0x82, 0xA7, 0x6A, 0x8C, 0x61, 0x7C, 0x58, 0x90, 0xA5, 0x11, 0x57, 0x21, 0xCF, 0x30, 0xB0,
+0x97, 0x44, 0x7D, 0x1D, 0xD3, 0x91, 0x3F, 0xC2, 0x4F, 0x0E, 0x3B, 0x57, 0x3A, 0x1F, 0x85, 0x82,
+0x79, 0x91, 0x03, 0xB4, 0x9B, 0x70, 0x2A, 0x5F, 0x8B, 0x20, 0x66, 0x6F, 0xF4, 0x10, 0x96, 0x52,
+0x4C, 0x77, 0xA2, 0x45, 0x28, 0xF8, 0xAD, 0xA3, 0x8C, 0x99, 0x3F, 0xD2, 0x39, 0x4A, 0x1A, 0x3A,
+0x72, 0x7E, 0x47, 0x49, 0x25, 0x63, 0x87, 0xCB, 0xEA, 0x89, 0x1D, 0x7F, 0x0C, 0x86, 0x9A, 0x8E,
+0xB1, 0x0C, 0xFF, 0xC6, 0xF2, 0xB1, 0x01, 0x99, 0xEA, 0xF1, 0x4A, 0xF1, 0xF3, 0x71, 0x4B, 0x92,
+0xC6, 0xD6, 0xD8, 0x26, 0xE8, 0x28, 0xF2, 0xF5, 0x5B, 0xE8, 0xF1, 0xE4, 0x4B, 0x36, 0x46, 0xD3,
+0x12, 0x2F, 0x98, 0x61, 0x12, 0xD9, 0x26, 0x58, 0x5C, 0x80, 0x7C, 0x71, 0x4E, 0x57, 0x9A, 0xAC,
+0x59, 0xE0, 0xC3, 0x70, 0x55, 0x57, 0xAE, 0x55, 0xF6, 0xCF, 0x6A, 0xF0, 0x10, 0xDC, 0xF4, 0xED,
+0xCC, 0x32, 0x4B, 0xAC, 0xC1, 0x4B, 0x2F, 0x62, 0x69, 0xD2, 0x15, 0x63, 0x39, 0xD5, 0x29, 0x09,
+0xA2, 0xB5, 0xC7, 0xBC, 0xFA, 0xC7, 0xC7, 0x8C, 0x64, 0xCF, 0x43, 0x9B, 0x4C, 0x60, 0x97, 0x33,
+0xA2, 0xB9, 0x0F, 0x70, 0x05, 0x89, 0x56, 0x62, 0xB1, 0x48, 0x08, 0xB5, 0x77, 0x4C, 0x60, 0x24,
+0x1D, 0x35, 0xEF, 0xD6, 0x53, 0xB0, 0x2E, 0x7F, 0xA6, 0x4B, 0x94, 0xE7, 0xCD, 0xC4, 0xFE, 0xC4,
+0x12, 0x7A, 0xAB, 0xD4, 0x05, 0xA5, 0x32, 0xD4, 0xA1, 0x8D, 0x9C, 0x22, 0x10, 0xDD, 0x39, 0x66,
+0x96, 0x79, 0x49, 0x19, 0x80, 0x1C, 0xE1, 0x1F, 0x01, 0x69, 0x37, 0x03, 0xB5, 0x22, 0x8F, 0x95,
+0xF7, 0xBD, 0x36, 0x89, 0x38, 0x37, 0x29, 0x6C, 0x0E, 0x89, 0x55, 0x4D, 0xC9, 0x64, 0xD3, 0xD5,
+0x9B, 0xB0, 0x51, 0x43, 0xBB, 0xA6, 0x6B, 0xFF, 0x13, 0xB6, 0x1A, 0x06, 0xF3, 0x86, 0x51, 0xFD,
+0xB9, 0xC8, 0x26, 0xA9, 0x8A, 0x4A, 0xC1, 0xE0, 0xD9, 0x3D, 0x31, 0x48, 0x39, 0xC8, 0x48, 0xC7,
+0xDE, 0xB1, 0x58, 0x0F, 0x4D, 0xEC, 0x5B, 0x32, 0x9C, 0x8B, 0xF4, 0x3A, 0x02, 0xE2, 0x92, 0x4A,
+0x7D, 0xCD, 0x38, 0x07, 0x4F, 0xBA, 0xD1, 0xD4, 0xE4, 0x3C, 0xB0, 0x4D, 0xB7, 0xEF, 0xFB, 0x06,
+0xA9, 0x83, 0x20, 0x0D, 0x7A, 0x1F, 0x15, 0x02, 0x70, 0x08, 0x8B, 0x91, 0xE6, 0xFD, 0x8C, 0x0B,
+0x3C, 0xEA, 0x1F, 0x94, 0xB6, 0x17, 0xC6, 0xB2, 0x07, 0x2C, 0x73, 0x7A, 0x4A, 0x76, 0x5F, 0x30,
+0x38, 0xE5, 0x22, 0xC6, 0xA3, 0xA7, 0x4D, 0x6A, 0x3A, 0xA7, 0x82, 0x90, 0xBE, 0xD1, 0xE9, 0x89,
+0x2F, 0xF0, 0xC9, 0x0A, 0xB6, 0xDA, 0x0D, 0x3E, 0x25, 0x8E, 0x99, 0xB2, 0x06, 0xE3, 0x65, 0x53,
+0x3F, 0x1A, 0xD9, 0x45, 0xCE, 0x10, 0xBE, 0x2E, 0xF4, 0x4F, 0x60, 0x25, 0xA7, 0x0A, 0xAE, 0x82,
+0x92, 0xAE, 0xC0, 0xFF, 0xAB, 0x49, 0x97, 0x5C, 0x53, 0x73, 0x4E, 0x78, 0x1A, 0x65, 0x42, 0xD5,
+0x6F, 0x1E, 0xE2, 0x25, 0x76, 0x3B, 0x6D, 0xF8, 0xBC, 0xBD, 0x3A, 0xDE, 0xB5, 0xFB, 0xBD, 0x90,
+0xDC, 0xC2, 0xB8, 0x90, 0xD4, 0x03, 0xD2, 0xDD, 0x35, 0x86, 0x48, 0x58, 0xB4, 0xCB, 0x10, 0xB2,
+0x31, 0xBD, 0x6C, 0x16, 0x92, 0x7A, 0x3D, 0x67, 0x45, 0x6B, 0x57, 0x26, 0xD2, 0xC2, 0xAF, 0xB1,
+0xAB, 0x82, 0x4B, 0x95, 0x08, 0x7D, 0x48, 0x1D, 0x17, 0x9D, 0x8B, 0x16, 0xCF, 0xE0, 0x16, 0x94,
+0xE1, 0xA6, 0xFC, 0x6C, 0xE1, 0x71, 0x3C, 0x57, 0x7F, 0x17, 0xC8, 0x4E, 0xFF, 0x16, 0x46, 0x1E,
+0x21, 0x27, 0x05, 0x41, 0xD3, 0x19, 0x28, 0x58, 0x86, 0xFB, 0x5A, 0xEF, 0xC3, 0x00, 0xE7, 0xA3,
+0x25, 0x1A, 0x94, 0x41, 0xE3, 0x50, 0x98, 0x94, 0x29, 0x42, 0x1F, 0x1C, 0x69, 0x46, 0xF4, 0x89,
+0x30, 0x4E, 0x5C, 0xCE, 0x2F, 0x65, 0xC5, 0x34, 0x71, 0xB7, 0xD9, 0x54, 0xB2, 0xC1, 0xCC, 0xED,
+0x14, 0x3E, 0xF1, 0x7B, 0x5F, 0xAE, 0xD3, 0x8F, 0xA2, 0x18, 0x12, 0x15, 0x23, 0x92, 0x75, 0x61,
+0xFF, 0xFA, 0x8F, 0xD1, 0x77, 0xC8, 0xC7, 0xA3, 0x44, 0x9F, 0x06, 0x2B, 0x1E, 0xA4, 0x4D, 0x4F,
+0x8E, 0x9A, 0x02, 0xA8, 0x4A, 0x67, 0x5D, 0x2D, 0x59, 0xFD, 0x1A, 0x8F, 0xE6, 0x52, 0x0C, 0xC7,
+0x4A, 0x95, 0xAF, 0xDD, 0x04, 0x76, 0x26, 0xCE, 0x4C, 0x97, 0x4E, 0x55, 0x9C, 0x28, 0xA4, 0x1D,
+0x92, 0xD6, 0x84, 0x87, 0x29, 0x28, 0x16, 0x1B, 0x34, 0xE3, 0xBD, 0x2F, 0x9B, 0xF8, 0x6F, 0xDC,
+0x9B, 0x6C, 0xF5, 0xEB, 0x26, 0x51, 0x47, 0x78, 0xA2, 0xB5, 0x4C, 0x24, 0x1E, 0x3D, 0xE5, 0x33,
+0xA3, 0xD9, 0x04, 0x20, 0x8E, 0xA7, 0x32, 0x88, 0xC6, 0x52, 0x0B, 0x71, 0x0D, 0x26, 0xC3, 0x3F,
+0xC4, 0xC8, 0x7F, 0x6F, 0x3A, 0xAD, 0xC7, 0x27, 0x3D, 0xB3, 0xE6, 0x6B, 0x68, 0x66, 0xB3, 0xEE,
+0x6D, 0xC7, 0xAB, 0xD4, 0xA2, 0x88, 0xAF, 0xEB, 0x1A, 0x51, 0x76, 0x19, 0xFC, 0xF7, 0x29, 0xF0,
+0x4D, 0xC5, 0xAB, 0x42, 0x81, 0x9F, 0x10, 0xD9, 0xB0, 0x5C, 0x9D, 0x1A, 0x5A, 0xFE, 0xB3, 0x71,
+0xBC, 0x13, 0x69, 0xDA, 0xCE, 0x15, 0x7C, 0x18, 0x2C, 0x81, 0xFC, 0xA9, 0x1E, 0x0B, 0x33, 0xBF,
+0x82, 0x0D, 0xD5, 0x58, 0xD0, 0xB6, 0x17, 0x34, 0xFE, 0x53, 0x45, 0xE7, 0x57, 0x9B, 0xFA, 0x3C,
+0x04, 0xCF, 0x89, 0x38, 0x73, 0xE9, 0x60, 0xEA, 0xF4, 0x0F, 0xB2, 0x2E, 0x90, 0x60, 0xAE, 0xFB,
+0x57, 0xCB, 0xA5, 0x9D, 0x60, 0x44, 0x46, 0x13, 0x3C, 0xB3, 0xB6, 0x0A, 0x09, 0x12, 0x2B, 0x27,
+0x95, 0x45, 0x29, 0x92, 0x86, 0x00, 0x2A, 0x93, 0x77, 0x8D, 0xAA, 0xC3, 0xF8, 0x46, 0xBE, 0x3A,
+0x6A, 0x0E, 0x51, 0x9D, 0x94, 0x60, 0x9A, 0x76, 0x93, 0xF4, 0x01, 0x19, 0xC3, 0xB1, 0x86, 0xA9,
+0x7E, 0xD1, 0xF6, 0xF1, 0x88, 0x59, 0x4E, 0x9F, 0xCC, 0xF2, 0xF7, 0xDD, 0x1B, 0x91, 0x98, 0xAC,
+0xCC, 0xC6, 0x81, 0x57, 0x3F, 0x07, 0xF2, 0x52, 0x5B, 0x79, 0x5D, 0xFB, 0x07, 0xF7, 0x6A, 0x62,
+0x30, 0xE5, 0x77, 0x81, 0x00, 0x6C, 0xB1, 0x11, 0x8A, 0x1D, 0x0C, 0x9C, 0x94, 0x1A, 0xAD, 0xB6,
+0x85, 0x29, 0x70, 0x19, 0xFB, 0xE1, 0xF5, 0x89, 0x6D, 0xB3, 0x84, 0xC5, 0x56, 0x14, 0x1E, 0x67,
+0x46, 0x57, 0xFE, 0x30, 0xD0, 0x81, 0x2B, 0x27, 0xD6, 0x4B, 0x41, 0x74, 0xF3, 0x51, 0xD0, 0x78,
+0xCE, 0x3A, 0x5C, 0x46, 0xCC, 0xCE, 0x19, 0xC9, 0xC3, 0x1A, 0x81, 0xF4, 0x62, 0x9A, 0x8B, 0xAD,
+0x71, 0x9C, 0x3E, 0x5B, 0x23, 0xA7, 0x9F, 0x7E, 0x26, 0xDD, 0x21, 0xCC, 0x36, 0x75, 0x90, 0x09,
+0x61, 0x0B, 0x85, 0xC1, 0x0A, 0xF4, 0x9D, 0x93, 0x9F, 0x5F, 0x73, 0x71, 0xAB, 0x2B, 0xFA, 0x5E,
+0xD9, 0xA1, 0xF8, 0x7F, 0x0F, 0xD5, 0x07, 0x59, 0xB2, 0x4F, 0xF9, 0x71, 0xD4, 0x35, 0x3E, 0x5D,
+0x85, 0x6A, 0x32, 0x76, 0xDB, 0xBE, 0xC5, 0xD4, 0x2B, 0xC5, 0x70, 0x95, 0x7C, 0x64, 0x04, 0x0E,
+0xC0, 0x4E, 0x59, 0x76, 0x10, 0xBF, 0x93, 0xBE, 0xEC, 0x40, 0x2C, 0xDE, 0x2D, 0xE6, 0xD1, 0x77,
+0xC7, 0x84, 0x4B, 0xD6, 0x1C, 0x9A, 0xA1, 0x93, 0xE4, 0x50, 0xA8, 0x1B, 0x73, 0x29, 0x92, 0xB0,
+0x37, 0x83, 0x15, 0xE3, 0xB5, 0xCD, 0xD1, 0x47, 0x38, 0xD1, 0xB6, 0xB6, 0x04, 0x3D, 0x58, 0x28,
+0xB1, 0xB5, 0x9E, 0xF3, 0x95, 0x12, 0x1A, 0xC2, 0xA1, 0x71, 0x72, 0x45, 0x35, 0x0F, 0xB8, 0xC4,
+0xEF, 0xF7, 0xAD, 0xD6, 0x82, 0x6A, 0x6A, 0x9E, 0x0E, 0xEF, 0xAB, 0xAD, 0x9D, 0x8D, 0xE4, 0x77,
+0xA1, 0x93, 0xAE, 0xE1, 0xBA, 0x0E, 0xAF, 0x83, 0xC4, 0x84, 0x19, 0x6E, 0x5B, 0x15, 0xD7, 0xAE,
+0x33, 0xA4, 0x37, 0xE2, 0xA1, 0x18, 0x2A, 0x4A, 0x9C, 0x5E, 0x7C, 0x61, 0x70, 0x76, 0xE9, 0xE6,
+0x0E, 0x11, 0xEE, 0x71, 0x45, 0xE0, 0x5E, 0x72, 0x3C, 0x88, 0x0C, 0x34, 0x34, 0x78, 0x39, 0xD7,
+0xFB, 0x26, 0x14, 0x1B, 0xCE, 0xEE, 0x15, 0x3C, 0xA4, 0x3F, 0xD3, 0x2A, 0x7C, 0x66, 0x58, 0xDD,
+0x56, 0x46, 0xAF, 0x14, 0x04, 0x35, 0x33, 0xD5, 0x83, 0xA0, 0x07, 0xE0, 0xC0, 0x4B, 0x9D, 0x36,
+0xF0, 0x72, 0x90, 0x7D, 0xFC, 0x4B, 0x3B, 0xDD, 0x07, 0x5E, 0xCD, 0xBE, 0x0B, 0x30, 0x78, 0x8C,
+0x9B, 0x4D, 0xFB, 0xB4, 0x95, 0xC4, 0xDE, 0x57, 0xB3, 0x07, 0xE6, 0x8F, 0x20, 0xE7, 0x54, 0x84,
+0xC8, 0x35, 0x3B, 0x68, 0x15, 0x74, 0x0F, 0x6A, 0xAB, 0xCC, 0x3E, 0x90, 0x6B, 0x38, 0x0A, 0xA8,
+0x5A, 0x3F, 0xF3, 0xAC, 0x27, 0x12, 0xFC, 0x04, 0xF6, 0x93, 0xB4, 0x84, 0xF2, 0x82, 0xED, 0xAE,
+0xF9, 0x64, 0x53, 0x1F, 0x9A, 0x2F, 0xAD, 0xB7, 0x2A, 0x17, 0x60, 0xFC, 0xDB, 0x07, 0xB1, 0x01,
+0xC9, 0xF8, 0x02, 0x5F, 0xF3, 0x5B, 0x5B, 0x90, 0xD4, 0x96, 0x92, 0x99, 0x36, 0x22, 0x53, 0xEA,
+0x62, 0xAE, 0xB0, 0x22, 0x6A, 0xAB, 0x24, 0xCD, 0x19, 0xBB, 0x86, 0x54, 0x17, 0x0F, 0x9D, 0x1A,
+0x4A, 0x3D, 0xE4, 0xF0, 0x0D, 0x03, 0xF2, 0x9A, 0x6D, 0x70, 0xEE, 0xA5, 0x51, 0x5F, 0xE8, 0x74,
+0xC1, 0xAC, 0x4B, 0xC6, 0x1C, 0x58, 0x26, 0x8F, 0xBF, 0xE1, 0x1D, 0xDB, 0x2D, 0xCA, 0x7E, 0x56,
+0xB9, 0x5E, 0x28, 0x4D, 0x63, 0x21, 0xDA, 0x20, 0xC5, 0xBB, 0xE3, 0x23, 0x92, 0x90, 0xB3, 0x2D,
+0xCE, 0x5B, 0x97, 0xF1, 0x66, 0x4A, 0x1D, 0xD0, 0xA4, 0x9E, 0x72, 0xD5, 0x3C, 0xC8, 0x7C, 0xCF,
+0x78, 0x1F, 0x5B, 0x34, 0x9B, 0xFF, 0x92, 0x71, 0xF5, 0x02, 0x0E, 0x01, 0xAC, 0x6A, 0x1E, 0xE0,
+0x2D, 0x15, 0x05, 0x40, 0x37, 0xF1, 0x7B, 0x24, 0xD8, 0x92, 0x5B, 0xE9, 0xEB, 0xD1, 0x7F, 0xC1,
+0xCE, 0x9C, 0xAA, 0x6A, 0x48, 0x38, 0x3A, 0xF5, 0x5A, 0x3F, 0x17, 0xFF, 0x45, 0x09, 0x1B, 0x40
+
+cipher_key =
+0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2, 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A,
+0xd0, 0xe7, 0x4b, 0xfb, 0x5d, 0xe5, 0x0c, 0xe7, 0x6f, 0x21, 0xb5, 0x52, 0x2a, 0xbb, 0xc7, 0xf7
+
+auth_key =
+0xaf, 0x96, 0x42, 0xf1, 0x8c, 0x50, 0xdc, 0x67, 0x1a, 0x43, 0x47, 0x62, 0xc7, 0x04, 0xab, 0x05,
+0xf5, 0x0c, 0xe7, 0xa2, 0xa6, 0x23, 0xd5, 0x3d, 0x95, 0xd8, 0xcd, 0x86, 0x79, 0xf5, 0x01, 0x47,
+0x4f, 0xf9, 0x1d, 0x9d, 0x36, 0xf7, 0x68, 0x1a, 0x64, 0x44, 0x58, 0x5d, 0xe5, 0x81, 0x15, 0x2a,
+0x41, 0xe4, 0x0e, 0xaa, 0x1f, 0x04, 0x21, 0xff, 0x2c, 0xf3, 0x73, 0x2b, 0x48, 0x1e, 0xd2, 0xf7,
+0xf6, 0xd9, 0xaf, 0xbf, 0x08, 0x3b, 0xbb, 0x19, 0x5f, 0xf6, 0x7d, 0x25, 0x85, 0xdf, 0x6b, 0x54,
+0xd0, 0xe7, 0x4b, 0x9e, 0xc7, 0xef, 0xca, 0x48, 0x6f, 0x21, 0xd7, 0x51, 0xc8, 0x21, 0xc1, 0x15,
+0xe8, 0x38, 0x36, 0x58, 0x39, 0xd9, 0x9a, 0xc5, 0xe7, 0x3b, 0xc4, 0x47, 0xe2, 0xbd, 0x80, 0x73,
+0xf8, 0xd1, 0x9a, 0x5e, 0x4b, 0xfb, 0x52, 0x6b, 0x50, 0xaf, 0x8b, 0xb7, 0xb5, 0x2c, 0x52, 0x84
+
+iv =
+0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+
+####################
+# sha_hmac_buff_32 #
+####################
+[sha1_hmac_buff_32]
+digest =
+0x36, 0xCA, 0x49, 0x6A, 0xE3, 0x54, 0xD8, 0x4F, 0x0B, 0x76, 0xD8, 0xAA, 0x78, 0xEB, 0x9D, 0x65,
+0x2C, 0xCA, 0x1F, 0x97
+
+[sha224_hmac_buff_32]
+digest =
+0x48, 0xC1, 0x45, 0x25, 0x29, 0xA0, 0x8B, 0x88, 0x72, 0x7A, 0xBC, 0x00, 0x94, 0x37, 0xE1, 0x22,
+0xEB, 0xFA, 0x1B, 0x7D, 0x89, 0x81, 0x31, 0xC8, 0x64, 0x76, 0x55, 0xA4
+
+[sha256_hmac_buff_32]
+digest =
+0x1C, 0xB2, 0x3D, 0xD1, 0xF9, 0xC7, 0x6C, 0x49, 0x2E, 0xDA, 0x94, 0x8B, 0xF1, 0xCF, 0x96, 0x43,
+0x67, 0x50, 0x39, 0x76, 0xB5, 0xA1, 0xCE, 0xA1, 0xD7, 0x77, 0x10, 0x07, 0x43, 0x37, 0x05, 0xB4
+
+[sha384_hmac_buff_32]
+digest =
+0x6C, 0xBD, 0x1E, 0x2E, 0x75, 0xA7, 0x2C, 0x98, 0xC4, 0x1E, 0x03, 0x4E, 0x39, 0x4B, 0x27, 0x41,
+0xFB, 0xC6, 0x56, 0x87, 0x84, 0xEB, 0xFA, 0xB1, 0x20, 0x1F, 0x11, 0x81, 0x8D, 0xDC, 0xB6, 0xA7,
+0xAD, 0x1F, 0xAC, 0xA9, 0x43, 0x1D, 0x2B, 0xEB, 0x5F, 0x27, 0xC6, 0x0F, 0x9E, 0xFB, 0x1E, 0xB1
+
+[sha512_hmac_buff_32]
+digest =
+0xA4, 0x60, 0x7E, 0xBE, 0x5F, 0x47, 0x58, 0x3B, 0x41, 0x5F, 0x29, 0xDF, 0xE4, 0xD2, 0xFB, 0x30,
+0xF0, 0x2B, 0x09, 0x4E, 0x09, 0x50, 0xEC, 0x1C, 0x0E, 0x34, 0x79, 0xAE, 0xD8, 0x6D, 0xAC, 0xB6,
+0x9B, 0x7C, 0xB9, 0x06, 0xC2, 0x4A, 0x4E, 0x22, 0x14, 0x4D, 0x42, 0x46, 0x20, 0xE0, 0x6C, 0xEE,
+0x2F, 0xE1, 0x23, 0xA2, 0x7A, 0x2F, 0xDB, 0xAF, 0x78, 0x75, 0x56, 0xF7, 0x3A, 0x5E, 0x74, 0xEF
+
+####################
+# sha_hmac_buff_64 #
+####################
+[sha1_hmac_buff_64]
+digest =
+0xFC, 0x17, 0x7E, 0x0E, 0x52, 0x94, 0xE3, 0x27, 0xC0, 0x9B, 0x72, 0xAD, 0xC0, 0x5B, 0xCF, 0xFF,
+0x65, 0x88, 0x43, 0xE7
+
+[sha224_hmac_buff_64]
+digest =
+0xD7, 0x55, 0x25, 0xC0, 0x26, 0xDD, 0x8E, 0x14, 0x17, 0x8B, 0x89, 0x59, 0x8A, 0xBB, 0xEA, 0xD6,
+0x7D, 0x85, 0x00, 0x9F, 0xC2, 0x8A, 0xCB, 0x01, 0x7F, 0x8C, 0x6E, 0x24
+
+[sha256_hmac_buff_64]
+digest =
+0x8F, 0x4B, 0x3B, 0x4C, 0x58, 0x25, 0x3B, 0x07, 0xEB, 0xF8, 0x20, 0x81, 0xD9, 0xD9, 0x92, 0x8F,
+0xF4, 0x32, 0x7C, 0x2A, 0xD9, 0xEC, 0x92, 0x60, 0x8F, 0xE3, 0x90, 0x7F, 0xC5, 0x75, 0x05, 0xB6
+
+[sha384_hmac_buff_64]
+digest =
+0xD1, 0xC7, 0x64, 0x27, 0xF0, 0x30, 0x43, 0x8E, 0xD6, 0xA6, 0x78, 0xF7, 0xE9, 0xCC, 0x8E, 0x69,
+0x6D, 0xB8, 0x3E, 0xFA, 0xA0, 0x81, 0x9C, 0x61, 0x78, 0x72, 0xF0, 0x1C, 0x29, 0x35, 0x51, 0x3E,
+0x4A, 0x95, 0xDE, 0x2C, 0x6A, 0x3F, 0x56, 0xA8, 0x12, 0xBA, 0x44, 0x39, 0x1E, 0xDB, 0xF7, 0xF5
+
+[sha512_hmac_buff_64]
+digest =
+0xE6, 0xE9, 0xD8, 0x1D, 0x90, 0xAE, 0x32, 0x0E, 0xBA, 0x55, 0x58, 0xD5, 0x55, 0x97, 0x40, 0xB3,
+0xE9, 0x12, 0xD3, 0x08, 0xEF, 0x21, 0xED, 0xA5, 0x94, 0x8D, 0xF2, 0x4C, 0x52, 0x2C, 0x50, 0xB2,
+0xD2, 0xEC, 0xB7, 0xE1, 0x95, 0x2D, 0x68, 0xDB, 0xAD, 0xB5, 0x94, 0x50, 0x67, 0xF3, 0x0A, 0x83,
+0x54, 0x03, 0x33, 0x1C, 0xD5, 0x42, 0x7D, 0xB4, 0x3E, 0x69, 0x7C, 0x36, 0x7E, 0x96, 0x0D, 0x3E
+
+#####################
+# sha_hmac_buff_128 #
+#####################
+[sha1_hmac_buff_128]
+digest =
+0xAA, 0x90, 0x55, 0xA5, 0x71, 0xC4, 0x2B, 0xA3, 0x02, 0xAA, 0xB1, 0x1C, 0xB3, 0x88, 0x38, 0x6E,
+0xAD, 0x26, 0x98, 0xA7
+
+[sha224_hmac_buff_128]
+digest =
+0xBE, 0xCC, 0x83, 0x48, 0x4C, 0x58, 0xF9, 0x86, 0xFA, 0x93, 0x5F, 0xD1, 0x3C, 0x11, 0x8A, 0x37,
+0xA6, 0xEE, 0x52, 0x4D, 0xA3, 0x98, 0x3E, 0x35, 0xF1, 0x4F, 0xD9, 0xDB
+
+[sha256_hmac_buff_128]
+digest =
+0xE2, 0x9C, 0xE1, 0xDF, 0xCD, 0xAE, 0x50, 0x4B, 0x9A, 0xA6, 0x41, 0xAC, 0x0C, 0xF1, 0x66, 0xED,
+0xA1, 0x22, 0x05, 0x72, 0x49, 0x97, 0xA1, 0x30, 0xB8, 0xF9, 0xED, 0x36, 0x0A, 0x19, 0xE4, 0x2A
+
+[sha384_hmac_buff_128]
+digest =
+0xD9, 0x3C, 0xEB, 0xF4, 0x20, 0xC6, 0x4F, 0xC7, 0xBD, 0x34, 0xBA, 0xFD, 0x7C, 0xA9, 0xCE, 0xFF,
+0x26, 0x2E, 0xB4, 0x4A, 0xB7, 0x47, 0x71, 0x2C, 0x9E, 0xCF, 0x44, 0x0B, 0xD9, 0xAF, 0x8D, 0x17,
+0x0A, 0x3A, 0x02, 0xD0, 0xE9, 0xDF, 0xCF, 0x52, 0x5F, 0xDA, 0xA7, 0xB6, 0x51, 0x7C, 0x59, 0x09
+
+[sha512_hmac_buff_128]
+digest =
+0xAD, 0x7E, 0xB7, 0x33, 0xFB, 0x8A, 0x17, 0xD0, 0x3C, 0xB0, 0x80, 0x19, 0xF3, 0x9A, 0x6F, 0x90,
+0xDE, 0xF3, 0x53, 0xEA, 0x48, 0x75, 0x0A, 0x1E, 0x49, 0x02, 0xA0, 0x94, 0xC4, 0xE8, 0xFB, 0x87,
+0x83, 0x80, 0xD3, 0xFF, 0x6B, 0x79, 0x73, 0x54, 0xF9, 0x2F, 0x2D, 0x59, 0x69, 0x0E, 0x50, 0x29,
+0x2A, 0xDA, 0x59, 0x38, 0xDD, 0x62, 0xF9, 0x1A, 0x18, 0xA9, 0x51, 0x5A, 0xFE, 0x8E, 0xFD, 0xBF
+
+#####################
+# sha_hmac_buff_256 #
+#####################
+[sha1_hmac_buff_256]
+digest =
+0xB1, 0x18, 0x31, 0xBF, 0xEE, 0x81, 0x7E, 0xFC, 0x68, 0xDA, 0xB6, 0x8A, 0x5D, 0xDE, 0x39, 0x65,
+0xC8, 0xF8, 0xC3, 0xE5
+
+[sha224_hmac_buff_256]
+digest =
+0xCD, 0xF6, 0xC2, 0x6D, 0xFD, 0x33, 0x1A, 0xD8, 0x2F, 0x07, 0x4F, 0x1A, 0xE8, 0x18, 0xBD, 0x04,
+0xB1, 0xE5, 0x8D, 0xC1, 0x21, 0x95, 0x87, 0x75, 0xC2, 0x27, 0x4B, 0xF2
+
+[sha256_hmac_buff_256]
+digest =
+0xC0, 0xFA, 0x8F, 0x6F, 0x55, 0xFC, 0xF3, 0xDF, 0x8E, 0x5D, 0x93, 0x5E, 0x6B, 0x20, 0x0A, 0x9A,
+0x84, 0x3D, 0xCD, 0x4B, 0x57, 0x63, 0x2D, 0x93, 0x51, 0x45, 0xF2, 0x1E, 0xC7, 0xA4, 0xD4, 0x69
+
+[sha384_hmac_buff_256]
+digest =
+0x2B, 0x92, 0x9E, 0x85, 0x5A, 0x89, 0xB5, 0x12, 0x4A, 0x9B, 0x2D, 0xD2, 0xB2, 0x3E, 0xAB, 0xC1,
+0x1E, 0x7F, 0x53, 0xD9, 0x88, 0xEB, 0xEE, 0xA2, 0x49, 0x14, 0xDE, 0x1A, 0x9E, 0x20, 0xCE, 0xEC,
+0x7A, 0x5D, 0x25, 0xD8, 0x8F, 0xFE, 0x8B, 0xB1, 0xB1, 0x04, 0x5F, 0x46, 0x2D, 0x34, 0x2D, 0x72
+
+[sha512_hmac_buff_256]
+digest =
+0x4F, 0x96, 0x89, 0x9E, 0x9D, 0x53, 0xAC, 0x05, 0xC7, 0xA0, 0x0F, 0x4D, 0xB6, 0x3E, 0x06, 0x03,
+0x19, 0x68, 0x41, 0x4F, 0x11, 0x57, 0x77, 0x21, 0xBD, 0x60, 0x3E, 0xB4, 0xFE, 0x6A, 0x0D, 0xBF,
+0xE0, 0x4F, 0x32, 0x5B, 0xF9, 0xDF, 0x13, 0xBD, 0x02, 0x73, 0xD4, 0x0C, 0xE9, 0x9D, 0xB7, 0xD5,
+0x38, 0xA0, 0x20, 0xD9, 0xD1, 0x66, 0x17, 0x19, 0x54, 0x36, 0x18, 0xE1, 0xF5, 0x34, 0x12, 0x9E
+
+#####################
+# sha_hmac_buff_512 #
+#####################
+[sha1_hmac_buff_512]
+digest =
+0x78, 0x14, 0x01, 0xED, 0x93, 0x6F, 0x22, 0xB6, 0x96, 0x5A, 0x32, 0x05, 0xA9, 0xD3, 0x49, 0x04,
+0x55, 0xB0, 0x00, 0x06
+
+[sha224_hmac_buff_512]
+digest =
+0x25, 0xD4, 0x8F, 0x92, 0xE1, 0xD0, 0x4E, 0x3F, 0x34, 0x38, 0x01, 0xB8, 0xFE, 0x57, 0x3D, 0x34,
+0x39, 0x98, 0x82, 0x8D, 0x68, 0x04, 0x5A, 0x74, 0x28, 0x4F, 0x18, 0xCE
+
+[sha256_hmac_buff_512]
+digest =
+0x90, 0x06, 0x97, 0x8A, 0x7A, 0xEF, 0x62, 0x14, 0x4C, 0x14, 0xAA, 0x25, 0x4C, 0xE3, 0x5D, 0xE4,
+0xAD, 0x6C, 0xD6, 0x82, 0x2B, 0x87, 0x53, 0x3E, 0xE9, 0xE4, 0x97, 0x82, 0x82, 0x76, 0xE7, 0xF1
+
+[sha384_hmac_buff_512]
+digest =
+0xD5, 0xDA, 0x7C, 0x8A, 0x0D, 0x1B, 0xBE, 0x3E, 0x25, 0x1E, 0x6C, 0xA4, 0x50, 0x32, 0x92, 0x13,
+0x91, 0x4F, 0xA2, 0x29, 0x2A, 0x0A, 0x57, 0x62, 0x3D, 0x93, 0xF2, 0x45, 0x96, 0x22, 0xF8, 0x0D,
+0xA9, 0xE9, 0xAB, 0xAC, 0x7B, 0x2E, 0x42, 0xC2, 0x3E, 0x75, 0x23, 0xD0, 0xD2, 0xAA, 0x1E, 0xEE
+
+[sha512_hmac_buff_512]
+digest =
+0x57, 0x34, 0x65, 0x3D, 0xDE, 0x8B, 0x7B, 0x99, 0x62, 0x24, 0xF3, 0xAF, 0xA6, 0xB1, 0xF0, 0x01,
+0x23, 0xD4, 0x94, 0xC2, 0x06, 0x70, 0xA5, 0x8C, 0x80, 0x93, 0x49, 0x88, 0xB4, 0xB6, 0x45, 0xE5,
+0x37, 0xC9, 0xE4, 0xA1, 0xAB, 0x6C, 0xA5, 0x23, 0xD2, 0x07, 0x9B, 0x10, 0x4D, 0xFD, 0x75, 0xC0,
+0x28, 0xA1, 0x8A, 0x84, 0x03, 0x35, 0x22, 0xCC, 0xAC, 0x6C, 0x97, 0x93, 0x57, 0x08, 0x48, 0x51
+
+######################
+# sha_hmac_buff_1024 #
+######################
+[sha1_hmac_buff_1024]
+digest =
+0x74, 0xF7, 0x91, 0x04, 0x06, 0xDB, 0xA9, 0xF0, 0x08, 0x0E, 0x93, 0xCE, 0x55, 0xA8, 0x54, 0xF0,
+0x4B, 0x5E, 0x3F, 0xC7
+
+[sha224_hmac_buff_1024]
+digest =
+0x45, 0xDA, 0x2E, 0x83, 0xBD, 0x35, 0xA4, 0x58, 0x14, 0x74, 0xCB, 0xA4, 0x48, 0xA6, 0xBA, 0xDC,
+0x7D, 0x56, 0x6A, 0x44, 0xA7, 0xE9, 0x2F, 0x75, 0x20, 0x47, 0x2A, 0x5A
+
+[sha256_hmac_buff_1024]
+digest =
+0xA2, 0x81, 0xFE, 0x1A, 0x5C, 0x4F, 0x02, 0x72, 0xEF, 0x4F, 0xC6, 0xEE, 0x54, 0x71, 0x69, 0xAF,
+0x5C, 0x71, 0x9F, 0xB0, 0xAC, 0x5B, 0x7F, 0x51, 0xD6, 0x0D, 0x64, 0xD2, 0x2E, 0x0E, 0x30, 0x55
+
+[sha384_hmac_buff_1024]
+digest =
+0x26, 0x44, 0x13, 0x01, 0x25, 0x6E, 0xC7, 0xC3, 0x88, 0x25, 0xD5, 0xDD, 0x1D, 0xCA, 0x0C, 0xB1,
+0xB8, 0x82, 0xB2, 0xB8, 0x15, 0x3F, 0xE5, 0x54, 0x43, 0x47, 0x32, 0x3B, 0xB2, 0xE0, 0xC8, 0x58,
+0x64, 0xE7, 0x78, 0xC9, 0x1F, 0x81, 0x7B, 0xBD, 0x0D, 0x6D, 0x37, 0x9C, 0x01, 0x20, 0x6A, 0x8E
+
+[sha512_hmac_buff_1024]
+digest =
+0xBE, 0xDA, 0x0D, 0x3B, 0x47, 0x24, 0xBA, 0x45, 0xBA, 0xCA, 0x84, 0x5F, 0xEA, 0xAC, 0x33, 0x84,
+0x00, 0x62, 0xA5, 0x29, 0xB6, 0x2F, 0xB7, 0x86, 0xD0, 0x94, 0xFF, 0xFF, 0xE4, 0x1E, 0x5C, 0xFD,
+0xC8, 0xD5, 0x3A, 0xD4, 0xFC, 0xA6, 0x1C, 0x66, 0x4A, 0x6D, 0xF9, 0x2B, 0x1D, 0x7F, 0xA0, 0xCF,
+0x3D, 0x0F, 0x1F, 0x5B, 0xDD, 0x21, 0x12, 0xA8, 0x76, 0xB0, 0xD7, 0x30, 0x66, 0xA6, 0xA0, 0x6C
+
+######################
+# sha_hmac_buff_2048 #
+######################
+[sha1_hmac_buff_2048]
+digest =
+0x99, 0x32, 0xCD, 0xC3, 0xC9, 0x7F, 0x98, 0x1A, 0x96, 0xF6, 0x52, 0xC8, 0xA2, 0x16, 0x9C, 0x29,
+0x9D, 0x6E, 0x96, 0xF5
+
+[sha224_hmac_buff_2048]
+digest =
+0x1A, 0xC4, 0xDC, 0x46, 0xE5, 0x87, 0xFE, 0xE0, 0x47, 0x64, 0x53, 0xA3, 0x6A, 0x1F, 0x78, 0xC0,
+0xC0, 0x02, 0x03, 0x64, 0xB1, 0x55, 0x50, 0x66, 0xDA, 0xD6, 0x9E, 0xBC
+
+[sha256_hmac_buff_2048]
+digest =
+0xA6, 0xC6, 0x4B, 0x0C, 0x95, 0xDE, 0xD5, 0xE2, 0x40, 0x7D, 0x44, 0xC5, 0xBF, 0x00, 0x5A, 0xFB,
+0x6F, 0x3F, 0x5E, 0x69, 0xB1, 0x32, 0x91, 0xAB, 0x6C, 0x90, 0x25, 0xAB, 0xD9, 0x1B, 0x8F, 0x80
+
+[sha384_hmac_buff_2048]
+digest =
+0x16, 0xF1, 0x1B, 0xC1, 0x22, 0xDB, 0x21, 0x90, 0x08, 0xE3, 0x42, 0x0C, 0x9A, 0xF1, 0x0F, 0xF8,
+0x7A, 0xE9, 0x50, 0x09, 0xC6, 0x0C, 0x71, 0x65, 0x3A, 0x40, 0xB3, 0x78, 0x11, 0xE8, 0xD2, 0xD4,
+0xB0, 0x6C, 0xA9, 0x6A, 0x0C, 0xCD, 0xE1, 0x70, 0x7E, 0x90, 0x86, 0x34, 0xC1, 0x08, 0x9E, 0xFC
+
+[sha512_hmac_buff_2048]
+digest =
+0xDF, 0x7F, 0xC3, 0x26, 0x3E, 0x55, 0x80, 0x7D, 0x02, 0x06, 0x5A, 0x4B, 0x8C, 0xFD, 0x2F, 0x33,
+0xF8, 0x0E, 0x9C, 0x59, 0xAE, 0x56, 0xAE, 0x86, 0xA9, 0x25, 0x3F, 0xB7, 0xF7, 0x4C, 0x7A, 0xB4,
+0xD8, 0x0E, 0x43, 0xC0, 0x86, 0xDF, 0xDB, 0xBA, 0xA8, 0xCB, 0x46, 0x2A, 0x92, 0x34, 0xF5, 0x3B,
+0xBD, 0x59, 0x64, 0xDF, 0x30, 0x20, 0xF5, 0x13, 0xD7, 0x78, 0xB9, 0x27, 0xE6, 0xB6, 0x56, 0x19
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
new file mode 100644
index 00000000..9ec2a4b4
--- /dev/null
+++ b/app/test-crypto-perf/main.c
@@ -0,0 +1,444 @@
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_eal.h>
+#include <rte_cryptodev.h>
+
+#include "cperf.h"
+#include "cperf_options.h"
+#include "cperf_test_vector_parsing.h"
+#include "cperf_test_throughput.h"
+#include "cperf_test_latency.h"
+#include "cperf_test_verify.h"
+
+const char *cperf_test_type_strs[] = {
+ [CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
+ [CPERF_TEST_TYPE_LATENCY] = "latency",
+ [CPERF_TEST_TYPE_VERIFY] = "verify"
+};
+
+const char *cperf_op_type_strs[] = {
+ [CPERF_CIPHER_ONLY] = "cipher-only",
+ [CPERF_AUTH_ONLY] = "auth-only",
+ [CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
+ [CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
+ [CPERF_AEAD] = "aead"
+};
+
+const struct cperf_test cperf_testmap[] = {
+ [CPERF_TEST_TYPE_THROUGHPUT] = {
+ cperf_throughput_test_constructor,
+ cperf_throughput_test_runner,
+ cperf_throughput_test_destructor
+ },
+ [CPERF_TEST_TYPE_LATENCY] = {
+ cperf_latency_test_constructor,
+ cperf_latency_test_runner,
+ cperf_latency_test_destructor
+ },
+ [CPERF_TEST_TYPE_VERIFY] = {
+ cperf_verify_test_constructor,
+ cperf_verify_test_runner,
+ cperf_verify_test_destructor
+ }
+};
+
+static int
+cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs)
+{
+ uint8_t cdev_id, enabled_cdev_count = 0, nb_lcores;
+ int ret;
+
+ enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
+ enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
+ if (enabled_cdev_count == 0) {
+ printf("No crypto devices type %s available\n",
+ opts->device_type);
+ return -EINVAL;
+ }
+
+ nb_lcores = rte_lcore_count() - 1;
+
+ if (enabled_cdev_count > nb_lcores) {
+ printf("Number of capable crypto devices (%d) "
+ "has to be less or equal to number of slave "
+ "cores (%d)\n", enabled_cdev_count, nb_lcores);
+ return -EINVAL;
+ }
+
+ for (cdev_id = 0; cdev_id < enabled_cdev_count &&
+ cdev_id < RTE_CRYPTO_MAX_DEVS; cdev_id++) {
+
+ struct rte_cryptodev_config conf = {
+ .nb_queue_pairs = 1,
+ .socket_id = SOCKET_ID_ANY,
+ .session_mp = {
+ .nb_objs = 2048,
+ .cache_size = 64
+ }
+ };
+ struct rte_cryptodev_qp_conf qp_conf = {
+ .nb_descriptors = 2048
+ };
+
+ ret = rte_cryptodev_configure(enabled_cdevs[cdev_id], &conf);
+ if (ret < 0) {
+ printf("Failed to configure cryptodev %u",
+ enabled_cdevs[cdev_id]);
+ return -EINVAL;
+ }
+
+ ret = rte_cryptodev_queue_pair_setup(enabled_cdevs[cdev_id], 0,
+ &qp_conf, SOCKET_ID_ANY);
+ if (ret < 0) {
+ printf("Failed to setup queue pair %u on "
+ "cryptodev %u", 0, cdev_id);
+ return -EINVAL;
+ }
+
+ ret = rte_cryptodev_start(enabled_cdevs[cdev_id]);
+ if (ret < 0) {
+ printf("Failed to start device %u: error %d\n",
+ enabled_cdevs[cdev_id], ret);
+ return -EPERM;
+ }
+ }
+
+ return enabled_cdev_count;
+}
+
+static int
+cperf_verify_devices_capabilities(struct cperf_options *opts,
+ uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
+{
+ struct rte_cryptodev_sym_capability_idx cap_idx;
+ const struct rte_cryptodev_symmetric_capability *capability;
+
+ uint8_t i, cdev_id;
+ int ret;
+
+ for (i = 0; i < nb_cryptodevs; i++) {
+
+ cdev_id = enabled_cdevs[i];
+
+ if (opts->op_type == CPERF_AUTH_ONLY ||
+ opts->op_type == CPERF_CIPHER_THEN_AUTH ||
+ opts->op_type == CPERF_AUTH_THEN_CIPHER ||
+ opts->op_type == CPERF_AEAD) {
+
+ cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ cap_idx.algo.auth = opts->auth_algo;
+
+ capability = rte_cryptodev_sym_capability_get(cdev_id,
+ &cap_idx);
+ if (capability == NULL)
+ return -1;
+
+ ret = rte_cryptodev_sym_capability_check_auth(
+ capability,
+ opts->auth_key_sz,
+ opts->auth_digest_sz,
+ opts->auth_aad_sz);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (opts->op_type == CPERF_CIPHER_ONLY ||
+ opts->op_type == CPERF_CIPHER_THEN_AUTH ||
+ opts->op_type == CPERF_AUTH_THEN_CIPHER ||
+ opts->op_type == CPERF_AEAD) {
+
+ cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cap_idx.algo.cipher = opts->cipher_algo;
+
+ capability = rte_cryptodev_sym_capability_get(cdev_id,
+ &cap_idx);
+ if (capability == NULL)
+ return -1;
+
+ ret = rte_cryptodev_sym_capability_check_cipher(
+ capability,
+ opts->cipher_key_sz,
+ opts->cipher_iv_sz);
+ if (ret != 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+cperf_check_test_vector(struct cperf_options *opts,
+ struct cperf_test_vector *test_vec)
+{
+ if (opts->op_type == CPERF_CIPHER_ONLY) {
+ if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
+ if (test_vec->plaintext.data == NULL)
+ return -1;
+ } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
+ if (test_vec->plaintext.data == NULL)
+ return -1;
+ if (test_vec->plaintext.length < opts->max_buffer_size)
+ return -1;
+ if (test_vec->ciphertext.data == NULL)
+ return -1;
+ if (test_vec->ciphertext.length < opts->max_buffer_size)
+ return -1;
+ if (test_vec->iv.data == NULL)
+ return -1;
+ if (test_vec->iv.length != opts->cipher_iv_sz)
+ return -1;
+ if (test_vec->cipher_key.data == NULL)
+ return -1;
+ if (test_vec->cipher_key.length != opts->cipher_key_sz)
+ return -1;
+ }
+ } else if (opts->op_type == CPERF_AUTH_ONLY) {
+ if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
+ if (test_vec->plaintext.data == NULL)
+ return -1;
+ if (test_vec->plaintext.length < opts->max_buffer_size)
+ return -1;
+ if (test_vec->auth_key.data == NULL)
+ return -1;
+ if (test_vec->auth_key.length != opts->auth_key_sz)
+ return -1;
+ if (test_vec->digest.data == NULL)
+ return -1;
+ if (test_vec->digest.length < opts->auth_digest_sz)
+ return -1;
+ }
+
+ } else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
+ opts->op_type == CPERF_AUTH_THEN_CIPHER) {
+ if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
+ if (test_vec->plaintext.data == NULL)
+ return -1;
+ if (test_vec->plaintext.length < opts->max_buffer_size)
+ return -1;
+ } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
+ if (test_vec->plaintext.data == NULL)
+ return -1;
+ if (test_vec->plaintext.length < opts->max_buffer_size)
+ return -1;
+ if (test_vec->ciphertext.data == NULL)
+ return -1;
+ if (test_vec->ciphertext.length < opts->max_buffer_size)
+ return -1;
+ if (test_vec->iv.data == NULL)
+ return -1;
+ if (test_vec->iv.length != opts->cipher_iv_sz)
+ return -1;
+ if (test_vec->cipher_key.data == NULL)
+ return -1;
+ if (test_vec->cipher_key.length != opts->cipher_key_sz)
+ return -1;
+ }
+ if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
+ if (test_vec->auth_key.data == NULL)
+ return -1;
+ if (test_vec->auth_key.length != opts->auth_key_sz)
+ return -1;
+ if (test_vec->digest.data == NULL)
+ return -1;
+ if (test_vec->digest.length < opts->auth_digest_sz)
+ return -1;
+ }
+ } else if (opts->op_type == CPERF_AEAD) {
+ if (test_vec->plaintext.data == NULL)
+ return -1;
+ if (test_vec->plaintext.length < opts->max_buffer_size)
+ return -1;
+ if (test_vec->ciphertext.data == NULL)
+ return -1;
+ if (test_vec->ciphertext.length < opts->max_buffer_size)
+ return -1;
+ if (test_vec->aad.data == NULL)
+ return -1;
+ if (test_vec->aad.length != opts->auth_aad_sz)
+ return -1;
+ if (test_vec->digest.data == NULL)
+ return -1;
+ if (test_vec->digest.length < opts->auth_digest_sz)
+ return -1;
+ }
+ return 0;
+}
+
+int
+main(int argc, char **argv)
+{
+ struct cperf_options opts = {0};
+ struct cperf_test_vector *t_vec = NULL;
+ struct cperf_op_fns op_fns;
+
+ void *ctx[RTE_MAX_LCORE] = { };
+
+ int nb_cryptodevs = 0;
+ uint8_t cdev_id, i;
+ uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
+
+ uint8_t buffer_size_idx = 0;
+
+ int ret;
+ uint32_t lcore_id;
+
+ /* Initialise DPDK EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
+ argc -= ret;
+ argv += ret;
+
+ cperf_options_default(&opts);
+
+ ret = cperf_options_parse(&opts, argc, argv);
+ if (ret) {
+ RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n");
+ goto err;
+ }
+
+ ret = cperf_options_check(&opts);
+ if (ret) {
+ RTE_LOG(ERR, USER1,
+ "Checking on or more user options failed\n");
+ goto err;
+ }
+
+ if (!opts.silent)
+ cperf_options_dump(&opts);
+
+ nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs);
+ if (nb_cryptodevs < 1) {
+ RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
+ "device type\n");
+ nb_cryptodevs = 0;
+ goto err;
+ }
+
+ ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
+ nb_cryptodevs);
+ if (ret) {
+ RTE_LOG(ERR, USER1, "Crypto device type does not support "
+ "capabilities requested\n");
+ goto err;
+ }
+
+ if (opts.test_file != NULL) {
+ t_vec = cperf_test_vector_get_from_file(&opts);
+ if (t_vec == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Failed to create test vector for"
+ " specified file\n");
+ goto err;
+ }
+
+ if (cperf_check_test_vector(&opts, t_vec)) {
+ RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
+ "\n");
+ goto err;
+ }
+ } else {
+ t_vec = cperf_test_vector_get_dummy(&opts);
+ if (t_vec == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Failed to create test vector for"
+ " specified algorithms\n");
+ goto err;
+ }
+ }
+
+ ret = cperf_get_op_functions(&opts, &op_fns);
+ if (ret) {
+ RTE_LOG(ERR, USER1, "Failed to find function ops set for "
+ "specified algorithms combination\n");
+ goto err;
+ }
+
+ if (!opts.silent)
+ show_test_vector(t_vec);
+
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+ if (i == nb_cryptodevs)
+ break;
+
+ cdev_id = enabled_cdevs[i];
+
+ ctx[cdev_id] = cperf_testmap[opts.test].constructor(cdev_id, 0,
+ &opts, t_vec, &op_fns);
+ if (ctx[cdev_id] == NULL) {
+ RTE_LOG(ERR, USER1, "Test run constructor failed\n");
+ goto err;
+ }
+ i++;
+ }
+
+ /* Get first size from range or list */
+ if (opts.inc_buffer_size != 0)
+ opts.test_buffer_size = opts.min_buffer_size;
+ else
+ opts.test_buffer_size = opts.buffer_size_list[0];
+
+ while (opts.test_buffer_size <= opts.max_buffer_size) {
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+ if (i == nb_cryptodevs)
+ break;
+
+ cdev_id = enabled_cdevs[i];
+
+ rte_eal_remote_launch(cperf_testmap[opts.test].runner,
+ ctx[cdev_id], lcore_id);
+ i++;
+ }
+ rte_eal_mp_wait_lcore();
+
+ /* Get next size from range or list */
+ if (opts.inc_buffer_size != 0)
+ opts.test_buffer_size += opts.inc_buffer_size;
+ else {
+ if (++buffer_size_idx == opts.buffer_size_count)
+ break;
+ opts.test_buffer_size = opts.buffer_size_list[buffer_size_idx];
+ }
+ }
+
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+
+ if (i == nb_cryptodevs)
+ break;
+
+ cdev_id = enabled_cdevs[i];
+
+ cperf_testmap[opts.test].destructor(ctx[cdev_id]);
+ i++;
+ }
+
+ free_test_vector(t_vec, &opts);
+
+ printf("\n");
+ return EXIT_SUCCESS;
+
+err:
+ i = 0;
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (i == nb_cryptodevs)
+ break;
+
+ cdev_id = enabled_cdevs[i];
+
+ if (ctx[cdev_id] && cperf_testmap[opts.test].destructor)
+ cperf_testmap[opts.test].destructor(ctx[cdev_id]);
+ i++;
+ }
+
+ free_test_vector(t_vec, &opts);
+
+ printf("\n");
+ return EXIT_FAILURE;
+}
diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile
index 92c0c1b0..35ecee9f 100644
--- a/app/test-pmd/Makefile
+++ b/app/test-pmd/Makefile
@@ -47,6 +47,7 @@ CFLAGS += $(WERROR_FLAGS)
SRCS-y := testpmd.c
SRCS-y += parameters.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_flow.c
SRCS-y += config.c
SRCS-y += iofwd.c
SRCS-y += macfwd.c
@@ -59,13 +60,26 @@ SRCS-y += icmpecho.c
SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ieee1588fwd.c
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
-_LDLIBS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += -lrte_pmd_ixgbe
+
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y)
+LDLIBS += -lrte_pmd_bond
endif
-CFLAGS_cmdline.o := -D_GNU_SOURCE
+ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
+LDLIBS += -lrte_pmd_ixgbe
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_I40E_PMD),y)
+LDLIBS += -lrte_pmd_i40e
+endif
-# this application needs libraries first
-DEPDIRS-y += lib drivers
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_XENVIRT),y)
+LDLIBS += -lrte_pmd_xenvirt
+endif
+
+endif
+
+CFLAGS_cmdline.o := -D_GNU_SOURCE
include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 315a252e..0afac68c 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -75,6 +75,7 @@
#include <rte_string_fns.h>
#include <rte_devargs.h>
#include <rte_eth_ctrl.h>
+#include <rte_flow.h>
#include <cmdline_rdline.h>
#include <cmdline_parse.h>
@@ -90,6 +91,9 @@
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
+#ifdef RTE_LIBRTE_I40E_PMD
+#include <rte_pmd_i40e.h>
+#endif
#include "testpmd.h"
static struct cmdline *testpmd_cl;
@@ -127,7 +131,7 @@ cmdline_parse_token_string_t cmd_help_brief_help =
cmdline_parse_inst_t cmd_help_brief = {
.f = cmd_help_brief_parsed,
.data = NULL,
- .help_str = "show help",
+ .help_str = "help: Show help",
.tokens = {
(void *)&cmd_help_brief_help,
NULL,
@@ -182,7 +186,7 @@ static void cmd_help_long_parsed(void *parsed_result,
"Display:\n"
"--------\n\n"
- "show port (info|stats|xstats|fdir|stat_qmap|dcb_tc) (port_id|all)\n"
+ "show port (info|stats|xstats|fdir|stat_qmap|dcb_tc|cap) (port_id|all)\n"
" Display information for port_id, or all.\n\n"
"show port X rss reta (size) (mask0,mask1,...)\n"
@@ -210,6 +214,15 @@ static void cmd_help_long_parsed(void *parsed_result,
"read txd (port_id) (queue_id) (txd_id)\n"
" Display a TX descriptor of a port TX queue.\n\n"
+
+ "ddp get list (port_id)\n"
+ " Get ddp profile info list\n\n"
+
+ "show vf stats (port_id) (vf_id)\n"
+ " Display a VF's statistics.\n\n"
+
+ "clear vf stats (port_id) (vf_id)\n"
+ " Reset a VF's statistics.\n\n"
);
}
@@ -262,27 +275,43 @@ static void cmd_help_long_parsed(void *parsed_result,
"set portlist (x[,y]*)\n"
" Set the list of forwarding ports.\n\n"
-#ifdef RTE_LIBRTE_IXGBE_PMD
"set tx loopback (port_id) (on|off)\n"
" Enable or disable tx loopback.\n\n"
+#ifdef RTE_LIBRTE_IXGBE_PMD
"set all queues drop (port_id) (on|off)\n"
" Set drop enable bit for all queues.\n\n"
"set vf split drop (port_id) (vf_id) (on|off)\n"
" Set split drop enable bit for a VF from the PF.\n\n"
+#endif
"set vf mac antispoof (port_id) (vf_id) (on|off).\n"
" Set MAC antispoof for a VF from the PF.\n\n"
+
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ "set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)\n"
+ " Enable MACsec offload.\n\n"
+
+ "set macsec offload (port_id) off\n"
+ " Disable MACsec offload.\n\n"
+
+ "set macsec sc (tx|rx) (port_id) (mac) (pi)\n"
+ " Configure MACsec secure connection (SC).\n\n"
+
+ "set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)\n"
+ " Configure MACsec secure association (SA).\n\n"
#endif
+ "set vf broadcast (port_id) (vf_id) (on|off)\n"
+ " Set VF broadcast for a VF from the PF.\n\n"
+
"vlan set strip (on|off) (port_id)\n"
" Set the VLAN strip on a port.\n\n"
"vlan set stripq (on|off) (port_id,queue_id)\n"
" Set the VLAN strip for a queue on a port.\n\n"
-#ifdef RTE_LIBRTE_IXGBE_PMD
"set vf vlan stripq (port_id) (vf_id) (on|off)\n"
" Set the VLAN strip for all queues in a pool for a VF from the PF.\n\n"
@@ -291,7 +320,24 @@ static void cmd_help_long_parsed(void *parsed_result,
"set vf vlan antispoof (port_id) (vf_id) (on|off)\n"
" Set VLAN antispoof for a VF from the PF.\n\n"
-#endif
+
+ "set vf vlan tag (port_id) (vf_id) (on|off)\n"
+ " Set VLAN tag for a VF from the PF.\n\n"
+
+ "set vf tx max-bandwidth (port_id) (vf_id) (bandwidth)\n"
+ " Set a VF's max bandwidth(Mbps).\n\n"
+
+ "set vf tc tx min-bandwidth (port_id) (vf_id) (bw1, bw2, ...)\n"
+ " Set all TCs' min bandwidth(%%) on a VF.\n\n"
+
+ "set vf tc tx max-bandwidth (port_id) (vf_id) (tc_no) (bandwidth)\n"
+ " Set a TC's max bandwidth(Mbps) on a VF.\n\n"
+
+ "set tx strict-link-priority (port_id) (tc_bitmap)\n"
+ " Set some TCs' strict link priority mode on a physical port.\n\n"
+
+ "set tc tx min-bandwidth (port_id) (bw1, bw2, ...)\n"
+ " Set all TCs' min bandwidth(%%) for all PF and VFs.\n\n"
"vlan set filter (on|off) (port_id)\n"
" Set the VLAN filter on a port.\n\n"
@@ -383,13 +429,14 @@ static void cmd_help_long_parsed(void *parsed_result,
"mac_addr remove (port_id) (XX:XX:XX:XX:XX:XX)\n"
" Remove a MAC address from port_id.\n\n"
+ "mac_addr set (port_id) (XX:XX:XX:XX:XX:XX)\n"
+ " Set the default MAC address for port_id.\n\n"
+
"mac_addr add port (port_id) vf (vf_id) (mac_address)\n"
" Add a MAC address for a VF on the port.\n\n"
-#ifdef RTE_LIBRTE_IXGBE_PMD
"set vf mac addr (port_id) (vf_id) (XX:XX:XX:XX:XX:XX)\n"
" Set the MAC address for a VF from the PF.\n\n"
-#endif
"set port (port_id) uta (mac_address|all) (on|off)\n"
" Add/Remove a or all unicast hash filter(s)"
@@ -401,6 +448,12 @@ static void cmd_help_long_parsed(void *parsed_result,
"set allmulti (port_id|all) (on|off)\n"
" Set the allmulti mode on port_id, or all.\n\n"
+ "set vf promisc (port_id) (vf_id) (on|off)\n"
+ " Set unicast promiscuous mode for a VF from the PF.\n\n"
+
+ "set vf allmulti (port_id) (vf_id) (on|off)\n"
+ " Set multicast promiscuous mode for a VF from the PF.\n\n"
+
"set flow_ctrl rx (on|off) tx (on|off) (high_water)"
" (low_water) (pause_time) (send_xon) mac_ctrl_frame_fwd"
" (on|off) autoneg (on|off) (port_id)\n"
@@ -549,6 +602,21 @@ static void cmd_help_long_parsed(void *parsed_result,
"E-tag set filter del e-tag-id (value) port (port_id)\n"
" Delete an E-tag forwarding filter on a port\n\n"
+ "ddp add (port_id) (profile_path)\n"
+ " Load a profile package on a port\n\n"
+
+ "ptype mapping get (port_id) (valid_only)\n"
+ " Get ptype mapping on a port\n\n"
+
+ "ptype mapping replace (port_id) (target) (mask) (pky_type)\n"
+ " Replace target with the pkt_type in ptype mapping\n\n"
+
+ "ptype mapping reset (port_id)\n"
+ " Reset ptype mapping on a port\n\n"
+
+ "ptype mapping update (port_id) (hw_ptype) (sw_ptype)\n"
+ " Update a ptype mapping item on a port\n\n"
+
, list_pkt_forwarding_modes()
);
}
@@ -809,6 +877,33 @@ static void cmd_help_long_parsed(void *parsed_result,
"sctp-src-port|sctp-dst-port|sctp-veri-tag|none)"
" (select|add)\n"
" Set the input set for FDir.\n\n"
+
+ "flow validate {port_id}"
+ " [group {group_id}] [priority {level}]"
+ " [ingress] [egress]"
+ " pattern {item} [/ {item} [...]] / end"
+ " actions {action} [/ {action} [...]] / end\n"
+ " Check whether a flow rule can be created.\n\n"
+
+ "flow create {port_id}"
+ " [group {group_id}] [priority {level}]"
+ " [ingress] [egress]"
+ " pattern {item} [/ {item} [...]] / end"
+ " actions {action} [/ {action} [...]] / end\n"
+ " Create a flow rule.\n\n"
+
+ "flow destroy {port_id} rule {rule_id} [...]\n"
+ " Destroy specific flow rules.\n\n"
+
+ "flow flush {port_id}\n"
+ " Destroy all flow rules.\n\n"
+
+ "flow query {port_id} {rule_id} {action}\n"
+ " Query an existing flow rule.\n\n"
+
+ "flow list {port_id} [group {group_id}] [...]\n"
+ " List existing flow rules sorted by priority,"
+ " filtered by group identifiers.\n\n"
);
}
}
@@ -824,7 +919,8 @@ cmdline_parse_token_string_t cmd_help_long_section =
cmdline_parse_inst_t cmd_help_long = {
.f = cmd_help_long_parsed,
.data = NULL,
- .help_str = "show help",
+ .help_str = "help all|control|display|config|ports|register|filters: "
+ "Show help",
.tokens = {
(void *)&cmd_help_long_help,
(void *)&cmd_help_long_section,
@@ -868,7 +964,7 @@ cmdline_parse_token_string_t cmd_operate_port_all_all =
cmdline_parse_inst_t cmd_operate_port = {
.f = cmd_operate_port_parsed,
.data = NULL,
- .help_str = "port start|stop|close all: start/stop/close all ports",
+ .help_str = "port start|stop|close all: Start/Stop/Close all ports",
.tokens = {
(void *)&cmd_operate_port_all_cmd,
(void *)&cmd_operate_port_all_port,
@@ -913,7 +1009,7 @@ cmdline_parse_token_num_t cmd_operate_specific_port_id =
cmdline_parse_inst_t cmd_operate_specific_port = {
.f = cmd_operate_specific_port_parsed,
.data = NULL,
- .help_str = "port start|stop|close X: start/stop/close port X",
+ .help_str = "port start|stop|close <port_id>: Start/Stop/Close port_id",
.tokens = {
(void *)&cmd_operate_specific_port_cmd,
(void *)&cmd_operate_specific_port_port,
@@ -954,8 +1050,8 @@ cmdline_parse_token_string_t cmd_operate_attach_port_identifier =
cmdline_parse_inst_t cmd_operate_attach_port = {
.f = cmd_operate_attach_port_parsed,
.data = NULL,
- .help_str = "port attach identifier, "
- "identifier: pci address or virtual dev name",
+ .help_str = "port attach <identifier>: "
+ "(identifier: pci address or virtual dev name)",
.tokens = {
(void *)&cmd_operate_attach_port_port,
(void *)&cmd_operate_attach_port_keyword,
@@ -996,7 +1092,7 @@ cmdline_parse_token_num_t cmd_operate_detach_port_port_id =
cmdline_parse_inst_t cmd_operate_detach_port = {
.f = cmd_operate_detach_port_parsed,
.data = NULL,
- .help_str = "port detach port_id",
+ .help_str = "port detach <port_id>",
.tokens = {
(void *)&cmd_operate_detach_port_port,
(void *)&cmd_operate_detach_port_keyword,
@@ -1085,7 +1181,7 @@ cmd_config_speed_all_parsed(void *parsed_result,
&link_speed) < 0)
return;
- FOREACH_PORT(pid, ports) {
+ RTE_ETH_FOREACH_DEV(pid) {
ports[pid].dev_conf.link_speeds = link_speed;
}
@@ -1189,7 +1285,7 @@ cmdline_parse_token_string_t cmd_config_speed_specific_value2 =
cmdline_parse_inst_t cmd_config_speed_specific = {
.f = cmd_config_speed_specific_parsed,
.data = NULL,
- .help_str = "port config X speed "
+ .help_str = "port config <port_id> speed "
"10|100|1000|10000|25000|40000|50000|100000|auto duplex "
"half|full|auto",
.tokens = {
@@ -1279,7 +1375,7 @@ cmdline_parse_token_num_t cmd_config_rx_tx_value =
cmdline_parse_inst_t cmd_config_rx_tx = {
.f = cmd_config_rx_tx_parsed,
.data = NULL,
- .help_str = "port config all rxq|txq|rxd|txd value",
+ .help_str = "port config all rxq|txq|rxd|txd <value>",
.tokens = {
(void *)&cmd_config_rx_tx_port,
(void *)&cmd_config_rx_tx_keyword,
@@ -1354,7 +1450,7 @@ cmdline_parse_token_num_t cmd_config_max_pkt_len_value =
cmdline_parse_inst_t cmd_config_max_pkt_len = {
.f = cmd_config_max_pkt_len_parsed,
.data = NULL,
- .help_str = "port config all max-pkt-len value",
+ .help_str = "port config all max-pkt-len <value>",
.tokens = {
(void *)&cmd_config_max_pkt_len_port,
(void *)&cmd_config_max_pkt_len_keyword,
@@ -1405,7 +1501,7 @@ cmdline_parse_token_num_t cmd_config_mtu_value =
cmdline_parse_inst_t cmd_config_mtu = {
.f = cmd_config_mtu_parsed,
.data = NULL,
- .help_str = "port config mtu port_id value",
+ .help_str = "port config mtu <port_id> <value>",
.tokens = {
(void *)&cmd_config_mtu_port,
(void *)&cmd_config_mtu_keyword,
@@ -1625,7 +1721,8 @@ cmdline_parse_token_string_t cmd_config_rss_value =
cmdline_parse_inst_t cmd_config_rss = {
.f = cmd_config_rss_parsed,
.data = NULL,
- .help_str = "port config all rss all|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none",
+ .help_str = "port config all rss "
+ "all|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none",
.tokens = {
(void *)&cmd_config_rss_port,
(void *)&cmd_config_rss_keyword,
@@ -1738,12 +1835,11 @@ cmdline_parse_token_string_t cmd_config_rss_hash_key_value =
cmdline_parse_inst_t cmd_config_rss_hash_key = {
.f = cmd_config_rss_hash_key_parsed,
.data = NULL,
- .help_str =
- "port config X rss-hash-key ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|"
- "ipv4-sctp|ipv4-other|ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|"
- "ipv6-sctp|ipv6-other|l2-payload|"
- "ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex "
- "<string of hexa digits (variable length, NIC dependent)>\n",
+ .help_str = "port config <port_id> rss-hash-key "
+ "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
+ "ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
+ "l2-payload|ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex "
+ "<string of hex digits (variable length, NIC dependent)>",
.tokens = {
(void *)&cmd_config_rss_hash_key_port,
(void *)&cmd_config_rss_hash_key_config,
@@ -1838,7 +1934,7 @@ cmdline_parse_token_string_t cmd_config_rxtx_queue_opname =
cmdline_parse_inst_t cmd_config_rxtx_queue = {
.f = cmd_config_rxtx_queue_parsed,
.data = NULL,
- .help_str = "port X rxq|txq ID start|stop",
+ .help_str = "port <port_id> rxq|txq <queue_id> start|stop",
.tokens = {
(void *)&cmd_config_speed_all_port,
(void *)&cmd_config_rxtx_queue_portid,
@@ -1973,7 +2069,7 @@ cmdline_parse_token_string_t cmd_config_rss_reta_list_of_items =
cmdline_parse_inst_t cmd_config_rss_reta = {
.f = cmd_set_rss_reta_parsed,
.data = NULL,
- .help_str = "port config X rss reta (hash,queue)[,(hash,queue)]",
+ .help_str = "port config <port_id> rss reta <hash,queue[,hash,queue]*>",
.tokens = {
(void *)&cmd_config_rss_reta_port,
(void *)&cmd_config_rss_reta_keyword,
@@ -2006,7 +2102,9 @@ showport_parse_reta_config(struct rte_eth_rss_reta_entry64 *conf,
char s[256];
char *end;
char *str_fld[8];
- uint16_t i, num = nb_entries / RTE_RETA_GROUP_SIZE;
+ uint16_t i;
+ uint16_t num = (nb_entries + RTE_RETA_GROUP_SIZE - 1) /
+ RTE_RETA_GROUP_SIZE;
int ret;
p = strchr(p0, '(');
@@ -2080,7 +2178,7 @@ cmdline_parse_token_string_t cmd_showport_reta_list_of_items =
cmdline_parse_inst_t cmd_showport_reta = {
.f = cmd_showport_reta_parsed,
.data = NULL,
- .help_str = "show port X rss reta (size) (mask0,mask1,...)",
+ .help_str = "show port <port_id> rss reta <size> <mask0[,mask1]*>",
.tokens = {
(void *)&cmd_showport_reta_show,
(void *)&cmd_showport_reta_port,
@@ -2134,11 +2232,10 @@ cmdline_parse_token_string_t cmd_showport_rss_hash_rss_key =
cmdline_parse_inst_t cmd_showport_rss_hash = {
.f = cmd_showport_rss_hash_parsed,
.data = NULL,
- .help_str =
- "show port X rss-hash ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|"
- "ipv4-sctp|ipv4-other|ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|"
- "ipv6-sctp|ipv6-other|l2-payload|"
- "ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex (X = port number)\n",
+ .help_str = "show port <port_id> rss-hash "
+ "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
+ "ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
+ "l2-payload|ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex",
.tokens = {
(void *)&cmd_showport_rss_hash_show,
(void *)&cmd_showport_rss_hash_port,
@@ -2152,11 +2249,10 @@ cmdline_parse_inst_t cmd_showport_rss_hash = {
cmdline_parse_inst_t cmd_showport_rss_hash_key = {
.f = cmd_showport_rss_hash_parsed,
.data = (void *)1,
- .help_str =
- "show port X rss-hash ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|"
- "ipv4-sctp|ipv4-other|ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|"
- "ipv6-sctp|ipv6-other|l2-payload|"
- "ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex key (X = port number)\n",
+ .help_str = "show port <port_id> rss-hash "
+ "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
+ "ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
+ "l2-payload|ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex key",
.tokens = {
(void *)&cmd_showport_rss_hash_show,
(void *)&cmd_showport_rss_hash_port,
@@ -2253,10 +2349,10 @@ cmdline_parse_token_string_t cmd_config_dcb_pfc_en =
TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, pfc_en, "on#off");
cmdline_parse_inst_t cmd_config_dcb = {
- .f = cmd_config_dcb_parsed,
- .data = NULL,
- .help_str = "port config port-id dcb vt on|off nb-tcs pfc on|off",
- .tokens = {
+ .f = cmd_config_dcb_parsed,
+ .data = NULL,
+ .help_str = "port config <port-id> dcb vt on|off <num_tcs> pfc on|off",
+ .tokens = {
(void *)&cmd_config_dcb_port,
(void *)&cmd_config_dcb_config,
(void *)&cmd_config_dcb_port_id,
@@ -2321,7 +2417,7 @@ cmdline_parse_token_num_t cmd_config_burst_value =
cmdline_parse_inst_t cmd_config_burst = {
.f = cmd_config_burst_parsed,
.data = NULL,
- .help_str = "port config all burst value",
+ .help_str = "port config all burst <value>",
.tokens = {
(void *)&cmd_config_burst_port,
(void *)&cmd_config_burst_keyword,
@@ -2390,7 +2486,7 @@ cmdline_parse_token_num_t cmd_config_thresh_value =
cmdline_parse_inst_t cmd_config_thresh = {
.f = cmd_config_thresh_parsed,
.data = NULL,
- .help_str = "port config all txpt|txht|txwt|rxpt|rxht|rxwt value",
+ .help_str = "port config all txpt|txht|txwt|rxpt|rxht|rxwt <value>",
.tokens = {
(void *)&cmd_config_thresh_port,
(void *)&cmd_config_thresh_keyword,
@@ -2454,7 +2550,7 @@ cmdline_parse_token_num_t cmd_config_threshold_value =
cmdline_parse_inst_t cmd_config_threshold = {
.f = cmd_config_threshold_parsed,
.data = NULL,
- .help_str = "port config all txfreet|txrst|rxfreet value",
+ .help_str = "port config all txfreet|txrst|rxfreet <value>",
.tokens = {
(void *)&cmd_config_threshold_port,
(void *)&cmd_config_threshold_keyword,
@@ -2483,7 +2579,7 @@ cmdline_parse_token_string_t cmd_stop_stop =
cmdline_parse_inst_t cmd_stop = {
.f = cmd_stop_parsed,
.data = NULL,
- .help_str = "stop - stop packet forwarding",
+ .help_str = "stop: Stop packet forwarding",
.tokens = {
(void *)&cmd_stop_stop,
NULL,
@@ -2613,7 +2709,7 @@ cmdline_parse_token_string_t cmd_set_list_of_items =
cmdline_parse_inst_t cmd_set_fwd_list = {
.f = cmd_set_list_parsed,
.data = NULL,
- .help_str = "set corelist|portlist x[,y]*",
+ .help_str = "set corelist|portlist <list0[,list1]*>",
.tokens = {
(void *)&cmd_set_list_keyword,
(void *)&cmd_set_list_name,
@@ -2660,7 +2756,7 @@ cmdline_parse_token_num_t cmd_setmask_value =
cmdline_parse_inst_t cmd_set_fwd_mask = {
.f = cmd_set_mask_parsed,
.data = NULL,
- .help_str = "set coremask|portmask hexadecimal value",
+ .help_str = "set coremask|portmask <hexadecimal value>",
.tokens = {
(void *)&cmd_setmask_set,
(void *)&cmd_setmask_mask,
@@ -2706,7 +2802,7 @@ cmdline_parse_token_num_t cmd_set_value =
cmdline_parse_inst_t cmd_set_numbers = {
.f = cmd_set_parsed,
.data = NULL,
- .help_str = "set nbport|nbcore|burst|verbose value",
+ .help_str = "set nbport|nbcore|burst|verbose <value>",
.tokens = {
(void *)&cmd_set_set,
(void *)&cmd_set_what,
@@ -2752,7 +2848,7 @@ cmdline_parse_token_string_t cmd_set_txpkts_lengths =
cmdline_parse_inst_t cmd_set_txpkts = {
.f = cmd_set_txpkts_parsed,
.data = NULL,
- .help_str = "set txpkts x[,y]*",
+ .help_str = "set txpkts <len0[,len1]*>",
.tokens = {
(void *)&cmd_set_txpkts_keyword,
(void *)&cmd_set_txpkts_name,
@@ -2859,7 +2955,7 @@ cmdline_parse_token_num_t cmd_config_txqflags_value =
cmdline_parse_inst_t cmd_config_txqflags = {
.f = cmd_config_txqflags_parsed,
.data = NULL,
- .help_str = "port config all txqflags value",
+ .help_str = "port config all txqflags <value>",
.tokens = {
(void *)&cmd_config_txqflags_port,
(void *)&cmd_config_txqflags_config,
@@ -2907,8 +3003,9 @@ cmdline_parse_token_num_t cmd_rx_vlan_filter_all_portid =
cmdline_parse_inst_t cmd_rx_vlan_filter_all = {
.f = cmd_rx_vlan_filter_all_parsed,
.data = NULL,
- .help_str = "add/remove all identifiers to/from the set of VLAN "
- "Identifiers filtered by a port",
+ .help_str = "rx_vlan add|rm all <port_id>: "
+ "Add/Remove all identifiers to/from the set of VLAN "
+ "identifiers filtered by a port",
.tokens = {
(void *)&cmd_rx_vlan_filter_all_rx_vlan,
(void *)&cmd_rx_vlan_filter_all_what,
@@ -3007,8 +3104,9 @@ cmdline_parse_token_string_t cmd_vlan_offload_portid =
cmdline_parse_inst_t cmd_vlan_offload = {
.f = cmd_vlan_offload_parsed,
.data = NULL,
- .help_str = "set strip|filter|qinq|stripq on|off port_id[,queue_id], filter/strip for rx side"
- " qinq(extended) for both rx/tx sides ",
+ .help_str = "vlan set strip|filter|qinq|stripq on|off "
+ "<port_id[,queue_id]>: "
+ "Filter/Strip for rx side qinq(extended) for both rx/tx sides",
.tokens = {
(void *)&cmd_vlan_offload_vlan,
(void *)&cmd_vlan_offload_set,
@@ -3070,8 +3168,8 @@ cmdline_parse_token_num_t cmd_vlan_tpid_portid =
cmdline_parse_inst_t cmd_vlan_tpid = {
.f = cmd_vlan_tpid_parsed,
.data = NULL,
- .help_str = "set inner|outer tpid tp_id port_id, set the VLAN "
- "Ether type",
+ .help_str = "vlan set inner|outer tpid <tp_id> <port_id>: "
+ "Set the VLAN Ether type",
.tokens = {
(void *)&cmd_vlan_tpid_vlan,
(void *)&cmd_vlan_tpid_set,
@@ -3120,8 +3218,9 @@ cmdline_parse_token_num_t cmd_rx_vlan_filter_portid =
cmdline_parse_inst_t cmd_rx_vlan_filter = {
.f = cmd_rx_vlan_filter_parsed,
.data = NULL,
- .help_str = "add/remove a VLAN identifier to/from the set of VLAN "
- "Identifiers filtered by a port",
+ .help_str = "rx_vlan add|rm <vlan_id> <port_id>: "
+ "Add/Remove a VLAN identifier to/from the set of VLAN "
+ "identifiers filtered by a port",
.tokens = {
(void *)&cmd_rx_vlan_filter_rx_vlan,
(void *)&cmd_rx_vlan_filter_what,
@@ -3165,7 +3264,8 @@ cmdline_parse_token_num_t cmd_tx_vlan_set_vlanid =
cmdline_parse_inst_t cmd_tx_vlan_set = {
.f = cmd_tx_vlan_set_parsed,
.data = NULL,
- .help_str = "enable hardware insertion of a single VLAN header "
+ .help_str = "tx_vlan set <port_id> <vlan_id>: "
+ "Enable hardware insertion of a single VLAN header "
"with a given TAG Identifier in packets sent on a port",
.tokens = {
(void *)&cmd_tx_vlan_set_tx_vlan,
@@ -3214,7 +3314,8 @@ cmdline_parse_token_num_t cmd_tx_vlan_set_qinq_vlanid_outer =
cmdline_parse_inst_t cmd_tx_vlan_set_qinq = {
.f = cmd_tx_vlan_set_qinq_parsed,
.data = NULL,
- .help_str = "enable hardware insertion of double VLAN header "
+ .help_str = "tx_vlan set <port_id> <vlan_id> <outer_vlan_id>: "
+ "Enable hardware insertion of double VLAN header "
"with given TAG Identifiers in packets sent on a port",
.tokens = {
(void *)&cmd_tx_vlan_set_qinq_tx_vlan,
@@ -3271,7 +3372,7 @@ cmdline_parse_token_string_t cmd_tx_vlan_set_pvid_mode =
cmdline_parse_inst_t cmd_tx_vlan_set_pvid = {
.f = cmd_tx_vlan_set_pvid_parsed,
.data = NULL,
- .help_str = "tx_vlan set pvid port_id vlan_id (on|off)",
+ .help_str = "tx_vlan set pvid <port_id> <vlan_id> on|off",
.tokens = {
(void *)&cmd_tx_vlan_set_pvid_tx_vlan,
(void *)&cmd_tx_vlan_set_pvid_set,
@@ -3313,8 +3414,8 @@ cmdline_parse_token_num_t cmd_tx_vlan_reset_portid =
cmdline_parse_inst_t cmd_tx_vlan_reset = {
.f = cmd_tx_vlan_reset_parsed,
.data = NULL,
- .help_str = "disable hardware insertion of a VLAN header in packets "
- "sent on a port",
+ .help_str = "tx_vlan reset <port_id>: Disable hardware insertion of a "
+ "VLAN header in packets sent on a port",
.tokens = {
(void *)&cmd_tx_vlan_reset_tx_vlan,
(void *)&cmd_tx_vlan_reset_reset,
@@ -3440,8 +3541,9 @@ cmdline_parse_token_num_t cmd_csum_portid =
cmdline_parse_inst_t cmd_csum_set = {
.f = cmd_csum_parsed,
.data = NULL,
- .help_str = "enable/disable hardware calculation of L3/L4 checksum when "
- "using csum forward engine: csum set ip|tcp|udp|sctp|outer-ip hw|sw <port>",
+ .help_str = "csum set ip|tcp|udp|sctp|outer-ip hw|sw <port_id>: "
+ "Enable/Disable hardware calculation of L3/L4 checksum when "
+ "using csum forward engine",
.tokens = {
(void *)&cmd_csum_csum,
(void *)&cmd_csum_mode,
@@ -3459,7 +3561,7 @@ cmdline_parse_token_string_t cmd_csum_mode_show =
cmdline_parse_inst_t cmd_csum_show = {
.f = cmd_csum_parsed,
.data = NULL,
- .help_str = "show checksum offload configuration: csum show <port>",
+ .help_str = "csum show <port_id>: Show checksum offload configuration",
.tokens = {
(void *)&cmd_csum_csum,
(void *)&cmd_csum_mode_show,
@@ -3512,8 +3614,8 @@ cmdline_parse_token_num_t cmd_csum_tunnel_portid =
cmdline_parse_inst_t cmd_csum_tunnel = {
.f = cmd_csum_tunnel_parsed,
.data = NULL,
- .help_str = "enable/disable parsing of tunnels for csum engine: "
- "csum parse_tunnel on|off <tx-port>",
+ .help_str = "csum parse_tunnel on|off <port_id>: "
+ "Enable/Disable parsing of tunnels for csum engine",
.tokens = {
(void *)&cmd_csum_tunnel_csum,
(void *)&cmd_csum_tunnel_parse,
@@ -3576,8 +3678,9 @@ cmdline_parse_token_num_t cmd_tso_set_portid =
cmdline_parse_inst_t cmd_tso_set = {
.f = cmd_tso_set_parsed,
.data = NULL,
- .help_str = "Set TSO segment size of non-tunneled packets "
- "for csum engine (0 to disable): tso set <tso_segsz> <port>",
+ .help_str = "tso set <tso_segsz> <port_id>: "
+ "Set TSO segment size of non-tunneled packets for csum engine "
+ "(0 to disable)",
.tokens = {
(void *)&cmd_tso_set_tso,
(void *)&cmd_tso_set_mode,
@@ -3595,8 +3698,8 @@ cmdline_parse_token_string_t cmd_tso_show_mode =
cmdline_parse_inst_t cmd_tso_show = {
.f = cmd_tso_set_parsed,
.data = NULL,
- .help_str = "Show TSO segment size of non-tunneled packets "
- "for csum engine: tso show <port>",
+ .help_str = "tso show <port_id>: "
+ "Show TSO segment size of non-tunneled packets for csum engine",
.tokens = {
(void *)&cmd_tso_set_tso,
(void *)&cmd_tso_show_mode,
@@ -3692,8 +3795,9 @@ cmdline_parse_token_num_t cmd_tunnel_tso_set_portid =
cmdline_parse_inst_t cmd_tunnel_tso_set = {
.f = cmd_tunnel_tso_set_parsed,
.data = NULL,
- .help_str = "Set TSO segment size of tunneled packets for csum engine "
- "(0 to disable): tunnel_tso set <tso_segsz> <port>",
+ .help_str = "tunnel_tso set <tso_segsz> <port_id>: "
+ "Set TSO segment size of tunneled packets for csum engine "
+ "(0 to disable)",
.tokens = {
(void *)&cmd_tunnel_tso_set_tso,
(void *)&cmd_tunnel_tso_set_mode,
@@ -3711,8 +3815,8 @@ cmdline_parse_token_string_t cmd_tunnel_tso_show_mode =
cmdline_parse_inst_t cmd_tunnel_tso_show = {
.f = cmd_tunnel_tso_set_parsed,
.data = NULL,
- .help_str = "Show TSO segment size of tunneled packets "
- "for csum engine: tunnel_tso show <port>",
+ .help_str = "tunnel_tso show <port_id> "
+ "Show TSO segment size of tunneled packets for csum engine",
.tokens = {
(void *)&cmd_tunnel_tso_set_tso,
(void *)&cmd_tunnel_tso_show_mode,
@@ -3750,7 +3854,7 @@ cmdline_parse_token_string_t cmd_setflushrx_mode =
cmdline_parse_inst_t cmd_set_flush_rx = {
.f = cmd_set_flush_rx_parsed,
- .help_str = "set flush_rx on|off: enable/disable flush on rx streams",
+ .help_str = "set flush_rx on|off: Enable/Disable flush on rx streams",
.data = NULL,
.tokens = {
(void *)&cmd_setflushrx_set,
@@ -3789,7 +3893,7 @@ cmdline_parse_token_string_t cmd_setlinkcheck_mode =
cmdline_parse_inst_t cmd_set_link_check = {
.f = cmd_set_link_check_parsed,
- .help_str = "set link_check on|off: enable/disable link status check "
+ .help_str = "set link_check on|off: Enable/Disable link status check "
"when starting/stopping a port",
.data = NULL,
.tokens = {
@@ -3850,7 +3954,7 @@ cmdline_parse_token_num_t cmd_setbypass_mode_port =
cmdline_parse_inst_t cmd_set_bypass_mode = {
.f = cmd_set_bypass_mode_parsed,
- .help_str = "set bypass mode (normal|bypass|isolate) (port_id): "
+ .help_str = "set bypass mode normal|bypass|isolate <port_id>: "
"Set the NIC bypass mode for port_id",
.data = NULL,
.tokens = {
@@ -3950,9 +4054,9 @@ cmdline_parse_token_num_t cmd_setbypass_event_port =
cmdline_parse_inst_t cmd_set_bypass_event = {
.f = cmd_set_bypass_event_parsed,
- .help_str = "set bypass event (timeout|os_on|os_off|power_on|power_off) "
- "mode (normal|bypass|isolate) (port_id): "
- "Set the NIC bypass event mode for port_id",
+ .help_str = "set bypass event none|timeout|os_on|os_off|power_on|"
+ "power_off mode normal|bypass|isolate <port_id>: "
+ "Set the NIC bypass event mode for port_id",
.data = NULL,
.tokens = {
(void *)&cmd_setbypass_event_set,
@@ -4015,8 +4119,8 @@ cmdline_parse_token_string_t cmd_setbypass_timeout_value =
cmdline_parse_inst_t cmd_set_bypass_timeout = {
.f = cmd_set_bypass_timeout_parsed,
- .help_str = "set bypass timeout (0|1.5|2|3|4|8|16|32) seconds: "
- "Set the NIC bypass watchdog timeout",
+ .help_str = "set bypass timeout 0|1.5|2|3|4|8|16|32: "
+ "Set the NIC bypass watchdog timeout in seconds",
.data = NULL,
.tokens = {
(void *)&cmd_setbypass_timeout_set,
@@ -4109,7 +4213,7 @@ cmdline_parse_token_num_t cmd_showbypass_config_port =
cmdline_parse_inst_t cmd_show_bypass_config = {
.f = cmd_show_bypass_config_parsed,
- .help_str = "show bypass config (port_id): "
+ .help_str = "show bypass config <port_id>: "
"Show the NIC bypass config for port_id",
.data = NULL,
.tokens = {
@@ -4162,7 +4266,8 @@ TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_mode_result,
cmdline_parse_inst_t cmd_set_bonding_mode = {
.f = cmd_set_bonding_mode_parsed,
- .help_str = "set bonding mode (mode_value) (port_id): Set the bonding mode for port_id",
+ .help_str = "set bonding mode <mode_value> <port_id>: "
+ "Set the bonding mode for port_id",
.data = NULL,
.tokens = {
(void *) &cmd_setbonding_mode_set,
@@ -4227,7 +4332,9 @@ TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result,
cmdline_parse_inst_t cmd_set_balance_xmit_policy = {
.f = cmd_set_bonding_balance_xmit_policy_parsed,
- .help_str = "set bonding balance_xmit_policy (port_id) (policy_value): Set the bonding balance_xmit_policy for port_id",
+ .help_str = "set bonding balance_xmit_policy <port_id> "
+ "l2|l23|l34: "
+ "Set the bonding balance_xmit_policy for port_id",
.data = NULL,
.tokens = {
(void *)&cmd_setbonding_balance_xmit_policy_set,
@@ -4353,7 +4460,8 @@ TOKEN_NUM_INITIALIZER(struct cmd_show_bonding_config_result,
cmdline_parse_inst_t cmd_show_bonding_config = {
.f = cmd_show_bonding_config_parsed,
- .help_str = "show bonding config (port_id): Show the bonding config for port_id",
+ .help_str = "show bonding config <port_id>: "
+ "Show the bonding config for port_id",
.data = NULL,
.tokens = {
(void *)&cmd_showbonding_config_show,
@@ -4408,7 +4516,8 @@ TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_primary_result,
cmdline_parse_inst_t cmd_set_bonding_primary = {
.f = cmd_set_bonding_primary_parsed,
- .help_str = "set bonding primary (slave_id) (port_id): Set the primary slave for port_id",
+ .help_str = "set bonding primary <slave_id> <port_id>: "
+ "Set the primary slave for port_id",
.data = NULL,
.tokens = {
(void *)&cmd_setbonding_primary_set,
@@ -4465,7 +4574,8 @@ TOKEN_NUM_INITIALIZER(struct cmd_add_bonding_slave_result,
cmdline_parse_inst_t cmd_add_bonding_slave = {
.f = cmd_add_bonding_slave_parsed,
- .help_str = "add bonding slave (slave_id) (port_id): Add a slave device to a bonded device",
+ .help_str = "add bonding slave <slave_id> <port_id>: "
+ "Add a slave device to a bonded device",
.data = NULL,
.tokens = {
(void *)&cmd_addbonding_slave_add,
@@ -4522,7 +4632,8 @@ cmdline_parse_token_num_t cmd_removebonding_slave_port =
cmdline_parse_inst_t cmd_remove_bonding_slave = {
.f = cmd_remove_bonding_slave_parsed,
- .help_str = "remove bonding slave (slave_id) (port_id): Remove a slave device from a bonded device",
+ .help_str = "remove bonding slave <slave_id> <port_id>: "
+ "Remove a slave device from a bonded device",
.data = NULL,
.tokens = {
(void *)&cmd_removebonding_slave_remove,
@@ -4574,7 +4685,6 @@ static void cmd_create_bonded_device_parsed(void *parsed_result,
nb_ports = rte_eth_dev_count();
reconfig(port_id, res->socket);
rte_eth_promiscuous_enable(port_id);
- ports[port_id].enabled = 1;
}
}
@@ -4597,7 +4707,8 @@ cmdline_parse_token_num_t cmd_createbonded_device_socket =
cmdline_parse_inst_t cmd_create_bonded_device = {
.f = cmd_create_bonded_device_parsed,
- .help_str = "create bonded device (mode) (socket): Create a new bonded device with specific bonding mode and socket",
+ .help_str = "create bonded device <mode> <socket>: "
+ "Create a new bonded device with specific bonding mode and socket",
.data = NULL,
.tokens = {
(void *)&cmd_createbonded_device_create,
@@ -4651,7 +4762,7 @@ cmdline_parse_token_etheraddr_t cmd_set_bond_mac_addr_addr =
cmdline_parse_inst_t cmd_set_bond_mac_addr = {
.f = cmd_set_bond_mac_addr_parsed,
.data = (void *) 0,
- .help_str = "set bonding mac_addr (port_id) (address): ",
+ .help_str = "set bonding mac_addr <port_id> <mac_addr>",
.tokens = {
(void *)&cmd_set_bond_mac_addr_set,
(void *)&cmd_set_bond_mac_addr_bonding,
@@ -4710,7 +4821,7 @@ cmdline_parse_token_num_t cmd_set_bond_mon_period_period_ms =
cmdline_parse_inst_t cmd_set_bond_mon_period = {
.f = cmd_set_bond_mon_period_parsed,
.data = (void *) 0,
- .help_str = "set bonding mon_period (port_id) (period_ms): ",
+ .help_str = "set bonding mon_period <port_id> <period_ms>",
.tokens = {
(void *)&cmd_set_bond_mon_period_set,
(void *)&cmd_set_bond_mon_period_bonding,
@@ -4768,8 +4879,8 @@ static void cmd_set_fwd_mode_init(void)
cmdline_parse_token_string_t *token_struct;
modes = list_pkt_forwarding_modes();
- snprintf(help, sizeof help, "set fwd %s - "
- "set packet forwarding mode", modes);
+ snprintf(help, sizeof(help), "set fwd %s: "
+ "Set packet forwarding mode", modes);
cmd_set_fwd_mode.help_str = help;
/* string token separator is # */
@@ -4835,8 +4946,8 @@ static void cmd_set_fwd_retry_mode_init(void)
cmdline_parse_token_string_t *token_struct;
modes = list_pkt_forwarding_retry_modes();
- snprintf(help, sizeof(help), "set fwd %s retry - "
- "set packet forwarding mode with retry", modes);
+ snprintf(help, sizeof(help), "set fwd %s retry: "
+ "Set packet forwarding mode with retry", modes);
cmd_set_fwd_retry_mode.help_str = help;
/* string token separator is # */
@@ -4895,7 +5006,7 @@ cmdline_parse_token_num_t cmd_set_burst_tx_retry_retry_num =
cmdline_parse_inst_t cmd_set_burst_tx_retry = {
.f = cmd_set_burst_tx_retry_parsed,
- .help_str = "set burst tx delay (time_by_useconds) retry (retry_num)",
+ .help_str = "set burst tx delay <delay_usec> retry <num_retry>",
.tokens = {
(void *)&cmd_set_burst_tx_retry_set,
(void *)&cmd_set_burst_tx_retry_burst,
@@ -4932,7 +5043,7 @@ static void cmd_set_promisc_mode_parsed(void *parsed_result,
/* all ports */
if (allports) {
- FOREACH_PORT(i, ports) {
+ RTE_ETH_FOREACH_DEV(i) {
if (enable)
rte_eth_promiscuous_enable(i);
else
@@ -4965,7 +5076,7 @@ cmdline_parse_token_string_t cmd_setpromisc_mode =
cmdline_parse_inst_t cmd_set_promisc_mode_all = {
.f = cmd_set_promisc_mode_parsed,
.data = (void *)1,
- .help_str = "set promisc all on|off: set promisc mode for all ports",
+ .help_str = "set promisc all on|off: Set promisc mode for all ports",
.tokens = {
(void *)&cmd_setpromisc_set,
(void *)&cmd_setpromisc_promisc,
@@ -4978,7 +5089,7 @@ cmdline_parse_inst_t cmd_set_promisc_mode_all = {
cmdline_parse_inst_t cmd_set_promisc_mode_one = {
.f = cmd_set_promisc_mode_parsed,
.data = (void *)0,
- .help_str = "set promisc X on|off: set promisc mode on port X",
+ .help_str = "set promisc <port_id> on|off: Set promisc mode on port_id",
.tokens = {
(void *)&cmd_setpromisc_set,
(void *)&cmd_setpromisc_promisc,
@@ -5012,7 +5123,7 @@ static void cmd_set_allmulti_mode_parsed(void *parsed_result,
/* all ports */
if (allports) {
- FOREACH_PORT(i, ports) {
+ RTE_ETH_FOREACH_DEV(i) {
if (enable)
rte_eth_allmulticast_enable(i);
else
@@ -5045,7 +5156,7 @@ cmdline_parse_token_string_t cmd_setallmulti_mode =
cmdline_parse_inst_t cmd_set_allmulti_mode_all = {
.f = cmd_set_allmulti_mode_parsed,
.data = (void *)1,
- .help_str = "set allmulti all on|off: set allmulti mode for all ports",
+ .help_str = "set allmulti all on|off: Set allmulti mode for all ports",
.tokens = {
(void *)&cmd_setallmulti_set,
(void *)&cmd_setallmulti_allmulti,
@@ -5058,7 +5169,8 @@ cmdline_parse_inst_t cmd_set_allmulti_mode_all = {
cmdline_parse_inst_t cmd_set_allmulti_mode_one = {
.f = cmd_set_allmulti_mode_parsed,
.data = (void *)0,
- .help_str = "set allmulti X on|off: set allmulti mode on port X",
+ .help_str = "set allmulti <port_id> on|off: "
+ "Set allmulti mode on port_id",
.tokens = {
(void *)&cmd_setallmulti_set,
(void *)&cmd_setallmulti_allmulti,
@@ -5157,9 +5269,9 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result, struct cmdline *cl,
cmdline_parse_inst_t cmd_link_flow_control_set = {
.f = cmd_link_flow_ctrl_set_parsed,
.data = NULL,
- .help_str = "Configure the Ethernet flow control: set flow_ctrl rx on|off \
-tx on|off high_water low_water pause_time send_xon mac_ctrl_frame_fwd on|off \
-autoneg on|off port_id",
+ .help_str = "set flow_ctrl rx on|off tx on|off <high_water> "
+ "<low_water> <pause_time> <send_xon> mac_ctrl_frame_fwd on|off "
+ "autoneg on|off <port_id>: Configure the Ethernet flow control",
.tokens = {
(void *)&cmd_lfc_set_set,
(void *)&cmd_lfc_set_flow_ctrl,
@@ -5183,8 +5295,8 @@ autoneg on|off port_id",
cmdline_parse_inst_t cmd_link_flow_control_set_rx = {
.f = cmd_link_flow_ctrl_set_parsed,
.data = (void *)&cmd_link_flow_control_set_rx,
- .help_str = "Change rx flow control parameter: set flow_ctrl "
- "rx on|off port_id",
+ .help_str = "set flow_ctrl rx on|off <port_id>: "
+ "Change rx flow control parameter",
.tokens = {
(void *)&cmd_lfc_set_set,
(void *)&cmd_lfc_set_flow_ctrl,
@@ -5198,8 +5310,8 @@ cmdline_parse_inst_t cmd_link_flow_control_set_rx = {
cmdline_parse_inst_t cmd_link_flow_control_set_tx = {
.f = cmd_link_flow_ctrl_set_parsed,
.data = (void *)&cmd_link_flow_control_set_tx,
- .help_str = "Change tx flow control parameter: set flow_ctrl "
- "tx on|off port_id",
+ .help_str = "set flow_ctrl tx on|off <port_id>: "
+ "Change tx flow control parameter",
.tokens = {
(void *)&cmd_lfc_set_set,
(void *)&cmd_lfc_set_flow_ctrl,
@@ -5213,8 +5325,8 @@ cmdline_parse_inst_t cmd_link_flow_control_set_tx = {
cmdline_parse_inst_t cmd_link_flow_control_set_hw = {
.f = cmd_link_flow_ctrl_set_parsed,
.data = (void *)&cmd_link_flow_control_set_hw,
- .help_str = "Change high water flow control parameter: set flow_ctrl "
- "high_water value port_id",
+ .help_str = "set flow_ctrl high_water <value> <port_id>: "
+ "Change high water flow control parameter",
.tokens = {
(void *)&cmd_lfc_set_set,
(void *)&cmd_lfc_set_flow_ctrl,
@@ -5228,8 +5340,8 @@ cmdline_parse_inst_t cmd_link_flow_control_set_hw = {
cmdline_parse_inst_t cmd_link_flow_control_set_lw = {
.f = cmd_link_flow_ctrl_set_parsed,
.data = (void *)&cmd_link_flow_control_set_lw,
- .help_str = "Change low water flow control parameter: set flow_ctrl "
- "low_water value port_id",
+ .help_str = "set flow_ctrl low_water <value> <port_id>: "
+ "Change low water flow control parameter",
.tokens = {
(void *)&cmd_lfc_set_set,
(void *)&cmd_lfc_set_flow_ctrl,
@@ -5243,8 +5355,8 @@ cmdline_parse_inst_t cmd_link_flow_control_set_lw = {
cmdline_parse_inst_t cmd_link_flow_control_set_pt = {
.f = cmd_link_flow_ctrl_set_parsed,
.data = (void *)&cmd_link_flow_control_set_pt,
- .help_str = "Change pause time flow control parameter: set flow_ctrl "
- "pause_time value port_id",
+ .help_str = "set flow_ctrl pause_time <value> <port_id>: "
+ "Change pause time flow control parameter",
.tokens = {
(void *)&cmd_lfc_set_set,
(void *)&cmd_lfc_set_flow_ctrl,
@@ -5258,8 +5370,8 @@ cmdline_parse_inst_t cmd_link_flow_control_set_pt = {
cmdline_parse_inst_t cmd_link_flow_control_set_xon = {
.f = cmd_link_flow_ctrl_set_parsed,
.data = (void *)&cmd_link_flow_control_set_xon,
- .help_str = "Change send_xon flow control parameter: set flow_ctrl "
- "send_xon value port_id",
+ .help_str = "set flow_ctrl send_xon <value> <port_id>: "
+ "Change send_xon flow control parameter",
.tokens = {
(void *)&cmd_lfc_set_set,
(void *)&cmd_lfc_set_flow_ctrl,
@@ -5273,8 +5385,8 @@ cmdline_parse_inst_t cmd_link_flow_control_set_xon = {
cmdline_parse_inst_t cmd_link_flow_control_set_macfwd = {
.f = cmd_link_flow_ctrl_set_parsed,
.data = (void *)&cmd_link_flow_control_set_macfwd,
- .help_str = "Change mac ctrl fwd flow control parameter: set flow_ctrl "
- "mac_ctrl_frame_fwd on|off port_id",
+ .help_str = "set flow_ctrl mac_ctrl_frame_fwd on|off <port_id>: "
+ "Change mac ctrl fwd flow control parameter",
.tokens = {
(void *)&cmd_lfc_set_set,
(void *)&cmd_lfc_set_flow_ctrl,
@@ -5288,8 +5400,8 @@ cmdline_parse_inst_t cmd_link_flow_control_set_macfwd = {
cmdline_parse_inst_t cmd_link_flow_control_set_autoneg = {
.f = cmd_link_flow_ctrl_set_parsed,
.data = (void *)&cmd_link_flow_control_set_autoneg,
- .help_str = "Change autoneg flow control parameter: set flow_ctrl "
- "autoneg on|off port_id",
+ .help_str = "set flow_ctrl autoneg on|off <port_id>: "
+ "Change autoneg flow control parameter",
.tokens = {
(void *)&cmd_lfc_set_set,
(void *)&cmd_lfc_set_flow_ctrl,
@@ -5374,7 +5486,7 @@ cmd_link_flow_ctrl_set_parsed(void *parsed_result,
printf("bad flow contrl parameter, return code = %d \n", ret);
}
-/* *** SETUP ETHERNET PIRORITY FLOW CONTROL *** */
+/* *** SETUP ETHERNET PRIORITY FLOW CONTROL *** */
struct cmd_priority_flow_ctrl_set_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t pfc_ctrl;
@@ -5459,8 +5571,9 @@ cmdline_parse_token_num_t cmd_pfc_set_portid =
cmdline_parse_inst_t cmd_priority_flow_control_set = {
.f = cmd_priority_flow_ctrl_set_parsed,
.data = NULL,
- .help_str = "Configure the Ethernet priority flow control: set pfc_ctrl rx on|off\n\
- tx on|off high_water low_water pause_time priority port_id",
+ .help_str = "set pfc_ctrl rx on|off tx on|off <high_water> <low_water> "
+ "<pause_time> <priority> <port_id>: "
+ "Configure the Ethernet priority flow control",
.tokens = {
(void *)&cmd_pfc_set_set,
(void *)&cmd_pfc_set_flow_ctrl,
@@ -5500,7 +5613,7 @@ cmdline_parse_token_string_t cmd_reset_def =
cmdline_parse_inst_t cmd_reset = {
.f = cmd_reset_parsed,
.data = NULL,
- .help_str = "set default: reset default forwarding configuration",
+ .help_str = "set default: Reset default forwarding configuration",
.tokens = {
(void *)&cmd_reset_set,
(void *)&cmd_reset_def,
@@ -5526,7 +5639,7 @@ static void cmd_start_parsed(__attribute__((unused)) void *parsed_result,
cmdline_parse_inst_t cmd_start = {
.f = cmd_start_parsed,
.data = NULL,
- .help_str = "start packet forwarding",
+ .help_str = "start: Start packet forwarding",
.tokens = {
(void *)&cmd_start_start,
NULL,
@@ -5557,7 +5670,8 @@ cmdline_parse_token_string_t cmd_start_tx_first_tx_first =
cmdline_parse_inst_t cmd_start_tx_first = {
.f = cmd_start_tx_first_parsed,
.data = NULL,
- .help_str = "start packet forwarding, after sending 1 burst of packets",
+ .help_str = "start tx_first: Start packet forwarding, "
+ "after sending 1 burst of packets",
.tokens = {
(void *)&cmd_start_tx_first_start,
(void *)&cmd_start_tx_first_tx_first,
@@ -5595,8 +5709,8 @@ cmdline_parse_token_num_t cmd_start_tx_first_n_tx_num =
cmdline_parse_inst_t cmd_start_tx_first_n = {
.f = cmd_start_tx_first_n_parsed,
.data = NULL,
- .help_str = "start packet forwarding, after sending <num> "
- "bursts of packets",
+ .help_str = "start tx_first <num>: "
+ "packet forwarding, after sending <num> bursts of packets",
.tokens = {
(void *)&cmd_start_tx_first_n_start,
(void *)&cmd_start_tx_first_n_tx_first,
@@ -5634,7 +5748,7 @@ static void cmd_set_link_up_parsed(__attribute__((unused)) void *parsed_result,
cmdline_parse_inst_t cmd_set_link_up = {
.f = cmd_set_link_up_parsed,
.data = NULL,
- .help_str = "set link-up port (port id)",
+ .help_str = "set link-up port <port id>",
.tokens = {
(void *)&cmd_set_link_up_set,
(void *)&cmd_set_link_up_link_up,
@@ -5674,7 +5788,7 @@ static void cmd_set_link_down_parsed(
cmdline_parse_inst_t cmd_set_link_down = {
.f = cmd_set_link_down_parsed,
.data = NULL,
- .help_str = "set link-down port (port id)",
+ .help_str = "set link-down port <port id>",
.tokens = {
(void *)&cmd_set_link_down_set,
(void *)&cmd_set_link_down_link_down,
@@ -5743,29 +5857,32 @@ static void cmd_showportall_parsed(void *parsed_result,
struct cmd_showportall_result *res = parsed_result;
if (!strcmp(res->show, "clear")) {
if (!strcmp(res->what, "stats"))
- FOREACH_PORT(i, ports)
+ RTE_ETH_FOREACH_DEV(i)
nic_stats_clear(i);
else if (!strcmp(res->what, "xstats"))
- FOREACH_PORT(i, ports)
+ RTE_ETH_FOREACH_DEV(i)
nic_xstats_clear(i);
} else if (!strcmp(res->what, "info"))
- FOREACH_PORT(i, ports)
+ RTE_ETH_FOREACH_DEV(i)
port_infos_display(i);
else if (!strcmp(res->what, "stats"))
- FOREACH_PORT(i, ports)
+ RTE_ETH_FOREACH_DEV(i)
nic_stats_display(i);
else if (!strcmp(res->what, "xstats"))
- FOREACH_PORT(i, ports)
+ RTE_ETH_FOREACH_DEV(i)
nic_xstats_display(i);
else if (!strcmp(res->what, "fdir"))
- FOREACH_PORT(i, ports)
+ RTE_ETH_FOREACH_DEV(i)
fdir_get_infos(i);
else if (!strcmp(res->what, "stat_qmap"))
- FOREACH_PORT(i, ports)
+ RTE_ETH_FOREACH_DEV(i)
nic_stats_mapping_display(i);
else if (!strcmp(res->what, "dcb_tc"))
- FOREACH_PORT(i, ports)
+ RTE_ETH_FOREACH_DEV(i)
port_dcb_info_display(i);
+ else if (!strcmp(res->what, "cap"))
+ RTE_ETH_FOREACH_DEV(i)
+ port_offload_cap_display(i);
}
cmdline_parse_token_string_t cmd_showportall_show =
@@ -5775,13 +5892,14 @@ cmdline_parse_token_string_t cmd_showportall_port =
TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, port, "port");
cmdline_parse_token_string_t cmd_showportall_what =
TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, what,
- "info#stats#xstats#fdir#stat_qmap#dcb_tc");
+ "info#stats#xstats#fdir#stat_qmap#dcb_tc#cap");
cmdline_parse_token_string_t cmd_showportall_all =
TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, all, "all");
cmdline_parse_inst_t cmd_showportall = {
.f = cmd_showportall_parsed,
.data = NULL,
- .help_str = "show|clear port info|stats|xstats|fdir|stat_qmap|dcb_tc all",
+ .help_str = "show|clear port "
+ "info|stats|xstats|fdir|stat_qmap|dcb_tc|cap all",
.tokens = {
(void *)&cmd_showportall_show,
(void *)&cmd_showportall_port,
@@ -5821,6 +5939,8 @@ static void cmd_showport_parsed(void *parsed_result,
nic_stats_mapping_display(res->portnum);
else if (!strcmp(res->what, "dcb_tc"))
port_dcb_info_display(res->portnum);
+ else if (!strcmp(res->what, "cap"))
+ port_offload_cap_display(res->portnum);
}
cmdline_parse_token_string_t cmd_showport_show =
@@ -5830,14 +5950,16 @@ cmdline_parse_token_string_t cmd_showport_port =
TOKEN_STRING_INITIALIZER(struct cmd_showport_result, port, "port");
cmdline_parse_token_string_t cmd_showport_what =
TOKEN_STRING_INITIALIZER(struct cmd_showport_result, what,
- "info#stats#xstats#fdir#stat_qmap#dcb_tc");
+ "info#stats#xstats#fdir#stat_qmap#dcb_tc#cap");
cmdline_parse_token_num_t cmd_showport_portnum =
TOKEN_NUM_INITIALIZER(struct cmd_showport_result, portnum, UINT8);
cmdline_parse_inst_t cmd_showport = {
.f = cmd_showport_parsed,
.data = NULL,
- .help_str = "show|clear port info|stats|xstats|fdir|stat_qmap|dcb_tc X (X = port number)",
+ .help_str = "show|clear port "
+ "info|stats|xstats|fdir|stat_qmap|dcb_tc|cap "
+ "<port_id>",
.tokens = {
(void *)&cmd_showport_show,
(void *)&cmd_showport_port,
@@ -5883,7 +6005,7 @@ cmdline_parse_token_num_t cmd_showqueue_queuenum =
cmdline_parse_inst_t cmd_showqueue = {
.f = cmd_showqueue_parsed,
.data = NULL,
- .help_str = "show rxq|txq info <port number> <queue_number>",
+ .help_str = "show rxq|txq info <port_id> <queue_id>",
.tokens = {
(void *)&cmd_showqueue_show,
(void *)&cmd_showqueue_type,
@@ -5923,7 +6045,7 @@ cmdline_parse_token_num_t cmd_read_reg_reg_off =
cmdline_parse_inst_t cmd_read_reg = {
.f = cmd_read_reg_parsed,
.data = NULL,
- .help_str = "read reg port_id reg_off",
+ .help_str = "read reg <port_id> <reg_off>",
.tokens = {
(void *)&cmd_read_reg_read,
(void *)&cmd_read_reg_reg,
@@ -5975,8 +6097,8 @@ cmdline_parse_token_num_t cmd_read_reg_bit_field_bit2_pos =
cmdline_parse_inst_t cmd_read_reg_bit_field = {
.f = cmd_read_reg_bit_field_parsed,
.data = NULL,
- .help_str = "read regfield port_id reg_off bit_x bit_y "
- "(read register bit field between bit_x and bit_y included)",
+ .help_str = "read regfield <port_id> <reg_off> <bit_x> <bit_y>: "
+ "Read register bit field between bit_x and bit_y included",
.tokens = {
(void *)&cmd_read_reg_bit_field_read,
(void *)&cmd_read_reg_bit_field_regfield,
@@ -6021,7 +6143,7 @@ cmdline_parse_token_num_t cmd_read_reg_bit_bit_pos =
cmdline_parse_inst_t cmd_read_reg_bit = {
.f = cmd_read_reg_bit_parsed,
.data = NULL,
- .help_str = "read regbit port_id reg_off bit_x (0 <= bit_x <= 31)",
+ .help_str = "read regbit <port_id> <reg_off> <bit_x>: 0 <= bit_x <= 31",
.tokens = {
(void *)&cmd_read_reg_bit_read,
(void *)&cmd_read_reg_bit_regbit,
@@ -6064,7 +6186,7 @@ cmdline_parse_token_num_t cmd_write_reg_value =
cmdline_parse_inst_t cmd_write_reg = {
.f = cmd_write_reg_parsed,
.data = NULL,
- .help_str = "write reg port_id reg_off reg_value",
+ .help_str = "write reg <port_id> <reg_off> <reg_value>",
.tokens = {
(void *)&cmd_write_reg_write,
(void *)&cmd_write_reg_reg,
@@ -6121,8 +6243,9 @@ cmdline_parse_token_num_t cmd_write_reg_bit_field_value =
cmdline_parse_inst_t cmd_write_reg_bit_field = {
.f = cmd_write_reg_bit_field_parsed,
.data = NULL,
- .help_str = "write regfield port_id reg_off bit_x bit_y reg_value"
- "(set register bit field between bit_x and bit_y included)",
+ .help_str = "write regfield <port_id> <reg_off> <bit_x> <bit_y> "
+ "<reg_value>: "
+ "Set register bit field between bit_x and bit_y included",
.tokens = {
(void *)&cmd_write_reg_bit_field_write,
(void *)&cmd_write_reg_bit_field_regfield,
@@ -6172,7 +6295,8 @@ cmdline_parse_token_num_t cmd_write_reg_bit_value =
cmdline_parse_inst_t cmd_write_reg_bit = {
.f = cmd_write_reg_bit_parsed,
.data = NULL,
- .help_str = "write regbit port_id reg_off bit_x 0/1 (0 <= bit_x <= 31)",
+ .help_str = "write regbit <port_id> <reg_off> <bit_x> 0|1: "
+ "0 <= bit_x <= 31",
.tokens = {
(void *)&cmd_write_reg_bit_write,
(void *)&cmd_write_reg_bit_regbit,
@@ -6221,7 +6345,7 @@ cmdline_parse_token_num_t cmd_read_rxd_txd_desc_id =
cmdline_parse_inst_t cmd_read_rxd_txd = {
.f = cmd_read_rxd_txd_parsed,
.data = NULL,
- .help_str = "read rxd|txd port_id queue_id rxd_id",
+ .help_str = "read rxd|txd <port_id> <queue_id> <desc_id>",
.tokens = {
(void *)&cmd_read_rxd_txd_read,
(void *)&cmd_read_rxd_txd_rxd_txd,
@@ -6251,7 +6375,7 @@ cmdline_parse_token_string_t cmd_quit_quit =
cmdline_parse_inst_t cmd_quit = {
.f = cmd_quit_parsed,
.data = NULL,
- .help_str = "exit application",
+ .help_str = "quit: Exit application",
.tokens = {
(void *)&cmd_quit_quit,
NULL,
@@ -6275,6 +6399,9 @@ static void cmd_mac_addr_parsed(void *parsed_result,
if (strcmp(res->what, "add") == 0)
ret = rte_eth_dev_mac_addr_add(res->port_num, &res->address, 0);
+ else if (strcmp(res->what, "set") == 0)
+ ret = rte_eth_dev_default_mac_addr_set(res->port_num,
+ &res->address);
else
ret = rte_eth_dev_mac_addr_remove(res->port_num, &res->address);
@@ -6289,7 +6416,7 @@ cmdline_parse_token_string_t cmd_mac_addr_cmd =
"mac_addr");
cmdline_parse_token_string_t cmd_mac_addr_what =
TOKEN_STRING_INITIALIZER(struct cmd_mac_addr_result, what,
- "add#remove");
+ "add#remove#set");
cmdline_parse_token_num_t cmd_mac_addr_portnum =
TOKEN_NUM_INITIALIZER(struct cmd_mac_addr_result, port_num, UINT8);
cmdline_parse_token_etheraddr_t cmd_mac_addr_addr =
@@ -6298,8 +6425,8 @@ cmdline_parse_token_etheraddr_t cmd_mac_addr_addr =
cmdline_parse_inst_t cmd_mac_addr = {
.f = cmd_mac_addr_parsed,
.data = (void *)0,
- .help_str = "mac_addr add|remove X <address>: "
- "add/remove MAC address on port X",
+ .help_str = "mac_addr add|remove|set <port_id> <mac_addr>: "
+ "Add/Remove/Set MAC address on port_id",
.tokens = {
(void *)&cmd_mac_addr_cmd,
(void *)&cmd_mac_addr_what,
@@ -6353,7 +6480,8 @@ cmdline_parse_token_num_t cmd_setqmap_mapvalue =
cmdline_parse_inst_t cmd_set_qmap = {
.f = cmd_set_qmap_parsed,
.data = NULL,
- .help_str = "Set statistics mapping value on tx|rx queue_id of port_id",
+ .help_str = "set stat_qmap rx|tx <port_id> <queue_id> <map_value>: "
+ "Set statistics mapping value on tx|rx queue_id of port_id",
.tokens = {
(void *)&cmd_setqmap_set,
(void *)&cmd_setqmap_qmap,
@@ -6415,7 +6543,7 @@ cmdline_parse_token_string_t cmd_set_uc_hash_mode =
cmdline_parse_inst_t cmd_set_uc_hash_filter = {
.f = cmd_set_uc_hash_parsed,
.data = NULL,
- .help_str = "set port X uta Y on|off(X = port number,Y = MAC address)",
+ .help_str = "set port <port_id> uta <mac_addr> on|off)",
.tokens = {
(void *)&cmd_set_uc_hash_set,
(void *)&cmd_set_uc_hash_port,
@@ -6476,7 +6604,7 @@ cmdline_parse_token_string_t cmd_set_uc_all_hash_mode =
cmdline_parse_inst_t cmd_set_uc_all_hash_filter = {
.f = cmd_set_uc_all_hash_parsed,
.data = NULL,
- .help_str = "set port X uta all on|off (X = port number)",
+ .help_str = "set port <port_id> uta all on|off",
.tokens = {
(void *)&cmd_set_uc_all_hash_set,
(void *)&cmd_set_uc_all_hash_port,
@@ -6575,12 +6703,10 @@ cmdline_parse_token_string_t cmd_set_vf_macvlan_mode =
cmdline_parse_inst_t cmd_set_vf_macvlan_filter = {
.f = cmd_set_vf_macvlan_parsed,
.data = NULL,
- .help_str = "set port (portid) vf (vfid) (mac-addr) "
- "(exact-mac|exact-mac-vlan|hashmac|hashmac-vlan) "
- "on|off\n"
- "exact match rule:exact match of MAC or MAC and VLAN; "
- "hash match rule: hash match of MAC and exact match "
- "of VLAN",
+ .help_str = "set port <port_id> vf <vf_id> <mac_addr> "
+ "exact-mac|exact-mac-vlan|hashmac|hashmac-vlan on|off: "
+ "Exact match rule: exact match of MAC or MAC and VLAN; "
+ "hash match rule: hash match of MAC and exact match of VLAN",
.tokens = {
(void *)&cmd_set_vf_macvlan_set,
(void *)&cmd_set_vf_macvlan_port,
@@ -6594,6 +6720,7 @@ cmdline_parse_inst_t cmd_set_vf_macvlan_filter = {
},
};
+#ifdef RTE_LIBRTE_IXGBE_PMD
/* *** CONFIGURE VF TRAFFIC CONTROL *** */
struct cmd_set_vf_traffic {
cmdline_fixed_string_t set;
@@ -6642,8 +6769,7 @@ cmdline_parse_token_string_t cmd_setvf_traffic_mode =
cmdline_parse_inst_t cmd_set_vf_traffic = {
.f = cmd_set_vf_traffic_parsed,
.data = NULL,
- .help_str = "set port X vf Y rx|tx on|off"
- "(X = port number,Y = vf id)",
+ .help_str = "set port <port_id> vf <vf_id> rx|tx on|off",
.tokens = {
(void *)&cmd_setvf_traffic_set,
(void *)&cmd_setvf_traffic_port,
@@ -6689,7 +6815,7 @@ cmd_set_vf_rxmode_parsed(void *parsed_result,
rx_mode |= ETH_VMDQ_ACCEPT_MULTICAST;
}
- ret = rte_eth_dev_set_vf_rxmode(res->port_id,res->vf_id,rx_mode,(uint8_t)is_on);
+ ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id, rx_mode, (uint8_t)is_on);
if (ret < 0)
printf("bad VF receive mode parameter, return code = %d \n",
ret);
@@ -6723,7 +6849,8 @@ cmdline_parse_token_string_t cmd_set_vf_rxmode_on =
cmdline_parse_inst_t cmd_set_vf_rxmode = {
.f = cmd_set_vf_rxmode_parsed,
.data = NULL,
- .help_str = "set port X vf Y rxmode AUPE|ROPE|BAM|MPE on|off",
+ .help_str = "set port <port_id> vf <vf_id> rxmode "
+ "AUPE|ROPE|BAM|MPE on|off",
.tokens = {
(void *)&cmd_set_vf_rxmode_set,
(void *)&cmd_set_vf_rxmode_port,
@@ -6736,6 +6863,7 @@ cmdline_parse_inst_t cmd_set_vf_rxmode = {
NULL,
},
};
+#endif
/* *** ADD MAC ADDRESS FILTER FOR A VF OF A PORT *** */
struct cmd_vf_mac_addr_result {
@@ -6788,8 +6916,8 @@ cmdline_parse_token_etheraddr_t cmd_vf_mac_addr_addr =
cmdline_parse_inst_t cmd_vf_mac_addr_filter = {
.f = cmd_vf_mac_addr_parsed,
.data = (void *)0,
- .help_str = "mac_addr add port X vf Y ethaddr:(X = port number,"
- "Y = VF number)add MAC address filtering for a VF on port X",
+ .help_str = "mac_addr add port <port_id> vf <vf_id> <mac_addr>: "
+ "Add MAC address filtering for a VF on port_id",
.tokens = {
(void *)&cmd_vf_mac_addr_cmd,
(void *)&cmd_vf_mac_addr_what,
@@ -6819,11 +6947,37 @@ cmd_vf_rx_vlan_filter_parsed(void *parsed_result,
__attribute__((unused)) void *data)
{
struct cmd_vf_rx_vlan_filter *res = parsed_result;
+ int ret = -ENOTSUP;
- if (!strcmp(res->what, "add"))
- set_vf_rx_vlan(res->port_id, res->vlan_id,res->vf_mask, 1);
- else
- set_vf_rx_vlan(res->port_id, res->vlan_id,res->vf_mask, 0);
+ __rte_unused int is_add = (strcmp(res->what, "add") == 0) ? 1 : 0;
+
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_vf_vlan_filter(res->port_id,
+ res->vlan_id, res->vf_mask, is_add);
+#endif
+#ifdef RTE_LIBRTE_I40E_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_set_vf_vlan_filter(res->port_id,
+ res->vlan_id, res->vf_mask, is_add);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid vlan_id %d or vf_mask %"PRIu64"\n",
+ res->vlan_id, res->vf_mask);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented or supported\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
}
cmdline_parse_token_string_t cmd_vf_rx_vlan_filter_rx_vlan =
@@ -6851,8 +7005,8 @@ cmdline_parse_token_num_t cmd_vf_rx_vlan_filter_vf_mask =
cmdline_parse_inst_t cmd_vf_rxvlan_filter = {
.f = cmd_vf_rx_vlan_filter_parsed,
.data = NULL,
- .help_str = "rx_vlan add|rm X port Y vf Z (X = VLAN ID,"
- "Y = port number,Z = hexadecimal VF mask)",
+ .help_str = "rx_vlan add|rm <vlan_id> port <port_id> vf <vf_mask>: "
+ "(vf_mask = hexadecimal VF mask)",
.tokens = {
(void *)&cmd_vf_rx_vlan_filter_rx_vlan,
(void *)&cmd_vf_rx_vlan_filter_what,
@@ -6918,8 +7072,8 @@ cmdline_parse_token_num_t cmd_queue_rate_limit_ratenum =
cmdline_parse_inst_t cmd_queue_rate_limit = {
.f = cmd_queue_rate_limit_parsed,
.data = (void *)0,
- .help_str = "set port X queue Y rate Z:(X = port number,"
- "Y = queue number,Z = rate number)set rate limit for a queue on port X",
+ .help_str = "set port <port_id> queue <queue_id> rate <rate_value>: "
+ "Set rate limit for a queue on port_id",
.tokens = {
(void *)&cmd_queue_rate_limit_set,
(void *)&cmd_queue_rate_limit_port,
@@ -6932,6 +7086,7 @@ cmdline_parse_inst_t cmd_queue_rate_limit = {
},
};
+#ifdef RTE_LIBRTE_IXGBE_PMD
/* *** SET RATE LIMIT FOR A VF OF A PORT *** */
struct cmd_vf_rate_limit_result {
cmdline_fixed_string_t set;
@@ -6994,9 +7149,9 @@ cmdline_parse_token_num_t cmd_vf_rate_limit_q_msk_val =
cmdline_parse_inst_t cmd_vf_rate_limit = {
.f = cmd_vf_rate_limit_parsed,
.data = (void *)0,
- .help_str = "set port X vf Y rate Z queue_mask V:(X = port number,"
- "Y = VF number,Z = rate number, V = queue mask value)set rate limit "
- "for queues of VF on port X",
+ .help_str = "set port <port_id> vf <vf_id> rate <rate_value> "
+ "queue_mask <queue_mask_value>: "
+ "Set rate limit for queues of VF on port_id",
.tokens = {
(void *)&cmd_vf_rate_limit_set,
(void *)&cmd_vf_rate_limit_port,
@@ -7010,6 +7165,7 @@ cmdline_parse_inst_t cmd_vf_rate_limit = {
NULL,
},
};
+#endif
/* *** ADD TUNNEL FILTER OF A PORT *** */
struct cmd_tunnel_filter_result {
@@ -7140,12 +7296,10 @@ cmdline_parse_token_num_t cmd_tunnel_filter_queue_num =
cmdline_parse_inst_t cmd_tunnel_filter = {
.f = cmd_tunnel_filter_parsed,
.data = (void *)0,
- .help_str = "add/rm tunnel filter of a port: "
- "tunnel_filter add port_id outer_mac inner_mac ip "
- "inner_vlan tunnel_type(vxlan|nvgre|ipingre) filter_type "
- "(oip|iip|imac-ivlan|imac-ivlan-tenid|imac-tenid|"
- "imac|omac-imac-tenid) "
- "tenant_id queue_num",
+ .help_str = "tunnel_filter add|rm <port_id> <outer_mac> <inner_mac> "
+ "<ip> <inner_vlan> vxlan|nvgre|ipingre oip|iip|imac-ivlan|"
+ "imac-ivlan-tenid|imac-tenid|imac|omac-imac-tenid <tenant_id> "
+ "<queue_id>: Add/Rm tunnel filter of a port",
.tokens = {
(void *)&cmd_tunnel_filter_cmd,
(void *)&cmd_tunnel_filter_what,
@@ -7211,8 +7365,8 @@ cmdline_parse_token_num_t cmd_tunnel_udp_config_port_id =
cmdline_parse_inst_t cmd_tunnel_udp_config = {
.f = cmd_tunnel_udp_config_parsed,
.data = (void *)0,
- .help_str = "add/rm an tunneling UDP port filter: "
- "rx_vxlan_port add udp_port port_id",
+ .help_str = "rx_vxlan_port add|rm <udp_port> <port_id>: "
+ "Add/Remove a tunneling UDP port filter",
.tokens = {
(void *)&cmd_tunnel_udp_config_cmd,
(void *)&cmd_tunnel_udp_config_what,
@@ -7263,7 +7417,7 @@ cmdline_parse_token_num_t cmd_global_config_gre_key_len =
cmdline_parse_inst_t cmd_global_config = {
.f = cmd_global_config_parsed,
.data = (void *)NULL,
- .help_str = "global_config <port_id> gre-key-len <number>",
+ .help_str = "global_config <port_id> gre-key-len <key_len>",
.tokens = {
(void *)&cmd_global_config_cmd,
(void *)&cmd_global_config_port_id,
@@ -7371,8 +7525,9 @@ cmd_set_mirror_mask_parsed(void *parsed_result,
cmdline_parse_inst_t cmd_set_mirror_mask = {
.f = cmd_set_mirror_mask_parsed,
.data = NULL,
- .help_str = "set port X mirror-rule Y pool-mirror-up|pool-mirror-down|vlan-mirror"
- " pool_mask|vlan_id[,vlan_id]* dst-pool Z on|off",
+ .help_str = "set port <port_id> mirror-rule <rule_id> "
+ "pool-mirror-up|pool-mirror-down|vlan-mirror "
+ "<pool_mask|vlan_id[,vlan_id]*> dst-pool <pool_id> on|off",
.tokens = {
(void *)&cmd_mirror_mask_set,
(void *)&cmd_mirror_mask_port,
@@ -7388,7 +7543,7 @@ cmdline_parse_inst_t cmd_set_mirror_mask = {
},
};
-/* *** CONFIGURE VM MIRROR UDLINK/DOWNLINK RULE *** */
+/* *** CONFIGURE VM MIRROR UPLINK/DOWNLINK RULE *** */
struct cmd_set_mirror_link_result {
cmdline_fixed_string_t set;
cmdline_fixed_string_t port;
@@ -7462,8 +7617,8 @@ cmd_set_mirror_link_parsed(void *parsed_result,
cmdline_parse_inst_t cmd_set_mirror_link = {
.f = cmd_set_mirror_link_parsed,
.data = NULL,
- .help_str = "set port X mirror-rule Y uplink-mirror|"
- "downlink-mirror dst-pool Z on|off",
+ .help_str = "set port <port_id> mirror-rule <rule_id> "
+ "uplink-mirror|downlink-mirror dst-pool <pool_id> on|off",
.tokens = {
(void *)&cmd_mirror_link_set,
(void *)&cmd_mirror_link_port,
@@ -7519,7 +7674,7 @@ cmd_reset_mirror_rule_parsed(void *parsed_result,
cmdline_parse_inst_t cmd_reset_mirror_rule = {
.f = cmd_reset_mirror_rule_parsed,
.data = NULL,
- .help_str = "reset port X mirror-rule Y",
+ .help_str = "reset port <port_id> mirror-rule <rule_id>",
.tokens = {
(void *)&cmd_rm_mirror_rule_reset,
(void *)&cmd_rm_mirror_rule_port,
@@ -7564,6 +7719,8 @@ static void cmd_dump_parsed(void *parsed_result,
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
rte_eal_devargs_dump(stdout);
+ else if (!strcmp(res->dump, "dump_log_types"))
+ rte_log_dump(stdout);
}
cmdline_parse_token_string_t cmd_dump_dump =
@@ -7573,12 +7730,13 @@ cmdline_parse_token_string_t cmd_dump_dump =
"dump_struct_sizes#"
"dump_ring#"
"dump_mempool#"
- "dump_devargs");
+ "dump_devargs#"
+ "dump_log_types");
cmdline_parse_inst_t cmd_dump = {
.f = cmd_dump_parsed, /* function to call */
.data = NULL, /* 2nd arg of func */
- .help_str = "dump status",
+ .help_str = "Dump status",
.tokens = { /* token list, NULL terminated */
(void *)&cmd_dump_dump,
NULL,
@@ -7626,7 +7784,7 @@ cmdline_parse_token_string_t cmd_dump_one_name =
cmdline_parse_inst_t cmd_dump_one = {
.f = cmd_dump_one_parsed, /* function to call */
.data = NULL, /* 2nd arg of func */
- .help_str = "dump one ring/mempool: dump_ring|dump_mempool <name>",
+ .help_str = "dump_ring|dump_mempool <name>: Dump one ring/mempool",
.tokens = { /* token list, NULL terminated */
(void *)&cmd_dump_one_dump,
(void *)&cmd_dump_one_name,
@@ -7711,7 +7869,8 @@ cmdline_parse_token_num_t cmd_syn_filter_queue_id =
cmdline_parse_inst_t cmd_syn_filter = {
.f = cmd_syn_filter_parsed,
.data = NULL,
- .help_str = "add/delete syn filter",
+ .help_str = "syn_filter <port_id> add|del priority high|low queue "
+ "<queue_id>: Add/Delete syn filter",
.tokens = {
(void *)&cmd_syn_filter_filter,
(void *)&cmd_syn_filter_port_id,
@@ -7850,7 +8009,9 @@ cmdline_parse_token_num_t cmd_2tuple_filter_queue_id =
cmdline_parse_inst_t cmd_2tuple_filter = {
.f = cmd_2tuple_filter_parsed,
.data = NULL,
- .help_str = "add a 2tuple filter",
+ .help_str = "2tuple_filter <port_id> add|del dst_port <value> protocol "
+ "<value> mask <value> tcp_flags <value> priority <value> queue "
+ "<queue_id>: Add a 2tuple filter",
.tokens = {
(void *)&cmd_2tuple_filter_filter,
(void *)&cmd_2tuple_filter_port_id,
@@ -8045,7 +8206,10 @@ cmdline_parse_token_num_t cmd_5tuple_filter_queue_id =
cmdline_parse_inst_t cmd_5tuple_filter = {
.f = cmd_5tuple_filter_parsed,
.data = NULL,
- .help_str = "add/del a 5tuple filter",
+ .help_str = "5tuple_filter <port_id> add|del dst_ip <value> "
+ "src_ip <value> dst_port <value> src_port <value> "
+ "protocol <value> mask <value> tcp_flags <value> "
+ "priority <value> queue <queue_id>: Add/Del a 5tuple filter",
.tokens = {
(void *)&cmd_5tuple_filter_filter,
(void *)&cmd_5tuple_filter_port_id,
@@ -8240,7 +8404,9 @@ cmdline_parse_token_num_t cmd_flex_filter_queue_id =
cmdline_parse_inst_t cmd_flex_filter = {
.f = cmd_flex_filter_parsed,
.data = NULL,
- .help_str = "add/del a flex filter",
+ .help_str = "flex_filter <port_id> add|del len <value> bytes "
+ "<value> mask <value> priority <value> queue <queue_id>: "
+ "Add/Del a flex filter",
.tokens = {
(void *)&cmd_flex_filter_filter,
(void *)&cmd_flex_filter_port_id,
@@ -8352,7 +8518,9 @@ cmd_ethertype_filter_parsed(void *parsed_result,
cmdline_parse_inst_t cmd_ethertype_filter = {
.f = cmd_ethertype_filter_parsed,
.data = NULL,
- .help_str = "add or delete an ethertype filter entry",
+ .help_str = "ethertype_filter <port_id> add|del mac_addr|mac_ignr "
+ "<mac_addr> ethertype <value> drop|fw queue <queue_id>: "
+ "Add or delete an ethertype filter entry",
.tokens = {
(void *)&cmd_ethertype_filter_filter,
(void *)&cmd_ethertype_filter_port_id,
@@ -8573,6 +8741,7 @@ cmd_flow_director_filter_parsed(void *parsed_result,
case RTE_ETH_FLOW_FRAG_IPV4:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
entry.input.flow.ip4_flow.proto = res->proto_value;
+ /* fall-through */
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
IPV4_ADDR_TO_UINT(res->ip_dst,
@@ -8605,6 +8774,7 @@ cmd_flow_director_filter_parsed(void *parsed_result,
case RTE_ETH_FLOW_FRAG_IPV6:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
entry.input.flow.ipv6_flow.proto = res->proto_value;
+ /* fall-through */
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
IPV6_ADDR_TO_ARRAY(res->ip_dst,
@@ -8840,7 +9010,14 @@ cmdline_parse_token_num_t cmd_flow_director_tunnel_id_value =
cmdline_parse_inst_t cmd_add_del_ip_flow_director = {
.f = cmd_flow_director_filter_parsed,
.data = NULL,
- .help_str = "add or delete an ip flow director entry on NIC",
+ .help_str = "flow_director_filter <port_id> mode IP add|del|update flow"
+ " ipv4-other|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|"
+ "ipv6-other|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|"
+ "l2_payload src <src_ip> dst <dst_ip> tos <tos_value> "
+ "proto <proto_value> ttl <ttl_value> vlan <vlan_value> "
+ "flexbytes <flexbyte_vaues> drop|fw <pf_vf> queue <queue_id> "
+ "fd_id <fd_id_value>: "
+ "Add or delete an ip flow director entry on NIC",
.tokens = {
(void *)&cmd_flow_director_filter,
(void *)&cmd_flow_director_port_id,
@@ -8876,7 +9053,8 @@ cmdline_parse_inst_t cmd_add_del_ip_flow_director = {
cmdline_parse_inst_t cmd_add_del_udp_flow_director = {
.f = cmd_flow_director_filter_parsed,
.data = NULL,
- .help_str = "add or delete an udp/tcp flow director entry on NIC",
+ .help_str = "flow_director_filter ... : Add or delete an udp/tcp flow "
+ "director entry on NIC",
.tokens = {
(void *)&cmd_flow_director_filter,
(void *)&cmd_flow_director_port_id,
@@ -8912,7 +9090,8 @@ cmdline_parse_inst_t cmd_add_del_udp_flow_director = {
cmdline_parse_inst_t cmd_add_del_sctp_flow_director = {
.f = cmd_flow_director_filter_parsed,
.data = NULL,
- .help_str = "add or delete a sctp flow director entry on NIC",
+ .help_str = "flow_director_filter ... : Add or delete a sctp flow "
+ "director entry on NIC",
.tokens = {
(void *)&cmd_flow_director_filter,
(void *)&cmd_flow_director_port_id,
@@ -8950,7 +9129,8 @@ cmdline_parse_inst_t cmd_add_del_sctp_flow_director = {
cmdline_parse_inst_t cmd_add_del_l2_flow_director = {
.f = cmd_flow_director_filter_parsed,
.data = NULL,
- .help_str = "add or delete a L2 flow director entry on NIC",
+ .help_str = "flow_director_filter ... : Add or delete a L2 flow "
+ "director entry on NIC",
.tokens = {
(void *)&cmd_flow_director_filter,
(void *)&cmd_flow_director_port_id,
@@ -8976,7 +9156,8 @@ cmdline_parse_inst_t cmd_add_del_l2_flow_director = {
cmdline_parse_inst_t cmd_add_del_mac_vlan_flow_director = {
.f = cmd_flow_director_filter_parsed,
.data = NULL,
- .help_str = "add or delete a MAC VLAN flow director entry on NIC",
+ .help_str = "flow_director_filter ... : Add or delete a MAC VLAN flow "
+ "director entry on NIC",
.tokens = {
(void *)&cmd_flow_director_filter,
(void *)&cmd_flow_director_port_id,
@@ -9001,7 +9182,8 @@ cmdline_parse_inst_t cmd_add_del_mac_vlan_flow_director = {
cmdline_parse_inst_t cmd_add_del_tunnel_flow_director = {
.f = cmd_flow_director_filter_parsed,
.data = NULL,
- .help_str = "add or delete a tunnel flow director entry on NIC",
+ .help_str = "flow_director_filter ... : Add or delete a tunnel flow "
+ "director entry on NIC",
.tokens = {
(void *)&cmd_flow_director_filter,
(void *)&cmd_flow_director_port_id,
@@ -9064,7 +9246,8 @@ cmd_flush_flow_director_parsed(void *parsed_result,
cmdline_parse_inst_t cmd_flush_flow_director = {
.f = cmd_flush_flow_director_parsed,
.data = NULL,
- .help_str = "flush all flow director entries of a device on NIC",
+ .help_str = "flush_flow_director <port_id>: "
+ "Flush all flow director entries of a device on NIC",
.tokens = {
(void *)&cmd_flush_flow_director_flush,
(void *)&cmd_flush_flow_director_port_id,
@@ -9225,7 +9408,8 @@ cmdline_parse_token_num_t cmd_flow_director_mask_tunnel_id_value =
cmdline_parse_inst_t cmd_set_flow_director_ip_mask = {
.f = cmd_flow_director_mask_parsed,
.data = NULL,
- .help_str = "set IP mode flow director's mask on NIC",
+ .help_str = "flow_director_mask ... : "
+ "Set IP mode flow director's mask on NIC",
.tokens = {
(void *)&cmd_flow_director_mask,
(void *)&cmd_flow_director_mask_port_id,
@@ -9248,7 +9432,8 @@ cmdline_parse_inst_t cmd_set_flow_director_ip_mask = {
cmdline_parse_inst_t cmd_set_flow_director_mac_vlan_mask = {
.f = cmd_flow_director_mask_parsed,
.data = NULL,
- .help_str = "set MAC VLAN mode flow director's mask on NIC",
+ .help_str = "flow_director_mask ... : Set MAC VLAN mode "
+ "flow director's mask on NIC",
.tokens = {
(void *)&cmd_flow_director_mask,
(void *)&cmd_flow_director_mask_port_id,
@@ -9263,7 +9448,8 @@ cmdline_parse_inst_t cmd_set_flow_director_mac_vlan_mask = {
cmdline_parse_inst_t cmd_set_flow_director_tunnel_mask = {
.f = cmd_flow_director_mask_parsed,
.data = NULL,
- .help_str = "set tunnel mode flow director's mask on NIC",
+ .help_str = "flow_director_mask ... : Set tunnel mode "
+ "flow director's mask on NIC",
.tokens = {
(void *)&cmd_flow_director_mask,
(void *)&cmd_flow_director_mask_port_id,
@@ -9391,7 +9577,8 @@ cmdline_parse_token_string_t cmd_flow_director_flexmask_mask =
cmdline_parse_inst_t cmd_set_flow_director_flex_mask = {
.f = cmd_flow_director_flex_mask_parsed,
.data = NULL,
- .help_str = "set flow director's flex mask on NIC",
+ .help_str = "flow_director_flex_mask ... : "
+ "Set flow director's flex mask on NIC",
.tokens = {
(void *)&cmd_flow_director_flexmask,
(void *)&cmd_flow_director_flexmask_port_id,
@@ -9509,7 +9696,8 @@ cmdline_parse_token_string_t cmd_flow_director_flexpayload_payload_cfg =
cmdline_parse_inst_t cmd_set_flow_director_flex_payload = {
.f = cmd_flow_director_flxpld_parsed,
.data = NULL,
- .help_str = "set flow director's flex payload on NIC",
+ .help_str = "flow_director_flexpayload ... : "
+ "Set flow director's flex payload on NIC",
.tokens = {
(void *)&cmd_flow_director_flexpayload,
(void *)&cmd_flow_director_flexpayload_port_id,
@@ -9519,6 +9707,9 @@ cmdline_parse_inst_t cmd_set_flow_director_flex_payload = {
},
};
+/* Generic flow interface command. */
+extern cmdline_parse_inst_t cmd_flow;
+
/* *** Classification Filters Control *** */
/* *** Get symmetric hash enable per port *** */
struct cmd_get_sym_hash_ena_per_port_result {
@@ -9567,7 +9758,7 @@ cmdline_parse_token_num_t cmd_get_sym_hash_ena_per_port_port_id =
cmdline_parse_inst_t cmd_get_sym_hash_ena_per_port = {
.f = cmd_get_sym_hash_per_port_parsed,
.data = NULL,
- .help_str = "get_sym_hash_ena_per_port port_id",
+ .help_str = "get_sym_hash_ena_per_port <port_id>",
.tokens = {
(void *)&cmd_get_sym_hash_ena_per_port_all,
(void *)&cmd_get_sym_hash_ena_per_port_port_id,
@@ -9626,7 +9817,7 @@ cmdline_parse_token_string_t cmd_set_sym_hash_ena_per_port_enable =
cmdline_parse_inst_t cmd_set_sym_hash_ena_per_port = {
.f = cmd_set_sym_hash_per_port_parsed,
.data = NULL,
- .help_str = "set_sym_hash_ena_per_port port_id enable|disable",
+ .help_str = "set_sym_hash_ena_per_port <port_id> enable|disable",
.tokens = {
(void *)&cmd_set_sym_hash_ena_per_port_all,
(void *)&cmd_set_sym_hash_ena_per_port_port_id,
@@ -9744,7 +9935,7 @@ cmdline_parse_token_num_t cmd_get_hash_global_config_port_id =
cmdline_parse_inst_t cmd_get_hash_global_config = {
.f = cmd_get_hash_global_config_parsed,
.data = NULL,
- .help_str = "get_hash_global_config port_id",
+ .help_str = "get_hash_global_config <port_id>",
.tokens = {
(void *)&cmd_get_hash_global_config_all,
(void *)&cmd_get_hash_global_config_port_id,
@@ -9827,11 +10018,11 @@ cmdline_parse_token_string_t cmd_set_hash_global_config_enable =
cmdline_parse_inst_t cmd_set_hash_global_config = {
.f = cmd_set_hash_global_config_parsed,
.data = NULL,
- .help_str = "set_hash_global_config port_id "
+ .help_str = "set_hash_global_config <port_id> "
"toeplitz|simple_xor|default "
- "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|ipv6|"
- "ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload "
- "enable|disable",
+ "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
+ "ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
+ "l2_payload enable|disable",
.tokens = {
(void *)&cmd_set_hash_global_config_all,
(void *)&cmd_set_hash_global_config_port_id,
@@ -10082,7 +10273,8 @@ cmdline_parse_token_etheraddr_t cmd_mcast_addr_addr =
cmdline_parse_inst_t cmd_mcast_addr = {
.f = cmd_mcast_addr_parsed,
.data = (void *)0,
- .help_str = "mcast_addr add|remove X <mcast_addr>: add/remove multicast MAC address on port X",
+ .help_str = "mcast_addr add|remove <port_id> <mcast_addr>: "
+ "Add/Remove multicast MAC address on port_id",
.tokens = {
(void *)&cmd_mcast_addr_cmd,
(void *)&cmd_mcast_addr_what,
@@ -10174,7 +10366,7 @@ cmd_config_l2_tunnel_eth_type_all_parsed
entry.l2_tunnel_type = str2fdir_l2_tunnel_type(res->l2_tunnel_type);
entry.ether_type = res->eth_type_val;
- FOREACH_PORT(pid, ports) {
+ RTE_ETH_FOREACH_DEV(pid) {
rte_eth_dev_l2_tunnel_eth_type_conf(pid, &entry);
}
}
@@ -10182,7 +10374,7 @@ cmd_config_l2_tunnel_eth_type_all_parsed
cmdline_parse_inst_t cmd_config_l2_tunnel_eth_type_all = {
.f = cmd_config_l2_tunnel_eth_type_all_parsed,
.data = NULL,
- .help_str = "port config all l2-tunnel ether-type",
+ .help_str = "port config all l2-tunnel E-tag ether-type <value>",
.tokens = {
(void *)&cmd_config_l2_tunnel_eth_type_port,
(void *)&cmd_config_l2_tunnel_eth_type_config,
@@ -10218,7 +10410,7 @@ cmd_config_l2_tunnel_eth_type_specific_parsed(
cmdline_parse_inst_t cmd_config_l2_tunnel_eth_type_specific = {
.f = cmd_config_l2_tunnel_eth_type_specific_parsed,
.data = NULL,
- .help_str = "port config l2-tunnel ether-type",
+ .help_str = "port config <port_id> l2-tunnel E-tag ether-type <value>",
.tokens = {
(void *)&cmd_config_l2_tunnel_eth_type_port,
(void *)&cmd_config_l2_tunnel_eth_type_config,
@@ -10290,7 +10482,7 @@ cmd_config_l2_tunnel_en_dis_all_parsed(
else
en = 0;
- FOREACH_PORT(pid, ports) {
+ RTE_ETH_FOREACH_DEV(pid) {
rte_eth_dev_l2_tunnel_offload_set(pid,
&entry,
ETH_L2_TUNNEL_ENABLE_MASK,
@@ -10301,7 +10493,7 @@ cmd_config_l2_tunnel_en_dis_all_parsed(
cmdline_parse_inst_t cmd_config_l2_tunnel_en_dis_all = {
.f = cmd_config_l2_tunnel_en_dis_all_parsed,
.data = NULL,
- .help_str = "port config all l2-tunnel enable/disable",
+ .help_str = "port config all l2-tunnel E-tag enable|disable",
.tokens = {
(void *)&cmd_config_l2_tunnel_en_dis_port,
(void *)&cmd_config_l2_tunnel_en_dis_config,
@@ -10344,7 +10536,7 @@ cmd_config_l2_tunnel_en_dis_specific_parsed(
cmdline_parse_inst_t cmd_config_l2_tunnel_en_dis_specific = {
.f = cmd_config_l2_tunnel_en_dis_specific_parsed,
.data = NULL,
- .help_str = "port config l2-tunnel enable/disable",
+ .help_str = "port config <port_id> l2-tunnel E-tag enable|disable",
.tokens = {
(void *)&cmd_config_l2_tunnel_en_dis_port,
(void *)&cmd_config_l2_tunnel_en_dis_config,
@@ -10517,7 +10709,7 @@ cmd_config_e_tag_insertion_dis_parsed(
cmdline_parse_inst_t cmd_config_e_tag_insertion_en = {
.f = cmd_config_e_tag_insertion_en_parsed,
.data = NULL,
- .help_str = "E-tag insertion enable",
+ .help_str = "E-tag ... : E-tag insertion enable",
.tokens = {
(void *)&cmd_config_e_tag_e_tag,
(void *)&cmd_config_e_tag_set,
@@ -10536,7 +10728,7 @@ cmdline_parse_inst_t cmd_config_e_tag_insertion_en = {
cmdline_parse_inst_t cmd_config_e_tag_insertion_dis = {
.f = cmd_config_e_tag_insertion_dis_parsed,
.data = NULL,
- .help_str = "E-tag insertion disable",
+ .help_str = "E-tag ... : E-tag insertion disable",
.tokens = {
(void *)&cmd_config_e_tag_e_tag,
(void *)&cmd_config_e_tag_set,
@@ -10583,7 +10775,7 @@ cmd_config_e_tag_stripping_parsed(
cmdline_parse_inst_t cmd_config_e_tag_stripping_en_dis = {
.f = cmd_config_e_tag_stripping_parsed,
.data = NULL,
- .help_str = "E-tag stripping enable/disable",
+ .help_str = "E-tag ... : E-tag stripping enable/disable",
.tokens = {
(void *)&cmd_config_e_tag_e_tag,
(void *)&cmd_config_e_tag_set,
@@ -10627,7 +10819,7 @@ cmd_config_e_tag_forwarding_parsed(
cmdline_parse_inst_t cmd_config_e_tag_forwarding_en_dis = {
.f = cmd_config_e_tag_forwarding_parsed,
.data = NULL,
- .help_str = "E-tag forwarding enable/disable",
+ .help_str = "E-tag ... : E-tag forwarding enable/disable",
.tokens = {
(void *)&cmd_config_e_tag_e_tag,
(void *)&cmd_config_e_tag_set,
@@ -10682,7 +10874,7 @@ cmd_config_e_tag_filter_add_parsed(
cmdline_parse_inst_t cmd_config_e_tag_filter_add = {
.f = cmd_config_e_tag_filter_add_parsed,
.data = NULL,
- .help_str = "E-tag filter add",
+ .help_str = "E-tag ... : E-tag filter add",
.tokens = {
(void *)&cmd_config_e_tag_e_tag,
(void *)&cmd_config_e_tag_set,
@@ -10739,7 +10931,7 @@ cmd_config_e_tag_filter_del_parsed(
cmdline_parse_inst_t cmd_config_e_tag_filter_del = {
.f = cmd_config_e_tag_filter_del_parsed,
.data = NULL,
- .help_str = "E-tag filter delete",
+ .help_str = "E-tag ... : E-tag filter delete",
.tokens = {
(void *)&cmd_config_e_tag_e_tag,
(void *)&cmd_config_e_tag_set,
@@ -10752,7 +10944,6 @@ cmdline_parse_inst_t cmd_config_e_tag_filter_del = {
NULL,
},
};
-#ifdef RTE_LIBRTE_IXGBE_PMD
/* vf vlan anti spoof configuration */
@@ -10804,14 +10995,24 @@ cmd_set_vf_vlan_anti_spoof_parsed(
__attribute__((unused)) void *data)
{
struct cmd_vf_vlan_anti_spoof_result *res = parsed_result;
- int ret = 0;
- int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
+ int ret = -ENOTSUP;
+
+ __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
return;
- ret = rte_pmd_ixgbe_set_vf_vlan_anti_spoof(res->port_id, res->vf_id,
- is_on);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_vf_vlan_anti_spoof(res->port_id,
+ res->vf_id, is_on);
+#endif
+#ifdef RTE_LIBRTE_I40E_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_set_vf_vlan_anti_spoof(res->port_id,
+ res->vf_id, is_on);
+#endif
+
switch (ret) {
case 0:
break;
@@ -10821,6 +11022,9 @@ cmd_set_vf_vlan_anti_spoof_parsed(
case -ENODEV:
printf("invalid port_id %d\n", res->port_id);
break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -10829,7 +11033,7 @@ cmd_set_vf_vlan_anti_spoof_parsed(
cmdline_parse_inst_t cmd_set_vf_vlan_anti_spoof = {
.f = cmd_set_vf_vlan_anti_spoof_parsed,
.data = NULL,
- .help_str = "set vf vlan antispoof port_id vf_id on|off",
+ .help_str = "set vf vlan antispoof <port_id> <vf_id> on|off",
.tokens = {
(void *)&cmd_vf_vlan_anti_spoof_set,
(void *)&cmd_vf_vlan_anti_spoof_vf,
@@ -10892,14 +11096,24 @@ cmd_set_vf_mac_anti_spoof_parsed(
__attribute__((unused)) void *data)
{
struct cmd_vf_mac_anti_spoof_result *res = parsed_result;
- int ret;
- int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
+ int ret = -ENOTSUP;
+
+ __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
return;
- ret = rte_pmd_ixgbe_set_vf_mac_anti_spoof(res->port_id, res->vf_id,
- is_on);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_vf_mac_anti_spoof(res->port_id,
+ res->vf_id, is_on);
+#endif
+#ifdef RTE_LIBRTE_I40E_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_set_vf_mac_anti_spoof(res->port_id,
+ res->vf_id, is_on);
+#endif
+
switch (ret) {
case 0:
break;
@@ -10909,6 +11123,9 @@ cmd_set_vf_mac_anti_spoof_parsed(
case -ENODEV:
printf("invalid port_id %d\n", res->port_id);
break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -10917,7 +11134,7 @@ cmd_set_vf_mac_anti_spoof_parsed(
cmdline_parse_inst_t cmd_set_vf_mac_anti_spoof = {
.f = cmd_set_vf_mac_anti_spoof_parsed,
.data = NULL,
- .help_str = "set vf mac antispoof port_id vf_id on|off",
+ .help_str = "set vf mac antispoof <port_id> <vf_id> on|off",
.tokens = {
(void *)&cmd_vf_mac_anti_spoof_set,
(void *)&cmd_vf_mac_anti_spoof_vf,
@@ -10980,13 +11197,24 @@ cmd_set_vf_vlan_stripq_parsed(
__attribute__((unused)) void *data)
{
struct cmd_vf_vlan_stripq_result *res = parsed_result;
- int ret = 0;
- int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
+ int ret = -ENOTSUP;
+
+ __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
return;
- ret = rte_pmd_ixgbe_set_vf_vlan_stripq(res->port_id, res->vf_id, is_on);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_vf_vlan_stripq(res->port_id,
+ res->vf_id, is_on);
+#endif
+#ifdef RTE_LIBRTE_I40E_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_set_vf_vlan_stripq(res->port_id,
+ res->vf_id, is_on);
+#endif
+
switch (ret) {
case 0:
break;
@@ -10996,6 +11224,9 @@ cmd_set_vf_vlan_stripq_parsed(
case -ENODEV:
printf("invalid port_id %d\n", res->port_id);
break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -11004,7 +11235,7 @@ cmd_set_vf_vlan_stripq_parsed(
cmdline_parse_inst_t cmd_set_vf_vlan_stripq = {
.f = cmd_set_vf_vlan_stripq_parsed,
.data = NULL,
- .help_str = "set vf vlan stripq port_id vf_id on|off",
+ .help_str = "set vf vlan stripq <port_id> <vf_id> on|off",
.tokens = {
(void *)&cmd_vf_vlan_stripq_set,
(void *)&cmd_vf_vlan_stripq_vf,
@@ -11067,12 +11298,22 @@ cmd_set_vf_vlan_insert_parsed(
__attribute__((unused)) void *data)
{
struct cmd_vf_vlan_insert_result *res = parsed_result;
- int ret;
+ int ret = -ENOTSUP;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
return;
- ret = rte_pmd_ixgbe_set_vf_vlan_insert(res->port_id, res->vf_id, res->vlan_id);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_vf_vlan_insert(res->port_id, res->vf_id,
+ res->vlan_id);
+#endif
+#ifdef RTE_LIBRTE_I40E_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_set_vf_vlan_insert(res->port_id, res->vf_id,
+ res->vlan_id);
+#endif
+
switch (ret) {
case 0:
break;
@@ -11082,6 +11323,9 @@ cmd_set_vf_vlan_insert_parsed(
case -ENODEV:
printf("invalid port_id %d\n", res->port_id);
break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -11090,7 +11334,7 @@ cmd_set_vf_vlan_insert_parsed(
cmdline_parse_inst_t cmd_set_vf_vlan_insert = {
.f = cmd_set_vf_vlan_insert_parsed,
.data = NULL,
- .help_str = "set vf vlan insert port_id vf_id vlan_id",
+ .help_str = "set vf vlan insert <port_id> <vf_id> <vlan_id>",
.tokens = {
(void *)&cmd_vf_vlan_insert_set,
(void *)&cmd_vf_vlan_insert_vf,
@@ -11143,13 +11387,22 @@ cmd_set_tx_loopback_parsed(
__attribute__((unused)) void *data)
{
struct cmd_tx_loopback_result *res = parsed_result;
- int ret;
- int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
+ int ret = -ENOTSUP;
+
+ __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
return;
- ret = rte_pmd_ixgbe_set_tx_loopback(res->port_id, is_on);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_tx_loopback(res->port_id, is_on);
+#endif
+#ifdef RTE_LIBRTE_I40E_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_set_tx_loopback(res->port_id, is_on);
+#endif
+
switch (ret) {
case 0:
break;
@@ -11159,6 +11412,9 @@ cmd_set_tx_loopback_parsed(
case -ENODEV:
printf("invalid port_id %d\n", res->port_id);
break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -11167,7 +11423,7 @@ cmd_set_tx_loopback_parsed(
cmdline_parse_inst_t cmd_set_tx_loopback = {
.f = cmd_set_tx_loopback_parsed,
.data = NULL,
- .help_str = "set tx loopback port_id on|off",
+ .help_str = "set tx loopback <port_id> on|off",
.tokens = {
(void *)&cmd_tx_loopback_set,
(void *)&cmd_tx_loopback_tx,
@@ -11178,6 +11434,7 @@ cmdline_parse_inst_t cmd_set_tx_loopback = {
},
};
+#ifdef RTE_LIBRTE_IXGBE_PMD
/* all queues drop enable configuration */
/* Common result structure for all queues drop enable */
@@ -11239,6 +11496,9 @@ cmd_set_all_queues_drop_en_parsed(
case -ENODEV:
printf("invalid port_id %d\n", res->port_id);
break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -11247,7 +11507,7 @@ cmd_set_all_queues_drop_en_parsed(
cmdline_parse_inst_t cmd_set_all_queues_drop_en = {
.f = cmd_set_all_queues_drop_en_parsed,
.data = NULL,
- .help_str = "set all queues drop port_id on|off",
+ .help_str = "set all queues drop <port_id> on|off",
.tokens = {
(void *)&cmd_all_queues_drop_en_set,
(void *)&cmd_all_queues_drop_en_all,
@@ -11334,7 +11594,7 @@ cmd_set_vf_split_drop_en_parsed(
cmdline_parse_inst_t cmd_set_vf_split_drop_en = {
.f = cmd_set_vf_split_drop_en_parsed,
.data = NULL,
- .help_str = "set vf split drop port_id vf_id on|off",
+ .help_str = "set vf split drop <port_id> <vf_id> on|off",
.tokens = {
(void *)&cmd_vf_split_drop_en_set,
(void *)&cmd_vf_split_drop_en_vf,
@@ -11346,6 +11606,7 @@ cmdline_parse_inst_t cmd_set_vf_split_drop_en = {
NULL,
},
};
+#endif
/* vf mac address configuration */
@@ -11397,13 +11658,22 @@ cmd_set_vf_mac_addr_parsed(
__attribute__((unused)) void *data)
{
struct cmd_set_vf_mac_addr_result *res = parsed_result;
- int ret;
+ int ret = -ENOTSUP;
if (port_id_is_invalid(res->port_id, ENABLED_WARN))
return;
- ret = rte_pmd_ixgbe_set_vf_mac_addr(res->port_id, res->vf_id,
- &res->mac_addr);
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_vf_mac_addr(res->port_id, res->vf_id,
+ &res->mac_addr);
+#endif
+#ifdef RTE_LIBRTE_I40E_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_set_vf_mac_addr(res->port_id, res->vf_id,
+ &res->mac_addr);
+#endif
+
switch (ret) {
case 0:
break;
@@ -11413,6 +11683,9 @@ cmd_set_vf_mac_addr_parsed(
case -ENODEV:
printf("invalid port_id %d\n", res->port_id);
break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
default:
printf("programming error: (%s)\n", strerror(-ret));
}
@@ -11421,7 +11694,7 @@ cmd_set_vf_mac_addr_parsed(
cmdline_parse_inst_t cmd_set_vf_mac_addr = {
.f = cmd_set_vf_mac_addr_parsed,
.data = NULL,
- .help_str = "set vf mac addr port_id vf_id xx:xx:xx:xx:xx:xx",
+ .help_str = "set vf mac addr <port_id> <vf_id> <mac_addr>",
.tokens = {
(void *)&cmd_set_vf_mac_addr_set,
(void *)&cmd_set_vf_mac_addr_vf,
@@ -11433,8 +11706,1865 @@ cmdline_parse_inst_t cmd_set_vf_mac_addr = {
NULL,
},
};
+
+#ifdef RTE_LIBRTE_IXGBE_PMD
+/* MACsec configuration */
+
+/* Common result structure for MACsec offload enable */
+struct cmd_macsec_offload_on_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t macsec;
+ cmdline_fixed_string_t offload;
+ uint8_t port_id;
+ cmdline_fixed_string_t on;
+ cmdline_fixed_string_t encrypt;
+ cmdline_fixed_string_t en_on_off;
+ cmdline_fixed_string_t replay_protect;
+ cmdline_fixed_string_t rp_on_off;
+};
+
+/* Common CLI fields for MACsec offload disable */
+cmdline_parse_token_string_t cmd_macsec_offload_on_set =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_offload_on_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_on_macsec =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_offload_on_result,
+ macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_on_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_offload_on_result,
+ offload, "offload");
+cmdline_parse_token_num_t cmd_macsec_offload_on_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_macsec_offload_on_result,
+ port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_on_on =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_offload_on_result,
+ on, "on");
+cmdline_parse_token_string_t cmd_macsec_offload_on_encrypt =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_offload_on_result,
+ encrypt, "encrypt");
+cmdline_parse_token_string_t cmd_macsec_offload_on_en_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_offload_on_result,
+ en_on_off, "on#off");
+cmdline_parse_token_string_t cmd_macsec_offload_on_replay_protect =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_offload_on_result,
+ replay_protect, "replay-protect");
+cmdline_parse_token_string_t cmd_macsec_offload_on_rp_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_offload_on_result,
+ rp_on_off, "on#off");
+
+static void
+cmd_set_macsec_offload_on_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_macsec_offload_on_result *res = parsed_result;
+ int ret;
+ portid_t port_id = res->port_id;
+ int en = (strcmp(res->en_on_off, "on") == 0) ? 1 : 0;
+ int rp = (strcmp(res->rp_on_off, "on") == 0) ? 1 : 0;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_MACSEC;
+ ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", port_id);
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_on = {
+ .f = cmd_set_macsec_offload_on_parsed,
+ .data = NULL,
+ .help_str = "set macsec offload <port_id> on "
+ "encrypt on|off replay-protect on|off",
+ .tokens = {
+ (void *)&cmd_macsec_offload_on_set,
+ (void *)&cmd_macsec_offload_on_macsec,
+ (void *)&cmd_macsec_offload_on_offload,
+ (void *)&cmd_macsec_offload_on_port_id,
+ (void *)&cmd_macsec_offload_on_on,
+ (void *)&cmd_macsec_offload_on_encrypt,
+ (void *)&cmd_macsec_offload_on_en_on_off,
+ (void *)&cmd_macsec_offload_on_replay_protect,
+ (void *)&cmd_macsec_offload_on_rp_on_off,
+ NULL,
+ },
+};
+
+/* Common result structure for MACsec offload disable */
+struct cmd_macsec_offload_off_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t macsec;
+ cmdline_fixed_string_t offload;
+ uint8_t port_id;
+ cmdline_fixed_string_t off;
+};
+
+/* Common CLI fields for MACsec offload disable */
+cmdline_parse_token_string_t cmd_macsec_offload_off_set =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_offload_off_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_macsec_offload_off_macsec =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_offload_off_result,
+ macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_offload_off_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_offload_off_result,
+ offload, "offload");
+cmdline_parse_token_num_t cmd_macsec_offload_off_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_macsec_offload_off_result,
+ port_id, UINT8);
+cmdline_parse_token_string_t cmd_macsec_offload_off_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_offload_off_result,
+ off, "off");
+
+static void
+cmd_set_macsec_offload_off_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_macsec_offload_off_result *res = parsed_result;
+ int ret;
+ portid_t port_id = res->port_id;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ ports[port_id].tx_ol_flags &= ~TESTPMD_TX_OFFLOAD_MACSEC;
+ ret = rte_pmd_ixgbe_macsec_disable(port_id);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", port_id);
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_set_macsec_offload_off = {
+ .f = cmd_set_macsec_offload_off_parsed,
+ .data = NULL,
+ .help_str = "set macsec offload <port_id> off",
+ .tokens = {
+ (void *)&cmd_macsec_offload_off_set,
+ (void *)&cmd_macsec_offload_off_macsec,
+ (void *)&cmd_macsec_offload_off_offload,
+ (void *)&cmd_macsec_offload_off_port_id,
+ (void *)&cmd_macsec_offload_off_off,
+ NULL,
+ },
+};
+
+/* Common result structure for MACsec secure connection configure */
+struct cmd_macsec_sc_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t macsec;
+ cmdline_fixed_string_t sc;
+ cmdline_fixed_string_t tx_rx;
+ uint8_t port_id;
+ struct ether_addr mac;
+ uint16_t pi;
+};
+
+/* Common CLI fields for MACsec secure connection configure */
+cmdline_parse_token_string_t cmd_macsec_sc_set =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_sc_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_macsec_sc_macsec =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_sc_result,
+ macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sc_sc =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_sc_result,
+ sc, "sc");
+cmdline_parse_token_string_t cmd_macsec_sc_tx_rx =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_sc_result,
+ tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sc_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_macsec_sc_result,
+ port_id, UINT8);
+cmdline_parse_token_etheraddr_t cmd_macsec_sc_mac =
+ TOKEN_ETHERADDR_INITIALIZER
+ (struct cmd_macsec_sc_result,
+ mac);
+cmdline_parse_token_num_t cmd_macsec_sc_pi =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_macsec_sc_result,
+ pi, UINT16);
+
+static void
+cmd_set_macsec_sc_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_macsec_sc_result *res = parsed_result;
+ int ret;
+ int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+
+ ret = is_tx ?
+ rte_pmd_ixgbe_macsec_config_txsc(res->port_id,
+ res->mac.addr_bytes) :
+ rte_pmd_ixgbe_macsec_config_rxsc(res->port_id,
+ res->mac.addr_bytes, res->pi);
+ switch (ret) {
+ case 0:
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sc = {
+ .f = cmd_set_macsec_sc_parsed,
+ .data = NULL,
+ .help_str = "set macsec sc tx|rx <port_id> <mac> <pi>",
+ .tokens = {
+ (void *)&cmd_macsec_sc_set,
+ (void *)&cmd_macsec_sc_macsec,
+ (void *)&cmd_macsec_sc_sc,
+ (void *)&cmd_macsec_sc_tx_rx,
+ (void *)&cmd_macsec_sc_port_id,
+ (void *)&cmd_macsec_sc_mac,
+ (void *)&cmd_macsec_sc_pi,
+ NULL,
+ },
+};
+
+/* Common result structure for MACsec secure connection configure */
+struct cmd_macsec_sa_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t macsec;
+ cmdline_fixed_string_t sa;
+ cmdline_fixed_string_t tx_rx;
+ uint8_t port_id;
+ uint8_t idx;
+ uint8_t an;
+ uint32_t pn;
+ cmdline_fixed_string_t key;
+};
+
+/* Common CLI fields for MACsec secure connection configure */
+cmdline_parse_token_string_t cmd_macsec_sa_set =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_sa_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_macsec_sa_macsec =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_sa_result,
+ macsec, "macsec");
+cmdline_parse_token_string_t cmd_macsec_sa_sa =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_sa_result,
+ sa, "sa");
+cmdline_parse_token_string_t cmd_macsec_sa_tx_rx =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_sa_result,
+ tx_rx, "tx#rx");
+cmdline_parse_token_num_t cmd_macsec_sa_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_macsec_sa_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_idx =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_macsec_sa_result,
+ idx, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_an =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_macsec_sa_result,
+ an, UINT8);
+cmdline_parse_token_num_t cmd_macsec_sa_pn =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_macsec_sa_result,
+ pn, UINT32);
+cmdline_parse_token_string_t cmd_macsec_sa_key =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_macsec_sa_result,
+ key, NULL);
+
+static void
+cmd_set_macsec_sa_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_macsec_sa_result *res = parsed_result;
+ int ret;
+ int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0;
+ uint8_t key[16] = { 0 };
+ uint8_t xdgt0;
+ uint8_t xdgt1;
+ int key_len;
+ int i;
+
+ key_len = strlen(res->key) / 2;
+ if (key_len > 16)
+ key_len = 16;
+
+ for (i = 0; i < key_len; i++) {
+ xdgt0 = parse_and_check_key_hexa_digit(res->key, (i * 2));
+ if (xdgt0 == 0xFF)
+ return;
+ xdgt1 = parse_and_check_key_hexa_digit(res->key, (i * 2) + 1);
+ if (xdgt1 == 0xFF)
+ return;
+ key[i] = (uint8_t) ((xdgt0 * 16) + xdgt1);
+ }
+
+ ret = is_tx ?
+ rte_pmd_ixgbe_macsec_select_txsa(res->port_id,
+ res->idx, res->an, res->pn, key) :
+ rte_pmd_ixgbe_macsec_select_rxsa(res->port_id,
+ res->idx, res->an, res->pn, key);
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid idx %d or an %d\n", res->idx, res->an);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_set_macsec_sa = {
+ .f = cmd_set_macsec_sa_parsed,
+ .data = NULL,
+ .help_str = "set macsec sa tx|rx <port_id> <idx> <an> <pn> <key>",
+ .tokens = {
+ (void *)&cmd_macsec_sa_set,
+ (void *)&cmd_macsec_sa_macsec,
+ (void *)&cmd_macsec_sa_sa,
+ (void *)&cmd_macsec_sa_tx_rx,
+ (void *)&cmd_macsec_sa_port_id,
+ (void *)&cmd_macsec_sa_idx,
+ (void *)&cmd_macsec_sa_an,
+ (void *)&cmd_macsec_sa_pn,
+ (void *)&cmd_macsec_sa_key,
+ NULL,
+ },
+};
+#endif
+
+/* VF unicast promiscuous mode configuration */
+
+/* Common result structure for VF unicast promiscuous mode */
+struct cmd_vf_promisc_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t vf;
+ cmdline_fixed_string_t promisc;
+ uint8_t port_id;
+ uint32_t vf_id;
+ cmdline_fixed_string_t on_off;
+};
+
+/* Common CLI fields for VF unicast promiscuous mode enable disable */
+cmdline_parse_token_string_t cmd_vf_promisc_set =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_promisc_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_vf_promisc_vf =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_promisc_result,
+ vf, "vf");
+cmdline_parse_token_string_t cmd_vf_promisc_promisc =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_promisc_result,
+ promisc, "promisc");
+cmdline_parse_token_num_t cmd_vf_promisc_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_vf_promisc_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_vf_promisc_vf_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_vf_promisc_result,
+ vf_id, UINT32);
+cmdline_parse_token_string_t cmd_vf_promisc_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_promisc_result,
+ on_off, "on#off");
+
+static void
+cmd_set_vf_promisc_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_vf_promisc_result *res = parsed_result;
+ int ret = -ENOTSUP;
+
+ __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_set_vf_unicast_promisc(res->port_id,
+ res->vf_id, is_on);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid vf_id %d\n", res->vf_id);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_set_vf_promisc = {
+ .f = cmd_set_vf_promisc_parsed,
+ .data = NULL,
+ .help_str = "set vf promisc <port_id> <vf_id> on|off: "
+ "Set unicast promiscuous mode for a VF from the PF",
+ .tokens = {
+ (void *)&cmd_vf_promisc_set,
+ (void *)&cmd_vf_promisc_vf,
+ (void *)&cmd_vf_promisc_promisc,
+ (void *)&cmd_vf_promisc_port_id,
+ (void *)&cmd_vf_promisc_vf_id,
+ (void *)&cmd_vf_promisc_on_off,
+ NULL,
+ },
+};
+
+/* VF multicast promiscuous mode configuration */
+
+/* Common result structure for VF multicast promiscuous mode */
+struct cmd_vf_allmulti_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t vf;
+ cmdline_fixed_string_t allmulti;
+ uint8_t port_id;
+ uint32_t vf_id;
+ cmdline_fixed_string_t on_off;
+};
+
+/* Common CLI fields for VF multicast promiscuous mode enable disable */
+cmdline_parse_token_string_t cmd_vf_allmulti_set =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_allmulti_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_vf_allmulti_vf =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_allmulti_result,
+ vf, "vf");
+cmdline_parse_token_string_t cmd_vf_allmulti_allmulti =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_allmulti_result,
+ allmulti, "allmulti");
+cmdline_parse_token_num_t cmd_vf_allmulti_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_vf_allmulti_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_vf_allmulti_vf_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_vf_allmulti_result,
+ vf_id, UINT32);
+cmdline_parse_token_string_t cmd_vf_allmulti_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_allmulti_result,
+ on_off, "on#off");
+
+static void
+cmd_set_vf_allmulti_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_vf_allmulti_result *res = parsed_result;
+ int ret = -ENOTSUP;
+
+ __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_set_vf_multicast_promisc(res->port_id,
+ res->vf_id, is_on);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid vf_id %d\n", res->vf_id);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_set_vf_allmulti = {
+ .f = cmd_set_vf_allmulti_parsed,
+ .data = NULL,
+ .help_str = "set vf allmulti <port_id> <vf_id> on|off: "
+ "Set multicast promiscuous mode for a VF from the PF",
+ .tokens = {
+ (void *)&cmd_vf_allmulti_set,
+ (void *)&cmd_vf_allmulti_vf,
+ (void *)&cmd_vf_allmulti_allmulti,
+ (void *)&cmd_vf_allmulti_port_id,
+ (void *)&cmd_vf_allmulti_vf_id,
+ (void *)&cmd_vf_allmulti_on_off,
+ NULL,
+ },
+};
+
+/* vf broadcast mode configuration */
+
+/* Common result structure for vf broadcast */
+struct cmd_set_vf_broadcast_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t vf;
+ cmdline_fixed_string_t broadcast;
+ uint8_t port_id;
+ uint16_t vf_id;
+ cmdline_fixed_string_t on_off;
+};
+
+/* Common CLI fields for vf broadcast enable disable */
+cmdline_parse_token_string_t cmd_set_vf_broadcast_set =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_set_vf_broadcast_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_set_vf_broadcast_vf =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_set_vf_broadcast_result,
+ vf, "vf");
+cmdline_parse_token_string_t cmd_set_vf_broadcast_broadcast =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_set_vf_broadcast_result,
+ broadcast, "broadcast");
+cmdline_parse_token_num_t cmd_set_vf_broadcast_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_set_vf_broadcast_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_set_vf_broadcast_vf_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_set_vf_broadcast_result,
+ vf_id, UINT16);
+cmdline_parse_token_string_t cmd_set_vf_broadcast_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_set_vf_broadcast_result,
+ on_off, "on#off");
+
+static void
+cmd_set_vf_broadcast_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_vf_broadcast_result *res = parsed_result;
+ int ret = -ENOTSUP;
+
+ __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_set_vf_broadcast(res->port_id,
+ res->vf_id, is_on);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid vf_id %d or is_on %d\n", res->vf_id, is_on);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_set_vf_broadcast = {
+ .f = cmd_set_vf_broadcast_parsed,
+ .data = NULL,
+ .help_str = "set vf broadcast <port_id> <vf_id> on|off",
+ .tokens = {
+ (void *)&cmd_set_vf_broadcast_set,
+ (void *)&cmd_set_vf_broadcast_vf,
+ (void *)&cmd_set_vf_broadcast_broadcast,
+ (void *)&cmd_set_vf_broadcast_port_id,
+ (void *)&cmd_set_vf_broadcast_vf_id,
+ (void *)&cmd_set_vf_broadcast_on_off,
+ NULL,
+ },
+};
+
+/* vf vlan tag configuration */
+
+/* Common result structure for vf vlan tag */
+struct cmd_set_vf_vlan_tag_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t vf;
+ cmdline_fixed_string_t vlan;
+ cmdline_fixed_string_t tag;
+ uint8_t port_id;
+ uint16_t vf_id;
+ cmdline_fixed_string_t on_off;
+};
+
+/* Common CLI fields for vf vlan tag enable disable */
+cmdline_parse_token_string_t cmd_set_vf_vlan_tag_set =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_set_vf_vlan_tag_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_set_vf_vlan_tag_vf =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_set_vf_vlan_tag_result,
+ vf, "vf");
+cmdline_parse_token_string_t cmd_set_vf_vlan_tag_vlan =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_set_vf_vlan_tag_result,
+ vlan, "vlan");
+cmdline_parse_token_string_t cmd_set_vf_vlan_tag_tag =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_set_vf_vlan_tag_result,
+ tag, "tag");
+cmdline_parse_token_num_t cmd_set_vf_vlan_tag_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_set_vf_vlan_tag_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_set_vf_vlan_tag_vf_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_set_vf_vlan_tag_result,
+ vf_id, UINT16);
+cmdline_parse_token_string_t cmd_set_vf_vlan_tag_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_set_vf_vlan_tag_result,
+ on_off, "on#off");
+
+static void
+cmd_set_vf_vlan_tag_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_vf_vlan_tag_result *res = parsed_result;
+ int ret = -ENOTSUP;
+
+ __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 1 : 0;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_set_vf_vlan_tag(res->port_id,
+ res->vf_id, is_on);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid vf_id %d or is_on %d\n", res->vf_id, is_on);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_set_vf_vlan_tag = {
+ .f = cmd_set_vf_vlan_tag_parsed,
+ .data = NULL,
+ .help_str = "set vf vlan tag <port_id> <vf_id> on|off",
+ .tokens = {
+ (void *)&cmd_set_vf_vlan_tag_set,
+ (void *)&cmd_set_vf_vlan_tag_vf,
+ (void *)&cmd_set_vf_vlan_tag_vlan,
+ (void *)&cmd_set_vf_vlan_tag_tag,
+ (void *)&cmd_set_vf_vlan_tag_port_id,
+ (void *)&cmd_set_vf_vlan_tag_vf_id,
+ (void *)&cmd_set_vf_vlan_tag_on_off,
+ NULL,
+ },
+};
+
+/* Common definition of VF and TC TX bandwidth configuration */
+struct cmd_vf_tc_bw_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t vf;
+ cmdline_fixed_string_t tc;
+ cmdline_fixed_string_t tx;
+ cmdline_fixed_string_t min_bw;
+ cmdline_fixed_string_t max_bw;
+ cmdline_fixed_string_t strict_link_prio;
+ uint8_t port_id;
+ uint16_t vf_id;
+ uint8_t tc_no;
+ uint32_t bw;
+ cmdline_fixed_string_t bw_list;
+ uint8_t tc_map;
+};
+
+cmdline_parse_token_string_t cmd_vf_tc_bw_set =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_tc_bw_result,
+ set, "set");
+cmdline_parse_token_string_t cmd_vf_tc_bw_vf =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_tc_bw_result,
+ vf, "vf");
+cmdline_parse_token_string_t cmd_vf_tc_bw_tc =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_tc_bw_result,
+ tc, "tc");
+cmdline_parse_token_string_t cmd_vf_tc_bw_tx =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_tc_bw_result,
+ tx, "tx");
+cmdline_parse_token_string_t cmd_vf_tc_bw_strict_link_prio =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_tc_bw_result,
+ strict_link_prio, "strict-link-priority");
+cmdline_parse_token_string_t cmd_vf_tc_bw_min_bw =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_tc_bw_result,
+ min_bw, "min-bandwidth");
+cmdline_parse_token_string_t cmd_vf_tc_bw_max_bw =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_tc_bw_result,
+ max_bw, "max-bandwidth");
+cmdline_parse_token_num_t cmd_vf_tc_bw_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_vf_tc_bw_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_vf_tc_bw_vf_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_vf_tc_bw_result,
+ vf_id, UINT16);
+cmdline_parse_token_num_t cmd_vf_tc_bw_tc_no =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_vf_tc_bw_result,
+ tc_no, UINT8);
+cmdline_parse_token_num_t cmd_vf_tc_bw_bw =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_vf_tc_bw_result,
+ bw, UINT32);
+cmdline_parse_token_string_t cmd_vf_tc_bw_bw_list =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_vf_tc_bw_result,
+ bw_list, NULL);
+cmdline_parse_token_num_t cmd_vf_tc_bw_tc_map =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_vf_tc_bw_result,
+ tc_map, UINT8);
+
+/* VF max bandwidth setting */
+static void
+cmd_vf_max_bw_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_vf_tc_bw_result *res = parsed_result;
+ int ret = -ENOTSUP;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_set_vf_max_bw(res->port_id,
+ res->vf_id, res->bw);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid vf_id %d or bandwidth %d\n",
+ res->vf_id, res->bw);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_vf_max_bw = {
+ .f = cmd_vf_max_bw_parsed,
+ .data = NULL,
+ .help_str = "set vf tx max-bandwidth <port_id> <vf_id> <bandwidth>",
+ .tokens = {
+ (void *)&cmd_vf_tc_bw_set,
+ (void *)&cmd_vf_tc_bw_vf,
+ (void *)&cmd_vf_tc_bw_tx,
+ (void *)&cmd_vf_tc_bw_max_bw,
+ (void *)&cmd_vf_tc_bw_port_id,
+ (void *)&cmd_vf_tc_bw_vf_id,
+ (void *)&cmd_vf_tc_bw_bw,
+ NULL,
+ },
+};
+
+static int
+vf_tc_min_bw_parse_bw_list(uint8_t *bw_list,
+ uint8_t *tc_num,
+ char *str)
+{
+ uint32_t size;
+ const char *p, *p0 = str;
+ char s[256];
+ char *end;
+ char *str_fld[16];
+ uint16_t i;
+ int ret;
+
+ p = strchr(p0, '(');
+ if (p == NULL) {
+ printf("The bandwidth-list should be '(bw1, bw2, ...)'\n");
+ return -1;
+ }
+ p++;
+ p0 = strchr(p, ')');
+ if (p0 == NULL) {
+ printf("The bandwidth-list should be '(bw1, bw2, ...)'\n");
+ return -1;
+ }
+ size = p0 - p;
+ if (size >= sizeof(s)) {
+ printf("The string size exceeds the internal buffer size\n");
+ return -1;
+ }
+ snprintf(s, sizeof(s), "%.*s", size, p);
+ ret = rte_strsplit(s, sizeof(s), str_fld, 16, ',');
+ if (ret <= 0) {
+ printf("Failed to get the bandwidth list. ");
+ return -1;
+ }
+ *tc_num = ret;
+ for (i = 0; i < ret; i++)
+ bw_list[i] = (uint8_t)strtoul(str_fld[i], &end, 0);
+
+ return 0;
+}
+
+/* TC min bandwidth setting */
+static void
+cmd_vf_tc_min_bw_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_vf_tc_bw_result *res = parsed_result;
+ uint8_t tc_num;
+ uint8_t bw[16];
+ int ret = -ENOTSUP;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+ ret = vf_tc_min_bw_parse_bw_list(bw, &tc_num, res->bw_list);
+ if (ret)
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_set_vf_tc_bw_alloc(res->port_id, res->vf_id,
+ tc_num, bw);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid vf_id %d or bandwidth\n", res->vf_id);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_vf_tc_min_bw = {
+ .f = cmd_vf_tc_min_bw_parsed,
+ .data = NULL,
+ .help_str = "set vf tc tx min-bandwidth <port_id> <vf_id>"
+ " <bw1, bw2, ...>",
+ .tokens = {
+ (void *)&cmd_vf_tc_bw_set,
+ (void *)&cmd_vf_tc_bw_vf,
+ (void *)&cmd_vf_tc_bw_tc,
+ (void *)&cmd_vf_tc_bw_tx,
+ (void *)&cmd_vf_tc_bw_min_bw,
+ (void *)&cmd_vf_tc_bw_port_id,
+ (void *)&cmd_vf_tc_bw_vf_id,
+ (void *)&cmd_vf_tc_bw_bw_list,
+ NULL,
+ },
+};
+
+static void
+cmd_tc_min_bw_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_vf_tc_bw_result *res = parsed_result;
+ struct rte_port *port;
+ uint8_t tc_num;
+ uint8_t bw[16];
+ int ret = -ENOTSUP;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+ port = &ports[res->port_id];
+ /** Check if the port is not started **/
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Please stop port %d first\n", res->port_id);
+ return;
+ }
+
+ ret = vf_tc_min_bw_parse_bw_list(bw, &tc_num, res->bw_list);
+ if (ret)
+ return;
+
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ ret = rte_pmd_ixgbe_set_tc_bw_alloc(res->port_id, tc_num, bw);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid bandwidth\n");
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_tc_min_bw = {
+ .f = cmd_tc_min_bw_parsed,
+ .data = NULL,
+ .help_str = "set tc tx min-bandwidth <port_id> <bw1, bw2, ...>",
+ .tokens = {
+ (void *)&cmd_vf_tc_bw_set,
+ (void *)&cmd_vf_tc_bw_tc,
+ (void *)&cmd_vf_tc_bw_tx,
+ (void *)&cmd_vf_tc_bw_min_bw,
+ (void *)&cmd_vf_tc_bw_port_id,
+ (void *)&cmd_vf_tc_bw_bw_list,
+ NULL,
+ },
+};
+
+/* TC max bandwidth setting */
+static void
+cmd_vf_tc_max_bw_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_vf_tc_bw_result *res = parsed_result;
+ int ret = -ENOTSUP;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_set_vf_tc_max_bw(res->port_id, res->vf_id,
+ res->tc_no, res->bw);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid vf_id %d, tc_no %d or bandwidth %d\n",
+ res->vf_id, res->tc_no, res->bw);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_vf_tc_max_bw = {
+ .f = cmd_vf_tc_max_bw_parsed,
+ .data = NULL,
+ .help_str = "set vf tc tx max-bandwidth <port_id> <vf_id> <tc_no>"
+ " <bandwidth>",
+ .tokens = {
+ (void *)&cmd_vf_tc_bw_set,
+ (void *)&cmd_vf_tc_bw_vf,
+ (void *)&cmd_vf_tc_bw_tc,
+ (void *)&cmd_vf_tc_bw_tx,
+ (void *)&cmd_vf_tc_bw_max_bw,
+ (void *)&cmd_vf_tc_bw_port_id,
+ (void *)&cmd_vf_tc_bw_vf_id,
+ (void *)&cmd_vf_tc_bw_tc_no,
+ (void *)&cmd_vf_tc_bw_bw,
+ NULL,
+ },
+};
+
+/* Strict link priority scheduling mode setting */
+static void
+cmd_strict_link_prio_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_vf_tc_bw_result *res = parsed_result;
+ int ret = -ENOTSUP;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_set_tc_strict_prio(res->port_id, res->tc_map);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid tc_bitmap 0x%x\n", res->tc_map);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_strict_link_prio = {
+ .f = cmd_strict_link_prio_parsed,
+ .data = NULL,
+ .help_str = "set tx strict-link-priority <port_id> <tc_bitmap>",
+ .tokens = {
+ (void *)&cmd_vf_tc_bw_set,
+ (void *)&cmd_vf_tc_bw_tx,
+ (void *)&cmd_vf_tc_bw_strict_link_prio,
+ (void *)&cmd_vf_tc_bw_port_id,
+ (void *)&cmd_vf_tc_bw_tc_map,
+ NULL,
+ },
+};
+
+/* Load dynamic device personalization*/
+struct cmd_ddp_add_result {
+ cmdline_fixed_string_t ddp;
+ cmdline_fixed_string_t add;
+ uint8_t port_id;
+ char filepath[];
+};
+
+cmdline_parse_token_string_t cmd_ddp_add_ddp =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_add_result, ddp, "ddp");
+cmdline_parse_token_string_t cmd_ddp_add_add =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_add_result, add, "add");
+cmdline_parse_token_num_t cmd_ddp_add_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_ddp_add_result, port_id, UINT8);
+cmdline_parse_token_string_t cmd_ddp_add_filepath =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_add_result, filepath, NULL);
+
+static void
+cmd_ddp_add_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_ddp_add_result *res = parsed_result;
+ uint8_t *buff;
+ uint32_t size;
+ int ret = -ENOTSUP;
+
+ if (res->port_id > nb_ports) {
+ printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
+ return;
+ }
+
+ if (!all_ports_stopped()) {
+ printf("Please stop all ports first\n");
+ return;
+ }
+
+ buff = open_ddp_package_file(res->filepath, &size);
+ if (!buff)
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_process_ddp_package(res->port_id,
+ buff, size,
+ RTE_PMD_I40E_PKG_OP_WR_ADD);
+#endif
+
+ if (ret < 0)
+ printf("Failed to load profile.\n");
+ else if (ret > 0)
+ printf("Profile has already existed.\n");
+
+ close_ddp_package_file(buff);
+}
+
+cmdline_parse_inst_t cmd_ddp_add = {
+ .f = cmd_ddp_add_parsed,
+ .data = NULL,
+ .help_str = "ddp add <port_id> <profile_path>",
+ .tokens = {
+ (void *)&cmd_ddp_add_ddp,
+ (void *)&cmd_ddp_add_add,
+ (void *)&cmd_ddp_add_port_id,
+ (void *)&cmd_ddp_add_filepath,
+ NULL,
+ },
+};
+
+/* Get dynamic device personalization profile info list*/
+#define PROFILE_INFO_SIZE 48
+#define MAX_PROFILE_NUM 16
+
+struct cmd_ddp_get_list_result {
+ cmdline_fixed_string_t ddp;
+ cmdline_fixed_string_t get;
+ cmdline_fixed_string_t list;
+ uint8_t port_id;
+};
+
+cmdline_parse_token_string_t cmd_ddp_get_list_ddp =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_get_list_result, ddp, "ddp");
+cmdline_parse_token_string_t cmd_ddp_get_list_get =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_get_list_result, get, "get");
+cmdline_parse_token_string_t cmd_ddp_get_list_list =
+ TOKEN_STRING_INITIALIZER(struct cmd_ddp_get_list_result, list, "list");
+cmdline_parse_token_num_t cmd_ddp_get_list_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_ddp_get_list_result, port_id, UINT8);
+
+static void
+cmd_ddp_get_list_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_ddp_get_list_result *res = parsed_result;
+#ifdef RTE_LIBRTE_I40E_PMD
+ struct rte_pmd_i40e_profile_list *p_list;
+ struct rte_pmd_i40e_profile_info *p_info;
+ uint32_t p_num;
+ uint32_t size;
+ uint32_t i;
+#endif
+ int ret = -ENOTSUP;
+
+ if (res->port_id > nb_ports) {
+ printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
+ return;
+ }
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ size = PROFILE_INFO_SIZE * MAX_PROFILE_NUM + 4;
+ p_list = (struct rte_pmd_i40e_profile_list *)malloc(size);
+ if (!p_list)
+ printf("%s: Failed to malloc buffer\n", __func__);
+
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_get_ddp_list(res->port_id,
+ (uint8_t *)p_list, size);
+
+ if (!ret) {
+ p_num = p_list->p_count;
+ printf("Profile number is: %d\n\n", p_num);
+
+ for (i = 0; i < p_num; i++) {
+ p_info = &p_list->p_info[i];
+ printf("Profile %d:\n", i);
+ printf("Track id: 0x%x\n", p_info->track_id);
+ printf("Version: %d.%d.%d.%d\n",
+ p_info->version.major,
+ p_info->version.minor,
+ p_info->version.update,
+ p_info->version.draft);
+ printf("Profile name: %s\n\n", p_info->name);
+ }
+ }
+
+ free(p_list);
+#endif
+
+ if (ret < 0)
+ printf("Failed to get ddp list\n");
+}
+
+cmdline_parse_inst_t cmd_ddp_get_list = {
+ .f = cmd_ddp_get_list_parsed,
+ .data = NULL,
+ .help_str = "ddp get list <port_id>",
+ .tokens = {
+ (void *)&cmd_ddp_get_list_ddp,
+ (void *)&cmd_ddp_get_list_get,
+ (void *)&cmd_ddp_get_list_list,
+ (void *)&cmd_ddp_get_list_port_id,
+ NULL,
+ },
+};
+
+/* show vf stats */
+
+/* Common result structure for show vf stats */
+struct cmd_show_vf_stats_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t vf;
+ cmdline_fixed_string_t stats;
+ uint8_t port_id;
+ uint16_t vf_id;
+};
+
+/* Common CLI fields show vf stats*/
+cmdline_parse_token_string_t cmd_show_vf_stats_show =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_show_vf_stats_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_show_vf_stats_vf =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_show_vf_stats_result,
+ vf, "vf");
+cmdline_parse_token_string_t cmd_show_vf_stats_stats =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_show_vf_stats_result,
+ stats, "stats");
+cmdline_parse_token_num_t cmd_show_vf_stats_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_show_vf_stats_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_show_vf_stats_vf_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_show_vf_stats_result,
+ vf_id, UINT16);
+
+static void
+cmd_show_vf_stats_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_show_vf_stats_result *res = parsed_result;
+ struct rte_eth_stats stats;
+ int ret = -ENOTSUP;
+ static const char *nic_stats_border = "########################";
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+ memset(&stats, 0, sizeof(stats));
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_get_vf_stats(res->port_id,
+ res->vf_id,
+ &stats);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid vf_id %d\n", res->vf_id);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+
+ printf("\n %s NIC statistics for port %-2d vf %-2d %s\n",
+ nic_stats_border, res->port_id, res->vf_id, nic_stats_border);
+
+ printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
+ "%-"PRIu64"\n",
+ stats.ipackets, stats.imissed, stats.ibytes);
+ printf(" RX-errors: %-"PRIu64"\n", stats.ierrors);
+ printf(" RX-nombuf: %-10"PRIu64"\n",
+ stats.rx_nombuf);
+ printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
+ "%-"PRIu64"\n",
+ stats.opackets, stats.oerrors, stats.obytes);
+
+ printf(" %s############################%s\n",
+ nic_stats_border, nic_stats_border);
+}
+
+cmdline_parse_inst_t cmd_show_vf_stats = {
+ .f = cmd_show_vf_stats_parsed,
+ .data = NULL,
+ .help_str = "show vf stats <port_id> <vf_id>",
+ .tokens = {
+ (void *)&cmd_show_vf_stats_show,
+ (void *)&cmd_show_vf_stats_vf,
+ (void *)&cmd_show_vf_stats_stats,
+ (void *)&cmd_show_vf_stats_port_id,
+ (void *)&cmd_show_vf_stats_vf_id,
+ NULL,
+ },
+};
+
+/* clear vf stats */
+
+/* Common result structure for clear vf stats */
+struct cmd_clear_vf_stats_result {
+ cmdline_fixed_string_t clear;
+ cmdline_fixed_string_t vf;
+ cmdline_fixed_string_t stats;
+ uint8_t port_id;
+ uint16_t vf_id;
+};
+
+/* Common CLI fields clear vf stats*/
+cmdline_parse_token_string_t cmd_clear_vf_stats_clear =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_clear_vf_stats_result,
+ clear, "clear");
+cmdline_parse_token_string_t cmd_clear_vf_stats_vf =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_clear_vf_stats_result,
+ vf, "vf");
+cmdline_parse_token_string_t cmd_clear_vf_stats_stats =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_clear_vf_stats_result,
+ stats, "stats");
+cmdline_parse_token_num_t cmd_clear_vf_stats_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_clear_vf_stats_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_clear_vf_stats_vf_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_clear_vf_stats_result,
+ vf_id, UINT16);
+
+static void
+cmd_clear_vf_stats_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_clear_vf_stats_result *res = parsed_result;
+ int ret = -ENOTSUP;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_reset_vf_stats(res->port_id,
+ res->vf_id);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid vf_id %d\n", res->vf_id);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_clear_vf_stats = {
+ .f = cmd_clear_vf_stats_parsed,
+ .data = NULL,
+ .help_str = "clear vf stats <port_id> <vf_id>",
+ .tokens = {
+ (void *)&cmd_clear_vf_stats_clear,
+ (void *)&cmd_clear_vf_stats_vf,
+ (void *)&cmd_clear_vf_stats_stats,
+ (void *)&cmd_clear_vf_stats_port_id,
+ (void *)&cmd_clear_vf_stats_vf_id,
+ NULL,
+ },
+};
+
+/* ptype mapping get */
+
+/* Common result structure for ptype mapping get */
+struct cmd_ptype_mapping_get_result {
+ cmdline_fixed_string_t ptype;
+ cmdline_fixed_string_t mapping;
+ cmdline_fixed_string_t get;
+ uint8_t port_id;
+ uint8_t valid_only;
+};
+
+/* Common CLI fields for ptype mapping get */
+cmdline_parse_token_string_t cmd_ptype_mapping_get_ptype =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_ptype_mapping_get_result,
+ ptype, "ptype");
+cmdline_parse_token_string_t cmd_ptype_mapping_get_mapping =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_ptype_mapping_get_result,
+ mapping, "mapping");
+cmdline_parse_token_string_t cmd_ptype_mapping_get_get =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_ptype_mapping_get_result,
+ get, "get");
+cmdline_parse_token_num_t cmd_ptype_mapping_get_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_ptype_mapping_get_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_ptype_mapping_get_valid_only =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_ptype_mapping_get_result,
+ valid_only, UINT8);
+
+static void
+cmd_ptype_mapping_get_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_ptype_mapping_get_result *res = parsed_result;
+ int ret = -ENOTSUP;
+#ifdef RTE_LIBRTE_I40E_PMD
+ int max_ptype_num = 256;
+ struct rte_pmd_i40e_ptype_mapping mapping[max_ptype_num];
+ uint16_t count;
+ int i;
+#endif
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_ptype_mapping_get(res->port_id,
+ mapping,
+ max_ptype_num,
+ &count,
+ res->valid_only);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ if (!ret) {
+ for (i = 0; i < count; i++)
+ printf("%3d\t0x%08x\n",
+ mapping[i].hw_ptype, mapping[i].sw_ptype);
+ }
+#endif
+}
+
+cmdline_parse_inst_t cmd_ptype_mapping_get = {
+ .f = cmd_ptype_mapping_get_parsed,
+ .data = NULL,
+ .help_str = "ptype mapping get <port_id> <valid_only>",
+ .tokens = {
+ (void *)&cmd_ptype_mapping_get_ptype,
+ (void *)&cmd_ptype_mapping_get_mapping,
+ (void *)&cmd_ptype_mapping_get_get,
+ (void *)&cmd_ptype_mapping_get_port_id,
+ (void *)&cmd_ptype_mapping_get_valid_only,
+ NULL,
+ },
+};
+
+/* ptype mapping replace */
+
+/* Common result structure for ptype mapping replace */
+struct cmd_ptype_mapping_replace_result {
+ cmdline_fixed_string_t ptype;
+ cmdline_fixed_string_t mapping;
+ cmdline_fixed_string_t replace;
+ uint8_t port_id;
+ uint32_t target;
+ uint8_t mask;
+ uint32_t pkt_type;
+};
+
+/* Common CLI fields for ptype mapping replace */
+cmdline_parse_token_string_t cmd_ptype_mapping_replace_ptype =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_ptype_mapping_replace_result,
+ ptype, "ptype");
+cmdline_parse_token_string_t cmd_ptype_mapping_replace_mapping =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_ptype_mapping_replace_result,
+ mapping, "mapping");
+cmdline_parse_token_string_t cmd_ptype_mapping_replace_replace =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_ptype_mapping_replace_result,
+ replace, "replace");
+cmdline_parse_token_num_t cmd_ptype_mapping_replace_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_ptype_mapping_replace_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_ptype_mapping_replace_target =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_ptype_mapping_replace_result,
+ target, UINT32);
+cmdline_parse_token_num_t cmd_ptype_mapping_replace_mask =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_ptype_mapping_replace_result,
+ mask, UINT8);
+cmdline_parse_token_num_t cmd_ptype_mapping_replace_pkt_type =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_ptype_mapping_replace_result,
+ pkt_type, UINT32);
+
+static void
+cmd_ptype_mapping_replace_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_ptype_mapping_replace_result *res = parsed_result;
+ int ret = -ENOTSUP;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_ptype_mapping_replace(res->port_id,
+ res->target,
+ res->mask,
+ res->pkt_type);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid ptype 0x%8x or 0x%8x\n",
+ res->target, res->pkt_type);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_ptype_mapping_replace = {
+ .f = cmd_ptype_mapping_replace_parsed,
+ .data = NULL,
+ .help_str =
+ "ptype mapping replace <port_id> <target> <mask> <pkt_type>",
+ .tokens = {
+ (void *)&cmd_ptype_mapping_replace_ptype,
+ (void *)&cmd_ptype_mapping_replace_mapping,
+ (void *)&cmd_ptype_mapping_replace_replace,
+ (void *)&cmd_ptype_mapping_replace_port_id,
+ (void *)&cmd_ptype_mapping_replace_target,
+ (void *)&cmd_ptype_mapping_replace_mask,
+ (void *)&cmd_ptype_mapping_replace_pkt_type,
+ NULL,
+ },
+};
+
+/* ptype mapping reset */
+
+/* Common result structure for ptype mapping reset */
+struct cmd_ptype_mapping_reset_result {
+ cmdline_fixed_string_t ptype;
+ cmdline_fixed_string_t mapping;
+ cmdline_fixed_string_t reset;
+ uint8_t port_id;
+};
+
+/* Common CLI fields for ptype mapping reset*/
+cmdline_parse_token_string_t cmd_ptype_mapping_reset_ptype =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_ptype_mapping_reset_result,
+ ptype, "ptype");
+cmdline_parse_token_string_t cmd_ptype_mapping_reset_mapping =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_ptype_mapping_reset_result,
+ mapping, "mapping");
+cmdline_parse_token_string_t cmd_ptype_mapping_reset_reset =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_ptype_mapping_reset_result,
+ reset, "reset");
+cmdline_parse_token_num_t cmd_ptype_mapping_reset_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_ptype_mapping_reset_result,
+ port_id, UINT8);
+
+static void
+cmd_ptype_mapping_reset_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_ptype_mapping_reset_result *res = parsed_result;
+ int ret = -ENOTSUP;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ ret = rte_pmd_i40e_ptype_mapping_reset(res->port_id);
+#endif
+
+ switch (ret) {
+ case 0:
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_ptype_mapping_reset = {
+ .f = cmd_ptype_mapping_reset_parsed,
+ .data = NULL,
+ .help_str = "ptype mapping reset <port_id>",
+ .tokens = {
+ (void *)&cmd_ptype_mapping_reset_ptype,
+ (void *)&cmd_ptype_mapping_reset_mapping,
+ (void *)&cmd_ptype_mapping_reset_reset,
+ (void *)&cmd_ptype_mapping_reset_port_id,
+ NULL,
+ },
+};
+
+/* ptype mapping update */
+
+/* Common result structure for ptype mapping update */
+struct cmd_ptype_mapping_update_result {
+ cmdline_fixed_string_t ptype;
+ cmdline_fixed_string_t mapping;
+ cmdline_fixed_string_t reset;
+ uint8_t port_id;
+ uint8_t hw_ptype;
+ uint32_t sw_ptype;
+};
+
+/* Common CLI fields for ptype mapping update*/
+cmdline_parse_token_string_t cmd_ptype_mapping_update_ptype =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_ptype_mapping_update_result,
+ ptype, "ptype");
+cmdline_parse_token_string_t cmd_ptype_mapping_update_mapping =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_ptype_mapping_update_result,
+ mapping, "mapping");
+cmdline_parse_token_string_t cmd_ptype_mapping_update_update =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_ptype_mapping_update_result,
+ reset, "update");
+cmdline_parse_token_num_t cmd_ptype_mapping_update_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_ptype_mapping_update_result,
+ port_id, UINT8);
+cmdline_parse_token_num_t cmd_ptype_mapping_update_hw_ptype =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_ptype_mapping_update_result,
+ hw_ptype, UINT8);
+cmdline_parse_token_num_t cmd_ptype_mapping_update_sw_ptype =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_ptype_mapping_update_result,
+ sw_ptype, UINT32);
+
+static void
+cmd_ptype_mapping_update_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_ptype_mapping_update_result *res = parsed_result;
+ int ret = -ENOTSUP;
+#ifdef RTE_LIBRTE_I40E_PMD
+ struct rte_pmd_i40e_ptype_mapping mapping;
+#endif
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+#ifdef RTE_LIBRTE_I40E_PMD
+ mapping.hw_ptype = res->hw_ptype;
+ mapping.sw_ptype = res->sw_ptype;
+ ret = rte_pmd_i40e_ptype_mapping_update(res->port_id,
+ &mapping,
+ 1,
+ 0);
#endif
+ switch (ret) {
+ case 0:
+ break;
+ case -EINVAL:
+ printf("invalid ptype 0x%8x\n", res->sw_ptype);
+ break;
+ case -ENODEV:
+ printf("invalid port_id %d\n", res->port_id);
+ break;
+ case -ENOTSUP:
+ printf("function not implemented\n");
+ break;
+ default:
+ printf("programming error: (%s)\n", strerror(-ret));
+ }
+}
+
+cmdline_parse_inst_t cmd_ptype_mapping_update = {
+ .f = cmd_ptype_mapping_update_parsed,
+ .data = NULL,
+ .help_str = "ptype mapping update <port_id> <hw_ptype> <sw_ptype>",
+ .tokens = {
+ (void *)&cmd_ptype_mapping_update_ptype,
+ (void *)&cmd_ptype_mapping_update_mapping,
+ (void *)&cmd_ptype_mapping_update_update,
+ (void *)&cmd_ptype_mapping_update_port_id,
+ (void *)&cmd_ptype_mapping_update_hw_ptype,
+ (void *)&cmd_ptype_mapping_update_sw_ptype,
+ NULL,
+ },
+};
+
+/* Common result structure for file commands */
+struct cmd_cmdfile_result {
+ cmdline_fixed_string_t load;
+ cmdline_fixed_string_t filename;
+};
+
+/* Common CLI fields for file commands */
+cmdline_parse_token_string_t cmd_load_cmdfile =
+ TOKEN_STRING_INITIALIZER(struct cmd_cmdfile_result, load, "load");
+cmdline_parse_token_string_t cmd_load_cmdfile_filename =
+ TOKEN_STRING_INITIALIZER(struct cmd_cmdfile_result, filename, NULL);
+
+static void
+cmd_load_from_file_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_cmdfile_result *res = parsed_result;
+
+ cmdline_read_from_file(res->filename);
+}
+
+cmdline_parse_inst_t cmd_load_from_file = {
+ .f = cmd_load_from_file_parsed,
+ .data = NULL,
+ .help_str = "load <filename>",
+ .tokens = {
+ (void *)&cmd_load_cmdfile,
+ (void *)&cmd_load_cmdfile_filename,
+ NULL,
+ },
+};
+
/* ******************************************************************************** */
/* list of instructions */
@@ -11442,6 +13572,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_help_brief,
(cmdline_parse_inst_t *)&cmd_help_long,
(cmdline_parse_inst_t *)&cmd_quit,
+ (cmdline_parse_inst_t *)&cmd_load_from_file,
(cmdline_parse_inst_t *)&cmd_showport,
(cmdline_parse_inst_t *)&cmd_showqueue,
(cmdline_parse_inst_t *)&cmd_showportall,
@@ -11537,15 +13668,11 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_config_burst,
(cmdline_parse_inst_t *)&cmd_config_thresh,
(cmdline_parse_inst_t *)&cmd_config_threshold,
- (cmdline_parse_inst_t *)&cmd_set_vf_rxmode,
(cmdline_parse_inst_t *)&cmd_set_uc_hash_filter,
(cmdline_parse_inst_t *)&cmd_set_uc_all_hash_filter,
(cmdline_parse_inst_t *)&cmd_vf_mac_addr_filter,
(cmdline_parse_inst_t *)&cmd_set_vf_macvlan_filter,
- (cmdline_parse_inst_t *)&cmd_set_vf_traffic,
- (cmdline_parse_inst_t *)&cmd_vf_rxvlan_filter,
(cmdline_parse_inst_t *)&cmd_queue_rate_limit,
- (cmdline_parse_inst_t *)&cmd_vf_rate_limit,
(cmdline_parse_inst_t *)&cmd_tunnel_filter,
(cmdline_parse_inst_t *)&cmd_tunnel_udp_config,
(cmdline_parse_inst_t *)&cmd_global_config,
@@ -11580,6 +13707,7 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_set_hash_global_config,
(cmdline_parse_inst_t *)&cmd_set_hash_input_set,
(cmdline_parse_inst_t *)&cmd_set_fdir_input_set,
+ (cmdline_parse_inst_t *)&cmd_flow,
(cmdline_parse_inst_t *)&cmd_mcast_addr,
(cmdline_parse_inst_t *)&cmd_config_l2_tunnel_eth_type_all,
(cmdline_parse_inst_t *)&cmd_config_l2_tunnel_eth_type_specific,
@@ -11591,19 +13719,65 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_config_e_tag_forwarding_en_dis,
(cmdline_parse_inst_t *)&cmd_config_e_tag_filter_add,
(cmdline_parse_inst_t *)&cmd_config_e_tag_filter_del,
-#ifdef RTE_LIBRTE_IXGBE_PMD
(cmdline_parse_inst_t *)&cmd_set_vf_vlan_anti_spoof,
(cmdline_parse_inst_t *)&cmd_set_vf_mac_anti_spoof,
(cmdline_parse_inst_t *)&cmd_set_vf_vlan_stripq,
(cmdline_parse_inst_t *)&cmd_set_vf_vlan_insert,
(cmdline_parse_inst_t *)&cmd_set_tx_loopback,
+#ifdef RTE_LIBRTE_IXGBE_PMD
(cmdline_parse_inst_t *)&cmd_set_all_queues_drop_en,
(cmdline_parse_inst_t *)&cmd_set_vf_split_drop_en,
- (cmdline_parse_inst_t *)&cmd_set_vf_mac_addr,
+ (cmdline_parse_inst_t *)&cmd_set_macsec_offload_on,
+ (cmdline_parse_inst_t *)&cmd_set_macsec_offload_off,
+ (cmdline_parse_inst_t *)&cmd_set_macsec_sc,
+ (cmdline_parse_inst_t *)&cmd_set_macsec_sa,
+ (cmdline_parse_inst_t *)&cmd_set_vf_rxmode,
+ (cmdline_parse_inst_t *)&cmd_set_vf_traffic,
+ (cmdline_parse_inst_t *)&cmd_vf_rate_limit,
#endif
+ (cmdline_parse_inst_t *)&cmd_vf_rxvlan_filter,
+ (cmdline_parse_inst_t *)&cmd_set_vf_mac_addr,
+ (cmdline_parse_inst_t *)&cmd_set_vf_promisc,
+ (cmdline_parse_inst_t *)&cmd_set_vf_allmulti,
+ (cmdline_parse_inst_t *)&cmd_set_vf_broadcast,
+ (cmdline_parse_inst_t *)&cmd_set_vf_vlan_tag,
+ (cmdline_parse_inst_t *)&cmd_vf_max_bw,
+ (cmdline_parse_inst_t *)&cmd_vf_tc_min_bw,
+ (cmdline_parse_inst_t *)&cmd_vf_tc_max_bw,
+ (cmdline_parse_inst_t *)&cmd_strict_link_prio,
+ (cmdline_parse_inst_t *)&cmd_tc_min_bw,
+ (cmdline_parse_inst_t *)&cmd_ddp_add,
+ (cmdline_parse_inst_t *)&cmd_ddp_get_list,
+ (cmdline_parse_inst_t *)&cmd_show_vf_stats,
+ (cmdline_parse_inst_t *)&cmd_clear_vf_stats,
+ (cmdline_parse_inst_t *)&cmd_ptype_mapping_get,
+ (cmdline_parse_inst_t *)&cmd_ptype_mapping_replace,
+ (cmdline_parse_inst_t *)&cmd_ptype_mapping_reset,
+ (cmdline_parse_inst_t *)&cmd_ptype_mapping_update,
NULL,
};
+/* read cmdline commands from file */
+void
+cmdline_read_from_file(const char *filename)
+{
+ struct cmdline *cl;
+
+ cl = cmdline_file_new(main_ctx, "testpmd> ", filename);
+ if (cl == NULL) {
+ printf("Failed to create file based cmdline context: %s\n",
+ filename);
+ return;
+ }
+
+ cmdline_interact(cl);
+ cmdline_quit(cl);
+
+ cmdline_free(cl);
+
+ printf("Read CLI commands from %s\n", filename);
+}
+
/* prompt function, called from main on MASTER lcore */
void
prompt(void)
@@ -11632,7 +13806,7 @@ cmd_reconfig_device_queue(portid_t id, uint8_t dev, uint8_t queue)
if (id == (portid_t)RTE_PORT_ALL) {
portid_t pid;
- FOREACH_PORT(pid, ports) {
+ RTE_ETH_FOREACH_DEV(pid) {
/* check if need_reconfig has been set to 1 */
if (ports[pid].need_reconfig == 0)
ports[pid].need_reconfig = dev;
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
new file mode 100644
index 00000000..0fd69f90
--- /dev/null
+++ b/app/test-pmd/cmdline_flow.c
@@ -0,0 +1,2812 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <ctype.h>
+#include <string.h>
+#include <arpa/inet.h>
+#include <sys/socket.h>
+
+#include <rte_common.h>
+#include <rte_ethdev.h>
+#include <rte_byteorder.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_etheraddr.h>
+#include <rte_flow.h>
+
+#include "testpmd.h"
+
+/** Parser token indices. */
+enum index {
+ /* Special tokens. */
+ ZERO = 0,
+ END,
+
+ /* Common tokens. */
+ INTEGER,
+ UNSIGNED,
+ PREFIX,
+ BOOLEAN,
+ STRING,
+ MAC_ADDR,
+ IPV4_ADDR,
+ IPV6_ADDR,
+ RULE_ID,
+ PORT_ID,
+ GROUP_ID,
+ PRIORITY_LEVEL,
+
+ /* Top-level command. */
+ FLOW,
+
+ /* Sub-level commands. */
+ VALIDATE,
+ CREATE,
+ DESTROY,
+ FLUSH,
+ QUERY,
+ LIST,
+
+ /* Destroy arguments. */
+ DESTROY_RULE,
+
+ /* Query arguments. */
+ QUERY_ACTION,
+
+ /* List arguments. */
+ LIST_GROUP,
+
+ /* Validate/create arguments. */
+ GROUP,
+ PRIORITY,
+ INGRESS,
+ EGRESS,
+
+ /* Validate/create pattern. */
+ PATTERN,
+ ITEM_PARAM_IS,
+ ITEM_PARAM_SPEC,
+ ITEM_PARAM_LAST,
+ ITEM_PARAM_MASK,
+ ITEM_PARAM_PREFIX,
+ ITEM_NEXT,
+ ITEM_END,
+ ITEM_VOID,
+ ITEM_INVERT,
+ ITEM_ANY,
+ ITEM_ANY_NUM,
+ ITEM_PF,
+ ITEM_VF,
+ ITEM_VF_ID,
+ ITEM_PORT,
+ ITEM_PORT_INDEX,
+ ITEM_RAW,
+ ITEM_RAW_RELATIVE,
+ ITEM_RAW_SEARCH,
+ ITEM_RAW_OFFSET,
+ ITEM_RAW_LIMIT,
+ ITEM_RAW_PATTERN,
+ ITEM_ETH,
+ ITEM_ETH_DST,
+ ITEM_ETH_SRC,
+ ITEM_ETH_TYPE,
+ ITEM_VLAN,
+ ITEM_VLAN_TPID,
+ ITEM_VLAN_TCI,
+ ITEM_VLAN_PCP,
+ ITEM_VLAN_DEI,
+ ITEM_VLAN_VID,
+ ITEM_IPV4,
+ ITEM_IPV4_TOS,
+ ITEM_IPV4_TTL,
+ ITEM_IPV4_PROTO,
+ ITEM_IPV4_SRC,
+ ITEM_IPV4_DST,
+ ITEM_IPV6,
+ ITEM_IPV6_TC,
+ ITEM_IPV6_FLOW,
+ ITEM_IPV6_PROTO,
+ ITEM_IPV6_HOP,
+ ITEM_IPV6_SRC,
+ ITEM_IPV6_DST,
+ ITEM_ICMP,
+ ITEM_ICMP_TYPE,
+ ITEM_ICMP_CODE,
+ ITEM_UDP,
+ ITEM_UDP_SRC,
+ ITEM_UDP_DST,
+ ITEM_TCP,
+ ITEM_TCP_SRC,
+ ITEM_TCP_DST,
+ ITEM_SCTP,
+ ITEM_SCTP_SRC,
+ ITEM_SCTP_DST,
+ ITEM_SCTP_TAG,
+ ITEM_SCTP_CKSUM,
+ ITEM_VXLAN,
+ ITEM_VXLAN_VNI,
+ ITEM_E_TAG,
+ ITEM_E_TAG_GRP_ECID_B,
+ ITEM_NVGRE,
+ ITEM_NVGRE_TNI,
+ ITEM_MPLS,
+ ITEM_MPLS_LABEL,
+ ITEM_GRE,
+ ITEM_GRE_PROTO,
+
+ /* Validate/create actions. */
+ ACTIONS,
+ ACTION_NEXT,
+ ACTION_END,
+ ACTION_VOID,
+ ACTION_PASSTHRU,
+ ACTION_MARK,
+ ACTION_MARK_ID,
+ ACTION_FLAG,
+ ACTION_QUEUE,
+ ACTION_QUEUE_INDEX,
+ ACTION_DROP,
+ ACTION_COUNT,
+ ACTION_DUP,
+ ACTION_DUP_INDEX,
+ ACTION_RSS,
+ ACTION_RSS_QUEUES,
+ ACTION_RSS_QUEUE,
+ ACTION_PF,
+ ACTION_VF,
+ ACTION_VF_ORIGINAL,
+ ACTION_VF_ID,
+};
+
+/** Size of pattern[] field in struct rte_flow_item_raw. */
+#define ITEM_RAW_PATTERN_SIZE 36
+
+/** Storage size for struct rte_flow_item_raw including pattern. */
+#define ITEM_RAW_SIZE \
+ (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
+
+/** Number of queue[] entries in struct rte_flow_action_rss. */
+#define ACTION_RSS_NUM 32
+
+/** Storage size for struct rte_flow_action_rss including queues. */
+#define ACTION_RSS_SIZE \
+ (offsetof(struct rte_flow_action_rss, queue) + \
+ sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)
+
+/** Maximum number of subsequent tokens and arguments on the stack. */
+#define CTX_STACK_SIZE 16
+
+/** Parser context. */
+struct context {
+ /** Stack of subsequent token lists to process. */
+ const enum index *next[CTX_STACK_SIZE];
+ /** Arguments for stacked tokens. */
+ const void *args[CTX_STACK_SIZE];
+ enum index curr; /**< Current token index. */
+ enum index prev; /**< Index of the last token seen. */
+ int next_num; /**< Number of entries in next[]. */
+ int args_num; /**< Number of entries in args[]. */
+ uint32_t reparse:1; /**< Start over from the beginning. */
+ uint32_t eol:1; /**< EOL has been detected. */
+ uint32_t last:1; /**< No more arguments. */
+ uint16_t port; /**< Current port ID (for completions). */
+ uint32_t objdata; /**< Object-specific data. */
+ void *object; /**< Address of current object for relative offsets. */
+ void *objmask; /**< Object a full mask must be written to. */
+};
+
+/** Token argument. */
+struct arg {
+ uint32_t hton:1; /**< Use network byte ordering. */
+ uint32_t sign:1; /**< Value is signed. */
+ uint32_t offset; /**< Relative offset from ctx->object. */
+ uint32_t size; /**< Field size. */
+ const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
+};
+
+/** Parser token definition. */
+struct token {
+ /** Type displayed during completion (defaults to "TOKEN"). */
+ const char *type;
+ /** Help displayed during completion (defaults to token name). */
+ const char *help;
+ /** Private data used by parser functions. */
+ const void *priv;
+ /**
+ * Lists of subsequent tokens to push on the stack. Each call to the
+ * parser consumes the last entry of that stack.
+ */
+ const enum index *const *next;
+ /** Arguments stack for subsequent tokens that need them. */
+ const struct arg *const *args;
+ /**
+ * Token-processing callback, returns -1 in case of error, the
+ * length of the matched string otherwise. If NULL, attempts to
+ * match the token name.
+ *
+ * If buf is not NULL, the result should be stored in it according
+ * to context. An error is returned if not large enough.
+ */
+ int (*call)(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size);
+ /**
+ * Callback that provides possible values for this token, used for
+ * completion. Returns -1 in case of error, the number of possible
+ * values otherwise. If NULL, the token name is used.
+ *
+ * If buf is not NULL, entry index ent is written to buf and the
+ * full length of the entry is returned (same behavior as
+ * snprintf()).
+ */
+ int (*comp)(struct context *ctx, const struct token *token,
+ unsigned int ent, char *buf, unsigned int size);
+ /** Mandatory token name, no default value. */
+ const char *name;
+};
+
+/** Static initializer for the next field. */
+#define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
+
+/** Static initializer for a NEXT() entry. */
+#define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
+
+/** Static initializer for the args field. */
+#define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
+
+/** Static initializer for ARGS() to target a field. */
+#define ARGS_ENTRY(s, f) \
+ (&(const struct arg){ \
+ .offset = offsetof(s, f), \
+ .size = sizeof(((s *)0)->f), \
+ })
+
+/** Static initializer for ARGS() to target a bit-field. */
+#define ARGS_ENTRY_BF(s, f, b) \
+ (&(const struct arg){ \
+ .size = sizeof(s), \
+ .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
+ })
+
+/** Static initializer for ARGS() to target an arbitrary bit-mask. */
+#define ARGS_ENTRY_MASK(s, f, m) \
+ (&(const struct arg){ \
+ .offset = offsetof(s, f), \
+ .size = sizeof(((s *)0)->f), \
+ .mask = (const void *)(m), \
+ })
+
+/** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
+#define ARGS_ENTRY_MASK_HTON(s, f, m) \
+ (&(const struct arg){ \
+ .hton = 1, \
+ .offset = offsetof(s, f), \
+ .size = sizeof(((s *)0)->f), \
+ .mask = (const void *)(m), \
+ })
+
+/** Static initializer for ARGS() to target a pointer. */
+#define ARGS_ENTRY_PTR(s, f) \
+ (&(const struct arg){ \
+ .size = sizeof(*((s *)0)->f), \
+ })
+
+/** Static initializer for ARGS() with arbitrary size. */
+#define ARGS_ENTRY_USZ(s, f, sz) \
+ (&(const struct arg){ \
+ .offset = offsetof(s, f), \
+ .size = (sz), \
+ })
+
+/** Same as ARGS_ENTRY() using network byte ordering. */
+#define ARGS_ENTRY_HTON(s, f) \
+ (&(const struct arg){ \
+ .hton = 1, \
+ .offset = offsetof(s, f), \
+ .size = sizeof(((s *)0)->f), \
+ })
+
+/** Parser output buffer layout expected by cmd_flow_parsed(). */
+struct buffer {
+ enum index command; /**< Flow command. */
+ uint16_t port; /**< Affected port ID. */
+ union {
+ struct {
+ struct rte_flow_attr attr;
+ struct rte_flow_item *pattern;
+ struct rte_flow_action *actions;
+ uint32_t pattern_n;
+ uint32_t actions_n;
+ uint8_t *data;
+ } vc; /**< Validate/create arguments. */
+ struct {
+ uint32_t *rule;
+ uint32_t rule_n;
+ } destroy; /**< Destroy arguments. */
+ struct {
+ uint32_t rule;
+ enum rte_flow_action_type action;
+ } query; /**< Query arguments. */
+ struct {
+ uint32_t *group;
+ uint32_t group_n;
+ } list; /**< List arguments. */
+ } args; /**< Command arguments. */
+};
+
+/** Private data for pattern items. */
+struct parse_item_priv {
+ enum rte_flow_item_type type; /**< Item type. */
+ uint32_t size; /**< Size of item specification structure. */
+};
+
+#define PRIV_ITEM(t, s) \
+ (&(const struct parse_item_priv){ \
+ .type = RTE_FLOW_ITEM_TYPE_ ## t, \
+ .size = s, \
+ })
+
+/** Private data for actions. */
+struct parse_action_priv {
+ enum rte_flow_action_type type; /**< Action type. */
+ uint32_t size; /**< Size of action configuration structure. */
+};
+
+#define PRIV_ACTION(t, s) \
+ (&(const struct parse_action_priv){ \
+ .type = RTE_FLOW_ACTION_TYPE_ ## t, \
+ .size = s, \
+ })
+
+static const enum index next_vc_attr[] = {
+ GROUP,
+ PRIORITY,
+ INGRESS,
+ EGRESS,
+ PATTERN,
+ ZERO,
+};
+
+static const enum index next_destroy_attr[] = {
+ DESTROY_RULE,
+ END,
+ ZERO,
+};
+
+static const enum index next_list_attr[] = {
+ LIST_GROUP,
+ END,
+ ZERO,
+};
+
+static const enum index item_param[] = {
+ ITEM_PARAM_IS,
+ ITEM_PARAM_SPEC,
+ ITEM_PARAM_LAST,
+ ITEM_PARAM_MASK,
+ ITEM_PARAM_PREFIX,
+ ZERO,
+};
+
+static const enum index next_item[] = {
+ ITEM_END,
+ ITEM_VOID,
+ ITEM_INVERT,
+ ITEM_ANY,
+ ITEM_PF,
+ ITEM_VF,
+ ITEM_PORT,
+ ITEM_RAW,
+ ITEM_ETH,
+ ITEM_VLAN,
+ ITEM_IPV4,
+ ITEM_IPV6,
+ ITEM_ICMP,
+ ITEM_UDP,
+ ITEM_TCP,
+ ITEM_SCTP,
+ ITEM_VXLAN,
+ ITEM_E_TAG,
+ ITEM_NVGRE,
+ ITEM_MPLS,
+ ITEM_GRE,
+ ZERO,
+};
+
+static const enum index item_any[] = {
+ ITEM_ANY_NUM,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_vf[] = {
+ ITEM_VF_ID,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_port[] = {
+ ITEM_PORT_INDEX,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_raw[] = {
+ ITEM_RAW_RELATIVE,
+ ITEM_RAW_SEARCH,
+ ITEM_RAW_OFFSET,
+ ITEM_RAW_LIMIT,
+ ITEM_RAW_PATTERN,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_eth[] = {
+ ITEM_ETH_DST,
+ ITEM_ETH_SRC,
+ ITEM_ETH_TYPE,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_vlan[] = {
+ ITEM_VLAN_TPID,
+ ITEM_VLAN_TCI,
+ ITEM_VLAN_PCP,
+ ITEM_VLAN_DEI,
+ ITEM_VLAN_VID,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_ipv4[] = {
+ ITEM_IPV4_TOS,
+ ITEM_IPV4_TTL,
+ ITEM_IPV4_PROTO,
+ ITEM_IPV4_SRC,
+ ITEM_IPV4_DST,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_ipv6[] = {
+ ITEM_IPV6_TC,
+ ITEM_IPV6_FLOW,
+ ITEM_IPV6_PROTO,
+ ITEM_IPV6_HOP,
+ ITEM_IPV6_SRC,
+ ITEM_IPV6_DST,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_icmp[] = {
+ ITEM_ICMP_TYPE,
+ ITEM_ICMP_CODE,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_udp[] = {
+ ITEM_UDP_SRC,
+ ITEM_UDP_DST,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_tcp[] = {
+ ITEM_TCP_SRC,
+ ITEM_TCP_DST,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_sctp[] = {
+ ITEM_SCTP_SRC,
+ ITEM_SCTP_DST,
+ ITEM_SCTP_TAG,
+ ITEM_SCTP_CKSUM,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_vxlan[] = {
+ ITEM_VXLAN_VNI,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_e_tag[] = {
+ ITEM_E_TAG_GRP_ECID_B,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_nvgre[] = {
+ ITEM_NVGRE_TNI,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_mpls[] = {
+ ITEM_MPLS_LABEL,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_gre[] = {
+ ITEM_GRE_PROTO,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index next_action[] = {
+ ACTION_END,
+ ACTION_VOID,
+ ACTION_PASSTHRU,
+ ACTION_MARK,
+ ACTION_FLAG,
+ ACTION_QUEUE,
+ ACTION_DROP,
+ ACTION_COUNT,
+ ACTION_DUP,
+ ACTION_RSS,
+ ACTION_PF,
+ ACTION_VF,
+ ZERO,
+};
+
+static const enum index action_mark[] = {
+ ACTION_MARK_ID,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_queue[] = {
+ ACTION_QUEUE_INDEX,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_dup[] = {
+ ACTION_DUP_INDEX,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_rss[] = {
+ ACTION_RSS_QUEUES,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_vf[] = {
+ ACTION_VF_ORIGINAL,
+ ACTION_VF_ID,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static int parse_init(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_vc(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_vc_spec(struct context *, const struct token *,
+ const char *, unsigned int, void *, unsigned int);
+static int parse_vc_conf(struct context *, const struct token *,
+ const char *, unsigned int, void *, unsigned int);
+static int parse_vc_action_rss_queue(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
+static int parse_destroy(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_flush(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_query(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_action(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_list(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_int(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_prefix(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_boolean(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_string(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_mac_addr(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_ipv4_addr(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_ipv6_addr(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int parse_port(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
+static int comp_none(struct context *, const struct token *,
+ unsigned int, char *, unsigned int);
+static int comp_boolean(struct context *, const struct token *,
+ unsigned int, char *, unsigned int);
+static int comp_action(struct context *, const struct token *,
+ unsigned int, char *, unsigned int);
+static int comp_port(struct context *, const struct token *,
+ unsigned int, char *, unsigned int);
+static int comp_rule_id(struct context *, const struct token *,
+ unsigned int, char *, unsigned int);
+static int comp_vc_action_rss_queue(struct context *, const struct token *,
+ unsigned int, char *, unsigned int);
+
+/** Token definitions. */
+static const struct token token_list[] = {
+ /* Special tokens. */
+ [ZERO] = {
+ .name = "ZERO",
+ .help = "null entry, abused as the entry point",
+ .next = NEXT(NEXT_ENTRY(FLOW)),
+ },
+ [END] = {
+ .name = "",
+ .type = "RETURN",
+ .help = "command may end here",
+ },
+ /* Common tokens. */
+ [INTEGER] = {
+ .name = "{int}",
+ .type = "INTEGER",
+ .help = "integer value",
+ .call = parse_int,
+ .comp = comp_none,
+ },
+ [UNSIGNED] = {
+ .name = "{unsigned}",
+ .type = "UNSIGNED",
+ .help = "unsigned integer value",
+ .call = parse_int,
+ .comp = comp_none,
+ },
+ [PREFIX] = {
+ .name = "{prefix}",
+ .type = "PREFIX",
+ .help = "prefix length for bit-mask",
+ .call = parse_prefix,
+ .comp = comp_none,
+ },
+ [BOOLEAN] = {
+ .name = "{boolean}",
+ .type = "BOOLEAN",
+ .help = "any boolean value",
+ .call = parse_boolean,
+ .comp = comp_boolean,
+ },
+ [STRING] = {
+ .name = "{string}",
+ .type = "STRING",
+ .help = "fixed string",
+ .call = parse_string,
+ .comp = comp_none,
+ },
+ [MAC_ADDR] = {
+ .name = "{MAC address}",
+ .type = "MAC-48",
+ .help = "standard MAC address notation",
+ .call = parse_mac_addr,
+ .comp = comp_none,
+ },
+ [IPV4_ADDR] = {
+ .name = "{IPv4 address}",
+ .type = "IPV4 ADDRESS",
+ .help = "standard IPv4 address notation",
+ .call = parse_ipv4_addr,
+ .comp = comp_none,
+ },
+ [IPV6_ADDR] = {
+ .name = "{IPv6 address}",
+ .type = "IPV6 ADDRESS",
+ .help = "standard IPv6 address notation",
+ .call = parse_ipv6_addr,
+ .comp = comp_none,
+ },
+ [RULE_ID] = {
+ .name = "{rule id}",
+ .type = "RULE ID",
+ .help = "rule identifier",
+ .call = parse_int,
+ .comp = comp_rule_id,
+ },
+ [PORT_ID] = {
+ .name = "{port_id}",
+ .type = "PORT ID",
+ .help = "port identifier",
+ .call = parse_port,
+ .comp = comp_port,
+ },
+ [GROUP_ID] = {
+ .name = "{group_id}",
+ .type = "GROUP ID",
+ .help = "group identifier",
+ .call = parse_int,
+ .comp = comp_none,
+ },
+ [PRIORITY_LEVEL] = {
+ .name = "{level}",
+ .type = "PRIORITY",
+ .help = "priority level",
+ .call = parse_int,
+ .comp = comp_none,
+ },
+ /* Top-level command. */
+ [FLOW] = {
+ .name = "flow",
+ .type = "{command} {port_id} [{arg} [...]]",
+ .help = "manage ingress/egress flow rules",
+ .next = NEXT(NEXT_ENTRY
+ (VALIDATE,
+ CREATE,
+ DESTROY,
+ FLUSH,
+ LIST,
+ QUERY)),
+ .call = parse_init,
+ },
+ /* Sub-level commands. */
+ [VALIDATE] = {
+ .name = "validate",
+ .help = "check whether a flow rule can be created",
+ .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, port)),
+ .call = parse_vc,
+ },
+ [CREATE] = {
+ .name = "create",
+ .help = "create a flow rule",
+ .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, port)),
+ .call = parse_vc,
+ },
+ [DESTROY] = {
+ .name = "destroy",
+ .help = "destroy specific flow rules",
+ .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, port)),
+ .call = parse_destroy,
+ },
+ [FLUSH] = {
+ .name = "flush",
+ .help = "destroy all flow rules",
+ .next = NEXT(NEXT_ENTRY(PORT_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, port)),
+ .call = parse_flush,
+ },
+ [QUERY] = {
+ .name = "query",
+ .help = "query an existing flow rule",
+ .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
+ NEXT_ENTRY(RULE_ID),
+ NEXT_ENTRY(PORT_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
+ ARGS_ENTRY(struct buffer, args.query.rule),
+ ARGS_ENTRY(struct buffer, port)),
+ .call = parse_query,
+ },
+ [LIST] = {
+ .name = "list",
+ .help = "list existing flow rules",
+ .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, port)),
+ .call = parse_list,
+ },
+ /* Destroy arguments. */
+ [DESTROY_RULE] = {
+ .name = "rule",
+ .help = "specify a rule identifier",
+ .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
+ .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
+ .call = parse_destroy,
+ },
+ /* Query arguments. */
+ [QUERY_ACTION] = {
+ .name = "{action}",
+ .type = "ACTION",
+ .help = "action to query, must be part of the rule",
+ .call = parse_action,
+ .comp = comp_action,
+ },
+ /* List arguments. */
+ [LIST_GROUP] = {
+ .name = "group",
+ .help = "specify a group",
+ .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
+ .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
+ .call = parse_list,
+ },
+ /* Validate/create attributes. */
+ [GROUP] = {
+ .name = "group",
+ .help = "specify a group",
+ .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
+ .call = parse_vc,
+ },
+ [PRIORITY] = {
+ .name = "priority",
+ .help = "specify a priority level",
+ .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
+ .call = parse_vc,
+ },
+ [INGRESS] = {
+ .name = "ingress",
+ .help = "affect rule to ingress",
+ .next = NEXT(next_vc_attr),
+ .call = parse_vc,
+ },
+ [EGRESS] = {
+ .name = "egress",
+ .help = "affect rule to egress",
+ .next = NEXT(next_vc_attr),
+ .call = parse_vc,
+ },
+ /* Validate/create pattern. */
+ [PATTERN] = {
+ .name = "pattern",
+ .help = "submit a list of pattern items",
+ .next = NEXT(next_item),
+ .call = parse_vc,
+ },
+ [ITEM_PARAM_IS] = {
+ .name = "is",
+ .help = "match value perfectly (with full bit-mask)",
+ .call = parse_vc_spec,
+ },
+ [ITEM_PARAM_SPEC] = {
+ .name = "spec",
+ .help = "match value according to configured bit-mask",
+ .call = parse_vc_spec,
+ },
+ [ITEM_PARAM_LAST] = {
+ .name = "last",
+ .help = "specify upper bound to establish a range",
+ .call = parse_vc_spec,
+ },
+ [ITEM_PARAM_MASK] = {
+ .name = "mask",
+ .help = "specify bit-mask with relevant bits set to one",
+ .call = parse_vc_spec,
+ },
+ [ITEM_PARAM_PREFIX] = {
+ .name = "prefix",
+ .help = "generate bit-mask from a prefix length",
+ .call = parse_vc_spec,
+ },
+ [ITEM_NEXT] = {
+ .name = "/",
+ .help = "specify next pattern item",
+ .next = NEXT(next_item),
+ },
+ [ITEM_END] = {
+ .name = "end",
+ .help = "end list of pattern items",
+ .priv = PRIV_ITEM(END, 0),
+ .next = NEXT(NEXT_ENTRY(ACTIONS)),
+ .call = parse_vc,
+ },
+ [ITEM_VOID] = {
+ .name = "void",
+ .help = "no-op pattern item",
+ .priv = PRIV_ITEM(VOID, 0),
+ .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
+ .call = parse_vc,
+ },
+ [ITEM_INVERT] = {
+ .name = "invert",
+ .help = "perform actions when pattern does not match",
+ .priv = PRIV_ITEM(INVERT, 0),
+ .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
+ .call = parse_vc,
+ },
+ [ITEM_ANY] = {
+ .name = "any",
+ .help = "match any protocol for the current layer",
+ .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
+ .next = NEXT(item_any),
+ .call = parse_vc,
+ },
+ [ITEM_ANY_NUM] = {
+ .name = "num",
+ .help = "number of layers covered",
+ .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
+ },
+ [ITEM_PF] = {
+ .name = "pf",
+ .help = "match packets addressed to the physical function",
+ .priv = PRIV_ITEM(PF, 0),
+ .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
+ .call = parse_vc,
+ },
+ [ITEM_VF] = {
+ .name = "vf",
+ .help = "match packets addressed to a virtual function ID",
+ .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
+ .next = NEXT(item_vf),
+ .call = parse_vc,
+ },
+ [ITEM_VF_ID] = {
+ .name = "id",
+ .help = "destination VF ID",
+ .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
+ },
+ [ITEM_PORT] = {
+ .name = "port",
+ .help = "device-specific physical port index to use",
+ .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
+ .next = NEXT(item_port),
+ .call = parse_vc,
+ },
+ [ITEM_PORT_INDEX] = {
+ .name = "index",
+ .help = "physical port index",
+ .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
+ },
+ [ITEM_RAW] = {
+ .name = "raw",
+ .help = "match an arbitrary byte string",
+ .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
+ .next = NEXT(item_raw),
+ .call = parse_vc,
+ },
+ [ITEM_RAW_RELATIVE] = {
+ .name = "relative",
+ .help = "look for pattern after the previous item",
+ .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
+ .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
+ relative, 1)),
+ },
+ [ITEM_RAW_SEARCH] = {
+ .name = "search",
+ .help = "search pattern from offset (see also limit)",
+ .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
+ .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
+ search, 1)),
+ },
+ [ITEM_RAW_OFFSET] = {
+ .name = "offset",
+ .help = "absolute or relative offset for pattern",
+ .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
+ },
+ [ITEM_RAW_LIMIT] = {
+ .name = "limit",
+ .help = "search area limit for start of pattern",
+ .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
+ },
+ [ITEM_RAW_PATTERN] = {
+ .name = "pattern",
+ .help = "byte string to look for",
+ .next = NEXT(item_raw,
+ NEXT_ENTRY(STRING),
+ NEXT_ENTRY(ITEM_PARAM_IS,
+ ITEM_PARAM_SPEC,
+ ITEM_PARAM_MASK)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
+ ARGS_ENTRY_USZ(struct rte_flow_item_raw,
+ pattern,
+ ITEM_RAW_PATTERN_SIZE)),
+ },
+ [ITEM_ETH] = {
+ .name = "eth",
+ .help = "match Ethernet header",
+ .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
+ .next = NEXT(item_eth),
+ .call = parse_vc,
+ },
+ [ITEM_ETH_DST] = {
+ .name = "dst",
+ .help = "destination MAC",
+ .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
+ },
+ [ITEM_ETH_SRC] = {
+ .name = "src",
+ .help = "source MAC",
+ .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
+ },
+ [ITEM_ETH_TYPE] = {
+ .name = "type",
+ .help = "EtherType",
+ .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
+ },
+ [ITEM_VLAN] = {
+ .name = "vlan",
+ .help = "match 802.1Q/ad VLAN tag",
+ .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
+ .next = NEXT(item_vlan),
+ .call = parse_vc,
+ },
+ [ITEM_VLAN_TPID] = {
+ .name = "tpid",
+ .help = "tag protocol identifier",
+ .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
+ },
+ [ITEM_VLAN_TCI] = {
+ .name = "tci",
+ .help = "tag control information",
+ .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
+ },
+ [ITEM_VLAN_PCP] = {
+ .name = "pcp",
+ .help = "priority code point",
+ .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
+ tci, "\xe0\x00")),
+ },
+ [ITEM_VLAN_DEI] = {
+ .name = "dei",
+ .help = "drop eligible indicator",
+ .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
+ tci, "\x10\x00")),
+ },
+ [ITEM_VLAN_VID] = {
+ .name = "vid",
+ .help = "VLAN identifier",
+ .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
+ tci, "\x0f\xff")),
+ },
+ [ITEM_IPV4] = {
+ .name = "ipv4",
+ .help = "match IPv4 header",
+ .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
+ .next = NEXT(item_ipv4),
+ .call = parse_vc,
+ },
+ [ITEM_IPV4_TOS] = {
+ .name = "tos",
+ .help = "type of service",
+ .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
+ hdr.type_of_service)),
+ },
+ [ITEM_IPV4_TTL] = {
+ .name = "ttl",
+ .help = "time to live",
+ .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
+ hdr.time_to_live)),
+ },
+ [ITEM_IPV4_PROTO] = {
+ .name = "proto",
+ .help = "next protocol ID",
+ .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
+ hdr.next_proto_id)),
+ },
+ [ITEM_IPV4_SRC] = {
+ .name = "src",
+ .help = "source address",
+ .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
+ hdr.src_addr)),
+ },
+ [ITEM_IPV4_DST] = {
+ .name = "dst",
+ .help = "destination address",
+ .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
+ hdr.dst_addr)),
+ },
+ [ITEM_IPV6] = {
+ .name = "ipv6",
+ .help = "match IPv6 header",
+ .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
+ .next = NEXT(item_ipv6),
+ .call = parse_vc,
+ },
+ [ITEM_IPV6_TC] = {
+ .name = "tc",
+ .help = "traffic class",
+ .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
+ hdr.vtc_flow,
+ "\x0f\xf0\x00\x00")),
+ },
+ [ITEM_IPV6_FLOW] = {
+ .name = "flow",
+ .help = "flow label",
+ .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
+ hdr.vtc_flow,
+ "\x00\x0f\xff\xff")),
+ },
+ [ITEM_IPV6_PROTO] = {
+ .name = "proto",
+ .help = "protocol (next header)",
+ .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
+ hdr.proto)),
+ },
+ [ITEM_IPV6_HOP] = {
+ .name = "hop",
+ .help = "hop limit",
+ .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
+ hdr.hop_limits)),
+ },
+ [ITEM_IPV6_SRC] = {
+ .name = "src",
+ .help = "source address",
+ .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
+ hdr.src_addr)),
+ },
+ [ITEM_IPV6_DST] = {
+ .name = "dst",
+ .help = "destination address",
+ .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
+ hdr.dst_addr)),
+ },
+ [ITEM_ICMP] = {
+ .name = "icmp",
+ .help = "match ICMP header",
+ .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
+ .next = NEXT(item_icmp),
+ .call = parse_vc,
+ },
+ [ITEM_ICMP_TYPE] = {
+ .name = "type",
+ .help = "ICMP packet type",
+ .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
+ hdr.icmp_type)),
+ },
+ [ITEM_ICMP_CODE] = {
+ .name = "code",
+ .help = "ICMP packet code",
+ .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
+ hdr.icmp_code)),
+ },
+ [ITEM_UDP] = {
+ .name = "udp",
+ .help = "match UDP header",
+ .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
+ .next = NEXT(item_udp),
+ .call = parse_vc,
+ },
+ [ITEM_UDP_SRC] = {
+ .name = "src",
+ .help = "UDP source port",
+ .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
+ hdr.src_port)),
+ },
+ [ITEM_UDP_DST] = {
+ .name = "dst",
+ .help = "UDP destination port",
+ .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
+ hdr.dst_port)),
+ },
+ [ITEM_TCP] = {
+ .name = "tcp",
+ .help = "match TCP header",
+ .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
+ .next = NEXT(item_tcp),
+ .call = parse_vc,
+ },
+ [ITEM_TCP_SRC] = {
+ .name = "src",
+ .help = "TCP source port",
+ .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
+ hdr.src_port)),
+ },
+ [ITEM_TCP_DST] = {
+ .name = "dst",
+ .help = "TCP destination port",
+ .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
+ hdr.dst_port)),
+ },
+ [ITEM_SCTP] = {
+ .name = "sctp",
+ .help = "match SCTP header",
+ .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
+ .next = NEXT(item_sctp),
+ .call = parse_vc,
+ },
+ [ITEM_SCTP_SRC] = {
+ .name = "src",
+ .help = "SCTP source port",
+ .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
+ hdr.src_port)),
+ },
+ [ITEM_SCTP_DST] = {
+ .name = "dst",
+ .help = "SCTP destination port",
+ .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
+ hdr.dst_port)),
+ },
+ [ITEM_SCTP_TAG] = {
+ .name = "tag",
+ .help = "validation tag",
+ .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
+ hdr.tag)),
+ },
+ [ITEM_SCTP_CKSUM] = {
+ .name = "cksum",
+ .help = "checksum",
+ .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
+ hdr.cksum)),
+ },
+ [ITEM_VXLAN] = {
+ .name = "vxlan",
+ .help = "match VXLAN header",
+ .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
+ .next = NEXT(item_vxlan),
+ .call = parse_vc,
+ },
+ [ITEM_VXLAN_VNI] = {
+ .name = "vni",
+ .help = "VXLAN identifier",
+ .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
+ },
+ [ITEM_E_TAG] = {
+ .name = "e_tag",
+ .help = "match E-Tag header",
+ .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
+ .next = NEXT(item_e_tag),
+ .call = parse_vc,
+ },
+ [ITEM_E_TAG_GRP_ECID_B] = {
+ .name = "grp_ecid_b",
+ .help = "GRP and E-CID base",
+ .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
+ rsvd_grp_ecid_b,
+ "\x3f\xff")),
+ },
+ [ITEM_NVGRE] = {
+ .name = "nvgre",
+ .help = "match NVGRE header",
+ .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
+ .next = NEXT(item_nvgre),
+ .call = parse_vc,
+ },
+ [ITEM_NVGRE_TNI] = {
+ .name = "tni",
+ .help = "virtual subnet ID",
+ .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
+ },
+ [ITEM_MPLS] = {
+ .name = "mpls",
+ .help = "match MPLS header",
+ .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
+ .next = NEXT(item_mpls),
+ .call = parse_vc,
+ },
+ [ITEM_MPLS_LABEL] = {
+ .name = "label",
+ .help = "MPLS label",
+ .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
+ label_tc_s,
+ "\xff\xff\xf0")),
+ },
+ [ITEM_GRE] = {
+ .name = "gre",
+ .help = "match GRE header",
+ .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
+ .next = NEXT(item_gre),
+ .call = parse_vc,
+ },
+ [ITEM_GRE_PROTO] = {
+ .name = "protocol",
+ .help = "GRE protocol type",
+ .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
+ protocol)),
+ },
+ /* Validate/create actions. */
+ [ACTIONS] = {
+ .name = "actions",
+ .help = "submit a list of associated actions",
+ .next = NEXT(next_action),
+ .call = parse_vc,
+ },
+ [ACTION_NEXT] = {
+ .name = "/",
+ .help = "specify next action",
+ .next = NEXT(next_action),
+ },
+ [ACTION_END] = {
+ .name = "end",
+ .help = "end list of actions",
+ .priv = PRIV_ACTION(END, 0),
+ .call = parse_vc,
+ },
+ [ACTION_VOID] = {
+ .name = "void",
+ .help = "no-op action",
+ .priv = PRIV_ACTION(VOID, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_PASSTHRU] = {
+ .name = "passthru",
+ .help = "let subsequent rule process matched packets",
+ .priv = PRIV_ACTION(PASSTHRU, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_MARK] = {
+ .name = "mark",
+ .help = "attach 32 bit value to packets",
+ .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
+ .next = NEXT(action_mark),
+ .call = parse_vc,
+ },
+ [ACTION_MARK_ID] = {
+ .name = "id",
+ .help = "32 bit value to return with packets",
+ .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_FLAG] = {
+ .name = "flag",
+ .help = "flag packets",
+ .priv = PRIV_ACTION(FLAG, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_QUEUE] = {
+ .name = "queue",
+ .help = "assign packets to a given queue index",
+ .priv = PRIV_ACTION(QUEUE,
+ sizeof(struct rte_flow_action_queue)),
+ .next = NEXT(action_queue),
+ .call = parse_vc,
+ },
+ [ACTION_QUEUE_INDEX] = {
+ .name = "index",
+ .help = "queue index to use",
+ .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_DROP] = {
+ .name = "drop",
+ .help = "drop packets (note: passthru has priority)",
+ .priv = PRIV_ACTION(DROP, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_COUNT] = {
+ .name = "count",
+ .help = "enable counters for this rule",
+ .priv = PRIV_ACTION(COUNT, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_DUP] = {
+ .name = "dup",
+ .help = "duplicate packets to a given queue index",
+ .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
+ .next = NEXT(action_dup),
+ .call = parse_vc,
+ },
+ [ACTION_DUP_INDEX] = {
+ .name = "index",
+ .help = "queue index to duplicate packets to",
+ .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_RSS] = {
+ .name = "rss",
+ .help = "spread packets among several queues",
+ .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
+ .next = NEXT(action_rss),
+ .call = parse_vc,
+ },
+ [ACTION_RSS_QUEUES] = {
+ .name = "queues",
+ .help = "queue indices to use",
+ .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_RSS_QUEUE] = {
+ .name = "{queue}",
+ .help = "queue index",
+ .call = parse_vc_action_rss_queue,
+ .comp = comp_vc_action_rss_queue,
+ },
+ [ACTION_PF] = {
+ .name = "pf",
+ .help = "redirect packets to physical device function",
+ .priv = PRIV_ACTION(PF, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_VF] = {
+ .name = "vf",
+ .help = "redirect packets to virtual device function",
+ .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
+ .next = NEXT(action_vf),
+ .call = parse_vc,
+ },
+ [ACTION_VF_ORIGINAL] = {
+ .name = "original",
+ .help = "use original VF ID if possible",
+ .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
+ .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
+ original, 1)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_VF_ID] = {
+ .name = "id",
+ .help = "VF ID to redirect packets to",
+ .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
+ .call = parse_vc_conf,
+ },
+};
+
+/** Remove and return last entry from argument stack. */
+static const struct arg *
+pop_args(struct context *ctx)
+{
+ return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
+}
+
+/** Add entry on top of the argument stack. */
+static int
+push_args(struct context *ctx, const struct arg *arg)
+{
+ if (ctx->args_num == CTX_STACK_SIZE)
+ return -1;
+ ctx->args[ctx->args_num++] = arg;
+ return 0;
+}
+
+/** Spread value into buffer according to bit-mask. */
+static size_t
+arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
+{
+ uint32_t i = arg->size;
+ uint32_t end = 0;
+ int sub = 1;
+ int add = 0;
+ size_t len = 0;
+
+ if (!arg->mask)
+ return 0;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ if (!arg->hton) {
+ i = 0;
+ end = arg->size;
+ sub = 0;
+ add = 1;
+ }
+#endif
+ while (i != end) {
+ unsigned int shift = 0;
+ uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
+
+ for (shift = 0; arg->mask[i] >> shift; ++shift) {
+ if (!(arg->mask[i] & (1 << shift)))
+ continue;
+ ++len;
+ if (!dst)
+ continue;
+ *buf &= ~(1 << shift);
+ *buf |= (val & 1) << shift;
+ val >>= 1;
+ }
+ i += add;
+ }
+ return len;
+}
+
+/**
+ * Parse a prefix length and generate a bit-mask.
+ *
+ * Last argument (ctx->args) is retrieved to determine mask size, storage
+ * location and whether the result must use network byte ordering.
+ */
+static int
+parse_prefix(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ const struct arg *arg = pop_args(ctx);
+ static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
+ char *end;
+ uintmax_t u;
+ unsigned int bytes;
+ unsigned int extra;
+
+ (void)token;
+ /* Argument is expected. */
+ if (!arg)
+ return -1;
+ errno = 0;
+ u = strtoumax(str, &end, 0);
+ if (errno || (size_t)(end - str) != len)
+ goto error;
+ if (arg->mask) {
+ uintmax_t v = 0;
+
+ extra = arg_entry_bf_fill(NULL, 0, arg);
+ if (u > extra)
+ goto error;
+ if (!ctx->object)
+ return len;
+ extra -= u;
+ while (u--)
+ (v <<= 1, v |= 1);
+ v <<= extra;
+ if (!arg_entry_bf_fill(ctx->object, v, arg) ||
+ !arg_entry_bf_fill(ctx->objmask, -1, arg))
+ goto error;
+ return len;
+ }
+ bytes = u / 8;
+ extra = u % 8;
+ size = arg->size;
+ if (bytes > size || bytes + !!extra > size)
+ goto error;
+ if (!ctx->object)
+ return len;
+ buf = (uint8_t *)ctx->object + arg->offset;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ if (!arg->hton) {
+ memset((uint8_t *)buf + size - bytes, 0xff, bytes);
+ memset(buf, 0x00, size - bytes);
+ if (extra)
+ ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
+ } else
+#endif
+ {
+ memset(buf, 0xff, bytes);
+ memset((uint8_t *)buf + bytes, 0x00, size - bytes);
+ if (extra)
+ ((uint8_t *)buf)[bytes] = conv[extra];
+ }
+ if (ctx->objmask)
+ memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
+ return len;
+error:
+ push_args(ctx, arg);
+ return -1;
+}
+
+/** Default parsing function for token name matching. */
+static int
+parse_default(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ (void)ctx;
+ (void)buf;
+ (void)size;
+ if (strncmp(str, token->name, len))
+ return -1;
+ return len;
+}
+
+/** Parse flow command, initialize output buffer for subsequent tokens. */
+static int
+parse_init(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+
+ /* Token name must match. */
+ if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+ return -1;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return len;
+ /* Make sure buffer is large enough. */
+ if (size < sizeof(*out))
+ return -1;
+ /* Initialize buffer. */
+ memset(out, 0x00, sizeof(*out));
+ memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
+ ctx->objdata = 0;
+ ctx->object = out;
+ ctx->objmask = NULL;
+ return len;
+}
+
+/** Parse tokens for validate/create commands. */
+static int
+parse_vc(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ uint8_t *data;
+ uint32_t data_size;
+
+ /* Token name must match. */
+ if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+ return -1;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return len;
+ if (!out->command) {
+ if (ctx->curr != VALIDATE && ctx->curr != CREATE)
+ return -1;
+ if (sizeof(*out) > size)
+ return -1;
+ out->command = ctx->curr;
+ ctx->objdata = 0;
+ ctx->object = out;
+ ctx->objmask = NULL;
+ out->args.vc.data = (uint8_t *)out + size;
+ return len;
+ }
+ ctx->objdata = 0;
+ ctx->object = &out->args.vc.attr;
+ ctx->objmask = NULL;
+ switch (ctx->curr) {
+ case GROUP:
+ case PRIORITY:
+ return len;
+ case INGRESS:
+ out->args.vc.attr.ingress = 1;
+ return len;
+ case EGRESS:
+ out->args.vc.attr.egress = 1;
+ return len;
+ case PATTERN:
+ out->args.vc.pattern =
+ (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
+ sizeof(double));
+ ctx->object = out->args.vc.pattern;
+ ctx->objmask = NULL;
+ return len;
+ case ACTIONS:
+ out->args.vc.actions =
+ (void *)RTE_ALIGN_CEIL((uintptr_t)
+ (out->args.vc.pattern +
+ out->args.vc.pattern_n),
+ sizeof(double));
+ ctx->object = out->args.vc.actions;
+ ctx->objmask = NULL;
+ return len;
+ default:
+ if (!token->priv)
+ return -1;
+ break;
+ }
+ if (!out->args.vc.actions) {
+ const struct parse_item_priv *priv = token->priv;
+ struct rte_flow_item *item =
+ out->args.vc.pattern + out->args.vc.pattern_n;
+
+ data_size = priv->size * 3; /* spec, last, mask */
+ data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
+ (out->args.vc.data - data_size),
+ sizeof(double));
+ if ((uint8_t *)item + sizeof(*item) > data)
+ return -1;
+ *item = (struct rte_flow_item){
+ .type = priv->type,
+ };
+ ++out->args.vc.pattern_n;
+ ctx->object = item;
+ ctx->objmask = NULL;
+ } else {
+ const struct parse_action_priv *priv = token->priv;
+ struct rte_flow_action *action =
+ out->args.vc.actions + out->args.vc.actions_n;
+
+ data_size = priv->size; /* configuration */
+ data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
+ (out->args.vc.data - data_size),
+ sizeof(double));
+ if ((uint8_t *)action + sizeof(*action) > data)
+ return -1;
+ *action = (struct rte_flow_action){
+ .type = priv->type,
+ };
+ ++out->args.vc.actions_n;
+ ctx->object = action;
+ ctx->objmask = NULL;
+ }
+ memset(data, 0, data_size);
+ out->args.vc.data = data;
+ ctx->objdata = data_size;
+ return len;
+}
+
+/** Parse pattern item parameter type. */
+static int
+parse_vc_spec(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_item *item;
+ uint32_t data_size;
+ int index;
+ int objmask = 0;
+
+ (void)size;
+ /* Token name must match. */
+ if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+ return -1;
+ /* Parse parameter types. */
+ switch (ctx->curr) {
+ static const enum index prefix[] = NEXT_ENTRY(PREFIX);
+
+ case ITEM_PARAM_IS:
+ index = 0;
+ objmask = 1;
+ break;
+ case ITEM_PARAM_SPEC:
+ index = 0;
+ break;
+ case ITEM_PARAM_LAST:
+ index = 1;
+ break;
+ case ITEM_PARAM_PREFIX:
+ /* Modify next token to expect a prefix. */
+ if (ctx->next_num < 2)
+ return -1;
+ ctx->next[ctx->next_num - 2] = prefix;
+ /* Fall through. */
+ case ITEM_PARAM_MASK:
+ index = 2;
+ break;
+ default:
+ return -1;
+ }
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return len;
+ if (!out->args.vc.pattern_n)
+ return -1;
+ item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
+ data_size = ctx->objdata / 3; /* spec, last, mask */
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data + (data_size * index);
+ if (objmask) {
+ ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
+ item->mask = ctx->objmask;
+ } else
+ ctx->objmask = NULL;
+ /* Update relevant item pointer. */
+ *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
+ ctx->object;
+ return len;
+}
+
+/** Parse action configuration field. */
+static int
+parse_vc_conf(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+
+ (void)size;
+ /* Token name must match. */
+ if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+ return -1;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return len;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Update configuration pointer. */
+ action->conf = ctx->object;
+ return len;
+}
+
+/**
+ * Parse queue field for RSS action.
+ *
+ * Valid tokens are queue indices and the "end" token.
+ */
+static int
+parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
+ int ret;
+ int i;
+
+ (void)token;
+ (void)buf;
+ (void)size;
+ if (ctx->curr != ACTION_RSS_QUEUE)
+ return -1;
+ i = ctx->objdata >> 16;
+ if (!strncmp(str, "end", len)) {
+ ctx->objdata &= 0xffff;
+ return len;
+ }
+ if (i >= ACTION_RSS_NUM)
+ return -1;
+ if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
+ return -1;
+ ret = parse_int(ctx, token, str, len, NULL, 0);
+ if (ret < 0) {
+ pop_args(ctx);
+ return -1;
+ }
+ ++i;
+ ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
+ /* Repeat token. */
+ if (ctx->next_num == RTE_DIM(ctx->next))
+ return -1;
+ ctx->next[ctx->next_num++] = next;
+ if (!ctx->object)
+ return len;
+ ((struct rte_flow_action_rss *)ctx->object)->num = i;
+ return len;
+}
+
+/** Parse tokens for destroy command. */
+static int
+parse_destroy(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+
+ /* Token name must match. */
+ if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+ return -1;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return len;
+ if (!out->command) {
+ if (ctx->curr != DESTROY)
+ return -1;
+ if (sizeof(*out) > size)
+ return -1;
+ out->command = ctx->curr;
+ ctx->objdata = 0;
+ ctx->object = out;
+ ctx->objmask = NULL;
+ out->args.destroy.rule =
+ (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
+ sizeof(double));
+ return len;
+ }
+ if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
+ sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
+ return -1;
+ ctx->objdata = 0;
+ ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
+ ctx->objmask = NULL;
+ return len;
+}
+
+/** Parse tokens for flush command. */
+static int
+parse_flush(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+
+ /* Token name must match. */
+ if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+ return -1;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return len;
+ if (!out->command) {
+ if (ctx->curr != FLUSH)
+ return -1;
+ if (sizeof(*out) > size)
+ return -1;
+ out->command = ctx->curr;
+ ctx->objdata = 0;
+ ctx->object = out;
+ ctx->objmask = NULL;
+ }
+ return len;
+}
+
+/** Parse tokens for query command. */
+static int
+parse_query(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+
+ /* Token name must match. */
+ if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+ return -1;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return len;
+ if (!out->command) {
+ if (ctx->curr != QUERY)
+ return -1;
+ if (sizeof(*out) > size)
+ return -1;
+ out->command = ctx->curr;
+ ctx->objdata = 0;
+ ctx->object = out;
+ ctx->objmask = NULL;
+ }
+ return len;
+}
+
+/** Parse action names. */
+static int
+parse_action(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ const struct arg *arg = pop_args(ctx);
+ unsigned int i;
+
+ (void)size;
+ /* Argument is expected. */
+ if (!arg)
+ return -1;
+ /* Parse action name. */
+ for (i = 0; next_action[i]; ++i) {
+ const struct parse_action_priv *priv;
+
+ token = &token_list[next_action[i]];
+ if (strncmp(token->name, str, len))
+ continue;
+ priv = token->priv;
+ if (!priv)
+ goto error;
+ if (out)
+ memcpy((uint8_t *)ctx->object + arg->offset,
+ &priv->type,
+ arg->size);
+ return len;
+ }
+error:
+ push_args(ctx, arg);
+ return -1;
+}
+
+/** Parse tokens for list command. */
+static int
+parse_list(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+
+ /* Token name must match. */
+ if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+ return -1;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return len;
+ if (!out->command) {
+ if (ctx->curr != LIST)
+ return -1;
+ if (sizeof(*out) > size)
+ return -1;
+ out->command = ctx->curr;
+ ctx->objdata = 0;
+ ctx->object = out;
+ ctx->objmask = NULL;
+ out->args.list.group =
+ (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
+ sizeof(double));
+ return len;
+ }
+ if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
+ sizeof(*out->args.list.group)) > (uint8_t *)out + size)
+ return -1;
+ ctx->objdata = 0;
+ ctx->object = out->args.list.group + out->args.list.group_n++;
+ ctx->objmask = NULL;
+ return len;
+}
+
+/**
+ * Parse signed/unsigned integers 8 to 64-bit long.
+ *
+ * Last argument (ctx->args) is retrieved to determine integer type and
+ * storage location.
+ */
+static int
+parse_int(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ const struct arg *arg = pop_args(ctx);
+ uintmax_t u;
+ char *end;
+
+ (void)token;
+ /* Argument is expected. */
+ if (!arg)
+ return -1;
+ errno = 0;
+ u = arg->sign ?
+ (uintmax_t)strtoimax(str, &end, 0) :
+ strtoumax(str, &end, 0);
+ if (errno || (size_t)(end - str) != len)
+ goto error;
+ if (!ctx->object)
+ return len;
+ if (arg->mask) {
+ if (!arg_entry_bf_fill(ctx->object, u, arg) ||
+ !arg_entry_bf_fill(ctx->objmask, -1, arg))
+ goto error;
+ return len;
+ }
+ buf = (uint8_t *)ctx->object + arg->offset;
+ size = arg->size;
+objmask:
+ switch (size) {
+ case sizeof(uint8_t):
+ *(uint8_t *)buf = u;
+ break;
+ case sizeof(uint16_t):
+ *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
+ break;
+ case sizeof(uint8_t [3]):
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ if (!arg->hton) {
+ ((uint8_t *)buf)[0] = u;
+ ((uint8_t *)buf)[1] = u >> 8;
+ ((uint8_t *)buf)[2] = u >> 16;
+ break;
+ }
+#endif
+ ((uint8_t *)buf)[0] = u >> 16;
+ ((uint8_t *)buf)[1] = u >> 8;
+ ((uint8_t *)buf)[2] = u;
+ break;
+ case sizeof(uint32_t):
+ *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
+ break;
+ case sizeof(uint64_t):
+ *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
+ break;
+ default:
+ goto error;
+ }
+ if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
+ u = -1;
+ buf = (uint8_t *)ctx->objmask + arg->offset;
+ goto objmask;
+ }
+ return len;
+error:
+ push_args(ctx, arg);
+ return -1;
+}
+
+/**
+ * Parse a string.
+ *
+ * Two arguments (ctx->args) are retrieved from the stack to store data and
+ * its length (in that order).
+ */
+static int
+parse_string(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ const struct arg *arg_data = pop_args(ctx);
+ const struct arg *arg_len = pop_args(ctx);
+ char tmp[16]; /* Ought to be enough. */
+ int ret;
+
+ /* Arguments are expected. */
+ if (!arg_data)
+ return -1;
+ if (!arg_len) {
+ push_args(ctx, arg_data);
+ return -1;
+ }
+ size = arg_data->size;
+ /* Bit-mask fill is not supported. */
+ if (arg_data->mask || size < len)
+ goto error;
+ if (!ctx->object)
+ return len;
+ /* Let parse_int() fill length information first. */
+ ret = snprintf(tmp, sizeof(tmp), "%u", len);
+ if (ret < 0)
+ goto error;
+ push_args(ctx, arg_len);
+ ret = parse_int(ctx, token, tmp, ret, NULL, 0);
+ if (ret < 0) {
+ pop_args(ctx);
+ goto error;
+ }
+ buf = (uint8_t *)ctx->object + arg_data->offset;
+ /* Output buffer is not necessarily NUL-terminated. */
+ memcpy(buf, str, len);
+ memset((uint8_t *)buf + len, 0x55, size - len);
+ if (ctx->objmask)
+ memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
+ return len;
+error:
+ push_args(ctx, arg_len);
+ push_args(ctx, arg_data);
+ return -1;
+}
+
+/**
+ * Parse a MAC address.
+ *
+ * Last argument (ctx->args) is retrieved to determine storage size and
+ * location.
+ */
+static int
+parse_mac_addr(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ const struct arg *arg = pop_args(ctx);
+ struct ether_addr tmp;
+ int ret;
+
+ (void)token;
+ /* Argument is expected. */
+ if (!arg)
+ return -1;
+ size = arg->size;
+ /* Bit-mask fill is not supported. */
+ if (arg->mask || size != sizeof(tmp))
+ goto error;
+ /* Only network endian is supported. */
+ if (!arg->hton)
+ goto error;
+ ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
+ if (ret < 0 || (unsigned int)ret != len)
+ goto error;
+ if (!ctx->object)
+ return len;
+ buf = (uint8_t *)ctx->object + arg->offset;
+ memcpy(buf, &tmp, size);
+ if (ctx->objmask)
+ memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
+ return len;
+error:
+ push_args(ctx, arg);
+ return -1;
+}
+
+/**
+ * Parse an IPv4 address.
+ *
+ * Last argument (ctx->args) is retrieved to determine storage size and
+ * location.
+ */
+static int
+parse_ipv4_addr(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ const struct arg *arg = pop_args(ctx);
+ char str2[len + 1];
+ struct in_addr tmp;
+ int ret;
+
+ /* Argument is expected. */
+ if (!arg)
+ return -1;
+ size = arg->size;
+ /* Bit-mask fill is not supported. */
+ if (arg->mask || size != sizeof(tmp))
+ goto error;
+ /* Only network endian is supported. */
+ if (!arg->hton)
+ goto error;
+ memcpy(str2, str, len);
+ str2[len] = '\0';
+ ret = inet_pton(AF_INET, str2, &tmp);
+ if (ret != 1) {
+ /* Attempt integer parsing. */
+ push_args(ctx, arg);
+ return parse_int(ctx, token, str, len, buf, size);
+ }
+ if (!ctx->object)
+ return len;
+ buf = (uint8_t *)ctx->object + arg->offset;
+ memcpy(buf, &tmp, size);
+ if (ctx->objmask)
+ memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
+ return len;
+error:
+ push_args(ctx, arg);
+ return -1;
+}
+
+/**
+ * Parse an IPv6 address.
+ *
+ * Last argument (ctx->args) is retrieved to determine storage size and
+ * location.
+ */
+static int
+parse_ipv6_addr(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ const struct arg *arg = pop_args(ctx);
+ char str2[len + 1];
+ struct in6_addr tmp;
+ int ret;
+
+ (void)token;
+ /* Argument is expected. */
+ if (!arg)
+ return -1;
+ size = arg->size;
+ /* Bit-mask fill is not supported. */
+ if (arg->mask || size != sizeof(tmp))
+ goto error;
+ /* Only network endian is supported. */
+ if (!arg->hton)
+ goto error;
+ memcpy(str2, str, len);
+ str2[len] = '\0';
+ ret = inet_pton(AF_INET6, str2, &tmp);
+ if (ret != 1)
+ goto error;
+ if (!ctx->object)
+ return len;
+ buf = (uint8_t *)ctx->object + arg->offset;
+ memcpy(buf, &tmp, size);
+ if (ctx->objmask)
+ memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
+ return len;
+error:
+ push_args(ctx, arg);
+ return -1;
+}
+
+/** Boolean values (even indices stand for false). */
+static const char *const boolean_name[] = {
+ "0", "1",
+ "false", "true",
+ "no", "yes",
+ "N", "Y",
+ NULL,
+};
+
+/**
+ * Parse a boolean value.
+ *
+ * Last argument (ctx->args) is retrieved to determine storage size and
+ * location.
+ */
+static int
+parse_boolean(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ const struct arg *arg = pop_args(ctx);
+ unsigned int i;
+ int ret;
+
+ /* Argument is expected. */
+ if (!arg)
+ return -1;
+ for (i = 0; boolean_name[i]; ++i)
+ if (!strncmp(str, boolean_name[i], len))
+ break;
+ /* Process token as integer. */
+ if (boolean_name[i])
+ str = i & 1 ? "1" : "0";
+ push_args(ctx, arg);
+ ret = parse_int(ctx, token, str, strlen(str), buf, size);
+ return ret > 0 ? (int)len : ret;
+}
+
+/** Parse port and update context. */
+static int
+parse_port(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = &(struct buffer){ .port = 0 };
+ int ret;
+
+ if (buf)
+ out = buf;
+ else {
+ ctx->objdata = 0;
+ ctx->object = out;
+ ctx->objmask = NULL;
+ size = sizeof(*out);
+ }
+ ret = parse_int(ctx, token, str, len, out, size);
+ if (ret >= 0)
+ ctx->port = out->port;
+ if (!buf)
+ ctx->object = NULL;
+ return ret;
+}
+
+/** No completion. */
+static int
+comp_none(struct context *ctx, const struct token *token,
+ unsigned int ent, char *buf, unsigned int size)
+{
+ (void)ctx;
+ (void)token;
+ (void)ent;
+ (void)buf;
+ (void)size;
+ return 0;
+}
+
+/** Complete boolean values. */
+static int
+comp_boolean(struct context *ctx, const struct token *token,
+ unsigned int ent, char *buf, unsigned int size)
+{
+ unsigned int i;
+
+ (void)ctx;
+ (void)token;
+ for (i = 0; boolean_name[i]; ++i)
+ if (buf && i == ent)
+ return snprintf(buf, size, "%s", boolean_name[i]);
+ if (buf)
+ return -1;
+ return i;
+}
+
+/** Complete action names. */
+static int
+comp_action(struct context *ctx, const struct token *token,
+ unsigned int ent, char *buf, unsigned int size)
+{
+ unsigned int i;
+
+ (void)ctx;
+ (void)token;
+ for (i = 0; next_action[i]; ++i)
+ if (buf && i == ent)
+ return snprintf(buf, size, "%s",
+ token_list[next_action[i]].name);
+ if (buf)
+ return -1;
+ return i;
+}
+
+/** Complete available ports. */
+static int
+comp_port(struct context *ctx, const struct token *token,
+ unsigned int ent, char *buf, unsigned int size)
+{
+ unsigned int i = 0;
+ portid_t p;
+
+ (void)ctx;
+ (void)token;
+ RTE_ETH_FOREACH_DEV(p) {
+ if (buf && i == ent)
+ return snprintf(buf, size, "%u", p);
+ ++i;
+ }
+ if (buf)
+ return -1;
+ return i;
+}
+
+/** Complete available rule IDs. */
+static int
+comp_rule_id(struct context *ctx, const struct token *token,
+ unsigned int ent, char *buf, unsigned int size)
+{
+ unsigned int i = 0;
+ struct rte_port *port;
+ struct port_flow *pf;
+
+ (void)token;
+ if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
+ ctx->port == (uint16_t)RTE_PORT_ALL)
+ return -1;
+ port = &ports[ctx->port];
+ for (pf = port->flow_list; pf != NULL; pf = pf->next) {
+ if (buf && i == ent)
+ return snprintf(buf, size, "%u", pf->id);
+ ++i;
+ }
+ if (buf)
+ return -1;
+ return i;
+}
+
+/** Complete queue field for RSS action. */
+static int
+comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
+ unsigned int ent, char *buf, unsigned int size)
+{
+ static const char *const str[] = { "", "end", NULL };
+ unsigned int i;
+
+ (void)ctx;
+ (void)token;
+ for (i = 0; str[i] != NULL; ++i)
+ if (buf && i == ent)
+ return snprintf(buf, size, "%s", str[i]);
+ if (buf)
+ return -1;
+ return i;
+}
+
+/** Internal context. */
+static struct context cmd_flow_context;
+
+/** Global parser instance (cmdline API). */
+cmdline_parse_inst_t cmd_flow;
+
+/** Initialize context. */
+static void
+cmd_flow_context_init(struct context *ctx)
+{
+ /* A full memset() is not necessary. */
+ ctx->curr = ZERO;
+ ctx->prev = ZERO;
+ ctx->next_num = 0;
+ ctx->args_num = 0;
+ ctx->reparse = 0;
+ ctx->eol = 0;
+ ctx->last = 0;
+ ctx->port = 0;
+ ctx->objdata = 0;
+ ctx->object = NULL;
+ ctx->objmask = NULL;
+}
+
+/** Parse a token (cmdline API). */
+static int
+cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
+ unsigned int size)
+{
+ struct context *ctx = &cmd_flow_context;
+ const struct token *token;
+ const enum index *list;
+ int len;
+ int i;
+
+ (void)hdr;
+ /* Restart as requested. */
+ if (ctx->reparse)
+ cmd_flow_context_init(ctx);
+ token = &token_list[ctx->curr];
+ /* Check argument length. */
+ ctx->eol = 0;
+ ctx->last = 1;
+ for (len = 0; src[len]; ++len)
+ if (src[len] == '#' || isspace(src[len]))
+ break;
+ if (!len)
+ return -1;
+ /* Last argument and EOL detection. */
+ for (i = len; src[i]; ++i)
+ if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
+ break;
+ else if (!isspace(src[i])) {
+ ctx->last = 0;
+ break;
+ }
+ for (; src[i]; ++i)
+ if (src[i] == '\r' || src[i] == '\n') {
+ ctx->eol = 1;
+ break;
+ }
+ /* Initialize context if necessary. */
+ if (!ctx->next_num) {
+ if (!token->next)
+ return 0;
+ ctx->next[ctx->next_num++] = token->next[0];
+ }
+ /* Process argument through candidates. */
+ ctx->prev = ctx->curr;
+ list = ctx->next[ctx->next_num - 1];
+ for (i = 0; list[i]; ++i) {
+ const struct token *next = &token_list[list[i]];
+ int tmp;
+
+ ctx->curr = list[i];
+ if (next->call)
+ tmp = next->call(ctx, next, src, len, result, size);
+ else
+ tmp = parse_default(ctx, next, src, len, result, size);
+ if (tmp == -1 || tmp != len)
+ continue;
+ token = next;
+ break;
+ }
+ if (!list[i])
+ return -1;
+ --ctx->next_num;
+ /* Push subsequent tokens if any. */
+ if (token->next)
+ for (i = 0; token->next[i]; ++i) {
+ if (ctx->next_num == RTE_DIM(ctx->next))
+ return -1;
+ ctx->next[ctx->next_num++] = token->next[i];
+ }
+ /* Push arguments if any. */
+ if (token->args)
+ for (i = 0; token->args[i]; ++i) {
+ if (ctx->args_num == RTE_DIM(ctx->args))
+ return -1;
+ ctx->args[ctx->args_num++] = token->args[i];
+ }
+ return len;
+}
+
+/** Return number of completion entries (cmdline API). */
+static int
+cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
+{
+ struct context *ctx = &cmd_flow_context;
+ const struct token *token = &token_list[ctx->curr];
+ const enum index *list;
+ int i;
+
+ (void)hdr;
+ /* Tell cmd_flow_parse() that context must be reinitialized. */
+ ctx->reparse = 1;
+ /* Count number of tokens in current list. */
+ if (ctx->next_num)
+ list = ctx->next[ctx->next_num - 1];
+ else
+ list = token->next[0];
+ for (i = 0; list[i]; ++i)
+ ;
+ if (!i)
+ return 0;
+ /*
+ * If there is a single token, use its completion callback, otherwise
+ * return the number of entries.
+ */
+ token = &token_list[list[0]];
+ if (i == 1 && token->comp) {
+ /* Save index for cmd_flow_get_help(). */
+ ctx->prev = list[0];
+ return token->comp(ctx, token, 0, NULL, 0);
+ }
+ return i;
+}
+
+/** Return a completion entry (cmdline API). */
+static int
+cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
+ char *dst, unsigned int size)
+{
+ struct context *ctx = &cmd_flow_context;
+ const struct token *token = &token_list[ctx->curr];
+ const enum index *list;
+ int i;
+
+ (void)hdr;
+ /* Tell cmd_flow_parse() that context must be reinitialized. */
+ ctx->reparse = 1;
+ /* Count number of tokens in current list. */
+ if (ctx->next_num)
+ list = ctx->next[ctx->next_num - 1];
+ else
+ list = token->next[0];
+ for (i = 0; list[i]; ++i)
+ ;
+ if (!i)
+ return -1;
+ /* If there is a single token, use its completion callback. */
+ token = &token_list[list[0]];
+ if (i == 1 && token->comp) {
+ /* Save index for cmd_flow_get_help(). */
+ ctx->prev = list[0];
+ return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
+ }
+ /* Otherwise make sure the index is valid and use defaults. */
+ if (index >= i)
+ return -1;
+ token = &token_list[list[index]];
+ snprintf(dst, size, "%s", token->name);
+ /* Save index for cmd_flow_get_help(). */
+ ctx->prev = list[index];
+ return 0;
+}
+
+/** Populate help strings for current token (cmdline API). */
+static int
+cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
+{
+ struct context *ctx = &cmd_flow_context;
+ const struct token *token = &token_list[ctx->prev];
+
+ (void)hdr;
+ /* Tell cmd_flow_parse() that context must be reinitialized. */
+ ctx->reparse = 1;
+ if (!size)
+ return -1;
+ /* Set token type and update global help with details. */
+ snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
+ if (token->help)
+ cmd_flow.help_str = token->help;
+ else
+ cmd_flow.help_str = token->name;
+ return 0;
+}
+
+/** Token definition template (cmdline API). */
+static struct cmdline_token_hdr cmd_flow_token_hdr = {
+ .ops = &(struct cmdline_token_ops){
+ .parse = cmd_flow_parse,
+ .complete_get_nb = cmd_flow_complete_get_nb,
+ .complete_get_elt = cmd_flow_complete_get_elt,
+ .get_help = cmd_flow_get_help,
+ },
+ .offset = 0,
+};
+
+/** Populate the next dynamic token. */
+static void
+cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
+ cmdline_parse_token_hdr_t *(*hdrs)[])
+{
+ struct context *ctx = &cmd_flow_context;
+
+ /* Always reinitialize context before requesting the first token. */
+ if (!(hdr - *hdrs))
+ cmd_flow_context_init(ctx);
+ /* Return NULL when no more tokens are expected. */
+ if (!ctx->next_num && ctx->curr) {
+ *hdr = NULL;
+ return;
+ }
+ /* Determine if command should end here. */
+ if (ctx->eol && ctx->last && ctx->next_num) {
+ const enum index *list = ctx->next[ctx->next_num - 1];
+ int i;
+
+ for (i = 0; list[i]; ++i) {
+ if (list[i] != END)
+ continue;
+ *hdr = NULL;
+ return;
+ }
+ }
+ *hdr = &cmd_flow_token_hdr;
+}
+
+/** Dispatch parsed buffer to function calls. */
+static void
+cmd_flow_parsed(const struct buffer *in)
+{
+ switch (in->command) {
+ case VALIDATE:
+ port_flow_validate(in->port, &in->args.vc.attr,
+ in->args.vc.pattern, in->args.vc.actions);
+ break;
+ case CREATE:
+ port_flow_create(in->port, &in->args.vc.attr,
+ in->args.vc.pattern, in->args.vc.actions);
+ break;
+ case DESTROY:
+ port_flow_destroy(in->port, in->args.destroy.rule_n,
+ in->args.destroy.rule);
+ break;
+ case FLUSH:
+ port_flow_flush(in->port);
+ break;
+ case QUERY:
+ port_flow_query(in->port, in->args.query.rule,
+ in->args.query.action);
+ break;
+ case LIST:
+ port_flow_list(in->port, in->args.list.group_n,
+ in->args.list.group);
+ break;
+ default:
+ break;
+ }
+}
+
+/** Token generator and output processing callback (cmdline API). */
+static void
+cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
+{
+ if (cl == NULL)
+ cmd_flow_tok(arg0, arg2);
+ else
+ cmd_flow_parsed(arg0);
+}
+
+/** Global parser instance (cmdline API). */
+cmdline_parse_inst_t cmd_flow = {
+ .f = cmd_flow_cb,
+ .data = NULL, /**< Unused. */
+ .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
+ .tokens = {
+ NULL,
+ }, /**< Tokens are returned by cmd_flow_tok(). */
+};
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 36c47ab5..4d873cdd 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -92,6 +92,11 @@
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
+#include <rte_flow.h>
+#include <rte_errno.h>
+#ifdef RTE_LIBRTE_IXGBE_PMD
+#include <rte_pmd_ixgbe.h>
+#endif
#include "testpmd.h"
@@ -169,7 +174,7 @@ nic_stats_display(portid_t port_id)
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
printf("Valid port range is [0");
- FOREACH_PORT(pid, ports)
+ RTE_ETH_FOREACH_DEV(pid)
printf(", %d", pid);
printf("]\n");
return;
@@ -247,7 +252,7 @@ nic_stats_clear(portid_t port_id)
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
printf("Valid port range is [0");
- FOREACH_PORT(pid, ports)
+ RTE_ETH_FOREACH_DEV(pid)
printf(", %d", pid);
printf("]\n");
return;
@@ -329,7 +334,7 @@ nic_stats_mapping_display(portid_t port_id)
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
printf("Valid port range is [0");
- FOREACH_PORT(pid, ports)
+ RTE_ETH_FOREACH_DEV(pid)
printf(", %d", pid);
printf("]\n");
return;
@@ -444,20 +449,24 @@ port_infos_display(portid_t port_id)
struct rte_mempool * mp;
static const char *info_border = "*********************";
portid_t pid;
+ uint16_t mtu;
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
printf("Valid port range is [0");
- FOREACH_PORT(pid, ports)
+ RTE_ETH_FOREACH_DEV(pid)
printf(", %d", pid);
printf("]\n");
return;
}
port = &ports[port_id];
rte_eth_link_get_nowait(port_id, &link);
+ memset(&dev_info, 0, sizeof(dev_info));
+ rte_eth_dev_info_get(port_id, &dev_info);
printf("\n%s Infos for port %-2d %s\n",
info_border, port_id, info_border);
rte_eth_macaddr_get(port_id, &mac_addr);
print_ethaddr("MAC address: ", &mac_addr);
+ printf("\nDriver name: %s", dev_info.driver_name);
printf("\nConnect to socket: %u", port->socket_id);
if (port_numa[port_id] != NUMA_NO_CONFIG) {
@@ -472,6 +481,10 @@ port_infos_display(portid_t port_id)
printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex"));
+
+ if (!rte_eth_dev_get_mtu(port_id, &mtu))
+ printf("MTU: %u\n", mtu);
+
printf("Promiscuous mode: %s\n",
rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
printf("Allmulticast mode: %s\n",
@@ -500,8 +513,6 @@ port_infos_display(portid_t port_id)
printf(" qinq(extend) off \n");
}
- memset(&dev_info, 0, sizeof(dev_info));
- rte_eth_dev_info_get(port_id, &dev_info);
if (dev_info.hash_key_size > 0)
printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
if (dev_info.reta_size > 0)
@@ -537,13 +548,189 @@ port_infos_display(portid_t port_id)
printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
}
+void
+port_offload_cap_display(portid_t port_id)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ static const char *info_border = "************";
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ dev = &rte_eth_devices[port_id];
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ printf("\n%s Port %d supported offload features: %s\n",
+ info_border, port_id, info_border);
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ printf("VLAN stripped: ");
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
+ printf("Double VLANs stripped: ");
+ if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+ printf("RX IPv4 checksum: ");
+ if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
+ printf("RX UDP checksum: ");
+ if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
+ printf("RX TCP checksum: ");
+ if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
+ printf("RX Outer IPv4 checksum: on");
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
+ printf("Large receive offload: ");
+ if (dev->data->dev_conf.rxmode.enable_lro)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
+ printf("VLAN insert: ");
+ if (ports[port_id].tx_ol_flags &
+ TESTPMD_TX_OFFLOAD_INSERT_VLAN)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
+ printf("Double VLANs insert: ");
+ if (ports[port_id].tx_ol_flags &
+ TESTPMD_TX_OFFLOAD_INSERT_QINQ)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+ printf("TX IPv4 checksum: ");
+ if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
+ printf("TX UDP checksum: ");
+ if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
+ printf("TX TCP checksum: ");
+ if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
+ printf("TX SCTP checksum: ");
+ if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
+ printf("TX Outer IPv4 checksum: ");
+ if (ports[port_id].tx_ol_flags &
+ TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
+ printf("TX TCP segmentation: ");
+ if (ports[port_id].tso_segsz != 0)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
+ printf("TX UDP segmentation: ");
+ if (ports[port_id].tso_segsz != 0)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
+ printf("TSO for VXLAN tunnel packet: ");
+ if (ports[port_id].tunnel_tso_segsz)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
+ printf("TSO for GRE tunnel packet: ");
+ if (ports[port_id].tunnel_tso_segsz)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
+ printf("TSO for IPIP tunnel packet: ");
+ if (ports[port_id].tunnel_tso_segsz)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
+ printf("TSO for GENEVE tunnel packet: ");
+ if (ports[port_id].tunnel_tso_segsz)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+}
+
int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
if (port_id == (portid_t)RTE_PORT_ALL)
return 0;
- if (port_id < RTE_MAX_ETHPORTS && ports[port_id].enabled)
+ if (rte_eth_dev_is_valid_port(port_id))
return 0;
if (warning == ENABLED_WARN)
@@ -750,6 +937,504 @@ port_mtu_set(portid_t port_id, uint16_t mtu)
printf("Set MTU failed. diag=%d\n", diag);
}
+/* Generic flow management functions. */
+
+/** Generate flow_item[] entry. */
+#define MK_FLOW_ITEM(t, s) \
+ [RTE_FLOW_ITEM_TYPE_ ## t] = { \
+ .name = # t, \
+ .size = s, \
+ }
+
+/** Information about known flow pattern items. */
+static const struct {
+ const char *name;
+ size_t size;
+} flow_item[] = {
+ MK_FLOW_ITEM(END, 0),
+ MK_FLOW_ITEM(VOID, 0),
+ MK_FLOW_ITEM(INVERT, 0),
+ MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
+ MK_FLOW_ITEM(PF, 0),
+ MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
+ MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
+ MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
+ MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
+ MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
+ MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
+ MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
+ MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
+ MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
+ MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
+ MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
+ MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
+ MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
+ MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
+};
+
+/** Compute storage space needed by item specification. */
+static void
+flow_item_spec_size(const struct rte_flow_item *item,
+ size_t *size, size_t *pad)
+{
+ if (!item->spec)
+ goto empty;
+ switch (item->type) {
+ union {
+ const struct rte_flow_item_raw *raw;
+ } spec;
+
+ case RTE_FLOW_ITEM_TYPE_RAW:
+ spec.raw = item->spec;
+ *size = offsetof(struct rte_flow_item_raw, pattern) +
+ spec.raw->length * sizeof(*spec.raw->pattern);
+ break;
+ default:
+empty:
+ *size = 0;
+ break;
+ }
+ *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
+}
+
+/** Generate flow_action[] entry. */
+#define MK_FLOW_ACTION(t, s) \
+ [RTE_FLOW_ACTION_TYPE_ ## t] = { \
+ .name = # t, \
+ .size = s, \
+ }
+
+/** Information about known flow actions. */
+static const struct {
+ const char *name;
+ size_t size;
+} flow_action[] = {
+ MK_FLOW_ACTION(END, 0),
+ MK_FLOW_ACTION(VOID, 0),
+ MK_FLOW_ACTION(PASSTHRU, 0),
+ MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
+ MK_FLOW_ACTION(FLAG, 0),
+ MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
+ MK_FLOW_ACTION(DROP, 0),
+ MK_FLOW_ACTION(COUNT, 0),
+ MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
+ MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
+ MK_FLOW_ACTION(PF, 0),
+ MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
+};
+
+/** Compute storage space needed by action configuration. */
+static void
+flow_action_conf_size(const struct rte_flow_action *action,
+ size_t *size, size_t *pad)
+{
+ if (!action->conf)
+ goto empty;
+ switch (action->type) {
+ union {
+ const struct rte_flow_action_rss *rss;
+ } conf;
+
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ conf.rss = action->conf;
+ *size = offsetof(struct rte_flow_action_rss, queue) +
+ conf.rss->num * sizeof(*conf.rss->queue);
+ break;
+ default:
+empty:
+ *size = 0;
+ break;
+ }
+ *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
+}
+
+/** Generate a port_flow entry from attributes/pattern/actions. */
+static struct port_flow *
+port_flow_new(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *action;
+ struct port_flow *pf = NULL;
+ size_t tmp;
+ size_t pad;
+ size_t off1 = 0;
+ size_t off2 = 0;
+ int err = ENOTSUP;
+
+store:
+ item = pattern;
+ if (pf)
+ pf->pattern = (void *)&pf->data[off1];
+ do {
+ struct rte_flow_item *dst = NULL;
+
+ if ((unsigned int)item->type >= RTE_DIM(flow_item) ||
+ !flow_item[item->type].name)
+ goto notsup;
+ if (pf)
+ dst = memcpy(pf->data + off1, item, sizeof(*item));
+ off1 += sizeof(*item);
+ flow_item_spec_size(item, &tmp, &pad);
+ if (item->spec) {
+ if (pf)
+ dst->spec = memcpy(pf->data + off2,
+ item->spec, tmp);
+ off2 += tmp + pad;
+ }
+ if (item->last) {
+ if (pf)
+ dst->last = memcpy(pf->data + off2,
+ item->last, tmp);
+ off2 += tmp + pad;
+ }
+ if (item->mask) {
+ if (pf)
+ dst->mask = memcpy(pf->data + off2,
+ item->mask, tmp);
+ off2 += tmp + pad;
+ }
+ off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
+ } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
+ off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
+ action = actions;
+ if (pf)
+ pf->actions = (void *)&pf->data[off1];
+ do {
+ struct rte_flow_action *dst = NULL;
+
+ if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
+ !flow_action[action->type].name)
+ goto notsup;
+ if (pf)
+ dst = memcpy(pf->data + off1, action, sizeof(*action));
+ off1 += sizeof(*action);
+ flow_action_conf_size(action, &tmp, &pad);
+ if (action->conf) {
+ if (pf)
+ dst->conf = memcpy(pf->data + off2,
+ action->conf, tmp);
+ off2 += tmp + pad;
+ }
+ off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
+ } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
+ if (pf != NULL)
+ return pf;
+ off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
+ tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double));
+ pf = calloc(1, tmp + off1 + off2);
+ if (pf == NULL)
+ err = errno;
+ else {
+ *pf = (const struct port_flow){
+ .size = tmp + off1 + off2,
+ .attr = *attr,
+ };
+ tmp -= offsetof(struct port_flow, data);
+ off2 = tmp + off1;
+ off1 = tmp;
+ goto store;
+ }
+notsup:
+ rte_errno = err;
+ return NULL;
+}
+
+/** Print a message out of a flow error. */
+static int
+port_flow_complain(struct rte_flow_error *error)
+{
+ static const char *const errstrlist[] = {
+ [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
+ [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
+ [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
+ [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
+ [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
+ [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
+ [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
+ [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
+ [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
+ };
+ const char *errstr;
+ char buf[32];
+ int err = rte_errno;
+
+ if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
+ !errstrlist[error->type])
+ errstr = "unknown type";
+ else
+ errstr = errstrlist[error->type];
+ printf("Caught error type %d (%s): %s%s\n",
+ error->type, errstr,
+ error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
+ error->cause), buf) : "",
+ error->message ? error->message : "(no stated reason)");
+ return -err;
+}
+
+/** Validate flow rule. */
+int
+port_flow_validate(portid_t port_id,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions)
+{
+ struct rte_flow_error error;
+
+ /* Poisoning to make sure PMDs update it in case of error. */
+ memset(&error, 0x11, sizeof(error));
+ if (rte_flow_validate(port_id, attr, pattern, actions, &error))
+ return port_flow_complain(&error);
+ printf("Flow rule validated\n");
+ return 0;
+}
+
+/** Create flow rule. */
+int
+port_flow_create(portid_t port_id,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions)
+{
+ struct rte_flow *flow;
+ struct rte_port *port;
+ struct port_flow *pf;
+ uint32_t id;
+ struct rte_flow_error error;
+
+ /* Poisoning to make sure PMDs update it in case of error. */
+ memset(&error, 0x22, sizeof(error));
+ flow = rte_flow_create(port_id, attr, pattern, actions, &error);
+ if (!flow)
+ return port_flow_complain(&error);
+ port = &ports[port_id];
+ if (port->flow_list) {
+ if (port->flow_list->id == UINT32_MAX) {
+ printf("Highest rule ID is already assigned, delete"
+ " it first");
+ rte_flow_destroy(port_id, flow, NULL);
+ return -ENOMEM;
+ }
+ id = port->flow_list->id + 1;
+ } else
+ id = 0;
+ pf = port_flow_new(attr, pattern, actions);
+ if (!pf) {
+ int err = rte_errno;
+
+ printf("Cannot allocate flow: %s\n", rte_strerror(err));
+ rte_flow_destroy(port_id, flow, NULL);
+ return -err;
+ }
+ pf->next = port->flow_list;
+ pf->id = id;
+ pf->flow = flow;
+ port->flow_list = pf;
+ printf("Flow rule #%u created\n", pf->id);
+ return 0;
+}
+
+/** Destroy a number of flow rules. */
+int
+port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
+{
+ struct rte_port *port;
+ struct port_flow **tmp;
+ uint32_t c = 0;
+ int ret = 0;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+ port_id == (portid_t)RTE_PORT_ALL)
+ return -EINVAL;
+ port = &ports[port_id];
+ tmp = &port->flow_list;
+ while (*tmp) {
+ uint32_t i;
+
+ for (i = 0; i != n; ++i) {
+ struct rte_flow_error error;
+ struct port_flow *pf = *tmp;
+
+ if (rule[i] != pf->id)
+ continue;
+ /*
+ * Poisoning to make sure PMDs update it in case
+ * of error.
+ */
+ memset(&error, 0x33, sizeof(error));
+ if (rte_flow_destroy(port_id, pf->flow, &error)) {
+ ret = port_flow_complain(&error);
+ continue;
+ }
+ printf("Flow rule #%u destroyed\n", pf->id);
+ *tmp = pf->next;
+ free(pf);
+ break;
+ }
+ if (i == n)
+ tmp = &(*tmp)->next;
+ ++c;
+ }
+ return ret;
+}
+
+/** Remove all flow rules. */
+int
+port_flow_flush(portid_t port_id)
+{
+ struct rte_flow_error error;
+ struct rte_port *port;
+ int ret = 0;
+
+ /* Poisoning to make sure PMDs update it in case of error. */
+ memset(&error, 0x44, sizeof(error));
+ if (rte_flow_flush(port_id, &error)) {
+ ret = port_flow_complain(&error);
+ if (port_id_is_invalid(port_id, DISABLED_WARN) ||
+ port_id == (portid_t)RTE_PORT_ALL)
+ return ret;
+ }
+ port = &ports[port_id];
+ while (port->flow_list) {
+ struct port_flow *pf = port->flow_list->next;
+
+ free(port->flow_list);
+ port->flow_list = pf;
+ }
+ return ret;
+}
+
+/** Query a flow rule. */
+int
+port_flow_query(portid_t port_id, uint32_t rule,
+ enum rte_flow_action_type action)
+{
+ struct rte_flow_error error;
+ struct rte_port *port;
+ struct port_flow *pf;
+ const char *name;
+ union {
+ struct rte_flow_query_count count;
+ } query;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+ port_id == (portid_t)RTE_PORT_ALL)
+ return -EINVAL;
+ port = &ports[port_id];
+ for (pf = port->flow_list; pf; pf = pf->next)
+ if (pf->id == rule)
+ break;
+ if (!pf) {
+ printf("Flow rule #%u not found\n", rule);
+ return -ENOENT;
+ }
+ if ((unsigned int)action >= RTE_DIM(flow_action) ||
+ !flow_action[action].name)
+ name = "unknown";
+ else
+ name = flow_action[action].name;
+ switch (action) {
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ break;
+ default:
+ printf("Cannot query action type %d (%s)\n", action, name);
+ return -ENOTSUP;
+ }
+ /* Poisoning to make sure PMDs update it in case of error. */
+ memset(&error, 0x55, sizeof(error));
+ memset(&query, 0, sizeof(query));
+ if (rte_flow_query(port_id, pf->flow, action, &query, &error))
+ return port_flow_complain(&error);
+ switch (action) {
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ printf("%s:\n"
+ " hits_set: %u\n"
+ " bytes_set: %u\n"
+ " hits: %" PRIu64 "\n"
+ " bytes: %" PRIu64 "\n",
+ name,
+ query.count.hits_set,
+ query.count.bytes_set,
+ query.count.hits,
+ query.count.bytes);
+ break;
+ default:
+ printf("Cannot display result for action type %d (%s)\n",
+ action, name);
+ break;
+ }
+ return 0;
+}
+
+/** List flow rules. */
+void
+port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
+{
+ struct rte_port *port;
+ struct port_flow *pf;
+ struct port_flow *list = NULL;
+ uint32_t i;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+ port_id == (portid_t)RTE_PORT_ALL)
+ return;
+ port = &ports[port_id];
+ if (!port->flow_list)
+ return;
+ /* Sort flows by group, priority and ID. */
+ for (pf = port->flow_list; pf != NULL; pf = pf->next) {
+ struct port_flow **tmp;
+
+ if (n) {
+ /* Filter out unwanted groups. */
+ for (i = 0; i != n; ++i)
+ if (pf->attr.group == group[i])
+ break;
+ if (i == n)
+ continue;
+ }
+ tmp = &list;
+ while (*tmp &&
+ (pf->attr.group > (*tmp)->attr.group ||
+ (pf->attr.group == (*tmp)->attr.group &&
+ pf->attr.priority > (*tmp)->attr.priority) ||
+ (pf->attr.group == (*tmp)->attr.group &&
+ pf->attr.priority == (*tmp)->attr.priority &&
+ pf->id > (*tmp)->id)))
+ tmp = &(*tmp)->tmp;
+ pf->tmp = *tmp;
+ *tmp = pf;
+ }
+ printf("ID\tGroup\tPrio\tAttr\tRule\n");
+ for (pf = list; pf != NULL; pf = pf->tmp) {
+ const struct rte_flow_item *item = pf->pattern;
+ const struct rte_flow_action *action = pf->actions;
+
+ printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t",
+ pf->id,
+ pf->attr.group,
+ pf->attr.priority,
+ pf->attr.ingress ? 'i' : '-',
+ pf->attr.egress ? 'e' : '-');
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
+ printf("%s ", flow_item[item->type].name);
+ ++item;
+ }
+ printf("=>");
+ while (action->type != RTE_FLOW_ACTION_TYPE_END) {
+ if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
+ printf(" %s", flow_action[action->type].name);
+ ++action;
+ }
+ printf("\n");
+ }
+}
+
/*
* RX/TX ring descriptors display functions.
*/
@@ -1601,7 +2286,7 @@ set_fwd_ports_mask(uint64_t portmask)
return;
}
nb_pt = 0;
- for (i = 0; i < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
if (! ((uint64_t)(1ULL << i) & portmask))
continue;
portlist[nb_pt++] = i;
@@ -2323,43 +3008,28 @@ fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
}
+#ifdef RTE_LIBRTE_IXGBE_PMD
void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
int diag;
- if (port_id_is_invalid(port_id, ENABLED_WARN))
- return;
if (is_rx)
- diag = rte_eth_dev_set_vf_rx(port_id,vf,on);
+ diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
else
- diag = rte_eth_dev_set_vf_tx(port_id,vf,on);
+ diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);
+
if (diag == 0)
return;
if(is_rx)
- printf("rte_eth_dev_set_vf_rx for port_id=%d failed "
+ printf("rte_pmd_ixgbe_set_vf_rx for port_id=%d failed "
"diag=%d\n", port_id, diag);
else
- printf("rte_eth_dev_set_vf_tx for port_id=%d failed "
+ printf("rte_pmd_ixgbe_set_vf_tx for port_id=%d failed "
"diag=%d\n", port_id, diag);
}
-
-void
-set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on)
-{
- int diag;
-
- if (port_id_is_invalid(port_id, ENABLED_WARN))
- return;
- if (vlan_id_is_invalid(vlan_id))
- return;
- diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on);
- if (diag == 0)
- return;
- printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed "
- "diag=%d\n", port_id, diag);
-}
+#endif
int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
@@ -2383,30 +3053,20 @@ set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
return diag;
}
+#ifdef RTE_LIBRTE_IXGBE_PMD
int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
int diag;
- struct rte_eth_link link;
-
- if (q_msk == 0)
- return 0;
- if (port_id_is_invalid(port_id, ENABLED_WARN))
- return 1;
- rte_eth_link_get_nowait(port_id, &link);
- if (rate > link.link_speed) {
- printf("Invalid rate value:%u bigger than link speed: %u\n",
- rate, link.link_speed);
- return 1;
- }
- diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk);
+ diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, q_msk);
if (diag == 0)
return diag;
- printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n",
+ printf("rte_pmd_ixgbe_set_vf_rate_limit for port_id=%d failed diag=%d\n",
port_id, diag);
return diag;
}
+#endif
/*
* Functions to manage the set of filtered Multicast MAC addresses.
@@ -2592,3 +3252,70 @@ port_dcb_info_display(uint8_t port_id)
printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
printf("\n");
}
+
+uint8_t *
+open_ddp_package_file(const char *file_path, uint32_t *size)
+{
+ FILE *fh = fopen(file_path, "rb");
+ uint32_t pkg_size;
+ uint8_t *buf = NULL;
+ int ret = 0;
+
+ if (size)
+ *size = 0;
+
+ if (fh == NULL) {
+ printf("%s: Failed to open %s\n", __func__, file_path);
+ return buf;
+ }
+
+ ret = fseek(fh, 0, SEEK_END);
+ if (ret < 0) {
+ fclose(fh);
+ printf("%s: File operations failed\n", __func__);
+ return buf;
+ }
+
+ pkg_size = ftell(fh);
+
+ buf = (uint8_t *)malloc(pkg_size);
+ if (!buf) {
+ fclose(fh);
+ printf("%s: Failed to malloc memory\n", __func__);
+ return buf;
+ }
+
+ ret = fseek(fh, 0, SEEK_SET);
+ if (ret < 0) {
+ fclose(fh);
+ printf("%s: File seek operation failed\n", __func__);
+ close_ddp_package_file(buf);
+ return NULL;
+ }
+
+ ret = fread(buf, 1, pkg_size, fh);
+ if (ret < 0) {
+ fclose(fh);
+ printf("%s: File read operation failed\n", __func__);
+ close_ddp_package_file(buf);
+ return NULL;
+ }
+
+ if (size)
+ *size = pkg_size;
+
+ fclose(fh);
+
+ return buf;
+}
+
+int
+close_ddp_package_file(uint8_t *buf)
+{
+ if (buf) {
+ free((void *)buf);
+ return 0;
+ }
+
+ return -1;
+}
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 57e6ae27..66fc9a00 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -70,6 +70,7 @@
#include <rte_sctp.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
+#include <rte_flow.h>
#include "testpmd.h"
#define IP_DEFTTL 64 /* from RFC 1340. */
@@ -112,15 +113,6 @@ struct simple_gre_hdr {
} __attribute__((__packed__));
static uint16_t
-get_psd_sum(void *l3_hdr, uint16_t ethertype, uint64_t ol_flags)
-{
- if (ethertype == _htons(ETHER_TYPE_IPv4))
- return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
- else /* assume ethertype == ETHER_TYPE_IPv6 */
- return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
-}
-
-static uint16_t
get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
{
if (ethertype == _htons(ETHER_TYPE_IPv4))
@@ -370,11 +362,9 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
/* do not recalculate udp cksum if it was 0 */
if (udp_hdr->dgram_cksum != 0) {
udp_hdr->dgram_cksum = 0;
- if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) {
+ if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM)
ol_flags |= PKT_TX_UDP_CKSUM;
- udp_hdr->dgram_cksum = get_psd_sum(l3_hdr,
- info->ethertype, ol_flags);
- } else {
+ else {
udp_hdr->dgram_cksum =
get_udptcp_checksum(l3_hdr, udp_hdr,
info->ethertype);
@@ -383,15 +373,11 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
} else if (info->l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
tcp_hdr->cksum = 0;
- if (tso_segsz) {
+ if (tso_segsz)
ol_flags |= PKT_TX_TCP_SEG;
- tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
- ol_flags);
- } else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) {
+ else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM)
ol_flags |= PKT_TX_TCP_CKSUM;
- tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
- ol_flags);
- } else {
+ else {
tcp_hdr->cksum =
get_udptcp_checksum(l3_hdr, tcp_hdr,
info->ethertype);
@@ -431,7 +417,7 @@ process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
ol_flags |= PKT_TX_OUTER_IP_CKSUM;
else
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
- } else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
+ } else
ol_flags |= PKT_TX_OUTER_IPV6;
if (info->outer_l4_proto != IPPROTO_UDP)
@@ -597,7 +583,7 @@ pkt_copy_split(const struct rte_mbuf *pkt)
rc = mbuf_copy_split(pkt, md, seglen, nb_seg);
if (rc < 0)
RTE_LOG(ERR, USER1,
- "mbuf_copy_split for %p(len=%u, nb_seg=%hhu) "
+ "mbuf_copy_split for %p(len=%u, nb_seg=%u) "
"into %u segments failed with error code: %d\n",
pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc);
@@ -648,6 +634,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
uint16_t nb_rx;
uint16_t nb_tx;
+ uint16_t nb_prep;
uint16_t i;
uint64_t rx_ol_flags, tx_ol_flags;
uint16_t testpmd_ol_flags;
@@ -769,7 +756,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
if (info.is_tunnel == 1) {
if (info.tunnel_tso_segsz ||
- testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) {
+ (testpmd_ol_flags &
+ TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) ||
+ (tx_ol_flags & PKT_TX_OUTER_IPV6)) {
m->outer_l2_len = info.outer_l2_len;
m->outer_l3_len = info.outer_l3_len;
m->l2_len = info.l2_len;
@@ -814,7 +803,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
char buf[256];
printf("-----------------\n");
- printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%hhu:\n",
+ printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%u:\n",
fs->rx_port, m, m->pkt_len, m->nb_segs);
/* dump rx parsed packet info */
rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf));
@@ -839,8 +828,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
"m->l4_len=%d\n",
m->l2_len, m->l3_len, m->l4_len);
if (info.is_tunnel == 1) {
- if (testpmd_ol_flags &
- TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
+ if ((testpmd_ol_flags &
+ TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) ||
+ (tx_ol_flags & PKT_TX_OUTER_IPV6))
printf("tx: m->outer_l2_len=%d "
"m->outer_l3_len=%d\n",
m->outer_l2_len,
@@ -857,7 +847,16 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
printf("\n");
}
}
- nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
+
+ nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
+ pkts_burst, nb_rx);
+ if (nb_prep != nb_rx)
+ printf("Preparing packet burst to transmit failed: %s\n",
+ rte_strerror(rte_errno));
+
+ nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst,
+ nb_prep);
+
/*
* Retry if necessary
*/
diff --git a/app/test-pmd/flowgen.c b/app/test-pmd/flowgen.c
index b13ff89a..13b4f900 100644
--- a/app/test-pmd/flowgen.c
+++ b/app/test-pmd/flowgen.c
@@ -68,6 +68,7 @@
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
+#include <rte_flow.h>
#include "testpmd.h"
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index 6a4e750f..d4b4c9eb 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -61,6 +61,7 @@
#include <rte_ip.h>
#include <rte_icmp.h>
#include <rte_string_fns.h>
+#include <rte_flow.h>
#include "testpmd.h"
@@ -199,7 +200,7 @@ ip_proto_name(uint16_t ip_proto)
"OSPFIGP", /**< OSPFIGP */
"SRPC", /**< Strite RPC protocol */
- "LARP", /**< Locus Address Resoloution */
+ "LARP", /**< Locus Address Resolution */
"MTP", /**< Multicast Transport */
"AX25", /**< AX.25 Frames */
"4IN4", /**< IP encapsulated in IP */
@@ -296,7 +297,7 @@ ipv4_hdr_cksum(struct ipv4_hdr *ip_h)
(((rte_be_to_cpu_32((ipv4_addr)) >> 24) & 0x000000FF) == 0xE0)
/*
- * Receive a burst of packets, lookup for ICMP echo requets, and, if any,
+ * Receive a burst of packets, lookup for ICMP echo requests, and, if any,
* send back ICMP echo replies.
*/
static void
diff --git a/app/test-pmd/ieee1588fwd.c b/app/test-pmd/ieee1588fwd.c
index 0d3b37a7..51170ee3 100644
--- a/app/test-pmd/ieee1588fwd.c
+++ b/app/test-pmd/ieee1588fwd.c
@@ -34,6 +34,7 @@
#include <rte_cycles.h>
#include <rte_ethdev.h>
+#include <rte_flow.h>
#include "testpmd.h"
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
index 26936b7b..15cb4a20 100644
--- a/app/test-pmd/iofwd.c
+++ b/app/test-pmd/iofwd.c
@@ -64,6 +64,7 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
+#include <rte_flow.h>
#include "testpmd.h"
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index 86e01dea..cf7eab12 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -65,6 +65,7 @@
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_string_fns.h>
+#include <rte_flow.h>
#include "testpmd.h"
@@ -112,6 +113,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
ol_flags = PKT_TX_VLAN_PKT;
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
ol_flags |= PKT_TX_QINQ_PKT;
+ if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+ ol_flags |= PKT_TX_MACSEC;
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))
rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index 36e139f6..3a093512 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -65,6 +65,7 @@
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_string_fns.h>
+#include <rte_flow.h>
#include "testpmd.h"
@@ -112,6 +113,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
ol_flags = PKT_TX_VLAN_PKT;
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
ol_flags |= PKT_TX_QINQ_PKT;
+ if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+ ol_flags |= PKT_TX_MACSEC;
for (i = 0; i < nb_rx; i++) {
if (likely(i < nb_rx - 1))
rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 08e5a76f..fbe6284c 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,6 +76,7 @@
#ifdef RTE_LIBRTE_PMD_BOND
#include <rte_eth_bond.h>
#endif
+#include <rte_flow.h>
#include "testpmd.h"
@@ -85,6 +86,7 @@ usage(char* progname)
printf("usage: %s "
#ifdef RTE_LIBRTE_CMDLINE
"[--interactive|-i] "
+ "[--cmdline-file=FILENAME] "
#endif
"[--help|-h] | [--auto-start|-a] | ["
"--coremask=COREMASK --portmask=PORTMASK --numa "
@@ -102,6 +104,7 @@ usage(char* progname)
progname);
#ifdef RTE_LIBRTE_CMDLINE
printf(" --interactive: run in interactive mode.\n");
+ printf(" --cmdline-file: execute cli commands before startup.\n");
#endif
printf(" --auto-start: start forwarding on init "
"[always when non-interactive].\n");
@@ -148,7 +151,11 @@ usage(char* progname)
"the packet will be enqueued into the rx drop-queue. "
"If the drop-queue doesn't exist, the packet is dropped. "
"By default drop-queue=127.\n");
- printf(" --crc-strip: enable CRC stripping by hardware.\n");
+#ifdef RTE_LIBRTE_LATENCY_STATS
+ printf(" --latencystats=N: enable latency and jitter statistcs "
+ "monitoring on forwarding lcore id N.\n");
+#endif
+ printf(" --disable-crc-strip: disable CRC stripping by hardware.\n");
printf(" --enable-lro: enable large receive offload.\n");
printf(" --enable-rx-cksum: enable rx hardware checksum offload.\n");
printf(" --disable-hw-vlan: disable hardware vlan.\n");
@@ -195,6 +202,14 @@ usage(char* progname)
" or total packet length.\n");
printf(" --disable-link-check: disable check on link status when "
"starting/stopping ports.\n");
+ printf(" --no-lsc-interrupt: disable link status change interrupt.\n");
+ printf(" --no-rmv-interrupt: disable device removal interrupt.\n");
+ printf(" --bitrate-stats=N: set the logical core N to perform "
+ "bit-rate calculation.\n");
+ printf(" --print-event <unknown|intr_lsc|queue_state|intr_reset|vf_mbox|macsec|intr_rmv|all>: "
+ "enable print of designated event or all of them.");
+ printf(" --mask-event <unknown|intr_lsc|queue_state|intr_reset|vf_mbox|macsec|intr_rmv|all>: "
+ "disable print of designated event or all of them.");
}
#ifdef RTE_LIBRTE_CMDLINE
@@ -354,6 +369,18 @@ parse_queue_stats_mapping_config(const char *q_arg, int is_rx)
return 0;
}
+static void
+print_invalid_socket_id_error(void)
+{
+ unsigned int i = 0;
+
+ printf("Invalid socket id, options are: ");
+ for (i = 0; i < num_sockets; i++) {
+ printf("%u%s", socket_ids[i],
+ (i == num_sockets - 1) ? "\n" : ",");
+ }
+}
+
static int
parse_portnuma_config(const char *q_arg)
{
@@ -393,15 +420,14 @@ parse_portnuma_config(const char *q_arg)
port_id = (uint8_t)int_fld[FLD_PORT];
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
printf("Valid port range is [0");
- FOREACH_PORT(pid, ports)
+ RTE_ETH_FOREACH_DEV(pid)
printf(", %d", pid);
printf("]\n");
return -1;
}
socket_id = (uint8_t)int_fld[FLD_SOCKET];
- if(socket_id >= max_socket) {
- printf("Invalid socket id, range is [0, %d]\n",
- max_socket - 1);
+ if (new_socket_id(socket_id)) {
+ print_invalid_socket_id_error();
return -1;
}
port_numa[port_id] = socket_id;
@@ -453,15 +479,14 @@ parse_ringnuma_config(const char *q_arg)
port_id = (uint8_t)int_fld[FLD_PORT];
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
printf("Valid port range is [0");
- FOREACH_PORT(pid, ports)
+ RTE_ETH_FOREACH_DEV(pid)
printf(", %d", pid);
printf("]\n");
return -1;
}
socket_id = (uint8_t)int_fld[FLD_SOCKET];
- if (socket_id >= max_socket) {
- printf("Invalid socket id, range is [0, %d]\n",
- max_socket - 1);
+ if (new_socket_id(socket_id)) {
+ print_invalid_socket_id_error();
return -1;
}
ring_flag = (uint8_t)int_fld[FLD_FLAG];
@@ -492,6 +517,38 @@ parse_ringnuma_config(const char *q_arg)
return 0;
}
+static int
+parse_event_printing_config(const char *optarg, int enable)
+{
+ uint32_t mask = 0;
+
+ if (!strcmp(optarg, "unknown"))
+ mask = UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN;
+ else if (!strcmp(optarg, "intr_lsc"))
+ mask = UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC;
+ else if (!strcmp(optarg, "queue_state"))
+ mask = UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE;
+ else if (!strcmp(optarg, "intr_reset"))
+ mask = UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET;
+ else if (!strcmp(optarg, "vf_mbox"))
+ mask = UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX;
+ else if (!strcmp(optarg, "macsec"))
+ mask = UINT32_C(1) << RTE_ETH_EVENT_MACSEC;
+ else if (!strcmp(optarg, "intr_rmv"))
+ mask = UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV;
+ else if (!strcmp(optarg, "all"))
+ mask = ~UINT32_C(0);
+ else {
+ fprintf(stderr, "Invalid event: %s\n", optarg);
+ return -1;
+ }
+ if (enable)
+ event_print_mask |= mask;
+ else
+ event_print_mask &= ~mask;
+ return 0;
+}
+
void
launch_args_parse(int argc, char** argv)
{
@@ -504,6 +561,7 @@ launch_args_parse(int argc, char** argv)
{ "help", 0, 0, 0 },
#ifdef RTE_LIBRTE_CMDLINE
{ "interactive", 0, 0, 0 },
+ { "cmdline-file", 1, 0, 0 },
{ "auto-start", 0, 0, 0 },
{ "eth-peers-configfile", 1, 0, 0 },
{ "eth-peer", 1, 0, 0 },
@@ -514,6 +572,7 @@ launch_args_parse(int argc, char** argv)
{ "coremask", 1, 0, 0 },
{ "portmask", 1, 0, 0 },
{ "numa", 0, 0, 0 },
+ { "no-numa", 0, 0, 0 },
{ "mp-anon", 0, 0, 0 },
{ "port-numa-config", 1, 0, 0 },
{ "ring-numa-config", 1, 0, 0 },
@@ -525,7 +584,13 @@ launch_args_parse(int argc, char** argv)
{ "pkt-filter-report-hash", 1, 0, 0 },
{ "pkt-filter-size", 1, 0, 0 },
{ "pkt-filter-drop-queue", 1, 0, 0 },
- { "crc-strip", 0, 0, 0 },
+#ifdef RTE_LIBRTE_LATENCY_STATS
+ { "latencystats", 1, 0, 0 },
+#endif
+#ifdef RTE_LIBRTE_BITRATE
+ { "bitrate-stats", 1, 0, 0 },
+#endif
+ { "disable-crc-strip", 0, 0, 0 },
{ "enable-lro", 0, 0, 0 },
{ "enable-rx-cksum", 0, 0, 0 },
{ "enable-scatter", 0, 0, 0 },
@@ -560,6 +625,10 @@ launch_args_parse(int argc, char** argv)
{ "no-flush-rx", 0, 0, 0 },
{ "txpkts", 1, 0, 0 },
{ "disable-link-check", 0, 0, 0 },
+ { "no-lsc-interrupt", 0, 0, 0 },
+ { "no-rmv-interrupt", 0, 0, 0 },
+ { "print-event", 1, 0, 0 },
+ { "mask-event", 1, 0, 0 },
{ 0, 0, 0, 0 },
};
@@ -594,6 +663,13 @@ launch_args_parse(int argc, char** argv)
printf("Interactive-mode selected\n");
interactive = 1;
}
+ if (!strcmp(lgopts[opt_idx].name, "cmdline-file")) {
+ printf("CLI commands to be read from %s\n",
+ optarg);
+ snprintf(cmdline_filename,
+ sizeof(cmdline_filename), "%s",
+ optarg);
+ }
if (!strcmp(lgopts[opt_idx].name, "auto-start")) {
printf("Auto-start selected\n");
auto_start = 1;
@@ -650,12 +726,10 @@ launch_args_parse(int argc, char** argv)
parse_fwd_coremask(optarg);
if (!strcmp(lgopts[opt_idx].name, "portmask"))
parse_fwd_portmask(optarg);
- if (!strcmp(lgopts[opt_idx].name, "numa")) {
+ if (!strcmp(lgopts[opt_idx].name, "no-numa"))
+ numa_support = 0;
+ if (!strcmp(lgopts[opt_idx].name, "numa"))
numa_support = 1;
- memset(port_numa,NUMA_NO_CONFIG,RTE_MAX_ETHPORTS);
- memset(rxring_numa,NUMA_NO_CONFIG,RTE_MAX_ETHPORTS);
- memset(txring_numa,NUMA_NO_CONFIG,RTE_MAX_ETHPORTS);
- }
if (!strcmp(lgopts[opt_idx].name, "mp-anon")) {
mp_anon = 1;
}
@@ -670,12 +744,13 @@ launch_args_parse(int argc, char** argv)
"invalid ring-numa configuration\n");
if (!strcmp(lgopts[opt_idx].name, "socket-num")) {
n = atoi(optarg);
- if((uint8_t)n < max_socket)
+ if (!new_socket_id((uint8_t)n)) {
socket_num = (uint8_t)n;
- else
+ } else {
+ print_invalid_socket_id_error();
rte_exit(EXIT_FAILURE,
- "The socket number should be < %d\n",
- max_socket);
+ "Invalid socket id");
+ }
}
if (!strcmp(lgopts[opt_idx].name, "mbuf-size")) {
n = atoi(optarg);
@@ -765,8 +840,33 @@ launch_args_parse(int argc, char** argv)
"drop queue %d invalid - must"
"be >= 0 \n", n);
}
- if (!strcmp(lgopts[opt_idx].name, "crc-strip"))
- rx_mode.hw_strip_crc = 1;
+#ifdef RTE_LIBRTE_LATENCY_STATS
+ if (!strcmp(lgopts[opt_idx].name,
+ "latencystats")) {
+ n = atoi(optarg);
+ if (n >= 0) {
+ latencystats_lcore_id = (lcoreid_t) n;
+ latencystats_enabled = 1;
+ } else
+ rte_exit(EXIT_FAILURE,
+ "invalid lcore id %d for latencystats"
+ " must be >= 0\n", n);
+ }
+#endif
+#ifdef RTE_LIBRTE_BITRATE
+ if (!strcmp(lgopts[opt_idx].name, "bitrate-stats")) {
+ n = atoi(optarg);
+ if (n >= 0) {
+ bitrate_lcore_id = (lcoreid_t) n;
+ bitrate_enabled = 1;
+ } else
+ rte_exit(EXIT_FAILURE,
+ "invalid lcore id %d for bitrate stats"
+ " must be >= 0\n", n);
+ }
+#endif
+ if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip"))
+ rx_mode.hw_strip_crc = 0;
if (!strcmp(lgopts[opt_idx].name, "enable-lro"))
rx_mode.enable_lro = 1;
if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
@@ -977,6 +1077,20 @@ launch_args_parse(int argc, char** argv)
no_flush_rx = 1;
if (!strcmp(lgopts[opt_idx].name, "disable-link-check"))
no_link_check = 1;
+ if (!strcmp(lgopts[opt_idx].name, "no-lsc-interrupt"))
+ lsc_interrupt = 0;
+ if (!strcmp(lgopts[opt_idx].name, "no-rmv-interrupt"))
+ rmv_interrupt = 0;
+ if (!strcmp(lgopts[opt_idx].name, "print-event"))
+ if (parse_event_printing_config(optarg, 1)) {
+ rte_exit(EXIT_FAILURE,
+ "invalid print-event argument\n");
+ }
+ if (!strcmp(lgopts[opt_idx].name, "mask-event"))
+ if (parse_event_printing_config(optarg, 0)) {
+ rte_exit(EXIT_FAILURE,
+ "invalid mask-event argument\n");
+ }
break;
case 'h':
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index fff815c6..dcd1d85c 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -67,6 +67,7 @@
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_net.h>
+#include <rte_flow.h>
#include "testpmd.h"
@@ -100,9 +101,7 @@ pkt_burst_receive(struct fwd_stream *fs)
uint64_t start_tsc;
uint64_t end_tsc;
uint64_t core_cycles;
-#endif
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
start_tsc = rte_rdtsc();
#endif
@@ -147,7 +146,8 @@ pkt_burst_receive(struct fwd_stream *fs)
if (ol_flags & PKT_RX_RSS_HASH) {
printf(" - RSS hash=0x%x", (unsigned) mb->hash.rss);
printf(" - RSS queue=0x%x",(unsigned) fs->rx_queue);
- } else if (ol_flags & PKT_RX_FDIR) {
+ }
+ if (ol_flags & PKT_RX_FDIR) {
printf(" - FDIR matched ");
if (ol_flags & PKT_RX_FDIR_ID)
printf("ID=0x%x",
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index a0332c26..d1041afa 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -59,6 +59,7 @@
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
+#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
@@ -78,6 +79,14 @@
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
+#include <rte_flow.h>
+#include <rte_metrics.h>
+#ifdef RTE_LIBRTE_BITRATE
+#include <rte_bitrate.h>
+#endif
+#ifdef RTE_LIBRTE_LATENCY_STATS
+#include <rte_latencystats.h>
+#endif
#include "testpmd.h"
@@ -86,6 +95,7 @@ uint16_t verbose_level = 0; /**< Silent by default. */
/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
+char cmdline_filename[PATH_MAX] = {0};
/*
* NUMA support configuration.
@@ -94,7 +104,7 @@ uint8_t auto_start = 0;
* probed ports among the CPU sockets 0 and 1.
* Otherwise, all memory is allocated from CPU socket 0.
*/
-uint8_t numa_support = 0; /**< No numa support by default */
+uint8_t numa_support = 1; /**< numa enabled by default */
/*
* In UMA mode,all memory is allocated from socket 0 if --socket-num is
@@ -110,7 +120,7 @@ uint8_t mp_anon = 0;
/*
* Record the Ethernet address of peer target ports to which packets are
* forwarded.
- * Must be instanciated with the ethernet addresses of peer traffic generator
+ * Must be instantiated with the ethernet addresses of peer traffic generator
* ports.
*/
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
@@ -262,6 +272,27 @@ uint8_t no_flush_rx = 0; /* flush by default */
uint8_t no_link_check = 0; /* check by default */
/*
+ * Enable link status change notification
+ */
+uint8_t lsc_interrupt = 1; /* enabled by default */
+
+/*
+ * Enable device removal notification.
+ */
+uint8_t rmv_interrupt = 1; /* enabled by default */
+
+/*
+ * Display or mask ether events
+ * Default to all events except VF_MBOX
+ */
+uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
+ (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
+ (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
+ (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
+ (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
+ (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
+
+/*
* NIC bypass mode configuration options.
*/
#ifdef RTE_NIC_BYPASS
@@ -271,6 +302,20 @@ uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
#endif
+#ifdef RTE_LIBRTE_LATENCY_STATS
+
+/*
+ * Set when latency stats is enabled in the commandline
+ */
+uint8_t latencystats_enabled;
+
+/*
+ * Lcore ID to serive latency statistics.
+ */
+lcoreid_t latencystats_lcore_id = -1;
+
+#endif
+
/*
* Ethernet device configuration.
*/
@@ -283,7 +328,7 @@ struct rte_eth_rxmode rx_mode = {
.hw_vlan_strip = 1, /**< VLAN strip enabled. */
.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
- .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
+ .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
};
struct rte_fdir_conf fdir_conf = {
@@ -320,11 +365,22 @@ struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_a
uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;
-unsigned max_socket = 0;
+unsigned int num_sockets = 0;
+unsigned int socket_ids[RTE_MAX_NUMA_NODES];
+
+#ifdef RTE_LIBRTE_BITRATE
+/* Bitrate statistics */
+struct rte_stats_bitrates *bitrate_data;
+lcoreid_t bitrate_lcore_id;
+uint8_t bitrate_enabled;
+#endif
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
+static void eth_event_callback(uint8_t port_id,
+ enum rte_eth_event_type type,
+ void *param);
/*
* Check if all the ports are started.
@@ -333,17 +389,19 @@ static void check_all_ports_link_status(uint32_t port_mask);
static int all_ports_started(void);
/*
- * Find next enabled port
+ * Helper function to check if socket is allready discovered.
+ * If yes, return positive value. If not, return zero.
*/
-portid_t
-find_next_port(portid_t p, struct rte_port *ports, int size)
+int
+new_socket_id(unsigned int socket_id)
{
- if (ports == NULL)
- rte_exit(-EINVAL, "failed to find a next port id\n");
+ unsigned int i;
- while ((p < size) && (ports[p].enabled == 0))
- p++;
- return p;
+ for (i = 0; i < num_sockets; i++) {
+ if (socket_ids[i] == socket_id)
+ return 0;
+ }
+ return 1;
}
/*
@@ -358,11 +416,14 @@ set_default_fwd_lcores_config(void)
nb_lc = 0;
for (i = 0; i < RTE_MAX_LCORE; i++) {
- sock_num = rte_lcore_to_socket_id(i) + 1;
- if (sock_num > max_socket) {
- if (sock_num > RTE_MAX_NUMA_NODES)
- rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
- max_socket = sock_num;
+ sock_num = rte_lcore_to_socket_id(i);
+ if (new_socket_id(sock_num)) {
+ if (num_sockets >= RTE_MAX_NUMA_NODES) {
+ rte_exit(EXIT_FAILURE,
+ "Total sockets greater than %u\n",
+ RTE_MAX_NUMA_NODES);
+ }
+ socket_ids[num_sockets++] = sock_num;
}
if (!rte_lcore_is_enabled(i))
continue;
@@ -476,7 +537,7 @@ check_socket_id(const unsigned int socket_id)
{
static int warning_once = 0;
- if (socket_id >= max_socket) {
+ if (new_socket_id(socket_id)) {
if (!warning_once && numa_support)
printf("Warning: NUMA should be configured manually by"
" using --port-numa-config and"
@@ -499,6 +560,13 @@ init_config(void)
uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
+
+ if (numa_support) {
+ memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
+ memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
+ memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
+ }
+
/* Configuration of logical cores. */
fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
sizeof(struct fwd_lcore *) * nb_lcores,
@@ -518,35 +586,7 @@ init_config(void)
fwd_lcores[lc_id]->cpuid_idx = lc_id;
}
- /*
- * Create pools of mbuf.
- * If NUMA support is disabled, create a single pool of mbuf in
- * socket 0 memory by default.
- * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
- *
- * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
- * nb_txd can be configured at run time.
- */
- if (param_total_num_mbufs)
- nb_mbuf_per_pool = param_total_num_mbufs;
- else {
- nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
- + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
-
- if (!numa_support)
- nb_mbuf_per_pool =
- (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
- }
-
- if (!numa_support) {
- if (socket_num == UMA_NO_CONFIG)
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
- else
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
- socket_num);
- }
-
- FOREACH_PORT(pid, ports) {
+ RTE_ETH_FOREACH_DEV(pid) {
port = &ports[pid];
rte_eth_dev_info_get(pid, &port->dev_info);
@@ -568,20 +608,38 @@ init_config(void)
port->need_reconfig_queues = 1;
}
+ /*
+ * Create pools of mbuf.
+ * If NUMA support is disabled, create a single pool of mbuf in
+ * socket 0 memory by default.
+ * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
+ *
+ * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
+ * nb_txd can be configured at run time.
+ */
+ if (param_total_num_mbufs)
+ nb_mbuf_per_pool = param_total_num_mbufs;
+ else {
+ nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
+ (nb_lcores * mb_mempool_cache) +
+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
+ nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
+ }
+
if (numa_support) {
uint8_t i;
- unsigned int nb_mbuf;
-
- if (param_total_num_mbufs)
- nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
- for (i = 0; i < max_socket; i++) {
- nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
- if (nb_mbuf)
- mbuf_pool_create(mbuf_data_size,
- nb_mbuf,i);
- }
+ for (i = 0; i < num_sockets; i++)
+ mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
+ socket_ids[i]);
+ } else {
+ if (socket_num == UMA_NO_CONFIG)
+ mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
+ else
+ mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
+ socket_num);
}
+
init_port_config();
/*
@@ -631,7 +689,7 @@ init_fwd_streams(void)
queueid_t q;
/* set socket id according to numa or not */
- FOREACH_PORT(pid, ports) {
+ RTE_ETH_FOREACH_DEV(pid) {
port = &ports[pid];
if (nb_rxq > port->dev_info.max_rx_queues) {
printf("Fail: nb_rxq(%d) is greater than "
@@ -921,12 +979,42 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
struct fwd_stream **fsm;
streamid_t nb_fs;
streamid_t sm_id;
-
+#ifdef RTE_LIBRTE_BITRATE
+ uint64_t tics_per_1sec;
+ uint64_t tics_datum;
+ uint64_t tics_current;
+ uint8_t idx_port, cnt_ports;
+
+ cnt_ports = rte_eth_dev_count();
+ tics_datum = rte_rdtsc();
+ tics_per_1sec = rte_get_timer_hz();
+#endif
fsm = &fwd_streams[fc->stream_idx];
nb_fs = fc->stream_nb;
do {
for (sm_id = 0; sm_id < nb_fs; sm_id++)
(*pkt_fwd)(fsm[sm_id]);
+#ifdef RTE_LIBRTE_BITRATE
+ if (bitrate_enabled != 0 &&
+ bitrate_lcore_id == rte_lcore_id()) {
+ tics_current = rte_rdtsc();
+ if (tics_current - tics_datum >= tics_per_1sec) {
+ /* Periodic bitrate calculation */
+ for (idx_port = 0;
+ idx_port < cnt_ports;
+ idx_port++)
+ rte_stats_bitrate_calc(bitrate_data,
+ idx_port);
+ tics_datum = tics_current;
+ }
+ }
+#endif
+#ifdef RTE_LIBRTE_LATENCY_STATS
+ if (latencystats_enabled != 0 &&
+ latencystats_lcore_id == rte_lcore_id())
+ rte_latencystats_update();
+#endif
+
} while (! fc->stopped);
}
@@ -1252,7 +1340,7 @@ all_ports_started(void)
portid_t pi;
struct rte_port *port;
- FOREACH_PORT(pi, ports) {
+ RTE_ETH_FOREACH_DEV(pi) {
port = &ports[pi];
/* Check if there is a port which is not started */
if ((port->port_status != RTE_PORT_STARTED) &&
@@ -1270,7 +1358,7 @@ all_ports_stopped(void)
portid_t pi;
struct rte_port *port;
- FOREACH_PORT(pi, ports) {
+ RTE_ETH_FOREACH_DEV(pi) {
port = &ports[pi];
if ((port->port_status != RTE_PORT_STOPPED) &&
(port->slave_flag == 0))
@@ -1312,13 +1400,14 @@ start_port(portid_t pid)
queueid_t qi;
struct rte_port *port;
struct ether_addr mac_addr;
+ enum rte_eth_event_type event_type;
if (port_id_is_invalid(pid, ENABLED_WARN))
return 0;
if(dcb_config)
dcb_test = 1;
- FOREACH_PORT(pi, ports) {
+ RTE_ETH_FOREACH_DEV(pi) {
if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
continue;
@@ -1423,6 +1512,21 @@ start_port(portid_t pid)
return -1;
}
}
+
+ for (event_type = RTE_ETH_EVENT_UNKNOWN;
+ event_type < RTE_ETH_EVENT_MAX;
+ event_type++) {
+ diag = rte_eth_dev_callback_register(pi,
+ event_type,
+ eth_event_callback,
+ NULL);
+ if (diag) {
+ printf("Failed to setup even callback for event %d\n",
+ event_type);
+ return -1;
+ }
+ }
+
/* start port */
if (rte_eth_dev_start(pi) < 0) {
printf("Fail to start port %d\n", pi);
@@ -1475,7 +1579,7 @@ stop_port(portid_t pid)
printf("Stopping ports...\n");
- FOREACH_PORT(pi, ports) {
+ RTE_ETH_FOREACH_DEV(pi) {
if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
continue;
@@ -1518,7 +1622,7 @@ close_port(portid_t pid)
printf("Closing ports...\n");
- FOREACH_PORT(pi, ports) {
+ RTE_ETH_FOREACH_DEV(pi) {
if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
continue;
@@ -1545,6 +1649,8 @@ close_port(portid_t pid)
continue;
}
+ if (port->flow_list)
+ port_flow_flush(pi);
rte_eth_dev_close(pi);
if (rte_atomic16_cmpset(&(port->port_status),
@@ -1571,7 +1677,6 @@ attach_port(char *identifier)
if (rte_eth_dev_attach(identifier, &pi))
return;
- ports[pi].enabled = 1;
socket_id = (unsigned)rte_eth_dev_socket_id(pi);
/* if socket_id is invalid, set to 0 */
if (check_socket_id(socket_id) < 0)
@@ -1599,10 +1704,12 @@ detach_port(uint8_t port_id)
return;
}
+ if (ports[port_id].flow_list)
+ port_flow_flush(port_id);
+
if (rte_eth_dev_detach(port_id, name))
return;
- ports[port_id].enabled = 0;
nb_ports = rte_eth_dev_count();
printf("Port '%s' is detached. Now total ports is %d\n",
@@ -1621,7 +1728,7 @@ pmd_test_exit(void)
if (ports != NULL) {
no_link_check = 1;
- FOREACH_PORT(pt_id, ports) {
+ RTE_ETH_FOREACH_DEV(pt_id) {
printf("\nShutting down port %d...\n", pt_id);
fflush(stdout);
stop_port(pt_id);
@@ -1652,7 +1759,7 @@ check_all_ports_link_status(uint32_t port_mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- FOREACH_PORT(portid, ports) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
@@ -1689,6 +1796,70 @@ check_all_ports_link_status(uint32_t port_mask)
if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
print_flag = 1;
}
+
+ if (lsc_interrupt)
+ break;
+ }
+}
+
+static void
+rmv_event_callback(void *arg)
+{
+ struct rte_eth_dev *dev;
+ struct rte_devargs *da;
+ char name[32] = "";
+ uint8_t port_id = (intptr_t)arg;
+
+ RTE_ETH_VALID_PORTID_OR_RET(port_id);
+ dev = &rte_eth_devices[port_id];
+ da = dev->device->devargs;
+
+ stop_port(port_id);
+ close_port(port_id);
+ if (da->type == RTE_DEVTYPE_VIRTUAL)
+ snprintf(name, sizeof(name), "%s", da->virt.drv_name);
+ else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
+ rte_pci_device_name(&da->pci.addr, name, sizeof(name));
+ printf("removing device %s\n", name);
+ rte_eal_dev_detach(name);
+ dev->state = RTE_ETH_DEV_UNUSED;
+}
+
+/* This function is used by the interrupt thread */
+static void
+eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
+{
+ static const char * const event_desc[] = {
+ [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
+ [RTE_ETH_EVENT_INTR_LSC] = "LSC",
+ [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
+ [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
+ [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
+ [RTE_ETH_EVENT_MACSEC] = "MACsec",
+ [RTE_ETH_EVENT_INTR_RMV] = "device removal",
+ [RTE_ETH_EVENT_MAX] = NULL,
+ };
+
+ RTE_SET_USED(param);
+
+ if (type >= RTE_ETH_EVENT_MAX) {
+ fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
+ port_id, __func__, type);
+ fflush(stderr);
+ } else if (event_print_mask & (UINT32_C(1) << type)) {
+ printf("\nPort %" PRIu8 ": %s event\n", port_id,
+ event_desc[type]);
+ fflush(stdout);
+ }
+
+ switch (type) {
+ case RTE_ETH_EVENT_INTR_RMV:
+ if (rte_eal_alarm_set(100000,
+ rmv_event_callback, (void *)(intptr_t)port_id))
+ fprintf(stderr, "Could not set up deferred device removal\n");
+ break;
+ default:
+ break;
}
}
@@ -1817,7 +1988,7 @@ init_port_config(void)
portid_t pid;
struct rte_port *port;
- FOREACH_PORT(pid, ports) {
+ RTE_ETH_FOREACH_DEV(pid) {
port = &ports[pid];
port->dev_conf.rxmode = rx_mode;
port->dev_conf.fdir_conf = fdir_conf;
@@ -1829,24 +2000,13 @@ init_port_config(void)
port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
}
- if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
+ if (port->dcb_flag == 0) {
if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
else
port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
}
- if (port->dev_info.max_vfs != 0) {
- if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
- port->dev_conf.rxmode.mq_mode =
- ETH_MQ_RX_VMDQ_RSS;
- else
- port->dev_conf.rxmode.mq_mode =
- ETH_MQ_RX_NONE;
-
- port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
- }
-
rxtx_port_config(port);
rte_eth_macaddr_get(pid, &port->eth_addr);
@@ -1855,6 +2015,15 @@ init_port_config(void)
#ifdef RTE_NIC_BYPASS
rte_eth_dev_bypass_init(pid);
#endif
+
+ if (lsc_interrupt &&
+ (rte_eth_devices[pid].data->dev_flags &
+ RTE_ETH_DEV_INTR_LSC))
+ port->dev_conf.intr_conf.lsc = 1;
+ if (rmv_interrupt &&
+ (rte_eth_devices[pid].data->dev_flags &
+ RTE_ETH_DEV_INTR_RMV))
+ port->dev_conf.intr_conf.rmv = 1;
}
}
@@ -1907,7 +2076,7 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
- /* VMDQ+DCB RX and TX configrations */
+ /* VMDQ+DCB RX and TX configurations */
vmdq_rx_conf->enable_default_pool = 0;
vmdq_rx_conf->default_pool = 0;
vmdq_rx_conf->nb_queue_pools =
@@ -1938,9 +2107,9 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
rx_conf->nb_tcs = num_tcs;
tx_conf->nb_tcs = num_tcs;
- for (i = 0; i < num_tcs; i++) {
- rx_conf->dcb_tc[i] = i;
- tx_conf->dcb_tc[i] = i;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+ rx_conf->dcb_tc[i] = i % num_tcs;
+ tx_conf->dcb_tc[i] = i % num_tcs;
}
eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
@@ -2041,8 +2210,6 @@ init_port_dcb_config(portid_t pid,
static void
init_port(void)
{
- portid_t pid;
-
/* Configuration of Ethernet ports. */
ports = rte_zmalloc("testpmd: ports",
sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
@@ -2052,10 +2219,6 @@ init_port(void)
"rte_zmalloc(%d struct rte_port) failed\n",
RTE_MAX_ETHPORTS);
}
-
- /* enabled allocated ports */
- for (pid = 0; pid < nb_ports; pid++)
- ports[pid].enabled = 1;
}
static void
@@ -2075,6 +2238,9 @@ signal_handler(int signum)
/* uninitialize packet capture framework */
rte_pdump_uninit();
#endif
+#ifdef RTE_LIBRTE_LATENCY_STATS
+ rte_latencystats_uninit();
+#endif
force_quit();
/* exit with the expected status */
signal(signum, SIG_DFL);
@@ -2112,6 +2278,14 @@ main(int argc, char** argv)
rte_panic("Empty set of forwarding logical cores - check the "
"core mask supplied in the command parameters\n");
+ /* Bitrate/latency stats disabled by default */
+#ifdef RTE_LIBRTE_BITRATE
+ bitrate_enabled = 0;
+#endif
+#ifdef RTE_LIBRTE_LATENCY_STATS
+ latencystats_enabled = 0;
+#endif
+
argc -= diag;
argv += diag;
if (argc > 1)
@@ -2130,16 +2304,45 @@ main(int argc, char** argv)
rte_exit(EXIT_FAILURE, "Start ports failed\n");
/* set all ports to promiscuous mode by default */
- FOREACH_PORT(port_id, ports)
+ RTE_ETH_FOREACH_DEV(port_id)
rte_eth_promiscuous_enable(port_id);
+ /* Init metrics library */
+ rte_metrics_init(rte_socket_id());
+
+#ifdef RTE_LIBRTE_LATENCY_STATS
+ if (latencystats_enabled != 0) {
+ int ret = rte_latencystats_init(1, NULL);
+ if (ret)
+ printf("Warning: latencystats init()"
+ " returned error %d\n", ret);
+ printf("Latencystats running on lcore %d\n",
+ latencystats_lcore_id);
+ }
+#endif
+
+ /* Setup bitrate stats */
+#ifdef RTE_LIBRTE_BITRATE
+ if (bitrate_enabled != 0) {
+ bitrate_data = rte_stats_bitrate_create();
+ if (bitrate_data == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Could not allocate bitrate data.\n");
+ rte_stats_bitrate_reg(bitrate_data);
+ }
+#endif
+
#ifdef RTE_LIBRTE_CMDLINE
+ if (strlen(cmdline_filename) != 0)
+ cmdline_read_from_file(cmdline_filename);
+
if (interactive == 1) {
if (auto_start) {
printf("Start automatic packet forwarding\n");
start_packet_forwarding(0);
}
prompt();
+ pmd_test_exit();
} else
#endif
{
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 9c1e7039..e6c43ba0 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -143,12 +143,26 @@ struct fwd_stream {
#define TESTPMD_TX_OFFLOAD_INSERT_VLAN 0x0040
/** Insert double VLAN header in forward engine */
#define TESTPMD_TX_OFFLOAD_INSERT_QINQ 0x0080
+/** Offload MACsec in forward engine */
+#define TESTPMD_TX_OFFLOAD_MACSEC 0x0100
+
+/** Descriptor for a single flow. */
+struct port_flow {
+ size_t size; /**< Allocated space including data[]. */
+ struct port_flow *next; /**< Next flow in list. */
+ struct port_flow *tmp; /**< Temporary linking. */
+ uint32_t id; /**< Flow rule ID. */
+ struct rte_flow *flow; /**< Opaque flow object returned by PMD. */
+ struct rte_flow_attr attr; /**< Attributes. */
+ struct rte_flow_item *pattern; /**< Pattern. */
+ struct rte_flow_action *actions; /**< Actions. */
+ uint8_t data[]; /**< Storage for pattern/actions. */
+};
/**
* The data structure associated with each port.
*/
struct rte_port {
- uint8_t enabled; /**< Port enabled or not */
struct rte_eth_dev_info dev_info; /**< PCI info + driver name */
struct rte_eth_conf dev_conf; /**< Port configuration. */
struct ether_addr eth_addr; /**< Port ethernet address */
@@ -177,16 +191,9 @@ struct rte_port {
struct ether_addr *mc_addr_pool; /**< pool of multicast addrs */
uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
uint8_t slave_flag; /**< bonding slave port */
+ struct port_flow *flow_list; /**< Associated flows. */
};
-extern portid_t __rte_unused
-find_next_port(portid_t p, struct rte_port *ports, int size);
-
-#define FOREACH_PORT(p, ports) \
- for (p = find_next_port(0, ports, RTE_MAX_ETHPORTS); \
- p < RTE_MAX_ETHPORTS; \
- p = find_next_port(p + 1, ports, RTE_MAX_ETHPORTS))
-
/**
* The data structure associated with each forwarding logical core.
* The logical cores are internally numbered by a core index from 0 to
@@ -292,12 +299,17 @@ extern uint16_t nb_rx_queue_stats_mappings;
extern uint16_t verbose_level; /**< Drives messages being displayed, if any. */
extern uint8_t interactive;
extern uint8_t auto_start;
+extern char cmdline_filename[PATH_MAX]; /**< offline commands file */
extern uint8_t numa_support; /**< set by "--numa" parameter */
extern uint16_t port_topology; /**< set by "--port-topology" parameter */
extern uint8_t no_flush_rx; /**<set by "--no-flush-rx" parameter */
extern uint8_t mp_anon; /**< set by "--mp-anon" parameter */
extern uint8_t no_link_check; /**<set by "--disable-link-check" parameter */
extern volatile int test_done; /* stop packet forwarding when set to 1. */
+extern uint8_t lsc_interrupt; /**< disabled by "--no-lsc-interrupt" parameter */
+extern uint8_t rmv_interrupt; /**< disabled by "--no-rmv-interrupt" parameter */
+extern uint32_t event_print_mask;
+/**< set by "--print-event xxxx" and "--mask-event xxxx parameters */
#ifdef RTE_NIC_BYPASS
extern uint32_t bypass_timeout; /**< Store the NIC bypass watchdog timeout */
@@ -331,7 +343,8 @@ extern lcoreid_t nb_lcores; /**< Number of logical cores probed at init time. */
extern lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
extern lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
extern unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE];
-extern unsigned max_socket;
+extern unsigned int num_sockets;
+extern unsigned int socket_ids[RTE_MAX_NUMA_NODES];
/*
* Configuration of Ethernet ports:
@@ -365,6 +378,17 @@ extern enum dcb_queue_mapping_mode dcb_q_mapping;
extern uint16_t mbuf_data_size; /**< Mbuf data space size. */
extern uint32_t param_total_num_mbufs;
+
+#ifdef RTE_LIBRTE_LATENCY_STATS
+extern uint8_t latencystats_enabled;
+extern lcoreid_t latencystats_lcore_id;
+#endif
+
+#ifdef RTE_LIBRTE_BITRATE
+extern lcoreid_t bitrate_lcore_id;
+extern uint8_t bitrate_enabled;
+#endif
+
extern struct rte_fdir_conf fdir_conf;
/*
@@ -476,6 +500,7 @@ unsigned int parse_item_list(char* str, const char* item_name,
unsigned int max_items,
unsigned int *parsed_items, int check_unique_values);
void launch_args_parse(int argc, char** argv);
+void cmdline_read_from_file(const char *filename);
void prompt(void);
void prompt_exit(void);
void nic_stats_display(portid_t port_id);
@@ -484,6 +509,7 @@ void nic_xstats_display(portid_t port_id);
void nic_xstats_clear(portid_t port_id);
void nic_stats_mapping_display(portid_t port_id);
void port_infos_display(portid_t port_id);
+void port_offload_cap_display(portid_t port_id);
void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
void tx_queue_infos_display(portid_t port_idi, uint16_t queue_id);
void fwd_lcores_config_display(void);
@@ -504,6 +530,19 @@ void port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value);
void port_reg_display(portid_t port_id, uint32_t reg_off);
void port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t value);
+int port_flow_validate(portid_t port_id,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions);
+int port_flow_create(portid_t port_id,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions);
+int port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule);
+int port_flow_flush(portid_t port_id);
+int port_flow_query(portid_t port_id, uint32_t rule,
+ enum rte_flow_action_type action);
+void port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group);
void rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id);
void tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id);
@@ -571,8 +610,6 @@ void port_rss_reta_info(portid_t port_id,
uint16_t nb_entries);
void set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on);
-void set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id,
- uint64_t vf_mask, uint8_t on);
int set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate);
int set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate,
@@ -594,11 +631,15 @@ void mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr);
void mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr);
void port_dcb_info_display(uint8_t port_id);
+uint8_t *open_ddp_package_file(const char *file_path, uint32_t *size);
+int close_ddp_package_file(uint8_t *buf);
+
enum print_warning {
ENABLED_WARN = 0,
DISABLED_WARN
};
int port_id_is_invalid(portid_t port_id, enum print_warning warning);
+int new_socket_id(unsigned int socket_id);
/*
* Work-around of a compilation error with ICC on invocations of the
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 8513a062..8b1a2afc 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -68,6 +68,7 @@
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_string_fns.h>
+#include <rte_flow.h>
#include "testpmd.h"
@@ -214,6 +215,8 @@ pkt_burst_transmit(struct fwd_stream *fs)
ol_flags = PKT_TX_VLAN_PKT;
if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
ol_flags |= PKT_TX_QINQ_PKT;
+ if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
+ ol_flags |= PKT_TX_MACSEC;
for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
pkt = rte_mbuf_raw_alloc(mbp);
if (pkt == NULL) {
diff --git a/app/test/autotest_data.py b/app/test/autotest_data.py
deleted file mode 100644
index 9e8fd946..00000000
--- a/app/test/autotest_data.py
+++ /dev/null
@@ -1,470 +0,0 @@
-#!/usr/bin/python
-
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Test data for autotests
-
-from glob import glob
-from autotest_test_funcs import *
-
-# quick and dirty function to find out number of sockets
-def num_sockets():
- result = len(glob("/sys/devices/system/node/node*"))
- if result == 0:
- return 1
- return result
-
-# Assign given number to each socket
-# e.g. 32 becomes 32,32 or 32,32,32,32
-def per_sockets(num):
- return ",".join([str(num)] * num_sockets())
-
-# groups of tests that can be run in parallel
-# the grouping has been found largely empirically
-parallel_test_group_list = [
-
-{
- "Prefix": "group_1",
- "Memory" : per_sockets(8),
- "Tests" :
- [
- {
- "Name" : "Cycles autotest",
- "Command" : "cycles_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Timer autotest",
- "Command" : "timer_autotest",
- "Func" : timer_autotest,
- "Report" : None,
- },
- {
- "Name" : "Debug autotest",
- "Command" : "debug_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Errno autotest",
- "Command" : "errno_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Meter autotest",
- "Command" : "meter_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Common autotest",
- "Command" : "common_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Resource autotest",
- "Command" : "resource_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "group_2",
- "Memory" : "16",
- "Tests" :
- [
- {
- "Name" : "Memory autotest",
- "Command" : "memory_autotest",
- "Func" : memory_autotest,
- "Report" : None,
- },
- {
- "Name" : "Read/write lock autotest",
- "Command" : "rwlock_autotest",
- "Func" : rwlock_autotest,
- "Report" : None,
- },
- {
- "Name" : "Logs autotest",
- "Command" : "logs_autotest",
- "Func" : logs_autotest,
- "Report" : None,
- },
- {
- "Name" : "CPU flags autotest",
- "Command" : "cpuflags_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Version autotest",
- "Command" : "version_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "EAL filesystem autotest",
- "Command" : "eal_fs_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "EAL flags autotest",
- "Command" : "eal_flags_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Hash autotest",
- "Command" : "hash_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ],
-},
-{
- "Prefix": "group_3",
- "Memory" : per_sockets(512),
- "Tests" :
- [
- {
- "Name" : "LPM autotest",
- "Command" : "lpm_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "LPM6 autotest",
- "Command" : "lpm6_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Memcpy autotest",
- "Command" : "memcpy_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Memzone autotest",
- "Command" : "memzone_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "String autotest",
- "Command" : "string_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Alarm autotest",
- "Command" : "alarm_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "group_4",
- "Memory" : per_sockets(128),
- "Tests" :
- [
- {
- "Name" : "PCI autotest",
- "Command" : "pci_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Malloc autotest",
- "Command" : "malloc_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Multi-process autotest",
- "Command" : "multiprocess_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Mbuf autotest",
- "Command" : "mbuf_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Per-lcore autotest",
- "Command" : "per_lcore_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Ring autotest",
- "Command" : "ring_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "group_5",
- "Memory" : "32",
- "Tests" :
- [
- {
- "Name" : "Spinlock autotest",
- "Command" : "spinlock_autotest",
- "Func" : spinlock_autotest,
- "Report" : None,
- },
- {
- "Name" : "Byte order autotest",
- "Command" : "byteorder_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "TAILQ autotest",
- "Command" : "tailq_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Command-line autotest",
- "Command" : "cmdline_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Interrupts autotest",
- "Command" : "interrupt_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "group_6",
- "Memory" : per_sockets(512),
- "Tests" :
- [
- {
- "Name" : "Function reentrancy autotest",
- "Command" : "func_reentrancy_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Mempool autotest",
- "Command" : "mempool_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Atomics autotest",
- "Command" : "atomic_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Prefetch autotest",
- "Command" : "prefetch_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" :"Red autotest",
- "Command" : "red_autotest",
- "Func" :default_autotest,
- "Report" :None,
- },
- ]
-},
-{
- "Prefix" : "group_7",
- "Memory" : "64",
- "Tests" :
- [
- {
- "Name" : "PMD ring autotest",
- "Command" : "ring_pmd_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" : "Access list control autotest",
- "Command" : "acl_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- {
- "Name" :"Sched autotest",
- "Command" : "sched_autotest",
- "Func" :default_autotest,
- "Report" :None,
- },
- ]
-},
-]
-
-# tests that should not be run when any other tests are running
-non_parallel_test_group_list = [
-
-{
- "Prefix" : "kni",
- "Memory" : "512",
- "Tests" :
- [
- {
- "Name" : "KNI autotest",
- "Command" : "kni_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "mempool_perf",
- "Memory" : per_sockets(256),
- "Tests" :
- [
- {
- "Name" : "Mempool performance autotest",
- "Command" : "mempool_perf_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "memcpy_perf",
- "Memory" : per_sockets(512),
- "Tests" :
- [
- {
- "Name" : "Memcpy performance autotest",
- "Command" : "memcpy_perf_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "hash_perf",
- "Memory" : per_sockets(512),
- "Tests" :
- [
- {
- "Name" : "Hash performance autotest",
- "Command" : "hash_perf_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix" : "power",
- "Memory" : "16",
- "Tests" :
- [
- {
- "Name" : "Power autotest",
- "Command" : "power_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix" : "power_acpi_cpufreq",
- "Memory" : "16",
- "Tests" :
- [
- {
- "Name" : "Power ACPI cpufreq autotest",
- "Command" : "power_acpi_cpufreq_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix" : "power_kvm_vm",
- "Memory" : "16",
- "Tests" :
- [
- {
- "Name" : "Power KVM VM autotest",
- "Command" : "power_kvm_vm_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-{
- "Prefix": "timer_perf",
- "Memory" : per_sockets(512),
- "Tests" :
- [
- {
- "Name" : "Timer performance autotest",
- "Command" : "timer_perf_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-
-#
-# Please always make sure that ring_perf is the last test!
-#
-{
- "Prefix": "ring_perf",
- "Memory" : per_sockets(512),
- "Tests" :
- [
- {
- "Name" : "Ring performance autotest",
- "Command" : "ring_perf_autotest",
- "Func" : default_autotest,
- "Report" : None,
- },
- ]
-},
-]
diff --git a/app/test/autotest_runner.py b/app/test/autotest_runner.py
deleted file mode 100644
index 21d3be2c..00000000
--- a/app/test/autotest_runner.py
+++ /dev/null
@@ -1,422 +0,0 @@
-#!/usr/bin/python
-
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# The main logic behind running autotests in parallel
-
-import multiprocessing, subprocess, sys, pexpect, re, time, os, StringIO, csv
-
-# wait for prompt
-def wait_prompt(child):
- try:
- child.sendline()
- result = child.expect(["RTE>>", pexpect.TIMEOUT, pexpect.EOF],
- timeout = 120)
- except:
- return False
- if result == 0:
- return True
- else:
- return False
-
-# run a test group
-# each result tuple in results list consists of:
-# result value (0 or -1)
-# result string
-# test name
-# total test run time (double)
-# raw test log
-# test report (if not available, should be None)
-#
-# this function needs to be outside AutotestRunner class
-# because otherwise Pool won't work (or rather it will require
-# quite a bit of effort to make it work).
-def run_test_group(cmdline, test_group):
- results = []
- child = None
- start_time = time.time()
- startuplog = None
-
- # run test app
- try:
- # prepare logging of init
- startuplog = StringIO.StringIO()
-
- print >>startuplog, "\n%s %s\n" % ("="*20, test_group["Prefix"])
- print >>startuplog, "\ncmdline=%s" % cmdline
-
- child = pexpect.spawn(cmdline, logfile=startuplog)
-
- # wait for target to boot
- if not wait_prompt(child):
- child.close()
-
- results.append((-1, "Fail [No prompt]", "Start %s" % test_group["Prefix"],
- time.time() - start_time, startuplog.getvalue(), None))
-
- # mark all tests as failed
- for test in test_group["Tests"]:
- results.append((-1, "Fail [No prompt]", test["Name"],
- time.time() - start_time, "", None))
- # exit test
- return results
-
- except:
- results.append((-1, "Fail [Can't run]", "Start %s" % test_group["Prefix"],
- time.time() - start_time, startuplog.getvalue(), None))
-
- # mark all tests as failed
- for t in test_group["Tests"]:
- results.append((-1, "Fail [Can't run]", t["Name"],
- time.time() - start_time, "", None))
- # exit test
- return results
-
- # startup was successful
- results.append((0, "Success", "Start %s" % test_group["Prefix"],
- time.time() - start_time, startuplog.getvalue(), None))
-
- # parse the binary for available test commands
- binary = cmdline.split()[0]
- stripped = 'not stripped' not in subprocess.check_output(['file', binary])
- if not stripped:
- symbols = subprocess.check_output(['nm', binary]).decode('utf-8')
- avail_cmds = re.findall('test_register_(\w+)', symbols)
-
- # run all tests in test group
- for test in test_group["Tests"]:
-
- # create log buffer for each test
- # in multiprocessing environment, the logging would be
- # interleaved and will create a mess, hence the buffering
- logfile = StringIO.StringIO()
- child.logfile = logfile
-
- result = ()
-
- # make a note when the test started
- start_time = time.time()
-
- try:
- # print test name to log buffer
- print >>logfile, "\n%s %s\n" % ("-"*20, test["Name"])
-
- # run test function associated with the test
- if stripped or test["Command"] in avail_cmds:
- result = test["Func"](child, test["Command"])
- else:
- result = (0, "Skipped [Not Available]")
-
- # make a note when the test was finished
- end_time = time.time()
-
- # append test data to the result tuple
- result += (test["Name"], end_time - start_time,
- logfile.getvalue())
-
- # call report function, if any defined, and supply it with
- # target and complete log for test run
- if test["Report"]:
- report = test["Report"](self.target, log)
-
- # append report to results tuple
- result += (report,)
- else:
- # report is None
- result += (None,)
- except:
- # make a note when the test crashed
- end_time = time.time()
-
- # mark test as failed
- result = (-1, "Fail [Crash]", test["Name"],
- end_time - start_time, logfile.getvalue(), None)
- finally:
- # append the results to the results list
- results.append(result)
-
- # regardless of whether test has crashed, try quitting it
- try:
- child.sendline("quit")
- child.close()
- # if the test crashed, just do nothing instead
- except:
- # nop
- pass
-
- # return test results
- return results
-
-
-
-
-
-# class representing an instance of autotests run
-class AutotestRunner:
- cmdline = ""
- parallel_test_groups = []
- non_parallel_test_groups = []
- logfile = None
- csvwriter = None
- target = ""
- start = None
- n_tests = 0
- fails = 0
- log_buffers = []
- blacklist = []
- whitelist = []
-
-
- def __init__(self, cmdline, target, blacklist, whitelist):
- self.cmdline = cmdline
- self.target = target
- self.blacklist = blacklist
- self.whitelist = whitelist
-
- # log file filename
- logfile = "%s.log" % target
- csvfile = "%s.csv" % target
-
- self.logfile = open(logfile, "w")
- csvfile = open(csvfile, "w")
- self.csvwriter = csv.writer(csvfile)
-
- # prepare results table
- self.csvwriter.writerow(["test_name","test_result","result_str"])
-
-
-
- # set up cmdline string
- def __get_cmdline(self, test):
- cmdline = self.cmdline
-
- # append memory limitations for each test
- # otherwise tests won't run in parallel
- if not "i686" in self.target:
- cmdline += " --socket-mem=%s"% test["Memory"]
- else:
- # affinitize startup so that tests don't fail on i686
- cmdline = "taskset 1 " + cmdline
- cmdline += " -m " + str(sum(map(int,test["Memory"].split(","))))
-
- # set group prefix for autotest group
- # otherwise they won't run in parallel
- cmdline += " --file-prefix=%s"% test["Prefix"]
-
- return cmdline
-
-
-
- def add_parallel_test_group(self,test_group):
- self.parallel_test_groups.append(test_group)
-
- def add_non_parallel_test_group(self,test_group):
- self.non_parallel_test_groups.append(test_group)
-
-
- def __process_results(self, results):
- # this iterates over individual test results
- for i, result in enumerate(results):
-
- # increase total number of tests that were run
- # do not include "start" test
- if i > 0:
- self.n_tests += 1
-
- # unpack result tuple
- test_result, result_str, test_name, \
- test_time, log, report = result
-
- # get total run time
- cur_time = time.time()
- total_time = int(cur_time - self.start)
-
- # print results, test run time and total time since start
- print ("%s:" % test_name).ljust(30),
- print result_str.ljust(29),
- print "[%02dm %02ds]" % (test_time / 60, test_time % 60),
-
- # don't print out total time every line, it's the same anyway
- if i == len(results) - 1:
- print "[%02dm %02ds]" % (total_time / 60, total_time % 60)
- else:
- print ""
-
- # if test failed and it wasn't a "start" test
- if test_result < 0 and not i == 0:
- self.fails += 1
-
- # collect logs
- self.log_buffers.append(log)
-
- # create report if it exists
- if report:
- try:
- f = open("%s_%s_report.rst" % (self.target,test_name), "w")
- except IOError:
- print "Report for %s could not be created!" % test_name
- else:
- with f:
- f.write(report)
-
- # write test result to CSV file
- if i != 0:
- self.csvwriter.writerow([test_name, test_result, result_str])
-
-
-
-
- # this function iterates over test groups and removes each
- # test that is not in whitelist/blacklist
- def __filter_groups(self, test_groups):
- groups_to_remove = []
-
- # filter out tests from parallel test groups
- for i, test_group in enumerate(test_groups):
-
- # iterate over a copy so that we could safely delete individual tests
- for test in test_group["Tests"][:]:
- test_id = test["Command"]
-
- # dump tests are specified in full e.g. "Dump_mempool"
- if "_autotest" in test_id:
- test_id = test_id[:-len("_autotest")]
-
- # filter out blacklisted/whitelisted tests
- if self.blacklist and test_id in self.blacklist:
- test_group["Tests"].remove(test)
- continue
- if self.whitelist and test_id not in self.whitelist:
- test_group["Tests"].remove(test)
- continue
-
- # modify or remove original group
- if len(test_group["Tests"]) > 0:
- test_groups[i] = test_group
- else:
- # remember which groups should be deleted
- # put the numbers backwards so that we start
- # deleting from the end, not from the beginning
- groups_to_remove.insert(0, i)
-
- # remove test groups that need to be removed
- for i in groups_to_remove:
- del test_groups[i]
-
- return test_groups
-
-
-
- # iterate over test groups and run tests associated with them
- def run_all_tests(self):
- # filter groups
- self.parallel_test_groups = \
- self.__filter_groups(self.parallel_test_groups)
- self.non_parallel_test_groups = \
- self.__filter_groups(self.non_parallel_test_groups)
-
- # create a pool of worker threads
- pool = multiprocessing.Pool(processes=1)
-
- results = []
-
- # whatever happens, try to save as much logs as possible
- try:
-
- # create table header
- print ""
- print "Test name".ljust(30),
- print "Test result".ljust(29),
- print "Test".center(9),
- print "Total".center(9)
- print "=" * 80
-
- # make a note of tests start time
- self.start = time.time()
-
- # assign worker threads to run test groups
- for test_group in self.parallel_test_groups:
- result = pool.apply_async(run_test_group,
- [self.__get_cmdline(test_group), test_group])
- results.append(result)
-
- # iterate while we have group execution results to get
- while len(results) > 0:
-
- # iterate over a copy to be able to safely delete results
- # this iterates over a list of group results
- for group_result in results[:]:
-
- # if the thread hasn't finished yet, continue
- if not group_result.ready():
- continue
-
- res = group_result.get()
-
- self.__process_results(res)
-
- # remove result from results list once we're done with it
- results.remove(group_result)
-
- # run non_parallel tests. they are run one by one, synchronously
- for test_group in self.non_parallel_test_groups:
- group_result = run_test_group(self.__get_cmdline(test_group), test_group)
-
- self.__process_results(group_result)
-
- # get total run time
- cur_time = time.time()
- total_time = int(cur_time - self.start)
-
- # print out summary
- print "=" * 80
- print "Total run time: %02dm %02ds" % (total_time / 60, total_time % 60)
- if self.fails != 0:
- print "Number of failed tests: %s" % str(self.fails)
-
- # write summary to logfile
- self.logfile.write("Summary\n")
- self.logfile.write("Target: ".ljust(15) + "%s\n" % self.target)
- self.logfile.write("Tests: ".ljust(15) + "%i\n" % self.n_tests)
- self.logfile.write("Failed tests: ".ljust(15) + "%i\n" % self.fails)
- except:
- print "Exception occured"
- print sys.exc_info()
- self.fails = 1
-
- # drop logs from all executions to a logfile
- for buf in self.log_buffers:
- self.logfile.write(buf.replace("\r",""))
-
- log_buffers = []
-
- return self.fails
diff --git a/app/test/autotest_test_funcs.py b/app/test/autotest_test_funcs.py
deleted file mode 100644
index 14cffd01..00000000
--- a/app/test/autotest_test_funcs.py
+++ /dev/null
@@ -1,289 +0,0 @@
-#!/usr/bin/python
-
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Test functions
-
-import sys, pexpect, time, os, re
-
-# default autotest, used to run most tests
-# waits for "Test OK"
-def default_autotest(child, test_name):
- child.sendline(test_name)
- result = child.expect(["Test OK", "Test Failed",
- "Command not found", pexpect.TIMEOUT], timeout = 900)
- if result == 1:
- return -1, "Fail"
- elif result == 2:
- return -1, "Fail [Not found]"
- elif result == 3:
- return -1, "Fail [Timeout]"
- return 0, "Success"
-
-# autotest used to run dump commands
-# just fires the command
-def dump_autotest(child, test_name):
- child.sendline(test_name)
- return 0, "Success"
-
-# memory autotest
-# reads output and waits for Test OK
-def memory_autotest(child, test_name):
- child.sendline(test_name)
- regexp = "phys:0x[0-9a-f]*, len:([0-9]*), virt:0x[0-9a-f]*, socket_id:[0-9]*"
- index = child.expect([regexp, pexpect.TIMEOUT], timeout = 180)
- if index != 0:
- return -1, "Fail [Timeout]"
- size = int(child.match.groups()[0], 16)
- if size <= 0:
- return -1, "Fail [Bad size]"
- index = child.expect(["Test OK", "Test Failed",
- pexpect.TIMEOUT], timeout = 10)
- if index == 1:
- return -1, "Fail"
- elif index == 2:
- return -1, "Fail [Timeout]"
- return 0, "Success"
-
-def spinlock_autotest(child, test_name):
- i = 0
- ir = 0
- child.sendline(test_name)
- while True:
- index = child.expect(["Test OK",
- "Test Failed",
- "Hello from core ([0-9]*) !",
- "Hello from within recursive locks from ([0-9]*) !",
- pexpect.TIMEOUT], timeout = 5)
- # ok
- if index == 0:
- break
-
- # message, check ordering
- elif index == 2:
- if int(child.match.groups()[0]) < i:
- return -1, "Fail [Bad order]"
- i = int(child.match.groups()[0])
- elif index == 3:
- if int(child.match.groups()[0]) < ir:
- return -1, "Fail [Bad order]"
- ir = int(child.match.groups()[0])
-
- # fail
- elif index == 4:
- return -1, "Fail [Timeout]"
- elif index == 1:
- return -1, "Fail"
-
- return 0, "Success"
-
-def rwlock_autotest(child, test_name):
- i = 0
- child.sendline(test_name)
- while True:
- index = child.expect(["Test OK",
- "Test Failed",
- "Hello from core ([0-9]*) !",
- "Global write lock taken on master core ([0-9]*)",
- pexpect.TIMEOUT], timeout = 10)
- # ok
- if index == 0:
- if i != 0xffff:
- return -1, "Fail [Message is missing]"
- break
-
- # message, check ordering
- elif index == 2:
- if int(child.match.groups()[0]) < i:
- return -1, "Fail [Bad order]"
- i = int(child.match.groups()[0])
-
- # must be the last message, check ordering
- elif index == 3:
- i = 0xffff
-
- elif index == 4:
- return -1, "Fail [Timeout]"
-
- # fail
- else:
- return -1, "Fail"
-
- return 0, "Success"
-
-def logs_autotest(child, test_name):
- i = 0
- child.sendline(test_name)
-
- log_list = [
- "TESTAPP1: error message",
- "TESTAPP1: critical message",
- "TESTAPP2: critical message",
- "TESTAPP1: error message",
- ]
-
- for log_msg in log_list:
- index = child.expect([log_msg,
- "Test OK",
- "Test Failed",
- pexpect.TIMEOUT], timeout = 10)
-
- if index == 3:
- return -1, "Fail [Timeout]"
- # not ok
- elif index != 0:
- return -1, "Fail"
-
- index = child.expect(["Test OK",
- "Test Failed",
- pexpect.TIMEOUT], timeout = 10)
-
- return 0, "Success"
-
-def timer_autotest(child, test_name):
- i = 0
- child.sendline(test_name)
-
- index = child.expect(["Start timer stress tests",
- "Test Failed",
- pexpect.TIMEOUT], timeout = 5)
-
- if index == 1:
- return -1, "Fail"
- elif index == 2:
- return -1, "Fail [Timeout]"
-
- index = child.expect(["Start timer stress tests 2",
- "Test Failed",
- pexpect.TIMEOUT], timeout = 5)
-
- if index == 1:
- return -1, "Fail"
- elif index == 2:
- return -1, "Fail [Timeout]"
-
- index = child.expect(["Start timer basic tests",
- "Test Failed",
- pexpect.TIMEOUT], timeout = 5)
-
- if index == 1:
- return -1, "Fail"
- elif index == 2:
- return -1, "Fail [Timeout]"
-
- prev_lcore_timer1 = -1
-
- lcore_tim0 = -1
- lcore_tim1 = -1
- lcore_tim2 = -1
- lcore_tim3 = -1
-
- while True:
- index = child.expect(["TESTTIMER: ([0-9]*): callback id=([0-9]*) count=([0-9]*) on core ([0-9]*)",
- "Test OK",
- "Test Failed",
- pexpect.TIMEOUT], timeout = 10)
-
- if index == 1:
- break
-
- if index == 2:
- return -1, "Fail"
- elif index == 3:
- return -1, "Fail [Timeout]"
-
- try:
- t = int(child.match.groups()[0])
- id = int(child.match.groups()[1])
- cnt = int(child.match.groups()[2])
- lcore = int(child.match.groups()[3])
- except:
- return -1, "Fail [Cannot parse]"
-
- # timer0 always expires on the same core when cnt < 20
- if id == 0:
- if lcore_tim0 == -1:
- lcore_tim0 = lcore
- elif lcore != lcore_tim0 and cnt < 20:
- return -1, "Fail [lcore != lcore_tim0 (%d, %d)]"%(lcore, lcore_tim0)
- if cnt > 21:
- return -1, "Fail [tim0 cnt > 21]"
-
- # timer1 each time expires on a different core
- if id == 1:
- if lcore == lcore_tim1:
- return -1, "Fail [lcore == lcore_tim1 (%d, %d)]"%(lcore, lcore_tim1)
- lcore_tim1 = lcore
- if cnt > 10:
- return -1, "Fail [tim1 cnt > 30]"
-
- # timer0 always expires on the same core
- if id == 2:
- if lcore_tim2 == -1:
- lcore_tim2 = lcore
- elif lcore != lcore_tim2:
- return -1, "Fail [lcore != lcore_tim2 (%d, %d)]"%(lcore, lcore_tim2)
- if cnt > 30:
- return -1, "Fail [tim2 cnt > 30]"
-
- # timer0 always expires on the same core
- if id == 3:
- if lcore_tim3 == -1:
- lcore_tim3 = lcore
- elif lcore != lcore_tim3:
- return -1, "Fail [lcore_tim3 changed (%d -> %d)]"%(lcore, lcore_tim3)
- if cnt > 30:
- return -1, "Fail [tim3 cnt > 30]"
-
- # must be 2 different cores
- if lcore_tim0 == lcore_tim3:
- return -1, "Fail [lcore_tim0 (%d) == lcore_tim3 (%d)]"%(lcore_tim0, lcore_tim3)
-
- return 0, "Success"
-
-def ring_autotest(child, test_name):
- child.sendline(test_name)
- index = child.expect(["Test OK", "Test Failed",
- pexpect.TIMEOUT], timeout = 2)
- if index == 1:
- return -1, "Fail"
- elif index == 2:
- return -1, "Fail [Timeout]"
-
- child.sendline("set_watermark test 100")
- child.sendline("dump_ring test")
- index = child.expect([" watermark=100",
- pexpect.TIMEOUT], timeout = 1)
- if index != 0:
- return -1, "Fail [Bad watermark]"
-
- return 0, "Success"
diff --git a/app/test/test_cryptodev_gcm_test_vectors.h b/app/test/test_cryptodev_gcm_test_vectors.h
deleted file mode 100644
index b404242b..00000000
--- a/app/test/test_cryptodev_gcm_test_vectors.h
+++ /dev/null
@@ -1,1238 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
-#define TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
-
-#define GMAC_LARGE_PLAINTEXT_LENGTH 65376
-
-struct gcm_test_data {
- struct {
- uint8_t data[64];
- unsigned len;
- } key;
-
- struct {
- uint8_t data[64] __rte_aligned(16);
- unsigned len;
- } iv;
-
- struct {
- uint8_t data[64];
- unsigned len;
- } aad;
-
- struct {
- uint8_t data[1024];
- unsigned len;
- } plaintext;
-
- struct {
- uint8_t data[1024];
- unsigned len;
- } ciphertext;
-
- struct {
- uint8_t data[16];
- unsigned len;
- } auth_tag;
-
-};
-
-struct gmac_test_data {
- struct {
- uint8_t data[64];
- unsigned len;
- } key;
-
- struct {
- uint8_t data[64] __rte_aligned(16);
- unsigned len;
- } iv;
-
- struct {
- uint8_t *data;
- unsigned len;
- } aad;
-
- struct {
- uint8_t *data;
- unsigned len;
- } plaintext;
-
- struct {
- uint8_t data[16];
- unsigned len;
- } gmac_tag;
-
-};
-
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_1 = {
- .key = {
- .data = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .len = 16
- },
- .iv = {
- .data = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00 },
- .len = 12
- },
- .aad = {
- .data = { 0 },
- .len = 0
- },
- .plaintext = {
- .data = {
- 0x00 },
- .len = 0
- },
- .ciphertext = {
- .data = {
- 0x00
- },
- .len = 0
- },
- .auth_tag = {
- .data = {
- 0x58, 0xe2, 0xfc, 0xce, 0xfa, 0x7e, 0x30, 0x61,
- 0x36, 0x7f, 0x1d, 0x57, 0xa4, 0xe7, 0x45, 0x5a },
- .len = 16
- }
-};
-
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_2 = {
- .key = {
- .data = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .len = 16
- },
- .iv = {
- .data = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00 },
- .len = 12
- },
- .aad = {
- .data = { 0 },
- .len = 0
- },
- .plaintext = {
- .data = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .len = 16
- },
- .ciphertext = {
- .data = {
- 0x03, 0x88, 0xda, 0xce, 0x60, 0xb6, 0xa3, 0x92,
- 0xf3, 0x28, 0xc2, 0xb9, 0x71, 0xb2, 0xfe, 0x78 },
- .len = 16
- },
- .auth_tag = {
- .data = {
- 0xab, 0x6e, 0x47, 0xd4, 0x2c, 0xec, 0x13, 0xbd,
- 0xf5, 0x3a, 0x67, 0xb2, 0x12, 0x57, 0xbd, 0xdf },
- .len = 16
- }
-};
-
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_3 = {
- .key = {
- .data = {
- 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
- 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
- .len = 16
- },
- .iv = {
- .data = {
- 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
- 0xde, 0xca, 0xf8, 0x88 },
- .len = 12
- },
- .aad = {
- .data = { 0 },
- .len = 0
- },
- .plaintext = {
- .data = {
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
- .len = 64
- },
- .ciphertext = {
- .data = {
- 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
- 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
- 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
- 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
- 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
- 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
- 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
- 0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85
- },
- .len = 64
- },
- .auth_tag = {
- .data = {
- 0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6,
- 0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 },
- .len = 16
- }
-};
-
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_4 = {
- .key = {
- .data = {
- 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
- 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
- },
- .len = 16
- },
- .iv = {
- .data = {
- 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
- 0xde, 0xca, 0xf8, 0x88 },
- .len = 12
- },
- .aad = {
- .data = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .len = 8
- },
- .plaintext = {
- .data = {
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39
- },
- .len = 60
- },
- .ciphertext = {
- .data = {
- 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
- 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
- 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
- 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
- 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
- 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
- 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
- 0x3d, 0x58, 0xe0, 0x91
- },
- .len = 60
- },
- .auth_tag = {
- .data = {
- 0xA2, 0xA4, 0x35, 0x75, 0xDC, 0xB0, 0x57, 0x74,
- 0x07, 0x02, 0x30, 0xC2, 0xE7, 0x52, 0x02, 0x00
- },
- .len = 16
- }
-
-};
-
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_5 = {
- .key = {
- .data = {
- 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
- 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
- },
- .len = 16
- },
- .iv = {
- .data = {
- 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
- 0xde, 0xca, 0xf8, 0x88 },
- .len = 12
- },
- .aad = {
- .data = {
- 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef },
- .len = 8
- },
- .plaintext = {
- .data = {
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39
- },
- .len = 60
- },
- .ciphertext = {
- .data = {
- 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
- 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
- 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
- 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
- 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
- 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
- 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
- 0x3d, 0x58, 0xe0, 0x91
- },
- .len = 60
- },
- .auth_tag = {
- .data = {
- 0xC5, 0x2D, 0xFB, 0x54, 0xAF, 0xBB, 0x07, 0xA1,
- 0x9A, 0xFF, 0xBE, 0xE0, 0x61, 0x4C, 0xE7, 0xA5
- },
- .len = 16
- }
-
-};
-
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_6 = {
- .key = {
- .data = {
- 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
- 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
- },
- .len = 16
- },
- .iv = {
- .data = {
- 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
- 0xde, 0xca, 0xf8, 0x88
- },
- .len = 12
- },
- .aad = {
- .data = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00
- },
- .len = 12
- },
- .plaintext = {
- .data = {
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39
- },
- .len = 60
- },
- .ciphertext = {
- .data = {
- 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
- 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
- 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
- 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
- 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
- 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
- 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
- 0x3d, 0x58, 0xe0, 0x91
- },
- .len = 60
- },
- .auth_tag = {
- .data = {
- 0x74, 0xFC, 0xFA, 0x29, 0x3E, 0x60, 0xCC, 0x66,
- 0x09, 0xD6, 0xFD, 0x00, 0xC8, 0x86, 0xD5, 0x42
- },
- .len = 16
- }
-};
-
-/** AES-128 Test Vectors */
-static const struct gcm_test_data gcm_test_case_7 = {
- .key = {
- .data = {
- 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
- 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
- },
- .len = 16
- },
- .iv = {
- .data = {
- 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
- 0xde, 0xca, 0xf8, 0x88
- },
- .len = 12
- },
- .aad = {
- .data = {
- 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
- 0xfe, 0xed, 0xfa, 0xce
- },
- .len = 12
- },
- .plaintext = {
- .data = {
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39
- },
- .len = 60
- },
- .ciphertext = {
- .data = {
- 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
- 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
- 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
- 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
- 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
- 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
- 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
- 0x3d, 0x58, 0xe0, 0x91
- },
- .len = 60
- },
- .auth_tag = {
- .data = {
- 0xE9, 0xE4, 0xAB, 0x76, 0xB7, 0xFF, 0xEA, 0xDC,
- 0x69, 0x79, 0x38, 0xA2, 0x0D, 0xCA, 0xF5, 0x92
- },
- .len = 16
- }
-};
-
-/** GMAC Test Vectors */
-static uint8_t gmac_plaintext[GMAC_LARGE_PLAINTEXT_LENGTH] = {
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
- 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
- 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
- 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
- 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
- 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
- 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
- 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
- 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
- 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
- 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
- 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10
-};
-
-static const struct gmac_test_data gmac_test_case_1 = {
- .key = {
- .data = {
- 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
- 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
- },
- .len = 16
- },
- .iv = {
- .data = {
- 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
- 0xde, 0xca, 0xf8, 0x88 },
- .len = 12
- },
- .aad = {
- .data = gmac_plaintext,
- .len = 160
- },
- .plaintext = {
- .data = NULL,
- .len = 0
- },
- .gmac_tag = {
- .data = {
- 0x4C, 0x0C, 0x4F, 0x47, 0x2D, 0x78, 0xF6, 0xD8,
- 0x03, 0x53, 0x20, 0x2F, 0x1A, 0xDF, 0x90, 0xD0
- },
- .len = 16
- },
-};
-
-static const struct gmac_test_data gmac_test_case_2 = {
- .key = {
- .data = {
- 0xaa, 0x74, 0x0a, 0xbf, 0xad, 0xcd, 0xa7, 0x79,
- 0x22, 0x0d, 0x3b, 0x40, 0x6c, 0x5d, 0x7e, 0xc0,
- 0x9a, 0x77, 0xfe, 0x9d, 0x94, 0x10, 0x45, 0x39,
- },
- .len = 24
- },
- .iv = {
- .data = {
- 0xab, 0x22, 0x65, 0xb4, 0xc1, 0x68, 0x95,
- 0x55, 0x61, 0xf0, 0x43, 0x15, },
- .len = 12
- },
- .aad = {
- .data = gmac_plaintext,
- .len = 80
- },
- .plaintext = {
- .data = NULL,
- .len = 0
- },
- .gmac_tag = {
- .data = {
- 0xCF, 0x82, 0x80, 0x64, 0x02, 0x46, 0xF4, 0xFB,
- 0x33, 0xAE, 0x1D, 0x90, 0xEA, 0x48, 0x83, 0xDB
- },
- .len = 16
- },
-};
-
-static const struct gmac_test_data gmac_test_case_3 = {
- .key = {
- .data = {
- 0xb5, 0x48, 0xe4, 0x93, 0x4f, 0x5c, 0x64, 0xd3,
- 0xc0, 0xf0, 0xb7, 0x8f, 0x7b, 0x4d, 0x88, 0x24,
- 0xaa, 0xc4, 0x6b, 0x3c, 0x8d, 0x2c, 0xc3, 0x5e,
- 0xe4, 0xbf, 0xb2, 0x54, 0xe4, 0xfc, 0xba, 0xf7,
- },
- .len = 32
- },
- .iv = {
- .data = {
- 0x2e, 0xed, 0xe1, 0xdc, 0x64, 0x47, 0xc7,
- 0xaf, 0xc4, 0x41, 0x53, 0x58,
- },
- .len = 12
- },
- .aad = {
- .data = gmac_plaintext,
- .len = 65
- },
- .plaintext = {
- .data = NULL,
- .len = 0
- },
- .gmac_tag = {
- .data = {
- 0x77, 0x46, 0x0D, 0x6F, 0xB1, 0x87, 0xDB, 0xA9,
- 0x46, 0xAD, 0xCD, 0xFB, 0xB7, 0xF9, 0x13, 0xA1
- },
- .len = 16
- },
-};
-
-/******* GCM PERF VECTORS ***********/
-
-struct cryptodev_perf_test_data {
- struct {
- uint8_t data[64];
- unsigned len;
- } key;
-
- struct {
- uint8_t data[64] __rte_aligned(16);
- unsigned len;
- } iv;
-
- struct {
- uint8_t data[64];
- unsigned len;
- } aad;
-
- struct {
- uint8_t data[2048];
- unsigned len;
- } plaintext;
-
- struct {
- uint8_t data[2048];
- unsigned len;
- } ciphertext;
-
- struct {
- uint8_t data[16];
- unsigned len;
- } auth_tag;
-
- struct {
- uint32_t size;
- uint8_t data[16];
- unsigned len;
- } auth_tags[7];
-
-};
-
-/* 2048B */
-static const struct cryptodev_perf_test_data AES_GCM_128_12IV_0AAD = {
- .key = {
- .data = {
- 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
- 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
- .len = 16
- },
- .iv = {
- .data = {
- 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
- 0xde, 0xca, 0xf8, 0x88 },
- .len = 12
- },
- .aad = {
- .data = { 0 },
- .len = 0
- },
- .plaintext = {
- .data = {
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
- 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
- 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
- 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
- 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
- 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
- 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
- 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
- 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55
- },
- .len = 2048
- },
- .ciphertext = {
- .data = {
- 0x42, 0x83, 0x1E, 0xC2, 0x21, 0x77, 0x74, 0x24,
- 0x4B, 0x72, 0x21, 0xB7, 0x84, 0xD0, 0xD4, 0x9C,
- 0xE3, 0xAA, 0x21, 0x2F, 0x2C, 0x02, 0xA4, 0xE0,
- 0x35, 0xC1, 0x7E, 0x23, 0x29, 0xAC, 0xA1, 0x2E,
- 0x21, 0xD5, 0x14, 0xB2, 0x54, 0x66, 0x93, 0x1C,
- 0x7D, 0x8F, 0x6A, 0x5A, 0xAC, 0x84, 0xAA, 0x05,
- 0x1B, 0xA3, 0x0B, 0x39, 0x6A, 0x0A, 0xAC, 0x97,
- 0x3D, 0x58, 0xE0, 0x91, 0x47, 0x3F, 0x59, 0x85,
- 0x04, 0x99, 0x55, 0xE1, 0x36, 0x76, 0xB7, 0x14,
- 0x1D, 0xF0, 0xF6, 0x8C, 0x65, 0xD5, 0xAD, 0xFB,
- 0x90, 0x7F, 0x5D, 0xA2, 0xD6, 0xFD, 0xD0, 0xE5,
- 0x0D, 0x9B, 0x68, 0x21, 0x49, 0x42, 0x6E, 0x13,
- 0xEC, 0x22, 0x50, 0x2A, 0x30, 0x47, 0x49, 0xA1,
- 0x7F, 0xC3, 0x09, 0xE0, 0x56, 0x91, 0xC4, 0x54,
- 0x70, 0xD7, 0x19, 0x40, 0xCA, 0x6B, 0x65, 0x27,
- 0x3E, 0xE9, 0xD1, 0x0F, 0x1C, 0xB5, 0x45, 0x0D,
- 0x27, 0xE7, 0xCF, 0x94, 0x10, 0xBF, 0xA2, 0xFA,
- 0x86, 0x20, 0x3F, 0x6E, 0xE9, 0x95, 0x03, 0x5A,
- 0x46, 0x11, 0x75, 0xD5, 0x37, 0x71, 0x7F, 0xE0,
- 0xBC, 0x9F, 0xC8, 0xE9, 0xB1, 0x08, 0x2C, 0x59,
- 0x6E, 0x51, 0x4A, 0x83, 0x38, 0xC1, 0xED, 0xE2,
- 0x2E, 0x88, 0x90, 0xA5, 0x7D, 0xA4, 0x93, 0x9A,
- 0x30, 0xD6, 0x96, 0x34, 0x0F, 0xC4, 0xD1, 0x7E,
- 0xC9, 0x8F, 0xC5, 0xBB, 0x80, 0x50, 0x85, 0x75,
- 0x7D, 0x82, 0x36, 0xDB, 0x62, 0x15, 0xAF, 0x4B,
- 0x0A, 0x9D, 0xCD, 0x64, 0x00, 0xAB, 0x88, 0x28,
- 0xA8, 0x35, 0x17, 0x70, 0x6F, 0x47, 0x44, 0xCD,
- 0x65, 0xAE, 0xD5, 0x05, 0x0A, 0xA8, 0x2F, 0x48,
- 0xAC, 0xA1, 0x72, 0x64, 0x1C, 0x7E, 0xD3, 0xF5,
- 0xD8, 0x4E, 0x73, 0x17, 0x0C, 0xE5, 0x9F, 0xB6,
- 0x00, 0xFA, 0xD7, 0x2C, 0x3D, 0x6A, 0x10, 0x47,
- 0x7C, 0xF2, 0x6B, 0x13, 0x10, 0x8A, 0x76, 0x39,
- 0xF8, 0x50, 0x33, 0xAC, 0x08, 0x1D, 0xA3, 0x48,
- 0xE1, 0xD0, 0x05, 0x49, 0xB7, 0x76, 0x03, 0x72,
- 0x07, 0xC5, 0xD3, 0x08, 0x79, 0x38, 0x66, 0xC1,
- 0x52, 0xAF, 0x83, 0xCD, 0xF3, 0x86, 0x62, 0xBF,
- 0x92, 0x24, 0x97, 0xBD, 0x5D, 0x7D, 0x81, 0x56,
- 0x4C, 0xF3, 0xD2, 0x60, 0xC2, 0xDE, 0x61, 0xC1,
- 0x39, 0x61, 0xDA, 0x07, 0x50, 0xC7, 0x98, 0x63,
- 0x7E, 0xDD, 0x54, 0xCA, 0xDE, 0x12, 0xD2, 0xA8,
- 0x19, 0x08, 0x6E, 0xF9, 0xFA, 0x6F, 0x58, 0x97,
- 0xD4, 0x0B, 0x5C, 0x5B, 0xE5, 0x30, 0xE5, 0x4C,
- 0x0E, 0x16, 0x87, 0xF0, 0x2C, 0xCB, 0x53, 0xB8,
- 0x0C, 0xE5, 0xDF, 0x16, 0x7B, 0xE8, 0xC2, 0xCF,
- 0xCC, 0xFF, 0x51, 0x24, 0xC1, 0xDD, 0x59, 0x9C,
- 0xA7, 0x56, 0x03, 0xB9, 0x0A, 0x37, 0xA2, 0xAC,
- 0x28, 0x8B, 0xEB, 0x51, 0x4E, 0xF1, 0xAE, 0xB5,
- 0xC8, 0xB5, 0xCB, 0x8D, 0x23, 0xF6, 0x24, 0x2D,
- 0xF6, 0x59, 0x62, 0xC0, 0xCB, 0xD3, 0x18, 0xE4,
- 0xB7, 0x73, 0xEF, 0xDB, 0x13, 0x9A, 0xF5, 0xD3,
- 0xD5, 0x61, 0x01, 0x14, 0xA5, 0xE5, 0x0D, 0x27,
- 0xC9, 0xA5, 0x08, 0x1C, 0x60, 0xBA, 0x73, 0xFF,
- 0xA9, 0xE0, 0x27, 0x86, 0x3F, 0xF7, 0x15, 0x03,
- 0x69, 0xA7, 0x2B, 0x57, 0xAC, 0xA6, 0x70, 0x55,
- 0xE9, 0xB5, 0x3F, 0xEB, 0x6F, 0xCE, 0x8A, 0xA1,
- 0x9D, 0x8B, 0x84, 0xF1, 0x7C, 0xD0, 0x35, 0x21,
- 0x91, 0x3D, 0x3D, 0x6E, 0x83, 0xFC, 0x45, 0x36,
- 0x93, 0xDA, 0x66, 0xDF, 0x1A, 0x59, 0x22, 0xA5,
- 0xC4, 0x99, 0x9B, 0xF8, 0x48, 0x9A, 0x50, 0x09,
- 0xAB, 0xAE, 0x56, 0xB6, 0x49, 0x02, 0x3E, 0x90,
- 0xB6, 0x07, 0x7E, 0xA7, 0x6A, 0x0A, 0xB5, 0x85,
- 0x31, 0x0D, 0x84, 0xD4, 0x01, 0xE4, 0x48, 0x63,
- 0xF3, 0xC1, 0x54, 0x65, 0xA6, 0x4C, 0x8B, 0x33,
- 0xF9, 0x70, 0x59, 0x3B, 0xA6, 0xF6, 0x2B, 0x66,
- 0xC5, 0xD2, 0xEB, 0xAB, 0x67, 0xD2, 0xE3, 0x78,
- 0xA9, 0x1A, 0x4C, 0x99, 0xA9, 0xA6, 0xCA, 0xF7,
- 0x65, 0xF0, 0x48, 0xF8, 0x2A, 0xEA, 0x96, 0x9F,
- 0xC4, 0x50, 0x9A, 0x0C, 0xB6, 0x0D, 0x8A, 0x2F,
- 0xC3, 0x99, 0x4E, 0xA0, 0x06, 0x4D, 0xAB, 0x25,
- 0x2E, 0x44, 0x47, 0xB6, 0x98, 0xF1, 0x2C, 0x96,
- 0x54, 0x51, 0x12, 0x41, 0x0D, 0xEF, 0x32, 0x9A,
- 0x4A, 0xBD, 0xA2, 0x26, 0x53, 0xA8, 0xFD, 0x8B,
- 0x6C, 0x95, 0x0A, 0x1A, 0x96, 0xEF, 0x3C, 0x85,
- 0x34, 0x4E, 0x25, 0x9E, 0x1C, 0x67, 0x33, 0x8A,
- 0xFF, 0x6D, 0x98, 0x93, 0x3D, 0x3F, 0x49, 0x6B,
- 0xBF, 0x7C, 0x4F, 0x63, 0x5D, 0x62, 0x64, 0x67,
- 0x0D, 0x07, 0x7F, 0x24, 0x4A, 0x23, 0xBC, 0x35,
- 0xE0, 0x92, 0x6F, 0x51, 0xE7, 0x25, 0x97, 0xB9,
- 0x14, 0x35, 0x2B, 0x48, 0xAC, 0x6F, 0x54, 0xDF,
- 0xF2, 0xB4, 0xB0, 0xE0, 0xD3, 0x28, 0x0D, 0x66,
- 0x46, 0x28, 0x0A, 0x16, 0x9C, 0x87, 0x73, 0xB7,
- 0x9C, 0x2B, 0xB5, 0x43, 0xC9, 0x46, 0xB9, 0x1F,
- 0x5F, 0x3C, 0x45, 0x03, 0x4B, 0xBF, 0x44, 0x4D,
- 0xE1, 0x44, 0xDA, 0x54, 0xC5, 0x32, 0x3A, 0xFA,
- 0x21, 0x5C, 0xAD, 0xD5, 0x1E, 0x1B, 0x54, 0x7C,
- 0x9F, 0xEA, 0x92, 0x8C, 0xEA, 0x69, 0xC0, 0xCE,
- 0xDA, 0x09, 0xAD, 0x95, 0xA0, 0x8E, 0x0B, 0x8E,
- 0x10, 0x4F, 0x5B, 0x8F, 0xB8, 0x2D, 0xAC, 0x4C,
- 0x94, 0x4B, 0x7C, 0x1E, 0xF1, 0x53, 0x20, 0x9B,
- 0xD6, 0xC4, 0x92, 0x4C, 0x7F, 0xFB, 0x8B, 0x8E,
- 0x40, 0x2F, 0x24, 0xA3, 0x4E, 0x46, 0x64, 0xF4,
- 0xC6, 0x35, 0x0F, 0xC7, 0x40, 0x55, 0x43, 0xAF,
- 0x7E, 0x91, 0x76, 0x48, 0x6F, 0x97, 0x7A, 0xF8,
- 0x32, 0x1E, 0xD3, 0x5B, 0xBC, 0x19, 0xB5, 0x48,
- 0xFA, 0x4F, 0x52, 0x77, 0x5B, 0x9E, 0xA2, 0xC8,
- 0x9A, 0x83, 0x30, 0x8D, 0x9F, 0x0B, 0x6F, 0xA8,
- 0x2E, 0x84, 0xCC, 0xC1, 0x50, 0x96, 0x46, 0xAE,
- 0x73, 0x91, 0x7D, 0xCD, 0x88, 0xAB, 0x67, 0x3F,
- 0x66, 0x3A, 0x8D, 0xB1, 0x89, 0x07, 0x93, 0xDB,
- 0x42, 0x22, 0xDC, 0x13, 0xBD, 0xCD, 0xBB, 0x12,
- 0x8D, 0x88, 0x44, 0x13, 0x22, 0x52, 0x81, 0xDC,
- 0xEF, 0xA1, 0xE4, 0xA3, 0xA7, 0xBA, 0xEE, 0x98,
- 0x79, 0x45, 0x29, 0x05, 0x65, 0x3D, 0xDC, 0xAF,
- 0xA1, 0x37, 0x29, 0xFD, 0x05, 0xD1, 0x3A, 0xF7,
- 0x32, 0x1D, 0x02, 0xEC, 0x28, 0x1E, 0x0F, 0x96,
- 0xF3, 0x21, 0x19, 0x5F, 0x49, 0xB9, 0xEA, 0x9A,
- 0xAD, 0x34, 0x58, 0xD1, 0xD9, 0xB1, 0x7D, 0xD2,
- 0xEA, 0xED, 0x74, 0xE8, 0x25, 0x9A, 0x7B, 0xC5,
- 0xC8, 0xD8, 0x76, 0xB6, 0xBC, 0x0B, 0x78, 0xCE,
- 0xD9, 0xA6, 0xBB, 0x2F, 0x79, 0xA4, 0x45, 0x05,
- 0x55, 0x6E, 0x20, 0x84, 0xEB, 0xC8, 0x70, 0xB0,
- 0x3A, 0x2D, 0x06, 0x98, 0x29, 0x10, 0xB8, 0xC5,
- 0xE9, 0xE4, 0xB6, 0xDE, 0x97, 0x9A, 0x0D, 0x8C,
- 0xB6, 0x22, 0x16, 0x59, 0xAB, 0xB5, 0xD7, 0x14,
- 0xAB, 0x08, 0x02, 0x27, 0x7B, 0xF7, 0x0E, 0xAC,
- 0xC5, 0xAC, 0x4D, 0x7F, 0xE5, 0x65, 0x51, 0x40,
- 0x44, 0x92, 0xB1, 0x6A, 0xB7, 0x00, 0x76, 0x89,
- 0x6E, 0x08, 0x5F, 0x45, 0x2B, 0x53, 0x86, 0x86,
- 0xA7, 0x85, 0xBC, 0x62, 0xAC, 0xAA, 0x82, 0x73,
- 0x0A, 0xEB, 0x35, 0x16, 0x95, 0x26, 0xAB, 0x9E,
- 0xE9, 0x64, 0x53, 0x99, 0x08, 0x31, 0xF5, 0x6B,
- 0x1F, 0xFE, 0x47, 0x4B, 0x09, 0x33, 0x4F, 0xBF,
- 0x1F, 0x0B, 0x4C, 0xB2, 0xB4, 0xA4, 0x17, 0xA9,
- 0xAD, 0xC5, 0x62, 0x7C, 0xF1, 0x1B, 0xAE, 0x46,
- 0xD3, 0xAC, 0xFD, 0x43, 0xFE, 0x79, 0xD0, 0x58,
- 0x2F, 0x6C, 0x9F, 0xD0, 0x65, 0xA4, 0x64, 0x03,
- 0xAF, 0x73, 0x46, 0x75, 0x7D, 0x49, 0x1B, 0x4C,
- 0xFA, 0x49, 0xD8, 0x9A, 0xCC, 0x59, 0xC6, 0xC7,
- 0xA1, 0x05, 0xC2, 0x32, 0xC8, 0x6C, 0x50, 0xA8,
- 0x06, 0x58, 0xBE, 0x6C, 0x7D, 0x22, 0xD6, 0x0D,
- 0x74, 0x40, 0xCE, 0xD6, 0x64, 0xD6, 0x47, 0xD0,
- 0xBF, 0xF1, 0x5C, 0x54, 0xF9, 0x06, 0x3F, 0x3D,
- 0x86, 0xBA, 0xF2, 0x0F, 0x5E, 0x2C, 0x01, 0xCC,
- 0xD9, 0xC7, 0xB1, 0x4A, 0xB3, 0xD7, 0x26, 0xCC,
- 0xC3, 0x7A, 0x74, 0x2C, 0xE1, 0x22, 0x65, 0xA0,
- 0x5B, 0xCA, 0xF4, 0xE1, 0x7D, 0xE1, 0x56, 0xFD,
- 0x94, 0x10, 0xC6, 0xA1, 0x4A, 0xE8, 0x6B, 0x34,
- 0x4E, 0x71, 0x60, 0x77, 0x0F, 0x03, 0xDD, 0xFF,
- 0xC8, 0x59, 0x54, 0x6C, 0xD4, 0x4A, 0x55, 0x24,
- 0x35, 0x21, 0x60, 0x73, 0xDF, 0x6F, 0xE7, 0x3C,
- 0xC2, 0xF0, 0xDA, 0xA9, 0xE5, 0x8C, 0xAC, 0xB6,
- 0xFD, 0x2E, 0xF7, 0xA0, 0x18, 0xA7, 0x55, 0x47,
- 0xD1, 0xCB, 0x9E, 0xAA, 0x58, 0x54, 0x3B, 0x37,
- 0x18, 0xB5, 0xC1, 0xBB, 0x41, 0x59, 0xE4, 0x28,
- 0x4A, 0x13, 0x90, 0x6A, 0xF7, 0xD1, 0xB3, 0x71,
- 0xB6, 0x6E, 0xF6, 0x5D, 0x2E, 0x0E, 0x6C, 0x4A,
- 0x7B, 0xF7, 0xB6, 0x21, 0xD4, 0xFC, 0x47, 0x8C,
- 0x9B, 0x0A, 0x90, 0xAC, 0x11, 0x52, 0x86, 0x07,
- 0x24, 0xDA, 0xA9, 0x49, 0x50, 0xD9, 0xDC, 0xE2,
- 0x19, 0x87, 0x73, 0x88, 0xC3, 0xE4, 0xED, 0xC9,
- 0x1C, 0xA8, 0x7E, 0x39, 0x48, 0x91, 0x10, 0xAB,
- 0xFC, 0x3C, 0x1E, 0xEE, 0x08, 0xA1, 0xB9, 0xB2,
- 0x02, 0x57, 0xB1, 0xD1, 0x35, 0x5E, 0x3D, 0x94,
- 0xFB, 0x36, 0x27, 0x1A, 0x0E, 0x75, 0xFC, 0xBC,
- 0xDB, 0xF3, 0xF5, 0x7C, 0x08, 0x39, 0xAA, 0xF4,
- 0x2E, 0xEE, 0xCF, 0xCD, 0x2D, 0x70, 0xB8, 0x84,
- 0xE6, 0x22, 0x5C, 0xC0, 0xB9, 0x33, 0xCB, 0x97,
- 0xA1, 0xA3, 0xEE, 0x93, 0x71, 0xCF, 0xC9, 0x21,
- 0x31, 0x7A, 0xEC, 0xE7, 0x70, 0xF2, 0xAA, 0x91,
- 0xAA, 0x48, 0xAD, 0xAC, 0x03, 0xB1, 0x26, 0x52,
- 0xBC, 0x65, 0x22, 0xA1, 0x09, 0x3D, 0xAB, 0x16,
- 0x08, 0xBF, 0xCF, 0x3F, 0x59, 0x08, 0x6F, 0x68,
- 0xEB, 0x8A, 0xB3, 0xCF, 0x77, 0x82, 0xFB, 0x25,
- 0x78, 0x16, 0x4C, 0xDB, 0x72, 0xF5, 0xCF, 0x79,
- 0x71, 0xE4, 0x4E, 0x23, 0x15, 0x7F, 0x1E, 0xA8,
- 0x3E, 0xC0, 0x59, 0x91, 0x20, 0xAE, 0x2C, 0x1D,
- 0x90, 0xC8, 0x49, 0x42, 0x48, 0x29, 0x82, 0x66,
- 0x68, 0x49, 0x73, 0xDA, 0xE4, 0x28, 0xCD, 0x7B,
- 0x4D, 0xE4, 0x23, 0x34, 0xB9, 0xE1, 0xB4, 0x42,
- 0x67, 0x22, 0x5B, 0xEE, 0xE6, 0x74, 0x32, 0x6F,
- 0x21, 0x9F, 0x97, 0x46, 0x03, 0xE1, 0xC9, 0x7A,
- 0x14, 0x27, 0x30, 0xE1, 0xB2, 0x34, 0xE6, 0xAF,
- 0x7B, 0xAA, 0xDD, 0x89, 0x04, 0x30, 0xD6, 0x78,
- 0x0B, 0x3D, 0xC3, 0x69, 0xB0, 0x67, 0x4F, 0x4E,
- 0x12, 0x21, 0x93, 0x2D, 0x79, 0xDD, 0x8B, 0xDB,
- 0xEA, 0x90, 0x66, 0x54, 0xA8, 0x05, 0xF2, 0xE4,
- 0x59, 0x8A, 0x96, 0x52, 0x30, 0xF0, 0x4E, 0x9A,
- 0xE5, 0xD8, 0x72, 0x1C, 0x3B, 0x63, 0x02, 0xB9,
- 0xC7, 0xA1, 0xDA, 0xC8, 0x6C, 0x48, 0xE0, 0xDE,
- 0x59, 0x64, 0x89, 0x2C, 0xF9, 0xC8, 0x3B, 0x00,
- 0xEC, 0xF2, 0x68, 0x51, 0x67, 0x05, 0x85, 0xAF,
- 0xB8, 0xD5, 0x65, 0xEE, 0x73, 0x26, 0x88, 0xFB,
- 0xA9, 0xD6, 0x6C, 0x68, 0x9D, 0x9F, 0x23, 0x6A,
- 0x10, 0x24, 0x82, 0xB2, 0xB7, 0x40, 0x19, 0x3E,
- 0x6F, 0xA2, 0xD5, 0x2C, 0x6E, 0x8D, 0xE9, 0x33,
- 0x6E, 0x24, 0x94, 0x05, 0xE9, 0x2D, 0xD9, 0x3A,
- 0x8C, 0xE5, 0xCC, 0x1D, 0x3F, 0xB8, 0x71, 0xA8,
- 0x98, 0x33, 0xBB, 0x1A, 0xAC, 0x41, 0x0A, 0x04,
- 0xFE, 0x4D, 0x46, 0x17, 0x8A, 0xCB, 0xF3, 0x4B,
- 0x97, 0x02, 0xCC, 0x9D, 0x11, 0xF1, 0xBC, 0xA9,
- 0xC1, 0xD1, 0xB6, 0xD6, 0x7B, 0x5F, 0x9D, 0x22,
- 0x86, 0x71, 0xEC, 0x42, 0x53, 0xB7, 0x85, 0x30,
- 0xAF, 0x1D, 0x01, 0xA7, 0xBF, 0x72, 0xC2, 0xC6,
- 0xC9, 0xB8, 0xD8, 0xC7, 0xE9, 0xC4, 0xBA, 0xC5,
- 0xB1, 0x8A, 0xB8, 0x62, 0xBF, 0x75, 0x75, 0x69,
- 0xF8, 0x8D, 0x7E, 0xD9, 0xD2, 0x28, 0xB5, 0x40,
- 0xCE, 0xCB, 0xB8, 0x74, 0x31, 0x40, 0x7B, 0x0D,
- 0x73, 0x98, 0x99, 0x12, 0xB7, 0x75, 0x3E, 0xBC,
- 0xAE, 0x48, 0xCA, 0xA9, 0x1E, 0xA7, 0x95, 0x31,
- 0x87, 0x0F, 0x14, 0x52, 0xB6, 0x8E, 0x42, 0x50,
- 0xB2, 0x76, 0x75, 0xD8, 0x7E, 0x66, 0x23, 0x13,
- 0x8B, 0x29, 0xAA, 0x13, 0xCA, 0x8A, 0xD8, 0x9B,
- 0x7B, 0x38, 0xD2, 0xE8, 0x67, 0xD1, 0x89, 0x25,
- 0x9C, 0x63, 0x2F, 0xC3, 0x26, 0xC7, 0x74, 0x83,
- 0x05, 0xED, 0x67, 0x02, 0x85, 0xAD, 0x1D, 0x0E,
- 0xA9, 0xD6, 0xE1, 0xC7, 0x39, 0xA0, 0x6E, 0x72,
- 0xCE, 0x56, 0x6C, 0xB8, 0x4A, 0xDE, 0x11, 0xA2,
- 0xBF, 0xC1, 0x84, 0x98, 0x8F, 0xCA, 0x79, 0x74,
- 0xCA, 0x9F, 0x45, 0x16, 0xBC, 0xB1, 0xF4, 0x03,
- 0x76, 0x6E, 0xD5, 0x46, 0x60, 0xD7, 0x1D, 0xF0,
- 0x87, 0x29, 0x63, 0x07, 0x06, 0xB9, 0xC2, 0x69,
- 0x6D, 0xF9, 0x4B, 0x30, 0x96, 0x83, 0xB8, 0xC5,
- 0xBE, 0x3A, 0xBA, 0xD0, 0x3E, 0x2B, 0x04, 0x16,
- 0x6A, 0x00, 0x3B, 0x1A, 0x8E, 0xF8, 0xF6, 0x21,
- 0x01, 0xD6, 0x08, 0x41, 0x74, 0xA2, 0xFC, 0x36,
- 0xED, 0x11, 0x51, 0x5A, 0x4A, 0x21, 0x1A, 0x03,
- 0x11, 0x95, 0x11, 0xF6, 0x73, 0x38, 0x67, 0xFC,
- 0xF1, 0x2B, 0x22, 0x54, 0x65, 0x40, 0x7D, 0x8C,
- 0x13, 0xC4, 0x46, 0x87, 0x09, 0x2B, 0xB5, 0xA1,
- 0x82, 0x49, 0x46, 0x56, 0xF5, 0x5F, 0xF1, 0x04,
- 0xD8, 0x6F, 0xDB, 0x38, 0xAD, 0xF4, 0x1A, 0xA3,
- 0xFF, 0x7C, 0xC7, 0xA6, 0xAF, 0x87, 0x5C, 0x8C,
- 0xEA, 0x3C, 0x9D, 0x7A, 0x4A, 0xD8, 0xA8, 0x66,
- 0xDB, 0xBF, 0x12, 0x58, 0x98, 0x8E, 0xBA, 0x6F,
- 0xAF, 0x20, 0xDA, 0xEE, 0x82, 0x34, 0x2F, 0x33,
- 0x88, 0x98, 0xBA, 0xB2, 0x54, 0x7F, 0x9E, 0x63,
- 0x19, 0x6C, 0x7D, 0xCE, 0x85, 0xF8, 0xB6, 0x77,
- 0xCB, 0x38, 0x1F, 0xB1, 0x79, 0xBD, 0xED, 0x32,
- 0xE3, 0xB9, 0x40, 0xEF, 0x3E, 0x6C, 0x29, 0x88,
- 0x70, 0x99, 0x47, 0xA6, 0x4A, 0x1C, 0xCC, 0x0B,
- 0x9B, 0x72, 0xA9, 0x29, 0x83, 0x4C, 0xDE, 0x4F,
- 0x65, 0x4E, 0xCE, 0xBD, 0xFA, 0x76, 0x8D, 0xA6,
- 0x1A, 0xD8, 0x66, 0xFE, 0xA4, 0x2A, 0x61, 0x50,
- 0xEE, 0x15, 0xF1, 0xF0, 0x9D, 0xFF, 0xEC, 0xEE,
- 0x00, 0x03, 0xFE, 0xAC, 0x53, 0x02, 0xCC, 0x87,
- 0xB1, 0xA2, 0xD8, 0x34, 0x2C, 0xEC, 0xA6, 0x4C,
- 0x02, 0xC0, 0xC1, 0x72, 0xD6, 0x54, 0x35, 0x24,
- 0x25, 0x8B, 0xEC, 0xDA, 0x47, 0x5F, 0x5D, 0x7E,
- 0xD8, 0x01, 0x51, 0xDD, 0x8F, 0xB4, 0x48, 0xDD,
- 0x94, 0x99, 0x95, 0x77, 0xB3, 0x42, 0x14, 0xEB,
- 0x26, 0x61, 0xE9, 0x22, 0xE3, 0x07, 0x73, 0xFB,
- 0xEF, 0x38, 0x55, 0x35, 0x8F, 0xCC, 0x30, 0x1E,
- 0x38, 0xE0, 0x35, 0xF4, 0x9A, 0x7C, 0xCF, 0x38,
- 0x0B, 0x9E, 0xF4, 0x88, 0x4A, 0xEA, 0xF2, 0x67,
- 0x9F, 0x61, 0x40, 0x34, 0x09, 0xDC, 0xBF, 0xFB,
- 0x22, 0x27, 0x04, 0x8B, 0x8D, 0x85, 0x7F, 0xB2,
- 0x29, 0x62, 0x25, 0x73, 0x7F, 0x46, 0x2E, 0xA3,
- 0x8E, 0xAF, 0xEC, 0x55, 0x98, 0x1A, 0xEE, 0x29,
- 0xA0, 0x1A, 0x5F, 0xFE, 0x5D, 0xA5, 0x76, 0x93,
- 0xAB, 0x57, 0x56, 0xEA, 0xDB, 0x39, 0xAC, 0x48,
- 0xBE, 0x95, 0x92, 0x2B, 0xC6, 0xE1, 0x2F, 0x36,
- 0x4B, 0x08, 0x01, 0x90, 0x50, 0xD8, 0xFA, 0xF9,
- 0x94, 0x4E, 0x76, 0x9B, 0x72, 0x59, 0xC2, 0x2F,
- 0x61, 0x04, 0x0A, 0x9E, 0x28, 0xE5, 0x24, 0x1E,
- 0x79, 0xCF, 0x8D, 0xB6, 0x52, 0xA7, 0x79, 0x5F,
- 0x44, 0x98, 0xD5, 0x0E, 0x6E, 0x4B, 0x64, 0x9B,
- },
- .len = 2048
- },
- .auth_tags[0] = {
- .size = 64,
- .data = { 0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6,
- 0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 },
- .len = 16
- },
- .auth_tags[1] = {
- .size = 128,
- .data = { 0xE9, 0xA9, 0x75, 0xB6, 0xEF, 0x6F, 0x8C, 0xF1,
- 0xB3, 0xA9, 0x19, 0xA4, 0xAE, 0x66, 0xBD, 0x9E },
- .len = 16
- },
- .auth_tags[2] = {
- .size = 256,
- .data = { 0x29, 0xC3, 0x18, 0x96, 0x54, 0xCB, 0xF5, 0xAA,
- 0x4E, 0x62, 0xB6, 0xFF, 0x45, 0xA6, 0x18, 0x0C },
- .len = 16
- },
- .auth_tags[3] = {
- .size = 512,
- .data = { 0x3B, 0xD7, 0xC3, 0x5F, 0xE4, 0x1B, 0xC2, 0xBC,
- 0xE9, 0xAC, 0xF2, 0xCE, 0xA7, 0x7B, 0x1D, 0x70 },
- .len = 16
- },
- .auth_tags[4] = {
- .size = 1024,
- .data = { 0xCC, 0xBB, 0xBC, 0xCF, 0x86, 0x01, 0x4D, 0x93,
- 0x4B, 0x68, 0x55, 0x19, 0xA1, 0x40, 0xCD, 0xEA },
- .len = 16
- },
- .auth_tags[5] = {
- .size = 1536,
- .data = { 0x67, 0x31, 0x11, 0xA2, 0x58, 0xB5, 0x1C, 0x23,
- 0xC0, 0x41, 0x05, 0x30, 0xC6, 0xBA, 0xFA, 0x88 },
- .len = 16
- },
- .auth_tags[6] = {
- .size = 2048,
- .data = { 0x03, 0x9C, 0x6B, 0xB9, 0x57, 0xBF, 0x6E, 0x86,
- 0x3A, 0x09, 0x5F, 0x08, 0xA9, 0xE4, 0xF2, 0x1F },
- .len = 16
- },
- .auth_tag = {
- .data = {
- 0x03, 0x9C, 0x6B, 0xB9, 0x57, 0xBF, 0x6E, 0x86,
- 0x3A, 0x09, 0x5F, 0x08, 0xA9, 0xE4, 0xF2, 0x1F
- },
- .len = 16
- },
-};
-
-static const struct gmac_test_data gmac_test_case_4 = {
- .key = {
- .data = {
- 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
- 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
- },
- .len = 16
- },
- .iv = {
- .data = {
- 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
- 0xde, 0xca, 0xf8, 0x88
- },
- .len = 12
- },
- .aad = {
- .data = gmac_plaintext,
- .len = GMAC_LARGE_PLAINTEXT_LENGTH
- },
- .plaintext = {
- .data = NULL,
- .len = 0
- },
- .gmac_tag = {
- .data = {
- 0x88, 0x82, 0xb4, 0x93, 0x8f, 0x04, 0xcd, 0x06,
- 0xfd, 0xac, 0x6d, 0x8b, 0x9c, 0x9e, 0x8f, 0xec
- },
- .len = 16
- }
-};
-
-#endif /* TEST_CRYPTODEV_GCM_TEST_VECTORS_H_ */
diff --git a/app/test/test_cryptodev_zuc_hash_test_vectors.h b/app/test/test_cryptodev_zuc_hash_test_vectors.h
deleted file mode 100644
index 988452cb..00000000
--- a/app/test/test_cryptodev_zuc_hash_test_vectors.h
+++ /dev/null
@@ -1,359 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY ExPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, ExEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef TEST_CRYPTODEV_ZUC_HASH_TEST_VECTORS_H_
-#define TEST_CRYPTODEV_ZUC_HASH_TEST_VECTORS_H_
-
-struct zuc_hash_test_data {
- struct {
- uint8_t data[64];
- unsigned len;
- } key;
-
- struct {
- uint8_t data[64];
- unsigned len;
- } aad;
-
- struct {
- uint8_t data[2056];
- unsigned len; /* length must be in Bits */
- } plaintext;
-
- struct {
- unsigned len;
- } validAuthLenInBits;
-
- struct {
- unsigned len;
- } validAuthOffsetLenInBits;
-
- struct {
- uint8_t data[64];
- unsigned len;
- } digest;
-};
-
-struct zuc_hash_test_data zuc_hash_test_case_1 = {
- .key = {
- .data = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
- },
- .len = 16
- },
- .aad = {
- .data = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
- },
- .len = 16
- },
- .plaintext = {
- .data = {0x00},
- .len = 8
- },
- .validAuthLenInBits = {
- .len = 1
- },
- .validAuthOffsetLenInBits = {
- .len = 128
- },
- .digest = {
- .data = {0xC8, 0xA9, 0x59, 0x5E},
- .len = 4
- }
-};
-
-struct zuc_hash_test_data zuc_hash_test_case_2 = {
- .key = {
- .data = {
- 0x47, 0x05, 0x41, 0x25, 0x56, 0x1E, 0xB2, 0xDD,
- 0xA9, 0x40, 0x59, 0xDA, 0x05, 0x09, 0x78, 0x50
- },
- .len = 16
- },
- .aad = {
- .data = {
- 0x56, 0x1E, 0xB2, 0xDD, 0xA0, 0x00, 0x00, 0x00,
- 0x56, 0x1E, 0xB2, 0xDD, 0xA0, 0x00, 0x00, 0x00
- },
- .len = 16
- },
- .plaintext = {
- .data = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00
- },
- .len = 96
- },
- .validAuthLenInBits = {
- .len = 90
- },
- .validAuthOffsetLenInBits = {
- .len = 128
- },
- .digest = {
- .data = {0x67, 0x19, 0xA0, 0x88},
- .len = 4
- }
-};
-
-struct zuc_hash_test_data zuc_hash_test_case_3 = {
- .key = {
- .data = {
- 0xC9, 0xE6, 0xCE, 0xC4, 0x60, 0x7C, 0x72, 0xDB,
- 0x00, 0x0A, 0xEF, 0xA8, 0x83, 0x85, 0xAB, 0x0A
- },
- .len = 16
- },
- .aad = {
- .data = {
- 0xA9, 0x40, 0x59, 0xDA, 0x50, 0x00, 0x00, 0x00,
- 0x29, 0x40, 0x59, 0xDA, 0x50, 0x00, 0x80, 0x00
- },
- .len = 16
- },
- .plaintext = {
- .data = {
- 0x98, 0x3B, 0x41, 0xD4, 0x7D, 0x78, 0x0C, 0x9E,
- 0x1A, 0xD1, 0x1D, 0x7E, 0xB7, 0x03, 0x91, 0xB1,
- 0xDE, 0x0B, 0x35, 0xDA, 0x2D, 0xC6, 0x2F, 0x83,
- 0xE7, 0xB7, 0x8D, 0x63, 0x06, 0xCA, 0x0E, 0xA0,
- 0x7E, 0x94, 0x1B, 0x7B, 0xE9, 0x13, 0x48, 0xF9,
- 0xFC, 0xB1, 0x70, 0xE2, 0x21, 0x7F, 0xEC, 0xD9,
- 0x7F, 0x9F, 0x68, 0xAD, 0xB1, 0x6E, 0x5D, 0x7D,
- 0x21, 0xE5, 0x69, 0xD2, 0x80, 0xED, 0x77, 0x5C,
- 0xEB, 0xDE, 0x3F, 0x40, 0x93, 0xC5, 0x38, 0x81,
- 0x00
- },
- .len = 584
- },
- .validAuthLenInBits = {
- .len = 577
- },
- .validAuthOffsetLenInBits = {
- .len = 128
- },
- .digest = {
- .data = {0xFA, 0xE8, 0xFF, 0x0B},
- .len = 4
- }
-};
-
-struct zuc_hash_test_data zuc_hash_test_case_4 = {
- .key = {
- .data = {
- 0xC8, 0xA4, 0x82, 0x62, 0xD0, 0xC2, 0xE2, 0xBA,
- 0xC4, 0xB9, 0x6E, 0xF7, 0x7E, 0x80, 0xCA, 0x59
- },
- .len = 16
- },
- .aad = {
- .data = {
- 0x05, 0x09, 0x78, 0x50, 0x80, 0x00, 0x00, 0x00,
- 0x85, 0x09, 0x78, 0x50, 0x80, 0x00, 0x80, 0x00
- },
- .len = 16
- },
- .plaintext = {
- .data = {
- 0xB5, 0x46, 0x43, 0x0B, 0xF8, 0x7B, 0x4F, 0x1E,
- 0xE8, 0x34, 0x70, 0x4C, 0xD6, 0x95, 0x1C, 0x36,
- 0xE2, 0x6F, 0x10, 0x8C, 0xF7, 0x31, 0x78, 0x8F,
- 0x48, 0xDC, 0x34, 0xF1, 0x67, 0x8C, 0x05, 0x22,
- 0x1C, 0x8F, 0xA7, 0xFF, 0x2F, 0x39, 0xF4, 0x77,
- 0xE7, 0xE4, 0x9E, 0xF6, 0x0A, 0x4E, 0xC2, 0xC3,
- 0xDE, 0x24, 0x31, 0x2A, 0x96, 0xAA, 0x26, 0xE1,
- 0xCF, 0xBA, 0x57, 0x56, 0x38, 0x38, 0xB2, 0x97,
- 0xF4, 0x7E, 0x85, 0x10, 0xC7, 0x79, 0xFD, 0x66,
- 0x54, 0xB1, 0x43, 0x38, 0x6F, 0xA6, 0x39, 0xD3,
- 0x1E, 0xDB, 0xD6, 0xC0, 0x6E, 0x47, 0xD1, 0x59,
- 0xD9, 0x43, 0x62, 0xF2, 0x6A, 0xEE, 0xED, 0xEE,
- 0x0E, 0x4F, 0x49, 0xD9, 0xBF, 0x84, 0x12, 0x99,
- 0x54, 0x15, 0xBF, 0xAD, 0x56, 0xEE, 0x82, 0xD1,
- 0xCA, 0x74, 0x63, 0xAB, 0xF0, 0x85, 0xB0, 0x82,
- 0xB0, 0x99, 0x04, 0xD6, 0xD9, 0x90, 0xD4, 0x3C,
- 0xF2, 0xE0, 0x62, 0xF4, 0x08, 0x39, 0xD9, 0x32,
- 0x48, 0xB1, 0xEB, 0x92, 0xCD, 0xFE, 0xD5, 0x30,
- 0x0B, 0xC1, 0x48, 0x28, 0x04, 0x30, 0xB6, 0xD0,
- 0xCA, 0xA0, 0x94, 0xB6, 0xEC, 0x89, 0x11, 0xAB,
- 0x7D, 0xC3, 0x68, 0x24, 0xB8, 0x24, 0xDC, 0x0A,
- 0xF6, 0x68, 0x2B, 0x09, 0x35, 0xFD, 0xE7, 0xB4,
- 0x92, 0xA1, 0x4D, 0xC2, 0xF4, 0x36, 0x48, 0x03,
- 0x8D, 0xA2, 0xCF, 0x79, 0x17, 0x0D, 0x2D, 0x50,
- 0x13, 0x3F, 0xD4, 0x94, 0x16, 0xCB, 0x6E, 0x33,
- 0xBE, 0xA9, 0x0B, 0x8B, 0xF4, 0x55, 0x9B, 0x03,
- 0x73, 0x2A, 0x01, 0xEA, 0x29, 0x0E, 0x6D, 0x07,
- 0x4F, 0x79, 0xBB, 0x83, 0xC1, 0x0E, 0x58, 0x00,
- 0x15, 0xCC, 0x1A, 0x85, 0xB3, 0x6B, 0x55, 0x01,
- 0x04, 0x6E, 0x9C, 0x4B, 0xDC, 0xAE, 0x51, 0x35,
- 0x69, 0x0B, 0x86, 0x66, 0xBD, 0x54, 0xB7, 0xA7,
- 0x03, 0xEA, 0x7B, 0x6F, 0x22, 0x0A, 0x54, 0x69,
- 0xA5, 0x68, 0x02, 0x7E
- },
- .len = 2080
- },
- .validAuthLenInBits = {
- .len = 2079
- },
- .validAuthOffsetLenInBits = {
- .len = 128
- },
- .digest = {
- .data = {0x00, 0x4A, 0xC4, 0xD6},
- .len = 4
- }
-};
-
-struct zuc_hash_test_data zuc_hash_test_case_5 = {
- .key = {
- .data = {
- 0x6B, 0x8B, 0x08, 0xEE, 0x79, 0xE0, 0xB5, 0x98,
- 0x2D, 0x6D, 0x12, 0x8E, 0xA9, 0xF2, 0x20, 0xCB
- },
- .len = 16
- },
- .aad = {
- .data = {
- 0x56, 0x1E, 0xB2, 0xDD, 0xE0, 0x00, 0x00, 0x00,
- 0x56, 0x1E, 0xB2, 0xDD, 0xE0, 0x00, 0x00, 0x00
- },
- .len = 16
- },
- .plaintext = {
- .data = {
- 0x5B, 0xAD, 0x72, 0x47, 0x10, 0xBA, 0x1C, 0x56,
- 0xD5, 0xA3, 0x15, 0xF8, 0xD4, 0x0F, 0x6E, 0x09,
- 0x37, 0x80, 0xBE, 0x8E, 0x8D, 0xE0, 0x7B, 0x69,
- 0x92, 0x43, 0x20, 0x18, 0xE0, 0x8E, 0xD9, 0x6A,
- 0x57, 0x34, 0xAF, 0x8B, 0xAD, 0x8A, 0x57, 0x5D,
- 0x3A, 0x1F, 0x16, 0x2F, 0x85, 0x04, 0x5C, 0xC7,
- 0x70, 0x92, 0x55, 0x71, 0xD9, 0xF5, 0xB9, 0x4E,
- 0x45, 0x4A, 0x77, 0xC1, 0x6E, 0x72, 0x93, 0x6B,
- 0xF0, 0x16, 0xAE, 0x15, 0x74, 0x99, 0xF0, 0x54,
- 0x3B, 0x5D, 0x52, 0xCA, 0xA6, 0xDB, 0xEA, 0xB6,
- 0x97, 0xD2, 0xBB, 0x73, 0xE4, 0x1B, 0x80, 0x75,
- 0xDC, 0xE7, 0x9B, 0x4B, 0x86, 0x04, 0x4F, 0x66,
- 0x1D, 0x44, 0x85, 0xA5, 0x43, 0xDD, 0x78, 0x60,
- 0x6E, 0x04, 0x19, 0xE8, 0x05, 0x98, 0x59, 0xD3,
- 0xCB, 0x2B, 0x67, 0xCE, 0x09, 0x77, 0x60, 0x3F,
- 0x81, 0xFF, 0x83, 0x9E, 0x33, 0x18, 0x59, 0x54,
- 0x4C, 0xFB, 0xC8, 0xD0, 0x0F, 0xEF, 0x1A, 0x4C,
- 0x85, 0x10, 0xFB, 0x54, 0x7D, 0x6B, 0x06, 0xC6,
- 0x11, 0xEF, 0x44, 0xF1, 0xBC, 0xE1, 0x07, 0xCF,
- 0xA4, 0x5A, 0x06, 0xAA, 0xB3, 0x60, 0x15, 0x2B,
- 0x28, 0xDC, 0x1E, 0xBE, 0x6F, 0x7F, 0xE0, 0x9B,
- 0x05, 0x16, 0xF9, 0xA5, 0xB0, 0x2A, 0x1B, 0xD8,
- 0x4B, 0xB0, 0x18, 0x1E, 0x2E, 0x89, 0xE1, 0x9B,
- 0xD8, 0x12, 0x59, 0x30, 0xD1, 0x78, 0x68, 0x2F,
- 0x38, 0x62, 0xDC, 0x51, 0xB6, 0x36, 0xF0, 0x4E,
- 0x72, 0x0C, 0x47, 0xC3, 0xCE, 0x51, 0xAD, 0x70,
- 0xD9, 0x4B, 0x9B, 0x22, 0x55, 0xFB, 0xAE, 0x90,
- 0x65, 0x49, 0xF4, 0x99, 0xF8, 0xC6, 0xD3, 0x99,
- 0x47, 0xED, 0x5E, 0x5D, 0xF8, 0xE2, 0xDE, 0xF1,
- 0x13, 0x25, 0x3E, 0x7B, 0x08, 0xD0, 0xA7, 0x6B,
- 0x6B, 0xFC, 0x68, 0xC8, 0x12, 0xF3, 0x75, 0xC7,
- 0x9B, 0x8F, 0xE5, 0xFD, 0x85, 0x97, 0x6A, 0xA6,
- 0xD4, 0x6B, 0x4A, 0x23, 0x39, 0xD8, 0xAE, 0x51,
- 0x47, 0xF6, 0x80, 0xFB, 0xE7, 0x0F, 0x97, 0x8B,
- 0x38, 0xEF, 0xFD, 0x7B, 0x2F, 0x78, 0x66, 0xA2,
- 0x25, 0x54, 0xE1, 0x93, 0xA9, 0x4E, 0x98, 0xA6,
- 0x8B, 0x74, 0xBD, 0x25, 0xBB, 0x2B, 0x3F, 0x5F,
- 0xB0, 0xA5, 0xFD, 0x59, 0x88, 0x7F, 0x9A, 0xB6,
- 0x81, 0x59, 0xB7, 0x17, 0x8D, 0x5B, 0x7B, 0x67,
- 0x7C, 0xB5, 0x46, 0xBF, 0x41, 0xEA, 0xDC, 0xA2,
- 0x16, 0xFC, 0x10, 0x85, 0x01, 0x28, 0xF8, 0xBD,
- 0xEF, 0x5C, 0x8D, 0x89, 0xF9, 0x6A, 0xFA, 0x4F,
- 0xA8, 0xB5, 0x48, 0x85, 0x56, 0x5E, 0xD8, 0x38,
- 0xA9, 0x50, 0xFE, 0xE5, 0xF1, 0xC3, 0xB0, 0xA4,
- 0xF6, 0xFB, 0x71, 0xE5, 0x4D, 0xFD, 0x16, 0x9E,
- 0x82, 0xCE, 0xCC, 0x72, 0x66, 0xC8, 0x50, 0xE6,
- 0x7C, 0x5E, 0xF0, 0xBA, 0x96, 0x0F, 0x52, 0x14,
- 0x06, 0x0E, 0x71, 0xEB, 0x17, 0x2A, 0x75, 0xFC,
- 0x14, 0x86, 0x83, 0x5C, 0xBE, 0xA6, 0x53, 0x44,
- 0x65, 0xB0, 0x55, 0xC9, 0x6A, 0x72, 0xE4, 0x10,
- 0x52, 0x24, 0x18, 0x23, 0x25, 0xD8, 0x30, 0x41,
- 0x4B, 0x40, 0x21, 0x4D, 0xAA, 0x80, 0x91, 0xD2,
- 0xE0, 0xFB, 0x01, 0x0A, 0xE1, 0x5C, 0x6D, 0xE9,
- 0x08, 0x50, 0x97, 0x3B, 0xDF, 0x1E, 0x42, 0x3B,
- 0xE1, 0x48, 0xA2, 0x37, 0xB8, 0x7A, 0x0C, 0x9F,
- 0x34, 0xD4, 0xB4, 0x76, 0x05, 0xB8, 0x03, 0xD7,
- 0x43, 0xA8, 0x6A, 0x90, 0x39, 0x9A, 0x4A, 0xF3,
- 0x96, 0xD3, 0xA1, 0x20, 0x0A, 0x62, 0xF3, 0xD9,
- 0x50, 0x79, 0x62, 0xE8, 0xE5, 0xBE, 0xE6, 0xD3,
- 0xDA, 0x2B, 0xB3, 0xF7, 0x23, 0x76, 0x64, 0xAC,
- 0x7A, 0x29, 0x28, 0x23, 0x90, 0x0B, 0xC6, 0x35,
- 0x03, 0xB2, 0x9E, 0x80, 0xD6, 0x3F, 0x60, 0x67,
- 0xBF, 0x8E, 0x17, 0x16, 0xAC, 0x25, 0xBE, 0xBA,
- 0x35, 0x0D, 0xEB, 0x62, 0xA9, 0x9F, 0xE0, 0x31,
- 0x85, 0xEB, 0x4F, 0x69, 0x93, 0x7E, 0xCD, 0x38,
- 0x79, 0x41, 0xFD, 0xA5, 0x44, 0xBA, 0x67, 0xDB,
- 0x09, 0x11, 0x77, 0x49, 0x38, 0xB0, 0x18, 0x27,
- 0xBC, 0xC6, 0x9C, 0x92, 0xB3, 0xF7, 0x72, 0xA9,
- 0xD2, 0x85, 0x9E, 0xF0, 0x03, 0x39, 0x8B, 0x1F,
- 0x6B, 0xBA, 0xD7, 0xB5, 0x74, 0xF7, 0x98, 0x9A,
- 0x1D, 0x10, 0xB2, 0xDF, 0x79, 0x8E, 0x0D, 0xBF,
- 0x30, 0xD6, 0x58, 0x74, 0x64, 0xD2, 0x48, 0x78,
- 0xCD, 0x00, 0xC0, 0xEA, 0xEE, 0x8A, 0x1A, 0x0C,
- 0xC7, 0x53, 0xA2, 0x79, 0x79, 0xE1, 0x1B, 0x41,
- 0xDB, 0x1D, 0xE3, 0xD5, 0x03, 0x8A, 0xFA, 0xF4,
- 0x9F, 0x5C, 0x68, 0x2C, 0x37, 0x48, 0xD8, 0xA3,
- 0xA9, 0xEC, 0x54, 0xE6, 0xA3, 0x71, 0x27, 0x5F,
- 0x16, 0x83, 0x51, 0x0F, 0x8E, 0x4F, 0x90, 0x93,
- 0x8F, 0x9A, 0xB6, 0xE1, 0x34, 0xC2, 0xCF, 0xDF,
- 0x48, 0x41, 0xCB, 0xA8, 0x8E, 0x0C, 0xFF, 0x2B,
- 0x0B, 0xCC, 0x8E, 0x6A, 0xDC, 0xB7, 0x11, 0x09,
- 0xB5, 0x19, 0x8F, 0xEC, 0xF1, 0xBB, 0x7E, 0x5C,
- 0x53, 0x1A, 0xCA, 0x50, 0xA5, 0x6A, 0x8A, 0x3B,
- 0x6D, 0xE5, 0x98, 0x62, 0xD4, 0x1F, 0xA1, 0x13,
- 0xD9, 0xCD, 0x95, 0x78, 0x08, 0xF0, 0x85, 0x71,
- 0xD9, 0xA4, 0xBB, 0x79, 0x2A, 0xF2, 0x71, 0xF6,
- 0xCC, 0x6D, 0xBB, 0x8D, 0xC7, 0xEC, 0x36, 0xE3,
- 0x6B, 0xE1, 0xED, 0x30, 0x81, 0x64, 0xC3, 0x1C,
- 0x7C, 0x0A, 0xFC, 0x54, 0x1C
- },
- .len = 5672
- },
- .validAuthLenInBits = {
- .len = 5670
- },
- .validAuthOffsetLenInBits = {
- .len = 128
- },
- .digest = {
- .data = {0x0C, 0xA1, 0x27, 0x92},
- .len = 4
- }
-};
-
-#endif /* TEST_CRYPTODEV_ZUC_HASH_TEST_VECTORS_H_ */
diff --git a/app/test/test_pci.c b/app/test/test_pci.c
deleted file mode 100644
index cda186d0..00000000
--- a/app/test/test_pci.c
+++ /dev/null
@@ -1,322 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 6WIND S.A.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <stdint.h>
-#include <sys/queue.h>
-
-#include <rte_interrupts.h>
-#include <rte_pci.h>
-#include <rte_ethdev.h>
-#include <rte_devargs.h>
-
-#include "test.h"
-#include "resource.h"
-
-/* Generic maximum number of drivers to have room to allocate all drivers */
-#define NUM_MAX_DRIVERS 256
-
-/*
- * PCI test
- * ========
- *
- * - Register a driver with a ``probe()`` function.
- *
- * - Dump all PCI devices.
- *
- * - Check that the ``probe()`` function is called at least once.
- */
-
-int test_pci_run = 0; /* value checked by the multiprocess test */
-static unsigned pci_dev_count;
-
-static int my_driver_init(struct rte_pci_driver *dr,
- struct rte_pci_device *dev);
-
-/* IXGBE NICS */
-struct rte_pci_id my_driver_id[] = {
- {RTE_PCI_DEVICE(0x0001, 0x1234)},
- { .vendor_id = 0, /* sentinel */ },
-};
-
-struct rte_pci_id my_driver_id2[] = {
- {RTE_PCI_DEVICE(0x0001, 0x4444)},
- {RTE_PCI_DEVICE(0x0002, 0xabcd)},
- { .vendor_id = 0, /* sentinel */ },
-};
-
-struct rte_pci_driver my_driver = {
- .driver = {
- .name = "test_driver"
- },
- .probe = my_driver_init,
- .id_table = my_driver_id,
- .drv_flags = 0,
-};
-
-struct rte_pci_driver my_driver2 = {
- .driver = {
- .name = "test_driver2"
- },
- .probe = my_driver_init,
- .id_table = my_driver_id2,
- .drv_flags = 0,
-};
-
-static int
-my_driver_init(__attribute__((unused)) struct rte_pci_driver *dr,
- struct rte_pci_device *dev)
-{
- printf("My driver init called in %s\n", dr->driver.name);
- printf("%x:%x:%x.%d", dev->addr.domain, dev->addr.bus,
- dev->addr.devid, dev->addr.function);
- printf(" - vendor:%x device:%x\n", dev->id.vendor_id, dev->id.device_id);
-
- pci_dev_count ++;
- return 0;
-}
-
-static void
-blacklist_all_devices(void)
-{
- struct rte_pci_device *dev = NULL;
- unsigned i = 0;
- char pci_addr_str[16];
-
- TAILQ_FOREACH(dev, &pci_device_list, next) {
- snprintf(pci_addr_str, sizeof(pci_addr_str), PCI_PRI_FMT,
- dev->addr.domain, dev->addr.bus, dev->addr.devid,
- dev->addr.function);
- if (rte_eal_devargs_add(RTE_DEVTYPE_BLACKLISTED_PCI,
- pci_addr_str) < 0) {
- printf("Error: cannot blacklist <%s>", pci_addr_str);
- break;
- }
- i++;
- }
- printf("%u devices blacklisted\n", i);
-}
-
-/* clear devargs list that was modified by the test */
-static void free_devargs_list(void)
-{
- struct rte_devargs *devargs;
-
- while (!TAILQ_EMPTY(&devargs_list)) {
- devargs = TAILQ_FIRST(&devargs_list);
- TAILQ_REMOVE(&devargs_list, devargs, next);
- free(devargs->args);
- free(devargs);
- }
-}
-
-/* backup real devices & drivers (not used for testing) */
-struct pci_driver_list real_pci_driver_list =
- TAILQ_HEAD_INITIALIZER(real_pci_driver_list);
-struct pci_device_list real_pci_device_list =
- TAILQ_HEAD_INITIALIZER(real_pci_device_list);
-
-REGISTER_LINKED_RESOURCE(test_pci_sysfs);
-
-static int
-test_pci_setup(void)
-{
- struct rte_pci_device *dev;
- struct rte_pci_driver *dr;
- const struct resource *r;
- int ret;
-
- r = resource_find("test_pci_sysfs");
- TEST_ASSERT_NOT_NULL(r, "missing resource test_pci_sysfs");
-
- ret = resource_untar(r);
- TEST_ASSERT_SUCCESS(ret, "failed to untar %s", r->name);
-
- ret = setenv("SYSFS_PCI_DEVICES", "test_pci_sysfs/bus/pci/devices", 1);
- TEST_ASSERT_SUCCESS(ret, "failed to setenv");
-
- /* Unregister original devices & drivers lists */
- while (!TAILQ_EMPTY(&pci_driver_list)) {
- dr = TAILQ_FIRST(&pci_driver_list);
- rte_eal_pci_unregister(dr);
- TAILQ_INSERT_TAIL(&real_pci_driver_list, dr, next);
- }
-
- while (!TAILQ_EMPTY(&pci_device_list)) {
- dev = TAILQ_FIRST(&pci_device_list);
- TAILQ_REMOVE(&pci_device_list, dev, next);
- TAILQ_INSERT_TAIL(&real_pci_device_list, dev, next);
- }
-
- ret = rte_eal_pci_scan();
- TEST_ASSERT_SUCCESS(ret, "failed to scan PCI bus");
- rte_eal_pci_dump(stdout);
-
- return 0;
-}
-
-static int
-test_pci_cleanup(void)
-{
- struct rte_pci_device *dev;
- struct rte_pci_driver *dr;
- const struct resource *r;
- int ret;
-
- unsetenv("SYSFS_PCI_DEVICES");
-
- r = resource_find("test_pci_sysfs");
- TEST_ASSERT_NOT_NULL(r, "missing resource test_pci_sysfs");
-
- ret = resource_rm_by_tar(r);
- TEST_ASSERT_SUCCESS(ret, "Failed to delete resource %s", r->name);
-
- /*
- * FIXME: there is no API in DPDK to free a rte_pci_device so we
- * cannot free the devices in the right way. Let's assume that we
- * don't care for tests.
- */
- while (!TAILQ_EMPTY(&pci_device_list)) {
- dev = TAILQ_FIRST(&pci_device_list);
- TAILQ_REMOVE(&pci_device_list, dev, next);
- }
-
- /* Restore original devices & drivers lists */
- while (!TAILQ_EMPTY(&real_pci_driver_list)) {
- dr = TAILQ_FIRST(&real_pci_driver_list);
- TAILQ_REMOVE(&real_pci_driver_list, dr, next);
- rte_eal_pci_register(dr);
- }
-
- while (!TAILQ_EMPTY(&real_pci_device_list)) {
- dev = TAILQ_FIRST(&real_pci_device_list);
- TAILQ_REMOVE(&real_pci_device_list, dev, next);
- TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
- }
-
- return 0;
-}
-
-static int
-test_pci_blacklist(void)
-{
- struct rte_devargs_list save_devargs_list;
-
- printf("Dump all devices\n");
- TEST_ASSERT(TAILQ_EMPTY(&pci_driver_list),
- "pci_driver_list not empty");
-
- rte_eal_pci_register(&my_driver);
- rte_eal_pci_register(&my_driver2);
-
- pci_dev_count = 0;
- printf("Scan bus\n");
- rte_eal_pci_probe();
-
- if (pci_dev_count == 0) {
- printf("no device detected\n");
- return -1;
- }
-
- /* save the real devargs_list */
- save_devargs_list = devargs_list;
- TAILQ_INIT(&devargs_list);
-
- blacklist_all_devices();
-
- pci_dev_count = 0;
- printf("Scan bus with all devices blacklisted\n");
- rte_eal_pci_probe();
-
- free_devargs_list();
- devargs_list = save_devargs_list;
-
- if (pci_dev_count != 0) {
- printf("not all devices are blacklisted\n");
- return -1;
- }
-
- test_pci_run = 1;
-
- rte_eal_pci_unregister(&my_driver);
- rte_eal_pci_unregister(&my_driver2);
-
- return 0;
-}
-
-static int test_pci_sysfs(void)
-{
- const char *orig;
- const char *newpath;
- int ret;
-
- orig = pci_get_sysfs_path();
- ret = setenv("SYSFS_PCI_DEVICES", "My Documents", 1);
- TEST_ASSERT_SUCCESS(ret, "Failed setenv to My Documents");
-
- newpath = pci_get_sysfs_path();
- TEST_ASSERT(!strcmp(newpath, "My Documents"),
- "pci_get_sysfs_path() should return 'My Documents' "
- "but gives %s", newpath);
-
- ret = setenv("SYSFS_PCI_DEVICES", orig, 1);
- TEST_ASSERT_SUCCESS(ret, "Failed setenv back to '%s'", orig);
-
- newpath = pci_get_sysfs_path();
- TEST_ASSERT(!strcmp(orig, newpath),
- "pci_get_sysfs_path returned unexpected path: "
- "%s (expected: %s)", newpath, orig);
- return 0;
-}
-
-int
-test_pci(void)
-{
- if (test_pci_sysfs())
- return -1;
-
- if (test_pci_setup())
- return -1;
-
- if (test_pci_blacklist())
- return -1;
-
- if (test_pci_cleanup())
- return -1;
-
- return 0;
-}
-
-REGISTER_TEST_COMMAND(pci_autotest, test_pci);
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/class b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/class
deleted file mode 100644
index 2f9c1dad..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/class
+++ /dev/null
@@ -1 +0,0 @@
-0x020000
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/config b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/config
deleted file mode 100644
index 7752421c..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/config
+++ /dev/null
Binary files differ
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/consistent_dma_mask_bits b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/consistent_dma_mask_bits
deleted file mode 100644
index 900731ff..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/consistent_dma_mask_bits
+++ /dev/null
@@ -1 +0,0 @@
-64
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/device b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/device
deleted file mode 100644
index 48a62909..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/device
+++ /dev/null
@@ -1 +0,0 @@
-0x1234
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/dma_mask_bits b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/dma_mask_bits
deleted file mode 100644
index 900731ff..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/dma_mask_bits
+++ /dev/null
@@ -1 +0,0 @@
-64
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/enable b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/enable
deleted file mode 100644
index d00491fd..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/enable
+++ /dev/null
@@ -1 +0,0 @@
-1
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/irq b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/irq
deleted file mode 100644
index 573541ac..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/irq
+++ /dev/null
@@ -1 +0,0 @@
-0
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/modalias b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/modalias
deleted file mode 100644
index f4c76ed9..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/modalias
+++ /dev/null
@@ -1 +0,0 @@
-pci:v00008086d000010FBsv00008086sd00000003bc02sc00i00
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/msi_bus b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/msi_bus
deleted file mode 100644
index d00491fd..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/msi_bus
+++ /dev/null
@@ -1 +0,0 @@
-1
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/numa_node b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/numa_node
deleted file mode 100644
index 3a2e3f49..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/numa_node
+++ /dev/null
@@ -1 +0,0 @@
--1
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/resource b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/resource
deleted file mode 100644
index f3889296..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/resource
+++ /dev/null
@@ -1,13 +0,0 @@
-0x00000000d0080000 0x00000000d00fffff 0x000000000014220c
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x000000000000e020 0x000000000000e03f 0x0000000000040101
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x00000000d0104000 0x00000000d0107fff 0x000000000014220c
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x00000000ab000000 0x00000000ab0fffff 0x0000000000140204
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x00000000ab100000 0x00000000ab1fffff 0x0000000000140204
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_numvfs b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_numvfs
deleted file mode 100644
index 573541ac..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_numvfs
+++ /dev/null
@@ -1 +0,0 @@
-0
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_totalvfs b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_totalvfs
deleted file mode 100644
index 4b9026d8..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/sriov_totalvfs
+++ /dev/null
@@ -1 +0,0 @@
-63
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_device b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_device
deleted file mode 100644
index 89a932cc..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_device
+++ /dev/null
@@ -1 +0,0 @@
-0x0003
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_vendor b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_vendor
deleted file mode 100644
index 446afb4b..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/subsystem_vendor
+++ /dev/null
@@ -1 +0,0 @@
-0x0001
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/uevent b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/uevent
deleted file mode 100644
index 1dbe34de..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/uevent
+++ /dev/null
@@ -1,6 +0,0 @@
-DRIVER=ixgbe
-PCI_CLASS=20000
-PCI_ID=8086:10FB
-PCI_SUBSYS_ID=8086:0003
-PCI_SLOT_NAME=0000:01:00.0
-MODALIAS=pci:v00008086d000010FBsv00008086sd00000003bc02sc00i00
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/vendor b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/vendor
deleted file mode 100644
index 446afb4b..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:00.0/vendor
+++ /dev/null
@@ -1 +0,0 @@
-0x0001
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/class b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/class
deleted file mode 100644
index 22dd9361..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/class
+++ /dev/null
@@ -1 +0,0 @@
-0x100000
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/device b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/device
deleted file mode 100644
index f61bbe63..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/device
+++ /dev/null
@@ -1 +0,0 @@
-0xabcd
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/resource b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/resource
deleted file mode 100644
index f3889296..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/resource
+++ /dev/null
@@ -1,13 +0,0 @@
-0x00000000d0080000 0x00000000d00fffff 0x000000000014220c
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x000000000000e020 0x000000000000e03f 0x0000000000040101
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x00000000d0104000 0x00000000d0107fff 0x000000000014220c
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x00000000ab000000 0x00000000ab0fffff 0x0000000000140204
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x00000000ab100000 0x00000000ab1fffff 0x0000000000140204
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_device b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_device
deleted file mode 100644
index f61bbe63..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_device
+++ /dev/null
@@ -1 +0,0 @@
-0xabcd
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_vendor b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_vendor
deleted file mode 100644
index 4321b81f..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/subsystem_vendor
+++ /dev/null
@@ -1 +0,0 @@
-0x0002
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/vendor b/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/vendor
deleted file mode 100644
index 4321b81f..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:01:02.0/vendor
+++ /dev/null
@@ -1 +0,0 @@
-0x0002
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/class b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/class
deleted file mode 100644
index 22dd9361..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/class
+++ /dev/null
@@ -1 +0,0 @@
-0x100000
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/device b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/device
deleted file mode 100644
index ccaa4982..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/device
+++ /dev/null
@@ -1 +0,0 @@
-0x4444
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/resource b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/resource
deleted file mode 100644
index f3889296..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/resource
+++ /dev/null
@@ -1,13 +0,0 @@
-0x00000000d0080000 0x00000000d00fffff 0x000000000014220c
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x000000000000e020 0x000000000000e03f 0x0000000000040101
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x00000000d0104000 0x00000000d0107fff 0x000000000014220c
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x00000000ab000000 0x00000000ab0fffff 0x0000000000140204
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x00000000ab100000 0x00000000ab1fffff 0x0000000000140204
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_device b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_device
deleted file mode 100644
index ccaa4982..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_device
+++ /dev/null
@@ -1 +0,0 @@
-0x4444
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_vendor b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_vendor
deleted file mode 100644
index 446afb4b..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/subsystem_vendor
+++ /dev/null
@@ -1 +0,0 @@
-0x0001
diff --git a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/vendor b/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/vendor
deleted file mode 100644
index 446afb4b..00000000
--- a/app/test/test_pci_sysfs/bus/pci/devices/0000:02:ab.0/vendor
+++ /dev/null
@@ -1 +0,0 @@
-0x0001
diff --git a/app/test/test_ring.c b/app/test/test_ring.c
deleted file mode 100644
index ebcb8964..00000000
--- a/app/test/test_ring.c
+++ /dev/null
@@ -1,1381 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <string.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <errno.h>
-#include <sys/queue.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_cycles.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_atomic.h>
-#include <rte_branch_prediction.h>
-#include <rte_malloc.h>
-#include <rte_ring.h>
-#include <rte_random.h>
-#include <rte_common.h>
-#include <rte_errno.h>
-#include <rte_hexdump.h>
-
-#include "test.h"
-
-/*
- * Ring
- * ====
- *
- * #. Basic tests: done on one core:
- *
- * - Using single producer/single consumer functions:
- *
- * - Enqueue one object, two objects, MAX_BULK objects
- * - Dequeue one object, two objects, MAX_BULK objects
- * - Check that dequeued pointers are correct
- *
- * - Using multi producers/multi consumers functions:
- *
- * - Enqueue one object, two objects, MAX_BULK objects
- * - Dequeue one object, two objects, MAX_BULK objects
- * - Check that dequeued pointers are correct
- *
- * - Test watermark and default bulk enqueue/dequeue:
- *
- * - Set watermark
- * - Set default bulk value
- * - Enqueue objects, check that -EDQUOT is returned when
- * watermark is exceeded
- * - Check that dequeued pointers are correct
- *
- * #. Check live watermark change
- *
- * - Start a loop on another lcore that will enqueue and dequeue
- * objects in a ring. It will monitor the value of watermark.
- * - At the same time, change the watermark on the master lcore.
- * - The slave lcore will check that watermark changes from 16 to 32.
- *
- * #. Performance tests.
- *
- * Tests done in test_ring_perf.c
- */
-
-#define RING_SIZE 4096
-#define MAX_BULK 32
-
-static rte_atomic32_t synchro;
-
-static struct rte_ring *r;
-
-#define TEST_RING_VERIFY(exp) \
- if (!(exp)) { \
- printf("error at %s:%d\tcondition " #exp " failed\n", \
- __func__, __LINE__); \
- rte_ring_dump(stdout, r); \
- return -1; \
- }
-
-#define TEST_RING_FULL_EMTPY_ITER 8
-
-static int
-check_live_watermark_change(__attribute__((unused)) void *dummy)
-{
- uint64_t hz = rte_get_timer_hz();
- void *obj_table[MAX_BULK];
- unsigned watermark, watermark_old = 16;
- uint64_t cur_time, end_time;
- int64_t diff = 0;
- int i, ret;
- unsigned count = 4;
-
- /* init the object table */
- memset(obj_table, 0, sizeof(obj_table));
- end_time = rte_get_timer_cycles() + (hz / 4);
-
- /* check that bulk and watermark are 4 and 32 (respectively) */
- while (diff >= 0) {
-
- /* add in ring until we reach watermark */
- ret = 0;
- for (i = 0; i < 16; i ++) {
- if (ret != 0)
- break;
- ret = rte_ring_enqueue_bulk(r, obj_table, count);
- }
-
- if (ret != -EDQUOT) {
- printf("Cannot enqueue objects, or watermark not "
- "reached (ret=%d)\n", ret);
- return -1;
- }
-
- /* read watermark, the only change allowed is from 16 to 32 */
- watermark = r->prod.watermark;
- if (watermark != watermark_old &&
- (watermark_old != 16 || watermark != 32)) {
- printf("Bad watermark change %u -> %u\n", watermark_old,
- watermark);
- return -1;
- }
- watermark_old = watermark;
-
- /* dequeue objects from ring */
- while (i--) {
- ret = rte_ring_dequeue_bulk(r, obj_table, count);
- if (ret != 0) {
- printf("Cannot dequeue (ret=%d)\n", ret);
- return -1;
- }
- }
-
- cur_time = rte_get_timer_cycles();
- diff = end_time - cur_time;
- }
-
- if (watermark_old != 32 ) {
- printf(" watermark was not updated (wm=%u)\n",
- watermark_old);
- return -1;
- }
-
- return 0;
-}
-
-static int
-test_live_watermark_change(void)
-{
- unsigned lcore_id = rte_lcore_id();
- unsigned lcore_id2 = rte_get_next_lcore(lcore_id, 0, 1);
-
- printf("Test watermark live modification\n");
- rte_ring_set_water_mark(r, 16);
-
- /* launch a thread that will enqueue and dequeue, checking
- * watermark and quota */
- rte_eal_remote_launch(check_live_watermark_change, NULL, lcore_id2);
-
- rte_delay_ms(100);
- rte_ring_set_water_mark(r, 32);
- rte_delay_ms(100);
-
- if (rte_eal_wait_lcore(lcore_id2) < 0)
- return -1;
-
- return 0;
-}
-
-/* Test for catch on invalid watermark values */
-static int
-test_set_watermark( void ){
- unsigned count;
- int setwm;
-
- struct rte_ring *r = rte_ring_lookup("test_ring_basic_ex");
- if(r == NULL){
- printf( " ring lookup failed\n" );
- goto error;
- }
- count = r->prod.size*2;
- setwm = rte_ring_set_water_mark(r, count);
- if (setwm != -EINVAL){
- printf("Test failed to detect invalid watermark count value\n");
- goto error;
- }
-
- count = 0;
- rte_ring_set_water_mark(r, count);
- if (r->prod.watermark != r->prod.size) {
- printf("Test failed to detect invalid watermark count value\n");
- goto error;
- }
- return 0;
-
-error:
- return -1;
-}
-
-/*
- * helper routine for test_ring_basic
- */
-static int
-test_ring_basic_full_empty(void * const src[], void *dst[])
-{
- unsigned i, rand;
- const unsigned rsz = RING_SIZE - 1;
-
- printf("Basic full/empty test\n");
-
- for (i = 0; TEST_RING_FULL_EMTPY_ITER != i; i++) {
-
- /* random shift in the ring */
- rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
- printf("%s: iteration %u, random shift: %u;\n",
- __func__, i, rand);
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rand));
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand));
-
- /* fill the ring */
- TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
- rsz));
- TEST_RING_VERIFY(0 == rte_ring_free_count(r));
- TEST_RING_VERIFY(rsz == rte_ring_count(r));
- TEST_RING_VERIFY(rte_ring_full(r));
- TEST_RING_VERIFY(0 == rte_ring_empty(r));
-
- /* empty the ring */
- TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz));
- TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
- TEST_RING_VERIFY(0 == rte_ring_count(r));
- TEST_RING_VERIFY(0 == rte_ring_full(r));
- TEST_RING_VERIFY(rte_ring_empty(r));
-
- /* check data */
- TEST_RING_VERIFY(0 == memcmp(src, dst, rsz));
- rte_ring_dump(stdout, r);
- }
- return 0;
-}
-
-static int
-test_ring_basic(void)
-{
- void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
- int ret;
- unsigned i, num_elems;
-
- /* alloc dummy object pointers */
- src = malloc(RING_SIZE*2*sizeof(void *));
- if (src == NULL)
- goto fail;
-
- for (i = 0; i < RING_SIZE*2 ; i++) {
- src[i] = (void *)(unsigned long)i;
- }
- cur_src = src;
-
- /* alloc some room for copied objects */
- dst = malloc(RING_SIZE*2*sizeof(void *));
- if (dst == NULL)
- goto fail;
-
- memset(dst, 0, RING_SIZE*2*sizeof(void *));
- cur_dst = dst;
-
- printf("enqueue 1 obj\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
- cur_src += 1;
- if (ret != 0)
- goto fail;
-
- printf("enqueue 2 objs\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
- cur_src += 2;
- if (ret != 0)
- goto fail;
-
- printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK;
- if (ret != 0)
- goto fail;
-
- printf("dequeue 1 obj\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
- cur_dst += 1;
- if (ret != 0)
- goto fail;
-
- printf("dequeue 2 objs\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
- cur_dst += 2;
- if (ret != 0)
- goto fail;
-
- printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- if (ret != 0)
- goto fail;
-
- /* check data */
- if (memcmp(src, dst, cur_dst - dst)) {
- rte_hexdump(stdout, "src", src, cur_src - src);
- rte_hexdump(stdout, "dst", dst, cur_dst - dst);
- printf("data after dequeue is not the same\n");
- goto fail;
- }
- cur_src = src;
- cur_dst = dst;
-
- printf("enqueue 1 obj\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
- cur_src += 1;
- if (ret != 0)
- goto fail;
-
- printf("enqueue 2 objs\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
- cur_src += 2;
- if (ret != 0)
- goto fail;
-
- printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK;
- if (ret != 0)
- goto fail;
-
- printf("dequeue 1 obj\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
- cur_dst += 1;
- if (ret != 0)
- goto fail;
-
- printf("dequeue 2 objs\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
- cur_dst += 2;
- if (ret != 0)
- goto fail;
-
- printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- if (ret != 0)
- goto fail;
-
- /* check data */
- if (memcmp(src, dst, cur_dst - dst)) {
- rte_hexdump(stdout, "src", src, cur_src - src);
- rte_hexdump(stdout, "dst", dst, cur_dst - dst);
- printf("data after dequeue is not the same\n");
- goto fail;
- }
- cur_src = src;
- cur_dst = dst;
-
- printf("fill and empty the ring\n");
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK;
- if (ret != 0)
- goto fail;
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- if (ret != 0)
- goto fail;
- }
-
- /* check data */
- if (memcmp(src, dst, cur_dst - dst)) {
- rte_hexdump(stdout, "src", src, cur_src - src);
- rte_hexdump(stdout, "dst", dst, cur_dst - dst);
- printf("data after dequeue is not the same\n");
- goto fail;
- }
-
- if (test_ring_basic_full_empty(src, dst) != 0)
- goto fail;
-
- cur_src = src;
- cur_dst = dst;
-
- printf("test watermark and default bulk enqueue / dequeue\n");
- rte_ring_set_water_mark(r, 20);
- num_elems = 16;
-
- cur_src = src;
- cur_dst = dst;
-
- ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
- cur_src += num_elems;
- if (ret != 0) {
- printf("Cannot enqueue\n");
- goto fail;
- }
- ret = rte_ring_enqueue_bulk(r, cur_src, num_elems);
- cur_src += num_elems;
- if (ret != -EDQUOT) {
- printf("Watermark not exceeded\n");
- goto fail;
- }
- ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
- cur_dst += num_elems;
- if (ret != 0) {
- printf("Cannot dequeue\n");
- goto fail;
- }
- ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems);
- cur_dst += num_elems;
- if (ret != 0) {
- printf("Cannot dequeue2\n");
- goto fail;
- }
-
- /* check data */
- if (memcmp(src, dst, cur_dst - dst)) {
- rte_hexdump(stdout, "src", src, cur_src - src);
- rte_hexdump(stdout, "dst", dst, cur_dst - dst);
- printf("data after dequeue is not the same\n");
- goto fail;
- }
-
- cur_src = src;
- cur_dst = dst;
-
- ret = rte_ring_mp_enqueue(r, cur_src);
- if (ret != 0)
- goto fail;
-
- ret = rte_ring_mc_dequeue(r, cur_dst);
- if (ret != 0)
- goto fail;
-
- free(src);
- free(dst);
- return 0;
-
- fail:
- free(src);
- free(dst);
- return -1;
-}
-
-static int
-test_ring_burst_basic(void)
-{
- void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
- int ret;
- unsigned i;
-
- /* alloc dummy object pointers */
- src = malloc(RING_SIZE*2*sizeof(void *));
- if (src == NULL)
- goto fail;
-
- for (i = 0; i < RING_SIZE*2 ; i++) {
- src[i] = (void *)(unsigned long)i;
- }
- cur_src = src;
-
- /* alloc some room for copied objects */
- dst = malloc(RING_SIZE*2*sizeof(void *));
- if (dst == NULL)
- goto fail;
-
- memset(dst, 0, RING_SIZE*2*sizeof(void *));
- cur_dst = dst;
-
- printf("Test SP & SC basic functions \n");
- printf("enqueue 1 obj\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 1);
- cur_src += 1;
- if ((ret & RTE_RING_SZ_MASK) != 1)
- goto fail;
-
- printf("enqueue 2 objs\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
- cur_src += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
- goto fail;
-
- printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK) ;
- cur_src += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
- goto fail;
-
- printf("dequeue 1 obj\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1) ;
- cur_dst += 1;
- if ((ret & RTE_RING_SZ_MASK) != 1)
- goto fail;
-
- printf("dequeue 2 objs\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
- cur_dst += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
- goto fail;
-
- printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
- goto fail;
-
- /* check data */
- if (memcmp(src, dst, cur_dst - dst)) {
- rte_hexdump(stdout, "src", src, cur_src - src);
- rte_hexdump(stdout, "dst", dst, cur_dst - dst);
- printf("data after dequeue is not the same\n");
- goto fail;
- }
-
- cur_src = src;
- cur_dst = dst;
-
- printf("Test enqueue without enough memory space \n");
- for (i = 0; i< (RING_SIZE/MAX_BULK - 1); i++) {
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK) {
- goto fail;
- }
- }
-
- printf("Enqueue 2 objects, free entries = MAX_BULK - 2 \n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, 2);
- cur_src += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
- goto fail;
-
- printf("Enqueue the remaining entries = MAX_BULK - 2 \n");
- /* Always one free entry left */
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK - 3;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
- goto fail;
-
- printf("Test if ring is full \n");
- if (rte_ring_full(r) != 1)
- goto fail;
-
- printf("Test enqueue for a full entry \n");
- ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- printf("Test dequeue without enough objects \n");
- for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
- goto fail;
- }
-
- /* Available memory space for the exact MAX_BULK entries */
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2);
- cur_dst += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
- goto fail;
-
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK - 3;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
- goto fail;
-
- printf("Test if ring is empty \n");
- /* Check if ring is empty */
- if (1 != rte_ring_empty(r))
- goto fail;
-
- /* check data */
- if (memcmp(src, dst, cur_dst - dst)) {
- rte_hexdump(stdout, "src", src, cur_src - src);
- rte_hexdump(stdout, "dst", dst, cur_dst - dst);
- printf("data after dequeue is not the same\n");
- goto fail;
- }
-
- cur_src = src;
- cur_dst = dst;
-
- printf("Test MP & MC basic functions \n");
-
- printf("enqueue 1 obj\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 1);
- cur_src += 1;
- if ((ret & RTE_RING_SZ_MASK) != 1)
- goto fail;
-
- printf("enqueue 2 objs\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
- cur_src += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
- goto fail;
-
- printf("enqueue MAX_BULK objs\n");
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
- goto fail;
-
- printf("dequeue 1 obj\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1);
- cur_dst += 1;
- if ((ret & RTE_RING_SZ_MASK) != 1)
- goto fail;
-
- printf("dequeue 2 objs\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
- cur_dst += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
- goto fail;
-
- printf("dequeue MAX_BULK objs\n");
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
- goto fail;
-
- /* check data */
- if (memcmp(src, dst, cur_dst - dst)) {
- rte_hexdump(stdout, "src", src, cur_src - src);
- rte_hexdump(stdout, "dst", dst, cur_dst - dst);
- printf("data after dequeue is not the same\n");
- goto fail;
- }
-
- cur_src = src;
- cur_dst = dst;
-
- printf("fill and empty the ring\n");
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
- goto fail;
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
- goto fail;
- }
-
- /* check data */
- if (memcmp(src, dst, cur_dst - dst)) {
- rte_hexdump(stdout, "src", src, cur_src - src);
- rte_hexdump(stdout, "dst", dst, cur_dst - dst);
- printf("data after dequeue is not the same\n");
- goto fail;
- }
-
- cur_src = src;
- cur_dst = dst;
-
- printf("Test enqueue without enough memory space \n");
- for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
- goto fail;
- }
-
- /* Available memory space for the exact MAX_BULK objects */
- ret = rte_ring_mp_enqueue_burst(r, cur_src, 2);
- cur_src += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
- goto fail;
-
- ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK - 3;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
- goto fail;
-
-
- printf("Test dequeue without enough objects \n");
- for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
- goto fail;
- }
-
- /* Available objects - the exact MAX_BULK */
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2);
- cur_dst += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
- goto fail;
-
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK - 3;
- if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
- goto fail;
-
- /* check data */
- if (memcmp(src, dst, cur_dst - dst)) {
- rte_hexdump(stdout, "src", src, cur_src - src);
- rte_hexdump(stdout, "dst", dst, cur_dst - dst);
- printf("data after dequeue is not the same\n");
- goto fail;
- }
-
- cur_src = src;
- cur_dst = dst;
-
- printf("Covering rte_ring_enqueue_burst functions \n");
-
- ret = rte_ring_enqueue_burst(r, cur_src, 2);
- cur_src += 2;
- if ((ret & RTE_RING_SZ_MASK) != 2)
- goto fail;
-
- ret = rte_ring_dequeue_burst(r, cur_dst, 2);
- cur_dst += 2;
- if (ret != 2)
- goto fail;
-
- /* Free memory before test completed */
- free(src);
- free(dst);
- return 0;
-
- fail:
- free(src);
- free(dst);
- return -1;
-}
-
-static int
-test_ring_stats(void)
-{
-
-#ifndef RTE_LIBRTE_RING_DEBUG
- printf("Enable RTE_LIBRTE_RING_DEBUG to test ring stats.\n");
- return 0;
-#else
- void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
- int ret;
- unsigned i;
- unsigned num_items = 0;
- unsigned failed_enqueue_ops = 0;
- unsigned failed_enqueue_items = 0;
- unsigned failed_dequeue_ops = 0;
- unsigned failed_dequeue_items = 0;
- unsigned last_enqueue_ops = 0;
- unsigned last_enqueue_items = 0;
- unsigned last_quota_ops = 0;
- unsigned last_quota_items = 0;
- unsigned lcore_id = rte_lcore_id();
- struct rte_ring_debug_stats *ring_stats = &r->stats[lcore_id];
-
- printf("Test the ring stats.\n");
-
- /* Reset the watermark in case it was set in another test. */
- rte_ring_set_water_mark(r, 0);
-
- /* Reset the ring stats. */
- memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
- /* Allocate some dummy object pointers. */
- src = malloc(RING_SIZE*2*sizeof(void *));
- if (src == NULL)
- goto fail;
-
- for (i = 0; i < RING_SIZE*2 ; i++) {
- src[i] = (void *)(unsigned long)i;
- }
-
- /* Allocate some memory for copied objects. */
- dst = malloc(RING_SIZE*2*sizeof(void *));
- if (dst == NULL)
- goto fail;
-
- memset(dst, 0, RING_SIZE*2*sizeof(void *));
-
- /* Set the head and tail pointers. */
- cur_src = src;
- cur_dst = dst;
-
- /* Do Enqueue tests. */
- printf("Test the dequeue stats.\n");
-
- /* Fill the ring up to RING_SIZE -1. */
- printf("Fill the ring.\n");
- for (i = 0; i< (RING_SIZE/MAX_BULK); i++) {
- rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
- cur_src += MAX_BULK;
- }
-
- /* Adjust for final enqueue = MAX_BULK -1. */
- cur_src--;
-
- printf("Verify that the ring is full.\n");
- if (rte_ring_full(r) != 1)
- goto fail;
-
-
- printf("Verify the enqueue success stats.\n");
- /* Stats should match above enqueue operations to fill the ring. */
- if (ring_stats->enq_success_bulk != (RING_SIZE/MAX_BULK))
- goto fail;
-
- /* Current max objects is RING_SIZE -1. */
- if (ring_stats->enq_success_objs != RING_SIZE -1)
- goto fail;
-
- /* Shouldn't have any failures yet. */
- if (ring_stats->enq_fail_bulk != 0)
- goto fail;
- if (ring_stats->enq_fail_objs != 0)
- goto fail;
-
-
- printf("Test stats for SP burst enqueue to a full ring.\n");
- num_items = 2;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for SP bulk enqueue to a full ring.\n");
- num_items = 4;
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -ENOBUFS)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for MP burst enqueue to a full ring.\n");
- num_items = 8;
- ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- printf("Test stats for MP bulk enqueue to a full ring.\n");
- num_items = 16;
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -ENOBUFS)
- goto fail;
-
- failed_enqueue_ops += 1;
- failed_enqueue_items += num_items;
-
- /* The enqueue should have failed. */
- if (ring_stats->enq_fail_bulk != failed_enqueue_ops)
- goto fail;
- if (ring_stats->enq_fail_objs != failed_enqueue_items)
- goto fail;
-
-
- /* Do Dequeue tests. */
- printf("Test the dequeue stats.\n");
-
- printf("Empty the ring.\n");
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- }
-
- /* There was only RING_SIZE -1 objects to dequeue. */
- cur_dst++;
-
- printf("Verify ring is empty.\n");
- if (1 != rte_ring_empty(r))
- goto fail;
-
- printf("Verify the dequeue success stats.\n");
- /* Stats should match above dequeue operations. */
- if (ring_stats->deq_success_bulk != (RING_SIZE/MAX_BULK))
- goto fail;
-
- /* Objects dequeued is RING_SIZE -1. */
- if (ring_stats->deq_success_objs != RING_SIZE -1)
- goto fail;
-
- /* Shouldn't have any dequeue failure stats yet. */
- if (ring_stats->deq_fail_bulk != 0)
- goto fail;
-
- printf("Test stats for SC burst dequeue with an empty ring.\n");
- num_items = 2;
- ret = rte_ring_sc_dequeue_burst(r, cur_dst, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for SC bulk dequeue with an empty ring.\n");
- num_items = 4;
- ret = rte_ring_sc_dequeue_bulk(r, cur_dst, num_items);
- if (ret != -ENOENT)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for MC burst dequeue with an empty ring.\n");
- num_items = 8;
- ret = rte_ring_mc_dequeue_burst(r, cur_dst, num_items);
- if ((ret & RTE_RING_SZ_MASK) != 0)
- goto fail;
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test stats for MC bulk dequeue with an empty ring.\n");
- num_items = 16;
- ret = rte_ring_mc_dequeue_bulk(r, cur_dst, num_items);
- if (ret != -ENOENT)
- goto fail;
-
- failed_dequeue_ops += 1;
- failed_dequeue_items += num_items;
-
- /* The dequeue should have failed. */
- if (ring_stats->deq_fail_bulk != failed_dequeue_ops)
- goto fail;
- if (ring_stats->deq_fail_objs != failed_dequeue_items)
- goto fail;
-
-
- printf("Test total enqueue/dequeue stats.\n");
- /* At this point the enqueue and dequeue stats should be the same. */
- if (ring_stats->enq_success_bulk != ring_stats->deq_success_bulk)
- goto fail;
- if (ring_stats->enq_success_objs != ring_stats->deq_success_objs)
- goto fail;
- if (ring_stats->enq_fail_bulk != ring_stats->deq_fail_bulk)
- goto fail;
- if (ring_stats->enq_fail_objs != ring_stats->deq_fail_objs)
- goto fail;
-
-
- /* Watermark Tests. */
- printf("Test the watermark/quota stats.\n");
-
- printf("Verify the initial watermark stats.\n");
- /* Watermark stats should be 0 since there is no watermark. */
- if (ring_stats->enq_quota_bulk != 0)
- goto fail;
- if (ring_stats->enq_quota_objs != 0)
- goto fail;
-
- /* Set a watermark. */
- rte_ring_set_water_mark(r, 16);
-
- /* Reset pointers. */
- cur_src = src;
- cur_dst = dst;
-
- last_enqueue_ops = ring_stats->enq_success_bulk;
- last_enqueue_items = ring_stats->enq_success_objs;
-
-
- printf("Test stats for SP burst enqueue below watermark.\n");
- num_items = 8;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should still be 0. */
- if (ring_stats->enq_quota_bulk != 0)
- goto fail;
- if (ring_stats->enq_quota_objs != 0)
- goto fail;
-
- /* Success stats should have increased. */
- if (ring_stats->enq_success_bulk != last_enqueue_ops + 1)
- goto fail;
- if (ring_stats->enq_success_objs != last_enqueue_items + num_items)
- goto fail;
-
- last_enqueue_ops = ring_stats->enq_success_bulk;
- last_enqueue_items = ring_stats->enq_success_objs;
-
-
- printf("Test stats for SP burst enqueue at watermark.\n");
- num_items = 8;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != 1)
- goto fail;
- if (ring_stats->enq_quota_objs != num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for SP burst enqueue above watermark.\n");
- num_items = 1;
- ret = rte_ring_sp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for MP burst enqueue above watermark.\n");
- num_items = 2;
- ret = rte_ring_mp_enqueue_burst(r, cur_src, num_items);
- if ((ret & RTE_RING_SZ_MASK) != num_items)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for SP bulk enqueue above watermark.\n");
- num_items = 4;
- ret = rte_ring_sp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -EDQUOT)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- last_quota_ops = ring_stats->enq_quota_bulk;
- last_quota_items = ring_stats->enq_quota_objs;
-
-
- printf("Test stats for MP bulk enqueue above watermark.\n");
- num_items = 8;
- ret = rte_ring_mp_enqueue_bulk(r, cur_src, num_items);
- if (ret != -EDQUOT)
- goto fail;
-
- /* Watermark stats should have changed. */
- if (ring_stats->enq_quota_bulk != last_quota_ops +1)
- goto fail;
- if (ring_stats->enq_quota_objs != last_quota_items + num_items)
- goto fail;
-
- printf("Test watermark success stats.\n");
- /* Success stats should be same as last non-watermarked enqueue. */
- if (ring_stats->enq_success_bulk != last_enqueue_ops)
- goto fail;
- if (ring_stats->enq_success_objs != last_enqueue_items)
- goto fail;
-
-
- /* Cleanup. */
-
- /* Empty the ring. */
- for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
- rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
- cur_dst += MAX_BULK;
- }
-
- /* Reset the watermark. */
- rte_ring_set_water_mark(r, 0);
-
- /* Reset the ring stats. */
- memset(&r->stats[lcore_id], 0, sizeof(r->stats[lcore_id]));
-
- /* Free memory before test completed */
- free(src);
- free(dst);
- return 0;
-
-fail:
- free(src);
- free(dst);
- return -1;
-#endif
-}
-
-/*
- * it will always fail to create ring with a wrong ring size number in this function
- */
-static int
-test_ring_creation_with_wrong_size(void)
-{
- struct rte_ring * rp = NULL;
-
- /* Test if ring size is not power of 2 */
- rp = rte_ring_create("test_bad_ring_size", RING_SIZE + 1, SOCKET_ID_ANY, 0);
- if (NULL != rp) {
- return -1;
- }
-
- /* Test if ring size is exceeding the limit */
- rp = rte_ring_create("test_bad_ring_size", (RTE_RING_SZ_MASK + 1), SOCKET_ID_ANY, 0);
- if (NULL != rp) {
- return -1;
- }
- return 0;
-}
-
-/*
- * it tests if it would always fail to create ring with an used ring name
- */
-static int
-test_ring_creation_with_an_used_name(void)
-{
- struct rte_ring * rp;
-
- rp = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
- if (NULL != rp)
- return -1;
-
- return 0;
-}
-
-/*
- * Test to if a non-power of 2 count causes the create
- * function to fail correctly
- */
-static int
-test_create_count_odd(void)
-{
- struct rte_ring *r = rte_ring_create("test_ring_count",
- 4097, SOCKET_ID_ANY, 0 );
- if(r != NULL){
- return -1;
- }
- return 0;
-}
-
-static int
-test_lookup_null(void)
-{
- struct rte_ring *rlp = rte_ring_lookup("ring_not_found");
- if (rlp ==NULL)
- if (rte_errno != ENOENT){
- printf( "test failed to returnn error on null pointer\n");
- return -1;
- }
- return 0;
-}
-
-/*
- * it tests some more basic ring operations
- */
-static int
-test_ring_basic_ex(void)
-{
- int ret = -1;
- unsigned i;
- struct rte_ring * rp;
- void **obj = NULL;
-
- obj = rte_calloc("test_ring_basic_ex_malloc", RING_SIZE, sizeof(void *), 0);
- if (obj == NULL) {
- printf("test_ring_basic_ex fail to rte_malloc\n");
- goto fail_test;
- }
-
- rp = rte_ring_create("test_ring_basic_ex", RING_SIZE, SOCKET_ID_ANY,
- RING_F_SP_ENQ | RING_F_SC_DEQ);
- if (rp == NULL) {
- printf("test_ring_basic_ex fail to create ring\n");
- goto fail_test;
- }
-
- if (rte_ring_lookup("test_ring_basic_ex") != rp) {
- goto fail_test;
- }
-
- if (rte_ring_empty(rp) != 1) {
- printf("test_ring_basic_ex ring is not empty but it should be\n");
- goto fail_test;
- }
-
- printf("%u ring entries are now free\n", rte_ring_free_count(rp));
-
- for (i = 0; i < RING_SIZE; i ++) {
- rte_ring_enqueue(rp, obj[i]);
- }
-
- if (rte_ring_full(rp) != 1) {
- printf("test_ring_basic_ex ring is not full but it should be\n");
- goto fail_test;
- }
-
- for (i = 0; i < RING_SIZE; i ++) {
- rte_ring_dequeue(rp, &obj[i]);
- }
-
- if (rte_ring_empty(rp) != 1) {
- printf("test_ring_basic_ex ring is not empty but it should be\n");
- goto fail_test;
- }
-
- /* Covering the ring burst operation */
- ret = rte_ring_enqueue_burst(rp, obj, 2);
- if ((ret & RTE_RING_SZ_MASK) != 2) {
- printf("test_ring_basic_ex: rte_ring_enqueue_burst fails \n");
- goto fail_test;
- }
-
- ret = rte_ring_dequeue_burst(rp, obj, 2);
- if (ret != 2) {
- printf("test_ring_basic_ex: rte_ring_dequeue_burst fails \n");
- goto fail_test;
- }
-
- ret = 0;
-fail_test:
- if (obj != NULL)
- rte_free(obj);
-
- return ret;
-}
-
-static int
-test_ring(void)
-{
- /* some more basic operations */
- if (test_ring_basic_ex() < 0)
- return -1;
-
- rte_atomic32_init(&synchro);
-
- if (r == NULL)
- r = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
- if (r == NULL)
- return -1;
-
- /* retrieve the ring from its name */
- if (rte_ring_lookup("test") != r) {
- printf("Cannot lookup ring from its name\n");
- return -1;
- }
-
- /* burst operations */
- if (test_ring_burst_basic() < 0)
- return -1;
-
- /* basic operations */
- if (test_ring_basic() < 0)
- return -1;
-
- /* ring stats */
- if (test_ring_stats() < 0)
- return -1;
-
- /* basic operations */
- if (test_live_watermark_change() < 0)
- return -1;
-
- if ( test_set_watermark() < 0){
- printf ("Test failed to detect invalid parameter\n");
- return -1;
- }
- else
- printf ( "Test detected forced bad watermark values\n");
-
- if ( test_create_count_odd() < 0){
- printf ("Test failed to detect odd count\n");
- return -1;
- }
- else
- printf ( "Test detected odd count\n");
-
- if ( test_lookup_null() < 0){
- printf ("Test failed to detect NULL ring lookup\n");
- return -1;
- }
- else
- printf ( "Test detected NULL ring lookup \n");
-
- /* test of creating ring with wrong size */
- if (test_ring_creation_with_wrong_size() < 0)
- return -1;
-
- /* test of creation ring with an used name */
- if (test_ring_creation_with_an_used_name() < 0)
- return -1;
-
- /* dump the ring status */
- rte_ring_list_dump(stdout);
-
- return 0;
-}
-
-REGISTER_TEST_COMMAND(ring_autotest, test_ring);
diff --git a/scripts/auto-config-h.sh b/buildtools/auto-config-h.sh
index 4356d7e3..4356d7e3 100755
--- a/scripts/auto-config-h.sh
+++ b/buildtools/auto-config-h.sh
diff --git a/scripts/gen-build-mk.sh b/buildtools/gen-build-mk.sh
index 5c12813d..5c12813d 100755
--- a/scripts/gen-build-mk.sh
+++ b/buildtools/gen-build-mk.sh
diff --git a/scripts/gen-config-h.sh b/buildtools/gen-config-h.sh
index 1a2436c2..1a2436c2 100755
--- a/scripts/gen-config-h.sh
+++ b/buildtools/gen-config-h.sh
diff --git a/buildtools/pmdinfogen/Makefile b/buildtools/pmdinfogen/Makefile
index bd8f9005..bf07b6f2 100644
--- a/buildtools/pmdinfogen/Makefile
+++ b/buildtools/pmdinfogen/Makefile
@@ -44,6 +44,4 @@ SRCS-y += pmdinfogen.c
HOST_CFLAGS += $(WERROR_FLAGS) -g
HOST_CFLAGS += -I$(RTE_OUTPUT)/include
-DEPDIRS-y += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.hostapp.mk
diff --git a/buildtools/pmdinfogen/pmdinfogen.c b/buildtools/pmdinfogen/pmdinfogen.c
index 5bf08ced..ba1a12e2 100644
--- a/buildtools/pmdinfogen/pmdinfogen.c
+++ b/buildtools/pmdinfogen/pmdinfogen.c
@@ -270,6 +270,7 @@ struct opt_tag {
static const struct opt_tag opt_tags[] = {
{"_param_string_export", "params"},
+ {"_kmod_dep_export", "kmod"},
};
static int complete_pmd_entry(struct elf_info *info, struct pmd_driver *drv)
diff --git a/buildtools/pmdinfogen/pmdinfogen.h b/buildtools/pmdinfogen/pmdinfogen.h
index e9eabffb..27bab30e 100644
--- a/buildtools/pmdinfogen/pmdinfogen.h
+++ b/buildtools/pmdinfogen/pmdinfogen.h
@@ -89,6 +89,7 @@ else \
enum opt_params {
PMD_PARAM_STRING = 0,
+ PMD_KMOD_DEP,
PMD_OPT_MAX
};
diff --git a/scripts/relpath.sh b/buildtools/relpath.sh
index 4ff4671e..139b7813 100755
--- a/scripts/relpath.sh
+++ b/buildtools/relpath.sh
@@ -44,7 +44,7 @@ if [ $# -ne 2 ]; then
fi
# get the real absolute path, derefencing symlinks
-ABS1=$(readlink -f $1)
+ABS1="$(dirname $(readlink -f $1))/$(basename $1)"
ABS2=$(readlink -f $2)
# remove leading slash
diff --git a/config/common_base b/config/common_base
index 4bff83af..8907bea3 100644
--- a/config/common_base
+++ b/config/common_base
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -75,6 +75,11 @@ CONFIG_RTE_BUILD_SHARED_LIB=n
CONFIG_RTE_NEXT_ABI=y
#
+# Major ABI to overwrite library specific LIBABIVER
+#
+CONFIG_RTE_MAJOR_ABI=
+
+#
# Machine's cache line size
#
CONFIG_RTE_CACHE_LINE_SIZE=64
@@ -89,7 +94,9 @@ CONFIG_RTE_MAX_MEMSEG=256
CONFIG_RTE_MAX_MEMZONE=2560
CONFIG_RTE_MAX_TAILQ=32
CONFIG_RTE_LOG_LEVEL=RTE_LOG_INFO
+CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_INFO
CONFIG_RTE_LOG_HISTORY=256
+CONFIG_RTE_BACKTRACE=y
CONFIG_RTE_LIBEAL_USE_HPET=n
CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n
CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
@@ -97,6 +104,14 @@ CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
+#
+# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
+# AVX512 is marked as experimental for now, will enable it after enough
+# field test and possible optimization.
+#
+CONFIG_RTE_ENABLE_AVX=y
+CONFIG_RTE_ENABLE_AVX512=n
+
# Default driver path (or "" to disable)
CONFIG_RTE_EAL_PMD_PATH=""
@@ -122,6 +137,14 @@ CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y
#
+# Turn off Tx preparation stage
+#
+# Warning: rte_ethdev_tx_prepare() can be safely disabled only if using a
+# driver which do not implement any Tx preparation.
+#
+CONFIG_RTE_ETHDEV_TX_PREPARE_NOOP=n
+
+#
# Support NIC bypass logic
#
CONFIG_RTE_NIC_BYPASS=n
@@ -159,20 +182,16 @@ CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n
CONFIG_RTE_IXGBE_INC_VECTOR=y
-CONFIG_RTE_IXGBE_RX_OLFLAGS_ENABLE=y
#
# Compile burst-oriented I40E PMD driver
#
CONFIG_RTE_LIBRTE_I40E_PMD=y
-CONFIG_RTE_LIBRTE_I40E_DEBUG_INIT=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_RX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n
-CONFIG_RTE_LIBRTE_I40E_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
-CONFIG_RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF=4
@@ -203,7 +222,7 @@ CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8
CONFIG_RTE_LIBRTE_MLX4_SOFT_COUNTERS=1
#
-# Compile burst-oriented Mellanox ConnectX-4 (MLX5) PMD
+# Compile burst-oriented Mellanox ConnectX-4 & ConnectX-5 (MLX5) PMD
#
CONFIG_RTE_LIBRTE_MLX5_PMD=n
CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
@@ -248,6 +267,12 @@ CONFIG_RTE_LIBRTE_NFP_DEBUG=n
CONFIG_RTE_LIBRTE_BNXT_PMD=y
#
+# Compile burst-oriented Solarflare libefx-based PMD
+#
+CONFIG_RTE_LIBRTE_SFC_EFX_PMD=y
+CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n
+
+#
# Compile software PMD backed by SZEDATA2 device
#
CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n
@@ -264,7 +289,7 @@ CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS=0
#
# Compile burst-oriented Cavium Thunderx NICVF PMD driver
#
-CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=n
+CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=y
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT=n
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX=n
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX=n
@@ -272,6 +297,38 @@ CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX=n
#
+# Compile burst-oriented Cavium LiquidIO PMD driver
+#
+CONFIG_RTE_LIBRTE_LIO_PMD=y
+CONFIG_RTE_LIBRTE_LIO_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_LIO_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_LIO_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_LIO_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_LIO_DEBUG_MBOX=n
+CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS=n
+
+#
+# Compile NXP DPAA2 FSL-MC Bus
+#
+CONFIG_RTE_LIBRTE_FSLMC_BUS=n
+
+#
+# Compile Support Libraries for NXP DPAA2
+#
+CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=n
+CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y
+
+#
+# Compile burst-oriented NXP DPAA2 PMD driver
+#
+CONFIG_RTE_LIBRTE_DPAA2_PMD=n
+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX_FREE=n
+
+#
# Compile burst-oriented VIRTIO PMD driver
#
CONFIG_RTE_LIBRTE_VIRTIO_PMD=y
@@ -315,7 +372,7 @@ CONFIG_RTE_LIBRTE_PMD_BOND=y
CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB=n
CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB_L1=n
-# QLogic 10G/25G/40G/100G PMD
+# QLogic 10G/25G/40G/50G/100G PMD
#
CONFIG_RTE_LIBRTE_QEDE_PMD=y
CONFIG_RTE_LIBRTE_QEDE_DEBUG_INIT=n
@@ -333,6 +390,31 @@ CONFIG_RTE_LIBRTE_QEDE_FW=""
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=n
#
+# Compile ARK PMD
+#
+CONFIG_RTE_LIBRTE_ARK_PMD=y
+CONFIG_RTE_LIBRTE_ARK_PAD_TX=y
+CONFIG_RTE_LIBRTE_ARK_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_ARK_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_ARK_DEBUG_STATS=n
+CONFIG_RTE_LIBRTE_ARK_DEBUG_TRACE=n
+
+#
+# Compile WRS accelerated virtual port (AVP) guest PMD driver
+#
+CONFIG_RTE_LIBRTE_AVP_PMD=n
+CONFIG_RTE_LIBRTE_AVP_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_AVP_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_AVP_DEBUG_DRIVER=y
+CONFIG_RTE_LIBRTE_AVP_DEBUG_BUFFERS=n
+
+#
+# Compile the TAP PMD
+# It is enabled by default for Linux only.
+#
+CONFIG_RTE_LIBRTE_PMD_TAP=n
+
+#
# Compile Xen PMD
#
CONFIG_RTE_LIBRTE_PMD_XENVIRT=n
@@ -356,6 +438,20 @@ CONFIG_RTE_CRYPTO_MAX_DEVS=64
CONFIG_RTE_CRYPTODEV_NAME_LEN=64
#
+# Compile PMD for ARMv8 Crypto device
+#
+CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO=n
+CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n
+
+#
+# Compile NXP DPAA2 crypto sec driver for CAAM HW
+#
+CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=n
+CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX=n
+
+#
# Compile PMD for QuickAssist based devices
#
CONFIG_RTE_LIBRTE_PMD_QAT=n
@@ -406,17 +502,46 @@ CONFIG_RTE_LIBRTE_PMD_ZUC=n
CONFIG_RTE_LIBRTE_PMD_ZUC_DEBUG=n
#
+# Compile PMD for Crypto Scheduler device
+#
+CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER=y
+CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
+
+#
# Compile PMD for NULL Crypto device
#
CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
+# Compile generic event device library
+#
+CONFIG_RTE_LIBRTE_EVENTDEV=y
+CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n
+CONFIG_RTE_EVENT_MAX_DEVS=16
+CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64
+
+#
+# Compile PMD for skeleton event device
+#
+CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV=y
+CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG=n
+
+#
+# Compile PMD for software event device
+#
+CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV=y
+CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV_DEBUG=n
+
+#
+# Compile PMD for octeontx sso event device
+#
+CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=y
+CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG=n
+
+#
# Compile librte_ring
#
CONFIG_RTE_LIBRTE_RING=y
-CONFIG_RTE_LIBRTE_RING_DEBUG=n
-CONFIG_RTE_RING_SPLIT_PROD_CONS=n
-CONFIG_RTE_RING_PAUSE_REP_COUNT=0
#
# Compile librte_mempool
@@ -426,6 +551,12 @@ CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512
CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n
#
+# Compile Mempool drivers
+#
+CONFIG_RTE_DRIVER_MEMPOOL_RING=y
+CONFIG_RTE_DRIVER_MEMPOOL_STACK=y
+
+#
# Compile librte_mbuf
#
CONFIG_RTE_LIBRTE_MBUF=y
@@ -458,11 +589,31 @@ CONFIG_RTE_LIBRTE_HASH=y
CONFIG_RTE_LIBRTE_HASH_DEBUG=n
#
+# Compile librte_efd
+#
+CONFIG_RTE_LIBRTE_EFD=y
+
+#
# Compile librte_jobstats
#
CONFIG_RTE_LIBRTE_JOBSTATS=y
#
+# Compile the device metrics library
+#
+CONFIG_RTE_LIBRTE_METRICS=y
+
+#
+# Compile the bitrate statistics library
+#
+CONFIG_RTE_LIBRTE_BITRATE=y
+
+#
+# Compile the latency statistics library
+#
+CONFIG_RTE_LIBRTE_LATENCY_STATS=y
+
+#
# Compile librte_lpm
#
CONFIG_RTE_LIBRTE_LPM=y
@@ -543,11 +694,10 @@ CONFIG_RTE_PIPELINE_STATS_COLLECT=n
# Compile librte_kni
#
CONFIG_RTE_LIBRTE_KNI=n
+CONFIG_RTE_LIBRTE_PMD_KNI=n
CONFIG_RTE_KNI_KMOD=n
+CONFIG_RTE_KNI_KMOD_ETHTOOL=n
CONFIG_RTE_KNI_PREEMPT_DEFAULT=y
-CONFIG_RTE_KNI_VHOST=n
-CONFIG_RTE_KNI_VHOST_MAX_CACHE_SIZE=1024
-CONFIG_RTE_KNI_VHOST_VNET_HDR_EN=n
#
# Compile the pdump library
@@ -573,11 +723,6 @@ CONFIG_RTE_LIBRTE_PMD_VHOST=n
CONFIG_RTE_LIBRTE_XEN_DOM0=n
#
-# Enable warning directives
-#
-CONFIG_RTE_INSECURE_FUNCTION_WARNING=n
-
-#
# Compile the test application
#
CONFIG_RTE_APP_TEST=y
@@ -589,3 +734,8 @@ CONFIG_RTE_APP_TEST_RESOURCE_TAR=n
CONFIG_RTE_TEST_PMD=y
CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n
CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n
+
+#
+# Compile the crypto performance application
+#
+CONFIG_RTE_APP_CRYPTO_PERF=y
diff --git a/config/common_linuxapp b/config/common_linuxapp
index 2483dfa5..b3cf41b0 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -39,8 +39,12 @@ CONFIG_RTE_EAL_IGB_UIO=y
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_KNI_KMOD=y
CONFIG_RTE_LIBRTE_KNI=y
+CONFIG_RTE_LIBRTE_PMD_KNI=y
CONFIG_RTE_LIBRTE_VHOST=y
CONFIG_RTE_LIBRTE_PMD_VHOST=y
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
+CONFIG_RTE_LIBRTE_PMD_TAP=y
+CONFIG_RTE_LIBRTE_AVP_PMD=y
+CONFIG_RTE_LIBRTE_NFP_PMD=y
CONFIG_RTE_LIBRTE_POWER=y
CONFIG_RTE_VIRTIO_USER=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index bde6acd9..19607eb6 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -61,6 +61,7 @@ CONFIG_RTE_SCHED_VECTOR=n
# cannot use those on ARM
CONFIG_RTE_KNI_KMOD=n
+CONFIG_RTE_LIBRTE_ARK_PMD=n
CONFIG_RTE_LIBRTE_EM_PMD=n
CONFIG_RTE_LIBRTE_IGB_PMD=n
CONFIG_RTE_LIBRTE_CXGBE_PMD=n
@@ -70,8 +71,9 @@ CONFIG_RTE_LIBRTE_FM10K_PMD=n
CONFIG_RTE_LIBRTE_I40E_PMD=n
CONFIG_RTE_LIBRTE_IXGBE_PMD=n
CONFIG_RTE_LIBRTE_MLX4_PMD=n
-CONFIG_RTE_LIBRTE_MPIPE_PMD=n
CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
CONFIG_RTE_LIBRTE_PMD_XENVIRT=n
CONFIG_RTE_LIBRTE_PMD_BNX2X=n
CONFIG_RTE_LIBRTE_QEDE_PMD=n
+CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
+CONFIG_RTE_LIBRTE_AVP_PMD=n
diff --git a/config/defconfig_arm64-armv8a-linuxapp-gcc b/config/defconfig_arm64-armv8a-linuxapp-gcc
index 6321884c..9f327666 100644
--- a/config/defconfig_arm64-armv8a-linuxapp-gcc
+++ b/config/defconfig_arm64-armv8a-linuxapp-gcc
@@ -42,8 +42,15 @@ CONFIG_RTE_FORCE_INTRINSICS=y
CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_TOOLCHAIN_GCC=y
+# Maximum available cache line size in arm64 implementations.
+# Setting to maximum available cache line size in generic config
+# to address minimum DMA alignment across all arm64 implementations.
+CONFIG_RTE_CACHE_LINE_SIZE=128
+
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_LIBRTE_FM10K_PMD=n
+CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
+CONFIG_RTE_LIBRTE_AVP_PMD=n
CONFIG_RTE_SCHED_VECTOR=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 66df54c5..314a0ece 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -1,6 +1,7 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright (c) 2016 NXP. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -40,3 +41,42 @@ CONFIG_RTE_ARCH_ARM_TUNE="cortex-a57+fp+simd"
#
CONFIG_RTE_MAX_LCORE=8
CONFIG_RTE_MAX_NUMA_NODES=1
+CONFIG_RTE_CACHE_LINE_SIZE=64
+
+CONFIG_RTE_PKTMBUF_HEADROOM=256
+
+#
+# Compile Support Libraries for DPAA2
+#
+CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=y
+CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="dpaa2"
+CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y
+
+#
+# Compile NXP DPAA2 FSL-MC Bus
+#
+CONFIG_RTE_LIBRTE_FSLMC_BUS=y
+
+#
+# Compile burst-oriented NXP DPAA2 PMD driver
+#
+CONFIG_RTE_LIBRTE_DPAA2_PMD=y
+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX_FREE=n
+
+#
+# Compile NXP DPAA2 crypto sec driver for CAAM HW
+#
+CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=y
+CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX=n
+
+#
+# Number of sessions to create in the session memory pool
+# on a single DPAA2 SEC device.
+#
+CONFIG_RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS=2048
diff --git a/config/defconfig_arm64-thunderx-linuxapp-gcc b/config/defconfig_arm64-thunderx-linuxapp-gcc
index a5b1e24c..f64da4cd 100644
--- a/config/defconfig_arm64-thunderx-linuxapp-gcc
+++ b/config/defconfig_arm64-thunderx-linuxapp-gcc
@@ -38,11 +38,7 @@ CONFIG_RTE_MAX_NUMA_NODES=2
CONFIG_RTE_MAX_LCORE=96
#
-# Compile Cavium Thunderx NICVF PMD driver
+# Compile PMD for octeontx sso event device
#
-CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=y
-CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT=n
-CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX=n
-CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX=n
-CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER=n
-CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX=n
+CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=y
+CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG=n
diff --git a/config/defconfig_arm64-xgene1-linuxapp-gcc b/config/defconfig_arm64-xgene1-linuxapp-gcc
index f096166b..d8e54472 100644
--- a/config/defconfig_arm64-xgene1-linuxapp-gcc
+++ b/config/defconfig_arm64-xgene1-linuxapp-gcc
@@ -32,3 +32,4 @@
#include "defconfig_arm64-armv8a-linuxapp-gcc"
CONFIG_RTE_MACHINE="xgene1"
+CONFIG_RTE_CACHE_LINE_SIZE=64
diff --git a/config/defconfig_i686-native-linuxapp-gcc b/config/defconfig_i686-native-linuxapp-gcc
index 576d5435..9847bdb6 100644
--- a/config/defconfig_i686-native-linuxapp-gcc
+++ b/config/defconfig_i686-native-linuxapp-gcc
@@ -52,6 +52,11 @@ CONFIG_RTE_LIBRTE_KNI=n
CONFIG_RTE_IXGBE_INC_VECTOR=n
#
+# Solarflare PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
+
+#
# AES-NI multi-buffer PMD is not supported on 32-bit
#
CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
@@ -70,3 +75,8 @@ CONFIG_RTE_LIBRTE_PMD_KASUMI=n
# ZUC PMD is not supported on 32-bit
#
CONFIG_RTE_LIBRTE_PMD_ZUC=n
+
+#
+# AVP PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_AVP_PMD=n
diff --git a/config/defconfig_i686-native-linuxapp-icc b/config/defconfig_i686-native-linuxapp-icc
index 6c902a3e..269e88e9 100644
--- a/config/defconfig_i686-native-linuxapp-icc
+++ b/config/defconfig_i686-native-linuxapp-icc
@@ -52,6 +52,11 @@ CONFIG_RTE_LIBRTE_KNI=n
CONFIG_RTE_IXGBE_INC_VECTOR=n
#
+# Solarflare PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
+
+#
# AES-NI multi-buffer PMD is not supported on 32-bit
#
CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
@@ -70,3 +75,8 @@ CONFIG_RTE_LIBRTE_PMD_KASUMI=n
# ZUC PMD is not supported on 32-bit
#
CONFIG_RTE_LIBRTE_PMD_ZUC=n
+
+#
+# AVP PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_AVP_PMD=n
diff --git a/config/defconfig_ppc_64-power8-linuxapp-gcc b/config/defconfig_ppc_64-power8-linuxapp-gcc
index f953e61b..71e4c353 100644
--- a/config/defconfig_ppc_64-power8-linuxapp-gcc
+++ b/config/defconfig_ppc_64-power8-linuxapp-gcc
@@ -49,10 +49,10 @@ CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
# Note: Initially, all of the PMD drivers compilation are turned off on Power
# Will turn on them only after the successful testing on Power
CONFIG_RTE_LIBRTE_IXGBE_PMD=n
-CONFIG_RTE_LIBRTE_I40E_PMD=n
CONFIG_RTE_LIBRTE_VIRTIO_PMD=y
CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
CONFIG_RTE_LIBRTE_PMD_BOND=n
CONFIG_RTE_LIBRTE_ENIC_PMD=n
CONFIG_RTE_LIBRTE_FM10K_PMD=n
-
+CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
+CONFIG_RTE_LIBRTE_AVP_PMD=n
diff --git a/config/defconfig_x86_64-native-linuxapp-icc b/config/defconfig_x86_64-native-linuxapp-icc
index bfa8a3da..872d1a1b 100644
--- a/config/defconfig_x86_64-native-linuxapp-icc
+++ b/config/defconfig_x86_64-native-linuxapp-icc
@@ -41,3 +41,8 @@ CONFIG_RTE_ARCH_64=y
CONFIG_RTE_TOOLCHAIN="icc"
CONFIG_RTE_TOOLCHAIN_ICC=y
+
+#
+# Solarflare PMD build is not supported using icc toolchain
+#
+CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
diff --git a/config/defconfig_x86_x32-native-linuxapp-gcc b/config/defconfig_x86_x32-native-linuxapp-gcc
index 0c26857c..19573cb5 100644
--- a/config/defconfig_x86_x32-native-linuxapp-gcc
+++ b/config/defconfig_x86_x32-native-linuxapp-gcc
@@ -45,3 +45,13 @@ CONFIG_RTE_TOOLCHAIN_GCC=y
# KNI is not supported on 32-bit
#
CONFIG_RTE_LIBRTE_KNI=n
+
+#
+# Solarflare PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
+
+#
+# AVP PMD is not supported on 32-bit
+#
+CONFIG_RTE_LIBRTE_AVP_PMD=n
diff --git a/devtools/build-tags.sh b/devtools/build-tags.sh
new file mode 100755
index 00000000..0db406d6
--- /dev/null
+++ b/devtools/build-tags.sh
@@ -0,0 +1,233 @@
+#!/bin/sh -e
+# Generate tags or gtags or cscope or etags files
+#
+# BSD LICENSE
+#
+# Copyright 2017 Cavium Networks
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+verbose=false
+linux=true
+bsd=true
+x86_32=true
+x86_64=true
+ppc_64=true
+arm_32=true
+arm_64=true
+
+print_usage()
+{
+ echo "Usage: $(basename $0) [-h] [-v] tags|cscope|gtags|etags [config]"
+ echo "Valid configs are:"
+ make showconfigs | sed 's,^,\t,'
+}
+
+# Move to the root of the git tree
+cd $(dirname $0)/..
+
+while getopts hv ARG ; do
+ case $ARG in
+ v ) verbose=true ;;
+ h ) print_usage; exit 0 ;;
+ ? ) print_usage; exit 1 ;;
+ esac
+done
+shift $(($OPTIND - 1))
+
+#ignore version control files
+ignore="( -name .svn -o -name CVS -o -name .hg -o -name .git ) -prune -o"
+
+source_dirs="test app buildtools drivers examples lib"
+
+skip_bsd="( -name bsdapp ) -prune -o"
+skip_linux="( -name linuxapp ) -prune -o"
+skip_arch="( -name arch ) -prune -o"
+skip_sse="( -name *_sse*.[chS] ) -prune -o"
+skip_avx="( -name *_avx*.[chS] ) -prune -o"
+skip_neon="( -name *_neon*.[chS] ) -prune -o"
+skip_altivec="( -name *_altivec*.[chS] ) -prune -o"
+skip_arm64="( -name *arm64*.[chS] ) -prune -o"
+skip_x86="( -name *x86*.[chS] ) -prune -o"
+skip_32b_files="( -name *_32.h ) -prune -o"
+skip_64b_files="( -name *_64.h ) -prune -o"
+
+skiplist="$skip_bsd $skip_linux $skip_arch $skip_sse $skip_avx \
+ $skip_neon $skip_altivec $skip_x86 $skip_arm64"
+
+find_sources()
+{
+ find $1 $ignore $3 -name $2 -not -type l -print
+}
+
+common_sources()
+{
+ find_sources "$source_dirs" '*.[chS]' "$skiplist"
+}
+
+linux_sources()
+{
+ find_sources "lib/librte_eal/linuxapp" '*.[chS]'
+}
+
+bsd_sources()
+{
+ find_sources "lib/librte_eal/bsdapp" '*.[chS]'
+}
+
+arm_common()
+{
+ find_sources "lib/librte_eal/common/arch/arm" '*.[chS]'
+ find_sources "$source_dirs" '*neon*.[chS]'
+}
+
+arm_32_sources()
+{
+ arm_common
+ find_sources "lib/librte_eal/common/include/arch/arm" '*.[chS]' \
+ "$skip_64b_files"
+}
+
+arm_64_sources()
+{
+ arm_common
+ find_sources "lib/librte_eal/common/include/arch/arm" '*.[chS]' \
+ "$skip_32b_files"
+ find_sources "$source_dirs" '*arm64.[chS]'
+}
+
+x86_common()
+{
+ find_sources "lib/librte_eal/common/arch/x86" '*.[chS]'
+
+ find_sources "examples/performance-thread/common/arch/x86" '*.[chS]'
+ find_sources "$source_dirs" '*_sse*.[chS]'
+ find_sources "$source_dirs" '*_avx*.[chS]'
+ find_sources "$source_dirs" '*x86.[chS]'
+}
+
+x86_32_sources()
+{
+ x86_common
+ find_sources "lib/librte_eal/common/include/arch/x86" '*.[chS]' \
+ "$skip_64b_files"
+}
+
+x86_64_sources()
+{
+ x86_common
+ find_sources "lib/librte_eal/common/include/arch/x86" '*.[chS]' \
+ "$skip_32b_files"
+}
+
+ppc_64_sources()
+{
+ find_sources "lib/librte_eal/common/arch/ppc_64" '*.[chS]'
+ find_sources "lib/librte_eal/common/include/arch/ppc_64" '*.[chS]'
+ find_sources "$source_dirs" '*altivec*.[chS]'
+}
+
+check_valid_target()
+{
+ cfgfound=false
+ allconfigs=$(make showconfigs)
+ for cfg in $allconfigs ; do
+ if [ "$cfg" = "$1" ] ; then
+ cfgfound=true
+ fi
+ done
+ if ! $cfgfound ; then
+ echo "Invalid config: $1"
+ print_usage
+ exit 0
+ fi
+}
+
+if [ -n "$2" ]; then
+ check_valid_target $2
+
+ echo $2 | grep -q "linuxapp-" || linux=false
+ echo $2 | grep -q "bsdapp-" || bsd=false
+ echo $2 | grep -q "x86_64-" || x86_64=false
+ echo $2 | grep -q "arm-" || arm_32=false
+ echo $2 | grep -q "arm64-" || arm_64=false
+ echo $2 | grep -q "ppc_64-" || ppc_64=false
+ echo $2 | grep -q -e "i686-" -e "x32-" || x86_32=false
+fi
+
+all_sources()
+{
+ common_sources
+ if $linux ; then linux_sources ; fi
+ if $bsd ; then bsd_sources ; fi
+ if $x86_64 ; then x86_64_sources ; fi
+ if $x86_32 ; then x86_32_sources ; fi
+ if $ppc_64 ; then ppc_64_sources ; fi
+ if $arm_32 ; then arm_32_sources ; fi
+ if $arm_64 ; then arm_64_sources ; fi
+}
+
+show_flags()
+{
+ if $verbose ; then
+ echo "mode: $1"
+ echo "config: $2"
+ echo "linux: $linux"
+ echo "bsd: $bsd"
+ echo "x86_32: $x86_32"
+ echo "x86_64: $x86_64"
+ echo "ppc_64: $ppc_64"
+ echo "arm_32: $arm_32"
+ echo "arm_64: $arm_64"
+ fi
+}
+
+case "$1" in
+ "cscope")
+ show_flags $1 $2
+ all_sources > cscope.files
+ cscope -q -b -f cscope.out
+ ;;
+ "gtags")
+ show_flags $1 $2
+ all_sources | gtags -i -f -
+ ;;
+ "tags")
+ show_flags $1 $2
+ rm -f tags
+ all_sources | xargs ctags -a
+ ;;
+ "etags")
+ show_flags $1 $2
+ rm -f TAGS
+ all_sources | xargs etags -a
+ ;;
+ *)
+ echo "Invalid mode: $1"
+ print_usage
+ ;;
+esac
diff --git a/scripts/check-git-log.sh b/devtools/check-git-log.sh
index 5f8a9fc5..910daba4 100755
--- a/scripts/check-git-log.sh
+++ b/devtools/check-git-log.sh
@@ -47,12 +47,18 @@ if [ "$1" = '-h' -o "$1" = '--help' ] ; then
exit
fi
+selfdir=$(dirname $(readlink -f $0))
range=${1:-origin/master..}
+# convert -N to HEAD~N.. in order to comply with git-log-fixes.sh getopts
+if printf -- $range | grep -q '^-[0-9]\+' ; then
+ range="HEAD$(printf -- $range | sed 's,^-,~,').."
+fi
commits=$(git log --format='%h' --reverse $range)
headlines=$(git log --format='%s' --reverse $range)
bodylines=$(git log --format='%b' --reverse $range)
fixes=$(git log --format='%h %s' --reverse $range | grep -i ': *fix' | cut -d' ' -f1)
+stablefixes=$($selfdir/git-log-fixes.sh $range | sed '/(N\/A)$/d' | cut -d' ' -f2)
tags=$(git log --format='%b' --reverse $range | grep -i -e 'by *:' -e 'fix.*:')
bytag='\(Reported\|Suggested\|Signed-off\|Acked\|Reviewed\|Tested\)-by:'
@@ -81,7 +87,7 @@ bad=$(for commit in $commits ; do
if [ $(echo "$drvgrp" | wc -l) -gt 1 ] ; then
echo "$headline" | grep -v '^drivers:'
elif [ $(echo "$drv" | wc -l) -gt 1 ] ; then
- echo "$headline" | grep -v "^$drvgrp"
+ echo "$headline" | grep -v "^drivers/$drvgrp"
else
echo "$headline" | grep -v "^$drv"
fi
@@ -107,28 +113,31 @@ bad=$(echo "$headlines" | grep --color=always \
# check headline uppercase (Rx/Tx, VF, L2, MAC, Linux, ARM...)
bad=$(echo "$headlines" | grep -E --color=always \
- -e '\<(rx|tx|RX|TX)\>' \
- -e '\<[pv]f\>' \
- -e '\<[hsf]w\>' \
- -e '\<l[234]\>' \
+ -e ':.*\<(rx|tx|RX|TX)\>' \
+ -e ':.*\<[pv]f\>' \
+ -e ':.*\<[hsf]w\>' \
+ -e ':.*\<l[234]\>' \
-e ':.*\<api\>' \
-e ':.*\<arm\>' \
-e ':.*\<armv7\>' \
-e ':.*\<armv8\>' \
+ -e ':.*\<crc\>' \
-e ':.*\<dma\>' \
-e ':.*\<freebsd\>' \
-e ':.*\<linux\>' \
-e ':.*\<lro\>' \
+ -e ':.*\<lsc\>' \
-e ':.*\<mac\>' \
-e ':.*\<mtu\>' \
-e ':.*\<nic\>' \
+ -e ':.*\<nvm\>' \
-e ':.*\<numa\>' \
-e ':.*\<pci\>' \
-e ':.*\<pmd\>' \
-e ':.*\<rss\>' \
- -e ':.*\<tile-gx\>' \
- -e ':.*\<tilegx\>' \
- -e ':.*\<vlan\>' \
+ -e ':.*\<tso\>' \
+ -e ':.*\<[Vv]lan\>' \
+ -e ':.*\<vsi\>' \
| sed 's,^,\t,')
[ -z "$bad" ] || printf "Wrong headline lowercase:\n$bad\n"
@@ -164,12 +173,6 @@ bad=$(echo "$tags" |
sed 's,^.,\t&,')
[ -z "$bad" ] || printf "Wrong tag:\n$bad\n"
-# check blank line after last Fixes: tag
-bad=$(echo "$bodylines" |
- sed -n 'N;/\nFixes:/D;/\n$/D;/^Fixes:/P' |
- sed 's,^.,\t&,')
-[ -z "$bad" ] || printf "Missing blank line after 'Fixes' tag:\n$bad\n"
-
# check missing Fixes: tag
bad=$(for fix in $fixes ; do
git log --format='%b' -1 $fix | grep -q '^Fixes: ' ||
@@ -191,3 +194,10 @@ bad=$(for fixtag in $fixtags ; do
printf "$fixtag" | grep -v "^$good$"
done | sed 's,^,\t,')
[ -z "$bad" ] || printf "Wrong 'Fixes' reference:\n$bad\n"
+
+# check Cc: stable@dpdk.org for fixes
+bad=$(for fix in $stablefixes ; do
+ git log --format='%b' -1 $fix | grep -qi '^Cc: *stable@dpdk.org' ||
+ git log --format='\t%s' -1 $fix
+done)
+[ -z "$bad" ] || printf "Is it candidate for Cc: stable@dpdk.org backport?\n$bad\n"
diff --git a/scripts/check-includes.sh b/devtools/check-includes.sh
index d65adc6d..c4ec73f1 100755
--- a/scripts/check-includes.sh
+++ b/devtools/check-includes.sh
@@ -109,10 +109,12 @@ include_dir=${1:-build/include}
'rte_byteorder_64.h' \
'generic/*' \
'exec-env/*' \
+ 'rte_vhost.h' \
+ 'rte_eth_vhost.h' \
}
: ${IGNORE_CXX= \
+ 'rte_vhost.h' \
'rte_eth_vhost.h' \
- 'rte_virtio_net.h' \
}
temp_cc=/tmp/${0##*/}.$$.c
diff --git a/scripts/check-maintainers.sh b/devtools/check-maintainers.sh
index 69c879a8..69c879a8 100755
--- a/scripts/check-maintainers.sh
+++ b/devtools/check-maintainers.sh
diff --git a/scripts/checkpatches.sh b/devtools/checkpatches.sh
index 336cc7b4..a56c41a3 100755
--- a/scripts/checkpatches.sh
+++ b/devtools/checkpatches.sh
@@ -44,7 +44,8 @@ options="$options --show-types"
options="$options --ignore=LINUX_VERSION_CODE,FILE_PATH_CHANGES,\
VOLATILE,PREFER_PACKED,PREFER_ALIGNED,PREFER_PRINTF,\
PREFER_KERNEL_TYPES,BIT_MACRO,CONST_STRUCT,\
-SPLIT_STRING,LINE_SPACING,PARENTHESIS_ALIGNMENT,NETWORKING_BLOCK_COMMENT_STYLE,\
+SPLIT_STRING,LONG_LINE_STRING,\
+LINE_SPACING,PARENTHESIS_ALIGNMENT,NETWORKING_BLOCK_COMMENT_STYLE,\
NEW_TYPEDEFS,COMPARISON_TO_NULL"
print_usage () {
@@ -89,12 +90,12 @@ check () { # <patch> <commit> <title>
if [ -n "$1" ] ; then
report=$($DPDK_CHECKPATCH_PATH $options "$1" 2>/dev/null)
elif [ -n "$2" ] ; then
- report=$(git format-patch --no-stat --stdout -1 $commit |
+ report=$(git format-patch --find-renames --no-stat --stdout -1 $commit |
$DPDK_CHECKPATCH_PATH $options - 2>/dev/null)
else
report=$($DPDK_CHECKPATCH_PATH $options - 2>/dev/null)
fi
- [ $? -ne 0 ] || continue
+ [ $? -ne 0 ] || return 0
$verbose || printf '\n### %s\n\n' "$3"
printf '%s\n' "$report" | sed -n '1,/^total:.*lines checked$/p'
status=$(($status + 1))
diff --git a/scripts/cocci.sh b/devtools/cocci.sh
index 7acc256f..4ca5025f 100755
--- a/scripts/cocci.sh
+++ b/devtools/cocci.sh
@@ -33,7 +33,7 @@
# Apply coccinelle transforms.
SRCTREE=$(readlink -f $(dirname $0)/..)
-COCCI=$SRCTREE/scripts/cocci
+COCCI=$SRCTREE/devtools/cocci
[ -n "$SPATCH" ] || SPATCH=$(which spatch)
PATCH_LIST="$@"
diff --git a/scripts/cocci/mtod-offset.cocci b/devtools/cocci/mtod-offset.cocci
index 13134e9d..13134e9d 100644
--- a/scripts/cocci/mtod-offset.cocci
+++ b/devtools/cocci/mtod-offset.cocci
diff --git a/scripts/git-log-fixes.sh b/devtools/git-log-fixes.sh
index 4824c7f7..74049467 100755
--- a/scripts/git-log-fixes.sh
+++ b/devtools/git-log-fixes.sh
@@ -90,7 +90,7 @@ origin_version () # <origin_hash> ...
git rev-parse -q --verify $1 >&- || continue
# get version of this bug origin
local origver=$(commit_version $origin)
- local roothashes=$(origin_filter $origin)
+ local roothashes="$(origin_filter $origin)"
if [ -n "$roothashes" ] ; then
# look chained fix of fix recursively
local rootver="$(origin_version $roothashes)"
@@ -103,10 +103,21 @@ origin_version () # <origin_hash> ...
done | sort -uV | head -n1
}
+# print a marker for stable tag presence
+stable_tag () # <hash>
+{
+ if git log --format='%b' -1 $1 | grep -qi '^Cc: *stable@dpdk.org' ; then
+ echo 'S'
+ else
+ echo '-'
+ fi
+}
+
git log --oneline --reverse $range |
while read id headline ; do
origins=$(origin_filter $id)
- [ -n "$origins" ] || echo "$headline" | grep -q fix || continue
+ stable=$(stable_tag $id)
+ [ "$stable" = "S" ] || [ -n "$origins" ] || echo "$headline" | grep -q fix || continue
version=$(commit_version $id)
if [ -n "$origins" ] ; then
origver="$(origin_version $origins)"
@@ -116,5 +127,5 @@ while read id headline ; do
else
origver='N/A'
fi
- printf '%s %7s %s (%s)\n' $version $id "$headline" "$origver"
+ printf '%s %7s %s %s (%s)\n' $version $id $stable "$headline" "$origver"
done
diff --git a/scripts/load-devel-config b/devtools/load-devel-config
index 4f43cb35..4f43cb35 100644
--- a/scripts/load-devel-config
+++ b/devtools/load-devel-config
diff --git a/scripts/test-build.sh b/devtools/test-build.sh
index a9793097..61bdce7c 100755
--- a/scripts/test-build.sh
+++ b/devtools/test-build.sh
@@ -34,9 +34,11 @@ default_path=$PATH
# Load config options:
# - AESNI_MULTI_BUFFER_LIB_PATH
+# - ARMV8_CRYPTO_LIB_PATH
# - DPDK_BUILD_TEST_CONFIGS (defconfig1+option1+option2 defconfig2)
# - DPDK_DEP_ARCHIVE
# - DPDK_DEP_CFLAGS
+# - DPDK_DEP_ISAL_CRYPTO (y/[n])
# - DPDK_DEP_LDFLAGS
# - DPDK_DEP_MOFED (y/[n])
# - DPDK_DEP_NUMA (y/[n])
@@ -64,7 +66,7 @@ print_help () {
options:
-h this help
-jX use X parallel jobs in "make"
- -s short test with only first config without examples/doc
+ -s short test only first config without tests|examples|doc
-v verbose build
config: defconfig[[~][+]option1[[~][+]option2...]]
@@ -119,6 +121,7 @@ reset_env ()
unset CROSS
unset DPDK_DEP_ARCHIVE
unset DPDK_DEP_CFLAGS
+ unset DPDK_DEP_ISAL_CRYPTO
unset DPDK_DEP_LDFLAGS
unset DPDK_DEP_MOFED
unset DPDK_DEP_NUMA
@@ -127,6 +130,7 @@ reset_env ()
unset DPDK_DEP_SZE
unset DPDK_DEP_ZLIB
unset AESNI_MULTI_BUFFER_LIB_PATH
+ unset ARMV8_CRYPTO_LIB_PATH
unset LIBSSO_SNOW3G_PATH
unset LIBSSO_KASUMI_PATH
unset LIBSSO_ZUC_PATH
@@ -174,9 +178,11 @@ config () # <directory> <target> <options>
sed -ri 's,(NFP_PMD=)n,\1y,' $1/.config
test "$DPDK_DEP_PCAP" != y || \
sed -ri 's,(PCAP=)n,\1y,' $1/.config
+ test -z "$ARMV8_CRYPTO_LIB_PATH" || \
+ sed -ri 's,(PMD_ARMV8_CRYPTO=)n,\1y,' $1/.config
test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \
sed -ri 's,(PMD_AESNI_MB=)n,\1y,' $1/.config
- test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \
+ test "$DPDK_DEP_ISAL_CRYPTO" != y || \
sed -ri 's,(PMD_AESNI_GCM=)n,\1y,' $1/.config
test -z "$LIBSSO_SNOW3G_PATH" || \
sed -ri 's,(PMD_SNOW3G=)n,\1y,' $1/.config
@@ -188,7 +194,6 @@ config () # <directory> <target> <options>
sed -ri 's,(PMD_OPENSSL=)n,\1y,' $1/.config
test "$DPDK_DEP_SSL" != y || \
sed -ri 's,(PMD_QAT=)n,\1y,' $1/.config
- sed -ri 's,(KNI_VHOST.*=)n,\1y,' $1/.config
sed -ri 's,(SCHED_.*=)n,\1y,' $1/.config
build_config_hook $1 $2 $3
@@ -225,16 +230,15 @@ for conf in $configs ; do
make -j$J EXTRA_CFLAGS="$maxerr $DPDK_DEP_CFLAGS" \
EXTRA_LDFLAGS="$DPDK_DEP_LDFLAGS" $verbose O=$dir
! $short || break
+ echo "================== Build tests for $dir"
+ make test-build -j$J EXTRA_CFLAGS="$maxerr $DPDK_DEP_CFLAGS" \
+ EXTRA_LDFLAGS="$DPDK_DEP_LDFLAGS" $verbose O=$dir
echo "================== Build examples for $dir"
export RTE_SDK=$(pwd)
export RTE_TARGET=$dir
make -j$J -sC examples \
EXTRA_LDFLAGS="$DPDK_DEP_LDFLAGS" $verbose \
O=$(readlink -m $dir/examples)
- ! echo $target | grep -q '^x86_64' || \
- make -j$J -sC examples/performance-thread \
- EXTRA_LDFLAGS="$DPDK_DEP_LDFLAGS" $verbose \
- O=$(readlink -m $dir/examples/performance-thread)
unset RTE_TARGET
echo "################## $dir done."
unset dir
diff --git a/scripts/test-null.sh b/devtools/test-null.sh
index 30cd0b03..30cd0b03 100755
--- a/scripts/test-null.sh
+++ b/devtools/test-null.sh
diff --git a/scripts/validate-abi.sh b/devtools/validate-abi.sh
index 52e4e7ae..52e4e7ae 100755
--- a/scripts/validate-abi.sh
+++ b/devtools/validate-abi.sh
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 6675f965..f5f1f199 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -4,7 +4,7 @@ API {#index}
<!--
BSD LICENSE
- Copyright 2013 6WIND S.A.
+ Copyright 2013-2017 6WIND S.A.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -39,12 +39,20 @@ There are many libraries, so their headers may be grouped by topics:
[dev] (@ref rte_dev.h),
[ethdev] (@ref rte_ethdev.h),
[ethctrl] (@ref rte_eth_ctrl.h),
+ [rte_flow] (@ref rte_flow.h),
+ [rte_flow_driver] (@ref rte_flow_driver.h),
[cryptodev] (@ref rte_cryptodev.h),
+ [eventdev] (@ref rte_eventdev.h),
[devargs] (@ref rte_devargs.h),
+ [PCI] (@ref rte_pci.h)
+
+- **device specific**:
[bond] (@ref rte_eth_bond.h),
- [vhost] (@ref rte_virtio_net.h),
+ [vhost] (@ref rte_vhost.h),
[KNI] (@ref rte_kni.h),
- [PCI] (@ref rte_pci.h),
+ [ixgbe] (@ref rte_pmd_ixgbe.h),
+ [i40e] (@ref rte_pmd_i40e.h),
+ [crypto_scheduler] (@ref rte_cryptodev_scheduler.h)
- **memory**:
[memseg] (@ref rte_memory.h),
@@ -66,8 +74,10 @@ There are many libraries, so their headers may be grouped by topics:
- **CPU arch**:
[branch prediction] (@ref rte_branch_prediction.h),
[cache prefetch] (@ref rte_prefetch.h),
+ [SIMD] (@ref rte_vect.h),
[byte order] (@ref rte_byteorder.h),
- [CPU flags] (@ref rte_cpuflags.h)
+ [CPU flags] (@ref rte_cpuflags.h),
+ [I/O access] (@ref rte_io.h)
- **CPU multicore**:
[interrupts] (@ref rte_interrupts.h),
@@ -87,7 +97,8 @@ There are many libraries, so their headers may be grouped by topics:
[frag/reass] (@ref rte_ip_frag.h),
[LPM IPv4 route] (@ref rte_lpm.h),
[LPM IPv6 route] (@ref rte_lpm6.h),
- [ACL] (@ref rte_acl.h)
+ [ACL] (@ref rte_acl.h),
+ [EFD] (@ref rte_efd.h)
- **QoS**:
[metering] (@ref rte_meter.h),
@@ -136,10 +147,10 @@ There are many libraries, so their headers may be grouped by topics:
- **debug**:
[jobstats] (@ref rte_jobstats.h),
+ [pdump] (@ref rte_pdump.h),
[hexdump] (@ref rte_hexdump.h),
[debug] (@ref rte_debug.h),
[log] (@ref rte_log.h),
- [warnings] (@ref rte_warnings.h),
[errno] (@ref rte_errno.h)
- **misc**:
@@ -147,4 +158,7 @@ There are many libraries, so their headers may be grouped by topics:
[common] (@ref rte_common.h),
[ABI compat] (@ref rte_compat.h),
[keepalive] (@ref rte_keepalive.h),
+ [device metrics] (@ref rte_metrics.h),
+ [bitrate statistics] (@ref rte_bitrate.h),
+ [latency statistics] (@ref rte_latencystats.h),
[version] (@ref rte_version.h)
diff --git a/doc/api/doxy-api.conf b/doc/api/doxy-api.conf
index 9dc7ae5c..ca9194fe 100644
--- a/doc/api/doxy-api.conf
+++ b/doc/api/doxy-api.conf
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright 2013 6WIND S.A.
+# Copyright 2013-2017 6WIND S.A.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -30,27 +30,35 @@
PROJECT_NAME = DPDK
INPUT = doc/api/doxy-api-index.md \
- doc/api/examples.dox \
+ drivers/crypto/scheduler \
drivers/net/bonding \
+ drivers/net/i40e \
+ drivers/net/ixgbe \
lib/librte_eal/common/include \
lib/librte_eal/common/include/generic \
lib/librte_acl \
+ lib/librte_bitratestats \
lib/librte_cfgfile \
lib/librte_cmdline \
lib/librte_compat \
lib/librte_cryptodev \
lib/librte_distributor \
+ lib/librte_efd \
lib/librte_ether \
+ lib/librte_eventdev \
lib/librte_hash \
lib/librte_ip_frag \
lib/librte_jobstats \
lib/librte_kni \
lib/librte_kvargs \
+ lib/librte_latencystats \
lib/librte_lpm \
lib/librte_mbuf \
lib/librte_mempool \
lib/librte_meter \
+ lib/librte_metrics \
lib/librte_net \
+ lib/librte_pdump \
lib/librte_pipeline \
lib/librte_port \
lib/librte_power \
diff --git a/doc/api/examples.dox b/doc/api/examples.dox
deleted file mode 100644
index 1626852c..00000000
--- a/doc/api/examples.dox
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
-@page examples DPDK Example Programs
-
-@example bond/main.c
-@example cmdline/commands.c
-@example cmdline/main.c
-@example cmdline/parse_obj_list.c
-@example distributor/main.c
-@example dpdk_qat/crypto.c
-@example dpdk_qat/main.c
-@example ethtool/ethtool-app/ethapp.c
-@example ethtool/ethtool-app/main.c
-@example ethtool/lib/rte_ethtool.c
-@example exception_path/main.c
-@example helloworld/main.c
-@example ip_fragmentation/main.c
-@example ip_pipeline/config_check.c
-@example ip_pipeline/config_parse.c
-@example ip_pipeline/config_parse_tm.c
-@example ip_pipeline/cpu_core_map.c
-@example ip_pipeline/init.c
-@example ip_pipeline/main.c
-@example ip_pipeline/pipeline/pipeline_common_be.c
-@example ip_pipeline/pipeline/pipeline_common_fe.c
-@example ip_pipeline/pipeline/pipeline_firewall_be.c
-@example ip_pipeline/pipeline/pipeline_firewall.c
-@example ip_pipeline/pipeline/pipeline_flow_actions_be.c
-@example ip_pipeline/pipeline/pipeline_flow_actions.c
-@example ip_pipeline/pipeline/pipeline_flow_classification_be.c
-@example ip_pipeline/pipeline/pipeline_flow_classification.c
-@example ip_pipeline/pipeline/pipeline_master_be.c
-@example ip_pipeline/pipeline/pipeline_master.c
-@example ip_pipeline/pipeline/pipeline_passthrough_be.c
-@example ip_pipeline/pipeline/pipeline_passthrough.c
-@example ip_pipeline/pipeline/pipeline_routing_be.c
-@example ip_pipeline/pipeline/pipeline_routing.c
-@example ip_pipeline/thread.c
-@example ip_pipeline/thread_fe.c
-@example ip_reassembly/main.c
-@example ipv4_multicast/main.c
-@example kni/main.c
-@example l2fwd-crypto/main.c
-@example l2fwd-jobstats/main.c
-@example l2fwd-keepalive/main.c
-@example l2fwd/main.c
-@example l3fwd-acl/main.c
-@example l3fwd/main.c
-@example l3fwd-power/main.c
-@example l3fwd-vf/main.c
-@example link_status_interrupt/main.c
-@example load_balancer/config.c
-@example load_balancer/init.c
-@example load_balancer/main.c
-@example load_balancer/runtime.c
-@example multi_process/client_server_mp/mp_client/client.c
-@example multi_process/client_server_mp/mp_server/args.c
-@example multi_process/client_server_mp/mp_server/init.c
-@example multi_process/client_server_mp/mp_server/main.c
-@example multi_process/l2fwd_fork/flib.c
-@example multi_process/l2fwd_fork/main.c
-@example multi_process/simple_mp/main.c
-@example multi_process/simple_mp/mp_commands.c
-@example multi_process/symmetric_mp/main.c
-@example netmap_compat/bridge/bridge.c
-@example netmap_compat/lib/compat_netmap.c
-@example packet_ordering/main.c
-@example performance-thread/common/arch/x86/ctx.c
-@example performance-thread/common/lthread.c
-@example performance-thread/common/lthread_cond.c
-@example performance-thread/common/lthread_diag.c
-@example performance-thread/common/lthread_mutex.c
-@example performance-thread/common/lthread_sched.c
-@example performance-thread/common/lthread_tls.c
-@example performance-thread/l3fwd-thread/main.c
-@example performance-thread/pthread_shim/main.c
-@example performance-thread/pthread_shim/pthread_shim.c
-@example ptpclient/ptpclient.c
-@example qos_meter/main.c
-@example qos_meter/rte_policer.c
-@example qos_sched/app_thread.c
-@example qos_sched/args.c
-@example qos_sched/cfg_file.c
-@example qos_sched/cmdline.c
-@example qos_sched/init.c
-@example qos_sched/main.c
-@example qos_sched/stats.c
-@example quota_watermark/qw/args.c
-@example quota_watermark/qwctl/commands.c
-@example quota_watermark/qwctl/qwctl.c
-@example quota_watermark/qw/init.c
-@example quota_watermark/qw/main.c
-@example rxtx_callbacks/main.c
-@example skeleton/basicfwd.c
-@example tep_termination/main.c
-@example tep_termination/vxlan.c
-@example tep_termination/vxlan_setup.c
-@example timer/main.c
-@example vhost/main.c
-@example vhost_xen/main.c
-@example vhost_xen/vhost_monitor.c
-@example vhost_xen/xenstore_parse.c
-@example vmdq_dcb/main.c
-@example vmdq/main.c
-@example vm_power_manager/channel_manager.c
-@example vm_power_manager/channel_monitor.c
-@example vm_power_manager/guest_cli/main.c
-@example vm_power_manager/guest_cli/vm_power_cli_guest.c
-@example vm_power_manager/main.c
-@example vm_power_manager/power_manager.c
-@example vm_power_manager/vm_power_cli.c
-*/
diff --git a/doc/build-sdk-quick.txt b/doc/build-sdk-quick.txt
index 967ff095..8d410525 100644
--- a/doc/build-sdk-quick.txt
+++ b/doc/build-sdk-quick.txt
@@ -9,6 +9,8 @@ Build commands
install install optionally staged in DESTDIR
examples build examples for given targets (T=)
examples_clean clean examples for given targets (T=)
+ test compile tests and run basic unit tests
+ test-* run specific subset of unit tests
Build variables
EXTRA_CPPFLAGS preprocessor options
EXTRA_CFLAGS compiler options
@@ -20,8 +22,8 @@ Build variables
V verbose
D debug dependencies
O build directory (default: build/ - install T= default: ./)
- DESTDIR staging install directory
- prefix root install directory
+ DESTDIR staging install directory (default: empty)
+ prefix root install directory (default: /usr/local)
T target template - used with config or install
format: <arch-machine-execenv-toolchain>
templates in config/defconfig_*
diff --git a/doc/guides/conf.py b/doc/guides/conf.py
index 149bcdbc..c3cd0bd6 100644
--- a/doc/guides/conf.py
+++ b/doc/guides/conf.py
@@ -58,11 +58,15 @@ html_add_permalinks = ""
html_show_copyright = False
highlight_language = 'none'
-version = subprocess.check_output(['make', '-sRrC', '../../', 'showversion']).decode('utf-8').rstrip()
+version = subprocess.check_output(['make', '-sRrC', '../../', 'showversion'])
+version = version.decode('utf-8').rstrip()
release = version
master_doc = 'index'
+# Maximum feature description string length
+feature_str_len = 25
+
# Figures, tables and code-blocks automatically numbered if they have caption
numfig = True
@@ -75,7 +79,7 @@ latex_documents = [
]
# Latex directives to be included directly in the latex/pdf docs.
-latex_preamble = r"""
+custom_latex_preamble = r"""
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{helvet}
@@ -91,9 +95,10 @@ latex_elements = {
'classoptions': ',openany,oneside',
'babel': '\\usepackage[english]{babel}',
# customize Latex formatting
- 'preamble': latex_preamble
+ 'preamble': custom_latex_preamble
}
+
# Override the default Latex formatter in order to modify the
# code/verbatim blocks.
class CustomLatexFormatter(LatexFormatter):
@@ -117,12 +122,12 @@ man_pages = [("testpmd_app_ug/run_app", "testpmd",
("tools/devbind", "dpdk-devbind",
"check device status and bind/unbind them from drivers", "", 8)]
-######## :numref: fallback ########
+
+# ####### :numref: fallback ########
# The following hook functions add some simple handling for the :numref:
# directive for Sphinx versions prior to 1.3.1. The functions replace the
# :numref: reference with a link to the target (for all Sphinx doc types).
# It doesn't try to label figures/tables.
-
def numref_role(reftype, rawtext, text, lineno, inliner):
"""
Add a Sphinx role to handle numref references. Note, we can't convert
@@ -136,6 +141,7 @@ def numref_role(reftype, rawtext, text, lineno, inliner):
internal=True)
return [newnode], []
+
def process_numref(app, doctree, from_docname):
"""
Process the numref nodes once the doctree has been built and prior to
@@ -172,25 +178,24 @@ def process_numref(app, doctree, from_docname):
node.replace_self(newnode)
-def generate_nic_overview_table(output_filename):
+def generate_overview_table(output_filename, table_id, section, table_name, title):
"""
- Function to generate the NIC Overview Table from the ini files that define
- the features for each NIC.
+ Function to generate the Overview Table from the ini files that define
+ the features for each driver.
The default features for the table and their order is defined by the
'default.ini' file.
"""
- # Default worning string.
- warning = 'Warning generate_nic_overview_table()'
+ # Default warning string.
+ warning = 'Warning generate_overview_table()'
# Get the default features and order from the 'default.ini' file.
ini_path = path_join(dirname(output_filename), 'features')
config = configparser.ConfigParser()
config.optionxform = str
config.read(path_join(ini_path, 'default.ini'))
- default_section = 'Features'
- default_features = config.items(default_section)
+ default_features = config.items(section)
# Create a dict of the valid features to validate the other ini files.
valid_features = {}
@@ -200,7 +205,7 @@ def generate_nic_overview_table(output_filename):
valid_features[key] = ' '
max_feature_length = max(max_feature_length, len(key))
- # Get a list of NIC ini files, excluding 'default.ini'.
+ # Get a list of driver ini files, excluding 'default.ini'.
ini_files = [basename(file) for file in listdir(ini_path)
if file.endswith('.ini') and file != 'default.ini']
ini_files.sort()
@@ -220,7 +225,7 @@ def generate_nic_overview_table(output_filename):
header_names.append(name)
- # Create a dict of the defined features for each NIC from the ini files.
+ # Create a dict of the defined features for each driver from the ini files.
ini_data = {}
for ini_filename in ini_files:
config = configparser.ConfigParser()
@@ -231,14 +236,14 @@ def generate_nic_overview_table(output_filename):
ini_data[ini_filename] = valid_features.copy()
# Check for a valid ini section.
- if not config.has_section(default_section):
+ if not config.has_section(section):
print("{}: File '{}' has no [{}] secton".format(warning,
ini_filename,
- default_section))
+ section))
continue
# Check for valid features names.
- for name, value in config.items(default_section):
+ for name, value in config.items(section):
if name not in valid_features:
print("{}: Unknown feature '{}' in '{}'".format(warning,
name,
@@ -249,18 +254,17 @@ def generate_nic_overview_table(output_filename):
# Get the first letter only.
ini_data[ini_filename][name] = value[0]
- # Print out the RST NIC Overview table from the ini file data.
+ # Print out the RST Driver Overview table from the ini file data.
outfile = open(output_filename, 'w')
num_cols = len(header_names)
- print('.. table:: Features availability in networking drivers\n',
- file=outfile)
-
- print_table_header(outfile, num_cols, header_names)
+ print_table_css(outfile, table_id)
+ print('.. table:: ' + table_name + '\n', file=outfile)
+ print_table_header(outfile, num_cols, header_names, title)
print_table_body(outfile, num_cols, ini_files, ini_data, default_features)
-def print_table_header(outfile, num_cols, header_names):
+def print_table_header(outfile, num_cols, header_names, title):
""" Print the RST table header. The header names are vertical. """
print_table_divider(outfile, num_cols)
@@ -268,7 +272,7 @@ def print_table_header(outfile, num_cols, header_names):
for name in header_names:
line += ' ' + name[0]
- print_table_row(outfile, 'Feature', line)
+ print_table_row(outfile, title, line)
for i in range(1, 10):
line = ''
@@ -297,7 +301,7 @@ def print_table_body(outfile, num_cols, ini_files, ini_data, default_features):
def print_table_row(outfile, feature, line):
""" Print a single row of the table with fixed formatting. """
line = line.rstrip()
- print(' {:<20}{}'.format(feature, line), file=outfile)
+ print(' {:<{}}{}'.format(feature, feature_str_len, line), file=outfile)
def print_table_divider(outfile, num_cols):
@@ -306,13 +310,95 @@ def print_table_divider(outfile, num_cols):
column_dividers = ['='] * num_cols
line += ' '.join(column_dividers)
- feature = '=' * 20
+ feature = '=' * feature_str_len
print_table_row(outfile, feature, line)
+def print_table_css(outfile, table_id):
+ template = """
+.. raw:: html
+
+ <style>
+ .wy-nav-content {
+ opacity: .99;
+ }
+ table#idx {
+ cursor: default;
+ overflow: hidden;
+ }
+ table#idx th, table#idx td {
+ text-align: center;
+ }
+ table#idx th {
+ font-size: 80%;
+ white-space: pre-wrap;
+ vertical-align: top;
+ padding: 2px;
+ }
+ table#idx th:first-child {
+ vertical-align: bottom;
+ }
+ table#idx td {
+ font-size: 70%;
+ padding: 1px;
+ }
+ table#idx td:first-child {
+ padding-left: 1em;
+ text-align: left;
+ }
+ table#idx tr:nth-child(2n-1) td {
+ background-color: rgba(210, 210, 210, 0.2);
+ }
+ table#idx th:not(:first-child):hover,
+ table#idx td:not(:first-child):hover {
+ position: relative;
+ }
+ table#idx th:not(:first-child):hover::after,
+ table#idx td:not(:first-child):hover::after {
+ content: '';
+ height: 6000px;
+ top: -3000px;
+ width: 100%;
+ left: 0;
+ position: absolute;
+ z-index: -1;
+ background-color: #ffb;
+ }
+ table#idx tr:hover td {
+ background-color: #ffb;
+ }
+ </style>
+"""
+ print(template.replace("idx", "id%d" % (table_id)), file=outfile)
+
+
def setup(app):
- generate_nic_overview_table('doc/guides/nics/overview_table.txt')
+ table_file = dirname(__file__) + '/nics/overview_table.txt'
+ generate_overview_table(table_file, 1,
+ 'Features',
+ 'Features availability in networking drivers',
+ 'Feature')
+ table_file = dirname(__file__) + '/cryptodevs/overview_feature_table.txt'
+ generate_overview_table(table_file, 1,
+ 'Features',
+ 'Features availability in crypto drivers',
+ 'Feature')
+ table_file = dirname(__file__) + '/cryptodevs/overview_cipher_table.txt'
+ generate_overview_table(table_file, 2,
+ 'Cipher',
+ 'Cipher algorithms in crypto drivers',
+ 'Cipher algorithm')
+ table_file = dirname(__file__) + '/cryptodevs/overview_auth_table.txt'
+ generate_overview_table(table_file, 3,
+ 'Auth',
+ 'Authentication algorithms in crypto drivers',
+ 'Authentication algorithm')
+ table_file = dirname(__file__) + '/cryptodevs/overview_aead_table.txt'
+ generate_overview_table(table_file, 4,
+ 'AEAD',
+ 'AEAD algorithms in crypto drivers',
+ 'AEAD algorithm')
if LooseVersion(sphinx_version) < LooseVersion('1.3.1'):
print('Upgrade sphinx to version >= 1.3.1 for '
diff --git a/doc/guides/contributing/coding_style.rst b/doc/guides/contributing/coding_style.rst
index 1eb67f34..d8e4a0f9 100644
--- a/doc/guides/contributing/coding_style.rst
+++ b/doc/guides/contributing/coding_style.rst
@@ -603,22 +603,30 @@ In the DPDK environment, use the logging interface provided:
.. code-block:: c
- #define RTE_LOGTYPE_TESTAPP1 RTE_LOGTYPE_USER1
- #define RTE_LOGTYPE_TESTAPP2 RTE_LOGTYPE_USER2
+ /* register log types for this application */
+ int my_logtype1 = rte_log_register("myapp.log1");
+ int my_logtype2 = rte_log_register("myapp.log2");
- /* enable these logs type */
- rte_set_log_type(RTE_LOGTYPE_TESTAPP1, 1);
- rte_set_log_type(RTE_LOGTYPE_TESTAPP2, 1);
+ /* set global log level to INFO */
+ rte_log_set_global_level(RTE_LOG_INFO);
+
+ /* only display messages higher than NOTICE for log2 (default
+ * is DEBUG) */
+ rte_log_set_level(my_logtype2, RTE_LOG_NOTICE);
+
+ /* enable all PMD logs (whose identifier string starts with "pmd") */
+ rte_log_set_level_regexp("pmd.*", RTE_LOG_DEBUG);
/* log in debug level */
- rte_set_log_level(RTE_LOG_DEBUG);
- RTE_LOG(DEBUG, TESTAPP1, "this is is a debug level message\n");
- RTE_LOG(INFO, TESTAPP1, "this is is a info level message\n");
- RTE_LOG(WARNING, TESTAPP1, "this is is a warning level message\n");
+ rte_log_set_global_level(RTE_LOG_DEBUG);
+ RTE_LOG(DEBUG, my_logtype1, "this is is a debug level message\n");
+ RTE_LOG(INFO, my_logtype1, "this is is a info level message\n");
+ RTE_LOG(WARNING, my_logtype1, "this is is a warning level message\n");
+ RTE_LOG(WARNING, my_logtype2, "this is is a debug level message (not displayed)\n");
/* log in info level */
- rte_set_log_level(RTE_LOG_INFO);
- RTE_LOG(DEBUG, TESTAPP2, "debug level message (not displayed)\n");
+ rte_log_set_global_level(RTE_LOG_INFO);
+ RTE_LOG(DEBUG, my_logtype1, "debug level message (not displayed)\n");
Branch Prediction
~~~~~~~~~~~~~~~~~
@@ -690,6 +698,7 @@ Control Statements
Python Code
-----------
-All python code should be compliant with `PEP8 (Style Guide for Python Code) <https://www.python.org/dev/peps/pep-0008/>`_.
+All Python code should work with Python 2.7+ and 3.2+ and be compliant with
+`PEP8 (Style Guide for Python Code) <https://www.python.org/dev/peps/pep-0008/>`_.
The ``pep8`` tool can be used for testing compliance with the guidelines.
diff --git a/doc/guides/contributing/design.rst b/doc/guides/contributing/design.rst
index bac3d1b4..88d3a433 100644
--- a/doc/guides/contributing/design.rst
+++ b/doc/guides/contributing/design.rst
@@ -158,3 +158,17 @@ cache bandwidth, memory bandwidth, etc) that depends on:
branches are usually required. When processing a burst of packets that have been
validated for header integrity, counting the number of bits set in a bitmask
might be needed.
+
+PF and VF Considerations
+------------------------
+
+The primary goal of DPDK is to provide a userspace dataplane. Managing VFs from
+a PF driver is a control plane feature and developers should generally rely on
+the Linux Kernel for that.
+
+Developers should work with the Linux Kernel community to get the required
+functionality upstream. PF functionality should only be added to DPDK for
+testing and prototyping purposes while the kernel work is ongoing. It should
+also be marked with an "EXPERIMENTAL" tag. If the functionality isn't
+upstreamable then a case can be made to maintain the PF functionality in DPDK
+without the EXPERIMENTAL tag.
diff --git a/doc/guides/contributing/documentation.rst b/doc/guides/contributing/documentation.rst
index 2cfb1a29..4c85da7b 100644
--- a/doc/guides/contributing/documentation.rst
+++ b/doc/guides/contributing/documentation.rst
@@ -332,7 +332,7 @@ Whitespace
Section Headers
~~~~~~~~~~~~~~~
-* Section headers should use the use the following underline formats::
+* Section headers should use the following underline formats::
Level 1 Heading
===============
@@ -380,12 +380,11 @@ Lists
#. Item one.
- #. Item two is a long line that is wrapped and then indented
- to match the start of the e first line.
-
#. Item two is a long line that is wrapped and then indented to match
the start of the previous line.
+ #. Item three.
+
* Definition lists can be written with or without a bullet::
* Item one.
@@ -458,7 +457,7 @@ Code and Literal block sections
For long literal lines that exceed that limit try to wrap the text at sensible locations.
For example a long command line could be documented like this and still work if copied directly from the docs::
- build/app/testpmd -c7 -n3 --vdev=net_pcap0,iface=eth0 \
+ build/app/testpmd -l 0-2 -n3 --vdev=net_pcap0,iface=eth0 \
--vdev=net_pcap1,iface=eth1 \
-- -i --nb-cores=2 --nb-ports=2 \
--total-num-mbufs=2048
@@ -477,7 +476,7 @@ Images
* `Inkscape <http://inkscape.org>`_ is the recommended graphics editor for creating the images.
Use some of the older images in ``doc/guides/prog_guide/img/`` as a template, for example ``mbuf1.svg``
- or ``ring-enqueue.svg``.
+ or ``ring-enqueue1.svg``.
* The SVG images should include a copyright notice, as an XML comment.
diff --git a/doc/guides/contributing/index.rst b/doc/guides/contributing/index.rst
index f6af317f..329b678a 100644
--- a/doc/guides/contributing/index.rst
+++ b/doc/guides/contributing/index.rst
@@ -10,4 +10,5 @@ Contributor's Guidelines
versioning
documentation
patches
+ stable
cheatsheet
diff --git a/doc/guides/contributing/patches.rst b/doc/guides/contributing/patches.rst
index 729aea71..84a5dab8 100644
--- a/doc/guides/contributing/patches.rst
+++ b/doc/guides/contributing/patches.rst
@@ -7,12 +7,12 @@ This document outlines the guidelines for submitting code to DPDK.
The DPDK development process is modelled (loosely) on the Linux Kernel development model so it is worth reading the
Linux kernel guide on submitting patches:
-`How to Get Your Change Into the Linux Kernel <http://www.kernel.org/doc/Documentation/SubmittingPatches>`_.
+`How to Get Your Change Into the Linux Kernel <https://www.kernel.org/doc/html/latest/process/submitting-patches.html>`_.
The rationale for many of the DPDK guidelines is explained in greater detail in the kernel guidelines.
The DPDK Development Process
------------------------------
+----------------------------
The DPDK development process has the following features:
@@ -20,7 +20,10 @@ The DPDK development process has the following features:
* There is a mailing list where developers submit patches.
* There are maintainers for hierarchical components.
* Patches are reviewed publicly on the mailing list.
-* Successfully reviewed patches are merged to the master branch of the repository.
+* Successfully reviewed patches are merged to the repository.
+* Patches should be sent to the target repository or sub-tree, see below.
+* All sub-repositories are merged into main repository for ``-rc1`` and ``-rc2`` versions of the release.
+* After the ``-rc2`` release all patches should target the main repository.
The mailing list for DPDK development is `dev@dpdk.org <http://dpdk.org/ml/archives/dev/>`_.
Contributors will need to `register for the mailing list <http://dpdk.org/ml/listinfo/dev>`_ in order to submit patches.
@@ -30,15 +33,81 @@ The development process requires some familiarity with the ``git`` version contr
Refer to the `Pro Git Book <http://www.git-scm.com/book/>`_ for further information.
+Maintainers and Sub-trees
+-------------------------
+
+The DPDK maintenance hierarchy is divided into a main repository ``dpdk`` and sub-repositories ``dpdk-next-*``.
+
+There are maintainers for the trees and for components within the tree.
+
+Trees and maintainers are listed in the ``MAINTAINERS`` file. For example::
+
+ Crypto Drivers
+ --------------
+ M: Some Name <some.name@email.com>
+ B: Another Name <another.name@email.com>
+ T: git://dpdk.org/next/dpdk-next-crypto
+
+ Intel AES-NI GCM PMD
+ M: Some One <some.one@email.com>
+ F: drivers/crypto/aesni_gcm/
+ F: doc/guides/cryptodevs/aesni_gcm.rst
+
+Where:
+
+* ``M`` is a tree or component maintainer.
+* ``B`` is a tree backup maintainer.
+* ``T`` is a repository tree.
+* ``F`` is a maintained file or directory.
+
+Additional details are given in the ``MAINTAINERS`` file.
+
+The role of the component maintainers is to:
+
+* Review patches for the component or delegate the review.
+ The review should be done, ideally, within 1 week of submission to the mailing list.
+* Add an ``acked-by`` to patches, or patchsets, that are ready for committing to a tree.
+* Reply to questions asked about the component.
+
+Component maintainers can be added or removed by submitting a patch to the ``MAINTAINERS`` file.
+Maintainers should have demonstrated a reasonable level of contributions or reviews to the component area.
+The maintainer should be confirmed by an ``ack`` from an established contributor.
+There can be more than one component maintainer if desired.
+
+The role of the tree maintainers is to:
+
+* Maintain the overall quality of their tree.
+ This can entail additional review, compilation checks or other tests deemed necessary by the maintainer.
+* Commit patches that have been reviewed by component maintainers and/or other contributors.
+ The tree maintainer should determine if patches have been reviewed sufficiently.
+* Ensure that patches are reviewed in a timely manner.
+* Prepare the tree for integration.
+* Ensure that there is a designated back-up maintainer and coordinate a handover for periods where the
+ tree maintainer can't perform their role.
+
+Tree maintainers can be added or removed by submitting a patch to the ``MAINTAINERS`` file.
+The proposer should justify the need for a new sub-tree and should have demonstrated a sufficient level of contributions in the area or to a similar area.
+The maintainer should be confirmed by an ``ack`` from an existing tree maintainer.
+Disagreements on trees or maintainers can be brought to the Technical Board.
+
+The backup maintainer for the master tree should be selected from the existing sub-tree maintainers from the project.
+The backup maintainer for a sub-tree should be selected from among the component maintainers within that sub-tree.
+
+
Getting the Source Code
-----------------------
-The source code can be cloned using either of the following::
+The source code can be cloned using either of the following:
- git clone git://dpdk.org/dpdk
+main repository::
+ git clone git://dpdk.org/dpdk
git clone http://dpdk.org/git/dpdk
+sub-repositories (`list <http://dpdk.org/browse/next>`_)::
+
+ git clone git://dpdk.org/next/dpdk-next-*
+ git clone http://dpdk.org/git/next/dpdk-next-*
Make your Changes
-----------------
@@ -124,7 +193,7 @@ Here are some guidelines for the body of a commit message:
git commit --signoff # or -s
The purpose of the signoff is explained in the
- `Developer's Certificate of Origin <http://www.kernel.org/doc/Documentation/SubmittingPatches>`_
+ `Developer's Certificate of Origin <https://www.kernel.org/doc/html/latest/process/submitting-patches.html#developer-s-certificate-of-origin-1-1>`_
section of the Linux kernel guidelines.
.. Note::
@@ -157,13 +226,9 @@ Here are some guidelines for the body of a commit message:
* Use correct capitalization, punctuation and spelling.
-In addition to the ``Signed-off-by:`` name the commit messages can also have one or more of the following:
-
-* ``Reported-by:`` The reporter of the issue.
-* ``Tested-by:`` The tester of the change.
-* ``Reviewed-by:`` The reviewer of the change.
-* ``Suggested-by:`` The person who suggested the change.
-* ``Acked-by:`` When a previous version of the patch was acked and the ack is still relevant.
+In addition to the ``Signed-off-by:`` name the commit messages can also have
+tags for who reported, suggested, tested and reviewed the patch being
+posted. Please refer to the `Tested, Acked and Reviewed by`_ section.
Creating Patches
@@ -230,7 +295,7 @@ For example::
Checking the Patches
--------------------
-Patches should be checked for formatting and syntax issues using the ``checkpatches.sh`` script in the ``scripts``
+Patches should be checked for formatting and syntax issues using the ``checkpatches.sh`` script in the ``devtools``
directory of the DPDK repo.
This uses the Linux kernel development tool ``checkpatch.pl`` which can be obtained by cloning, and periodically,
updating the Linux kernel sources.
@@ -245,7 +310,7 @@ files, in order of preference::
Once the environment variable the script can be run as follows::
- scripts/checkpatches.sh ~/patch/
+ devtools/checkpatches.sh ~/patch/
The script usage is::
@@ -272,10 +337,10 @@ Where the range is a ``git log`` option.
Checking Compilation
--------------------
-Compilation of patches and changes should be tested using the the ``test-build.sh`` script in the ``scripts``
+Compilation of patches and changes should be tested using the the ``test-build.sh`` script in the ``devtools``
directory of the DPDK repo::
- scripts/test-build.sh x86_64-native-linuxapp-gcc+next+shared
+ devtools/test-build.sh x86_64-native-linuxapp-gcc+next+shared
The script usage is::
@@ -359,9 +424,57 @@ The options ``--annotate`` and ``confirm = always`` are recommended for checking
The Review Process
------------------
-The more work you put into the previous steps the easier it will be to get a patch accepted.
+Patches are reviewed by the community, relying on the experience and
+collaboration of the members to double-check each other's work. There are a
+number of ways to indicate that you have checked a patch on the mailing list.
+
+
+Tested, Acked and Reviewed by
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To indicate that you have interacted with a patch on the mailing list you
+should respond to the patch in an email with one of the following tags:
+
+ * Reviewed-by:
+ * Acked-by:
+ * Tested-by:
+ * Reported-by:
+ * Suggested-by:
+
+The tag should be on a separate line as follows::
+
+ tag-here: Name Surname <email@address.com>
+
+Each of these tags has a specific meaning. In general, the DPDK community
+follows the kernel usage of the tags. A short summary of the meanings of each
+tag is given here for reference:
+
+.. _statement: https://www.kernel.org/doc/html/latest/process/submitting-patches.html#reviewer-s-statement-of-oversight
+
+``Reviewed-by:`` is a strong statement_ that the patch is an appropriate state
+for merging without any remaining serious technical issues. Reviews from
+community members who are known to understand the subject area and to perform
+thorough reviews will increase the likelihood of the patch getting merged.
+
+``Acked-by:`` is a record that the person named was not directly involved in
+the preparation of the patch but wishes to signify and record their acceptance
+and approval of it.
+
+``Tested-by:`` indicates that the patch has been successfully tested (in some
+environment) by the person named.
+
+``Reported-by:`` is used to acknowledge person who found or reported the bug.
+
+``Suggested-by:`` indicates that the patch idea was suggested by the named
+person.
+
+
+
+Steps to getting your patch merged
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The general cycle for patch review and acceptance is:
+The more work you put into the previous steps the easier it will be to get a
+patch accepted. The general cycle for patch review and acceptance is:
#. Submit the patch.
@@ -392,4 +505,20 @@ The general cycle for patch review and acceptance is:
#. In addition a patch will not be accepted if it doesn't address comments from a previous version with fixes or
valid arguments.
-#. Acked patches will be merged in the current or next merge window.
+#. It is the responsibility of a maintainer to ensure that patches are reviewed and to provide an ``ack`` or
+ ``nack`` of those patches as appropriate.
+
+#. Once a patch has been acked by the relevant maintainer, reviewers may still comment on it for a further
+ two weeks. After that time, the patch should be merged into the relevant git tree for the next release.
+ Additional notes and restrictions:
+
+ * Patches should be acked by a maintainer at least two days before the release merge
+ deadline, in order to make that release.
+ * For patches acked with less than two weeks to go to the merge deadline, all additional
+ comments should be made no later than two days before the merge deadline.
+ * After the appropriate time for additional feedback has passed, if the patch has not yet
+ been merged to the relevant tree by the committer, it should be treated as though it had,
+ in that any additional changes needed to it must be addressed by a follow-on patch, rather
+ than rework of the original.
+ * Trivial patches may be merged sooner than described above at the tree committer's
+ discretion.
diff --git a/doc/guides/contributing/stable.rst b/doc/guides/contributing/stable.rst
new file mode 100644
index 00000000..d52ec477
--- /dev/null
+++ b/doc/guides/contributing/stable.rst
@@ -0,0 +1,99 @@
+.. stable_lts_releases:
+
+DPDK Stable Releases and Long Term Support
+==========================================
+
+This section sets out the guidelines for the DPDK Stable Releases and the DPDK
+Long Term Support releases (LTS).
+
+
+Introduction
+------------
+
+The purpose of the DPDK Stable Releases is to maintain releases of DPDK with
+backported fixes over an extended period of time. This provides downstream
+consumers of DPDK with a stable target on which to base applications or
+packages.
+
+The Long Term Support release (LTS) is a designation applied to a Stable
+Release to indicate longer term support.
+
+
+Stable Releases
+---------------
+
+Any major release of DPDK can be designated as a Stable Release if a
+maintainer volunteers to maintain it.
+
+A Stable Release is used to backport fixes from an ``N`` release back to an
+``N-1`` release, for example, from 16.11 to 16.07.
+
+The duration of a stable is one complete release cycle (3 months). It can be
+longer, up to 1 year, if a maintainer continues to support the stable branch,
+or if users supply backported fixes, however the explicit commitment should be
+for one release cycle.
+
+The release cadence is determined by the maintainer based on the number of
+bugfixes and the criticality of the bugs. Releases should be coordinated with
+the validation engineers to ensure that a tagged release has been tested.
+
+
+LTS Release
+-----------
+
+A stable release can be designated as an LTS release based on community
+agreement and a commitment from a maintainer. An LTS release will have a
+maintenance duration of 2 years.
+
+The current DPDK LTS release is 16.11.
+
+It is anticipated that there will be at least 4 releases per year of the LTS
+or approximately 1 every 3 months. However, the cadence can be shorter or
+longer depending on the number and criticality of the backported
+fixes. Releases should be coordinated with the validation engineers to ensure
+that a tagged release has been tested.
+
+
+What changes should be backported
+---------------------------------
+
+Backporting should be limited to bug fixes.
+
+Features should not be backported to stable releases. It may be acceptable, in
+limited cases, to back port features for the LTS release where:
+
+* There is a justifiable use case (for example a new PMD).
+* The change is non-invasive.
+* The work of preparing the backport is done by the proposer.
+* There is support within the community.
+
+
+The Stable Mailing List
+-----------------------
+
+The Stable and LTS release are coordinated on the stable@dpdk.org mailing
+list.
+
+All fix patches to the master branch that are candidates for backporting
+should also be CCed to the `stable@dpdk.org <http://dpdk.org/ml/listinfo/stable>`_
+mailing list.
+
+
+Releasing
+---------
+
+A Stable Release will be released by:
+
+* Tagging the release with YY.MM.n (year, month, number).
+* Uploading a tarball of the release to dpdk.org.
+* Sending an announcement to the `announce@dpdk.org <http://dpdk.org/ml/listinfo/announce>`_
+ list.
+
+Stable releases are available on the `dpdk.org download page <http://dpdk.org/download>`_.
+
+
+ABI
+---
+
+The Stable Release should not be seen as a way of breaking or circumventing
+the DPDK ABI policy.
diff --git a/doc/guides/contributing/versioning.rst b/doc/guides/contributing/versioning.rst
index 08e2e217..8aaf370d 100644
--- a/doc/guides/contributing/versioning.rst
+++ b/doc/guides/contributing/versioning.rst
@@ -133,6 +133,31 @@ The macros exported are:
fully qualified function ``p``, so that if a symbol becomes versioned, it
can still be mapped back to the public symbol name.
+Setting a Major ABI version
+---------------------------
+
+Downstreams might want to provide different DPDK releases at the same time to
+support multiple consumers of DPDK linked against older and newer sonames.
+
+Also due to the interdependencies that DPDK libraries can have applications
+might end up with an executable space in which multiple versions of a library
+are mapped by ld.so.
+
+Think of LibA that got an ABI bump and LibB that did not get an ABI bump but is
+depending on LibA.
+
+.. note::
+
+ Application
+ \-> LibA.old
+ \-> LibB.new -> LibA.new
+
+That is a conflict which can be avoided by setting ``CONFIG_RTE_MAJOR_ABI``.
+If set, the value of ``CONFIG_RTE_MAJOR_ABI`` overwrites all - otherwise per
+library - versions defined in the libraries ``LIBABIVER``.
+An example might be ``CONFIG_RTE_MAJOR_ABI=16.11`` which will make all libraries
+``librte<?>.so.16.11`` instead of ``librte<?>.so.<LIBABIVER>``.
+
Examples of ABI Macro use
-------------------------
@@ -457,7 +482,7 @@ versions of the symbol.
Running the ABI Validator
-------------------------
-The ``scripts`` directory in the DPDK source tree contains a utility program,
+The ``devtools`` directory in the DPDK source tree contains a utility program,
``validate-abi.sh``, for validating the DPDK ABI based on the Linux `ABI
Compliance Checker
<http://ispras.linuxbase.org/index.php/ABI_compliance_checker>`_.
@@ -470,7 +495,7 @@ utilities which can be installed via a package manager. For example::
The syntax of the ``validate-abi.sh`` utility is::
- ./scripts/validate-abi.sh <REV1> <REV2> <TARGET>
+ ./devtools/validate-abi.sh <REV1> <REV2> <TARGET>
Where ``REV1`` and ``REV2`` are valid gitrevisions(7)
https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
@@ -479,13 +504,13 @@ on the local repo and target is the usual DPDK compilation target.
For example::
# Check between the previous and latest commit:
- ./scripts/validate-abi.sh HEAD~1 HEAD x86_64-native-linuxapp-gcc
+ ./devtools/validate-abi.sh HEAD~1 HEAD x86_64-native-linuxapp-gcc
# Check between two tags:
- ./scripts/validate-abi.sh v2.0.0 v2.1.0 x86_64-native-linuxapp-gcc
+ ./devtools/validate-abi.sh v2.0.0 v2.1.0 x86_64-native-linuxapp-gcc
# Check between git master and local topic-branch "vhost-hacking":
- ./scripts/validate-abi.sh master vhost-hacking x86_64-native-linuxapp-gcc
+ ./devtools/validate-abi.sh master vhost-hacking x86_64-native-linuxapp-gcc
After the validation script completes (it can take a while since it need to
compile both tags) it will create compatibility reports in the
diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index 04bf43c2..84cdc52a 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -32,10 +32,8 @@ AES-NI GCM Crypto Poll Mode Driver
The AES-NI GCM PMD (**librte_pmd_aesni_gcm**) provides poll mode crypto driver
-support for utilizing Intel multi buffer library (see AES-NI Multi-buffer PMD documentation
-to learn more about it, including installation).
-
-The AES-NI GCM PMD has current only been tested on Fedora 21 64-bit with gcc.
+support for utilizing Intel ISA-L crypto library, which provides operation acceleration
+through the AES-NI instruction sets for AES-GCM authenticated cipher algorithm.
Features
--------
@@ -49,24 +47,29 @@ Cipher algorithms:
Authentication algorithms:
* RTE_CRYPTO_AUTH_AES_GCM
+* RTE_CRYPTO_AUTH_AES_GMAC
+
+Installation
+------------
+
+To build DPDK with the AESNI_GCM_PMD the user is required to install
+the ``libisal_crypto`` library in the build environment.
+For download and more details please visit `<https://github.com/01org/isa-l_crypto>`_.
Initialization
--------------
In order to enable this virtual crypto PMD, user must:
-* Export the environmental variable AESNI_MULTI_BUFFER_LIB_PATH with the path where
- the library was extracted.
-
-* Build the multi buffer library (go to Installation section in AES-NI MB PMD documentation).
+* Install the ISA-L crypto library (explained in Installation section).
* Set CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=y in config/common_base.
To use the PMD in an application, user must:
-* Call rte_eal_vdev_init("crypto_aesni_gcm") within the application.
+* Call rte_vdev_init("crypto_aesni_gcm") within the application.
-* Use --vdev="crypto_aesni_gcm" in the EAL options, which will call rte_eal_vdev_init() internally.
+* Use --vdev="crypto_aesni_gcm" in the EAL options, which will call rte_vdev_init() internally.
The following parameters (all optional) can be provided in the previous two calls:
@@ -81,14 +84,11 @@ Example:
.. code-block:: console
- ./l2fwd-crypto -c 40 -n 4 --vdev="crypto_aesni_gcm,socket_id=1,max_nb_sessions=128"
+ ./l2fwd-crypto -l 6 -n 4 --vdev="crypto_aesni_gcm,socket_id=1,max_nb_sessions=128"
Limitations
-----------
-* Chained mbufs are not supported.
+* Chained mbufs are supported but only out-of-place (destination mbuf must be contiguous).
* Hash only is not supported.
* Cipher only is not supported.
-* Only in-place is currently supported (destination address is the same as source address).
-* Only supports session-oriented API implementation (session-less APIs are not supported).
-* Not performance tuned.
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index e812e957..ecb52a10 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -27,7 +27,7 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-AESN-NI Multi Buffer Crytpo Poll Mode Driver
+AESN-NI Multi Buffer Crypto Poll Mode Driver
============================================
@@ -51,36 +51,53 @@ Cipher algorithms:
* RTE_CRYPTO_CIPHER_AES128_CTR
* RTE_CRYPTO_CIPHER_AES192_CTR
* RTE_CRYPTO_CIPHER_AES256_CTR
+* RTE_CRYPTO_CIPHER_AES_DOCSISBPI
Hash algorithms:
+* RTE_CRYPTO_HASH_MD5_HMAC
* RTE_CRYPTO_HASH_SHA1_HMAC
+* RTE_CRYPTO_HASH_SHA224_HMAC
* RTE_CRYPTO_HASH_SHA256_HMAC
+* RTE_CRYPTO_HASH_SHA384_HMAC
* RTE_CRYPTO_HASH_SHA512_HMAC
+* RTE_CRYPTO_HASH_AES_XCBC_HMAC
Limitations
-----------
* Chained mbufs are not supported.
-* Hash only is not supported.
-* Cipher only is not supported.
* Only in-place is currently supported (destination address is the same as source address).
* Only supports session-oriented API implementation (session-less APIs are not supported).
-* Not performance tuned.
Installation
------------
-To build DPDK with the AESNI_MB_PMD the user is required to download the mult-
-buffer library from `here <https://downloadcenter.intel.com/download/22972>`_
-and compile it on their user system before building DPDK. When building the
-multi-buffer library it is necessary to have YASM package installed and also
-requires the overriding of YASM path when building, as a path is hard coded in
-the Makefile of the release package.
+To build DPDK with the AESNI_MB_PMD the user is required to download the multi-buffer
+library from `here <https://github.com/01org/intel-ipsec-mb>`_
+and compile it on their user system before building DPDK.
+The latest version of the library supported by this PMD is v0.45, which
+can be downloaded in `<https://github.com/01org/intel-ipsec-mb/archive/v0.45.zip>`_.
.. code-block:: console
- make YASM=/usr/bin/yasm
+ make
+
+As a reference, the following table shows a mapping between the past DPDK versions
+and the Multi-Buffer library version supported by them:
+
+.. _table_aesni_mb_versions:
+
+.. table:: DPDK and Multi-Buffer library version compatibility
+
+ ============= ============================
+ DPDK version Multi-buffer library version
+ ============= ============================
+ 2.2 - 16.11 0.43 - 0.44
+ 17.02 0.44
+ 17.05 0.45
+ ============= ============================
+
Initialization
--------------
@@ -96,9 +113,9 @@ In order to enable this virtual crypto PMD, user must:
To use the PMD in an application, user must:
-* Call rte_eal_vdev_init("crypto_aesni_mb") within the application.
+* Call rte_vdev_init("crypto_aesni_mb") within the application.
-* Use --vdev="crypto_aesni_mb" in the EAL options, which will call rte_eal_vdev_init() internally.
+* Use --vdev="crypto_aesni_mb" in the EAL options, which will call rte_vdev_init() internally.
The following parameters (all optional) can be provided in the previous two calls:
@@ -113,4 +130,4 @@ Example:
.. code-block:: console
- ./l2fwd-crypto -c 40 -n 4 --vdev="crypto_aesni_mb,socket_id=1,max_nb_sessions=128"
+ ./l2fwd-crypto -l 6 -n 4 --vdev="crypto_aesni_mb,socket_id=1,max_nb_sessions=128"
diff --git a/doc/guides/cryptodevs/armv8.rst b/doc/guides/cryptodevs/armv8.rst
new file mode 100644
index 00000000..de63793f
--- /dev/null
+++ b/doc/guides/cryptodevs/armv8.rst
@@ -0,0 +1,98 @@
+.. BSD LICENSE
+ Copyright (C) Cavium networks Ltd. 2017.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Cavium networks nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ARMv8 Crypto Poll Mode Driver
+=============================
+
+This code provides the initial implementation of the ARMv8 crypto PMD.
+The driver uses ARMv8 cryptographic extensions to process chained crypto
+operations in an optimized way. The core functionality is provided by
+a low-level library, written in the assembly code.
+
+Features
+--------
+
+ARMv8 Crypto PMD has support for the following algorithm pairs:
+
+Supported cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_AES_CBC``
+
+Supported authentication algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
+
+Installation
+------------
+
+In order to enable this virtual crypto PMD, user must:
+
+* Download ARMv8 crypto library source code from
+ `here <https://github.com/caviumnetworks/armv8_crypto>`_
+
+* Export the environmental variable ARMV8_CRYPTO_LIB_PATH with
+ the path where the ``armv8_crypto`` library was downloaded
+ or cloned.
+
+* Build the library by invoking:
+
+.. code-block:: console
+
+ make -C $ARMV8_CRYPTO_LIB_PATH/
+
+* Set CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO=y in
+ config/defconfig_arm64-armv8a-linuxapp-gcc
+
+The corresponding device can be created only if the following features
+are supported by the CPU:
+
+* ``RTE_CPUFLAG_AES``
+* ``RTE_CPUFLAG_SHA1``
+* ``RTE_CPUFLAG_SHA2``
+* ``RTE_CPUFLAG_NEON``
+
+Initialization
+--------------
+
+User can use app/test application to check how to use this PMD and to verify
+crypto processing.
+
+Test name is cryptodev_sw_armv8_autotest.
+For performance test cryptodev_sw_armv8_perftest can be used.
+
+Limitations
+-----------
+
+* Maximum number of sessions is 2048.
+* Only chained operations are supported.
+* AES-128-CBC is the only supported cipher variant.
+* Cipher input data has to be a multiple of 16 bytes.
+* Digest input data has to be a multiple of 8 bytes.
diff --git a/doc/guides/cryptodevs/dpaa2_sec.rst b/doc/guides/cryptodevs/dpaa2_sec.rst
new file mode 100644
index 00000000..becb910e
--- /dev/null
+++ b/doc/guides/cryptodevs/dpaa2_sec.rst
@@ -0,0 +1,232 @@
+.. BSD LICENSE
+ Copyright(c) 2016 NXP. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of NXP nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+NXP DPAA2 CAAM (DPAA2_SEC)
+==========================
+
+The DPAA2_SEC PMD provides poll mode crypto driver support for NXP DPAA2 CAAM
+hardware accelerator.
+
+Architecture
+------------
+
+SEC is the SOC's security engine, which serves as NXP's latest cryptographic
+acceleration and offloading hardware. It combines functions previously
+implemented in separate modules to create a modular and scalable acceleration
+and assurance engine. It also implements block encryption algorithms, stream
+cipher algorithms, hashing algorithms, public key algorithms, run-time
+integrity checking, and a hardware random number generator. SEC performs
+higher-level cryptographic operations than previous NXP cryptographic
+accelerators. This provides significant improvement to system level performance.
+
+DPAA2_SEC is one of the hardware resource in DPAA2 Architecture. More information
+on DPAA2 Architecture is described in :ref:`dpaa2_overview`.
+
+DPAA2_SEC PMD is one of DPAA2 drivers which interacts with Management Complex (MC)
+portal to access the hardware object - DPSECI. The MC provides access to create,
+discover, connect, configure and destroy dpseci objects in DPAA2_SEC PMD.
+
+DPAA2_SEC PMD also uses some of the other hardware resources like buffer pools,
+queues, queue portals to store and to enqueue/dequeue data to the hardware SEC.
+
+DPSECI objects are detected by PMD using a resource container called DPRC (like
+in :ref:`dpaa2_overview`).
+
+For example:
+
+.. code-block:: console
+
+ DPRC.1 (bus)
+ |
+ +--+--------+-------+-------+-------+---------+
+ | | | | | |
+ DPMCP.1 DPIO.1 DPBP.1 DPNI.1 DPMAC.1 DPSECI.1
+ DPMCP.2 DPIO.2 DPNI.2 DPMAC.2 DPSECI.2
+ DPMCP.3
+
+Implementation
+--------------
+
+SEC provides platform assurance by working with SecMon, which is a companion
+logic block that tracks the security state of the SOC. SEC is programmed by
+means of descriptors (not to be confused with frame descriptors (FDs)) that
+indicate the operations to be performed and link to the message and
+associated data. SEC incorporates two DMA engines to fetch the descriptors,
+read the message data, and write the results of the operations. The DMA
+engine provides a scatter/gather capability so that SEC can read and write
+data scattered in memory. SEC may be configured by means of software for
+dynamic changes in byte ordering. The default configuration for this version
+of SEC is little-endian mode.
+
+A block diagram similar to dpaa2 NIC is shown below to show where DPAA2_SEC
+fits in the DPAA2 Bus model
+
+.. code-block:: console
+
+
+ +----------------+
+ | DPDK DPAA2_SEC |
+ | PMD |
+ +----------------+ +------------+
+ | MC SEC object |.......| Mempool |
+ . . . . . . . . . | (DPSECI) | | (DPBP) |
+ . +---+---+--------+ +-----+------+
+ . ^ | .
+ . | |<enqueue, .
+ . | | dequeue> .
+ . | | .
+ . +---+---V----+ .
+ . . . . . . . . . . .| DPIO driver| .
+ . . | (DPIO) | .
+ . . +-----+------+ .
+ . . | QBMAN | .
+ . . | Driver | .
+ +----+------+-------+ +-----+----- | .
+ | dpaa2 bus | | .
+ | VFIO fslmc-bus |....................|.........................
+ | | |
+ | /bus/fslmc | |
+ +-------------------+ |
+ |
+ ========================== HARDWARE =====|=======================
+ DPIO
+ |
+ DPSECI---DPBP
+ =========================================|========================
+
+
+
+Features
+--------
+
+The DPAA2 PMD has support for:
+
+Cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_3DES_CBC``
+* ``RTE_CRYPTO_CIPHER_AES128_CBC``
+* ``RTE_CRYPTO_CIPHER_AES192_CBC``
+* ``RTE_CRYPTO_CIPHER_AES256_CBC``
+
+Hash algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
+* ``RTE_CRYPTO_AUTH_MD5_HMAC``
+
+Supported DPAA2 SoCs
+--------------------
+
+* LS2080A/LS2040A
+* LS2084A/LS2044A
+* LS2088A/LS2048A
+* LS1088A/LS1048A
+
+Limitations
+-----------
+
+* Chained mbufs are not supported.
+* Hash followed by Cipher mode is not supported
+* Only supports the session-oriented API implementation (session-less APIs are not supported).
+
+Prerequisites
+-------------
+
+DPAA2_SEC driver has similar pre-requisites as described in :ref:`dpaa2_overview`.
+The following dependencies are not part of DPDK and must be installed separately:
+
+* **NXP Linux SDK**
+
+ NXP Linux software development kit (SDK) includes support for the family
+ of QorIQ® ARM-Architecture-based system on chip (SoC) processors
+ and corresponding boards.
+
+ It includes the Linux board support packages (BSPs) for NXP SoCs,
+ a fully operational tool chain, kernel and board specific modules.
+
+ SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
+
+* **DPDK Helper Scripts**
+
+ DPAA2 based resources can be configured easily with the help of ready scripts
+ as provided in the DPDK helper repository.
+
+ `DPDK Helper Scripts <https://github.com/qoriq-open-source/dpdk-helper>`_.
+
+Currently supported by DPDK:
+
+* NXP SDK **2.0+**.
+* MC Firmware version **10.0.0** and higher.
+* Supported architectures: **arm64 LE**.
+
+* Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
+
+Pre-Installation Configuration
+------------------------------
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+Basic DPAA2 config file options are described in :ref:`dpaa2_overview`.
+In addition to those, the following options can be modified in the ``config`` file
+to enable DPAA2_SEC PMD.
+
+Please note that enabling debugging options may affect system performance.
+
+* ``CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC`` (default ``n``)
+ By default it is only enabled in defconfig_arm64-dpaa2-* config.
+ Toggle compilation of the ``librte_pmd_dpaa2_sec`` driver.
+
+* ``CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT`` (default ``n``)
+ Toggle display of initialization related driver messages
+
+* ``CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER`` (default ``n``)
+ Toggle display of driver runtime messages
+
+* ``CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX`` (default ``n``)
+ Toggle display of receive fast path run-time message
+
+* ``CONFIG_RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS``
+ By default it is set as 2048 in defconfig_arm64-dpaa2-* config.
+ It indicates Number of sessions to create in the session memory pool
+ on a single DPAA2 SEC device.
+
+Installations
+-------------
+To compile the DPAA2_SEC PMD for Linux arm64 gcc target, run the
+following ``make`` command:
+
+.. code-block:: console
+
+ cd <DPDK-source-directory>
+ make config T=arm64-dpaa2-linuxapp-gcc install
diff --git a/doc/guides/cryptodevs/features/aesni_gcm.ini b/doc/guides/cryptodevs/features/aesni_gcm.ini
new file mode 100644
index 00000000..5d9e119d
--- /dev/null
+++ b/doc/guides/cryptodevs/features/aesni_gcm.ini
@@ -0,0 +1,27 @@
+;
+; Supported features of the 'aesni_gcm' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+CPU AESNI = Y
+
+;
+; Supported crypto algorithms of the 'aesni_gcm' crypto driver.
+;
+[Cipher]
+
+;
+; Supported authentication algorithms of the 'aesni_gcm' crypto driver.
+;
+[Auth]
+AES GMAC = Y
+
+;
+; Supported AEAD algorithms of the 'aesni_gcm' crypto driver.
+;
+[AEAD]
+AES GCM (128) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/cryptodevs/features/aesni_mb.ini b/doc/guides/cryptodevs/features/aesni_mb.ini
new file mode 100644
index 00000000..03d8485d
--- /dev/null
+++ b/doc/guides/cryptodevs/features/aesni_mb.ini
@@ -0,0 +1,41 @@
+;
+; Supported features of the 'aesni_mb' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+CPU SSE = Y
+CPU AVX = Y
+CPU AVX2 = Y
+CPU AVX512 = Y
+CPU AESNI = Y
+
+;
+; Supported crypto algorithms of the 'aesni_mb' crypto driver.
+;
+[Cipher]
+AES CBC (128) = Y
+AES CBC (192) = Y
+AES CBC (256) = Y
+AES CTR (128) = Y
+AES CTR (192) = Y
+AES CTR (256) = Y
+AES DOCSIS BPI = Y
+;
+; Supported authentication algorithms of the 'aesni_mb' crypto driver.
+;
+[Auth]
+MD5 HMAC = Y
+SHA1 HMAC = Y
+SHA224 HMAC = Y
+SHA256 HMAC = Y
+SHA384 HMAC = Y
+SHA512 HMAC = Y
+AES XCBC MAC = Y
+
+;
+; Supported AEAD algorithms of the 'aesni_mb' crypto driver.
+;
+[AEAD]
diff --git a/doc/guides/cryptodevs/features/armv8.ini b/doc/guides/cryptodevs/features/armv8.ini
new file mode 100644
index 00000000..1e104771
--- /dev/null
+++ b/doc/guides/cryptodevs/features/armv8.ini
@@ -0,0 +1,28 @@
+;
+; Supported features of the 'armv8' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+CPU NEON = Y
+CPU ARM CE = Y
+
+;
+; Supported crypto algorithms of the 'armv8' crypto driver.
+;
+[Cipher]
+AES CBC (128) = Y
+
+;
+; Supported authentication algorithms of the 'armv8' crypto driver.
+;
+[Auth]
+SHA1 HMAC = Y
+SHA256 HMAC = Y
+
+;
+; Supported AEAD algorithms of the 'armv8' crypto driver.
+;
+[AEAD]
diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
new file mode 100644
index 00000000..0926887b
--- /dev/null
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -0,0 +1,70 @@
+;
+; Features of a default crypto driver.
+;
+; This file defines the features that are valid for inclusion in
+; the other driver files and also the order that they appear in
+; the features table in the documentation.
+;
+[Features]
+Symmetric crypto =
+Asymmetric crypto =
+Sym operation chaining =
+HW Accelerated =
+CPU SSE =
+CPU AVX =
+CPU AVX2 =
+CPU AVX512 =
+CPU AESNI =
+CPU NEON =
+CPU ARM CE =
+
+;
+; Supported crypto algorithms of a default crypto driver.
+;
+[Cipher]
+NULL =
+AES CBC (128) =
+AES CBC (192) =
+AES CBC (256) =
+AES CTR (128) =
+AES CTR (192) =
+AES CTR (256) =
+AES DOCSIS BPI =
+3DES CBC =
+3DES CTR =
+DES CBC =
+DES DOCSIS BPI =
+SNOW3G UEA2 =
+KASUMI F8 =
+ZUC EEA3 =
+
+;
+; Supported authentication algorithms of a default crypto driver.
+;
+[Auth]
+NULL =
+MD5 =
+MD5 HMAC =
+SHA1 =
+SHA1 HMAC =
+SHA224 =
+SHA224 HMAC =
+SHA256 =
+SHA256 HMAC =
+SHA384 =
+SHA384 HMAC =
+SHA512 =
+SHA512 HMAC =
+AES XCBC MAC =
+AES GMAC =
+SNOW3G UIA2 =
+KASUMI F9 =
+ZUC EIA3 =
+
+;
+; Supported AEAD algorithms of a default crypto driver.
+;
+[AEAD]
+AES GCM (128) =
+AES GCM (192) =
+AES GCM (256) =
diff --git a/doc/guides/cryptodevs/features/dpaa2_sec.ini b/doc/guides/cryptodevs/features/dpaa2_sec.ini
new file mode 100644
index 00000000..db0ea4f9
--- /dev/null
+++ b/doc/guides/cryptodevs/features/dpaa2_sec.ini
@@ -0,0 +1,34 @@
+;
+; Supported features of the 'dpaa2_sec' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+HW Accelerated = Y
+
+;
+; Supported crypto algorithms of the 'dpaa2_sec' crypto driver.
+;
+[Cipher]
+AES CBC (128) = Y
+AES CBC (192) = Y
+AES CBC (256) = Y
+3DES CBC = Y
+
+;
+; Supported authentication algorithms of the 'dpaa2_sec' crypto driver.
+;
+[Auth]
+MD5 HMAC = Y
+SHA1 HMAC = Y
+SHA224 HMAC = Y
+SHA256 HMAC = Y
+SHA384 HMAC = Y
+SHA512 HMAC = Y
+
+;
+; Supported AEAD algorithms of the 'openssl' crypto driver.
+;
+[AEAD]
diff --git a/doc/guides/cryptodevs/features/kasumi.ini b/doc/guides/cryptodevs/features/kasumi.ini
new file mode 100644
index 00000000..0e138f5a
--- /dev/null
+++ b/doc/guides/cryptodevs/features/kasumi.ini
@@ -0,0 +1,24 @@
+;
+; Supported features of the 'kasumi' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+
+;
+; Supported crypto algorithms of the 'kasumi' crypto driver.
+;
+[Cipher]
+KASUMI F8 = Y
+;
+; Supported authentication algorithms of the 'kasumi' crypto driver.
+;
+[Auth]
+KASUMI F9 = Y
+
+;
+; Supported AEAD algorithms of the 'kasumi' crypto driver.
+;
+[AEAD]
diff --git a/doc/guides/cryptodevs/features/null.ini b/doc/guides/cryptodevs/features/null.ini
new file mode 100644
index 00000000..523c453c
--- /dev/null
+++ b/doc/guides/cryptodevs/features/null.ini
@@ -0,0 +1,25 @@
+;
+; Supported features of the 'null' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+
+;
+; Supported crypto algorithms of the 'null' crypto driver.
+;
+[Cipher]
+NULL = Y
+
+;
+; Supported authentication algorithms of the 'null' crypto driver.
+;
+[Auth]
+NULL = Y
+
+;
+; Supported AEAD algorithms of the 'null' crypto driver.
+;
+[AEAD]
diff --git a/doc/guides/cryptodevs/features/openssl.ini b/doc/guides/cryptodevs/features/openssl.ini
new file mode 100644
index 00000000..aeb2a500
--- /dev/null
+++ b/doc/guides/cryptodevs/features/openssl.ini
@@ -0,0 +1,47 @@
+;
+; Supported features of the 'openssl' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+
+;
+; Supported crypto algorithms of the 'openssl' crypto driver.
+;
+[Cipher]
+AES CBC (128) = Y
+AES CBC (192) = Y
+AES CBC (256) = Y
+AES CTR (128) = Y
+AES CTR (192) = Y
+AES CTR (256) = Y
+3DES CBC = Y
+3DES CTR = Y
+DES DOCSIS BPI = Y
+;
+; Supported authentication algorithms of the 'openssl' crypto driver.
+;
+[Auth]
+MD5 = Y
+MD5 HMAC = Y
+SHA1 = Y
+SHA1 HMAC = Y
+SHA224 = Y
+SHA224 HMAC = Y
+SHA256 = Y
+SHA256 HMAC = Y
+SHA384 = Y
+SHA384 HMAC = Y
+SHA512 = Y
+SHA512 HMAC = Y
+AES GMAC = Y
+
+;
+; Supported AEAD algorithms of the 'openssl' crypto driver.
+;
+[AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/cryptodevs/features/qat.ini b/doc/guides/cryptodevs/features/qat.ini
new file mode 100644
index 00000000..40da8985
--- /dev/null
+++ b/doc/guides/cryptodevs/features/qat.ini
@@ -0,0 +1,53 @@
+;
+; Supported features of the 'qat' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+HW Accelerated = Y
+
+;
+; Supported crypto algorithms of the 'qat' crypto driver.
+;
+[Cipher]
+NULL = Y
+AES CBC (128) = Y
+AES CBC (192) = Y
+AES CBC (256) = Y
+AES CTR (128) = Y
+AES CTR (192) = Y
+AES CTR (256) = Y
+3DES CBC = Y
+3DES CTR = Y
+DES CBC = Y
+SNOW3G UEA2 = Y
+KASUMI F8 = Y
+AES DOCSIS BPI = Y
+DES DOCSIS BPI = Y
+ZUC EEA3 = Y
+;
+; Supported authentication algorithms of the 'qat' crypto driver.
+;
+[Auth]
+NULL = Y
+MD5 HMAC = Y
+SHA1 HMAC = Y
+SHA224 HMAC = Y
+SHA256 HMAC = Y
+SHA384 HMAC = Y
+SHA512 HMAC = Y
+AES GMAC = Y
+SNOW3G UIA2 = Y
+KASUMI F9 = Y
+AES XCBC MAC = Y
+ZUC EIA3 = Y
+
+;
+; Supported AEAD algorithms of the 'qat' crypto driver.
+;
+[AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/cryptodevs/features/snow3g.ini b/doc/guides/cryptodevs/features/snow3g.ini
new file mode 100644
index 00000000..27713617
--- /dev/null
+++ b/doc/guides/cryptodevs/features/snow3g.ini
@@ -0,0 +1,24 @@
+;
+; Supported features of the 'snow3g' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+
+;
+; Supported crypto algorithms of the 'snow3g' crypto driver.
+;
+[Cipher]
+SNOW3G UEA2 = Y
+;
+; Supported authentication algorithms of the 'snow3g' crypto driver.
+;
+[Auth]
+SNOW3G UIA2 = Y
+
+;
+; Supported AEAD algorithms of the 'snow3g' crypto driver.
+;
+[AEAD]
diff --git a/doc/guides/cryptodevs/features/zuc.ini b/doc/guides/cryptodevs/features/zuc.ini
new file mode 100644
index 00000000..5bb02afd
--- /dev/null
+++ b/doc/guides/cryptodevs/features/zuc.ini
@@ -0,0 +1,24 @@
+;
+; Supported features of the 'zuc' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+
+;
+; Supported crypto algorithms of the 'zuc' crypto driver.
+;
+[Cipher]
+ZUC EEA3 = Y
+;
+; Supported authentication algorithms of the 'zuc' crypto driver.
+;
+[Auth]
+ZUC EIA3 = Y
+
+;
+; Supported AEAD algorithms of the 'zuc' crypto driver.
+;
+[AEAD]
diff --git a/doc/guides/cryptodevs/img/scheduler-overview.svg b/doc/guides/cryptodevs/img/scheduler-overview.svg
new file mode 100644
index 00000000..82bb775b
--- /dev/null
+++ b/doc/guides/cryptodevs/img/scheduler-overview.svg
@@ -0,0 +1,277 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export scheduler-fan.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="6.81229in" height="3.40992in"
+ viewBox="0 0 490.485 245.514" xml:space="preserve" color-interpolation-filters="sRGB" class="st10">
+ <v:documentProperties v:langID="1033" v:metric="true" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#fec000;fill-opacity:0.25;filter:url(#filter_2);stroke:#fec000;stroke-opacity:0.25}
+ .st3 {fill:#cc3399;stroke:#ff8c00;stroke-width:3}
+ .st4 {fill:#ffffff;font-family:Calibri;font-size:1.33333em}
+ .st5 {fill:#ff9900;stroke:#ff8c00;stroke-width:3}
+ .st6 {fill:#ffffff;font-family:Calibri;font-size:1.33333em;font-weight:bold}
+ .st7 {fill:#ffc000;stroke:#ffffff;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.5}
+ .st8 {marker-end:url(#mrkr4-40);stroke:#ff0000;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.5}
+ .st9 {fill:#ff0000;fill-opacity:1;stroke:#ff0000;stroke-opacity:1;stroke-width:0.37313432835821}
+ .st10 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend4">
+ <path d="M 2 1 L 0 0 L 2 -1 L 2 1 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr4-40" class="st9" v:arrowType="4" v:arrowSize="2" v:setback="5.36" refX="-5.36" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend4" transform="scale(-2.68,-2.68) "/>
+ </marker>
+ </defs>
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="0.0393701" v:pageScale="0.0393701" v:drawingUnits="24" v:shadowOffsetX="8.50394"
+ v:shadowOffsetY="-8.50394"/>
+ <v:layer v:name="Connector" v:index="0"/>
+ <g id="shape31-1" v:mID="31" v:groupContext="shape" transform="translate(4.15435,-179.702)">
+ <title>Rounded Rectangle.55</title>
+ <desc>User Application</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15348434426561):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15348434426561):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15348434426561):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15348434426561):1"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="240.743" cy="214.108" width="481.49" height="62.8119"/>
+ <g id="shadow31-2" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M11.05 245.51 L470.43 245.51 A11.0507 11.0507 -180 0 0 481.49 234.46 L481.49 193.75 A11.0507 11.0507 -180
+ 0 0 470.43 182.7 L11.05 182.7 A11.0507 11.0507 -180 0 0 -0 193.75 L0 234.46 A11.0507 11.0507 -180 0
+ 0 11.05 245.51 Z" class="st2"/>
+ </g>
+ <path d="M11.05 245.51 L470.43 245.51 A11.0507 11.0507 -180 0 0 481.49 234.46 L481.49 193.75 A11.0507 11.0507 -180 0
+ 0 470.43 182.7 L11.05 182.7 A11.0507 11.0507 -180 0 0 -0 193.75 L0 234.46 A11.0507 11.0507 -180 0 0 11.05
+ 245.51 Z" class="st3"/>
+ <text x="187.04" y="218.91" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>User Application</text> </g>
+ <g id="shape135-7" v:mID="135" v:groupContext="shape" transform="translate(4.15435,-6.4728)">
+ <title>Rounded Rectangle.135</title>
+ <desc>Cryptodev</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.045922865409173):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.045922865409173):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.045922865409173):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.045922865409173):1"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="72.0307" cy="230.549" width="144.07" height="29.9308"/>
+ <g id="shadow135-8" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M3.31 245.51 L140.76 245.51 A3.30639 3.30639 -180 0 0 144.06 242.21 L144.06 218.89 A3.30639 3.30639 -180
+ 0 0 140.76 215.58 L3.31 215.58 A3.30639 3.30639 -180 0 0 0 218.89 L0 242.21 A3.30639 3.30639 -180 0
+ 0 3.31 245.51 Z" class="st2"/>
+ </g>
+ <path d="M3.31 245.51 L140.76 245.51 A3.30639 3.30639 -180 0 0 144.06 242.21 L144.06 218.89 A3.30639 3.30639 -180 0 0
+ 140.76 215.58 L3.31 215.58 A3.30639 3.30639 -180 0 0 0 218.89 L0 242.21 A3.30639 3.30639 -180 0 0 3.31 245.51
+ Z" class="st5"/>
+ <text x="38.46" y="235.35" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Cryptodev</text> </g>
+ <g id="shape136-13" v:mID="136" v:groupContext="shape" transform="translate(172.866,-6.4728)">
+ <title>Rounded Rectangle.136</title>
+ <desc>Cryptodev</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.045922865409173):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.045922865409173):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.045922865409173):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.045922865409173):1"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="72.0307" cy="230.549" width="144.07" height="29.9308"/>
+ <g id="shadow136-14" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M3.31 245.51 L140.76 245.51 A3.30639 3.30639 -180 0 0 144.06 242.21 L144.06 218.89 A3.30639 3.30639 -180
+ 0 0 140.76 215.58 L3.31 215.58 A3.30639 3.30639 -180 0 0 0 218.89 L0 242.21 A3.30639 3.30639 -180 0
+ 0 3.31 245.51 Z" class="st2"/>
+ </g>
+ <path d="M3.31 245.51 L140.76 245.51 A3.30639 3.30639 -180 0 0 144.06 242.21 L144.06 218.89 A3.30639 3.30639 -180 0 0
+ 140.76 215.58 L3.31 215.58 A3.30639 3.30639 -180 0 0 0 218.89 L0 242.21 A3.30639 3.30639 -180 0 0 3.31 245.51
+ Z" class="st5"/>
+ <text x="38.46" y="235.35" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Cryptodev</text> </g>
+ <g id="shape137-19" v:mID="137" v:groupContext="shape" transform="translate(341.578,-6.4728)">
+ <title>Rounded Rectangle.137</title>
+ <desc>Cryptodev</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.045922865409173):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.045922865409173):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.045922865409173):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.045922865409173):1"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="72.0307" cy="230.549" width="144.07" height="29.9308"/>
+ <g id="shadow137-20" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M3.31 245.51 L140.76 245.51 A3.30639 3.30639 -180 0 0 144.06 242.21 L144.06 218.89 A3.30639 3.30639 -180
+ 0 0 140.76 215.58 L3.31 215.58 A3.30639 3.30639 -180 0 0 0 218.89 L0 242.21 A3.30639 3.30639 -180 0
+ 0 3.31 245.51 Z" class="st2"/>
+ </g>
+ <path d="M3.31 245.51 L140.76 245.51 A3.30639 3.30639 -180 0 0 144.06 242.21 L144.06 218.89 A3.30639 3.30639 -180 0 0
+ 140.76 215.58 L3.31 215.58 A3.30639 3.30639 -180 0 0 0 218.89 L0 242.21 A3.30639 3.30639 -180 0 0 3.31 245.51
+ Z" class="st5"/>
+ <text x="38.46" y="235.35" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Cryptodev</text> </g>
+ <g id="group139-25" transform="translate(4.15435,-66.8734)" v:mID="139" v:groupContext="group">
+ <title>Sheet.139</title>
+ <g id="shape33-26" v:mID="33" v:groupContext="shape">
+ <title>Rounded Rectangle.40</title>
+ <desc>Cryptodev Scheduler</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15348434426561):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15348434426561):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15348434426561):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15348434426561):1"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197" v:verticalAlign="0"/>
+ <v:textRect cx="240.743" cy="204.056" width="481.49" height="82.916"/>
+ <g id="shadow33-27" v:groupContext="shadow" v:shadowOffsetX="0.3456" v:shadowOffsetY="-1.9728" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.3456,1.9728)" class="st1">
+ <path d="M11.05 245.51 L470.43 245.51 A11.0507 11.0507 -180 0 0 481.49 234.46 L481.49 173.65 A11.0507 11.0507
+ -180 0 0 470.43 162.6 L11.05 162.6 A11.0507 11.0507 -180 0 0 0 173.65 L0 234.46 A11.0507 11.0507
+ -180 0 0 11.05 245.51 Z" class="st2"/>
+ </g>
+ <path d="M11.05 245.51 L470.43 245.51 A11.0507 11.0507 -180 0 0 481.49 234.46 L481.49 173.65 A11.0507 11.0507 -180
+ 0 0 470.43 162.6 L11.05 162.6 A11.0507 11.0507 -180 0 0 0 173.65 L0 234.46 A11.0507 11.0507 -180 0 0
+ 11.05 245.51 Z" class="st5"/>
+ <text x="171.72" y="181" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Cryptodev Scheduler</text> </g>
+ <g id="shape138-32" v:mID="138" v:groupContext="shape" transform="translate(24.6009,-12.5889)">
+ <title>Rounded Rectangle.138</title>
+ <desc>Crypto Op Distribution Mechanism</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15748031496063):24"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.13780016666367):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.13780016666367):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.13780016666367):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.13780016666367):1"/>
+ <v:ud v:nameU="msvThemeColors" v:val="VT0(254):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="216.142" cy="230.549" width="432.29" height="29.9308"/>
+ <path d="M9.92 245.51 L422.36 245.51 A9.92145 9.92145 -180 0 0 432.28 235.59 L432.28 225.51 A9.92145 9.92145 -180
+ 0 0 422.36 215.58 L9.92 215.58 A9.92145 9.92145 -180 0 0 0 225.51 L0 235.59 A9.92145 9.92145 -180 0
+ 0 9.92 245.51 Z" class="st7"/>
+ <text x="103.11" y="235.35" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Crypto Op Distribution Mechanism</text> </g>
+ </g>
+ <g id="shape140-35" v:mID="140" v:groupContext="shape" v:layerMember="0" transform="translate(234.378,-149.789)">
+ <title>Dynamic connector.229</title>
+ <path d="M7.09 245.51 L7.09 223.64" class="st8"/>
+ </g>
+ <g id="shape141-41" v:mID="141" v:groupContext="shape" v:layerMember="0" transform="translate(248.551,-179.702)">
+ <title>Dynamic connector.141</title>
+ <path d="M7.09 245.51 L7.09 267.39" class="st8"/>
+ </g>
+ <g id="shape142-46" v:mID="142" v:groupContext="shape" v:layerMember="0" transform="translate(71.3856,-35.6203)">
+ <title>Dynamic connector.142</title>
+ <path d="M7.09 245.51 L7.09 223.64" class="st8"/>
+ </g>
+ <g id="shape143-51" v:mID="143" v:groupContext="shape" v:layerMember="0" transform="translate(85.5588,-65.5333)">
+ <title>Dynamic connector.143</title>
+ <path d="M7.09 245.51 L7.09 267.39" class="st8"/>
+ </g>
+ <g id="shape144-56" v:mID="144" v:groupContext="shape" v:layerMember="0" transform="translate(234.378,-35.6203)">
+ <title>Dynamic connector.144</title>
+ <path d="M7.09 245.51 L7.09 223.64" class="st8"/>
+ </g>
+ <g id="shape145-61" v:mID="145" v:groupContext="shape" v:layerMember="0" transform="translate(248.551,-65.5333)">
+ <title>Dynamic connector.145</title>
+ <path d="M7.09 245.51 L7.09 267.39" class="st8"/>
+ </g>
+ <g id="shape146-66" v:mID="146" v:groupContext="shape" v:layerMember="0" transform="translate(397.37,-34.837)">
+ <title>Dynamic connector.146</title>
+ <path d="M7.09 245.51 L7.09 223.64" class="st8"/>
+ </g>
+ <g id="shape147-71" v:mID="147" v:groupContext="shape" v:layerMember="0" transform="translate(411.543,-64.75)">
+ <title>Dynamic connector.147</title>
+ <path d="M7.09 245.51 L7.09 267.39" class="st8"/>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
index a6a9f23c..361b82dd 100644
--- a/doc/guides/cryptodevs/index.rst
+++ b/doc/guides/cryptodevs/index.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2015 - 2016 Intel Corporation. All rights reserved.
+ Copyright(c) 2015 - 2017 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -38,9 +38,12 @@ Crypto Device Drivers
overview
aesni_mb
aesni_gcm
+ armv8
+ dpaa2_sec
kasumi
openssl
null
+ scheduler
snow3g
qat
zuc
diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst
index 90d8e7b3..bff9321e 100644
--- a/doc/guides/cryptodevs/kasumi.rst
+++ b/doc/guides/cryptodevs/kasumi.rst
@@ -90,9 +90,9 @@ In order to enable this virtual crypto PMD, user must:
To use the PMD in an application, user must:
-* Call rte_eal_vdev_init("crypto_kasumi") within the application.
+* Call rte_vdev_init("crypto_kasumi") within the application.
-* Use --vdev="crypto_kasumi" in the EAL options, which will call rte_eal_vdev_init() internally.
+* Use --vdev="crypto_kasumi" in the EAL options, which will call rte_vdev_init() internally.
The following parameters (all optional) can be provided in the previous two calls:
@@ -107,4 +107,4 @@ Example:
.. code-block:: console
- ./l2fwd-crypto -c 40 -n 4 --vdev="crypto_kasumi,socket_id=1,max_nb_sessions=128"
+ ./l2fwd-crypto -l 6 -n 4 --vdev="crypto_kasumi,socket_id=1,max_nb_sessions=128"
diff --git a/doc/guides/cryptodevs/null.rst b/doc/guides/cryptodevs/null.rst
index ec5bc70d..4a3bfdfd 100644
--- a/doc/guides/cryptodevs/null.rst
+++ b/doc/guides/cryptodevs/null.rst
@@ -76,9 +76,9 @@ Initialization
To use the PMD in an application, user must:
-* Call rte_eal_vdev_init("crypto_null") within the application.
+* Call rte_vdev_init("crypto_null") within the application.
-* Use --vdev="crypto_null" in the EAL options, which will call rte_eal_vdev_init() internally.
+* Use --vdev="crypto_null" in the EAL options, which will call rte_vdev_init() internally.
The following parameters (all optional) can be provided in the previous two calls:
@@ -93,4 +93,4 @@ Example:
.. code-block:: console
- ./l2fwd-crypto -c 40 -n 4 --vdev="crypto_null,socket_id=1,max_nb_sessions=128"
+ ./l2fwd-crypto -l 6 -n 4 --vdev="crypto_null,socket_id=1,max_nb_sessions=128"
diff --git a/doc/guides/cryptodevs/openssl.rst b/doc/guides/cryptodevs/openssl.rst
index f1c39bac..e3419151 100644
--- a/doc/guides/cryptodevs/openssl.rst
+++ b/doc/guides/cryptodevs/openssl.rst
@@ -49,6 +49,7 @@ Supported cipher algorithms:
* ``RTE_CRYPTO_CIPHER_AES_CTR``
* ``RTE_CRYPTO_CIPHER_3DES_CTR``
* ``RTE_CRYPTO_CIPHER_AES_GCM``
+* ``RTE_CRYPTO_CIPHER_DES_DOCSISBPI``
Supported authentication algorithms:
* ``RTE_CRYPTO_AUTH_AES_GMAC``
@@ -98,7 +99,7 @@ To verify real traffic l2fwd-crypto example can be used with this command:
.. code-block:: console
- sudo ./build/l2fwd-crypto -c 0x3 -n 4 --vdev "crypto_openssl"
+ sudo ./build/l2fwd-crypto -l 0-1 -n 4 --vdev "crypto_openssl"
--vdev "crypto_openssl"-- -p 0x3 --chain CIPHER_HASH
--cipher_op ENCRYPT --cipher_algo AES_CBC
--cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
@@ -112,6 +113,7 @@ Limitations
-----------
* Maximum number of sessions is 2048.
-* Chained mbufs are not supported.
+* Chained mbufs are supported only for source mbuf (destination must be
+ contiguous).
* Hash only is not supported for GCM and GMAC.
* Cipher only is not supported for GCM and GMAC.
diff --git a/doc/guides/cryptodevs/overview.rst b/doc/guides/cryptodevs/overview.rst
index d612f71b..6764d0d9 100644
--- a/doc/guides/cryptodevs/overview.rst
+++ b/doc/guides/cryptodevs/overview.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2016 Intel Corporation. All rights reserved.
+ Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
@@ -11,7 +11,7 @@
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
- * Neither the name of Intel Corporation nor the names of its
+ * Neither the name of 6WIND S.A. nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
@@ -28,68 +28,32 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Crypto Device Supported Functionality Matrices
-----------------------------------------------
+==============================================
Supported Feature Flags
+-----------------------
-.. csv-table::
- :header: "Feature Flags", "qat", "null", "aesni_mb", "aesni_gcm", "snow3g", "kasumi"
- :stub-columns: 1
+.. _table_crypto_pmd_features:
- "RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO",x,x,x,x,x,x
- "RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO",,,,,,
- "RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING",x,x,x,x,x,x
- "RTE_CRYPTODEV_FF_CPU_SSE",,,x,x,x,x
- "RTE_CRYPTODEV_FF_CPU_AVX",,,x,x,x,x
- "RTE_CRYPTODEV_FF_CPU_AVX2",,,x,x,,
- "RTE_CRYPTODEV_FF_CPU_AESNI",,,x,x,,
- "RTE_CRYPTODEV_FF_HW_ACCELERATED",x,,,,,
+.. include:: overview_feature_table.txt
Supported Cipher Algorithms
+---------------------------
-.. csv-table::
- :header: "Cipher Algorithms", "qat", "null", "aesni_mb", "aesni_gcm", "snow3g", "kasumi"
- :stub-columns: 1
+.. _table_crypto_pmd_cipher_algos:
- "NULL",,x,,,,
- "AES_CBC_128",x,,x,,,
- "AES_CBC_192",x,,x,,,
- "AES_CBC_256",x,,x,,,
- "AES_CTR_128",x,,x,,,
- "AES_CTR_192",x,,x,,,
- "AES_CTR_256",x,,x,,,
- "SNOW3G_UEA2",x,,,,x,
- "KASUMI_F8",,,,,,x
+.. include:: overview_cipher_table.txt
Supported Authentication Algorithms
+-----------------------------------
-.. csv-table::
- :header: "Cipher Algorithms", "qat", "null", "aesni_mb", "aesni_gcm", "snow3g", "kasumi"
- :stub-columns: 1
-
- "NONE",,x,,,,
- "MD5",,,,,,
- "MD5_HMAC",,,x,,,
- "SHA1",,,,,,
- "SHA1_HMAC",x,,x,,,
- "SHA224",,,,,,
- "SHA224_HMAC",,,x,,,
- "SHA256",,,,,,
- "SHA256_HMAC",x,,x,,,
- "SHA384",,,,,,
- "SHA384_HMAC",,,x,,,
- "SHA512",,,,,,
- "SHA512_HMAC",x,,x,,,
- "AES_XCBC",x,,x,,,
- "SNOW3G_UIA2",x,,,,x,
- "KASUMI_F9",,,,,,x
+.. _table_crypto_pmd_auth_algos:
+
+.. include:: overview_auth_table.txt
Supported AEAD Algorithms
+-------------------------
-.. csv-table::
- :header: "AEAD Algorithms", "qat", "null", "aesni_mb", "aesni_gcm", "snow3g", "kasumi"
- :stub-columns: 1
+.. _table_crypto_pmd_aead_algos:
- "AES_GCM_128",x,,x,,,
- "AES_GCM_192",x,,,,,
- "AES_GCM_256",x,,,,,
+.. include:: overview_aead_table.txt
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 52a9ae35..21b56fc6 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -30,9 +30,13 @@
Intel(R) QuickAssist (QAT) Crypto Poll Mode Driver
==================================================
-The QAT PMD provides poll mode crypto driver support for **Intel QuickAssist
-Technology DH895xxC**, **Intel QuickAssist Technology C62x** and
-**Intel QuickAssist Technology C3xxx** hardware accelerator.
+The QAT PMD provides poll mode crypto driver support for the following
+hardware accelerator devices:
+
+* ``Intel QuickAssist Technology DH895xCC``
+* ``Intel QuickAssist Technology C62x``
+* ``Intel QuickAssist Technology C3xxx``
+* ``Intel QuickAssist Technology D15xx``
Features
@@ -54,6 +58,10 @@ Cipher algorithms:
* ``RTE_CRYPTO_CIPHER_AES_GCM``
* ``RTE_CRYPTO_CIPHER_NULL``
* ``RTE_CRYPTO_CIPHER_KASUMI_F8``
+* ``RTE_CRYPTO_CIPHER_DES_CBC``
+* ``RTE_CRYPTO_CIPHER_AES_DOCSISBPI``
+* ``RTE_CRYPTO_CIPHER_DES_DOCSISBPI``
+* ``RTE_CRYPTO_CIPHER_ZUC_EEA3``
Hash algorithms:
@@ -68,349 +76,291 @@ Hash algorithms:
* ``RTE_CRYPTO_AUTH_NULL``
* ``RTE_CRYPTO_AUTH_KASUMI_F9``
* ``RTE_CRYPTO_AUTH_AES_GMAC``
+* ``RTE_CRYPTO_AUTH_ZUC_EIA3``
Limitations
-----------
-* Chained mbufs are not supported.
* Hash only is not supported except SNOW 3G UIA2 and KASUMI F9.
-* Cipher only is not supported except SNOW 3G UEA2, KASUMI F8 and 3DES.
* Only supports the session-oriented API implementation (session-less APIs are not supported).
* SNOW 3G (UEA2) and KASUMI (F8) supported only if cipher length, cipher offset fields are byte-aligned.
* SNOW 3G (UIA2) and KASUMI (F9) supported only if hash length, hash offset fields are byte-aligned.
* No BSD support as BSD QAT kernel driver not available.
+* ZUC EEA3/EIA3 is not supported by dh895xcc devices
+* Maximum additional authenticated data (AAD) for GCM is 240 bytes long.
Installation
------------
-To use the DPDK QAT PMD an SRIOV-enabled QAT kernel driver is required. The
-VF devices exposed by this driver will be used by QAT PMD.
-
-To enable QAT in DPDK, follow the instructions mentioned in
-http://dpdk.org/doc/guides/linux_gsg/build_dpdk.html
+To enable QAT in DPDK, follow the instructions for modifying the compile-time
+configuration file as described `here <http://dpdk.org/doc/guides/linux_gsg/build_dpdk.html>`_.
-Quick instructions as follows:
+Quick instructions are as follows:
.. code-block:: console
+ cd to the top-level DPDK directory
make config T=x86_64-native-linuxapp-gcc
sed -i 's,\(CONFIG_RTE_LIBRTE_PMD_QAT\)=n,\1=y,' build/.config
make
-If you are running on kernel 4.4 or greater, see instructions for
-`Installation using kernel.org driver`_ below. If you are on a kernel earlier
-than 4.4, see `Installation using 01.org QAT driver`_.
-
-For **Intel QuickAssist Technology C62x** and **Intel QuickAssist Technology C3xxx**
-device, kernel 4.5 or greater is needed.
-See instructions for `Installation using kernel.org driver`_ below.
-
-
-Installation using 01.org QAT driver
-------------------------------------
-
-NOTE: There is no driver available for **Intel QuickAssist Technology C62x** and
-**Intel QuickAssist Technology C3xxx** devices on 01.org.
-
-Download the latest QuickAssist Technology Driver from `01.org
-<https://01.org/packet-processing/intel%C2%AE-quickassist-technology-drivers-and-patches>`_
-Consult the *Getting Started Guide* at the same URL for further information.
+To use the DPDK QAT PMD an SRIOV-enabled QAT kernel driver is required. The VF
+devices exposed by this driver will be used by the QAT PMD. The devices and
+available kernel drivers and device ids are :
-The steps below assume you are:
+.. _table_qat_pmds_drivers:
-* Building on a platform with one ``DH895xCC`` device.
-* Using package ``qatmux.l.2.3.0-34.tgz``.
-* On Fedora21 kernel ``3.17.4-301.fc21.x86_64``.
+.. table:: QAT devices and drivers
-In the BIOS ensure that SRIOV is enabled and VT-d is disabled.
+ +----------+--------+---------------+------------+--------+---------+--------+------------+
+ | Device | Driver | Kernel Module | Pci Driver | PF Did | Num PFs | Vf Did | VFs per PF |
+ +==========+========+===============+============+========+=========+========+============+
+ | DH895xCC | 01.org | icp_qa_al | n/a | 435 | 1 | 443 | 32 |
+ +----------+--------+---------------+------------+--------+---------+--------+------------+
+ | DH895xCC | 4.4+ | qat_dh895xcc | dh895xcc | 435 | 1 | 443 | 32 |
+ +----------+--------+---------------+------------+--------+---------+--------+------------+
+ | C62x | 4.5+ | qat_c62x | c6xx | 37c8 | 3 | 37c9 | 16 |
+ +----------+--------+---------------+------------+--------+---------+--------+------------+
+ | C3xxx | 4.5+ | qat_c3xxx | c3xxx | 19e2 | 1 | 19e3 | 16 |
+ +----------+--------+---------------+------------+--------+---------+--------+------------+
+ | D15xx | p | qat_d15xx | d15xx | 6f54 | 1 | 6f55 | 16 |
+ +----------+--------+---------------+------------+--------+---------+--------+------------+
-Uninstall any existing QAT driver, for example by running:
-* ``./installer.sh uninstall`` in the directory where originally installed.
+The ``Driver`` column indicates either the Linux kernel version in which
+support for this device was introduced or a driver available on Intel's 01.org
+website. There are both linux and 01.org kernel drivers available for some
+devices. p = release pending.
-* or ``rmmod qat_dh895xcc; rmmod intel_qat``.
-
-Build and install the SRIOV-enabled QAT driver::
-
- mkdir /QAT
- cd /QAT
- # copy qatmux.l.2.3.0-34.tgz to this location
- tar zxof qatmux.l.2.3.0-34.tgz
-
- export ICP_WITHOUT_IOMMU=1
- ./installer.sh install QAT1.6 host
-
-You can use ``cat /proc/icp_dh895xcc_dev0/version`` to confirm the driver is correctly installed.
-You can use ``lspci -d:443`` to confirm the bdf of the 32 VF devices are available per ``DH895xCC`` device.
-
-To complete the installation - follow instructions in `Binding the available VFs to the DPDK UIO driver`_.
-
-**Note**: If using a later kernel and the build fails with an error relating to ``strict_stroul`` not being available apply the following patch:
-
-.. code-block:: diff
-
- /QAT/QAT1.6/quickassist/utilities/downloader/Target_CoreLibs/uclo/include/linux/uclo_platform.h
- + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,5)
- + #define STR_TO_64(str, base, num, endPtr) {endPtr=NULL; if (kstrtoul((str), (base), (num))) printk("Error strtoull convert %s\n", str); }
- + #else
- #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
- #define STR_TO_64(str, base, num, endPtr) {endPtr=NULL; if (strict_strtoull((str), (base), (num))) printk("Error strtoull convert %s\n", str); }
- #else
- #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
- #define STR_TO_64(str, base, num, endPtr) {endPtr=NULL; strict_strtoll((str), (base), (num));}
- #else
- #define STR_TO_64(str, base, num, endPtr) \
- do { \
- if (str[0] == '-') \
- { \
- *(num) = -(simple_strtoull((str+1), &(endPtr), (base))); \
- }else { \
- *(num) = simple_strtoull((str), &(endPtr), (base)); \
- } \
- } while(0)
- + #endif
- #endif
- #endif
-
-
-If the build fails due to missing header files you may need to do following:
-
-* ``sudo yum install zlib-devel``
-* ``sudo yum install openssl-devel``
-
-If the build or install fails due to mismatching kernel sources you may need to do the following:
-
-* ``sudo yum install kernel-headers-`uname -r```
-* ``sudo yum install kernel-src-`uname -r```
-* ``sudo yum install kernel-devel-`uname -r```
+If you are running on a kernel which includes a driver for your device, see
+`Installation using kernel.org driver`_ below. Otherwise see
+`Installation using 01.org QAT driver`_.
Installation using kernel.org driver
------------------------------------
-For **Intel QuickAssist Technology DH895xxC**:
-
-Assuming you are running on at least a 4.4 kernel, you can use the stock kernel.org QAT
-driver to start the QAT hardware.
-
-The steps below assume you are:
+The examples below are based on the C62x device, if you have a different device
+use the corresponding values in the above table.
-* Running DPDK on a platform with one ``DH895xCC`` device.
-* On a kernel at least version 4.4.
+In BIOS ensure that SRIOV is enabled and either:
-In BIOS ensure that SRIOV is enabled and either
-a) disable VT-d or
-b) enable VT-d and set ``"intel_iommu=on iommu=pt"`` in the grub file.
+* Disable VT-d or
+* Enable VT-d and set ``"intel_iommu=on iommu=pt"`` in the grub file.
-Ensure the QAT driver is loaded on your system, by executing::
+Check that the QAT driver is loaded on your system, by executing::
- lsmod | grep qat
+ lsmod | grep qa
-You should see the following output::
+You should see the kernel module for your device listed, e.g.::
- qat_dh895xcc 5626 0
- intel_qat 82336 1 qat_dh895xcc
+ qat_c62x 5626 0
+ intel_qat 82336 1 qat_c62x
Next, you need to expose the Virtual Functions (VFs) using the sysfs file system.
-First find the bdf of the physical function (PF) of the DH895xCC device::
+First find the BDFs (Bus-Device-Function) of the physical functions (PFs) of
+your device, e.g.::
- lspci -d : 435
+ lspci -d : 37c8
You should see output similar to::
- 03:00.0 Co-processor: Intel Corporation Coleto Creek PCIe Endpoint
-
-Using the sysfs, enable the VFs::
-
- echo 32 > /sys/bus/pci/drivers/dh895xcc/0000\:03\:00.0/sriov_numvfs
-
-If you get an error, it's likely you're using a QAT kernel driver earlier than kernel 4.4.
-
-To verify that the VFs are available for use - use ``lspci -d:443`` to confirm
-the bdf of the 32 VF devices are available per ``DH895xCC`` device.
+ 1a:00.0 Co-processor: Intel Corporation Device 37c8
+ 3d:00.0 Co-processor: Intel Corporation Device 37c8
+ 3f:00.0 Co-processor: Intel Corporation Device 37c8
-To complete the installation - follow instructions in `Binding the available VFs to the DPDK UIO driver`_.
+Enable the VFs for each PF by echoing the number of VFs per PF to the pci driver::
-**Note**: If the QAT kernel modules are not loaded and you see an error like
- ``Failed to load MMP firmware qat_895xcc_mmp.bin`` this may be as a
- result of not using a distribution, but just updating the kernel directly.
+ echo 16 > /sys/bus/pci/drivers/c6xx/0000:1a:00.0/sriov_numvfs
+ echo 16 > /sys/bus/pci/drivers/c6xx/0000:3d:00.0/sriov_numvfs
+ echo 16 > /sys/bus/pci/drivers/c6xx/0000:3f:00.0/sriov_numvfs
-Download firmware from the kernel firmware repo at:
-http://git.kernel.org/cgit/linux/kernel/git/firmware/linux-firmware.git/tree/
+Check that the VFs are available for use. For example ``lspci -d:37c9`` should
+list 48 VF devices available for a ``C62x`` device.
-Copy qat binaries to /lib/firmware:
-* ``cp qat_895xcc.bin /lib/firmware``
-* ``cp qat_895xcc_mmp.bin /lib/firmware``
+To complete the installation follow the instructions in
+`Binding the available VFs to the DPDK UIO driver`_.
-cd to your linux source root directory and start the qat kernel modules:
-* ``insmod ./drivers/crypto/qat/qat_common/intel_qat.ko``
-* ``insmod ./drivers/crypto/qat/qat_dh895xcc/qat_dh895xcc.ko``
+.. Note::
-**Note**:The following warning in /var/log/messages can be ignored:
- ``IOMMU should be enabled for SR-IOV to work correctly``
+ If the QAT kernel modules are not loaded and you see an error like ``Failed
+ to load MMP firmware qat_895xcc_mmp.bin`` in kernel logs, this may be as a
+ result of not using a distribution, but just updating the kernel directly.
-For **Intel QuickAssist Technology C62x**:
-Assuming you are running on at least a 4.5 kernel, you can use the stock kernel.org QAT
-driver to start the QAT hardware.
+ Download firmware from the `kernel firmware repo
+ <http://git.kernel.org/cgit/linux/kernel/git/firmware/linux-firmware.git/tree/>`_.
-The steps below assume you are:
+ Copy qat binaries to ``/lib/firmware``::
-* Running DPDK on a platform with one ``C62x`` device.
-* On a kernel at least version 4.5.
+ cp qat_895xcc.bin /lib/firmware
+ cp qat_895xcc_mmp.bin /lib/firmware
-In BIOS ensure that SRIOV is enabled and either
-a) disable VT-d or
-b) enable VT-d and set ``"intel_iommu=on iommu=pt"`` in the grub file.
+ Change to your linux source root directory and start the qat kernel modules::
-Ensure the QAT driver is loaded on your system, by executing::
+ insmod ./drivers/crypto/qat/qat_common/intel_qat.ko
+ insmod ./drivers/crypto/qat/qat_dh895xcc/qat_dh895xcc.ko
- lsmod | grep qat
-You should see the following output::
+.. Note::
- qat_c62x 16384 0
- intel_qat 122880 1 qat_c62x
+ If you see the following warning in ``/var/log/messages`` it can be ignored:
+ ``IOMMU should be enabled for SR-IOV to work correctly``.
-Next, you need to expose the VFs using the sysfs file system.
-First find the bdf of the C62x device::
+Installation using 01.org QAT driver
+------------------------------------
- lspci -d:37c8
+Download the latest QuickAssist Technology Driver from `01.org
+<https://01.org/packet-processing/intel%C2%AE-quickassist-technology-drivers-and-patches>`_.
+Consult the *Getting Started Guide* at the same URL for further information.
-You should see output similar to::
+The steps below assume you are:
- 1a:00.0 Co-processor: Intel Corporation Device 37c8
- 3d:00.0 Co-processor: Intel Corporation Device 37c8
- 3f:00.0 Co-processor: Intel Corporation Device 37c8
+* Building on a platform with one ``DH895xCC`` device.
+* Using package ``qatmux.l.2.3.0-34.tgz``.
+* On Fedora21 kernel ``3.17.4-301.fc21.x86_64``.
-For each c62x device there are 3 PFs.
-Using the sysfs, for each PF, enable the 16 VFs::
+In the BIOS ensure that SRIOV is enabled and VT-d is disabled.
- echo 16 > /sys/bus/pci/drivers/c6xx/0000\:1a\:00.0/sriov_numvfs
+Uninstall any existing QAT driver, for example by running:
-If you get an error, it's likely you're using a QAT kernel driver earlier than kernel 4.5.
+* ``./installer.sh uninstall`` in the directory where originally installed.
-To verify that the VFs are available for use - use ``lspci -d:37c9`` to confirm
-the bdf of the 48 VF devices are available per ``C62x`` device.
+* or ``rmmod qat_dh895xcc; rmmod intel_qat``.
-To complete the installation - follow instructions in `Binding the available VFs to the DPDK UIO driver`_.
+Build and install the SRIOV-enabled QAT driver::
-For **Intel QuickAssist Technology C3xxx**:
-Assuming you are running on at least a 4.5 kernel, you can use the stock kernel.org QAT
-driver to start the QAT hardware.
+ mkdir /QAT
+ cd /QAT
-The steps below assume you are:
+ # Copy qatmux.l.2.3.0-34.tgz to this location
+ tar zxof qatmux.l.2.3.0-34.tgz
-* Running DPDK on a platform with one ``C3xxx`` device.
-* On a kernel at least version 4.5.
+ export ICP_WITHOUT_IOMMU=1
+ ./installer.sh install QAT1.6 host
-In BIOS ensure that SRIOV is enabled and either
-a) disable VT-d or
-b) enable VT-d and set ``"intel_iommu=on iommu=pt"`` in the grub file.
+You can use ``cat /proc/icp_dh895xcc_dev0/version`` to confirm the driver is correctly installed.
+You can use ``lspci -d:443`` to confirm the of the 32 VF devices available per ``DH895xCC`` device.
-Ensure the QAT driver is loaded on your system, by executing::
+To complete the installation - follow instructions in `Binding the available VFs to the DPDK UIO driver`_.
- lsmod | grep qat
+.. Note::
-You should see the following output::
+ If using a later kernel and the build fails with an error relating to
+ ``strict_stroul`` not being available apply the following patch:
- qat_c3xxx 16384 0
- intel_qat 122880 1 qat_c3xxx
+ .. code-block:: diff
-Next, you need to expose the Virtual Functions (VFs) using the sysfs file system.
+ /QAT/QAT1.6/quickassist/utilities/downloader/Target_CoreLibs/uclo/include/linux/uclo_platform.h
+ + #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,5)
+ + #define STR_TO_64(str, base, num, endPtr) {endPtr=NULL; if (kstrtoul((str), (base), (num))) printk("Error strtoull convert %s\n", str); }
+ + #else
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
+ #define STR_TO_64(str, base, num, endPtr) {endPtr=NULL; if (strict_strtoull((str), (base), (num))) printk("Error strtoull convert %s\n", str); }
+ #else
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
+ #define STR_TO_64(str, base, num, endPtr) {endPtr=NULL; strict_strtoll((str), (base), (num));}
+ #else
+ #define STR_TO_64(str, base, num, endPtr) \
+ do { \
+ if (str[0] == '-') \
+ { \
+ *(num) = -(simple_strtoull((str+1), &(endPtr), (base))); \
+ }else { \
+ *(num) = simple_strtoull((str), &(endPtr), (base)); \
+ } \
+ } while(0)
+ + #endif
+ #endif
+ #endif
-First find the bdf of the physical function (PF) of the C3xxx device
- lspci -d:19e2
+.. Note::
-You should see output similar to::
+ If the build fails due to missing header files you may need to do following::
- 01:00.0 Co-processor: Intel Corporation Device 19e2
+ sudo yum install zlib-devel
+ sudo yum install openssl-devel
-For c3xxx device there is 1 PFs.
-Using the sysfs, enable the 16 VFs::
+.. Note::
- echo 16 > /sys/bus/pci/drivers/c3xxx/0000\:01\:00.0/sriov_numvfs
+ If the build or install fails due to mismatching kernel sources you may need to do the following::
-If you get an error, it's likely you're using a QAT kernel driver earlier than kernel 4.5.
+ sudo yum install kernel-headers-`uname -r`
+ sudo yum install kernel-src-`uname -r`
+ sudo yum install kernel-devel-`uname -r`
-To verify that the VFs are available for use - use ``lspci -d:19e3`` to confirm
-the bdf of the 16 VF devices are available per ``C3xxx`` device.
-To complete the installation - follow instructions in `Binding the available VFs to the DPDK UIO driver`_.
Binding the available VFs to the DPDK UIO driver
------------------------------------------------
-For **Intel(R) QuickAssist Technology DH895xcc** device:
-The unbind command below assumes ``bdfs`` of ``03:01.00-03:04.07``, if yours are different adjust the unbind command below::
+Unbind the VFs from the stock driver so they can be bound to the uio driver.
- cd $RTE_SDK
- modprobe uio
- insmod ./build/kmod/igb_uio.ko
+For an Intel(R) QuickAssist Technology DH895xCC device
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- for device in $(seq 1 4); do \
- for fn in $(seq 0 7); do \
- echo -n 0000:03:0${device}.${fn} > \
- /sys/bus/pci/devices/0000\:03\:0${device}.${fn}/driver/unbind; \
- done; \
- done
+The unbind command below assumes ``BDFs`` of ``03:01.00-03:04.07``, if your
+VFs are different adjust the unbind command below::
- echo "8086 0443" > /sys/bus/pci/drivers/igb_uio/new_id
+ for device in $(seq 1 4); do \
+ for fn in $(seq 0 7); do \
+ echo -n 0000:03:0${device}.${fn} > \
+ /sys/bus/pci/devices/0000\:03\:0${device}.${fn}/driver/unbind; \
+ done; \
+ done
-You can use ``lspci -vvd:443`` to confirm that all devices are now in use by igb_uio kernel driver.
+For an Intel(R) QuickAssist Technology C62x device
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-For **Intel(R) QuickAssist Technology C62x** device:
-The unbind command below assumes ``bdfs`` of ``1a:01.00-1a:02.07``, ``3d:01.00-3d:02.07`` and ``3f:01.00-3f:02.07``,
-if yours are different adjust the unbind command below::
+The unbind command below assumes ``BDFs`` of ``1a:01.00-1a:02.07``,
+``3d:01.00-3d:02.07`` and ``3f:01.00-3f:02.07``, if your VFs are different
+adjust the unbind command below::
- cd $RTE_SDK
- modprobe uio
- insmod ./build/kmod/igb_uio.ko
+ for device in $(seq 1 2); do \
+ for fn in $(seq 0 7); do \
+ echo -n 0000:1a:0${device}.${fn} > \
+ /sys/bus/pci/devices/0000\:1a\:0${device}.${fn}/driver/unbind; \
- for device in $(seq 1 2); do \
- for fn in $(seq 0 7); do \
- echo -n 0000:1a:0${device}.${fn} > \
- /sys/bus/pci/devices/0000\:1a\:0${device}.${fn}/driver/unbind; \
+ echo -n 0000:3d:0${device}.${fn} > \
+ /sys/bus/pci/devices/0000\:3d\:0${device}.${fn}/driver/unbind; \
- echo -n 0000:3d:0${device}.${fn} > \
- /sys/bus/pci/devices/0000\:3d\:0${device}.${fn}/driver/unbind; \
+ echo -n 0000:3f:0${device}.${fn} > \
+ /sys/bus/pci/devices/0000\:3f\:0${device}.${fn}/driver/unbind; \
+ done; \
+ done
- echo -n 0000:3f:0${device}.${fn} > \
- /sys/bus/pci/devices/0000\:3f\:0${device}.${fn}/driver/unbind; \
- done; \
- done
+For Intel(R) QuickAssist Technology C3xxx or D15xx device
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- echo "8086 37c9" > /sys/bus/pci/drivers/igb_uio/new_id
+The unbind command below assumes ``BDFs`` of ``01:01.00-01:02.07``, if your
+VFs are different adjust the unbind command below::
-You can use ``lspci -vvd:37c9`` to confirm that all devices are now in use by igb_uio kernel driver.
+ for device in $(seq 1 2); do \
+ for fn in $(seq 0 7); do \
+ echo -n 0000:01:0${device}.${fn} > \
+ /sys/bus/pci/devices/0000\:01\:0${device}.${fn}/driver/unbind; \
+ done; \
+ done
-For **Intel(R) QuickAssist Technology C3xxx** device:
-The unbind command below assumes ``bdfs`` of ``01:01.00-01:02.07``,
-if yours are different adjust the unbind command below::
+Bind to the DPDK uio driver
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
- cd $RTE_SDK
- modprobe uio
- insmod ./build/kmod/igb_uio.ko
+Install the DPDK igb_uio driver, bind the VF PCI Device id to it and use lspci
+to confirm the VF devices are now in use by igb_uio kernel driver,
+e.g. for the C62x device::
- for device in $(seq 1 2); do \
- for fn in $(seq 0 7); do \
- echo -n 0000:01:0${device}.${fn} > \
- /sys/bus/pci/devices/0000\:01\:0${device}.${fn}/driver/unbind; \
+ cd to the top-level DPDK directory
+ modprobe uio
+ insmod ./build/kmod/igb_uio.ko
+ echo "8086 37c9" > /sys/bus/pci/drivers/igb_uio/new_id
+ lspci -vvd:37c9
- done; \
- done
- echo "8086 19e3" > /sys/bus/pci/drivers/igb_uio/new_id
-
-You can use ``lspci -vvd:19e3`` to confirm that all devices are now in use by igb_uio kernel driver.
-
-
-The other way to bind the VFs to the DPDK UIO driver is by using the ``dpdk-devbind.py`` script:
-
-.. code-block:: console
+Another way to bind the VFs to the DPDK UIO driver is by using the
+``dpdk-devbind.py`` script::
- cd $RTE_SDK
- ./tools/dpdk-devbind.py -b igb_uio 0000:03:01.1
+ cd to the top-level DPDK directory
+ ./usertools/dpdk-devbind.py -b igb_uio 0000:03:01.1
diff --git a/doc/guides/cryptodevs/scheduler.rst b/doc/guides/cryptodevs/scheduler.rst
new file mode 100644
index 00000000..32e56537
--- /dev/null
+++ b/doc/guides/cryptodevs/scheduler.rst
@@ -0,0 +1,172 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Cryptodev Scheduler Poll Mode Driver Library
+============================================
+
+Scheduler PMD is a software crypto PMD, which has the capabilities of
+attaching hardware and/or software cryptodevs, and distributes ingress
+crypto ops among them in a certain manner.
+
+.. figure:: img/scheduler-overview.*
+
+ Cryptodev Scheduler Overview
+
+
+The Cryptodev Scheduler PMD library (**librte_pmd_crypto_scheduler**) acts as
+a software crypto PMD and shares the same API provided by librte_cryptodev.
+The PMD supports attaching multiple crypto PMDs, software or hardware, as
+slaves, and distributes the crypto workload to them with certain behavior.
+The behaviors are categorizes as different "modes". Basically, a scheduling
+mode defines certain actions for scheduling crypto ops to its slaves.
+
+The librte_pmd_crypto_scheduler library exports a C API which provides an API
+for attaching/detaching slaves, set/get scheduling modes, and enable/disable
+crypto ops reordering.
+
+Limitations
+-----------
+
+* Sessionless crypto operation is not supported
+* OOP crypto operation is not supported when the crypto op reordering feature
+ is enabled.
+
+
+Installation
+------------
+
+To build DPDK with CRYTPO_SCHEDULER_PMD the user is required to set
+CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER=y in config/common_base, and
+recompile DPDK
+
+
+Initialization
+--------------
+
+To use the PMD in an application, user must:
+
+* Call rte_vdev_init("crpyto_scheduler") within the application.
+
+* Use --vdev="crpyto_scheduler" in the EAL options, which will call
+ rte_vdev_init() internally.
+
+
+The following parameters (all optional) can be provided in the previous
+two calls:
+
+* socket_id: Specify the socket where the memory for the device is going
+ to be allocated (by default, socket_id will be the socket where the core
+ that is creating the PMD is running on).
+
+* max_nb_sessions: Specify the maximum number of sessions that can be
+ created. This value may be overwritten internally if there are too
+ many devices are attached.
+
+* slave: If a cryptodev has been initialized with specific name, it can be
+ attached to the scheduler using this parameter, simply filling the name
+ here. Multiple cryptodevs can be attached initially by presenting this
+ parameter multiple times.
+
+* mode: Specify the scheduling mode of the PMD. The supported scheduling
+ mode parameter values are specified in the "Cryptodev Scheduler Modes
+ Overview" section.
+
+* ordering: Specify the status of the crypto operations ordering feature.
+ The value of this parameter can be "enable" or "disable". This feature
+ is disabled by default.
+
+Example:
+
+.. code-block:: console
+
+ ... --vdev "crypto_aesni_mb_pmd,name=aesni_mb_1" --vdev "crypto_aesni_mb_pmd,name=aesni_mb_2" --vdev "crypto_scheduler_pmd,slave=aesni_mb_1,slave=aesni_mb_2" ...
+
+.. note::
+
+ * The scheduler cryptodev cannot be started unless the scheduling mode
+ is set and at least one slave is attached. Also, to configure the
+ scheduler in the run-time, like attach/detach slave(s), change
+ scheduling mode, or enable/disable crypto op ordering, one should stop
+ the scheduler first, otherwise an error will be returned.
+
+ * The crypto op reordering feature requires using the userdata field of
+ every mbuf to be processed to store temporary data. By the end of
+ processing, the field is set to pointing to NULL, any previously
+ stored value of this field will be lost.
+
+
+Cryptodev Scheduler Modes Overview
+----------------------------------
+
+Currently the Crypto Scheduler PMD library supports following modes of
+operation:
+
+* **CDEV_SCHED_MODE_ROUNDROBIN:**
+
+ *Initialization mode parameter*: **round-robin**
+
+ Round-robin mode, which distributes the enqueued burst of crypto ops
+ among its slaves in a round-robin manner. This mode may help to fill
+ the throughput gap between the physical core and the existing cryptodevs
+ to increase the overall performance.
+
+* **CDEV_SCHED_MODE_PKT_SIZE_DISTR:**
+
+ *Initialization mode parameter*: **packet-size-distr**
+
+ Packet-size based distribution mode, which works with 2 slaves, the primary
+ slave and the secondary slave, and distributes the enqueued crypto
+ operations to them based on their data lengths. A crypto operation will be
+ distributed to the primary slave if its data length is equal to or bigger
+ than the designated threshold, otherwise it will be handled by the secondary
+ slave.
+
+ A typical usecase in this mode is with the QAT cryptodev as the primary and
+ a software cryptodev as the secondary slave. This may help applications to
+ process additional crypto workload than what the QAT cryptodev can handle on
+ its own, by making use of the available CPU cycles to deal with smaller
+ crypto workloads.
+
+ The threshold is set to 128 bytes by default. It can be updated by calling
+ function **rte_cryptodev_scheduler_option_set**. The parameter of
+ **option_type** must be **CDEV_SCHED_OPTION_THRESHOLD** and **option** should
+ point to a rte_cryptodev_scheduler_threshold_option structure filled with
+ appropriate threshold value. Please NOTE this threshold has be a power-of-2
+ unsigned integer.
+
+* **CDEV_SCHED_MODE_FAILOVER:**
+
+ *Initialization mode parameter*: **fail-over**
+
+ Fail-over mode, which works with 2 slaves, the primary slave and the
+ secondary slave. In this mode, the scheduler will enqueue the incoming
+ crypto operation burst to the primary slave. When one or more crypto
+ operations fail to be enqueued, then they will be enqueued to the secondary
+ slave.
diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index 75a08aab..12b6c4af 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -83,9 +83,9 @@ In order to enable this virtual crypto PMD, user must:
To use the PMD in an application, user must:
-* Call rte_eal_vdev_init("crypto_snow3g") within the application.
+* Call rte_vdev_init("crypto_snow3g") within the application.
-* Use --vdev="crypto_snow3g" in the EAL options, which will call rte_eal_vdev_init() internally.
+* Use --vdev="crypto_snow3g" in the EAL options, which will call rte_vdev_init() internally.
The following parameters (all optional) can be provided in the previous two calls:
@@ -100,4 +100,4 @@ Example:
.. code-block:: console
- ./l2fwd-crypto -c 40 -n 4 --vdev="crypto_snow3g,socket_id=1,max_nb_sessions=128"
+ ./l2fwd-crypto -l 6 -n 4 --vdev="crypto_snow3g,socket_id=1,max_nb_sessions=128"
diff --git a/doc/guides/cryptodevs/zuc.rst b/doc/guides/cryptodevs/zuc.rst
index 38d5068a..6deb11ab 100644
--- a/doc/guides/cryptodevs/zuc.rst
+++ b/doc/guides/cryptodevs/zuc.rst
@@ -91,9 +91,9 @@ In order to enable this virtual crypto PMD, user must:
To use the PMD in an application, user must:
-* Call rte_eal_vdev_init("crypto_zuc") within the application.
+* Call rte_vdev_init("crypto_zuc") within the application.
-* Use --vdev="crypto_zuc" in the EAL options, which will call rte_eal_vdev_init() internally.
+* Use --vdev="crypto_zuc" in the EAL options, which will call rte_vdev_init() internally.
The following parameters (all optional) can be provided in the previous two calls:
@@ -108,4 +108,4 @@ Example:
.. code-block:: console
- ./l2fwd-crypto -c 40 -n 4 --vdev="crypto_zuc,socket_id=1,max_nb_sessions=128"
+ ./l2fwd-crypto -l 6 -n 4 --vdev="crypto_zuc,socket_id=1,max_nb_sessions=128"
diff --git a/doc/guides/eventdevs/index.rst b/doc/guides/eventdevs/index.rst
new file mode 100644
index 00000000..fad869d6
--- /dev/null
+++ b/doc/guides/eventdevs/index.rst
@@ -0,0 +1,41 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Event Device Drivers
+====================
+
+The following are a list of event device PMDs, which can be used from an
+application trough the eventdev API.
+
+.. toctree::
+ :maxdepth: 2
+ :numbered:
+
+ sw
+ octeontx
diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
new file mode 100644
index 00000000..6697c544
--- /dev/null
+++ b/doc/guides/eventdevs/octeontx.rst
@@ -0,0 +1,151 @@
+.. BSD LICENSE
+ Copyright (C) Cavium networks Ltd. 2017.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Cavium networks nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+OCTEONTX SSOVF Eventdev Driver
+==============================
+
+The OCTEONTX SSOVF PMD (**librte_pmd_octeontx_ssovf**) provides poll mode
+eventdev driver support for the inbuilt event device found in the **Cavium OCTEONTX**
+SoC family as well as their virtual functions (VF) in SR-IOV context.
+
+More information can be found at `Cavium Networks Official Website
+<http://www.cavium.com/OCTEON-TX_ARM_Processors.html>`_.
+
+Features
+--------
+
+Features of the OCTEONTX SSOVF PMD are:
+
+- 64 Event queues
+- 32 Event ports
+- HW event scheduler
+- Supports 1M flows per event queue
+- Flow based event pipelining
+- Flow pinning support in flow based event pipelining
+- Queue based event pipelining
+- Supports ATOMIC, ORDERED, PARALLEL schedule types per flow
+- Event scheduling QoS based on event queue priority
+- Open system with configurable amount of outstanding events
+- HW accelerated dequeue timeout support to enable power management
+- SR-IOV VF
+
+Supported OCTEONTX SoCs
+-----------------------
+- CN83xx
+
+Prerequisites
+-------------
+
+There are three main pre-perquisites for executing SSOVF PMD on a OCTEONTX
+compatible board:
+
+1. **OCTEONTX Linux kernel PF driver for Network acceleration HW blocks**
+
+ The OCTEONTX Linux kernel drivers (including the required PF driver for the
+ SSOVF) are available on Github at `octeontx-kmod <https://github.com/caviumnetworks/octeontx-kmod>`_
+ along with build, install and dpdk usage instructions.
+
+2. **ARM64 Tool Chain**
+
+ For example, the *aarch64* Linaro Toolchain, which can be obtained from
+ `here <https://releases.linaro.org/components/toolchain/binaries/4.9-2017.01/aarch64-linux-gnu>`_.
+
+3. **Rootfile system**
+
+ Any *aarch64* supporting filesystem can be used. For example,
+ Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
+ from `<http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
+
+ As an alternative method, SSOVF PMD can also be executed using images provided
+ as part of SDK from Cavium. The SDK includes all the above prerequisites necessary
+ to bring up a OCTEONTX board.
+
+ SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_.
+
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
+
+Pre-Installation Configuration
+------------------------------
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following options can be modified in the ``config`` file.
+Please note that enabling debugging options may affect system performance.
+
+- ``CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF`` (default ``y``)
+
+ Toggle compilation of the ``librte_pmd_octeontx_ssovf`` driver.
+
+- ``CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG`` (default ``n``)
+
+ Toggle display of generic debugging messages
+
+Driver Compilation
+~~~~~~~~~~~~~~~~~~
+
+To compile the OCTEONTX SSOVF PMD for Linux arm64 gcc target, run the
+following ``make`` command:
+
+.. code-block:: console
+
+ cd <DPDK-source-directory>
+ make config T=arm64-thunderx-linuxapp-gcc install
+
+
+Initialization
+--------------
+
+The octeontx eventdev is exposed as a vdev device which consists of a set
+of SSO group and work-slot PCIe VF devices. On EAL initialization,
+SSO PCIe VF devices will be probed and then the vdev device can be created
+from the application code, or from the EAL command line based on
+the number of probed/bound SSO PCIe VF device to DPDK by
+
+* Invoking ``rte_vdev_init("event_octeontx")`` from the application
+
+* Using ``--vdev="event_octeontx"`` in the EAL options, which will call
+ rte_vdev_init() internally
+
+Example:
+
+.. code-block:: console
+
+ ./your_eventdev_application --vdev="event_octeontx"
+
+Limitations
+-----------
+
+Burst mode support
+~~~~~~~~~~~~~~~~~~
+
+Burst mode is not supported. Dequeue and Enqueue functions accepts only single
+event at a time.
+
diff --git a/doc/guides/eventdevs/sw.rst b/doc/guides/eventdevs/sw.rst
new file mode 100644
index 00000000..fb63c844
--- /dev/null
+++ b/doc/guides/eventdevs/sw.rst
@@ -0,0 +1,157 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Software Eventdev Poll Mode Driver
+==================================
+
+The software eventdev is an implementation of the eventdev API, that provides a
+wide range of the eventdev features. The eventdev relies on a CPU core to
+perform event scheduling.
+
+
+Features
+--------
+
+The software eventdev implements many features in the eventdev API;
+
+Queues
+ * Atomic
+ * Ordered
+ * Parallel
+ * Single-Link
+
+Ports
+ * Load balanced (for Atomic, Ordered, Parallel queues)
+ * Single Link (for single-link queues)
+
+Event Priorities
+ * Each event has a priority, which can be used to provide basic QoS
+
+
+Configuration and Options
+-------------------------
+
+The software eventdev is a vdev device, and as such can be created from the
+application code, or from the EAL command line:
+
+* Call ``rte_vdev_init("event_sw0")`` from the application
+
+* Use ``--vdev="event_sw0"`` in the EAL options, which will call
+ rte_vdev_init() internally
+
+Example:
+
+.. code-block:: console
+
+ ./your_eventdev_application --vdev="event_sw0"
+
+
+Scheduling Quanta
+~~~~~~~~~~~~~~~~~
+
+The scheduling quanta sets the number of events that the device attempts to
+schedule before returning to the application from the ``rte_event_schedule()``
+function. Note that is a *hint* only, and that fewer or more events may be
+scheduled in a given iteration.
+
+The scheduling quanta can be set using a string argument to the vdev
+create call:
+
+.. code-block:: console
+
+ --vdev="event_sw0,sched_quanta=64"
+
+
+Credit Quanta
+~~~~~~~~~~~~~
+
+The credit quanta is the number of credits that a port will fetch at a time from
+the instance's credit pool. Higher numbers will cause less overhead in the
+atomic credit fetch code, however it also reduces the overall number of credits
+in the system faster. A balanced number (eg 32) ensures that only small numbers
+of credits are pre-allocated at a time, while also mitigating performance impact
+of the atomics.
+
+Experimentation with higher values may provide minor performance improvements,
+at the cost of the whole system having less credits. On the other hand,
+reducing the quanta may cause measurable performance impact but provide the
+system with a higher number of credits at all times.
+
+A value of 32 seems a good balance however your specific application may
+benefit from a higher or reduced quanta size, experimentation is required to
+verify possible gains.
+
+.. code-block:: console
+
+ --vdev="event_sw0,credit_quanta=64"
+
+
+Limitations
+-----------
+
+The software eventdev implementation has a few limitations. The reason for
+these limitations is usually that the performance impact of supporting the
+feature would be significant.
+
+
+"All Types" Queues
+~~~~~~~~~~~~~~~~~~
+
+The software eventdev does not support creating queues that handle all types of
+traffic. An eventdev with this capability allows enqueueing Atomic, Ordered and
+Parallel traffic to the same queue, but scheduling each of them appropriately.
+
+The reason to not allow Atomic, Ordered and Parallel event types in the
+same queue is that it causes excessive branching in the code to enqueue packets
+to the queue, causing a significant performance impact.
+
+The ``RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES`` flag is not set in the
+``event_dev_cap`` field of the ``rte_event_dev_info`` struct for the software
+eventdev.
+
+Distributed Scheduler
+~~~~~~~~~~~~~~~~~~~~~
+
+The software eventdev is a centralized scheduler, requiring the
+``rte_event_schedule()`` function to be called by a CPU core to perform the
+required event distribution. This is not really a limitation but rather a
+design decision.
+
+The ``RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED`` flag is not set in the
+``event_dev_cap`` field of the ``rte_event_dev_info`` struct for the software
+eventdev.
+
+Dequeue Timeout
+~~~~~~~~~~~~~~~
+
+The eventdev API supports a timeout when dequeuing packets using the
+``rte_event_dequeue_burst`` function.
+This allows a core to wait for an event to arrive, or until ``timeout`` number
+of ticks have passed. Timeout ticks is not supported by the software eventdev
+for performance reasons.
diff --git a/doc/guides/faq/faq.rst b/doc/guides/faq/faq.rst
index 8d1ea6cc..308a2873 100644
--- a/doc/guides/faq/faq.rst
+++ b/doc/guides/faq/faq.rst
@@ -50,12 +50,12 @@ When you stop and restart the test application, it looks to see if the pages are
If you look in the directory, you will see ``n`` number of 2M pages files. If you specified 1024, you will see 1024 page files.
These are then placed in memory segments to get contiguous memory.
-If you need to change the number of pages, it is easier to first remove the pages. The tools/dpdk-setup.sh script provides an option to do this.
+If you need to change the number of pages, it is easier to first remove the pages. The usertools/dpdk-setup.sh script provides an option to do this.
See the "Quick Start Setup Script" section in the :ref:`DPDK Getting Started Guide <linux_gsg>` for more information.
-If I execute "l2fwd -c f -m 64 -n 3 -- -p 3", I get the following output, indicating that there are no socket 0 hugepages to allocate the mbuf and ring structures to?
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
+If I execute "l2fwd -l 0-3 -m 64 -n 3 -- -p 3", I get the following output, indicating that there are no socket 0 hugepages to allocate the mbuf and ring structures to?
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------
I have set up a total of 1024 Hugepages (that is, allocated 512 2M pages to each NUMA node).
@@ -66,19 +66,21 @@ To request memory to be reserved on a specific socket, please use the --socket-m
I am running a 32-bit DPDK application on a NUMA system, and sometimes the application initializes fine but cannot allocate memory. Why is that happening?
----------------------------------------------------------------------------------------------------------------------------------------------------------
-32-bit applications have limitations in terms of how much virtual memory is available, hence the number of hugepages they are able to allocate is also limited (1 GB per page size).
-If your system has a lot (>1 GB per page size) of hugepage memory, not all of it will be allocated.
+32-bit applications have limitations in terms of how much virtual memory is available, hence the number of hugepages they are able to allocate is also limited (1 GB size).
+If your system has a lot (>1 GB size) of hugepage memory, not all of it will be allocated.
Due to hugepages typically being allocated on a local NUMA node, the hugepages allocation the application gets during the initialization depends on which
NUMA node it is running on (the EAL does not affinitize cores until much later in the initialization process).
Sometimes, the Linux OS runs the DPDK application on a core that is located on a different NUMA node from DPDK master core and
therefore all the hugepages are allocated on the wrong socket.
-To avoid this scenario, either lower the amount of hugepage memory available to 1 GB per page size (or less), or run the application with taskset
+To avoid this scenario, either lower the amount of hugepage memory available to 1 GB size (or less), or run the application with taskset
affinitizing the application to a would-be master core.
For example, if your EAL coremask is 0xff0, the master core will usually be the first core in the coremask (0x10); this is what you have to supply to taskset::
- taskset 0x10 ./l2fwd -c 0xff0 -n 2
+ taskset 0x10 ./l2fwd -l 4-11 -n 2
+
+.. Note: Instead of '-c 0xff0' use the '-l 4-11' as a cleaner way to define lcores.
In this way, the hugepages have a greater chance of being allocated to the correct socket.
Additionally, a ``--socket-mem`` option could be used to ensure the availability of memory for each socket, so that if hugepages were allocated on
@@ -101,7 +103,7 @@ Yes, the option ``--log-level=`` accepts one of these numbers:
#define RTE_LOG_INFO 7U /* Informational. */
#define RTE_LOG_DEBUG 8U /* Debug-level messages. */
-It is also possible to change the maximum (and default level) at compile time
+It is also possible to change the default level at compile time
with ``CONFIG_RTE_LOG_LEVEL``.
diff --git a/doc/guides/freebsd_gsg/build_dpdk.rst b/doc/guides/freebsd_gsg/build_dpdk.rst
index 24a9f879..8bd9b6a1 100644
--- a/doc/guides/freebsd_gsg/build_dpdk.rst
+++ b/doc/guides/freebsd_gsg/build_dpdk.rst
@@ -119,7 +119,7 @@ The DPDK is composed of several directories:
* examples: Source code of DPDK applications
-* config, tools, scripts, mk: Framework-related makefiles, scripts and configuration
+* config, buildtools, mk: Framework-related makefiles, scripts and configuration
Installation of the DPDK Target Environments
--------------------------------------------
diff --git a/doc/guides/freebsd_gsg/build_sample_apps.rst b/doc/guides/freebsd_gsg/build_sample_apps.rst
index fffc4c01..9faa0e6e 100644
--- a/doc/guides/freebsd_gsg/build_sample_apps.rst
+++ b/doc/guides/freebsd_gsg/build_sample_apps.rst
@@ -119,7 +119,7 @@ The following is the list of options that can be given to the EAL:
.. code-block:: console
- ./rte-app -c COREMASK [-n NUM] [-b <domain:bus:devid.func>] \
+ ./rte-app -l CORELIST [-n NUM] [-b <domain:bus:devid.func>] \
[-r NUM] [-v] [--proc-type <primary|secondary|auto>]
.. note::
@@ -130,9 +130,10 @@ The following is the list of options that can be given to the EAL:
The EAL options for FreeBSD are as follows:
-* ``-c COREMASK``:
+* ``-c COREMASK`` or ``-l CORELIST``:
A hexadecimal bit mask of the cores to run on. Note that core numbering
- can change between platforms and should be determined beforehand.
+ can change between platforms and should be determined beforehand. The corelist
+ is a list of cores to use instead of a core mask.
* ``-n NUM``:
Number of memory channels per processor socket.
@@ -169,13 +170,13 @@ Other options, specific to Linux and are not supported under FreeBSD are as foll
Memory to allocate from hugepages, regardless of processor socket.
It is recommended that ``--socket-mem`` be used instead of this option.
-The ``-c`` option is mandatory; the others are optional.
+The ``-c`` or ``-l`` option is mandatory; the others are optional.
Copy the DPDK application binary to your target, then run the application
as follows (assuming the platform has four memory channels, and that cores 0-3
are present and are to be used for running the application)::
- ./helloworld -c f -n 4
+ ./helloworld -l 0-3 -n 4
.. note::
diff --git a/doc/guides/freebsd_gsg/install_from_ports.rst b/doc/guides/freebsd_gsg/install_from_ports.rst
index 81770291..67e63d27 100644
--- a/doc/guides/freebsd_gsg/install_from_ports.rst
+++ b/doc/guides/freebsd_gsg/install_from_ports.rst
@@ -111,7 +111,7 @@ compiled and run as below:
INSTALL-APP helloworld
INSTALL-MAP helloworld.map
- sudo ./build/helloworld -c F -n 2
+ sudo ./build/helloworld -l 0-3 -n 2
EAL: Contigmem driver has 2 buffers, each of size 1GB
EAL: Sysctl reports 8 cpus
diff --git a/doc/guides/howto/flow_bifurcation.rst b/doc/guides/howto/flow_bifurcation.rst
index 0d7226ae..61016a4f 100644
--- a/doc/guides/howto/flow_bifurcation.rst
+++ b/doc/guides/howto/flow_bifurcation.rst
@@ -126,7 +126,7 @@ The typical procedure to achieve this is as follows:
.. code-block:: console
- testpmd -c 0xff -n 4 -- -i -w 01:10.0 -w 01:10.1 --forward-mode=mac
+ testpmd -l 0-7 -n 4 -- -i -w 01:10.0 -w 01:10.1 --forward-mode=mac
In this example, traffic matching the rules will go through the VF by matching
the filter rule. All other traffic, not matching the rules, will go through
@@ -286,7 +286,7 @@ The typical procedure to achieve this is as follows:
.. code-block:: console
- testpmd -c 0xff -n 4 -- -i -w 01:10.0 -w 01:10.1 --forward-mode=mac
+ testpmd -l 0-7 -n 4 -- -i -w 01:10.0 -w 01:10.1 --forward-mode=mac
.. note::
diff --git a/doc/guides/howto/img/pvp_2nics.svg b/doc/guides/howto/img/pvp_2nics.svg
new file mode 100644
index 00000000..517a8008
--- /dev/null
+++ b/doc/guides/howto/img/pvp_2nics.svg
@@ -0,0 +1,556 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="127.46428mm"
+ height="139.41411mm"
+ viewBox="0 0 451.64508 493.987"
+ id="svg2"
+ version="1.1"
+ inkscape:version="0.92pre2 r"
+ sodipodi:docname="pvp_2nics.svg"
+ inkscape:export-filename="/home/max/Pictures/dpdk/pvp/pvp.png"
+ inkscape:export-xdpi="90"
+ inkscape:export-ydpi="90">
+ <defs
+ id="defs4">
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker4760"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4762"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker4642"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4644"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker10370"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path10372"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker10306"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lend">
+ <path
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ id="path10308"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker9757"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lend"
+ inkscape:collect="always">
+ <path
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ id="path9759"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path4224"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path4227"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-1"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4227-27"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart-9"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4224-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,10,0)" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker9757-0"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lend">
+ <path
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ id="path9759-6"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4224-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-62"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4227-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker10370-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path10372-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,10,0)" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker9757-2"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow1Lend">
+ <path
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ style="fill:#ff0000;fill-opacity:1;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.00000003pt;stroke-opacity:1"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ id="path9759-0"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart-9-2"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4224-3-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-1-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4227-27-5"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="0.49497475"
+ inkscape:cx="206.7485"
+ inkscape:cy="227.93958"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:object-nodes="true"
+ inkscape:window-width="1916"
+ inkscape:window-height="1040"
+ inkscape:window-x="0"
+ inkscape:window-y="38"
+ inkscape:window-maximized="0"
+ inkscape:snap-grids="true"
+ inkscape:snap-to-guides="true"
+ inkscape:snap-others="false"
+ inkscape:snap-nodes="false"
+ inkscape:snap-global="false" />
+ <metadata
+ id="metadata7">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(-5.3301459,-7.348317)">
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:1.78969002;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect4140"
+ width="434.38919"
+ height="75.295639"
+ x="21.691195"
+ y="404.59354" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="421.47873"
+ y="501.3353"
+ id="text4142"><tspan
+ sodipodi:role="line"
+ id="tspan4144"
+ x="421.47873"
+ y="501.3353"
+ style="font-size:25px;line-height:125%">TE</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:1.46599996;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect4146"
+ width="92.934036"
+ height="32.324883"
+ x="182.57764"
+ y="372.03574" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="183.5878"
+ y="397.28958"
+ id="text4148"><tspan
+ sodipodi:role="line"
+ id="tspan4150"
+ x="183.5878"
+ y="397.28958"
+ style="font-size:25px">10G NIC</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="166.92024"
+ y="451.33276"
+ id="text4152"><tspan
+ sodipodi:role="line"
+ id="tspan4154"
+ x="166.92024"
+ y="451.33276">Moongen</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:1.39882457;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect4156"
+ width="449.73071"
+ height="244.32167"
+ x="6.0295582"
+ y="29.046324" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="405.31628"
+ y="25.048317"
+ id="text4158"><tspan
+ sodipodi:role="line"
+ id="tspan4160"
+ x="405.31628"
+ y="25.048317"
+ style="font-size:25px">DUT</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:1.14168489;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect4162"
+ width="418.69415"
+ height="107.50462"
+ x="19.038134"
+ y="41.044758" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="395.38812"
+ y="66.496857"
+ id="text4164"><tspan
+ sodipodi:role="line"
+ id="tspan4166"
+ x="395.38812"
+ y="66.496857"
+ style="font-size:25px">VM</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:1.46599996;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect4146-3"
+ width="92.934036"
+ height="32.324883"
+ x="183.0827"
+ y="274.05093" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="184.09286"
+ y="299.30475"
+ id="text4148-6"><tspan
+ sodipodi:role="line"
+ id="tspan4150-7"
+ x="184.09286"
+ y="299.30475"
+ style="font-size:25px">10G NIC</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:2.4804399;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect4189"
+ width="398.00476"
+ height="65.451302"
+ x="26.901583"
+ y="82.647781" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:25px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="30.683046"
+ y="108.31288"
+ id="text4191"><tspan
+ sodipodi:role="line"
+ id="tspan4193"
+ x="30.683046"
+ y="108.31288">TestPMD</tspan><tspan
+ sodipodi:role="line"
+ x="30.683046"
+ y="139.56288"
+ id="tspan10476">(macswap)</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:2.49124122;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect4189-5"
+ width="397.22263"
+ height="66.152573"
+ x="29.743357"
+ y="207.6543" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:25px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="42.720772"
+ y="231.14902"
+ id="text4191-3"><tspan
+ sodipodi:role="line"
+ id="tspan4193-5"
+ x="42.720772"
+ y="231.14902">TestPMD </tspan><tspan
+ sodipodi:role="line"
+ x="42.720772"
+ y="262.39902"
+ id="tspan9747">(io)</tspan></text>
+ <path
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.97838062px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend)"
+ d="M 202.56669,371.44487 V 308.37034"
+ id="path4218"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.97297633px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart)"
+ d="M 252.03098,369.63533 V 307.25568"
+ id="path4218-9"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.92982113px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend-1)"
+ d="M 198.63811,207.44389 V 150.47507"
+ id="path4218-0"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.95360273px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart-9)"
+ d="M 255.56859,206.9303 V 147.01008"
+ id="path4218-9-6"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:3, 1;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker9757)"
+ d="M 199.50513,271.00921 V 207.3696"
+ id="path9749"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:3, 1;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#marker10370)"
+ d="M 255.56859,270.56991 V 206.9303"
+ id="path9749-2"
+ inkscape:connector-curvature="0" />
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:1.46599996;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect4146-36"
+ width="92.934036"
+ height="32.324883"
+ x="304.05591"
+ y="372.52954" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="305.06607"
+ y="397.78339"
+ id="text4148-7"><tspan
+ sodipodi:role="line"
+ id="tspan4150-5"
+ x="305.06607"
+ y="397.78339"
+ style="font-size:25px">10G NIC</tspan></text>
+ <rect
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:1.46599996;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect4146-3-3"
+ width="92.934036"
+ height="32.324883"
+ x="306.07623"
+ y="273.53461" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ x="307.0864"
+ y="298.78842"
+ id="text4148-6-5"><tspan
+ sodipodi:role="line"
+ id="tspan4150-7-6"
+ x="307.0864"
+ y="298.78842"
+ style="font-size:25px">10G NIC</tspan></text>
+ <path
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.97838062px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend-62)"
+ d="M 323.7504,370.24835 V 307.17382"
+ id="path4218-1"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.97297633px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart-6)"
+ d="M 373.21469,368.43881 V 306.05916"
+ id="path4218-9-8"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.92982113px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend-1-7)"
+ d="M 324.93036,207.24894 V 150.28012"
+ id="path4218-0-9"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.95360273px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart-9-2)"
+ d="M 381.86084,206.73535 V 146.81513"
+ id="path4218-9-6-2"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:3, 1;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker9757-2)"
+ d="M 325.79738,270.81426 V 207.17465"
+ id="path9749-28"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:3, 1;stroke-dashoffset:0;stroke-opacity:1;marker-start:url(#marker10370-7)"
+ d="M 381.86084,270.37496 V 206.73535"
+ id="path9749-2-9"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#ff0000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:2, 1;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker4642)"
+ d="M 198.57143,148.79077 V 95.93363 h 182.85714 v 50"
+ id="path3748"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;fill-rule:evenodd;stroke:#ff0000;stroke-width:1.01005316;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:2.02010632, 1.01005316;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker4760)"
+ d="m 325.70774,148.78714 v -32.84999 h -70.7012 v 30.70761"
+ id="path4634"
+ inkscape:connector-curvature="0" />
+ </g>
+</svg>
diff --git a/doc/guides/howto/img/use_models_for_running_dpdk_in_containers.svg b/doc/guides/howto/img/use_models_for_running_dpdk_in_containers.svg
new file mode 100644
index 00000000..662c2266
--- /dev/null
+++ b/doc/guides/howto/img/use_models_for_running_dpdk_in_containers.svg
@@ -0,0 +1,398 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export user_models_for_running_dpdk_in_containers.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="10.6194in" height="4.55593in"
+ viewBox="0 0 764.596 328.027" xml:space="preserve" color-interpolation-filters="sRGB" class="st19">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#ffffff;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {fill:#000000;font-family:Calibri;font-size:0.833336em}
+ .st5 {fill:none;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st6 {fill:none;stroke:#c7c8c8;stroke-width:0.25}
+ .st7 {fill:#d8d8d8;stroke:#c7c8c8;stroke-width:0.25}
+ .st8 {fill:none;stroke:none;stroke-width:0.25}
+ .st9 {fill:#000000;font-family:Calibri;font-size:1.00001em;font-style:italic}
+ .st10 {fill:#ed7d31;stroke:#c7c8c8;stroke-width:0.25}
+ .st11 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
+ .st12 {fill:#a5a5a5;stroke:#c7c8c8;stroke-width:0.25}
+ .st13 {marker-end:url(#mrkr4-61);marker-start:url(#mrkr4-59);stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st14 {fill:#000000;fill-opacity:1;stroke:#000000;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st15 {font-size:1em}
+ .st16 {fill:none;filter:url(#filter_2);stroke:#5b9bd5;stroke-dasharray:7,5;stroke-opacity:0.22}
+ .st17 {fill:none;stroke:#ff0000;stroke-dasharray:7,5;stroke-width:1}
+ .st18 {stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st19 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend4">
+ <path d="M 2 1 L 0 0 L 2 -1 L 2 1 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr4-59" class="st14" v:arrowType="4" v:arrowSize="2" v:setback="6.68" refX="6.68" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend4" transform="scale(3.52) "/>
+ </marker>
+ <marker id="mrkr4-61" class="st14" v:arrowType="4" v:arrowSize="2" v:setback="7.04" refX="-7.04" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend4" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ </defs>
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <v:layer v:name="Connector" v:index="0"/>
+ <g id="shape1-1" v:mID="1" v:groupContext="shape" transform="translate(146.2,-258.819)">
+ <title>Rectangle</title>
+ <desc>Container</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="34.05" cy="300.937" width="68.1" height="54.1807"/>
+ <g id="shadow1-2" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="273.847" width="68.1" height="54.1807" class="st2"/>
+ </g>
+ <rect x="0" y="273.847" width="68.1" height="54.1807" class="st3"/>
+ <text x="14.04" y="291.94" class="st4" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>Container<v:newlineChar/><v:newlineChar/></text> </g>
+ <g id="shape3-7" v:mID="3" v:groupContext="shape" transform="translate(18.25,-169.971)">
+ <title>Rectangle.3</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow3-8" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="270.699" width="306" height="57.3286" rx="13.5" ry="13.5" class="st5"/>
+ </g>
+ <rect x="0" y="270.699" width="306" height="57.3286" rx="13.5" ry="13.5" class="st6"/>
+ </g>
+ <g id="shape4-12" v:mID="4" v:groupContext="shape" transform="translate(18.25,-61)">
+ <title>Rectangle.4</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow4-13" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="256.027" width="306" height="72" rx="13.5" ry="13.5" class="st2"/>
+ </g>
+ <rect x="0" y="256.027" width="306" height="72" rx="13.5" ry="13.5" class="st7"/>
+ </g>
+ <g id="shape5-17" v:mID="5" v:groupContext="shape" transform="translate(17.65,-202.75)">
+ <title>Sheet.5</title>
+ <desc>Host kernel</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="34.05" cy="316.777" width="68.1" height="22.5"/>
+ <rect x="0" y="305.527" width="68.1" height="22.5" class="st8"/>
+ <text x="6.55" y="320.38" class="st9" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>Host kernel</text> </g>
+ <g id="shape6-20" v:mID="6" v:groupContext="shape" transform="translate(0.25,-110.5)">
+ <title>Sheet.6</title>
+ <desc>NIC</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="34.05" cy="316.777" width="68.1" height="22.5"/>
+ <rect x="0" y="305.527" width="68.1" height="22.5" class="st8"/>
+ <text x="25.54" y="320.38" class="st9" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>NIC</text> </g>
+ <g id="shape7-23" v:mID="7" v:groupContext="shape" transform="translate(67.75,-99.3)">
+ <title>Rectangle.7</title>
+ <desc>PF</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="14.625" cy="316.777" width="29.26" height="22.5"/>
+ <g id="shadow7-24" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="305.527" width="29.25" height="22.5" class="st2"/>
+ </g>
+ <rect x="0" y="305.527" width="29.25" height="22.5" class="st10"/>
+ <text x="9.74" y="319.78" class="st11" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>PF</text> </g>
+ <g id="shape8-29" v:mID="8" v:groupContext="shape" transform="translate(165.625,-99.3)">
+ <title>Rectangle.8</title>
+ <desc>VF</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="14.625" cy="316.777" width="29.26" height="22.5"/>
+ <g id="shadow8-30" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="305.527" width="29.25" height="22.5" class="st2"/>
+ </g>
+ <rect x="0" y="305.527" width="29.25" height="22.5" class="st10"/>
+ <text x="9.49" y="319.78" class="st11" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>VF</text> </g>
+ <g id="shape10-35" v:mID="10" v:groupContext="shape" transform="translate(67.75,-70)">
+ <title>Rectangle.10</title>
+ <desc>Hardware virtual switch</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="112.5" cy="316.777" width="225" height="22.5"/>
+ <g id="shadow10-36" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="305.527" width="225" height="22.5" class="st2"/>
+ </g>
+ <rect x="0" y="305.527" width="225" height="22.5" class="st12"/>
+ <text x="64.07" y="319.78" class="st11" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>Hardware virtual switch</text> </g>
+ <g id="shape14-41" v:mID="14" v:groupContext="shape" transform="translate(238.15,-258.7)">
+ <title>Rectangle.14</title>
+ <desc>Container</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="34.05" cy="300.937" width="68.1" height="54.1807"/>
+ <g id="shadow14-42" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="273.847" width="68.1" height="54.1807" class="st2"/>
+ </g>
+ <rect x="0" y="273.847" width="68.1" height="54.1807" class="st3"/>
+ <text x="14.04" y="291.94" class="st4" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>Container<v:newlineChar/><v:newlineChar/></text> </g>
+ <g id="shape15-47" v:mID="15" v:groupContext="shape" transform="translate(257.575,-99.2)">
+ <title>Rectangle.15</title>
+ <desc>VF</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="14.625" cy="316.777" width="29.26" height="22.5"/>
+ <g id="shadow15-48" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="305.527" width="29.25" height="22.5" class="st2"/>
+ </g>
+ <rect x="0" y="305.527" width="29.25" height="22.5" class="st10"/>
+ <text x="9.49" y="319.78" class="st11" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>VF</text> </g>
+ <g id="shape16-53" v:mID="16" v:groupContext="shape" v:layerMember="0" transform="translate(263.2,-258.7)">
+ <title>Dynamic connector.16</title>
+ <path d="M9 334.71 L9 335.07 L9 457.99" class="st13"/>
+ </g>
+ <g id="shape18-62" v:mID="18" v:groupContext="shape" transform="translate(54.25,-180.25)">
+ <title>Ellipse</title>
+ <desc>PF driver</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="28.125" cy="316.777" width="49.22" height="19.6875"/>
+ <g id="shadow18-63" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 316.78 A28.125 11.25 0 0 1 56.25 316.78 A28.125 11.25 0 1 1 0 316.78 Z" class="st2"/>
+ </g>
+ <path d="M0 316.78 A28.125 11.25 0 0 1 56.25 316.78 A28.125 11.25 0 1 1 0 316.78 Z" class="st10"/>
+ <text x="10.11" y="319.78" class="st11" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>PF driver</text> </g>
+ <g id="shape19-68" v:mID="19" v:groupContext="shape" v:layerMember="0" transform="translate(73.375,-180.25)">
+ <title>Dynamic connector.19</title>
+ <path d="M9 334.71 L9 335.07 L9 379.44" class="st13"/>
+ </g>
+ <g id="shape20-75" v:mID="20" v:groupContext="shape" transform="translate(152.125,-263.44)">
+ <title>Ellipse.20</title>
+ <desc>DPDK</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="28.125" cy="316.777" width="49.22" height="19.6875"/>
+ <g id="shadow20-76" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 316.78 A28.125 11.25 0 0 1 56.25 316.78 A28.125 11.25 0 1 1 0 316.78 Z" class="st2"/>
+ </g>
+ <path d="M0 316.78 A28.125 11.25 0 0 1 56.25 316.78 A28.125 11.25 0 1 1 0 316.78 Z" class="st10"/>
+ <text x="16.79" y="319.78" class="st11" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>DPDK</text> </g>
+ <g id="shape21-81" v:mID="21" v:groupContext="shape" v:layerMember="0" transform="translate(171.25,-258.819)">
+ <title>Dynamic connector.21</title>
+ <path d="M9 334.71 L9 335.07 L9 458.01" class="st13"/>
+ </g>
+ <g id="shape22-88" v:mID="22" v:groupContext="shape" transform="translate(243.25,-263.44)">
+ <title>Ellipse.22</title>
+ <desc>DPDK</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="28.125" cy="316.777" width="49.22" height="19.6875"/>
+ <g id="shadow22-89" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 316.78 A28.125 11.25 0 0 1 56.25 316.78 A28.125 11.25 0 1 1 0 316.78 Z" class="st2"/>
+ </g>
+ <path d="M0 316.78 A28.125 11.25 0 0 1 56.25 316.78 A28.125 11.25 0 1 1 0 316.78 Z" class="st10"/>
+ <text x="16.79" y="319.78" class="st11" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>DPDK</text> </g>
+ <g id="shape23-94" v:mID="23" v:groupContext="shape" transform="translate(395.65,-254.5)">
+ <title>Rectangle.23</title>
+ <desc>Virtual Appliance</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="34.05" cy="300.937" width="68.1" height="54.1807"/>
+ <g id="shadow23-95" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="273.847" width="68.1" height="54.1807" class="st2"/>
+ </g>
+ <rect x="0" y="273.847" width="68.1" height="54.1807" class="st3"/>
+ <text x="20.48" y="297.94" class="st4" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>Virtual <tspan
+ x="13.98" dy="1.2em" class="st15">Appliance</tspan></text> </g>
+ <g id="shape25-101" v:mID="25" v:groupContext="shape" transform="translate(476.65,-254.681)">
+ <title>Rectangle.25</title>
+ <desc>VM + Container</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="34.05" cy="300.937" width="68.1" height="54.1807"/>
+ <g id="shadow25-102" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="273.847" width="68.1" height="54.1807" class="st2"/>
+ </g>
+ <rect x="0" y="273.847" width="68.1" height="54.1807" class="st3"/>
+ <text x="23.32" y="297.94" class="st4" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>VM + <tspan x="14.04"
+ dy="1.2em" class="st15">Container</tspan></text> </g>
+ <g id="shape27-108" v:mID="27" v:groupContext="shape" transform="translate(566.65,-254.681)">
+ <title>Rectangle.27</title>
+ <desc>Container</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="34.05" cy="300.937" width="68.1" height="54.1807"/>
+ <g id="shadow27-109" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="273.847" width="68.1" height="54.1807" class="st2"/>
+ </g>
+ <rect x="0" y="273.847" width="68.1" height="54.1807" class="st3"/>
+ <text x="14.04" y="291.94" class="st4" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>Container<v:newlineChar/><v:newlineChar/></text> </g>
+ <g id="shape28-114" v:mID="28" v:groupContext="shape" transform="translate(570.625,-261.431)">
+ <title>Ellipse.28</title>
+ <desc>DPDK</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="28.125" cy="316.777" width="49.22" height="19.6875"/>
+ <g id="shadow28-115" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 316.78 A28.125 11.25 0 0 1 56.25 316.78 A28.125 11.25 0 1 1 0 316.78 Z" class="st2"/>
+ </g>
+ <path d="M0 316.78 A28.125 11.25 0 0 1 56.25 316.78 A28.125 11.25 0 1 1 0 316.78 Z" class="st10"/>
+ <text x="16.79" y="319.78" class="st11" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>DPDK</text> </g>
+ <g id="shape29-120" v:mID="29" v:groupContext="shape" transform="translate(405.25,-110.5)">
+ <title>Rectangle.29</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow29-121" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="270.699" width="346.5" height="57.3286" rx="13.5" ry="13.5" class="st5"/>
+ </g>
+ <rect x="0" y="270.699" width="346.5" height="57.3286" rx="13.5" ry="13.5" class="st6"/>
+ </g>
+ <g id="shape30-125" v:mID="30" v:groupContext="shape" transform="translate(405.25,-142)">
+ <title>Sheet.30</title>
+ <desc>Host kernel</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="34.05" cy="316.777" width="68.1" height="22.5"/>
+ <rect x="0" y="305.527" width="68.1" height="22.5" class="st8"/>
+ <text x="6.55" y="320.38" class="st9" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>Host kernel</text> </g>
+ <g id="shape31-128" v:mID="31" v:groupContext="shape" transform="translate(681.417,-205)">
+ <title>Rectangle.31</title>
+ <desc>vSwitch or vRouter</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="34.05" cy="276.277" width="68.1" height="103.5"/>
+ <g id="shadow31-129" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="224.527" width="68.1" height="103.5" class="st2"/>
+ </g>
+ <rect x="0" y="224.527" width="68.1" height="103.5" class="st3"/>
+ <text x="18.36" y="255.28" class="st4" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>vSwitch<v:newlineChar/><tspan
+ x="29.67" dy="1.2em" class="st15">or<v:newlineChar/></tspan><tspan x="17.91" dy="1.2em" class="st15">vRouter</tspan><v:newlineChar/><v:newlineChar/></text> </g>
+ <g id="shape32-136" v:mID="32" v:groupContext="shape" transform="translate(687.342,-214)">
+ <title>Ellipse.32</title>
+ <desc>DPDK</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="28.125" cy="316.777" width="49.22" height="19.6875"/>
+ <g id="shadow32-137" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 316.78 A28.125 11.25 0 0 1 56.25 316.78 A28.125 11.25 0 1 1 0 316.78 Z" class="st2"/>
+ </g>
+ <path d="M0 316.78 A28.125 11.25 0 0 1 56.25 316.78 A28.125 11.25 0 1 1 0 316.78 Z" class="st10"/>
+ <text x="16.79" y="319.78" class="st11" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>DPDK</text> </g>
+ <g id="shape34-142" v:mID="34" v:groupContext="shape" v:layerMember="0" transform="translate(429.7,-254.5)">
+ <title>Dynamic connector</title>
+ <path d="M0 334.71 L0 335.07 L0 364.03 L244.68 364.03" class="st13"/>
+ </g>
+ <g id="shape35-149" v:mID="35" v:groupContext="shape" v:layerMember="0" transform="translate(510.7,-254.681)">
+ <title>Dynamic connector.35</title>
+ <path d="M0 334.71 L0 335.07 L0 355.21 L163.68 355.21" class="st13"/>
+ </g>
+ <g id="shape36-156" v:mID="36" v:groupContext="shape" v:layerMember="0" transform="translate(600.7,-254.681)">
+ <title>Dynamic connector.36</title>
+ <path d="M0 334.71 L0 335.07 L0 346.21 L73.68 346.21" class="st13"/>
+ </g>
+ <g id="shape37-163" v:mID="37" v:groupContext="shape" transform="translate(557.933,-182.5)">
+ <title>Rectangle.37</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow37-164" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 328.03 L202.82 328.03 L202.82 184.03 L0 184.03 L0 328.03 Z" class="st16"/>
+ </g>
+ <path d="M0 328.03 L202.82 328.03 L202.82 184.03 L0 184.03 L0 328.03 Z" class="st17"/>
+ </g>
+ <g id="shape38-168" v:mID="38" v:groupContext="shape" transform="translate(676.9,-72.25)">
+ <title>Rectangle.38</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow38-169" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="305.527" width="74.85" height="22.5" rx="11.25" ry="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="305.527" width="74.85" height="22.5" rx="11.25" ry="11.25" class="st7"/>
+ </g>
+ <g id="shape39-173" v:mID="39" v:groupContext="shape" transform="translate(686.2,-72.25)">
+ <title>Sheet.39</title>
+ <desc>NIC</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="28.125" cy="316.777" width="56.25" height="22.5"/>
+ <rect x="0" y="305.527" width="56.25" height="22.5" class="st8"/>
+ <text x="19.61" y="320.38" class="st9" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>NIC</text> </g>
+ <g id="shape41-176" v:mID="41" v:groupContext="shape" v:layerMember="0" transform="translate(723.896,-205)">
+ <title>Dynamic connector.41</title>
+ <path d="M-8.5 334.71 L-8.5 335.07 L-9.5 431.24" class="st13"/>
+ </g>
+ <g id="shape42-183" v:mID="42" v:groupContext="shape" v:layerMember="0" transform="translate(382.75,-317.5)">
+ <title>Dynamic connector.42</title>
+ <path d="M-9 328.03 L-9 589.03" class="st18"/>
+ </g>
+ <g id="shape43-186" v:mID="43" v:groupContext="shape" transform="translate(161.65,-0.25)">
+ <title>Sheet.43</title>
+ <desc>(1) Slicing</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="34.05" cy="316.777" width="68.1" height="22.5"/>
+ <rect x="0" y="305.527" width="68.1" height="22.5" class="st8"/>
+ <text x="10.5" y="320.38" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(1) Slicing</text> </g>
+ <g id="shape44-189" v:mID="44" v:groupContext="shape" transform="translate(553.75,-0.25)">
+ <title>Sheet.44</title>
+ <desc>(2) Aggregation</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="44.025" cy="316.777" width="88.05" height="22.5"/>
+ <rect x="0" y="305.527" width="88.05" height="22.5" class="st8"/>
+ <text x="5.7" y="320.38" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(2) Aggregation</text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/howto/img/vf_daemon_overview.svg b/doc/guides/howto/img/vf_daemon_overview.svg
new file mode 100644
index 00000000..d4a47234
--- /dev/null
+++ b/doc/guides/howto/img/vf_daemon_overview.svg
@@ -0,0 +1,440 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!--
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# - Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# - Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# - Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+# OF THE POSSIBILITY OF SUCH DAMAGE.
+-->
+
+<!-- Generated by Microsoft Visio, SVG Export vf_daemon_overview.svg Page-1 -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="653.98083"
+ height="346.37814"
+ viewBox="0 0 523.18544 277.10257"
+ xml:space="preserve"
+ class="st16"
+ id="svg3406"
+ version="1.1"
+ inkscape:version="0.92.1 r15371"
+ sodipodi:docname="vf_daemon_overview.svg"
+ style="font-size:12px;overflow:visible;color-interpolation-filters:sRGB;fill:none;fill-rule:evenodd;stroke-linecap:square;stroke-miterlimit:3"><metadata
+ id="metadata3652"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><sodipodi:namedview
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1"
+ objecttolerance="10"
+ gridtolerance="10"
+ guidetolerance="10"
+ inkscape:pageopacity="0"
+ inkscape:pageshadow="2"
+ inkscape:window-width="1920"
+ inkscape:window-height="1003"
+ id="namedview3650"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:zoom="1.683916"
+ inkscape:cx="370.95135"
+ inkscape:cy="160.84375"
+ inkscape:window-x="-9"
+ inkscape:window-y="-9"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="svg3406" /><style
+ type="text/css"
+ id="style3408">
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.25;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.25}
+ .st3 {fill:#4f87bb;stroke:#40709c;stroke-width:0.75}
+ .st4 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
+ .st5 {fill:url(#grad0-11);stroke:#4f87bb;stroke-width:0.75}
+ .st6 {fill:#4f87bb;font-family:Calibri;font-size:0.833336em}
+ .st7 {fill:#759fcc;fill-opacity:0.25;filter:url(#filter_2);stroke:#759fcc;stroke-opacity:0.25}
+ .st8 {fill:#668bb3;stroke:#547395;stroke-width:0.75}
+ .st9 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st10 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st11 {fill:#759fcc;fill-opacity:0.22;filter:url(#filter_2);stroke:#759fcc;stroke-opacity:0.22}
+ .st12 {fill:#759fcc;stroke:#c7c8c8;stroke-width:0.25}
+ .st13 {fill:url(#grad0-40);stroke:#a6b6cd;stroke-width:0.75}
+ .st14 {fill:#70ad47;fill-opacity:0.25;filter:url(#filter_2);stroke:#70ad47;stroke-opacity:0.25}
+ .st15 {fill:#61973d;stroke:#507e31;stroke-width:0.75}
+ .st16 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ </style><defs
+ id="Patterns_And_Gradients"><linearGradient
+ id="grad0-11"
+ x1="-0.24584444"
+ y1="740.83429"
+ x2="167.49742"
+ y2="740.83429"
+ gradientTransform="scale(1.5253548,0.65558519)"
+ gradientUnits="userSpaceOnUse"><stop
+ offset="0"
+ stop-color="#e9eff7"
+ stop-opacity="1"
+ id="stop3412" /><stop
+ offset="0.24"
+ stop-color="#f4f7fb"
+ stop-opacity="1"
+ id="stop3414" /><stop
+ offset="0.54"
+ stop-color="#feffff"
+ stop-opacity="1"
+ id="stop3416" /></linearGradient><linearGradient
+ id="grad0-40"
+ x1="0"
+ y1="0"
+ x2="1"
+ y2="0"
+ gradientTransform="rotate(60,0.5,0.5)"><stop
+ offset="0"
+ stop-color="#f3f6fa"
+ stop-opacity="1"
+ id="stop3419" /><stop
+ offset="0.24"
+ stop-color="#f9fafc"
+ stop-opacity="1"
+ id="stop3421" /><stop
+ offset="0.54"
+ stop-color="#feffff"
+ stop-opacity="1"
+ id="stop3423" /></linearGradient><linearGradient
+ id="grad0-40-2"
+ x1="0"
+ y1="0"
+ x2="1"
+ y2="0"
+ gradientTransform="rotate(60,0.5,0.5)"><stop
+ offset="0"
+ stop-color="#f3f6fa"
+ stop-opacity="1"
+ id="stop3419-2" /><stop
+ offset="0.24"
+ stop-color="#f9fafc"
+ stop-opacity="1"
+ id="stop3421-8" /><stop
+ offset="0.54"
+ stop-color="#feffff"
+ stop-opacity="1"
+ id="stop3423-0" /></linearGradient><filter
+ style="color-interpolation-filters:sRGB"
+ id="filter_2-6"><feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur3427-3" /></filter><filter
+ style="color-interpolation-filters:sRGB"
+ id="filter4802"><feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur4800" /></filter><filter
+ style="color-interpolation-filters:sRGB"
+ id="filter4810"><feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur4808" /></filter><filter
+ style="color-interpolation-filters:sRGB"
+ id="filter_2-1"><feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur3427-8" /></filter><linearGradient
+ inkscape:collect="always"
+ xlink:href="#grad0-11"
+ id="linearGradient5846"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(1.2751142,0,0,0.85239422,296.69995,-509.84788)"
+ x1="-0.24584444"
+ y1="740.83429"
+ x2="167.49742"
+ y2="740.83429" /><linearGradient
+ inkscape:collect="always"
+ xlink:href="#grad0-40"
+ id="linearGradient5848"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="scale(2.9084098,0.3438305)"
+ x1="-0.12893644"
+ y1="1717.1688"
+ x2="28.140806"
+ y2="1717.1688" /><linearGradient
+ inkscape:collect="always"
+ xlink:href="#grad0-40"
+ id="linearGradient5917"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="scale(2.9084098,0.3438305)"
+ x1="-0.12893644"
+ y1="1717.1688"
+ x2="28.140806"
+ y2="1717.1688" /><linearGradient
+ inkscape:collect="always"
+ xlink:href="#grad0-11"
+ id="linearGradient6028"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="matrix(1.2751142,0,0,0.85239422,14.0251,-510.3054)"
+ x1="-0.24584444"
+ y1="740.83429"
+ x2="167.49742"
+ y2="740.83429" /><linearGradient
+ inkscape:collect="always"
+ xlink:href="#grad0-40"
+ id="linearGradient6030"
+ gradientUnits="userSpaceOnUse"
+ gradientTransform="scale(2.9084098,0.3438305)"
+ x1="-0.12893644"
+ y1="1717.1688"
+ x2="28.140806"
+ y2="1717.1688" /></defs><defs
+ id="Filters"><filter
+ id="filter_2"
+ style="color-interpolation-filters:sRGB"><feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur3427" /></filter></defs><flowRoot
+ xml:space="preserve"
+ id="flowRoot5059"
+ style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ transform="translate(83.77187,-3.3273299)"><flowRegion
+ id="flowRegion5061"><rect
+ id="rect5063"
+ width="319.13776"
+ height="378.76611"
+ x="246.91183"
+ y="-24.140537" /></flowRegion><flowPara
+ id="flowPara5065" /></flowRoot><rect
+ x="310.9368"
+ y="41.112034"
+ width="185.48367"
+ height="52.464527"
+ class="st3"
+ id="rect3441-6"
+ style="font-size:medium;color-interpolation-filters:sRGB;fill:#4f87bb;fill-rule:evenodd;stroke:#40709c;stroke-width:0.59376031;stroke-linecap:square;stroke-miterlimit:3" /><rect
+ style="font-size:medium;color-interpolation-filters:sRGB;fill:url(#linearGradient5846);fill-rule:evenodd;stroke:#4f87bb;stroke-width:0.78190857;stroke-linecap:square;stroke-miterlimit:3"
+ id="rect3453-5"
+ class="st5"
+ height="142.00824"
+ width="213.26486"
+ y="122.12257"
+ x="296.69995" /><rect
+ style="font-size:medium;color-interpolation-filters:sRGB;fill:#668bb3;fill-rule:evenodd;stroke:#547395;stroke-width:0.81434548;stroke-linecap:square;stroke-miterlimit:3"
+ id="rect3465-8"
+ class="st8"
+ height="107.19906"
+ width="191.24162"
+ y="148.73914"
+ x="303.27353" /><rect
+ style="font-size:medium;color-interpolation-filters:sRGB;fill:#5b9bd5;fill-rule:evenodd;stroke:#c7c8c8;stroke-width:0.30626383;stroke-linecap:square;stroke-miterlimit:3"
+ id="rect3477-1"
+ class="st10"
+ height="37.991375"
+ width="99.433281"
+ y="201.63286"
+ x="345.86914" /><g
+ style="font-size:medium;color-interpolation-filters:sRGB;fill:none;fill-rule:evenodd;stroke-linecap:square;stroke-miterlimit:3"
+ transform="matrix(0.00129134,-1.4946882,0.98914737,0.00195132,-182.90697,199.1254)"
+ id="shape8-37-9"><title
+ id="title3506-4">Simple Double Arrow.14</title><path
+ style="fill:url(#linearGradient5848);stroke:#a6b6cd;stroke-width:0.75"
+ inkscape:connector-curvature="0"
+ id="path3508-6"
+ class="st13"
+ d="m 0,595.28 11.34,-4.49 v 2.24 h 58.8 v -2.24 l 11.33,4.49 -11.33,4.48 v -2.24 h -58.8 v 2.24 z" /></g><rect
+ style="font-size:medium;opacity:0.347;color-interpolation-filters:sRGB;fill:none;fill-opacity:0.91387556;fill-rule:evenodd;stroke:#0044ea;stroke-width:0.6845746;stroke-linecap:square;stroke-miterlimit:3;stroke-opacity:1"
+ id="rect4923-1"
+ width="228.54221"
+ height="267.54898"
+ x="288.59995"
+ y="5.0613203" /><text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:19.20002937px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;color-interpolation-filters:sRGB;fill:#0026e8;fill-opacity:0.83732054;fill-rule:evenodd;stroke:none;stroke-width:0.8000012;stroke-linecap:square;stroke-miterlimit:3"
+ x="478.50806"
+ y="25.851391"
+ id="text4927-7"><tspan
+ sodipodi:role="line"
+ id="tspan4925-3"
+ x="478.50806"
+ y="25.851391"
+ style="font-size:19.20002937px;fill:#0026e8;fill-opacity:0.83732054;stroke-width:0.8000012">VM</tspan></text>
+<text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:17.06669235px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;color-interpolation-filters:sRGB;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.8000012;stroke-linecap:square;stroke-miterlimit:3"
+ x="343.81976"
+ y="68.998184"
+ id="text5156-5"><tspan
+ sodipodi:role="line"
+ id="tspan5154-8"
+ x="343.81976"
+ y="68.998184"
+ style="font-size:17.06669235px;fill:#ffffff;stroke-width:0.8000012">VF Application</tspan></text>
+<text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:17.06669235px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;color-interpolation-filters:sRGB;fill:#008080;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.8000012;stroke-linecap:square;stroke-miterlimit:3"
+ x="457.61066"
+ y="138.71524"
+ id="text5715-9"><tspan
+ sodipodi:role="line"
+ id="tspan5713-1"
+ x="457.61066"
+ y="138.71524"
+ style="font-size:17.06669235px;fill:#008080;stroke-width:0.8000012">DPDK</tspan></text>
+<text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:14.93335533px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;color-interpolation-filters:sRGB;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.8000012;stroke-linecap:square;stroke-miterlimit:3"
+ x="309.18256"
+ y="170.07077"
+ id="text5723-9"><tspan
+ sodipodi:role="line"
+ id="tspan5721-0"
+ x="309.18256"
+ y="170.07077"
+ style="font-size:14.93335533px;fill:#ffffff;stroke-width:0.8000012">Virtual ethdev</tspan></text>
+<text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:12.80001926px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;color-interpolation-filters:sRGB;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.8000012;stroke-linecap:square;stroke-miterlimit:3"
+ x="367.12158"
+ y="223.89334"
+ id="text5735-1"><tspan
+ sodipodi:role="line"
+ id="tspan5733-1"
+ x="367.12158"
+ y="223.89334"
+ style="font-size:12.80001926px;fill:#ffffff;stroke-width:0.8000012">VF driver</tspan></text>
+<g
+ style="font-size:medium;overflow:visible;color-interpolation-filters:sRGB;fill:none;fill-rule:evenodd;stroke-linecap:square;stroke-miterlimit:3"
+ transform="matrix(-1.1390578,0.0062451,-0.01163082,-1.009126,315.58916,668.0438)"
+ id="shape8-37-9-3"><title
+ id="title3506-4-4">Simple Double Arrow.14</title><path
+ style="fill:url(#linearGradient5917);stroke:#a6b6cd;stroke-width:0.75"
+ inkscape:connector-curvature="0"
+ id="path3508-6-2"
+ class="st13"
+ d="m 0,595.28 11.34,-4.49 v 2.24 h 58.8 v -2.24 l 11.33,4.49 -11.33,4.48 v -2.24 h -58.8 v 2.24 z" /></g><rect
+ x="28.261948"
+ y="40.65451"
+ width="185.48367"
+ height="52.464527"
+ class="st3"
+ id="rect3441-6-5"
+ style="font-size:medium;color-interpolation-filters:sRGB;fill:#4f87bb;fill-rule:evenodd;stroke:#40709c;stroke-width:0.59376031;stroke-linecap:square;stroke-miterlimit:3" /><rect
+ style="font-size:medium;color-interpolation-filters:sRGB;fill:url(#linearGradient6028);fill-rule:evenodd;stroke:#4f87bb;stroke-width:0.78190857;stroke-linecap:square;stroke-miterlimit:3"
+ id="rect3453-5-5"
+ class="st5"
+ height="142.00824"
+ width="213.26486"
+ y="121.66504"
+ x="14.025101" /><rect
+ style="font-size:medium;color-interpolation-filters:sRGB;fill:#668bb3;fill-rule:evenodd;stroke:#547395;stroke-width:0.81434548;stroke-linecap:square;stroke-miterlimit:3"
+ id="rect3465-8-0"
+ class="st8"
+ height="107.19906"
+ width="191.24162"
+ y="148.28162"
+ x="20.598679" /><rect
+ style="font-size:medium;color-interpolation-filters:sRGB;fill:#5b9bd5;fill-rule:evenodd;stroke:#c7c8c8;stroke-width:0.30626383;stroke-linecap:square;stroke-miterlimit:3"
+ id="rect3477-1-1"
+ class="st10"
+ height="37.991375"
+ width="99.433281"
+ y="201.17534"
+ x="63.19429" /><g
+ style="font-size:medium;color-interpolation-filters:sRGB;fill:none;fill-rule:evenodd;stroke-linecap:square;stroke-miterlimit:3"
+ transform="matrix(0.00129134,-1.4946882,0.98914737,0.00195132,-465.58182,198.66788)"
+ id="shape8-37-9-33"><title
+ id="title3506-4-3">Simple Double Arrow.14</title><path
+ style="fill:url(#linearGradient6030);stroke:#a6b6cd;stroke-width:0.75"
+ inkscape:connector-curvature="0"
+ id="path3508-6-1"
+ class="st13"
+ d="m 0,595.28 11.34,-4.49 v 2.24 h 58.8 v -2.24 l 11.33,4.49 -11.33,4.48 v -2.24 h -58.8 v 2.24 z" /></g><rect
+ style="font-size:medium;opacity:0.347;color-interpolation-filters:sRGB;fill:none;fill-opacity:0.91387556;fill-rule:evenodd;stroke:#0044ea;stroke-width:0.6845746;stroke-linecap:square;stroke-miterlimit:3;stroke-opacity:1"
+ id="rect4923-1-0"
+ width="228.54222"
+ height="267.54898"
+ x="5.9250998"
+ y="4.6037965" /><text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:19.20002937px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;color-interpolation-filters:sRGB;fill:#0026e8;fill-opacity:0.83732054;fill-rule:evenodd;stroke:none;stroke-width:0.8000012;stroke-linecap:square;stroke-miterlimit:3"
+ x="184.63316"
+ y="25.393867"
+ id="text4927-7-3"><tspan
+ sodipodi:role="line"
+ id="tspan4925-3-7"
+ x="184.63316"
+ y="25.393867"
+ style="font-size:19.20002937px;fill:#0026e8;fill-opacity:0.83732054;stroke-width:0.8000012">Host</tspan></text>
+<text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:17.06669235px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;color-interpolation-filters:sRGB;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.8000012;stroke-linecap:square;stroke-miterlimit:3"
+ x="61.144913"
+ y="68.540657"
+ id="text5156-5-7"><tspan
+ sodipodi:role="line"
+ id="tspan5154-8-6"
+ x="61.144913"
+ y="68.540657"
+ style="font-size:17.06669235px;fill:#ffffff;stroke-width:0.8000012">PF Application</tspan></text>
+<text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:17.06669235px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;color-interpolation-filters:sRGB;fill:#008080;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.8000012;stroke-linecap:square;stroke-miterlimit:3"
+ x="174.93581"
+ y="138.25772"
+ id="text5715-9-7"><tspan
+ sodipodi:role="line"
+ id="tspan5713-1-4"
+ x="174.93581"
+ y="138.25772"
+ style="font-size:17.06669235px;fill:#008080;stroke-width:0.8000012">DPDK</tspan></text>
+<text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:14.93335533px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;color-interpolation-filters:sRGB;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.8000012;stroke-linecap:square;stroke-miterlimit:3"
+ x="26.507706"
+ y="169.61325"
+ id="text5723-9-4"><tspan
+ sodipodi:role="line"
+ id="tspan5721-0-5"
+ x="26.507706"
+ y="169.61325"
+ style="font-size:14.93335533px;fill:#ffffff;stroke-width:0.8000012">Ethdev</tspan></text>
+<text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:12.80001926px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;color-interpolation-filters:sRGB;fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:0.8000012;stroke-linecap:square;stroke-miterlimit:3"
+ x="84.446732"
+ y="223.43582"
+ id="text5735-1-0"><tspan
+ sodipodi:role="line"
+ id="tspan5733-1-1"
+ x="84.446732"
+ y="223.43582"
+ style="font-size:12.80001926px;fill:#ffffff;stroke-width:0.8000012">PF driver</tspan></text>
+</svg>
diff --git a/doc/guides/howto/img/virtio_user_as_exceptional_path.svg b/doc/guides/howto/img/virtio_user_as_exceptional_path.svg
new file mode 100644
index 00000000..b231b709
--- /dev/null
+++ b/doc/guides/howto/img/virtio_user_as_exceptional_path.svg
@@ -0,0 +1,207 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export virtio_user_as_exceptional_pathvsdx.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="5.77778in" height="3.88851in"
+ viewBox="0 0 416 279.973" xml:space="preserve" color-interpolation-filters="sRGB" class="st13">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#ffffff;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {stroke:#000000;stroke-dasharray:7,5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st5 {fill:#000000;font-family:Calibri;font-size:0.833336em}
+ .st6 {fill:none;stroke:none;stroke-width:0.25}
+ .st7 {fill:#000000;font-family:Calibri;font-size:1.00001em;font-style:italic}
+ .st8 {fill:#70ad47;stroke:#c7c8c8;stroke-width:0.25}
+ .st9 {stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.75}
+ .st10 {marker-end:url(#mrkr4-68);marker-start:url(#mrkr4-66);stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st11 {fill:#000000;fill-opacity:1;stroke:#000000;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st12 {fill:#d8d8d8;stroke:#c7c8c8;stroke-width:0.25}
+ .st13 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend4">
+ <path d="M 2 1 L 0 0 L 2 -1 L 2 1 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr4-66" class="st11" v:arrowType="4" v:arrowSize="2" v:setback="6.68" refX="6.68" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend4" transform="scale(3.52) "/>
+ </marker>
+ <marker id="mrkr4-68" class="st11" v:arrowType="4" v:arrowSize="2" v:setback="7.04" refX="-7.04" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend4" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ </defs>
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <v:layer v:name="Connector" v:index="0"/>
+ <g id="shape23-1" v:mID="23" v:groupContext="shape" transform="translate(195.804,-74.9728)">
+ <title>Rectangle.23</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow23-2" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="113.473" width="216.6" height="166.5" class="st2"/>
+ </g>
+ <rect x="0" y="113.473" width="216.6" height="166.5" class="st3"/>
+ </g>
+ <g id="shape42-6" v:mID="42" v:groupContext="shape" v:layerMember="0" transform="translate(146.904,-277.473)">
+ <title>Dynamic connector.42</title>
+ <path d="M-9 279.97 L-9 540.97" class="st4"/>
+ </g>
+ <g id="shape45-9" v:mID="45" v:groupContext="shape" transform="translate(2.9044,-142.292)">
+ <title>Rectangle.45</title>
+ <desc>tap</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="34.05" cy="264.132" width="68.1" height="31.6807"/>
+ <g id="shadow45-10" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="248.292" width="68.1" height="31.6807" class="st2"/>
+ </g>
+ <rect x="0" y="248.292" width="68.1" height="31.6807" class="st3"/>
+ <text x="27.35" y="267.13" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>tap</text> </g>
+ <g id="shape46-15" v:mID="46" v:groupContext="shape" transform="translate(2.9044,-43.2921)">
+ <title>Rectangle.46</title>
+ <desc>vhost ko</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="34.05" cy="264.132" width="68.1" height="31.6807"/>
+ <g id="shadow46-16" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="248.292" width="68.1" height="31.6807" class="st2"/>
+ </g>
+ <rect x="0" y="248.292" width="68.1" height="31.6807" class="st3"/>
+ <text x="16.86" y="267.13" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>vhost ko </text> </g>
+ <g id="shape47-21" v:mID="47" v:groupContext="shape" transform="translate(18.9544,-257.223)">
+ <title>Sheet.47</title>
+ <desc>Kernel space</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="44.025" cy="268.723" width="88.05" height="22.5"/>
+ <rect x="0" y="257.473" width="88.05" height="22.5" class="st6"/>
+ <text x="13.44" y="272.32" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Kernel space</text> </g>
+ <g id="shape48-24" v:mID="48" v:groupContext="shape" transform="translate(148.854,-257.223)">
+ <title>Sheet.48</title>
+ <desc>User space</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="44.025" cy="268.723" width="88.05" height="22.5"/>
+ <rect x="0" y="257.473" width="88.05" height="22.5" class="st6"/>
+ <text x="17.7" y="272.32" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>User space</text> </g>
+ <g id="shape49-27" v:mID="49" v:groupContext="shape" transform="translate(218.904,-182.792)">
+ <title>Rectangle.49</title>
+ <desc>ETHDEV</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="83.25" cy="264.132" width="166.5" height="31.6807"/>
+ <g id="shadow49-28" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="248.292" width="166.5" height="31.6807" class="st2"/>
+ </g>
+ <rect x="0" y="248.292" width="166.5" height="31.6807" class="st3"/>
+ <text x="66.9" y="267.13" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>ETHDEV</text> </g>
+ <g id="shape50-33" v:mID="50" v:groupContext="shape" transform="translate(218.904,-142.292)">
+ <title>Rectangle.50</title>
+ <desc>virtio PMD</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="38.7911" cy="264.132" width="77.59" height="31.6807"/>
+ <g id="shadow50-34" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="248.292" width="77.5823" height="31.6807" class="st2"/>
+ </g>
+ <rect x="0" y="248.292" width="77.5823" height="31.6807" class="st3"/>
+ <text x="17.12" y="267.13" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>virtio PMD</text> </g>
+ <g id="shape51-39" v:mID="51" v:groupContext="shape" transform="translate(308.904,-142.292)">
+ <title>Rectangle.51</title>
+ <desc>other PMDs</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="38.7911" cy="264.132" width="77.59" height="31.6807"/>
+ <g id="shadow51-40" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="248.292" width="77.5823" height="31.6807" class="st2"/>
+ </g>
+ <rect x="0" y="248.292" width="77.5823" height="31.6807" class="st3"/>
+ <text x="14.6" y="267.13" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>other PMDs</text> </g>
+ <g id="shape52-45" v:mID="52" v:groupContext="shape" transform="translate(218.904,-86.3131)">
+ <title>Rectangle.52</title>
+ <desc>virtio-user</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="38.7911" cy="256.393" width="77.59" height="47.1597"/>
+ <g id="shadow52-46" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="232.813" width="77.5823" height="47.1597" class="st2"/>
+ </g>
+ <rect x="0" y="232.813" width="77.5823" height="47.1597" class="st8"/>
+ <text x="17.84" y="247.39" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>virtio-user<v:newlineChar/><v:newlineChar/></text> </g>
+ <g id="shape53-51" v:mID="53" v:groupContext="shape" transform="translate(223.404,-90.1829)">
+ <title>Rectangle.53</title>
+ <desc>vhost adapter</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="34.05" cy="268.183" width="68.1" height="23.5798"/>
+ <g id="shadow53-52" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="256.393" width="68.1" height="23.5798" class="st2"/>
+ </g>
+ <rect x="0" y="256.393" width="68.1" height="23.5798" class="st3"/>
+ <text x="5.82" y="271.18" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>vhost adapter</text> </g>
+ <g id="shape54-57" v:mID="54" v:groupContext="shape" v:layerMember="0" transform="translate(71.0044,-59.1325)">
+ <title>Dynamic connector</title>
+ <path d="M0 279.97 L63.9 279.97 A3 3 0 1 1 69.9 279.97 L186.69 279.97 L186.69 252.79" class="st9"/>
+ </g>
+ <g id="shape55-60" v:mID="55" v:groupContext="shape" v:layerMember="0" transform="translate(71.0044,-149.132)">
+ <title>Dynamic connector.55</title>
+ <path d="M6.68 270.97 L7.04 270.97 L63.9 270.97 A3 3 0 0 1 69.9 270.97 L140.86 270.97" class="st10"/>
+ </g>
+ <g id="shape56-69" v:mID="56" v:groupContext="shape" transform="translate(308.904,-5.2228)">
+ <title>Rectangle.38</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow56-70" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="257.473" width="77.5823" height="22.5" rx="11.25" ry="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="257.473" width="77.5823" height="22.5" rx="11.25" ry="11.25" class="st12"/>
+ </g>
+ <g id="shape57-74" v:mID="57" v:groupContext="shape" transform="translate(318.204,-5.2228)">
+ <title>Sheet.57</title>
+ <desc>NIC</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="28.125" cy="268.723" width="56.25" height="22.5"/>
+ <rect x="0" y="257.473" width="56.25" height="22.5" class="st6"/>
+ <text x="19.61" y="272.32" class="st7" v:langID="2052"><v:paragraph v:horizAlign="1"/><v:tabList/>NIC</text> </g>
+ <g id="shape58-77" v:mID="58" v:groupContext="shape" v:layerMember="0" transform="translate(356.696,-142.292)">
+ <title>Dynamic connector.41</title>
+ <path d="M-9 286.65 L-9 287.01 L-9 387.5" class="st10"/>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/howto/img/virtio_user_for_container_networking.svg b/doc/guides/howto/img/virtio_user_for_container_networking.svg
new file mode 100644
index 00000000..de808066
--- /dev/null
+++ b/doc/guides/howto/img/virtio_user_for_container_networking.svg
@@ -0,0 +1,685 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ version="1.1"
+ id="svg2"
+ class="st16"
+ color-interpolation-filters="sRGB"
+ xml:space="preserve"
+ viewBox="0 0 469.4 294.5"
+ height="4.09028in"
+ width="6.51944in"><metadata
+ id="metadata220"><rdf:RDF><cc:Work
+ rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><v:documentProperties
+ v:viewMarkup="false"
+ v:langID="1033" /><style
+ id="style4"
+ type="text/css"><![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#ffffff;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {fill:#000000;font-family:Calibri;font-size:0.833336em}
+ .st5 {fill:#70ad47;stroke:#c7c8c8;stroke-width:0.25}
+ .st6 {font-size:1em}
+ .st7 {fill:#d8d8d8;stroke:#c7c8c8;stroke-width:0.25}
+ .st8 {fill:none;stroke:none;stroke-width:0.25}
+ .st9 {fill:#000000;font-family:Calibri;font-size:1.00001em;font-style:italic}
+ .st10 {fill:none;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st11 {fill:none;stroke:#c7c8c8;stroke-width:0.25}
+ .st12 {fill:#000000;font-family:Calibri;font-size:1.00001em}
+ .st13 {marker-end:url(#mrkr4-90);marker-start:url(#mrkr4-88);stroke:#000000;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st14 {fill:#000000;fill-opacity:1;stroke:#000000;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st15 {stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st16 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]></style><defs
+ id="Markers"><g
+ id="lend4"><path
+ id="path8"
+ style="stroke:none"
+ d="M 2 1 L 0 0 L 2 -1 L 2 1 " /></g><marker
+ overflow="visible"
+ markerUnits="strokeWidth"
+ orient="auto"
+ refX="6.68"
+ v:setback="6.68"
+ v:arrowSize="2"
+ v:arrowType="4"
+ class="st14"
+ id="mrkr4-88"><use
+ id="use11"
+ transform="scale(3.52) "
+ xlink:href="#lend4" /></marker><marker
+ overflow="visible"
+ markerUnits="strokeWidth"
+ orient="auto"
+ refX="-7.04"
+ v:setback="7.04"
+ v:arrowSize="2"
+ v:arrowType="4"
+ class="st14"
+ id="mrkr4-90"><use
+ id="use14"
+ transform="scale(-3.52,-3.52) "
+ xlink:href="#lend4" /></marker></defs><defs
+ id="Filters"><filter
+ id="filter_2"><feGaussianBlur
+ id="feGaussianBlur18"
+ stdDeviation="2" /></filter></defs><g
+ id="g20"
+ v:groupContext="foregroundPage"
+ v:index="1"
+ v:mID="0"><title
+ id="title22">Page-1</title><v:pageProperties
+ v:shadowOffsetY="-9"
+ v:shadowOffsetX="9"
+ v:drawingUnits="0"
+ v:pageScale="1"
+ v:drawingScale="1" /><v:layer
+ v:index="0"
+ v:name="Connector" /><g
+ transform="translate(20.9044,-72.7228)"
+ v:groupContext="shape"
+ v:mID="23"
+ id="shape23-1"><title
+ id="title25">Rectangle.23</title><v:userDefs><v:ud
+ v:val="VT0(15):26"
+ v:nameU="visVersion" /></v:userDefs><g
+ class="st1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)"
+ v:shadowType="1"
+ v:shadowOffsetY="-1.97279"
+ v:shadowOffsetX="0.345598"
+ v:groupContext="shadow"
+ id="shadow23-2"><rect
+ id="rect28"
+ class="st2"
+ height="184.5"
+ width="216.6"
+ y="110"
+ x="0" /></g><rect
+ id="rect30"
+ class="st3"
+ height="184.5"
+ width="216.6"
+ y="110"
+ x="0" /></g><g
+ transform="translate(44.0044,-198.542)"
+ v:groupContext="shape"
+ v:mID="49"
+ id="shape49-6"><title
+ id="title33">Rectangle.49</title><desc
+ id="desc35">ETHDEV</desc><v:userDefs><v:ud
+ v:val="VT0(15):26"
+ v:nameU="visVersion" /></v:userDefs><v:textBlock
+ v:margins="rect(4,4,4,4)" /><v:textRect
+ height="31.6807"
+ width="166.5"
+ cy="278.66"
+ cx="83.25" /><g
+ class="st1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)"
+ v:shadowType="1"
+ v:shadowOffsetY="-1.97279"
+ v:shadowOffsetX="0.345598"
+ v:groupContext="shadow"
+ id="shadow49-7"><rect
+ id="rect38"
+ class="st2"
+ height="31.6807"
+ width="166.5"
+ y="262.819"
+ x="0" /></g><rect
+ id="rect40"
+ class="st3"
+ height="31.6807"
+ width="166.5"
+ y="262.819"
+ x="0" /><text
+ style="font-size:10.00003242px;font-family:Calibri;fill:#000000"
+ id="text42"
+ v:langID="1033"
+ class="st4"
+ y="281.66"
+ x="66.900002">ethdev<v:paragraph
+ v:horizAlign="1" /><v:tabList /></text>
+</g><g
+ transform="translate(44.0044,-158.042)"
+ v:groupContext="shape"
+ v:mID="50"
+ id="shape50-12"><title
+ id="title45">Rectangle.50</title><desc
+ id="desc47">virtio PMD</desc><v:userDefs><v:ud
+ v:val="VT0(15):26"
+ v:nameU="visVersion" /></v:userDefs><v:textBlock
+ v:margins="rect(4,4,4,4)" /><v:textRect
+ height="31.6807"
+ width="166.5"
+ cy="278.66"
+ cx="83.25" /><g
+ class="st1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)"
+ v:shadowType="1"
+ v:shadowOffsetY="-1.97279"
+ v:shadowOffsetX="0.345598"
+ v:groupContext="shadow"
+ id="shadow50-13"><rect
+ id="rect50"
+ class="st2"
+ height="31.6807"
+ width="166.5"
+ y="262.819"
+ x="0" /></g><rect
+ id="rect52"
+ class="st3"
+ height="31.6807"
+ width="166.5"
+ y="262.819"
+ x="0" /><text
+ id="text54"
+ v:langID="1033"
+ class="st4"
+ y="281.66"
+ x="61.58"><v:paragraph
+ v:horizAlign="1" /><v:tabList />virtio PMD</text>
+</g><g
+ transform="translate(128.904,-86.2228)"
+ v:groupContext="shape"
+ v:mID="52"
+ id="shape52-18"><title
+ id="title57">Rectangle.52</title><desc
+ id="desc59">virtio-user (virtual device)</desc><v:userDefs><v:ud
+ v:val="VT0(15):26"
+ v:nameU="visVersion" /></v:userDefs><v:textBlock
+ v:margins="rect(4,4,4,4)" /><v:textRect
+ height="63"
+ width="81.61"
+ cy="263"
+ cx="40.8" /><g
+ class="st1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)"
+ v:shadowType="1"
+ v:shadowOffsetY="-1.97279"
+ v:shadowOffsetX="0.345598"
+ v:groupContext="shadow"
+ id="shadow52-19"><rect
+ id="rect62"
+ class="st2"
+ height="63"
+ width="81.6"
+ y="231.5"
+ x="0" /></g><rect
+ id="rect64"
+ class="st5"
+ height="63"
+ width="81.6"
+ y="231.5"
+ x="0" /><text
+ id="text66"
+ v:langID="1033"
+ class="st4"
+ y="248"
+ x="19.85"><v:paragraph
+ v:horizAlign="1" /><v:tabList />virtio-user<v:newlineChar /><tspan
+ id="tspan68"
+ class="st6"
+ dy="1.2em"
+ x="10.52">(</tspan>virtual device)<v:newlineChar /><v:newlineChar /></text>
+</g><g
+ transform="translate(129.44522,-83.349651)"
+ v:groupContext="shape"
+ v:mID="53"
+ id="shape53-25"><title
+ id="title71">Rectangle.53</title><desc
+ id="desc73">vhost-user adapter</desc><v:userDefs><v:ud
+ v:val="VT0(15):26"
+ v:nameU="visVersion" /></v:userDefs><v:textBlock
+ v:margins="rect(4,4,4,4)" /><v:textRect
+ height="23.5798"
+ width="68.1"
+ cy="282.71"
+ cx="34.05" /><g
+ style="visibility:visible"
+ class="st1"
+ transform="translate(0.345598,1.97279)"
+ v:shadowType="1"
+ v:shadowOffsetY="-1.97279"
+ v:shadowOffsetX="0.345598"
+ v:groupContext="shadow"
+ id="shadow53-26"><rect
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)"
+ id="rect76"
+ class="st2"
+ height="23.5798"
+ width="68.099998"
+ y="270.92001"
+ x="0" /></g><rect
+ style="fill:#ffffff;stroke:#c7c8c8;stroke-width:0.25"
+ id="rect78"
+ class="st3"
+ height="23.5798"
+ width="68.099998"
+ y="265.79211"
+ x="6.99261" /></g><g
+ transform="translate(366.563,-5.2228)"
+ v:groupContext="shape"
+ v:mID="56"
+ id="shape56-32"><title
+ id="title85">Rectangle.38</title><v:userDefs><v:ud
+ v:val="VT0(15):26"
+ v:nameU="visVersion" /></v:userDefs><g
+ class="st1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)"
+ v:shadowType="1"
+ v:shadowOffsetY="-1.97279"
+ v:shadowOffsetX="0.345598"
+ v:groupContext="shadow"
+ id="shadow56-33"><rect
+ id="rect88"
+ class="st2"
+ ry="11.25"
+ rx="11.25"
+ height="22.5"
+ width="77.5823"
+ y="272"
+ x="0" /></g><rect
+ id="rect90"
+ class="st7"
+ ry="11.25"
+ rx="11.25"
+ height="22.5"
+ width="77.5823"
+ y="272"
+ x="0" /></g><g
+ transform="translate(380.904,-5.2228)"
+ v:groupContext="shape"
+ v:mID="57"
+ id="shape57-37"><title
+ id="title93">Sheet.57</title><desc
+ id="desc95">NIC</desc><v:textBlock
+ v:margins="rect(4,4,4,4)" /><v:textRect
+ height="22.5"
+ width="56.25"
+ cy="283.25"
+ cx="28.125" /><rect
+ id="rect97"
+ class="st8"
+ height="22.5"
+ width="56.25"
+ y="272"
+ x="0" /><text
+ id="text99"
+ v:langID="2052"
+ class="st9"
+ y="286.85"
+ x="19.61"><v:paragraph
+ v:horizAlign="1" /><v:tabList />NIC</text>
+</g><g
+ transform="translate(43.4044,-86.2228)"
+ v:groupContext="shape"
+ v:mID="59"
+ id="shape59-40"><title
+ id="title102">Rectangle.59</title><desc
+ id="desc104">virtio (PCI device)</desc><v:userDefs><v:ud
+ v:val="VT0(15):26"
+ v:nameU="visVersion" /></v:userDefs><v:textBlock
+ v:margins="rect(4,4,4,4)" /><v:textRect
+ height="63"
+ width="77.59"
+ cy="263"
+ cx="38.7911" /><g
+ class="st1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)"
+ v:shadowType="1"
+ v:shadowOffsetY="-1.97279"
+ v:shadowOffsetX="0.345598"
+ v:groupContext="shadow"
+ id="shadow59-41"><rect
+ id="rect107"
+ class="st2"
+ height="63"
+ width="77.5823"
+ y="231.5"
+ x="0" /></g><rect
+ id="rect109"
+ class="st3"
+ height="63"
+ width="77.5823"
+ y="231.5"
+ x="0" /><text
+ id="text111"
+ v:langID="1033"
+ class="st4"
+ y="260"
+ x="28.18"><v:paragraph
+ v:horizAlign="1" /><v:tabList />virtio<v:newlineChar /><tspan
+ id="tspan113"
+ class="st6"
+ dy="1.2em"
+ x="15">(</tspan>PCI device)</text>
+</g><g
+ transform="translate(344.904,-77.2228)"
+ v:groupContext="shape"
+ v:mID="60"
+ id="shape60-47"><title
+ id="title116">Rectangle.60</title><desc
+ id="desc118">vSwitch or vRouter</desc><v:userDefs><v:ud
+ v:val="VT0(15):26"
+ v:nameU="visVersion" /></v:userDefs><v:textBlock
+ v:margins="rect(4,4,4,4)" /><v:textRect
+ height="216"
+ width="120.9"
+ cy="186.5"
+ cx="60.45" /><g
+ class="st1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)"
+ v:shadowType="1"
+ v:shadowOffsetY="-1.97279"
+ v:shadowOffsetX="0.345598"
+ v:groupContext="shadow"
+ id="shadow60-48"><rect
+ id="rect121"
+ class="st2"
+ height="216"
+ width="120.9"
+ y="78.5"
+ x="0" /></g><rect
+ id="rect123"
+ class="st3"
+ height="216"
+ width="120.9"
+ y="78.5"
+ x="0" /><text
+ id="text125"
+ v:langID="1033"
+ class="st4"
+ y="177.5"
+ x="44.76"><v:paragraph
+ v:horizAlign="1" /><v:tabList />vSwitch<v:newlineChar /><tspan
+ id="tspan127"
+ class="st6"
+ dy="1.2em"
+ x="56.07">or<v:newlineChar /></tspan><tspan
+ id="tspan129"
+ class="st6"
+ dy="1.2em"
+ x="44.31">vRouter</tspan></text>
+</g><g
+ transform="translate(20.9044,-234.723)"
+ v:groupContext="shape"
+ v:mID="61"
+ id="shape61-55"><title
+ id="title132">Sheet.61</title><desc
+ id="desc134">DPDK</desc><v:textBlock
+ v:margins="rect(4,4,4,4)" /><v:textRect
+ height="22.5"
+ width="47.25"
+ cy="283.25"
+ cx="23.625" /><rect
+ id="rect136"
+ class="st8"
+ height="22.5"
+ width="47.25"
+ y="272"
+ x="0" /><text
+ id="text138"
+ v:langID="1033"
+ class="st9"
+ y="286.85"
+ x="4"><v:paragraph /><v:tabList />DPDK</text>
+</g><g
+ transform="translate(2.9044,-52.4728)"
+ v:groupContext="shape"
+ v:mID="62"
+ id="shape62-58"><title
+ id="title141">Rectangle.62</title><v:userDefs><v:ud
+ v:val="VT0(15):26"
+ v:nameU="visVersion" /></v:userDefs><g
+ class="st1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)"
+ v:shadowType="1"
+ v:shadowOffsetY="-1.97279"
+ v:shadowOffsetX="0.345598"
+ v:groupContext="shadow"
+ id="shadow62-59"><rect
+ id="rect144"
+ class="st10"
+ height="240.75"
+ width="252"
+ y="53.75"
+ x="0" /></g><rect
+ id="rect146"
+ class="st11"
+ height="240.75"
+ width="252"
+ y="53.75"
+ x="0" /></g><g
+ transform="translate(2.9044,-261.723)"
+ v:groupContext="shape"
+ v:mID="63"
+ id="shape63-63"><title
+ id="title149">Sheet.63</title><desc
+ id="desc151">Contanier/App</desc><v:textBlock
+ v:margins="rect(4,4,4,4)" /><v:textRect
+ height="22.5"
+ width="90"
+ cy="283.25"
+ cx="45" /><rect
+ id="rect153"
+ class="st8"
+ height="22.5"
+ width="90"
+ y="272"
+ x="0" /><text
+ style="font-style:italic;font-size:12.00012016px;font-family:Calibri;fill:#000000"
+ id="text155"
+ v:langID="1033"
+ class="st9"
+ y="286.85001"
+ x="4"><v:paragraph /><v:tabList />Container/App</text>
+</g><g
+ transform="translate(535.904,70.4861) rotate(90)"
+ v:groupContext="shape"
+ v:mID="64"
+ id="shape64-66"><title
+ id="title158">Rectangle.64</title><v:userDefs><v:ud
+ v:val="VT0(15):26"
+ v:nameU="visVersion" /></v:userDefs><g
+ class="st1"
+ transform="matrix(1,0,0,1,1.97279,-0.345598)"
+ v:shadowType="1"
+ v:shadowOffsetY="-1.97279"
+ v:shadowOffsetX="0.345598"
+ v:groupContext="shadow"
+ id="shadow64-67"><rect
+ id="rect161"
+ class="st2"
+ ry="13.5"
+ rx="13.5"
+ height="27"
+ width="77.5823"
+ y="267.5"
+ x="0" /></g><rect
+ id="rect163"
+ class="st7"
+ ry="13.5"
+ rx="13.5"
+ height="27"
+ width="77.5823"
+ y="267.5"
+ x="0" /></g><g
+ transform="translate(625.904,70.4861) rotate(90)"
+ v:groupContext="shape"
+ v:mID="65"
+ id="shape65-71"><title
+ id="title166">Rectangle.65</title><v:userDefs><v:ud
+ v:val="VT0(15):26"
+ v:nameU="visVersion" /></v:userDefs><g
+ class="st1"
+ transform="matrix(1,0,0,1,1.97279,-0.345598)"
+ v:shadowType="1"
+ v:shadowOffsetY="-1.97279"
+ v:shadowOffsetX="0.345598"
+ v:groupContext="shadow"
+ id="shadow65-72"><rect
+ id="rect169"
+ class="st2"
+ ry="13.5"
+ rx="13.5"
+ height="27"
+ width="77.5823"
+ y="267.5"
+ x="0" /></g><rect
+ id="rect171"
+ class="st7"
+ ry="13.5"
+ rx="13.5"
+ height="27"
+ width="77.5823"
+ y="267.5"
+ x="0" /></g><g
+ transform="translate(538.154,81.1522) rotate(90)"
+ v:groupContext="shape"
+ v:mID="66"
+ id="shape66-76"><title
+ id="title174">Sheet.66</title><desc
+ id="desc176">virtio</desc><v:textBlock
+ v:margins="rect(4,4,4,4)" /><v:textRect
+ height="22.5"
+ width="56.25"
+ cy="283.25"
+ cx="28.125" /><rect
+ id="rect178"
+ class="st8"
+ height="22.5"
+ width="56.25"
+ y="272"
+ x="0" /><text
+ id="text180"
+ v:langID="1033"
+ class="st12"
+ y="286.85"
+ x="15.4"><v:paragraph
+ v:horizAlign="1" /><v:tabList />virtio</text>
+</g><g
+ transform="translate(628.154,81.1522) rotate(90)"
+ v:groupContext="shape"
+ v:mID="67"
+ id="shape67-79"><title
+ id="title183">Sheet.67</title><desc
+ id="desc185">vhost</desc><v:textBlock
+ v:margins="rect(4,4,4,4)" /><v:textRect
+ height="22.5"
+ width="56.25"
+ cy="283.25"
+ cx="28.125" /><rect
+ id="rect187"
+ class="st8"
+ height="22.5"
+ width="56.25"
+ y="272"
+ x="0" /><text
+ id="text189"
+ v:langID="1033"
+ class="st12"
+ y="286.85"
+ x="14.74"><v:paragraph
+ v:horizAlign="1" /><v:tabList />vhost</text>
+</g><g
+ transform="translate(268.404,-176.223)"
+ v:layerMember="0"
+ v:groupContext="shape"
+ v:mID="69"
+ id="shape69-82"><title
+ id="title192">Dynamic connector</title><path
+ id="path194"
+ class="st13"
+ d="M6.68 285.5 L7.04 285.5 L55.96 285.5" /></g><g
+ transform="translate(396.354,-77.2228)"
+ v:layerMember="0"
+ v:groupContext="shape"
+ v:mID="70"
+ id="shape70-91"><title
+ id="title197">Dynamic connector.70</title><path
+ id="path199"
+ class="st13"
+ d="M9 301.18 L9 301.54 L9 336.96" /></g><g
+ transform="translate(205.004,-92.4329)"
+ v:layerMember="0"
+ v:groupContext="shape"
+ v:mID="72"
+ id="shape72-104"><title
+ id="title214">Dynamic connector.72</title><path
+ id="path216"
+ class="st15"
+ d="M0 285.5 L101.11 285.5" /></g><g
+ transform="matrix(1.1344321,0,0,0.98698119,292.92681,-86.402944)"
+ v:groupContext="shape"
+ v:mID="71"
+ id="shape71-98"><title
+ id="title202">Rectangle.71</title><desc
+ id="desc204">unix socket file</desc><v:userDefs><v:ud
+ v:val="VT0(15):26"
+ v:nameU="visVersion" /></v:userDefs><v:textBlock
+ v:margins="rect(4,4,4,4)" /><v:textRect
+ height="23.5798"
+ width="77.59"
+ cy="282.71"
+ cx="38.7911" /><g
+ style="visibility:visible"
+ class="st1"
+ transform="translate(0.345598,1.97279)"
+ v:shadowType="1"
+ v:shadowOffsetY="-1.97279"
+ v:shadowOffsetX="0.345598"
+ v:groupContext="shadow"
+ id="shadow71-99"><rect
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)"
+ id="rect207"
+ class="st2"
+ height="23.5798"
+ width="77.582298"
+ y="270.92001"
+ x="0" /></g><rect
+ style="fill:#ffffff;stroke:#000000;stroke-width:0.25025026;stroke-opacity:1;stroke-miterlimit:3;stroke-dasharray:none"
+ id="rect209"
+ class="st3"
+ height="23.5798"
+ width="77.582298"
+ y="270.92001"
+ x="-0.41093162" /><text
+ transform="scale(0.86136004,1.1609547)"
+ style="font-size:10.19067955px;font-family:Calibri;fill:#000000"
+ id="text211"
+ v:langID="1033"
+ class="st4"
+ y="247.29736"
+ x="7.1378384"><v:paragraph
+ v:horizAlign="1" /><v:tabList />unix socket file</text>
+</g><text
+ id="text66-3"
+ v:langID="1033"
+ class="st4"
+ y="192.78035"
+ x="143.49364"
+ style="font-size:12px;line-height:125%;font-family:Calibri;fill:#000000"><v:paragraph
+ v:horizAlign="1" /><v:tabList /><tspan
+ id="tspan4385"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:10.00000572px;line-height:125%;font-family:Calibri;-inkscape-font-specification:'Calibri, Normal';text-align:start;writing-mode:lr-tb;text-anchor:start">vhost-user</tspan><v:newlineChar /><v:newlineChar /><v:newlineChar /></text>
+<text
+ id="text66-3-9"
+ v:langID="1033"
+ class="st4"
+ y="201.73016"
+ x="149.81844"
+ style="font-size:12px;line-height:125%;font-family:Calibri;fill:#000000"><v:paragraph
+ v:horizAlign="1" /><v:tabList /><tspan
+ id="tspan4385-1"
+ style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:10.00000572px;line-height:125%;font-family:Calibri;-inkscape-font-specification:'Calibri, Normal';text-align:start;writing-mode:lr-tb;text-anchor:start">adapter</tspan><v:newlineChar /><v:newlineChar /><v:newlineChar /></text>
+</g></svg> \ No newline at end of file
diff --git a/doc/guides/howto/index.rst b/doc/guides/howto/index.rst
index 5575b274..a483444d 100644
--- a/doc/guides/howto/index.rst
+++ b/doc/guides/howto/index.rst
@@ -38,3 +38,7 @@ HowTo Guides
lm_bond_virtio_sriov
lm_virtio_vhost_user
flow_bifurcation
+ pvp_reference_benchmark
+ vfd
+ virtio_user_for_container_networking
+ virtio_user_as_exceptional_path
diff --git a/doc/guides/howto/lm_bond_virtio_sriov.rst b/doc/guides/howto/lm_bond_virtio_sriov.rst
index fe9803e4..40dd2cb2 100644
--- a/doc/guides/howto/lm_bond_virtio_sriov.rst
+++ b/doc/guides/howto/lm_bond_virtio_sriov.rst
@@ -613,17 +613,17 @@ Set up DPDK in the Virtual Machine
cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
ifconfig -a
- /root/dpdk/tools/dpdk-devbind.py --status
+ /root/dpdk/usertools/dpdk-devbind.py --status
rmmod virtio-pci ixgbevf
modprobe uio
insmod /root/dpdk/x86_64-default-linuxapp-gcc/kmod/igb_uio.ko
- /root/dpdk/tools/dpdk-devbind.py -b igb_uio 0000:00:03.0
- /root/dpdk/tools/dpdk-devbind.py -b igb_uio 0000:00:04.0
+ /root/dpdk/usertools/dpdk-devbind.py -b igb_uio 0000:00:03.0
+ /root/dpdk/usertools/dpdk-devbind.py -b igb_uio 0000:00:04.0
- /root/dpdk/tools/dpdk-devbind.py --status
+ /root/dpdk/usertools/dpdk-devbind.py --status
run_testpmd_bonding_in_vm.sh
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -641,7 +641,7 @@ Run testpmd in the Virtual Machine.
# use for bonding of virtio and vf tests in VM
/root/dpdk/x86_64-default-linuxapp-gcc/app/testpmd \
- -c f -n 4 --socket-mem 350 -- --i --port-topology=chained
+ -l 0-3 -n 4 --socket-mem 350 -- --i --port-topology=chained
.. _lm_bond_virtio_sriov_switch_conf:
diff --git a/doc/guides/howto/lm_virtio_vhost_user.rst b/doc/guides/howto/lm_virtio_vhost_user.rst
index 49377818..9402ed8d 100644
--- a/doc/guides/howto/lm_virtio_vhost_user.rst
+++ b/doc/guides/howto/lm_virtio_vhost_user.rst
@@ -90,14 +90,14 @@ For Fortville NIC.
.. code-block:: console
- cd /root/dpdk/tools
+ cd /root/dpdk/usertools
./dpdk-devbind.py -b igb_uio 0000:02:00.0
For Niantic NIC.
.. code-block:: console
- cd /root/dpdk/tools
+ cd /root/dpdk/usertools
./dpdk-devbind.py -b igb_uio 0000:09:00.0
On host_server_1: Terminal 3
@@ -171,14 +171,14 @@ For Fortville NIC.
.. code-block:: console
- cd /root/dpdk/tools
+ cd /root/dpdk/usertools
./dpdk-devbind.py -b igb_uio 0000:03:00.0
For Niantic NIC.
.. code-block:: console
- cd /root/dpdk/tools
+ cd /root/dpdk/usertools
./dpdk-devbind.py -b igb_uio 0000:06:00.0
On host_server_2: Terminal 3
@@ -444,17 +444,17 @@ setup_dpdk_virtio_in_vm.sh
cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
ifconfig -a
- /root/dpdk/tools/dpdk-devbind.py --status
+ /root/dpdk/usertools/dpdk-devbind.py --status
rmmod virtio-pci
modprobe uio
insmod /root/dpdk/x86_64-default-linuxapp-gcc/kmod/igb_uio.ko
- /root/dpdk/tools/dpdk-devbind.py -b igb_uio 0000:00:03.0
- /root/dpdk/tools/dpdk-devbind.py -b igb_uio 0000:00:04.0
+ /root/dpdk/usertools/dpdk-devbind.py -b igb_uio 0000:00:03.0
+ /root/dpdk/usertools/dpdk-devbind.py -b igb_uio 0000:00:04.0
- /root/dpdk/tools/dpdk-devbind.py --status
+ /root/dpdk/usertools/dpdk-devbind.py --status
run_testpmd_in_vm.sh
~~~~~~~~~~~~~~~~~~~~
@@ -466,4 +466,4 @@ run_testpmd_in_vm.sh
# test system has 8 cpus (0-7), use cpus 2-7 for VM
/root/dpdk/x86_64-default-linuxapp-gcc/app/testpmd \
- -c 3f -n 4 --socket-mem 350 -- --burst=64 --i --disable-hw-vlan-filter
+ -l 0-5 -n 4 --socket-mem 350 -- --burst=64 --i --disable-hw-vlan-filter
diff --git a/doc/guides/howto/pvp_reference_benchmark.rst b/doc/guides/howto/pvp_reference_benchmark.rst
new file mode 100644
index 00000000..228b4a25
--- /dev/null
+++ b/doc/guides/howto/pvp_reference_benchmark.rst
@@ -0,0 +1,398 @@
+.. BSD LICENSE
+ Copyright(c) 2016 Red Hat, Inc. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+PVP reference benchmark setup using testpmd
+===========================================
+
+This guide lists the steps required to setup a PVP benchmark using testpmd as
+a simple forwarder between NICs and Vhost interfaces. The goal of this setup
+is to have a reference PVP benchmark without using external vSwitches (OVS,
+VPP, ...) to make it easier to obtain reproducible results and to facilitate
+continuous integration testing.
+
+The guide covers two ways of launching the VM, either by directly calling the
+QEMU command line, or by relying on libvirt. It has been tested with DPDK
+v16.11 using RHEL7 for both host and guest.
+
+
+Setup overview
+--------------
+
+.. _figure_pvp_2nics:
+
+.. figure:: img/pvp_2nics.*
+
+ PVP setup using 2 NICs
+
+In this diagram, each red arrow represents one logical core. This use-case
+requires 6 dedicated logical cores. A forwarding configuration with a single
+NIC is also possible, requiring 3 logical cores.
+
+
+Host setup
+----------
+
+In this setup, we isolate 6 cores (from CPU2 to CPU7) on the same NUMA
+node. Two cores are assigned to the VM vCPUs running testpmd and four are
+assigned to testpmd on the host.
+
+
+Host tuning
+~~~~~~~~~~~
+
+#. On BIOS, disable turbo-boost and hyper-threads.
+
+#. Append these options to Kernel command line:
+
+ .. code-block:: console
+
+ intel_pstate=disable mce=ignore_ce default_hugepagesz=1G hugepagesz=1G hugepages=6 isolcpus=2-7 rcu_nocbs=2-7 nohz_full=2-7 iommu=pt intel_iommu=on
+
+#. Disable hyper-threads at runtime if necessary or if BIOS is not accessible:
+
+ .. code-block:: console
+
+ cat /sys/devices/system/cpu/cpu*[0-9]/topology/thread_siblings_list \
+ | sort | uniq \
+ | awk -F, '{system("echo 0 > /sys/devices/system/cpu/cpu"$2"/online")}'
+
+#. Disable NMIs:
+
+ .. code-block:: console
+
+ echo 0 > /proc/sys/kernel/nmi_watchdog
+
+#. Exclude isolated CPUs from the writeback cpumask:
+
+ .. code-block:: console
+
+ echo ffffff03 > /sys/bus/workqueue/devices/writeback/cpumask
+
+#. Isolate CPUs from IRQs:
+
+ .. code-block:: console
+
+ clear_mask=0xfc #Isolate CPU2 to CPU7 from IRQs
+ for i in /proc/irq/*/smp_affinity
+ do
+ echo "obase=16;$(( 0x$(cat $i) & ~$clear_mask ))" | bc > $i
+ done
+
+
+Qemu build
+~~~~~~~~~~
+
+Build Qemu:
+
+ .. code-block:: console
+
+ git clone git://git.qemu.org/qemu.git
+ cd qemu
+ mkdir bin
+ cd bin
+ ../configure --target-list=x86_64-softmmu
+ make
+
+
+DPDK build
+~~~~~~~~~~
+
+Build DPDK:
+
+ .. code-block:: console
+
+ git clone git://dpdk.org/dpdk
+ cd dpdk
+ export RTE_SDK=$PWD
+ make install T=x86_64-native-linuxapp-gcc DESTDIR=install
+
+
+Testpmd launch
+~~~~~~~~~~~~~~
+
+#. Assign NICs to DPDK:
+
+ .. code-block:: console
+
+ modprobe vfio-pci
+ $RTE_SDK/install/sbin/dpdk-devbind -b vfio-pci 0000:11:00.0 0000:11:00.1
+
+ .. Note::
+
+ The Sandy Bridge family seems to have some IOMMU limitations giving poor
+ performance results. To achieve good performance on these machines
+ consider using UIO instead.
+
+#. Launch the testpmd application:
+
+ .. code-block:: console
+
+ $RTE_SDK/install/bin/testpmd -l 0,2,3,4,5 --socket-mem=1024 -n 4 \
+ --vdev 'net_vhost0,iface=/tmp/vhost-user1' \
+ --vdev 'net_vhost1,iface=/tmp/vhost-user2' -- \
+ --portmask=f --disable-hw-vlan -i --rxq=1 --txq=1 \
+ --nb-cores=4 --forward-mode=io
+
+ With this command, isolated CPUs 2 to 5 will be used as lcores for PMD threads.
+
+#. In testpmd interactive mode, set the portlist to obtain the correct port
+ chaining:
+
+ .. code-block:: console
+
+ set portlist 0,2,1,3
+ start
+
+
+VM launch
+~~~~~~~~~
+
+The VM may be launched either by calling QEMU directly, or by using libvirt.
+
+Qemu way
+^^^^^^^^
+
+Launch QEMU with two Virtio-net devices paired to the vhost-user sockets
+created by testpmd. Below example uses default Virtio-net options, but options
+may be specified, for example to disable mergeable buffers or indirect
+descriptors.
+
+ .. code-block:: console
+
+ <QEMU path>/bin/x86_64-softmmu/qemu-system-x86_64 \
+ -enable-kvm -cpu host -m 3072 -smp 3 \
+ -chardev socket,id=char0,path=/tmp/vhost-user1 \
+ -netdev type=vhost-user,id=mynet1,chardev=char0,vhostforce \
+ -device virtio-net-pci,netdev=mynet1,mac=52:54:00:02:d9:01,addr=0x10 \
+ -chardev socket,id=char1,path=/tmp/vhost-user2 \
+ -netdev type=vhost-user,id=mynet2,chardev=char1,vhostforce \
+ -device virtio-net-pci,netdev=mynet2,mac=52:54:00:02:d9:02,addr=0x11 \
+ -object memory-backend-file,id=mem,size=3072M,mem-path=/dev/hugepages,share=on \
+ -numa node,memdev=mem -mem-prealloc \
+ -net user,hostfwd=tcp::1002$1-:22 -net nic \
+ -qmp unix:/tmp/qmp.socket,server,nowait \
+ -monitor stdio <vm_image>.qcow2
+
+You can use this `qmp-vcpu-pin <https://patchwork.kernel.org/patch/9361617/>`_
+script to pin vCPUs.
+
+It can be used as follows, for example to pin 3 vCPUs to CPUs 1, 6 and 7,
+where isolated CPUs 6 and 7 will be used as lcores for Virtio PMDs:
+
+ .. code-block:: console
+
+ export PYTHONPATH=$PYTHONPATH:<QEMU path>/scripts/qmp
+ ./qmp-vcpu-pin -s /tmp/qmp.socket 1 6 7
+
+Libvirt way
+^^^^^^^^^^^
+
+Some initial steps are required for libvirt to be able to connect to testpmd's
+sockets.
+
+First, SELinux policy needs to be set to permissive, since testpmd is
+generally run as root (note, as reboot is required):
+
+ .. code-block:: console
+
+ cat /etc/selinux/config
+
+ # This file controls the state of SELinux on the system.
+ # SELINUX= can take one of these three values:
+ # enforcing - SELinux security policy is enforced.
+ # permissive - SELinux prints warnings instead of enforcing.
+ # disabled - No SELinux policy is loaded.
+ SELINUX=permissive
+
+ # SELINUXTYPE= can take one of three two values:
+ # targeted - Targeted processes are protected,
+ # minimum - Modification of targeted policy.
+ # Only selected processes are protected.
+ # mls - Multi Level Security protection.
+ SELINUXTYPE=targeted
+
+
+Also, Qemu needs to be run as root, which has to be specified in
+``/etc/libvirt/qemu.conf``:
+
+ .. code-block:: console
+
+ user = "root"
+
+Once the domain created, the following snippet is an extract of he most
+important information (hugepages, vCPU pinning, Virtio PCI devices):
+
+ .. code-block:: xml
+
+ <domain type='kvm'>
+ <memory unit='KiB'>3145728</memory>
+ <currentMemory unit='KiB'>3145728</currentMemory>
+ <memoryBacking>
+ <hugepages>
+ <page size='1048576' unit='KiB' nodeset='0'/>
+ </hugepages>
+ <locked/>
+ </memoryBacking>
+ <vcpu placement='static'>3</vcpu>
+ <cputune>
+ <vcpupin vcpu='0' cpuset='1'/>
+ <vcpupin vcpu='1' cpuset='6'/>
+ <vcpupin vcpu='2' cpuset='7'/>
+ <emulatorpin cpuset='0'/>
+ </cputune>
+ <numatune>
+ <memory mode='strict' nodeset='0'/>
+ </numatune>
+ <os>
+ <type arch='x86_64' machine='pc-i440fx-rhel7.0.0'>hvm</type>
+ <boot dev='hd'/>
+ </os>
+ <cpu mode='host-passthrough'>
+ <topology sockets='1' cores='3' threads='1'/>
+ <numa>
+ <cell id='0' cpus='0-2' memory='3145728' unit='KiB' memAccess='shared'/>
+ </numa>
+ </cpu>
+ <devices>
+ <interface type='vhostuser'>
+ <mac address='56:48:4f:53:54:01'/>
+ <source type='unix' path='/tmp/vhost-user1' mode='client'/>
+ <model type='virtio'/>
+ <driver name='vhost' rx_queue_size='256' />
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x10' function='0x0'/>
+ </interface>
+ <interface type='vhostuser'>
+ <mac address='56:48:4f:53:54:02'/>
+ <source type='unix' path='/tmp/vhost-user2' mode='client'/>
+ <model type='virtio'/>
+ <driver name='vhost' rx_queue_size='256' />
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x11' function='0x0'/>
+ </interface>
+ </devices>
+ </domain>
+
+
+Guest setup
+-----------
+
+
+Guest tuning
+~~~~~~~~~~~~
+
+#. Append these options to the Kernel command line:
+
+ .. code-block:: console
+
+ default_hugepagesz=1G hugepagesz=1G hugepages=1 intel_iommu=on iommu=pt isolcpus=1,2 rcu_nocbs=1,2 nohz_full=1,2
+
+#. Disable NMIs:
+
+ .. code-block:: console
+
+ echo 0 > /proc/sys/kernel/nmi_watchdog
+
+#. Exclude isolated CPU1 and CPU2 from the writeback cpumask:
+
+ .. code-block:: console
+
+ echo 1 > /sys/bus/workqueue/devices/writeback/cpumask
+
+#. Isolate CPUs from IRQs:
+
+ .. code-block:: console
+
+ clear_mask=0x6 #Isolate CPU1 and CPU2 from IRQs
+ for i in /proc/irq/*/smp_affinity
+ do
+ echo "obase=16;$(( 0x$(cat $i) & ~$clear_mask ))" | bc > $i
+ done
+
+
+DPDK build
+~~~~~~~~~~
+
+Build DPDK:
+
+ .. code-block:: console
+
+ git clone git://dpdk.org/dpdk
+ cd dpdk
+ export RTE_SDK=$PWD
+ make install T=x86_64-native-linuxapp-gcc DESTDIR=install
+
+
+Testpmd launch
+~~~~~~~~~~~~~~
+
+Probe vfio module without iommu:
+
+ .. code-block:: console
+
+ modprobe -r vfio_iommu_type1
+ modprobe -r vfio
+ modprobe vfio enable_unsafe_noiommu_mode=1
+ cat /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
+ modprobe vfio-pci
+
+Bind the virtio-net devices to DPDK:
+
+ .. code-block:: console
+
+ $RTE_SDK/usertools/dpdk-devbind.py -b vfio-pci 0000:00:10.0 0000:00:11.0
+
+Start testpmd:
+
+ .. code-block:: console
+
+ $RTE_SDK/install/bin/testpmd -l 0,1,2 --socket-mem 1024 -n 4 \
+ --proc-type auto --file-prefix pg -- \
+ --portmask=3 --forward-mode=macswap --port-topology=chained \
+ --disable-hw-vlan --disable-rss -i --rxq=1 --txq=1 \
+ --rxd=256 --txd=256 --nb-cores=2 --auto-start
+
+Results template
+----------------
+
+Below template should be used when sharing results:
+
+ .. code-block:: none
+
+ Traffic Generator: <Test equipment (e.g. IXIA, Moongen, ...)>
+ Acceptable Loss: <n>%
+ Validation run time: <n>min
+ Host DPDK version/commit: <version, SHA-1>
+ Guest DPDK version/commit: <version, SHA-1>
+ Patches applied: <link to patchwork>
+ QEMU version/commit: <version>
+ Virtio features: <features (e.g. mrg_rxbuf='off', leave empty if default)>
+ CPU: <CPU model>, <CPU frequency>
+ NIC: <NIC model>
+ Result: <n> Mpps
diff --git a/doc/guides/howto/vfd.rst b/doc/guides/howto/vfd.rst
new file mode 100644
index 00000000..6f083b87
--- /dev/null
+++ b/doc/guides/howto/vfd.rst
@@ -0,0 +1,407 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+VF daemon (VFd)
+===============
+
+VFd (the VF daemon) is a mechanism which can be used to configure features on
+a VF (SR-IOV Virtual Function) without direct access to the PF (SR-IOV
+Physical Function). VFd is an *EXPERIMENTAL* feature which can only be used in
+the scenario of DPDK PF with a DPDK VF. If the PF port is driven by the Linux
+kernel driver then the VFd feature will not work. Currently VFd is only
+supported by the ixgbe and i40e drivers.
+
+In general VF features cannot be configured directly by an end user
+application since they are under the control of the PF. The normal approach to
+configuring a feature on a VF is that an application would call the APIs
+provided by the VF driver. If the required feature cannot be configured by the
+VF directly (the most common case) the VF sends a message to the PF through
+the mailbox on ixgbe and i40e. This means that the availability of the feature
+depends on whether the appropriate mailbox messages are defined.
+
+DPDK leverages the mailbox interface defined by the Linux kernel driver so
+that compatibility with the kernel driver can be guaranteed. The downside of
+this approach is that the availability of messages supported by the kernel
+become a limitation when the user wants to configure features on the VF.
+
+VFd is a new method of controlling the features on a VF. The VF driver doesn't
+talk directly to the PF driver when configuring a feature on the VF. When a VF
+application (i.e., an application using the VF ports) wants to enable a VF
+feature, it can send a message to the PF application (i.e., the application
+using the PF port, which can be the same as the VF application). The PF
+application will configure the feature for the VF. Obviously, the PF
+application can also configure the VF features without a request from the VF
+application.
+
+.. _VF_daemon_overview:
+
+.. figure:: img/vf_daemon_overview.*
+
+ VF daemon (VFd) Overview
+
+Compared with the traditional approach the VFd moves the negotiation between
+VF and PF from the driver level to application level. So the application
+should define how the negotiation between the VF and PF works, or even if the
+control should be limited to the PF.
+
+It is the application's responsibility to use VFd. Consider for example a KVM
+migration, the VF application may transfer from one VM to another. It is
+recommended in this case that the PF control the VF features without
+participation from the VF. Then the VF application has no capability to
+configure the features. So the user doesn't need to define the interface
+between the VF application and the PF application. The service provider should
+take the control of all the features.
+
+The following sections describe the VFd functionality.
+
+.. Note::
+
+ Although VFd is supported by both ixgbe and i40e, please be aware that
+ since the hardware capability is different, the functions supported by
+ ixgbe and i40e are not the same.
+
+
+Preparing
+---------
+
+VFd only can be used in the scenario of DPDK PF + DPDK VF. Users should bind
+the PF port to ``igb_uio``, then create the VFs based on the DPDK PF host.
+
+The typical procedure to achieve this is as follows:
+
+#. Boot the system without iommu, or with ``iommu=pt``.
+
+#. Bind the PF port to ``igb_uio``, for example::
+
+ dpdk-devbind.py -b igb_uio 01:00.0
+
+#. Create a Virtual Function::
+
+ echo 1 > /sys/bus/pci/devices/0000:01:00.0/max_vfs
+
+#. Start a VM with the new VF port bypassed to it.
+
+#. Run a DPDK application on the PF in the host::
+
+ testpmd -l 0-7 -n 4 -- -i --txqflags=0
+
+#. Bind the VF port to ``igb_uio`` in the VM::
+
+ dpdk-devbind.py -b igb_uio 03:00.0
+
+#. Run a DPDK application on the VF in the VM::
+
+ testpmd -l 0-7 -n 4 -- -i --txqflags=0
+
+
+Common functions of IXGBE and I40E
+----------------------------------
+
+The following sections show how to enable PF/VF functionality based on the
+above testpmd setup.
+
+
+TX loopback
+~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to set TX loopback::
+
+ set tx loopback 0 on|off
+
+This sets whether the PF port and all the VF ports that belong to it are
+allowed to send the packets to other virtual ports.
+
+Although it is a VFd function, it is the global setting for the whole
+physical port. When using this function, the PF and all the VFs TX loopback
+will be enabled/disabled.
+
+
+VF MAC address setting
+~~~~~~~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to set the MAC address for a VF port::
+
+ set vf mac addr 0 0 A0:36:9F:7B:C3:51
+
+This testpmd runtime command will change the MAC address of the VF port to
+this new address. If any other addresses are set before, they will be
+overwritten.
+
+
+VF MAC anti-spoofing
+~~~~~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to enable/disable the MAC
+anti-spoofing for a VF port::
+
+ set vf mac antispoof 0 0 on|off
+
+When enabling the MAC anti-spoofing, the port will not forward packets whose
+source MAC address is not the same as the port.
+
+
+VF VLAN anti-spoofing
+~~~~~~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to enable/disable the VLAN
+anti-spoofing for a VF port::
+
+ set vf vlan antispoof 0 0 on|off
+
+When enabling the VLAN anti-spoofing, the port will not send packets whose
+VLAN ID does not belong to VLAN IDs that this port can receive.
+
+
+VF VLAN insertion
+~~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to set the VLAN insertion for a VF
+port::
+
+ set vf vlan insert 0 0 1
+
+When using this testpmd runtime command, an assigned VLAN ID can be inserted
+to the transmitted packets by the hardware.
+
+The assigned VLAN ID can be 0. It means disabling the VLAN insertion.
+
+
+VF VLAN stripping
+~~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to enable/disable the VLAN stripping
+for a VF port::
+
+ set vf vlan stripq 0 0 on|off
+
+This testpmd runtime command is used to enable/disable the RX VLAN stripping
+for a specific VF port.
+
+
+VF VLAN filtering
+~~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to set the VLAN filtering for a VF
+port::
+
+ rx_vlan add 1 port 0 vf 1
+ rx_vlan rm 1 port 0 vf 1
+
+These two testpmd runtime commands can be used to add or remove the VLAN
+filter for several VF ports. When the VLAN filters are added only the packets
+that have the assigned VLAN IDs can be received. Other packets will be dropped
+by hardware.
+
+
+The IXGBE specific VFd functions
+--------------------------------
+
+The functions in this section are specific to the ixgbe driver.
+
+
+All queues drop
+~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to enable/disable the all queues
+drop::
+
+ set all queues drop on|off
+
+This is a global setting for the PF and all the VF ports of the physical port.
+
+Enabling the ``all queues drop`` feature means that when there is no available
+descriptor for the received packets they are dropped. The ``all queues drop``
+feature should be enabled in SR-IOV mode to avoid one queue blocking others.
+
+
+VF packet drop
+~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to enable/disable the packet drop for
+a specific VF::
+
+ set vf split drop 0 0 on|off
+
+This is a similar function as ``all queues drop``. The difference is that this
+function is per VF setting and the previous function is a global setting.
+
+
+VF rate limit
+~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to all queues' rate limit for a
+specific VF::
+
+ set port 0 vf 0 rate 10 queue_mask 1
+
+This is a function to set the rate limit for all the queues in the
+``queue_mask`` bitmap. It is not used to set the summary of the rate
+limit. The rate limit of every queue will be set equally to the assigned rate
+limit.
+
+
+VF RX enabling
+~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to enable/disable packet receiving for
+a specific VF::
+
+ set port 0 vf 0 rx on|off
+
+This function can be used to stop/start packet receiving on a VF.
+
+
+VF TX enabling
+~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to enable/disable packet transmitting
+for a specific VF::
+
+ set port 0 vf 0 tx on|off
+
+This function can be used to stop/start packet transmitting on a VF.
+
+
+VF RX mode setting
+~~~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to set the RX mode for a specific VF::
+
+ set port 0 vf 0 rxmode AUPE|ROPE|BAM|MPE on|off
+
+This function can be used to enable/disable some RX modes on the VF, including:
+
+* If it accept untagged packets.
+* If it accepts packets matching the MAC filters.
+* If it accept MAC broadcast packets,
+* If it enables MAC multicast promiscuous mode.
+
+
+The I40E specific VFd functions
+-------------------------------
+
+The functions in this section are specific to the i40e driver.
+
+
+VF statistics
+~~~~~~~~~~~~~
+
+This provides an API to get the a specific VF's statistic from PF.
+
+
+VF statistics resetting
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This provides an API to rest the a specific VF's statistic from PF.
+
+
+VF link status change notification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This provide an API to let a specific VF know if the physical link status
+changed.
+
+Normally if a VF received this notification, the driver should notify the
+application to reset the VF port.
+
+
+VF MAC broadcast setting
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to enable/disable MAC broadcast packet
+receiving for a specific VF::
+
+ set vf broadcast 0 0 on|off
+
+
+VF MAC multicast promiscuous mode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to enable/disable MAC multicast
+promiscuous mode for a specific VF::
+
+ set vf allmulti 0 0 on|off
+
+
+VF MAC unicast promiscuous mode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to enable/disable MAC unicast
+promiscuous mode for a specific VF::
+
+ set vf promisc 0 0 on|off
+
+
+VF max bandwidth
+~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to set the TX maximum bandwidth for a
+specific VF::
+
+ set vf tx max-bandwidth 0 0 2000
+
+The maximum bandwidth is an absolute value in Mbps.
+
+
+VF TC bandwidth allocation
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to set the TCs (traffic class) TX
+bandwidth allocation for a specific VF::
+
+ set vf tc tx min-bandwidth 0 0 (20,20,20,40)
+
+The allocated bandwidth should be set for all the TCs. The allocated bandwidth
+is a relative value as a percentage. The sum of all the bandwidth should
+be 100.
+
+
+VF TC max bandwidth
+~~~~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to set the TCs TX maximum bandwidth
+for a specific VF::
+
+ set vf tc tx max-bandwidth 0 0 0 10000
+
+The maximum bandwidth is an absolute value in Mbps.
+
+
+TC strict priority scheduling
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Run a testpmd runtime command on the PF to enable/disable several TCs TX
+strict priority scheduling::
+
+ set tx strict-link-priority 0 0x3
+
+The 0 in the TC bitmap means disabling the strict priority scheduling for this
+TC. To enable use a value of 1.
diff --git a/doc/guides/howto/virtio_user_as_exceptional_path.rst b/doc/guides/howto/virtio_user_as_exceptional_path.rst
new file mode 100644
index 00000000..0bbcd3fd
--- /dev/null
+++ b/doc/guides/howto/virtio_user_as_exceptional_path.rst
@@ -0,0 +1,142 @@
+.. BSD LICENSE
+ Copyright(c) 2016 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+.. _virtio_user_as_excpetional_path:
+
+Virtio_user as Exceptional Path
+===============================
+
+The virtual device, virtio-user, was originally introduced with vhost-user
+backend, as a high performance solution for IPC (Inter-Process Communication)
+and user space container networking.
+
+Virtio_user with vhost-kernel backend is a solution for exceptional path,
+such as KNI which exchanges packets with kernel networking stack. This
+solution is very promising in:
+
+* Maintenance
+
+ All kernel modules needed by this solution, vhost and vhost-net (kernel),
+ are upstreamed and extensively used kernel module.
+
+* Features
+
+ vhost-net is born to be a networking solution, which has lots of networking
+ related featuers, like multi queue, tso, multi-seg mbuf, etc.
+
+* Performance
+
+ similar to KNI, this solution would use one or more kthreads to
+ send/receive packets from user space DPDK applications, which has little
+ impact on user space polling thread (except that it might enter into kernel
+ space to wake up those kthreads if necessary).
+
+The overview of an application using virtio-user as exceptional path is shown
+in :numref:`figure_virtio_user_as_exceptional_path`.
+
+.. _figure_virtio_user_as_exceptional_path:
+
+.. figure:: img/virtio_user_as_exceptional_path.*
+
+ Overview of a DPDK app using virtio-user as excpetional path
+
+
+Sample Usage
+------------
+
+As a prerequisite, the vhost/vhost-net kernel CONFIG should be chosen before
+compiling the kernel and those kernel modules should be inserted.
+
+#. Compile DPDK and bind a physical NIC to igb_uio/uio_pci_generic/vfio-pci.
+
+ This physical NIC is for communicating with outside.
+
+#. Run testpmd.
+
+ .. code-block:: console
+
+ $(testpmd) -l 2-3 -n 4 \
+ --vdev=virtio_user0,path=/dev/vhost-net,queue_size=1024 \
+ -- -i --txqflags=0x0 --disable-hw-vlan --enable-lro \
+ --enable-rx-cksum --rxd=1024 --txd=1024
+
+ This command runs testpmd with two ports, one physical NIC to communicate
+ with outside, and one virtio-user to communicate with kernel.
+
+* ``--enable-lro``
+
+ This is used to negotiate VIRTIO_NET_F_GUEST_TSO4 and
+ VIRTIO_NET_F_GUEST_TSO6 feature so that large packets from kernel can be
+ transmitted DPDK application and further TSOed by physical NIC.
+
+* ``--enable-rx-cksum``
+
+ This is used to negotiate VIRTIO_NET_F_GUEST_CSUM so that packets from
+ kernel can be deemed as valid Rx checksumed.
+
+* ``queue_size``
+
+ 256 by default. To avoid shortage of descriptors, we can increase it to 1024.
+
+* ``queues``
+
+ Number of multi-queues. Each qeueue will be served by a kthread. For example:
+
+ .. code-block:: console
+
+ $(testpmd) -l 2-3 -n 4 \
+ --vdev=virtio_user0,path=/dev/vhost-net,queues=2,queue_size=1024 \
+ -- -i --txqflags=0x0 --disable-hw-vlan --enable-lro \
+ --enable-rx-cksum --txq=2 --rxq=2 --rxd=1024 \
+ --txd=1024
+
+#. Start testpmd:
+
+ .. code-block:: console
+
+ (testpmd) start
+
+#. Configure IP address and start tap:
+
+ .. code-block:: console
+
+ ifconfig tap0 1.1.1.1/24 up
+
+.. note::
+
+ The tap device will be named tap0, tap1, etc, by kernel.
+
+Then, all traffic from physical NIC can be forwarded into kernel stack, and all
+traffic on the tap0 can be sent out from physical NIC.
+
+Limitations
+-----------
+
+This solution is only available on Linux systems.
diff --git a/doc/guides/howto/virtio_user_for_container_networking.rst b/doc/guides/howto/virtio_user_for_container_networking.rst
new file mode 100644
index 00000000..f71d0718
--- /dev/null
+++ b/doc/guides/howto/virtio_user_for_container_networking.rst
@@ -0,0 +1,144 @@
+.. BSD LICENSE
+ Copyright(c) 2016 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+.. _virtio_user_for_container_networking:
+
+Virtio_user for Container Networking
+====================================
+
+Container becomes more and more popular for strengths, like low overhead, fast
+boot-up time, and easy to deploy, etc. How to use DPDK to accelerate container
+networking becomes a common question for users. There are two use models of
+running DPDK inside containers, as shown in
+:numref:`figure_use_models_for_running_dpdk_in_containers`.
+
+.. _figure_use_models_for_running_dpdk_in_containers:
+
+.. figure:: img/use_models_for_running_dpdk_in_containers.*
+
+ Use models of running DPDK inside container
+
+This page will only cover aggregation model.
+
+Overview
+--------
+
+The virtual device, virtio-user, with unmodified vhost-user backend, is designed
+for high performance user space container networking or inter-process
+communication (IPC).
+
+The overview of accelerating container networking by virtio-user is shown
+in :numref:`figure_virtio_user_for_container_networking`.
+
+.. _figure_virtio_user_for_container_networking:
+
+.. figure:: img/virtio_user_for_container_networking.*
+
+ Overview of accelerating container networking by virtio-user
+
+Different virtio PCI devices we usually use as a para-virtualization I/O in the
+context of QEMU/VM, the basic idea here is to present a kind of virtual devices,
+which can be attached and initialized by DPDK. The device emulation layer by
+QEMU in VM's context is saved by just registering a new kind of virtual device
+in DPDK's ether layer. And to minimize the change, we reuse already-existing
+virtio PMD code (driver/net/virtio/).
+
+Virtio, in essence, is a shm-based solution to transmit/receive packets. How is
+memory shared? In VM's case, qemu always shares the whole physical layout of VM
+to vhost backend. But it's not feasible for a container, as a process, to share
+all virtual memory regions to backend. So only those virtual memory regions
+(aka, hugepages initialized in DPDK) are sent to backend. It restricts that only
+addresses in these areas can be used to transmit or receive packets.
+
+Sample Usage
+------------
+
+Here we use Docker as container engine. It also applies to LXC, Rocket with
+some minor changes.
+
+#. Compile DPDK.
+
+ .. code-block:: console
+
+ make install RTE_SDK=`pwd` T=x86_64-native-linuxapp-gcc
+
+#. Write a Dockerfile like below.
+
+ .. code-block:: console
+
+ cat <<EOT >> Dockerfile
+ FROM ubuntu:latest
+ WORKDIR /usr/src/dpdk
+ COPY . /usr/src/dpdk
+ ENV PATH "$PATH:/usr/src/dpdk/x86_64-native-linuxapp-gcc/app/"
+ EOT
+
+#. Build a Docker image.
+
+ .. code-block:: console
+
+ docker build -t dpdk-app-testpmd .
+
+#. Start a testpmd on the host with a vhost-user port.
+
+ .. code-block:: console
+
+ $(testpmd) -l 0-1 -n 4 --socket-mem 1024,1024 \
+ --vdev 'eth_vhost0,iface=/tmp/sock0' \
+ --file-prefix=host --no-pci -- -i
+
+#. Start a container instance with a virtio-user port.
+
+ .. code-block:: console
+
+ docker run -i -t -v /tmp/sock0:/var/run/usvhost \
+ -v /dev/hugepages:/dev/hugepages \
+ dpdk-app-testpmd testpmd -l 6-7 -n 4 -m 1024 --no-pci \
+ --vdev=virtio_user0,path=/var/run/usvhost \
+ --file-prefix=container \
+ -- -i --txqflags=0xf00 --disable-hw-vlan
+
+Note: If we run all above setup on the host, it's a shm-based IPC.
+
+Limitations
+-----------
+
+We have below limitations in this solution:
+ * Cannot work with --huge-unlink option. As we need to reopen the hugepage
+ file to share with vhost backend.
+ * Cannot work with --no-huge option. Currently, DPDK uses anonymous mapping
+ under this option which cannot be reopened to share with vhost backend.
+ * Cannot work when there are more than VHOST_MEMORY_MAX_NREGIONS(8) hugepages.
+ In another word, do not use 2MB hugepage so far.
+ * Applications should not use file name like HUGEFILE_FMT ("%smap_%d"). That
+ will bring confusion when sharing hugepage files with backend by name.
+ * Root privilege is a must. DPDK resolves physical addresses of hugepages
+ which seems not necessary, and some discussions are going on to remove this
+ restriction.
diff --git a/doc/guides/index.rst b/doc/guides/index.rst
index 82b00e97..63716b09 100644
--- a/doc/guides/index.rst
+++ b/doc/guides/index.rst
@@ -43,6 +43,7 @@ DPDK documentation
testpmd_app_ug/index
nics/index
cryptodevs/index
+ eventdevs/index
xen/index
contributing/index
rel_notes/index
diff --git a/doc/guides/linux_gsg/build_dpdk.rst b/doc/guides/linux_gsg/build_dpdk.rst
index 527c38dc..cf6c06d6 100644
--- a/doc/guides/linux_gsg/build_dpdk.rst
+++ b/doc/guides/linux_gsg/build_dpdk.rst
@@ -45,8 +45,8 @@ First, uncompress the archive and move to the uncompressed DPDK source directory
.. code-block:: console
- unzip DPDK-<version>.zip
- cd DPDK-<version>
+ tar xJf dpdk-<version>.tar.xz
+ cd dpdk-<version>
The DPDK is composed of several directories:
@@ -58,7 +58,7 @@ The DPDK is composed of several directories:
* examples: Source code of DPDK application examples
-* config, tools, scripts, mk: Framework-related makefiles, scripts and configuration
+* config, buildtools, mk: Framework-related makefiles, scripts and configuration
Installation of DPDK Target Environments
----------------------------------------
@@ -155,6 +155,10 @@ can provide the uio capability. This module can be loaded using the command
sudo modprobe uio_pci_generic
+.. note::
+
+ ``uio_pci_generic`` module doesn't support the creation of virtual functions.
+
As an alternative to the ``uio_pci_generic``, the DPDK also includes the igb_uio
module which can be found in the kmod subdirectory referred to above. It can
be loaded as shown below:
@@ -187,8 +191,12 @@ however please consult your distributions documentation to make sure that is the
Also, to use VFIO, both kernel and BIOS must support and be configured to use IO virtualization (such as Intel® VT-d).
+.. note::
+
+ ``vfio-pci`` module doesn't support the creation of virtual functions.
+
For proper operation of VFIO when running DPDK applications as a non-privileged user, correct permissions should also be set up.
-This can be done by using the DPDK setup script (called dpdk-setup.sh and located in the tools directory).
+This can be done by using the DPDK setup script (called dpdk-setup.sh and located in the usertools directory).
.. _linux_gsg_binding_kernel:
@@ -208,7 +216,7 @@ Any network ports under Linux* control will be ignored by the DPDK poll-mode dri
To bind ports to the ``uio_pci_generic``, ``igb_uio`` or ``vfio-pci`` module for DPDK use,
and then subsequently return ports to Linux* control,
-a utility script called dpdk_nic _bind.py is provided in the tools subdirectory.
+a utility script called dpdk-devbind.py is provided in the usertools subdirectory.
This utility can be used to provide a view of the current state of the network ports on the system,
and to bind and unbind those ports from the different kernel modules, including the uio and vfio modules.
The following are some examples of how the script can be used.
@@ -235,7 +243,7 @@ To see the status of all network ports on the system:
.. code-block:: console
- ./tools/dpdk-devbind.py --status
+ ./usertools/dpdk-devbind.py --status
Network devices using DPDK-compatible driver
============================================
@@ -257,16 +265,16 @@ To bind device ``eth1``,``04:00.1``, to the ``uio_pci_generic`` driver:
.. code-block:: console
- ./tools/dpdk-devbind.py --bind=uio_pci_generic 04:00.1
+ ./usertools/dpdk-devbind.py --bind=uio_pci_generic 04:00.1
or, alternatively,
.. code-block:: console
- ./tools/dpdk-devbind.py --bind=uio_pci_generic eth1
+ ./usertools/dpdk-devbind.py --bind=uio_pci_generic eth1
To restore device ``82:00.0`` to its original kernel binding:
.. code-block:: console
- ./tools/dpdk-devbind.py --bind=ixgbe 82:00.0
+ ./usertools/dpdk-devbind.py --bind=ixgbe 82:00.0
diff --git a/doc/guides/linux_gsg/build_sample_apps.rst b/doc/guides/linux_gsg/build_sample_apps.rst
index e53bd517..12fefffd 100644
--- a/doc/guides/linux_gsg/build_sample_apps.rst
+++ b/doc/guides/linux_gsg/build_sample_apps.rst
@@ -112,15 +112,16 @@ The following is the list of options that can be given to the EAL:
.. code-block:: console
- ./rte-app -c COREMASK [-n NUM] [-b <domain:bus:devid.func>] \
- [--socket-mem=MB,...] [-m MB] [-r NUM] [-v] [--file-prefix] \
+ ./rte-app [-c COREMASK | -l CORELIST] [-n NUM] [-b <domain:bus:devid.func>] \
+ [--socket-mem=MB,...] [-d LIB.so|DIR] [-m MB] [-r NUM] [-v] [--file-prefix] \
[--proc-type <primary|secondary|auto>] [-- xen-dom0]
The EAL options are as follows:
-* ``-c COREMASK``:
+* ``-c COREMASK`` or ``-l CORELIST``:
An hexadecimal bit mask of the cores to run on. Note that core numbering can
- change between platforms and should be determined beforehand.
+ change between platforms and should be determined beforehand. The corelist is
+ a set of core numbers instead of a bitmap core mask.
* ``-n NUM``:
Number of memory channels per processor socket.
@@ -136,6 +137,11 @@ The EAL options are as follows:
* ``--socket-mem``:
Memory to allocate from hugepages on specific sockets.
+* ``-d``:
+ Add a driver or driver directory to be loaded.
+ The application should use this option to load the pmd drivers
+ that are built as shared libraries.
+
* ``-m MB``:
Memory to allocate from hugepages, regardless of processor socket. It is
recommended that ``--socket-mem`` be used instead of this option.
@@ -167,13 +173,13 @@ The EAL options are as follows:
* ``--vfio-intr``:
Specify interrupt type to be used by VFIO (has no effect if VFIO is not used).
-The ``-c`` and option is mandatory; the others are optional.
+The ``-c`` or ``-l`` and option is mandatory; the others are optional.
Copy the DPDK application binary to your target, then run the application as follows
(assuming the platform has four memory channels per processor socket,
and that cores 0-3 are present and are to be used for running the application)::
- ./helloworld -c f -n 4
+ ./helloworld -l 0-3 -n 4
.. note::
@@ -185,10 +191,10 @@ and that cores 0-3 are present and are to be used for running the application)::
Logical Core Use by Applications
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The coremask parameter is always mandatory for DPDK applications.
-Each bit of the mask corresponds to the equivalent logical core number as reported by Linux.
+The coremask (-c 0x0f) or corelist (-l 0-3) parameter is always mandatory for DPDK applications.
+Each bit of the mask corresponds to the equivalent logical core number as reported by Linux. The preferred corelist option is a cleaner method to define cores to be used.
Since these logical core numbers, and their mapping to specific cores on specific NUMA sockets, can vary from platform to platform,
-it is recommended that the core layout for each platform be considered when choosing the coremask to use in each case.
+it is recommended that the core layout for each platform be considered when choosing the coremask/corelist to use in each case.
On initialization of the EAL layer by an DPDK application, the logical cores to be used and their socket location are displayed.
This information can also be determined for all cores on the system by examining the ``/proc/cpuinfo`` file, for example, by running cat ``/proc/cpuinfo``.
@@ -205,7 +211,7 @@ This can be useful when using other processors to understand the mapping of the
.. warning::
- The logical core layout can change between different board layouts and should be checked before selecting an application coremask.
+ The logical core layout can change between different board layouts and should be checked before selecting an application coremask/corelist.
Hugepage Memory Use by Applications
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/linux_gsg/nic_perf_intel_platform.rst b/doc/guides/linux_gsg/nic_perf_intel_platform.rst
index d4a83624..709113dc 100644
--- a/doc/guides/linux_gsg/nic_perf_intel_platform.rst
+++ b/doc/guides/linux_gsg/nic_perf_intel_platform.rst
@@ -158,7 +158,7 @@ Configurations before running DPDK
cd dpdk_folder
- tools/cpu_layout.py
+ usertools/cpu_layout.py
Or run ``lscpu`` to check the the cores on each socket.
@@ -192,10 +192,10 @@ Configurations before running DPDK
# Bind ports 82:00.0 and 85:00.0 to dpdk driver
- ./dpdk_folder/tools/dpdk-devbind.py -b igb_uio 82:00.0 85:00.0
+ ./dpdk_folder/usertools/dpdk-devbind.py -b igb_uio 82:00.0 85:00.0
# Check the port driver status
- ./dpdk_folder/tools/dpdk-devbind.py --status
+ ./dpdk_folder/usertools/dpdk-devbind.py --status
See ``dpdk-devbind.py --help`` for more details.
@@ -246,7 +246,7 @@ See :numref:`figure_intel_perf_test_setup` for the performance test setup.
7. The command line of running l3fwd would be something like the followings::
- ./l3fwd -c 0x3c0000 -n 4 -w 82:00.0 -w 85:00.0 \
+ ./l3fwd -l 18-21 -n 4 -w 82:00.0 -w 85:00.0 \
-- -p 0x3 --config '(0,0,18),(0,1,19),(1,0,20),(1,1,21)'
This means that the application uses core 18 for port 0, queue pair 0 forwarding, core 19 for port 0, queue pair 1 forwarding,
diff --git a/doc/guides/linux_gsg/quick_start.rst b/doc/guides/linux_gsg/quick_start.rst
index 6e858c2a..39675db5 100644
--- a/doc/guides/linux_gsg/quick_start.rst
+++ b/doc/guides/linux_gsg/quick_start.rst
@@ -33,7 +33,7 @@
Quick Start Setup Script
========================
-The dpdk-setup.sh script, found in the tools subdirectory, allows the user to perform the following tasks:
+The dpdk-setup.sh script, found in the usertools subdirectory, allows the user to perform the following tasks:
* Build the DPDK libraries
@@ -108,7 +108,7 @@ Some options in the script prompt the user for further data before proceeding.
.. code-block:: console
- source tools/dpdk-setup.sh
+ source usertools/dpdk-setup.sh
------------------------------------------------------------------------
@@ -282,7 +282,7 @@ the logical core layout of the platform should be determined when selecting a co
INSTALL-APP helloworld
INSTALL-MAP helloworld.map
- sudo ./build/app/helloworld -c 0xf -n 3
+ sudo ./build/app/helloworld -l 0-3 -n 3
[sudo] password for rte:
EAL: coremask set to f
diff --git a/doc/guides/linux_gsg/sys_reqs.rst b/doc/guides/linux_gsg/sys_reqs.rst
index 3d743421..3a28c9e5 100644
--- a/doc/guides/linux_gsg/sys_reqs.rst
+++ b/doc/guides/linux_gsg/sys_reqs.rst
@@ -79,14 +79,12 @@ Compilation of the DPDK
* glibc.ppc64, libgcc.ppc64, libstdc++.ppc64 and glibc-devel.ppc64 for IBM ppc_64;
-.. note::
-
- x86_x32 ABI is currently supported with distribution packages only on Ubuntu
- higher than 13.10 or recent Debian distribution. The only supported compiler is gcc 4.9+.
+ .. note::
-.. note::
+ x86_x32 ABI is currently supported with distribution packages only on Ubuntu
+ higher than 13.10 or recent Debian distribution. The only supported compiler is gcc 4.9+.
- Python, version 2.6 or 2.7, to use various helper scripts included in the DPDK package.
+* Python, version 2.7+ or 3.2+, to use various helper scripts included in the DPDK package.
**Optional Tools:**
@@ -202,6 +200,12 @@ On a NUMA machine, pages should be allocated explicitly on separate nodes::
For 1G pages, it is not possible to reserve the hugepage memory after the system has booted.
+ On IBM POWER system, the nr_overcommit_hugepages should be set to the same value as nr_hugepages.
+ For example, if the required page number is 128, the following commands are used::
+
+ echo 128 > /sys/kernel/mm/hugepages/hugepages-16384kB/nr_hugepages
+ echo 128 > /sys/kernel/mm/hugepages/hugepages-16384kB/nr_overcommit_hugepages
+
Using Hugepages with the DPDK
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/guides/nics/ark.rst b/doc/guides/nics/ark.rst
new file mode 100644
index 00000000..a7c2590b
--- /dev/null
+++ b/doc/guides/nics/ark.rst
@@ -0,0 +1,261 @@
+.. BSD LICENSE
+
+ Copyright (c) 2015-2017 Atomic Rules LLC
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Atomic Rules LLC nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ARK Poll Mode Driver
+====================
+
+The ARK PMD is a DPDK poll-mode driver for the Atomic Rules Arkville
+(ARK) family of devices.
+
+More information can be found at the `Atomic Rules website
+<http://atomicrules.com>`_.
+
+Overview
+--------
+
+The Atomic Rules Arkville product is DPDK and AXI compliant product
+that marshals packets across a PCIe conduit between host DPDK mbufs and
+FPGA AXI streams.
+
+The ARK PMD, and the spirit of the overall Arkville product,
+has been to take the DPDK API/ABI as a fixed specification;
+then implement much of the business logic in FPGA RTL circuits.
+The approach of *working backwards* from the DPDK API/ABI and having
+the GPP host software *dictate*, while the FPGA hardware *copes*,
+results in significant performance gains over a naive implementation.
+
+While this document describes the ARK PMD software, it is helpful to
+understand what the FPGA hardware is and is not. The Arkville RTL
+component provides a single PCIe Physical Function (PF) supporting
+some number of RX/Ingress and TX/Egress Queues. The ARK PMD controls
+the Arkville core through a dedicated opaque Core BAR (CBAR).
+To allow users full freedom for their own FPGA application IP,
+an independent FPGA Application BAR (ABAR) is provided.
+
+One popular way to imagine Arkville's FPGA hardware aspect is as the
+FPGA PCIe-facing side of a so-called Smart NIC. The Arkville core does
+not contain any MACs, and is link-speed independent, as well as
+agnostic to the number of physical ports the application chooses to
+use. The ARK driver exposes the familiar PMD interface to allow packet
+movement to and from mbufs across multiple queues.
+
+However FPGA RTL applications could contain a universe of added
+functionality that an Arkville RTL core does not provide or can
+not anticipate. To allow for this expectation of user-defined
+innovation, the ARK PMD provides a dynamic mechanism of adding
+capabilities without having to modify the ARK PMD.
+
+The ARK PMD is intended to support all instances of the Arkville
+RTL Core, regardless of configuration, FPGA vendor, or target
+board. While specific capabilities such as number of physical
+hardware queue-pairs are negotiated; the driver is designed to
+remain constant over a broad and extendable feature set.
+
+Intentionally, Arkville by itself DOES NOT provide common NIC
+capabilities such as offload or receive-side scaling (RSS).
+These capabilities would be viewed as a gate-level "tax" on
+Green-box FPGA applications that do not require such function.
+Instead, they can be added as needed with essentially no
+overhead to the FPGA Application.
+
+The ARK PMD also supports optional user extensions, through dynamic linking.
+The ARK PMD user extensions are a feature of Arkville’s DPDK
+net/ark poll mode driver, allowing users to add their
+own code to extend the net/ark functionality without
+having to make source code changes to the driver. One motivation for
+this capability is that while DPDK provides a rich set of functions
+to interact with NIC-like capabilities (e.g. MAC addresses and statistics),
+the Arkville RTL IP does not include a MAC. Users can supply their
+own MAC or custom FPGA applications, which may require control from
+the PMD. The user extension is the means providing the control
+between the user's FPGA application and the existing DPDK features via
+the PMD.
+
+Device Parameters
+-------------------
+
+The ARK PMD supports device parameters that are used for packet
+routing and for internal packet generation and packet checking. This
+section describes the supported parameters. These features are
+primarily used for diagnostics, testing, and performance verification
+under the guidance of an Arkville specialist. The nominal use of
+Arkville does not require any configuration using these parameters.
+
+"Pkt_dir"
+
+The Packet Director controls connectivity between Arkville's internal
+hardware components. The features of the Pkt_dir are only used for
+diagnostics and testing; it is not intended for nominal use. The full
+set of features are not published at this level.
+
+Format:
+Pkt_dir=0x00110F10
+
+"Pkt_gen"
+
+The packet generator parameter takes a file as its argument. The file
+contains configuration parameters used internally for regression
+testing and are not intended to be published at this level. The
+packet generator is an internal Arkville hardware component.
+
+Format:
+Pkt_gen=./config/pg.conf
+
+"Pkt_chkr"
+
+The packet checker parameter takes a file as its argument. The file
+contains configuration parameters used internally for regression
+testing and are not intended to be published at this level. The
+packet checker is an internal Arkville hardware component.
+
+Format:
+Pkt_chkr=./config/pc.conf
+
+
+Data Path Interface
+-------------------
+
+Ingress RX and Egress TX operation is by the nominal DPDK API .
+The driver supports single-port, multi-queue for both RX and TX.
+
+Refer to ``ark_ethdev.h`` for the list of supported methods to
+act upon RX and TX Queues.
+
+Configuration Information
+-------------------------
+
+**DPDK Configuration Parameters**
+
+ The following configuration options are available for the ARK PMD:
+
+ * **CONFIG_RTE_LIBRTE_ARK_PMD** (default y): Enables or disables inclusion
+ of the ARK PMD driver in the DPDK compilation.
+
+ * **CONFIG_RTE_LIBRTE_ARK_PAD_TX** (default y): When enabled TX
+ packets are padded to 60 bytes to support downstream MACS.
+
+ * **CONFIG_RTE_LIBRTE_ARK_DEBUG_RX** (default n): Enables or disables debug
+ logging and internal checking of RX ingress logic within the ARK PMD driver.
+
+ * **CONFIG_RTE_LIBRTE_ARK_DEBUG_TX** (default n): Enables or disables debug
+ logging and internal checking of TX egress logic within the ARK PMD driver.
+
+ * **CONFIG_RTE_LIBRTE_ARK_DEBUG_STATS** (default n): Enables or disables debug
+ logging of detailed packet and performance statistics gathered in
+ the PMD and FPGA.
+
+ * **CONFIG_RTE_LIBRTE_ARK_DEBUG_TRACE** (default n): Enables or disables debug
+ logging of detailed PMD events and status.
+
+
+Building DPDK
+-------------
+
+See the :ref:`DPDK Getting Started Guide for Linux <linux_gsg>` for
+instructions on how to build DPDK.
+
+By default the ARK PMD library will be built into the DPDK library.
+
+For configuring and using UIO and VFIO frameworks, please also refer :ref:`the
+documentation that comes with DPDK suite <linux_gsg>`.
+
+Supported ARK RTL PCIe Instances
+--------------------------------
+
+ARK PMD supports the following Arkville RTL PCIe instances including:
+
+* ``1d6c:100d`` - AR-ARKA-FX0 [Arkville 32B DPDK Data Mover]
+* ``1d6c:100e`` - AR-ARKA-FX1 [Arkville 64B DPDK Data Mover]
+
+Supported Operating Systems
+---------------------------
+
+Any Linux distribution fulfilling the conditions described in ``System Requirements``
+section of :ref:`the DPDK documentation <linux_gsg>` or refer to *DPDK
+Release Notes*. ARM and PowerPC architectures are not supported at this time.
+
+
+Supported Features
+------------------
+
+* Dynamic ARK PMD extensions
+* Multiple receive and transmit queues
+* Jumbo frames up to 9K
+* Hardware Statistics
+
+Unsupported Features
+--------------------
+
+Features that may be part of, or become part of, the Arkville RTL IP that are
+not currently supported or exposed by the ARK PMD include:
+
+* PCIe SR-IOV Virtual Functions (VFs)
+* Arkville's Packet Generator Control and Status
+* Arkville's Packet Director Control and Status
+* Arkville's Packet Checker Control and Status
+* Arkville's Timebase Management
+
+Pre-Requisites
+--------------
+
+#. Prepare the system as recommended by DPDK suite. This includes environment
+ variables, hugepages configuration, tool-chains and configuration
+
+#. Insert igb_uio kernel module using the command 'modprobe igb_uio'
+
+#. Bind the intended ARK device to igb_uio module
+
+At this point the system should be ready to run DPDK applications. Once the
+application runs to completion, the ARK PMD can be detached from igb_uio if necessary.
+
+Usage Example
+-------------
+
+Follow instructions available in the document
+:ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>` to launch
+**testpmd** with Atomic Rules ARK devices managed by librte_pmd_ark.
+
+Example output:
+
+.. code-block:: console
+
+ [...]
+ EAL: PCI device 0000:01:00.0 on NUMA socket -1
+ EAL: probe driver: 1d6c:100e rte_ark_pmd
+ EAL: PCI memory mapped at 0x7f9b6c400000
+ PMD: eth_ark_dev_init(): Initializing 0:2:0.1
+ ARKP PMD CommitID: 378f3a67
+ Configuring Port 0 (socket 0)
+ Port 0: DC:3C:F6:00:00:01
+ Checking link statuses...
+ Port 0 Link Up - speed 100000 Mbps - full-duplex
+ Done
+ testpmd>
diff --git a/doc/guides/nics/avp.rst b/doc/guides/nics/avp.rst
new file mode 100644
index 00000000..1fcba66c
--- /dev/null
+++ b/doc/guides/nics/avp.rst
@@ -0,0 +1,111 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Wind River Systems, Inc. rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+AVP Poll Mode Driver
+=================================================================
+
+The Accelerated Virtual Port (AVP) device is a shared memory based device
+only available on `virtualization platforms <http://www.windriver.com/products/titanium-cloud/>`_
+from Wind River Systems. The Wind River Systems virtualization platform
+currently uses QEMU/KVM as its hypervisor and as such provides support for all
+of the QEMU supported virtual and/or emulated devices (e.g., virtio, e1000,
+etc.). The platform offers the virtio device type as the default device when
+launching a virtual machine or creating a virtual machine port. The AVP device
+is a specialized device available to customers that require increased
+throughput and decreased latency to meet the demands of their performance
+focused applications.
+
+The AVP driver binds to any AVP PCI devices that have been exported by the Wind
+River Systems QEMU/KVM hypervisor. As a user of the DPDK driver API it
+supports a subset of the full Ethernet device API to enable the application to
+use the standard device configuration functions and packet receive/transmit
+functions.
+
+These devices enable optimized packet throughput by bypassing QEMU and
+delivering packets directly to the virtual switch via a shared memory
+mechanism. This provides DPDK applications running in virtual machines with
+significantly improved throughput and latency over other device types.
+
+The AVP device implementation is integrated with the QEMU/KVM live-migration
+mechanism to allow applications to seamlessly migrate from one hypervisor node
+to another with minimal packet loss.
+
+
+Features and Limitations of the AVP PMD
+---------------------------------------
+
+The AVP PMD driver provides the following functionality.
+
+* Receive and transmit of both simple and chained mbuf packets,
+
+* Chained mbufs may include up to 5 chained segments,
+
+* Up to 8 receive and transmit queues per device,
+
+* Only a single MAC address is supported,
+
+* The MAC address cannot be modified,
+
+* The maximum receive packet length is 9238 bytes,
+
+* VLAN header stripping and inserting,
+
+* Promiscuous mode
+
+* VM live-migration
+
+* PCI hotplug insertion and removal
+
+
+Prerequisites
+-------------
+
+The following prerequisites apply:
+
+* A virtual machine running in a Wind River Systems virtualization
+ environment and configured with at least one neutron port defined with a
+ vif-model set to "avp".
+
+
+Launching a VM with an AVP type network attachment
+--------------------------------------------------
+
+The following example will launch a VM with three network attachments. The
+first attachment will have a default vif-model of "virtio". The next two
+network attachments will have a vif-model of "avp" and may be used with a DPDK
+application which is built to include the AVP PMD driver.
+
+.. code-block:: console
+
+ nova boot --flavor small --image my-image \
+ --nic net-id=${NETWORK1_UUID} \
+ --nic net-id=${NETWORK2_UUID},vif-model=avp \
+ --nic net-id=${NETWORK3_UUID},vif-model=avp \
+ --security-group default my-instance1
diff --git a/doc/guides/nics/bnx2x.rst b/doc/guides/nics/bnx2x.rst
index 6d1768a5..fbfc048e 100644
--- a/doc/guides/nics/bnx2x.rst
+++ b/doc/guides/nics/bnx2x.rst
@@ -96,9 +96,11 @@ Config File Options
The following options can be modified in the ``.config`` file. Please note that
enabling debugging options may affect system performance.
-- ``CONFIG_RTE_LIBRTE_BNX2X_PMD`` (default **y**)
+- ``CONFIG_RTE_LIBRTE_BNX2X_PMD`` (default **n**)
- Toggle compilation of bnx2x driver.
+ Toggle compilation of bnx2x driver. To use bnx2x PMD set this config parameter
+ to 'y'. Also, in order for firmware binary to load user will need zlib devel
+ package installed.
- ``CONFIG_RTE_LIBRTE_BNX2X_DEBUG`` (default **n**)
@@ -123,143 +125,14 @@ enabling debugging options may affect system performance.
.. _bnx2x_driver-compilation:
-Driver Compilation
-~~~~~~~~~~~~~~~~~~
-
-BNX2X PMD for Linux x86_64 gcc target, run the following "make"
-command::
-
- cd <DPDK-source-directory>
- make config T=x86_64-native-linuxapp-gcc install
-
-To compile BNX2X PMD for Linux x86_64 clang target, run the following "make"
-command::
-
- cd <DPDK-source-directory>
- make config T=x86_64-native-linuxapp-clang install
-
-To compile BNX2X PMD for Linux i686 gcc target, run the following "make"
-command::
-
- cd <DPDK-source-directory>
- make config T=i686-native-linuxapp-gcc install
-
-To compile BNX2X PMD for Linux i686 gcc target, run the following "make"
-command:
-
-.. code-block:: console
-
- cd <DPDK-source-directory>
- make config T=i686-native-linuxapp-gcc install
-
-To compile BNX2X PMD for FreeBSD x86_64 clang target, run the following "gmake"
-command::
-
- cd <DPDK-source-directory>
- gmake config T=x86_64-native-bsdapp-clang install
-
-To compile BNX2X PMD for FreeBSD x86_64 gcc target, run the following "gmake"
-command::
-
- cd <DPDK-source-directory>
- gmake config T=x86_64-native-bsdapp-gcc install -Wl,-rpath=/usr/local/lib/gcc49 CC=gcc49
-
-To compile BNX2X PMD for FreeBSD x86_64 gcc target, run the following "gmake"
-command:
-
-.. code-block:: console
-
- cd <DPDK-source-directory>
- gmake config T=x86_64-native-bsdapp-gcc install -Wl,-rpath=/usr/local/lib/gcc49 CC=gcc49
-
-Linux
------
-
-.. _bnx2x_Linux-installation:
-
-Linux Installation
-~~~~~~~~~~~~~~~~~~
-
-Sample Application Notes
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-This section demonstrates how to launch ``testpmd`` with QLogic 578xx
-devices managed by ``librte_pmd_bnx2x`` in Linux operating system.
-
-#. Request huge pages:
-
- .. code-block:: console
-
- echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages/nr_hugepages
-
-#. Load ``igb_uio`` or ``vfio-pci`` driver:
-
- .. code-block:: console
-
- insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
-
- or
-
- .. code-block:: console
-
- modprobe vfio-pci
-
-#. Bind the QLogic adapters to ``igb_uio`` or ``vfio-pci`` loaded in the
- previous step::
-
- ./tools/dpdk-devbind.py --bind igb_uio 0000:84:00.0 0000:84:00.1
-
- or
-
- Setup VFIO permissions for regular users and then bind to ``vfio-pci``:
-
- .. code-block:: console
-
- sudo chmod a+x /dev/vfio
-
- sudo chmod 0666 /dev/vfio/*
-
- ./tools/dpdk-devbind.py --bind vfio-pci 0000:84:00.0 0000:84:00.1
-
-#. Start ``testpmd`` with basic parameters:
-
- .. code-block:: console
-
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -- -i
-
- Example output:
-
- .. code-block:: console
+Driver compilation and testing
+------------------------------
- [...]
- EAL: PCI device 0000:84:00.0 on NUMA socket 1
- EAL: probe driver: 14e4:168e rte_bnx2x_pmd
- EAL: PCI memory mapped at 0x7f14f6fe5000
- EAL: PCI memory mapped at 0x7f14f67e5000
- EAL: PCI memory mapped at 0x7f15fbd9b000
- EAL: PCI device 0000:84:00.1 on NUMA socket 1
- EAL: probe driver: 14e4:168e rte_bnx2x_pmd
- EAL: PCI memory mapped at 0x7f14f5fe5000
- EAL: PCI memory mapped at 0x7f14f57e5000
- EAL: PCI memory mapped at 0x7f15fbd4f000
- Interactive-mode selected
- Configuring Port 0 (socket 0)
- PMD: bnx2x_dev_tx_queue_setup(): fp[00] req_bd=512, thresh=512,
- usable_bd=1020, total_bd=1024,
- tx_pages=4
- PMD: bnx2x_dev_rx_queue_setup(): fp[00] req_bd=128, thresh=0,
- usable_bd=510, total_bd=512,
- rx_pages=1, cq_pages=8
- PMD: bnx2x_print_adapter_info():
- [...]
- Checking link statuses...
- Port 0 Link Up - speed 10000 Mbps - full-duplex
- Port 1 Link Up - speed 10000 Mbps - full-duplex
- Done
- testpmd>
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
SR-IOV: Prerequisites and sample Application Notes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------
This section provides instructions to configure SR-IOV with Linux OS.
@@ -311,7 +184,6 @@ This section provides instructions to configure SR-IOV with Linux OS.
echo 2 > /sys/devices/pci0000:00/0000:00:03.0/0000:81:00.0/sriov_numvfs
-
#. Assign VF MAC address:
Assign MAC address to the VF using iproute2 utility. The syntax is:
@@ -323,9 +195,45 @@ This section provides instructions to configure SR-IOV with Linux OS.
ip link set ens5f0 vf 0 mac 52:54:00:2f:9d:e8
-
#. PCI Passthrough:
The VF devices may be passed through to the guest VM using virt-manager or
virsh etc. bnx2x PMD should be used to bind the VF devices in the guest VM
using the instructions outlined in the Application notes below.
+
+#. Running testpmd:
+
+ Follow instructions available in the document
+ :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+ to run testpmd.
+
+ Example output:
+
+ .. code-block:: console
+
+ [...]
+ EAL: PCI device 0000:84:00.0 on NUMA socket 1
+ EAL: probe driver: 14e4:168e rte_bnx2x_pmd
+ EAL: PCI memory mapped at 0x7f14f6fe5000
+ EAL: PCI memory mapped at 0x7f14f67e5000
+ EAL: PCI memory mapped at 0x7f15fbd9b000
+ EAL: PCI device 0000:84:00.1 on NUMA socket 1
+ EAL: probe driver: 14e4:168e rte_bnx2x_pmd
+ EAL: PCI memory mapped at 0x7f14f5fe5000
+ EAL: PCI memory mapped at 0x7f14f57e5000
+ EAL: PCI memory mapped at 0x7f15fbd4f000
+ Interactive-mode selected
+ Configuring Port 0 (socket 0)
+ PMD: bnx2x_dev_tx_queue_setup(): fp[00] req_bd=512, thresh=512,
+ usable_bd=1020, total_bd=1024,
+ tx_pages=4
+ PMD: bnx2x_dev_rx_queue_setup(): fp[00] req_bd=128, thresh=0,
+ usable_bd=510, total_bd=512,
+ rx_pages=1, cq_pages=8
+ PMD: bnx2x_print_adapter_info():
+ [...]
+ Checking link statuses...
+ Port 0 Link Up - speed 10000 Mbps - full-duplex
+ Port 1 Link Up - speed 10000 Mbps - full-duplex
+ Done
+ testpmd>
diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst
index ad33cd5d..9826b350 100644
--- a/doc/guides/nics/bnxt.rst
+++ b/doc/guides/nics/bnxt.rst
@@ -32,10 +32,10 @@ BNXT Poll Mode Driver
The bnxt poll mode library (**librte_pmd_bnxt**) implements support for:
- * **Broadcom NetXtreme-C®/NetXtreme-E® BCM5730X and BCM5740X family of
+ * **Broadcom NetXtreme-C®/NetXtreme-E® BCM5730X and BCM574XX family of
Ethernet Network Controllers**
- These adapters support Standards compliant 10/25/50Gbps 30MPPS
+ These adapters support Standards compliant 10/25/50/100Gbps 30MPPS
full-duplex throughput.
Information about the NetXtreme family of adapters can be found in the
diff --git a/doc/guides/nics/build_and_test.rst b/doc/guides/nics/build_and_test.rst
new file mode 100644
index 00000000..2d70af88
--- /dev/null
+++ b/doc/guides/nics/build_and_test.rst
@@ -0,0 +1,179 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Cavium, Inc.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Cavium, Inc. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+.. _pmd_build_and_test:
+
+Compiling and testing a PMD for a NIC
+=====================================
+
+This section demonstrates how to compile and run a Poll Mode Driver (PMD) for
+the available Network Interface Cards in DPDK using TestPMD.
+
+TestPMD is one of the reference applications distributed with the DPDK. Its main
+purpose is to forward packets between Ethernet ports on a network interface and
+as such is the best way to test a PMD.
+
+Refer to the :ref:`testpmd application user guide <testpmd_ug>` for detailed
+information on how to build and run testpmd.
+
+Driver Compilation
+------------------
+
+To compile a PMD for a platform, run make with appropriate target as shown below.
+Use "make" command in Linux and "gmake" in FreeBSD. This will also build testpmd.
+
+To check available targets:
+
+.. code-block:: console
+
+ cd <DPDK-source-directory>
+ make showconfigs
+
+Example output:
+
+.. code-block:: console
+
+ arm-armv7a-linuxapp-gcc
+ arm64-armv8a-linuxapp-gcc
+ arm64-dpaa2-linuxapp-gcc
+ arm64-thunderx-linuxapp-gcc
+ arm64-xgene1-linuxapp-gcc
+ i686-native-linuxapp-gcc
+ i686-native-linuxapp-icc
+ ppc_64-power8-linuxapp-gcc
+ x86_64-native-bsdapp-clang
+ x86_64-native-bsdapp-gcc
+ x86_64-native-linuxapp-clang
+ x86_64-native-linuxapp-gcc
+ x86_64-native-linuxapp-icc
+ x86_x32-native-linuxapp-gcc
+
+To compile a PMD for Linux x86_64 gcc target, run the following "make" command:
+
+.. code-block:: console
+
+ make install T=x86_64-native-linuxapp-gcc
+
+Use ARM (ThunderX, DPAA, X-Gene) or PowerPC target for respective platform.
+
+For more information, refer to the :ref:`Getting Started Guide for Linux <linux_gsg>`
+or :ref:`Getting Started Guide for FreeBSD <freebsd_gsg>` depending on your platform.
+
+Running testpmd in Linux
+------------------------
+
+This section demonstrates how to setup and run ``testpmd`` in Linux.
+
+#. Mount huge pages:
+
+ .. code-block:: console
+
+ mkdir /mnt/huge
+ mount -t hugetlbfs nodev /mnt/huge
+
+#. Request huge pages:
+
+ Hugepage memory should be reserved as per application requirement. Check
+ hugepage size configured in the system and calculate the number of pages
+ required.
+
+ To reserve 1024 pages of 2MB:
+
+ .. code-block:: console
+
+ echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+
+ .. note::
+
+ Check ``/proc/meminfo`` to find system hugepage size:
+
+ .. code-block:: console
+
+ grep "Hugepagesize:" /proc/meminfo
+
+ Example output:
+
+ .. code-block:: console
+
+ Hugepagesize: 2048 kB
+
+#. Load ``igb_uio`` or ``vfio-pci`` driver:
+
+ .. code-block:: console
+
+ modprobe uio
+ insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+
+ or
+
+ .. code-block:: console
+
+ modprobe vfio-pci
+
+#. Setup VFIO permissions for regular users before binding to ``vfio-pci``:
+
+ .. code-block:: console
+
+ sudo chmod a+x /dev/vfio
+
+ sudo chmod 0666 /dev/vfio/*
+
+#. Bind the adapters to ``igb_uio`` or ``vfio-pci`` loaded in the previous step:
+
+ .. code-block:: console
+
+ ./usertools/dpdk-devbind.py --bind igb_uio DEVICE1 DEVICE2 ...
+
+ Or setup VFIO permissions for regular users and then bind to ``vfio-pci``:
+
+ .. code-block:: console
+
+ ./usertools/dpdk-devbind.py --bind vfio-pci DEVICE1 DEVICE2 ...
+
+ .. note::
+
+ DEVICE1, DEVICE2 are specified via PCI "domain:bus:slot.func" syntax or
+ "bus:slot.func" syntax.
+
+#. Start ``testpmd`` with basic parameters:
+
+ .. code-block:: console
+
+ ./x86_64-native-linuxapp-gcc/app/testpmd -l 0-3 -n 4 -- -i
+
+ Successful execution will show initialization messages from EAL, PMD and
+ testpmd application. A prompt will be displayed at the end for user commands
+ as interactive mode (``-i``) is on.
+
+ .. code-block:: console
+
+ testpmd>
+
+ Refer to the :ref:`testpmd runtime functions <testpmd_runtime>` for a list
+ of available commands.
diff --git a/doc/guides/nics/cxgbe.rst b/doc/guides/nics/cxgbe.rst
index d8236b08..a205b43f 100644
--- a/doc/guides/nics/cxgbe.rst
+++ b/doc/guides/nics/cxgbe.rst
@@ -125,24 +125,11 @@ enabling debugging options may affect system performance.
.. _driver-compilation:
-Driver Compilation
-~~~~~~~~~~~~~~~~~~
-
-To compile CXGBE PMD for Linux x86_64 gcc target, run the following "make"
-command:
-
-.. code-block:: console
-
- cd <DPDK-source-directory>
- make config T=x86_64-native-linuxapp-gcc install
-
-To compile CXGBE PMD for FreeBSD x86_64 clang target, run the following "gmake"
-command:
-
-.. code-block:: console
+Driver compilation and testing
+------------------------------
- cd <DPDK-source-directory>
- gmake config T=x86_64-native-bsdapp-clang install
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
Linux
-----
@@ -218,13 +205,6 @@ Running testpmd
This section demonstrates how to launch **testpmd** with Chelsio T5
devices managed by librte_pmd_cxgbe in Linux operating system.
-#. Change to DPDK source directory where the target has been compiled in
- section :ref:`driver-compilation`:
-
- .. code-block:: console
-
- cd <DPDK-source-directory>
-
#. Load the kernel module:
.. code-block:: console
@@ -255,60 +235,16 @@ devices managed by librte_pmd_cxgbe in Linux operating system.
modprobe -ar cxgb4 csiostor
-#. Request huge pages:
-
- .. code-block:: console
-
- echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages/nr_hugepages
-
-#. Mount huge pages:
-
- .. code-block:: console
-
- mkdir /mnt/huge
- mount -t hugetlbfs nodev /mnt/huge
-
-#. Load igb_uio or vfio-pci driver:
-
- .. code-block:: console
-
- insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
-
- or
-
- .. code-block:: console
-
- modprobe vfio-pci
-
-#. Bind the Chelsio T5 adapters to igb_uio or vfio-pci loaded in the previous
- step:
+#. Running testpmd
- .. code-block:: console
-
- ./tools/dpdk-devbind.py --bind igb_uio 0000:02:00.4
-
- or
-
- Setup VFIO permissions for regular users and then bind to vfio-pci:
-
- .. code-block:: console
-
- sudo chmod a+x /dev/vfio
-
- sudo chmod 0666 /dev/vfio/*
-
- ./tools/dpdk-devbind.py --bind vfio-pci 0000:02:00.4
+ Follow instructions available in the document
+ :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+ to run testpmd.
.. note::
Currently, CXGBE PMD only supports the binding of PF4 for Chelsio T5 NICs.
-#. Start testpmd with basic parameters:
-
- .. code-block:: console
-
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0000:02:00.4 -- -i
-
Example output:
.. code-block:: console
@@ -334,10 +270,10 @@ devices managed by librte_pmd_cxgbe in Linux operating system.
Done
testpmd>
-.. note::
+ .. note::
- Flow control pause TX/RX is disabled by default and can be enabled via
- testpmd. Refer section :ref:`flow-control` for more details.
+ Flow control pause TX/RX is disabled by default and can be enabled via
+ testpmd. Refer section :ref:`flow-control` for more details.
FreeBSD
-------
@@ -509,7 +445,7 @@ devices managed by librte_pmd_cxgbe in FreeBSD operating system.
.. code-block:: console
- ./x86_64-native-bsdapp-clang/app/testpmd -c 0xf -n 4 -w 0000:02:00.4 -- -i
+ ./x86_64-native-bsdapp-clang/app/testpmd -l 0-3 -n 4 -w 0000:02:00.4 -- -i
Example output:
diff --git a/doc/guides/nics/dpaa2.rst b/doc/guides/nics/dpaa2.rst
new file mode 100644
index 00000000..1ca27d45
--- /dev/null
+++ b/doc/guides/nics/dpaa2.rst
@@ -0,0 +1,594 @@
+.. BSD LICENSE
+ Copyright (C) NXP. 2016.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of NXP nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+DPAA2 Poll Mode Driver
+======================
+
+The DPAA2 NIC PMD (**librte_pmd_dpaa2**) provides poll mode driver
+support for the inbuilt NIC found in the **NXP DPAA2** SoC family.
+
+More information can be found at `NXP Official Website
+<http://www.nxp.com/products/microcontrollers-and-processors/arm-processors/qoriq-arm-processors:QORIQ-ARM>`_.
+
+NXP DPAA2 (Data Path Acceleration Architecture Gen2)
+----------------------------------------------------
+
+This section provides an overview of the NXP DPAA2 architecture
+and how it is integrated into the DPDK.
+
+Contents summary
+
+- DPAA2 overview
+- Overview of DPAA2 objects
+- DPAA2 driver architecture overview
+
+.. _dpaa2_overview:
+
+DPAA2 Overview
+~~~~~~~~~~~~~~
+
+Reference: `FSL MC BUS in Linux Kernel <https://www.kernel.org/doc/readme/drivers-staging-fsl-mc-README.txt>`_.
+
+DPAA2 is a hardware architecture designed for high-speed network
+packet processing. DPAA2 consists of sophisticated mechanisms for
+processing Ethernet packets, queue management, buffer management,
+autonomous L2 switching, virtual Ethernet bridging, and accelerator
+(e.g. crypto) sharing.
+
+A DPAA2 hardware component called the Management Complex (or MC) manages the
+DPAA2 hardware resources. The MC provides an object-based abstraction for
+software drivers to use the DPAA2 hardware.
+
+The MC uses DPAA2 hardware resources such as queues, buffer pools, and
+network ports to create functional objects/devices such as network
+interfaces, an L2 switch, or accelerator instances.
+
+The MC provides memory-mapped I/O command interfaces (MC portals)
+which DPAA2 software drivers use to operate on DPAA2 objects:
+
+The diagram below shows an overview of the DPAA2 resource management
+architecture:
+
+.. code-block:: console
+
+ +--------------------------------------+
+ | OS |
+ | DPAA2 drivers |
+ | | |
+ +-----------------------------|--------+
+ |
+ | (create,discover,connect
+ | config,use,destroy)
+ |
+ DPAA2 |
+ +------------------------| mc portal |-+
+ | | |
+ | +- - - - - - - - - - - - -V- - -+ |
+ | | | |
+ | | Management Complex (MC) | |
+ | | | |
+ | +- - - - - - - - - - - - - - - -+ |
+ | |
+ | Hardware Hardware |
+ | Resources Objects |
+ | --------- ------- |
+ | -queues -DPRC |
+ | -buffer pools -DPMCP |
+ | -Eth MACs/ports -DPIO |
+ | -network interface -DPNI |
+ | profiles -DPMAC |
+ | -queue portals -DPBP |
+ | -MC portals ... |
+ | ... |
+ | |
+ +--------------------------------------+
+
+The MC mediates operations such as create, discover,
+connect, configuration, and destroy. Fast-path operations
+on data, such as packet transmit/receive, are not mediated by
+the MC and are done directly using memory mapped regions in
+DPIO objects.
+
+Overview of DPAA2 Objects
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The section provides a brief overview of some key DPAA2 objects.
+A simple scenario is described illustrating the objects involved
+in creating a network interfaces.
+
+DPRC (Datapath Resource Container)
+
+ A DPRC is a container object that holds all the other
+ types of DPAA2 objects. In the example diagram below there
+ are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC)
+ in the container.
+
+.. code-block:: console
+
+ +---------------------------------------------------------+
+ | DPRC |
+ | |
+ | +-------+ +-------+ +-------+ +-------+ +-------+ |
+ | | DPMCP | | DPIO | | DPBP | | DPNI | | DPMAC | |
+ | +-------+ +-------+ +-------+ +---+---+ +---+---+ |
+ | | DPMCP | | DPIO | |
+ | +-------+ +-------+ |
+ | | DPMCP | |
+ | +-------+ |
+ | |
+ +---------------------------------------------------------+
+
+From the point of view of an OS, a DPRC behaves similar to a plug and
+play bus, like PCI. DPRC commands can be used to enumerate the contents
+of the DPRC, discover the hardware objects present (including mappable
+regions and interrupts).
+
+.. code-block:: console
+
+ DPRC.1 (bus)
+ |
+ +--+--------+-------+-------+-------+
+ | | | | |
+ DPMCP.1 DPIO.1 DPBP.1 DPNI.1 DPMAC.1
+ DPMCP.2 DPIO.2
+ DPMCP.3
+
+Hardware objects can be created and destroyed dynamically, providing
+the ability to hot plug/unplug objects in and out of the DPRC.
+
+A DPRC has a mappable MMIO region (an MC portal) that can be used
+to send MC commands. It has an interrupt for status events (like
+hotplug).
+
+All objects in a container share the same hardware "isolation context".
+This means that with respect to an IOMMU the isolation granularity
+is at the DPRC (container) level, not at the individual object
+level.
+
+DPRCs can be defined statically and populated with objects
+via a config file passed to the MC when firmware starts
+it. There is also a Linux user space tool called "restool"
+that can be used to create/destroy containers and objects
+dynamically.
+
+DPAA2 Objects for an Ethernet Network Interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX
+queuing mechanisms, configuration mechanisms, buffer management,
+physical ports, and interrupts. DPAA2 uses a more granular approach
+utilizing multiple hardware objects. Each object provides specialized
+functions. Groups of these objects are used by software to provide
+Ethernet network interface functionality. This approach provides
+efficient use of finite hardware resources, flexibility, and
+performance advantages.
+
+The diagram below shows the objects needed for a simple
+network interface configuration on a system with 2 CPUs.
+
+.. code-block:: console
+
+ +---+---+ +---+---+
+ CPU0 CPU1
+ +---+---+ +---+---+
+ | |
+ +---+---+ +---+---+
+ DPIO DPIO
+ +---+---+ +---+---+
+ \ /
+ \ /
+ \ /
+ +---+---+
+ DPNI --- DPBP,DPMCP
+ +---+---+
+ |
+ |
+ +---+---+
+ DPMAC
+ +---+---+
+ |
+ port/PHY
+
+Below the objects are described. For each object a brief description
+is provided along with a summary of the kinds of operations the object
+supports and a summary of key resources of the object (MMIO regions
+and IRQs).
+
+DPMAC (Datapath Ethernet MAC): represents an Ethernet MAC, a
+hardware device that connects to an Ethernet PHY and allows
+physical transmission and reception of Ethernet frames.
+
+- MMIO regions: none
+- IRQs: DPNI link change
+- commands: set link up/down, link config, get stats, IRQ config, enable, reset
+
+DPNI (Datapath Network Interface): contains TX/RX queues,
+network interface configuration, and RX buffer pool configuration
+mechanisms. The TX/RX queues are in memory and are identified by
+queue number.
+
+- MMIO regions: none
+- IRQs: link state
+- commands: port config, offload config, queue config, parse/classify config, IRQ config, enable, reset
+
+DPIO (Datapath I/O): provides interfaces to enqueue and dequeue
+packets and do hardware buffer pool management operations. The DPAA2
+architecture separates the mechanism to access queues (the DPIO object)
+from the queues themselves. The DPIO provides an MMIO interface to
+enqueue/dequeue packets. To enqueue something a descriptor is written
+to the DPIO MMIO region, which includes the target queue number.
+There will typically be one DPIO assigned to each CPU. This allows all
+CPUs to simultaneously perform enqueue/dequeued operations. DPIOs are
+expected to be shared by different DPAA2 drivers.
+
+- MMIO regions: queue operations, buffer management
+- IRQs: data availability, congestion notification, buffer pool depletion
+- commands: IRQ config, enable, reset
+
+DPBP (Datapath Buffer Pool): represents a hardware buffer
+pool.
+
+- MMIO regions: none
+- IRQs: none
+- commands: enable, reset
+
+DPMCP (Datapath MC Portal): provides an MC command portal.
+Used by drivers to send commands to the MC to manage
+objects.
+
+- MMIO regions: MC command portal
+- IRQs: command completion
+- commands: IRQ config, enable, reset
+
+Object Connections
+~~~~~~~~~~~~~~~~~~
+
+Some objects have explicit relationships that must
+be configured:
+
+- DPNI <--> DPMAC
+- DPNI <--> DPNI
+- DPNI <--> L2-switch-port
+
+A DPNI must be connected to something such as a DPMAC,
+another DPNI, or L2 switch port. The DPNI connection
+is made via a DPRC command.
+
+.. code-block:: console
+
+ +-------+ +-------+
+ | DPNI | | DPMAC |
+ +---+---+ +---+---+
+ | |
+ +==========+
+
+- DPNI <--> DPBP
+
+A network interface requires a 'buffer pool' (DPBP object) which provides
+a list of pointers to memory where received Ethernet data is to be copied.
+The Ethernet driver configures the DPBPs associated with the network
+interface.
+
+Interrupts
+~~~~~~~~~~
+
+All interrupts generated by DPAA2 objects are message
+interrupts. At the hardware level message interrupts
+generated by devices will normally have 3 components--
+1) a non-spoofable 'device-id' expressed on the hardware
+bus, 2) an address, 3) a data value.
+
+In the case of DPAA2 devices/objects, all objects in the
+same container/DPRC share the same 'device-id'.
+For ARM-based SoC this is the same as the stream ID.
+
+
+DPAA2 DPDK - Poll Mode Driver Overview
+--------------------------------------
+
+This section provides an overview of the drivers for
+DPAA2-- 1) the bus driver and associated "DPAA2 infrastructure"
+drivers and 2) functional object drivers (such as Ethernet).
+
+As described previously, a DPRC is a container that holds the other
+types of DPAA2 objects. It is functionally similar to a plug-and-play
+bus controller.
+
+Each object in the DPRC is a Linux "device" and is bound to a driver.
+The diagram below shows the dpaa2 drivers involved in a networking
+scenario and the objects bound to each driver. A brief description
+of each driver follows.
+
+.. code-block: console
+
+
+ +------------+
+ | DPDK DPAA2 |
+ | PMD |
+ +------------+ +------------+
+ | Ethernet |.......| Mempool |
+ . . . . . . . . . | (DPNI) | | (DPBP) |
+ . +---+---+----+ +-----+------+
+ . ^ | .
+ . | |<enqueue, .
+ . | | dequeue> .
+ . | | .
+ . +---+---V----+ .
+ . . . . . . . . . . .| DPIO driver| .
+ . . | (DPIO) | .
+ . . +-----+------+ .
+ . . | QBMAN | .
+ . . | Driver | .
+ +----+------+-------+ +-----+----- | .
+ | dpaa2 bus | | .
+ | VFIO fslmc-bus |....................|.....................
+ | | |
+ | /bus/fslmc | |
+ +-------------------+ |
+ |
+ ========================== HARDWARE =====|=======================
+ DPIO
+ |
+ DPNI---DPBP
+ |
+ DPMAC
+ |
+ PHY
+ =========================================|========================
+
+
+A brief description of each driver is provided below.
+
+DPAA2 bus driver
+~~~~~~~~~~~~~~~~
+
+The DPAA2 bus driver is a rte_bus driver which scans the fsl-mc bus.
+Key functions include:
+
+- Reading the container and setting up vfio group
+- Scanning and parsing the various MC objects and adding them to
+ their respective device list.
+
+Additionally, it also provides the object driver for generic MC objects.
+
+DPIO driver
+~~~~~~~~~~~
+
+The DPIO driver is bound to DPIO objects and provides services that allow
+other drivers such as the Ethernet driver to enqueue and dequeue data for
+their respective objects.
+Key services include:
+
+- Data availability notifications
+- Hardware queuing operations (enqueue and dequeue of data)
+- Hardware buffer pool management
+
+To transmit a packet the Ethernet driver puts data on a queue and
+invokes a DPIO API. For receive, the Ethernet driver registers
+a data availability notification callback. To dequeue a packet
+a DPIO API is used.
+
+There is typically one DPIO object per physical CPU for optimum
+performance, allowing different CPUs to simultaneously enqueue
+and dequeue data.
+
+The DPIO driver operates on behalf of all DPAA2 drivers
+active -- Ethernet, crypto, compression, etc.
+
+DPBP based Mempool driver
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The DPBP driver is bound to a DPBP objects and provides sevices to
+create a hardware offloaded packet buffer mempool.
+
+DPAA2 NIC Driver
+~~~~~~~~~~~~~~~~
+The Ethernet driver is bound to a DPNI and implements the kernel
+interfaces needed to connect the DPAA2 network interface to
+the network stack.
+
+Each DPNI corresponds to a DPDK network interface.
+
+Features
+^^^^^^^^
+
+Features of the DPAA2 PMD are:
+
+- Multiple queues for TX and RX
+- Receive Side Scaling (RSS)
+- Packet type information
+- Checksum offload
+- Promiscuous mode
+
+Supported DPAA2 SoCs
+--------------------
+
+- LS2080A/LS2040A
+- LS2084A/LS2044A
+- LS2088A/LS2048A
+- LS1088A/LS1048A
+
+Prerequisites
+-------------
+
+There are three main pre-requisities for executing DPAA2 PMD on a DPAA2
+compatible board:
+
+1. **ARM 64 Tool Chain**
+
+ For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/4.9-2017.01/aarch64-linux-gnu>`_.
+
+2. **Linux Kernel**
+
+ It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
+
+3. **Rootfile system**
+
+ Any *aarch64* supporting filesystem can be used. For example,
+ Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
+ from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
+
+As an alternative method, DPAA2 PMD can also be executed using images provided
+as part of SDK from NXP. The SDK includes all the above prerequisites necessary
+to bring up a DPAA2 board.
+
+The following dependencies are not part of DPDK and must be installed
+separately:
+
+- **NXP Linux SDK**
+
+ NXP Linux software development kit (SDK) includes support for family
+ of QorIQ® ARM-Architecture-based system on chip (SoC) processors
+ and corresponding boards.
+
+ It includes the Linux board support packages (BSPs) for NXP SoCs,
+ a fully operational tool chain, kernel and board specific modules.
+
+ SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
+
+- **DPDK Helper Scripts**
+
+ DPAA2 based resources can be configured easily with the help of ready scripts
+ as provided in the DPDK helper repository.
+
+ `DPDK Helper Scripts <https://github.com/qoriq-open-source/dpdk-helper>`_.
+
+Currently supported by DPDK:
+
+- NXP SDK **2.0+**.
+- MC Firmware version **10.0.0** and higher.
+- Supported architectures: **arm64 LE**.
+
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup the basic DPDK environment.
+
+.. note::
+
+ Some part of fslmc bus code (mc flib - object library) routines are
+ dual licensed (BSD & GPLv2).
+
+Pre-Installation Configuration
+------------------------------
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following options can be modified in the ``config`` file.
+Please note that enabling debugging options may affect system performance.
+
+- ``CONFIG_RTE_LIBRTE_FSLMC_BUS`` (default ``n``)
+
+ By default it is enabled only for defconfig_arm64-dpaa2-* config.
+ Toggle compilation of the ``librte_bus_fslmc`` driver.
+
+- ``CONFIG_RTE_LIBRTE_DPAA2_PMD`` (default ``n``)
+
+ By default it is enabled only for defconfig_arm64-dpaa2-* config.
+ Toggle compilation of the ``librte_pmd_dpaa2`` driver.
+
+- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER`` (default ``n``)
+
+ Toggle display of generic debugging messages
+
+- ``CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA`` (default ``y``)
+
+ Toggle to use physical address vs virtual address for hardware accelerators.
+
+- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT`` (default ``n``)
+
+ Toggle display of initialization related messages.
+
+- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_RX`` (default ``n``)
+
+ Toggle display of receive fast path run-time message
+
+- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX`` (default ``n``)
+
+ Toggle display of transmit fast path run-time message
+
+- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX_FREE`` (default ``n``)
+
+ Toggle display of transmit fast path buffer free run-time message
+
+Driver compilation and testing
+------------------------------
+
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
+
+#. Running testpmd:
+
+ Follow instructions available in the document
+ :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+ to run testpmd.
+
+ Example output:
+
+ .. code-block:: console
+
+ ./arm64-dpaa2-linuxapp-gcc/testpmd -c 0xff -n 1 \
+ -- -i --portmask=0x3 --nb-cores=1 --no-flush-rx
+
+ .....
+ EAL: Registered [pci] bus.
+ EAL: Registered [fslmc] bus.
+ EAL: Detected 8 lcore(s)
+ EAL: Probing VFIO support...
+ EAL: VFIO support initialized
+ .....
+ PMD: DPAA2: Processing Container = dprc.2
+ EAL: fslmc: DPRC contains = 51 devices
+ EAL: fslmc: Bus scan completed
+ .....
+ Configuring Port 0 (socket 0)
+ Port 0: 00:00:00:00:00:01
+ Configuring Port 1 (socket 0)
+ Port 1: 00:00:00:00:00:02
+ .....
+ Checking link statuses...
+ Port 0 Link Up - speed 10000 Mbps - full-duplex
+ Port 1 Link Up - speed 10000 Mbps - full-duplex
+ Done
+ testpmd>
+
+Limitations
+-----------
+
+Platform Requirement
+~~~~~~~~~~~~~~~~~~~~
+DPAA2 drivers for DPDK can only work on NXP SoCs as listed in the
+``Supported DPAA2 SoCs``.
+
+Maximum packet length
+~~~~~~~~~~~~~~~~~~~~~
+
+The DPAA2 SoC family support a maximum of a 10240 jumbo frame. The value
+is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
+member of ``struct rte_eth_conf`` is set to a value lower than 10240, frames
+up to 10240 bytes can still reach the host interface.
diff --git a/doc/guides/nics/ena.rst b/doc/guides/nics/ena.rst
index 073b35ae..d19912e9 100644
--- a/doc/guides/nics/ena.rst
+++ b/doc/guides/nics/ena.rst
@@ -200,52 +200,23 @@ application runs to completion, the ENA can be detached from igb_uio if necessar
Usage example
-------------
-This section demonstrates how to launch **testpmd** with Amazon ENA
-devices managed by librte_pmd_ena.
-
-#. Load the kernel modules:
-
- .. code-block:: console
-
- modprobe uio
- insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
-
- .. note::
-
- Currently Amazon ENA PMD driver depends on igb_uio user space I/O kernel module
-
-#. Mount and request huge pages:
-
- .. code-block:: console
-
- mount -t hugetlbfs nodev /mnt/hugepages
- echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
-
-#. Bind UIO driver to ENA device (using provided by DPDK binding tool):
-
- .. code-block:: console
-
- ./tools/dpdk-devbind.py --bind=igb_uio 0000:02:00.1
-
-#. Start testpmd with basic parameters:
-
- .. code-block:: console
-
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -- -i
-
- Example output:
-
- .. code-block:: console
-
- [...]
- EAL: PCI device 0000:02:00.1 on NUMA socket -1
- EAL: probe driver: 1d0f:ec20 rte_ena_pmd
- EAL: PCI memory mapped at 0x7f9b6c400000
- PMD: eth_ena_dev_init(): Initializing 0:2:0.1
- Interactive-mode selected
- Configuring Port 0 (socket 0)
- Port 0: 00:00:00:11:00:01
- Checking link statuses...
- Port 0 Link Up - speed 10000 Mbps - full-duplex
- Done
- testpmd>
+Follow instructions available in the document
+:ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>` to launch
+**testpmd** with Amazon ENA devices managed by librte_pmd_ena.
+
+Example output:
+
+.. code-block:: console
+
+ [...]
+ EAL: PCI device 0000:02:00.1 on NUMA socket -1
+ EAL: probe driver: 1d0f:ec20 rte_ena_pmd
+ EAL: PCI memory mapped at 0x7f9b6c400000
+ PMD: eth_ena_dev_init(): Initializing 0:2:0.1
+ Interactive-mode selected
+ Configuring Port 0 (socket 0)
+ Port 0: 00:00:00:11:00:01
+ Checking link statuses...
+ Port 0 Link Up - speed 10000 Mbps - full-duplex
+ Done
+ testpmd>
diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index c535b589..89a30158 100644
--- a/doc/guides/nics/enic.rst
+++ b/doc/guides/nics/enic.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright (c) 2015, Cisco Systems, Inc.
+ Copyright (c) 2017, Cisco Systems, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -71,9 +71,9 @@ Configuration information
- The number of RQs configured in the vNIC should be greater or
equal to *twice* the value of the expected nb_rx_q parameter in
- the call to rte_eth_dev_configure(). With the addition of rx
+ the call to rte_eth_dev_configure(). With the addition of Rx
scatter, a pair of RQs on the vnic is needed for each receive
- queue used by DPDK, even if rx scatter is not being used.
+ queue used by DPDK, even if Rx scatter is not being used.
Having a vNIC with only 1 RQ is not a valid configuration, and
will fail with an error message.
@@ -99,7 +99,7 @@ Configuration information
gives the application the greatest amount of flexibility in its
queue configuration.
- - *Note*: Since the introduction of rx scatter, for performance
+ - *Note*: Since the introduction of Rx scatter, for performance
reasons, this PMD uses two RQs on the vNIC per receive queue in
DPDK. One RQ holds descriptors for the start of a packet the
second RQ holds the descriptors for the rest of the fragments of
@@ -135,11 +135,86 @@ of the server.
With advanced filters, perfect matching of all fields of IPv4, IPv6 headers
as well as TCP, UDP and SCTP L4 headers is available through flow director.
-Masking of these feilds for partial match is also supported.
+Masking of these fields for partial match is also supported.
Without advanced filter support, the flow director is limited to IPv4
perfect filtering of the 5-tuple with no masking of fields supported.
+SR-IOV mode utilization
+-----------------------
+
+UCS blade servers configured with dynamic vNIC connection policies in UCS
+manager are capable of supporting assigned devices on virtual machines (VMs)
+through a KVM hypervisor. Assigned devices, also known as 'passthrough'
+devices, are SR-IOV virtual functions (VFs) on the host which are exposed
+to VM instances.
+
+The Cisco Virtual Machine Fabric Extender (VM-FEX) gives the VM a dedicated
+interface on the Fabric Interconnect (FI). Layer 2 switching is done at
+the FI. This may eliminate the requirement for software switching on the
+host to route intra-host VM traffic.
+
+Please refer to `Creating a Dynamic vNIC Connection Policy
+<http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/sw/vm_fex/vmware/gui/config_guide/b_GUI_VMware_VM-FEX_UCSM_Configuration_Guide/b_GUI_VMware_VM-FEX_UCSM_Configuration_Guide_chapter_010.html#task_433E01651F69464783A68E66DA8A47A5>`_
+for information on configuring SR-IOV Adapter policies using UCS manager.
+
+Once the policies are in place and the host OS is rebooted, VFs should be
+visible on the host, E.g.:
+
+.. code-block:: console
+
+ # lspci | grep Cisco | grep Ethernet
+ 0d:00.0 Ethernet controller: Cisco Systems Inc VIC Ethernet NIC (rev a2)
+ 0d:00.1 Ethernet controller: Cisco Systems Inc VIC SR-IOV VF (rev a2)
+ 0d:00.2 Ethernet controller: Cisco Systems Inc VIC SR-IOV VF (rev a2)
+ 0d:00.3 Ethernet controller: Cisco Systems Inc VIC SR-IOV VF (rev a2)
+ 0d:00.4 Ethernet controller: Cisco Systems Inc VIC SR-IOV VF (rev a2)
+ 0d:00.5 Ethernet controller: Cisco Systems Inc VIC SR-IOV VF (rev a2)
+ 0d:00.6 Ethernet controller: Cisco Systems Inc VIC SR-IOV VF (rev a2)
+ 0d:00.7 Ethernet controller: Cisco Systems Inc VIC SR-IOV VF (rev a2)
+
+Enable Intel IOMMU on the host and install KVM and libvirt. A VM instance should
+be created with an assigned device. When using libvirt, this configuration can
+be done within the domain (i.e. VM) config file. For example this entry maps
+host VF 0d:00:01 into the VM.
+
+.. code-block:: console
+
+ <interface type='hostdev' managed='yes'>
+ <mac address='52:54:00:ac:ff:b6'/>
+ <source>
+ <address type='pci' domain='0x0000' bus='0x0d' slot='0x00' function='0x1'/>
+ </source>
+
+Alternatively, the configuration can be done in a separate file using the
+``network`` keyword. These methods are described in the libvirt documentation for
+`Network XML format <https://libvirt.org/formatnetwork.html>`_.
+
+When the VM instance is started, the ENIC KVM driver will bind the host VF to
+vfio, complete provisioning on the FI and bring up the link.
+
+.. note::
+
+ It is not possible to use a VF directly from the host because it is not
+ fully provisioned until the hypervisor brings up the VM that it is assigned
+ to.
+
+In the VM instance, the VF will now be visible. E.g., here the VF 00:04.0 is
+seen on the VM instance and should be available for binding to a DPDK.
+
+.. code-block:: console
+
+ # lspci | grep Ether
+ 00:04.0 Ethernet controller: Cisco Systems Inc VIC SR-IOV VF (rev a2)
+
+Follow the normal DPDK install procedure, binding the VF to either ``igb_uio``
+or ``vfio`` in non-IOMMU mode.
+
+Please see :ref:`Limitations <enic_limitations>` for limitations in
+the use of SR-IOV.
+
+.. _enic_limitations:
+
Limitations
-----------
@@ -169,12 +244,31 @@ Limitations
- Flow director features are not supported on generation 1 Cisco VIC adapters
(M81KR and P81E)
-How to build the suite?
------------------------
-The build instructions for the DPDK suite should be followed. By default
-the ENIC PMD library will be built into the DPDK library.
+- **SR-IOV**
+
+ - KVM hypervisor support only. VMware has not been tested.
+ - Requires VM-FEX, and so is only available on UCS managed servers connected
+ to Fabric Interconnects. It is not on standalone C-Series servers.
+ - VF devices are not usable directly from the host. They can only be used
+ as assigned devices on VM instances.
+ - Currently, unbind of the ENIC kernel mode driver 'enic.ko' on the VM
+ instance may hang. As a workaround, enic.ko should blacklisted or removed
+ from the boot process.
+ - pci_generic cannot be used as the uio module in the VM. igb_uio or
+ vfio in non-IOMMU mode can be used.
+ - The number of RQs in UCSM dynamic vNIC configurations must be at least 2.
+ - The number of SR-IOV devices is limited to 256. Components on target system
+ might limit this number to fewer than 256.
+
+How to build the suite
+----------------------
-For configuring and using UIO and VFIO frameworks, please refer the
+Refer to the document :ref:`compiling and testing a PMD for a NIC
+<pmd_build_and_test>` for details.
+
+By default the ENIC PMD library will be built into the DPDK library.
+
+For configuring and using UIO and VFIO frameworks, please refer to the
documentation that comes with DPDK suite.
Supported Cisco VIC adapters
@@ -196,11 +290,13 @@ ENIC PMD supports all recent generations of Cisco VIC adapters including:
Supported Operating Systems
---------------------------
+
Any Linux distribution fulfilling the conditions described in Dependencies
section of DPDK documentation.
Supported features
------------------
+
- Unicast, multicast and broadcast transmission and reception
- Receive queue polling
- Port Hardware Statistics
@@ -216,9 +312,11 @@ Supported features
- IPV4, IPV6 and TCP RSS hashing
- Scattered Rx
- MTU update
+- SR-IOV on UCS managed servers connected to Fabric Interconnects.
-Known bugs and Unsupported features in this release
+Known bugs and unsupported features in this release
---------------------------------------------------
+
- Signature or flex byte based flow direction
- Drop feature of flow direction
- VLAN based flow direction
@@ -229,6 +327,7 @@ Known bugs and Unsupported features in this release
Prerequisites
-------------
+
- Prepare the system as recommended by DPDK suite. This includes environment
variables, hugepages configuration, tool-chains and configuration
- Insert vfio-pci kernel module using the command 'modprobe vfio-pci' if the
@@ -238,9 +337,8 @@ Prerequisites
- DPDK suite should be configured based on the user's decision to use VFIO or
UIO framework
- If the vNIC device(s) to be used is bound to the kernel mode Ethernet driver
- (enic), use 'ifconfig' to bring the interface down. The dpdk-devbind.py tool
- can then be used to unbind the device's bus id from the enic kernel mode
- driver.
+ use 'ifconfig' to bring the interface down. The dpdk-devbind.py tool can
+ then be used to unbind the device's bus id from the ENIC kernel mode driver.
- Bind the intended vNIC to vfio-pci in case the user wants ENIC PMD to use
VFIO framework using dpdk-devbind.py.
- Bind the intended vNIC to igb_uio in case the user wants ENIC PMD to use
@@ -271,10 +369,12 @@ libraries and the initialization time of the application.
Additional Reference
--------------------
+
- http://www.cisco.com/c/en/us/products/servers-unified-computing
Contact Information
-------------------
+
Any questions or bugs should be reported to DPDK community and to the ENIC PMD
maintainers:
diff --git a/doc/guides/nics/features/ark.ini b/doc/guides/nics/features/ark.ini
new file mode 100644
index 00000000..31a35279
--- /dev/null
+++ b/doc/guides/nics/features/ark.ini
@@ -0,0 +1,14 @@
+;
+; Supported features of the 'ark' poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Queue start/stop = Y
+Jumbo frame = Y
+Scattered Rx = Y
+Basic stats = Y
+Stats per queue = Y
+Linux UIO = Y
+x86-64 = Y
+Usage doc = Y
diff --git a/doc/guides/nics/features/avp.ini b/doc/guides/nics/features/avp.ini
new file mode 100644
index 00000000..ceb69939
--- /dev/null
+++ b/doc/guides/nics/features/avp.ini
@@ -0,0 +1,16 @@
+;
+; Supported features of the 'AVP' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Link status = Y
+Jumbo frame = Y
+Scattered Rx = Y
+Promiscuous mode = Y
+Unicast MAC filter = Y
+VLAN offload = Y
+Basic stats = Y
+Stats per queue = Y
+Linux UIO = Y
+x86-64 = Y
diff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini
index f1bf9bf2..cafc6c70 100644
--- a/doc/guides/nics/features/default.ini
+++ b/doc/guides/nics/features/default.ini
@@ -3,14 +3,17 @@
;
; This file defines the features that are valid for inclusion in
; the other driver files and also the order that they appear in
-; the features table in the documentation.
+; the features table in the documentation. The feature description
+; string should not exceed feature_str_len defined in conf.py.
;
[Features]
Speed capabilities =
Link status =
Link status event =
+Removal event =
Queue status event =
Rx interrupt =
+Free Tx mbuf on demand =
Queue start/stop =
MTU update =
Jumbo frame =
@@ -36,6 +39,7 @@ Flexible filter =
Hash filter =
Flow director =
Flow control =
+Flow API =
Rate limitation =
Traffic mirroring =
CRC offload =
@@ -43,13 +47,17 @@ VLAN offload =
QinQ offload =
L3 checksum offload =
L4 checksum offload =
+MACsec offload =
Inner L3 checksum =
Inner L4 checksum =
Packet type parsing =
Timesync =
+Rx descriptor status =
+Tx descriptor status =
Basic stats =
Extended stats =
Stats per queue =
+FW version =
EEPROM dump =
Registers dump =
Multiprocess aware =
@@ -60,7 +68,6 @@ Other kdrv =
ARMv7 =
ARMv8 =
Power8 =
-TILE-Gx =
x86-32 =
x86-64 =
Usage doc =
diff --git a/doc/guides/nics/features/dpaa2.ini b/doc/guides/nics/features/dpaa2.ini
new file mode 100644
index 00000000..d43f4046
--- /dev/null
+++ b/doc/guides/nics/features/dpaa2.ini
@@ -0,0 +1,18 @@
+;
+; Supported features of the 'dpaa2' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Link status = Y
+Queue start/stop = Y
+MTU update = Y
+Promiscuous mode = Y
+RSS hash = Y
+L3 checksum offload = Y
+L4 checksum offload = Y
+Packet type parsing = Y
+Basic stats = Y
+Linux VFIO = Y
+ARMv8 = Y
+Usage doc = Y
diff --git a/doc/guides/nics/features/e1000.ini b/doc/guides/nics/features/e1000.ini
index 7f6d55c4..260d46da 100644
--- a/doc/guides/nics/features/e1000.ini
+++ b/doc/guides/nics/features/e1000.ini
@@ -7,6 +7,7 @@
Link status = Y
Link status event = Y
Rx interrupt = Y
+Free Tx mbuf on demand = Y
MTU update = Y
Jumbo frame = Y
Scattered Rx = Y
@@ -20,6 +21,8 @@ VLAN offload = Y
QinQ offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
BSD nic_uio = Y
Linux UIO = Y
diff --git a/doc/guides/nics/features/enic.ini b/doc/guides/nics/features/enic.ini
index 86576a75..94e7f3cb 100644
--- a/doc/guides/nics/features/enic.ini
+++ b/doc/guides/nics/features/enic.ini
@@ -10,10 +10,12 @@ Queue start/stop = Y
MTU update = Y
Jumbo frame = Y
Scattered Rx = Y
+TSO = Y
Promiscuous mode = Y
Unicast MAC filter = Y
Multicast MAC filter = Y
RSS hash = Y
+SR-IOV = Y
VLAN filter = Y
CRC offload = Y
VLAN offload = Y
diff --git a/doc/guides/nics/features/i40e.ini b/doc/guides/nics/features/i40e.ini
index 0d143bca..ecabce0b 100644
--- a/doc/guides/nics/features/i40e.ini
+++ b/doc/guides/nics/features/i40e.ini
@@ -27,6 +27,7 @@ Tunnel filter = Y
Hash filter = Y
Flow director = Y
Flow control = Y
+Flow API = Y
Traffic mirroring = Y
CRC offload = Y
VLAN offload = Y
@@ -37,8 +38,11 @@ Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
Timesync = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
+FW version = Y
Multiprocess aware = Y
BSD nic_uio = Y
Linux UIO = Y
@@ -46,3 +50,4 @@ Linux VFIO = Y
x86-32 = Y
x86-64 = Y
ARMv8 = Y
+Power8 = Y
diff --git a/doc/guides/nics/features/i40e_vec.ini b/doc/guides/nics/features/i40e_vec.ini
index edd6b717..206f348b 100644
--- a/doc/guides/nics/features/i40e_vec.ini
+++ b/doc/guides/nics/features/i40e_vec.ini
@@ -29,6 +29,8 @@ Flow director = Y
Flow control = Y
Traffic mirroring = Y
Timesync = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Multiprocess aware = Y
@@ -38,3 +40,4 @@ Linux VFIO = Y
x86-32 = Y
x86-64 = Y
ARMv8 = Y
+Power8 = Y
diff --git a/doc/guides/nics/features/i40e_vf.ini b/doc/guides/nics/features/i40e_vf.ini
index 2f82c6b9..46e0d9fc 100644
--- a/doc/guides/nics/features/i40e_vf.ini
+++ b/doc/guides/nics/features/i40e_vf.ini
@@ -26,6 +26,8 @@ L4 checksum offload = Y
Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Multiprocess aware = Y
diff --git a/doc/guides/nics/features/i40e_vf_vec.ini b/doc/guides/nics/features/i40e_vf_vec.ini
index d6674f76..c2c6c19f 100644
--- a/doc/guides/nics/features/i40e_vf_vec.ini
+++ b/doc/guides/nics/features/i40e_vf_vec.ini
@@ -18,6 +18,8 @@ RSS key update = Y
RSS reta update = Y
VLAN filter = Y
Hash filter = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Multiprocess aware = Y
diff --git a/doc/guides/nics/features/igb.ini b/doc/guides/nics/features/igb.ini
index 9fafe72d..11450270 100644
--- a/doc/guides/nics/features/igb.ini
+++ b/doc/guides/nics/features/igb.ini
@@ -33,8 +33,11 @@ L3 checksum offload = Y
L4 checksum offload = Y
Packet type parsing = Y
Timesync = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
+FW version = Y
EEPROM dump = Y
Registers dump = Y
BSD nic_uio = Y
diff --git a/doc/guides/nics/features/igb_vf.ini b/doc/guides/nics/features/igb_vf.ini
index b6178202..e641a2c9 100644
--- a/doc/guides/nics/features/igb_vf.ini
+++ b/doc/guides/nics/features/igb_vf.ini
@@ -17,6 +17,8 @@ QinQ offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
Packet type parsing = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Registers dump = Y
diff --git a/doc/guides/nics/features/ixgbe.ini b/doc/guides/nics/features/ixgbe.ini
index 4a5667f0..4aa7af6d 100644
--- a/doc/guides/nics/features/ixgbe.ini
+++ b/doc/guides/nics/features/ixgbe.ini
@@ -29,6 +29,7 @@ SYN filter = Y
Tunnel filter = Y
Flow director = Y
Flow control = Y
+Flow API = Y
Rate limitation = Y
Traffic mirroring = Y
CRC offload = Y
@@ -36,13 +37,17 @@ VLAN offload = Y
QinQ offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
+MACsec offload = Y
Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
Timesync = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Stats per queue = Y
+FW version = Y
EEPROM dump = Y
Registers dump = Y
Multiprocess aware = Y
diff --git a/doc/guides/nics/features/ixgbe_vec.ini b/doc/guides/nics/features/ixgbe_vec.ini
index e1773dd6..4da81182 100644
--- a/doc/guides/nics/features/ixgbe_vec.ini
+++ b/doc/guides/nics/features/ixgbe_vec.ini
@@ -32,6 +32,8 @@ Flow control = Y
Rate limitation = Y
Traffic mirroring = Y
Timesync = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Stats per queue = Y
diff --git a/doc/guides/nics/features/ixgbe_vf.ini b/doc/guides/nics/features/ixgbe_vf.ini
index bf28215d..b63e32ce 100644
--- a/doc/guides/nics/features/ixgbe_vf.ini
+++ b/doc/guides/nics/features/ixgbe_vf.ini
@@ -25,6 +25,8 @@ L4 checksum offload = Y
Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Registers dump = Y
diff --git a/doc/guides/nics/features/ixgbe_vf_vec.ini b/doc/guides/nics/features/ixgbe_vf_vec.ini
index 8b8c90ba..c994857e 100644
--- a/doc/guides/nics/features/ixgbe_vf_vec.ini
+++ b/doc/guides/nics/features/ixgbe_vf_vec.ini
@@ -17,6 +17,8 @@ RSS hash = Y
RSS key update = Y
RSS reta update = Y
VLAN filter = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Registers dump = Y
diff --git a/doc/guides/nics/features/mpipe.ini b/doc/guides/nics/features/kni.ini
index ca609331..6deb66ae 100644
--- a/doc/guides/nics/features/mpipe.ini
+++ b/doc/guides/nics/features/kni.ini
@@ -1,6 +1,7 @@
;
-; Supported features of the 'mpipe' network poll mode driver.
+; Supported features of the 'kni' network poll mode driver.
;
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Usage doc = Y
diff --git a/doc/guides/nics/features/liquidio.ini b/doc/guides/nics/features/liquidio.ini
new file mode 100644
index 00000000..49cc3566
--- /dev/null
+++ b/doc/guides/nics/features/liquidio.ini
@@ -0,0 +1,28 @@
+;
+; Supported features of the 'LiquidIO' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Link status = Y
+Link status event = Y
+Jumbo frame = Y
+Scattered Rx = Y
+Allmulticast mode = Y
+RSS hash = Y
+RSS key update = Y
+RSS reta update = Y
+VLAN filter = Y
+CRC offload = Y
+VLAN offload = P
+L3 checksum offload = Y
+L4 checksum offload = Y
+Inner L3 checksum = Y
+Inner L4 checksum = Y
+Basic stats = Y
+Extended stats = Y
+Multiprocess aware = Y
+Linux UIO = Y
+Linux VFIO = Y
+x86-64 = Y
+Usage doc = Y
diff --git a/doc/guides/nics/features/mlx4.ini b/doc/guides/nics/features/mlx4.ini
index c9828f71..285f0ecf 100644
--- a/doc/guides/nics/features/mlx4.ini
+++ b/doc/guides/nics/features/mlx4.ini
@@ -6,6 +6,7 @@
[Features]
Link status = Y
Link status event = Y
+Removal event = Y
Queue start/stop = Y
MTU update = Y
Jumbo frame = Y
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index f811e3fb..e228c412 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -7,10 +7,12 @@
Speed capabilities = Y
Link status = Y
Link status event = Y
+Rx interrupt = Y
Queue start/stop = Y
MTU update = Y
Jumbo frame = Y
Scattered Rx = Y
+TSO = Y
Promiscuous mode = Y
Allmulticast mode = Y
Unicast MAC filter = Y
@@ -21,11 +23,16 @@ RSS reta update = Y
SR-IOV = Y
VLAN filter = Y
Flow director = Y
+Flow API = Y
CRC offload = Y
VLAN offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
+Inner L3 checksum = Y
+Inner L4 checksum = Y
Packet type parsing = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Stats per queue = Y
Multiprocess aware = Y
diff --git a/doc/guides/nics/features/nfp.ini b/doc/guides/nics/features/nfp.ini
index d9671512..a1281d2a 100644
--- a/doc/guides/nics/features/nfp.ini
+++ b/doc/guides/nics/features/nfp.ini
@@ -4,3 +4,26 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Speed capabilities = Y
+Link status = Y
+Link status event = Y
+Rx interrupt = Y
+Queue start/stop = Y
+MTU update = Y
+Jumbo frame = Y
+Promiscuous mode = Y
+TSO = Y
+RSS hash = Y
+RSS key update = Y
+RSS reta update = Y
+SR-IOV = Y
+Flow control = Y
+VLAN offload = Y
+L3 checksum offload = Y
+L4 checksum offload = Y
+Basic stats = Y
+Stats per queue = Y
+Linux UIO = Y
+Linux VFIO = Y
+x86-64 = Y
+Usage doc = Y
diff --git a/doc/guides/nics/features/pcap.ini b/doc/guides/nics/features/pcap.ini
index 8245cbfb..28e64880 100644
--- a/doc/guides/nics/features/pcap.ini
+++ b/doc/guides/nics/features/pcap.ini
@@ -10,7 +10,6 @@ Multiprocess aware = Y
ARMv7 = Y
ARMv8 = Y
Power8 = Y
-TILE-Gx = Y
x86-32 = Y
x86-64 = Y
Usage doc = Y
diff --git a/doc/guides/nics/features/qede.ini b/doc/guides/nics/features/qede.ini
index 7d75030a..fba5dc33 100644
--- a/doc/guides/nics/features/qede.ini
+++ b/doc/guides/nics/features/qede.ini
@@ -23,6 +23,9 @@ CRC offload = Y
VLAN offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
+Tunnel filter = Y
+Inner L3 checksum = Y
+Inner L4 checksum = Y
Packet type parsing = Y
Basic stats = Y
Extended stats = Y
@@ -31,3 +34,7 @@ Multiprocess aware = Y
Linux UIO = Y
x86-64 = Y
Usage doc = Y
+N-tuple filter = Y
+Flow director = Y
+LRO = Y
+TSO = Y
diff --git a/doc/guides/nics/features/qede_vf.ini b/doc/guides/nics/features/qede_vf.ini
index acb1b991..21ec40fa 100644
--- a/doc/guides/nics/features/qede_vf.ini
+++ b/doc/guides/nics/features/qede_vf.ini
@@ -31,4 +31,6 @@ Stats per queue = Y
Multiprocess aware = Y
Linux UIO = Y
x86-64 = Y
+LRO = Y
+TSO = Y
Usage doc = Y
diff --git a/doc/guides/nics/features/sfc_efx.ini b/doc/guides/nics/features/sfc_efx.ini
new file mode 100644
index 00000000..7957b5e9
--- /dev/null
+++ b/doc/guides/nics/features/sfc_efx.ini
@@ -0,0 +1,34 @@
+;
+; Supported features of the 'sfc_efx' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities = Y
+Link status = Y
+Link status event = Y
+Queue start/stop = Y
+MTU update = Y
+Jumbo frame = Y
+Scattered Rx = Y
+TSO = Y
+Promiscuous mode = Y
+Allmulticast mode = Y
+Multicast MAC filter = Y
+RSS hash = Y
+RSS key update = Y
+RSS reta update = Y
+SR-IOV = Y
+Flow control = Y
+Flow API = Y
+VLAN offload = P
+L3 checksum offload = Y
+L4 checksum offload = Y
+Packet type parsing = Y
+Basic stats = Y
+Extended stats = Y
+FW version = Y
+BSD nic_uio = Y
+Linux UIO = Y
+Linux VFIO = Y
+x86-64 = Y
diff --git a/doc/guides/nics/features/tap.ini b/doc/guides/nics/features/tap.ini
new file mode 100644
index 00000000..3efae758
--- /dev/null
+++ b/doc/guides/nics/features/tap.ini
@@ -0,0 +1,26 @@
+;
+; Supported features of the 'tap' driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Link status = Y
+Link status event = Y
+Jumbo frame = Y
+Promiscuous mode = Y
+Allmulticast mode = Y
+Basic stats = Y
+Flow API = Y
+MTU update = Y
+Multicast MAC filter = Y
+Speed capabilities = Y
+Unicast MAC filter = Y
+Packet type parsing = Y
+Flow control = Y
+Other kdrv = Y
+ARMv7 = Y
+ARMv8 = Y
+Power8 = Y
+x86-32 = Y
+x86-64 = Y
+Usage doc = Y
diff --git a/doc/guides/nics/features/vhost.ini b/doc/guides/nics/features/vhost.ini
index 23166fba..dffd1f49 100644
--- a/doc/guides/nics/features/vhost.ini
+++ b/doc/guides/nics/features/vhost.ini
@@ -6,6 +6,7 @@
[Features]
Link status = Y
Link status event = Y
+Free Tx mbuf on demand = Y
Queue status event = Y
Basic stats = Y
Extended stats = Y
diff --git a/doc/guides/nics/features/virtio.ini b/doc/guides/nics/features/virtio.ini
index 1d996c65..8e3aca1d 100644
--- a/doc/guides/nics/features/virtio.ini
+++ b/doc/guides/nics/features/virtio.ini
@@ -5,6 +5,7 @@
;
[Features]
Link status = Y
+Rx interrupt = Y
Queue start/stop = Y
Scattered Rx = Y
Promiscuous mode = Y
@@ -14,6 +15,7 @@ Multicast MAC filter = Y
VLAN filter = Y
Basic stats = Y
Stats per queue = Y
+Extended stats = Y
Multiprocess aware = Y
BSD nic_uio = Y
Linux UIO = Y
@@ -23,3 +25,4 @@ ARMv8 = Y
x86-32 = Y
x86-64 = Y
Usage doc = Y
+MTU update = Y
diff --git a/doc/guides/nics/features/virtio_vec.ini b/doc/guides/nics/features/virtio_vec.ini
index 6dc7cf02..ec93f5c4 100644
--- a/doc/guides/nics/features/virtio_vec.ini
+++ b/doc/guides/nics/features/virtio_vec.ini
@@ -5,6 +5,7 @@
;
[Features]
Link status = Y
+Rx interrupt = Y
Queue start/stop = Y
Promiscuous mode = Y
Allmulticast mode = Y
diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index 5780268f..4d3c7ca0 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -64,6 +64,7 @@ Features of the I40E PMD are:
- SR-IOV VF
- Hot plug
- IEEE1588/802.1AS timestamping
+- VF Daemon (VFD) - EXPERIMENTAL
Prerequisites
@@ -104,11 +105,6 @@ Please note that enabling debugging options may affect system performance.
Toggle the use of Vector PMD instead of normal RX/TX path.
To enable vPMD for RX, bulk allocation for Rx must be allowed.
-- ``CONFIG_RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE`` (default ``y``)
-
- Toggle to enable RX ``olflags``.
- This is only meaningful when Vector PMD is used.
-
- ``CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC`` (default ``n``)
Toggle to use a 16-byte RX descriptor, by default the RX descriptor is 32 byte.
@@ -130,82 +126,15 @@ Please note that enabling debugging options may affect system performance.
Interrupt Throttling interval.
-Driver Compilation
-~~~~~~~~~~~~~~~~~~
-
-To compile the I40E PMD see :ref:`Getting Started Guide for Linux <linux_gsg>` or
-:ref:`Getting Started Guide for FreeBSD <freebsd_gsg>` depending on your platform.
-
-
-Linux
------
-
-
-Running testpmd
-~~~~~~~~~~~~~~~
-
-This section demonstrates how to launch ``testpmd`` with Intel XL710/X710
-devices managed by ``librte_pmd_i40e`` in the Linux operating system.
-
-#. Load ``igb_uio`` or ``vfio-pci`` driver:
-
- .. code-block:: console
-
- modprobe uio
- insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
-
- or
-
- .. code-block:: console
-
- modprobe vfio-pci
-
-#. Bind the XL710/X710 adapters to ``igb_uio`` or ``vfio-pci`` loaded in the previous step:
-
- .. code-block:: console
-
- ./tools/dpdk-devbind.py --bind igb_uio 0000:83:00.0
-
- Or setup VFIO permissions for regular users and then bind to ``vfio-pci``:
-
- .. code-block:: console
-
- ./tools/dpdk-devbind.py --bind vfio-pci 0000:83:00.0
-
-#. Start ``testpmd`` with basic parameters:
-
- .. code-block:: console
-
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 83:00.0 -- -i
-
- Example output:
-
- .. code-block:: console
-
- ...
- EAL: PCI device 0000:83:00.0 on NUMA socket 1
- EAL: probe driver: 8086:1572 rte_i40e_pmd
- EAL: PCI memory mapped at 0x7f7f80000000
- EAL: PCI memory mapped at 0x7f7f80800000
- PMD: eth_i40e_dev_init(): FW 5.0 API 1.5 NVM 05.00.02 eetrack 8000208a
- Interactive-mode selected
- Configuring Port 0 (socket 0)
- ...
-
- PMD: i40e_dev_rx_queue_setup(): Rx Burst Bulk Alloc Preconditions are
- satisfied.Rx Burst Bulk Alloc function will be used on port=0, queue=0.
-
- ...
- Port 0: 68:05:CA:26:85:84
- Checking link statuses...
- Port 0 Link Up - speed 10000 Mbps - full-duplex
- Done
+Driver compilation and testing
+------------------------------
- testpmd>
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
SR-IOV: Prerequisites and sample Application Notes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------
#. Load the kernel module:
@@ -254,6 +183,37 @@ SR-IOV: Prerequisites and sample Application Notes
#. Assign VF to VM, and bring up the VM.
Please see the documentation for the *I40E/IXGBE/IGB Virtual Function Driver*.
+#. Running testpmd:
+
+ Follow instructions available in the document
+ :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+ to run testpmd.
+
+ Example output:
+
+ .. code-block:: console
+
+ ...
+ EAL: PCI device 0000:83:00.0 on NUMA socket 1
+ EAL: probe driver: 8086:1572 rte_i40e_pmd
+ EAL: PCI memory mapped at 0x7f7f80000000
+ EAL: PCI memory mapped at 0x7f7f80800000
+ PMD: eth_i40e_dev_init(): FW 5.0 API 1.5 NVM 05.00.02 eetrack 8000208a
+ Interactive-mode selected
+ Configuring Port 0 (socket 0)
+ ...
+
+ PMD: i40e_dev_rx_queue_setup(): Rx Burst Bulk Alloc Preconditions are
+ satisfied.Rx Burst Bulk Alloc function will be used on port=0, queue=0.
+
+ ...
+ Port 0: 68:05:CA:26:85:84
+ Checking link statuses...
+ Port 0 Link Up - speed 10000 Mbps - full-duplex
+ Done
+
+ testpmd>
+
Sample Application Notes
------------------------
@@ -267,7 +227,7 @@ To start ``testpmd``, and add vlan 10 to port 0:
.. code-block:: console
- ./app/testpmd -c ffff -n 4 -- -i --forward-mode=mac
+ ./app/testpmd -l 0-15 -n 4 -- -i --forward-mode=mac
...
testpmd> set promisc 0 off
@@ -302,7 +262,7 @@ Start ``testpmd`` with ``--disable-rss`` and ``--pkt-filter-mode=perfect``:
.. code-block:: console
- ./app/testpmd -c ffff -n 4 -- -i --disable-rss --pkt-filter-mode=perfect \
+ ./app/testpmd -l 0-15 -n 4 -- -i --disable-rss --pkt-filter-mode=perfect \
--rxq=8 --txq=8 --nb-cores=8 --nb-ports=1
Add a rule to direct ``ipv4-udp`` packet whose ``dst_ip=2.2.2.5, src_ip=2.2.2.3, src_port=32, dst_port=32`` to queue 1:
@@ -444,8 +404,8 @@ is used as the VF driver, DPDK cannot choose 16 byte receive descriptor. That
is to say, user should keep ``CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n`` in
config file.
-Link down with i40e kernel driver after DPDK application exist
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Link down with i40e kernel driver after DPDK application exit
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
After DPDK application quit, and the device is bound back to Linux i40e
kernel driver, the link cannot be up after ``ifconfig <dev> up``.
@@ -459,3 +419,31 @@ Receive packets with Ethertype 0x88A8
Due to the FW limitation, PF can receive packets with Ethertype 0x88A8
only when floating VEB is disabled.
+
+Incorrect Rx statistics when packet is oversize
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When a packet is over maximum frame size, the packet is dropped.
+However the Rx statistics, when calling `rte_eth_stats_get` incorrectly
+shows it as received.
+
+VF & TC max bandwidth setting
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The per VF max bandwidth and per TC max bandwidth cannot be enabled in parallel.
+The dehavior is different when handling per VF and per TC max bandwidth setting.
+When enabling per VF max bandwidth, SW will check if per TC max bandwidth is
+enabled. If so, return failure.
+When enabling per TC max bandwidth, SW will check if per VF max bandwidth
+is enabled. If so, disable per VF max bandwidth and continue with per TC max
+bandwidth setting.
+
+TC TX scheduling mode setting
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There're 2 TX scheduling modes for TCs, round robin and strict priority mode.
+If a TC is set to strict priority mode, it can consume unlimited bandwidth.
+It means if APP has set the max bandwidth for that TC, it comes to no
+effect.
+It's suggested to set the strict priority mode for a TC that is latency
+sensitive but no consuming much bandwidth.
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 92d56a59..240d0824 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -36,9 +36,13 @@ Network Interface Controller Drivers
:numbered:
overview
+ build_and_test
+ ark
+ avp
bnx2x
bnxt
cxgbe
+ dpaa2
e1000em
ena
enic
@@ -46,11 +50,15 @@ Network Interface Controller Drivers
i40e
ixgbe
intel_vf
+ kni
+ liquidio
mlx4
mlx5
nfp
qede
+ sfc_efx
szedata2
+ tap
thunderx
virtio
vhost
diff --git a/doc/guides/nics/intel_vf.rst b/doc/guides/nics/intel_vf.rst
index 9fe42093..1e83bf6e 100644
--- a/doc/guides/nics/intel_vf.rst
+++ b/doc/guides/nics/intel_vf.rst
@@ -124,12 +124,12 @@ However:
The above is an important consideration to take into account when targeting specific packets to a selected port.
-Intel® Fortville 10/40 Gigabit Ethernet Controller VF Infrastructure
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Intel® X710/XL710 Gigabit Ethernet Controller VF Infrastructure
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In a virtualized environment, the programmer can enable a maximum of *128 Virtual Functions (VF)*
-globally per Intel® Fortville 10/40 Gigabit Ethernet Controller NIC device.
-Each VF can have a maximum of 16 queue pairs.
+globally per Intel® X710/XL710 Gigabit Ethernet Controller NIC device.
+The number of queue pairs of each VF can be configured by ``CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF`` in ``config`` file.
The Physical Function in host could be either configured by the Linux* i40e driver
(in the case of the Linux Kernel-based Virtual Machine [KVM]) or by DPDK PMD PF driver.
When using both DPDK PMD PF/VF drivers, the whole NIC will be taken over by DPDK based application.
@@ -156,44 +156,6 @@ For example,
Launch the DPDK testpmd/example or your own host daemon application using the DPDK PMD library.
-* Using the DPDK PMD PF ixgbe driver to enable VF RSS:
-
- Same steps as above to install the modules of uio, igb_uio, specify max_vfs for PCI device, and
- launch the DPDK testpmd/example or your own host daemon application using the DPDK PMD library.
-
- The available queue number(at most 4) per VF depends on the total number of pool, which is
- determined by the max number of VF at PF initialization stage and the number of queue specified
- in config:
-
- * If the max number of VF is set in the range of 1 to 32:
-
- If the number of rxq is specified as 4(e.g. '--rxq 4' in testpmd), then there are totally 32
- pools(ETH_32_POOLS), and each VF could have 4 or less(e.g. 2) queues;
-
- If the number of rxq is specified as 2(e.g. '--rxq 2' in testpmd), then there are totally 32
- pools(ETH_32_POOLS), and each VF could have 2 queues;
-
- * If the max number of VF is in the range of 33 to 64:
-
- If the number of rxq is 4 ('--rxq 4' in testpmd), then error message is expected as rxq is not
- correct at this case;
-
- If the number of rxq is 2 ('--rxq 2' in testpmd), then there is totally 64 pools(ETH_64_POOLS),
- and each VF have 2 queues;
-
- On host, to enable VF RSS functionality, rx mq mode should be set as ETH_MQ_RX_VMDQ_RSS
- or ETH_MQ_RX_RSS mode, and SRIOV mode should be activated(max_vfs >= 1).
- It also needs config VF RSS information like hash function, RSS key, RSS key length.
-
- .. code-block:: console
-
- testpmd -c 0xffff -n 4 -- --coremask=<core-mask> --rxq=4 --txq=4 -i
-
- The limitation for VF RSS on Intel® 82599 10 Gigabit Ethernet Controller is:
- The hash and key are shared among PF and all VF, the RETA table with 128 entries is also shared
- among PF and all VF; So it could not to provide a method to query the hash and reta content per
- VF on guest, while, if possible, please query them on host(PF) for the shared RETA information.
-
Virtual Function enumeration is performed in the following sequence by the Linux* pci driver for a dual-port NIC.
When you enable the four Virtual Functions with the above command, the four enabled functions have a Function#
represented by (Bus#, Device#, Function#) in sequence starting from 0 to 3.
@@ -207,6 +169,9 @@ However:
The above is an important consideration to take into account when targeting specific packets to a selected port.
+ For Intel® X710/XL710 Gigabit Ethernet Controller, queues are in pairs. One queue pair means one receive queue and
+ one transmit queue. The default number of queue pairs per VF is 4, and can be 16 in maximum.
+
Intel® 82599 10 Gigabit Ethernet Controller VF Infrastructure
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -241,6 +206,42 @@ For example,
Launch the DPDK testpmd/example or your own host daemon application using the DPDK PMD library.
+* Using the DPDK PMD PF ixgbe driver to enable VF RSS:
+
+ Same steps as above to install the modules of uio, igb_uio, specify max_vfs for PCI device, and
+ launch the DPDK testpmd/example or your own host daemon application using the DPDK PMD library.
+
+ The available queue number (at most 4) per VF depends on the total number of pool, which is
+ determined by the max number of VF at PF initialization stage and the number of queue specified
+ in config:
+
+ * If the max number of VFs (max_vfs) is set in the range of 1 to 32:
+
+ If the number of Rx queues is specified as 4 (``--rxq=4`` in testpmd), then there are totally 32
+ pools (ETH_32_POOLS), and each VF could have 4 Rx queues;
+
+ If the number of Rx queues is specified as 2 (``--rxq=2`` in testpmd), then there are totally 32
+ pools (ETH_32_POOLS), and each VF could have 2 Rx queues;
+
+ * If the max number of VFs (max_vfs) is in the range of 33 to 64:
+
+ If the number of Rx queues in specified as 4 (``--rxq=4`` in testpmd), then error message is expected
+ as ``rxq`` is not correct at this case;
+
+ If the number of rxq is 2 (``--rxq=2`` in testpmd), then there is totally 64 pools (ETH_64_POOLS),
+ and each VF have 2 Rx queues;
+
+ On host, to enable VF RSS functionality, rx mq mode should be set as ETH_MQ_RX_VMDQ_RSS
+ or ETH_MQ_RX_RSS mode, and SRIOV mode should be activated (max_vfs >= 1).
+ It also needs config VF RSS information like hash function, RSS key, RSS key length.
+
+.. note::
+
+ The limitation for VF RSS on Intel® 82599 10 Gigabit Ethernet Controller is:
+ The hash and key are shared among PF and all VF, the RETA table with 128 entries is also shared
+ among PF and all VF; So it could not to provide a method to query the hash and reta content per
+ VF on guest, while, if possible, please query them on host for the shared RETA information.
+
Virtual Function enumeration is performed in the following sequence by the Linux* pci driver for a dual-port NIC.
When you enable the four Virtual Functions with the above command, the four enabled functions have a Function#
represented by (Bus#, Device#, Function#) in sequence starting from 0 to 3.
@@ -513,7 +514,7 @@ The setup procedure is as follows:
.. code-block:: console
make install T=x86_64-native-linuxapp-gcc
- ./x86_64-native-linuxapp-gcc/app/testpmd -c f -n 4 -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -l 0-3 -n 4 -- -i
#. Finally, access the Guest OS using vncviewer with the localhost:5900 port and check the lspci command output in the Guest OS.
The virtual functions will be listed as available for use.
diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index 3b6851b6..696ff693 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -95,9 +95,6 @@ Other features are supported using optional MACRO configuration. They include:
* HW extend dual VLAN
-* Enabled by RX_OLFLAGS (RTE_IXGBE_RX_OLFLAGS_ENABLE=y)
-
-
To guarantee the constraint, configuration flags in dev_conf.rxmode will be checked:
* hw_vlan_strip
@@ -129,7 +126,7 @@ The tx_rs_thresh value must be greater than or equal to RTE_PMD_IXGBE_TX_MAX_BUR
but less or equal to RTE_IXGBE_TX_MAX_FREE_BUF_SZ.
Consequently, by default the tx_rs_thresh value is in the range 32 to 64.
-Feature not Supported by RX Vector PMD
+Feature not Supported by TX Vector PMD
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TX vPMD only works when txq_flags is set to IXGBE_SIMPLE_FLAGS.
@@ -148,45 +145,33 @@ The following MACROs are used for these three features:
* ETH_TXQ_FLAGS_NOXSUMTCP
Application Programming Interface
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------
In DPDK release v16.11 an API for ixgbe specific functions has been added to the ixgbe PMD.
The declarations for the API functions are in the header ``rte_pmd_ixgbe.h``.
Sample Application Notes
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-testpmd
-^^^^^^^
-
-By default, using CONFIG_RTE_IXGBE_RX_OLFLAGS_ENABLE=y:
-
-.. code-block:: console
-
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 300 -n 4 -- -i --burst=32 --rxfreet=32 --mbcache=250 --txpt=32 --rxht=8 --rxwt=0 --txfreet=32 --txrst=32 --txqflags=0xf01
-
-When CONFIG_RTE_IXGBE_RX_OLFLAGS_ENABLE=n, better performance can be achieved:
-
-.. code-block:: console
-
- ./x86_64-native-linuxapp-gcc/app/testpmd -c 300 -n 4 -- -i --burst=32 --rxfreet=32 --mbcache=250 --txpt=32 --rxht=8 --rxwt=0 --txfreet=32 --txrst=32 --txqflags=0xf01 --disable-hw-vlan
+------------------------
l3fwd
-^^^^^
+~~~~~
When running l3fwd with vPMD, there is one thing to note.
In the configuration, ensure that port_conf.rxmode.hw_ip_checksum=0.
Otherwise, by default, RX vPMD is disabled.
load_balancer
-^^^^^^^^^^^^^
+~~~~~~~~~~~~~
As in the case of l3fwd, set configure port_conf.rxmode.hw_ip_checksum=0 to enable vPMD.
In addition, for improved performance, use -bsz "(32,32),(64,64),(32,32)" in load_balancer to avoid using the default burst size of 144.
+Limitations or Known issues
+---------------------------
+
Malicious Driver Detection not Supported
-----------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Intel x550 series NICs support a feature called MDD (Malicious
Driver Detection) which checks the behavior of the VF driver.
@@ -200,14 +185,17 @@ There's significant performance impact to support MDD. DPDK should check if
the advanced context descriptor should be set and set it. And DPDK has to ask
the info about the header length from the upper layer, because parsing the
packet itself is not acceptable. So, it's too expensive to support MDD.
-When using kernel PF + DPDK VF on x550, please make sure using the kernel
-driver that disables MDD or can disable MDD. (Some kernel driver can use
-this CLI 'insmod ixgbe.ko MDD=0,0' to disable MDD. Some kernel driver disables
-it by default.)
+When using kernel PF + DPDK VF on x550, please make sure to use a kernel
+PF driver that disables MDD or can disable MDD.
+
+Some kernel drivers already disable MDD by default while some kernels can use
+the command ``insmod ixgbe.ko MDD=0,0`` to disable MDD. Each "0" in the
+command refers to a port. For example, if there are 6 ixgbe ports, the command
+should be changed to ``insmod ixgbe.ko MDD=0,0,0,0,0,0``.
Statistics
-----------
+~~~~~~~~~~
The statistics of ixgbe hardware must be polled regularly in order for it to
remain consistent. Running a DPDK application without polling the statistics will
@@ -230,6 +218,15 @@ be calculated as follows:
In order to ensure valid results, it is recommended to poll every 4 minutes.
+MTU setting
+~~~~~~~~~~~
+
+Although the user can set the MTU separately on PF and VF ports, the ixgbe NIC
+only supports one global MTU per physical port.
+So when the user sets different MTUs on PF and VF ports in one physical port,
+the real MTU for all these PF and VF ports is the largest value set.
+This behavior is based on the kernel driver behavior.
+
Supported Chipsets and NICs
---------------------------
diff --git a/doc/guides/nics/kni.rst b/doc/guides/nics/kni.rst
new file mode 100644
index 00000000..77542b56
--- /dev/null
+++ b/doc/guides/nics/kni.rst
@@ -0,0 +1,197 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+KNI Poll Mode Driver
+======================
+
+KNI PMD is wrapper to the :ref:`librte_kni <kni>` library.
+
+This PMD enables using KNI without having a KNI specific application,
+any forwarding application can use PMD interface for KNI.
+
+Sending packets to any DPDK controlled interface or sending to the
+Linux networking stack will be transparent to the DPDK application.
+
+To create a KNI device ``net_kni#`` device name should be used, and this
+will create ``kni#`` Linux virtual network interface.
+
+There is no physical device backend for the virtual KNI device.
+
+Packets sent to the KNI Linux interface will be received by the DPDK
+application, and DPDK application may forward packets to a physical NIC
+or to a virtual device (like another KNI interface or PCAP interface).
+
+To forward any traffic from physical NIC to the Linux networking stack,
+an application should control a physical port and create one virtual KNI port,
+and forward between two.
+
+Using this PMD requires KNI kernel module be inserted.
+
+
+Usage
+-----
+
+EAL ``--vdev`` argument can be used to create KNI device instance, like::
+
+ testpmd --vdev=net_kni0 --vdev=net_kn1 -- -i
+
+Above command will create ``kni0`` and ``kni1`` Linux network interfaces,
+those interfaces can be controlled by standard Linux tools.
+
+When testpmd forwarding starts, any packets sent to ``kni0`` interface
+forwarded to the ``kni1`` interface and vice versa.
+
+There is no hard limit on number of interfaces that can be created.
+
+
+Default interface configuration
+-------------------------------
+
+``librte_kni`` can create Linux network interfaces with different features,
+feature set controlled by a configuration struct, and KNI PMD uses a fixed
+configuration:
+
+ .. code-block:: console
+
+ Interface name: kni#
+ force bind kernel thread to a core : NO
+ mbuf size: MAX_PACKET_SZ
+
+KNI control path is not supported with the PMD, since there is no physical
+backend device by default.
+
+
+PMD arguments
+-------------
+
+``no_request_thread``, by default PMD creates a phtread for each KNI interface
+to handle Linux network interface control commands, like ``ifconfig kni0 up``
+
+With ``no_request_thread`` option, pthread is not created and control commands
+not handled by PMD.
+
+By default request thread is enabled. And this argument should not be used
+most of the time, unless this PMD used with customized DPDK application to handle
+requests itself.
+
+Argument usage::
+
+ testpmd --vdev "net_kni0,no_request_thread=1" -- -i
+
+
+PMD log messages
+----------------
+
+If KNI kernel module (rte_kni.ko) not inserted, following error log printed::
+
+ "KNI: KNI subsystem has not been initialized. Invoke rte_kni_init() first"
+
+
+PMD testing
+-----------
+
+It is possible to test PMD quickly using KNI kernel module loopback feature:
+
+* Insert KNI kernel module with loopback support:
+
+ .. code-block:: console
+
+ insmod build/kmod/rte_kni.ko lo_mode=lo_mode_fifo_skb
+
+* Start testpmd with no physical device but two KNI virtual devices:
+
+ .. code-block:: console
+
+ ./testpmd --vdev net_kni0 --vdev net_kni1 -- -i
+
+ .. code-block:: console
+
+ ...
+ Configuring Port 0 (socket 0)
+ KNI: pci: 00:00:00 c580:b8
+ Port 0: 1A:4A:5B:7C:A2:8C
+ Configuring Port 1 (socket 0)
+ KNI: pci: 00:00:00 600:b9
+ Port 1: AE:95:21:07:93:DD
+ Checking link statuses...
+ Port 0 Link Up - speed 10000 Mbps - full-duplex
+ Port 1 Link Up - speed 10000 Mbps - full-duplex
+ Done
+ testpmd>
+
+* Observe Linux interfaces
+
+ .. code-block:: console
+
+ $ ifconfig kni0 && ifconfig kni1
+ kni0: flags=4098<BROADCAST,MULTICAST> mtu 1500
+ ether ae:8e:79:8e:9b:c8 txqueuelen 1000 (Ethernet)
+ RX packets 0 bytes 0 (0.0 B)
+ RX errors 0 dropped 0 overruns 0 frame 0
+ TX packets 0 bytes 0 (0.0 B)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
+
+ kni1: flags=4098<BROADCAST,MULTICAST> mtu 1500
+ ether 9e:76:43:53:3e:9b txqueuelen 1000 (Ethernet)
+ RX packets 0 bytes 0 (0.0 B)
+ RX errors 0 dropped 0 overruns 0 frame 0
+ TX packets 0 bytes 0 (0.0 B)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
+
+
+* Start forwarding with tx_first:
+
+ .. code-block:: console
+
+ testpmd> start tx_first
+
+* Quit and check forwarding stats:
+
+ .. code-block:: console
+
+ testpmd> quit
+ Telling cores to stop...
+ Waiting for lcores to finish...
+
+ ---------------------- Forward statistics for port 0 ----------------------
+ RX-packets: 35637905 RX-dropped: 0 RX-total: 35637905
+ TX-packets: 35637947 TX-dropped: 0 TX-total: 35637947
+ ----------------------------------------------------------------------------
+
+ ---------------------- Forward statistics for port 1 ----------------------
+ RX-packets: 35637915 RX-dropped: 0 RX-total: 35637915
+ TX-packets: 35637937 TX-dropped: 0 TX-total: 35637937
+ ----------------------------------------------------------------------------
+
+ +++++++++++++++ Accumulated forward statistics for all ports+++++++++++++++
+ RX-packets: 71275820 RX-dropped: 0 RX-total: 71275820
+ TX-packets: 71275884 TX-dropped: 0 TX-total: 71275884
+ ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
diff --git a/doc/guides/nics/liquidio.rst b/doc/guides/nics/liquidio.rst
new file mode 100644
index 00000000..f04cb16d
--- /dev/null
+++ b/doc/guides/nics/liquidio.rst
@@ -0,0 +1,223 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Cavium, Inc. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+LiquidIO VF Poll Mode Driver
+============================
+
+The LiquidIO VF PMD library (librte_pmd_lio) provides poll mode driver support for
+Cavium LiquidIO® II server adapter VFs. PF management and VF creation can be
+done using kernel driver.
+
+More information can be found at `Cavium Official Website
+<http://cavium.com/LiquidIO_Adapters.html>`_.
+
+Supported LiquidIO Adapters
+-----------------------------
+
+- LiquidIO II CN2350 210SV/225SV
+- LiquidIO II CN2360 210SV/225SV
+
+
+Pre-Installation Configuration
+------------------------------
+
+The following options can be modified in the ``config`` file.
+Please note that enabling debugging options may affect system performance.
+
+- ``CONFIG_RTE_LIBRTE_LIO_PMD`` (default ``y``)
+
+ Toggle compilation of LiquidIO PMD.
+
+- ``CONFIG_RTE_LIBRTE_LIO_DEBUG_DRIVER`` (default ``n``)
+
+ Toggle display of generic debugging messages.
+
+- ``CONFIG_RTE_LIBRTE_LIO_DEBUG_INIT`` (default ``n``)
+
+ Toggle display of initialization related messages.
+
+- ``CONFIG_RTE_LIBRTE_LIO_DEBUG_RX`` (default ``n``)
+
+ Toggle display of receive fast path run-time messages.
+
+- ``CONFIG_RTE_LIBRTE_LIO_DEBUG_TX`` (default ``n``)
+
+ Toggle display of transmit fast path run-time messages.
+
+- ``CONFIG_RTE_LIBRTE_LIO_DEBUG_MBOX`` (default ``n``)
+
+ Toggle display of mailbox messages.
+
+- ``CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS`` (default ``n``)
+
+ Toggle display of register reads and writes.
+
+
+SR-IOV: Prerequisites and Sample Application Notes
+--------------------------------------------------
+
+This section provides instructions to configure SR-IOV with Linux OS.
+
+#. Verify SR-IOV and ARI capabilities are enabled on the adapter using ``lspci``:
+
+ .. code-block:: console
+
+ lspci -s <slot> -vvv
+
+ Example output:
+
+ .. code-block:: console
+
+ [...]
+ Capabilities: [148 v1] Alternative Routing-ID Interpretation (ARI)
+ [...]
+ Capabilities: [178 v1] Single Root I/O Virtualization (SR-IOV)
+ [...]
+ Kernel driver in use: LiquidIO
+
+#. Load the kernel module:
+
+ .. code-block:: console
+
+ modprobe liquidio
+
+#. Bring up the PF ports:
+
+ .. code-block:: console
+
+ ifconfig p4p1 up
+ ifconfig p4p2 up
+
+#. Change PF MTU if required:
+
+ .. code-block:: console
+
+ ifconfig p4p1 mtu 9000
+ ifconfig p4p2 mtu 9000
+
+#. Create VF device(s):
+
+ Echo number of VFs to be created into ``"sriov_numvfs"`` sysfs entry
+ of the parent PF.
+
+ .. code-block:: console
+
+ echo 1 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
+ echo 1 > /sys/bus/pci/devices/0000:03:00.1/sriov_numvfs
+
+#. Assign VF MAC address:
+
+ Assign MAC address to the VF using iproute2 utility. The syntax is::
+
+ ip link set <PF iface> vf <VF id> mac <macaddr>
+
+ Example output:
+
+ .. code-block:: console
+
+ ip link set p4p1 vf 0 mac F2:A8:1B:5E:B4:66
+
+#. Assign VF(s) to VM.
+
+ The VF devices may be passed through to the guest VM using qemu or
+ virt-manager or virsh etc.
+
+ Example qemu guest launch command:
+
+ .. code-block:: console
+
+ ./qemu-system-x86_64 -name lio-vm -machine accel=kvm \
+ -cpu host -m 4096 -smp 4 \
+ -drive file=<disk_file>,if=none,id=disk1,format=<type> \
+ -device virtio-blk-pci,scsi=off,drive=disk1,id=virtio-disk1,bootindex=1 \
+ -device vfio-pci,host=03:00.3 -device vfio-pci,host=03:08.3
+
+#. Running testpmd
+
+ Refer to the document
+ :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>` to run
+ ``testpmd`` application.
+
+ .. note::
+
+ Use ``igb_uio`` instead of ``vfio-pci`` in VM.
+
+ Example output:
+
+ .. code-block:: console
+
+ [...]
+ EAL: PCI device 0000:03:00.3 on NUMA socket 0
+ EAL: probe driver: 177d:9712 net_liovf
+ EAL: using IOMMU type 1 (Type 1)
+ PMD: net_liovf[03:00.3]INFO: DEVICE : CN23XX VF
+ EAL: PCI device 0000:03:08.3 on NUMA socket 0
+ EAL: probe driver: 177d:9712 net_liovf
+ PMD: net_liovf[03:08.3]INFO: DEVICE : CN23XX VF
+ Interactive-mode selected
+ USER1: create a new mbuf pool <mbuf_pool_socket_0>: n=171456, size=2176, socket=0
+ Configuring Port 0 (socket 0)
+ PMD: net_liovf[03:00.3]INFO: Starting port 0
+ Port 0: F2:A8:1B:5E:B4:66
+ Configuring Port 1 (socket 0)
+ PMD: net_liovf[03:08.3]INFO: Starting port 1
+ Port 1: 32:76:CC:EE:56:D7
+ Checking link statuses...
+ Port 0 Link Up - speed 10000 Mbps - full-duplex
+ Port 1 Link Up - speed 10000 Mbps - full-duplex
+ Done
+ testpmd>
+
+
+Limitations
+-----------
+
+VF MTU
+~~~~~~
+
+VF MTU is limited by PF MTU. Raise PF value before configuring VF for larger packet size.
+
+VLAN offload
+~~~~~~~~~~~~
+
+Tx VLAN insertion is not supported and consequently VLAN offload feature is
+marked partial.
+
+Ring size
+~~~~~~~~~
+
+Number of descriptors for Rx/Tx ring should be in the range 128 to 512.
+
+CRC striping
+~~~~~~~~~~~~
+
+LiquidIO adapters strip ethernet FCS of every packet coming to the host
+interface. So, CRC will be stripped even when the ``rxmode.hw_strip_crc``
+member is set to 0 in ``struct rte_eth_conf``.
diff --git a/doc/guides/nics/mlx4.rst b/doc/guides/nics/mlx4.rst
index 49f46263..f1f26d4f 100644
--- a/doc/guides/nics/mlx4.rst
+++ b/doc/guides/nics/mlx4.rst
@@ -162,6 +162,12 @@ Run-time configuration
- **ethtool** operations on related kernel interfaces also affect the PMD.
+- ``port`` parameter [int]
+
+ This parameter provides a physical port to probe and can be specified multiple
+ times for additional ports. All ports are probed by default if left
+ unspecified.
+
Kernel module parameters
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -238,8 +244,8 @@ DPDK and must be installed separately:
Currently supported by DPDK:
-- Mellanox OFED **3.1**.
-- Firmware version **2.35.5100** and higher.
+- Mellanox OFED **4.0-2.0.0.0**.
+- Firmware version **2.40.7000**.
- Supported architectures: **x86_64** and **POWER8**.
Getting Mellanox OFED
@@ -262,6 +268,11 @@ required from that distribution.
this DPDK release was developed and tested against is strongly
recommended. Please check the `prerequisites`_.
+Supported NICs
+--------------
+
+* Mellanox(R) ConnectX(R)-3 Pro 40G MCX354A-FCC_Ax (2*40G)
+
Usage example
-------------
@@ -338,7 +349,7 @@ devices managed by librte_pmd_mlx4.
.. code-block:: console
- testpmd -c 0xff00 -n 4 -w 0000:83:00.0 -w 0000:84:00.0 -- --rxq=2 --txq=2 -i
+ testpmd -l 8-15 -n 4 -w 0000:83:00.0 -w 0000:84:00.0 -- --rxq=2 --txq=2 -i
Example output:
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 98d13419..da6dc278 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -30,10 +30,10 @@
MLX5 poll mode driver
=====================
-The MLX5 poll mode driver library (**librte_pmd_mlx5**) provides support for
-**Mellanox ConnectX-4** and **Mellanox ConnectX-4 Lx** families of
-10/25/40/50/100 Gb/s adapters as well as their virtual functions (VF) in
-SR-IOV context.
+The MLX5 poll mode driver library (**librte_pmd_mlx5**) provides support
+for **Mellanox ConnectX-4**, **Mellanox ConnectX-4 Lx** and **Mellanox
+ConnectX-5** families of 10/25/40/50/100 Gb/s adapters as well as their
+virtual functions (VF) in SR-IOV context.
Information and documentation about these adapters can be found on the
`Mellanox website <http://www.mellanox.com>`__. Help is also provided by the
@@ -86,16 +86,19 @@ Features
- Hardware checksum offloads.
- Flow director (RTE_FDIR_MODE_PERFECT, RTE_FDIR_MODE_PERFECT_MAC_VLAN and
RTE_ETH_FDIR_REJECT).
+- Flow API.
- Secondary process TX is supported.
- KVM and VMware ESX SR-IOV modes are supported.
- RSS hash result is supported.
+- Hardware TSO.
+- Hardware checksum TX offload for VXLAN and GRE.
Limitations
-----------
- Inner RSS for VXLAN frames is not supported yet.
- Port statistics through software counters only.
-- Hardware checksum offloads for VXLAN inner header are not supported yet.
+- Hardware checksum RX offloads for VXLAN inner header are not supported yet.
- Secondary process RX is not supported.
Configuration
@@ -180,13 +183,47 @@ Run-time configuration
- ``txq_mpw_en`` parameter [int]
- A nonzero value enables multi-packet send. This feature allows the TX
- burst function to pack up to five packets in two descriptors in order to
- save PCI bandwidth and improve performance at the cost of a slightly
- higher CPU usage.
+ A nonzero value enables multi-packet send (MPS) for ConnectX-4 Lx and
+ enhanced multi-packet send (Enhanced MPS) for ConnectX-5. MPS allows the
+ TX burst function to pack up multiple packets in a single descriptor
+ session in order to save PCI bandwidth and improve performance at the
+ cost of a slightly higher CPU usage. When ``txq_inline`` is set along
+ with ``txq_mpw_en``, TX burst function tries to copy entire packet data
+ on to TX descriptor instead of including pointer of packet only if there
+ is enough room remained in the descriptor. ``txq_inline`` sets
+ per-descriptor space for either pointers or inlined packets. In addition,
+ Enhanced MPS supports hybrid mode - mixing inlined packets and pointers
+ in the same descriptor.
- It is currently only supported on the ConnectX-4 Lx family of adapters.
- Enabled by default.
+ This option cannot be used in conjunction with ``tso`` below. When ``tso``
+ is set, ``txq_mpw_en`` is disabled.
+
+ It is currently only supported on the ConnectX-4 Lx and ConnectX-5
+ families of adapters. Enabled by default.
+
+- ``txq_mpw_hdr_dseg_en`` parameter [int]
+
+ A nonzero value enables including two pointers in the first block of TX
+ descriptor. This can be used to lessen CPU load for memory copy.
+
+ Effective only when Enhanced MPS is supported. Disabled by default.
+
+- ``txq_max_inline_len`` parameter [int]
+
+ Maximum size of packet to be inlined. This limits the size of packet to
+ be inlined. If the size of a packet is larger than configured value, the
+ packet isn't inlined even though there's enough space remained in the
+ descriptor. Instead, the packet is included with pointer.
+
+ Effective only when Enhanced MPS is supported. The default value is 256.
+
+- ``tso`` parameter [int]
+
+ A nonzero value enables hardware TSO.
+ When hardware TSO is enabled, packets marked with TCP segmentation
+ offload will be divided into segments by the hardware.
+
+ Disabled by default.
Prerequisites
-------------
@@ -207,8 +244,8 @@ DPDK and must be installed separately:
- **libmlx5**
- Low-level user space driver library for Mellanox ConnectX-4 devices,
- it is automatically loaded by libibverbs.
+ Low-level user space driver library for Mellanox ConnectX-4/ConnectX-5
+ devices, it is automatically loaded by libibverbs.
This library basically implements send/receive calls to the hardware
queues.
@@ -222,14 +259,15 @@ DPDK and must be installed separately:
Unlike most other PMDs, these modules must remain loaded and bound to
their devices:
- - mlx5_core: hardware driver managing Mellanox ConnectX-4 devices and
- related Ethernet kernel network devices.
+ - mlx5_core: hardware driver managing Mellanox ConnectX-4/ConnectX-5
+ devices and related Ethernet kernel network devices.
- mlx5_ib: InifiniBand device driver.
- ib_uverbs: user space driver for Verbs (entry point for libibverbs).
- **Firmware update**
- Mellanox OFED releases include firmware updates for ConnectX-4 adapters.
+ Mellanox OFED releases include firmware updates for ConnectX-4/ConnectX-5
+ adapters.
Because each release provides new features, these updates must be applied to
match the kernel modules and libraries they come with.
@@ -241,12 +279,13 @@ DPDK and must be installed separately:
Currently supported by DPDK:
-- Mellanox OFED **3.4-1.0.0.0**.
-
+- Mellanox OFED version: **4.0-2.0.0.0**
- firmware version:
- - ConnectX-4: **12.17.1010**
- - ConnectX-4 Lx: **14.17.1010**
+ - ConnectX-4: **12.18.2000**
+ - ConnectX-4 Lx: **14.18.2000**
+ - ConnectX-5: **16.19.1200**
+ - ConnectX-5 Ex: **16.19.1200**
Getting Mellanox OFED
~~~~~~~~~~~~~~~~~~~~~
@@ -268,6 +307,29 @@ required from that distribution.
this DPDK release was developed and tested against is strongly
recommended. Please check the `prerequisites`_.
+Supported NICs
+--------------
+
+* Mellanox(R) ConnectX(R)-4 10G MCX4111A-XCAT (1x10G)
+* Mellanox(R) ConnectX(R)-4 10G MCX4121A-XCAT (2x10G)
+* Mellanox(R) ConnectX(R)-4 25G MCX4111A-ACAT (1x25G)
+* Mellanox(R) ConnectX(R)-4 25G MCX4121A-ACAT (2x25G)
+* Mellanox(R) ConnectX(R)-4 40G MCX4131A-BCAT (1x40G)
+* Mellanox(R) ConnectX(R)-4 40G MCX413A-BCAT (1x40G)
+* Mellanox(R) ConnectX(R)-4 40G MCX415A-BCAT (1x40G)
+* Mellanox(R) ConnectX(R)-4 50G MCX4131A-GCAT (1x50G)
+* Mellanox(R) ConnectX(R)-4 50G MCX413A-GCAT (1x50G)
+* Mellanox(R) ConnectX(R)-4 50G MCX414A-BCAT (2x50G)
+* Mellanox(R) ConnectX(R)-4 50G MCX415A-GCAT (2x50G)
+* Mellanox(R) ConnectX(R)-4 50G MCX416A-BCAT (2x50G)
+* Mellanox(R) ConnectX(R)-4 50G MCX416A-GCAT (2x50G)
+* Mellanox(R) ConnectX(R)-4 50G MCX415A-CCAT (1x100G)
+* Mellanox(R) ConnectX(R)-4 100G MCX416A-CCAT (2x100G)
+* Mellanox(R) ConnectX(R)-4 Lx 10G MCX4121A-XCAT (2x10G)
+* Mellanox(R) ConnectX(R)-4 Lx 25G MCX4121A-ACAT (2x25G)
+* Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
+* Mellanox(R) ConnectX(R)-5 Ex EN 100G MCX516A-CDAT (2x100G)
+
Notes for testpmd
-----------------
@@ -288,8 +350,8 @@ behavior as librte_pmd_mlx4:
Usage example
-------------
-This section demonstrates how to launch **testpmd** with Mellanox ConnectX-4
-devices managed by librte_pmd_mlx5.
+This section demonstrates how to launch **testpmd** with Mellanox
+ConnectX-4/ConnectX-5 devices managed by librte_pmd_mlx5.
#. Load the kernel modules:
@@ -356,7 +418,7 @@ devices managed by librte_pmd_mlx5.
.. code-block:: console
- testpmd -c 0xff00 -n 4 -w 05:00.0 -w 05:00.1 -w 06:00.0 -w 06:00.1 -- --rxq=2 --txq=2 -i
+ testpmd -l 8-15 -n 4 -w 05:00.0 -w 05:00.1 -w 06:00.0 -w 06:00.1 -- --rxq=2 --txq=2 -i
Example output:
diff --git a/doc/guides/nics/nfp.rst b/doc/guides/nics/nfp.rst
index 4ef6e026..c732fb1f 100644
--- a/doc/guides/nics/nfp.rst
+++ b/doc/guides/nics/nfp.rst
@@ -68,10 +68,12 @@ Building the software
---------------------
Netronome's PMD code is provided in the **drivers/net/nfp** directory.
-Because Netronome´s BSP dependencies the driver is disabled by default
-in DPDK build using **common_linuxapp configuration** file. Enabling the
-driver or if you use another configuration file and want to have NFP
-support, this variable is needed:
+Although NFP PMD has Netronome´s BSP dependencies, it is possible to
+compile it along with other DPDK PMDs even if no BSP was installed before.
+Of course, a DPDK app will require such a BSP installed for using the
+NFP PMD.
+
+Default PMD configuration is at **common_linuxapp configuration** file:
- **CONFIG_RTE_LIBRTE_NFP_PMD=y**
@@ -79,85 +81,15 @@ Once DPDK is built all the DPDK apps and examples include support for
the NFP PMD.
-System configuration
---------------------
-
-Using the NFP PMD is not different to using other PMDs. Usual steps are:
-
-#. **Configure hugepages:** All major Linux distributions have the hugepages
- functionality enabled by default. By default this allows the system uses for
- working with transparent hugepages. But in this case some hugepages need to
- be created/reserved for use with the DPDK through the hugetlbfs file system.
- First the virtual file system need to be mounted:
-
- .. code-block:: console
-
- mount -t hugetlbfs none /mnt/hugetlbfs
-
- The command uses the common mount point for this file system and it needs to
- be created if necessary.
-
- Configuring hugepages is performed via sysfs:
-
- .. code-block:: console
-
- /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
-
- This sysfs file is used to specify the number of hugepages to reserve.
- For example:
-
- .. code-block:: console
-
- echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
-
- This will reserve 2GB of memory using 1024 2MB hugepages. The file may be
- read to see if the operation was performed correctly:
-
- .. code-block:: console
-
- cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
-
- The number of unused hugepages may also be inspected.
-
- Before executing the DPDK app it should match the value of nr_hugepages.
-
- .. code-block:: console
-
- cat /sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
-
- The hugepages reservation should be performed at system initialization and
- it is usual to use a kernel parameter for configuration. If the reservation
- is attempted on a busy system it will likely fail. Reserving memory for
- hugepages may be done adding the following to the grub kernel command line:
-
- .. code-block:: console
-
- default_hugepagesz=1M hugepagesz=2M hugepages=1024
-
- This will reserve 2GBytes of memory using 2Mbytes huge pages.
-
- Finally, for a NUMA system the allocation needs to be made on the correct
- NUMA node. In a DPDK app there is a master core which will (usually) perform
- memory allocation. It is important that some of the hugepages are reserved
- on the NUMA memory node where the network device is attached. This is because
- of a restriction in DPDK by which TX and RX descriptors rings must be created
- on the master code.
-
- Per-node allocation of hugepages may be inspected and controlled using sysfs.
- For example:
-
- .. code-block:: console
+Driver compilation and testing
+------------------------------
- cat /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
- For a NUMA system there will be a specific hugepage directory per node
- allowing control of hugepage reservation. A common problem may occur when
- hugepages reservation is performed after the system has been working for
- some time. Configuration using the global sysfs hugepage interface will
- succeed but the per-node allocations may be unsatisfactory.
- The number of hugepages that need to be reserved depends on how the app uses
- TX and RX descriptors, and packets mbufs.
+System configuration
+--------------------
#. **Enable SR-IOV on the NFP-6xxx device:** The current NFP PMD works with
Virtual Functions (VFs) on a NFP device. Make sure that one of the Physical
@@ -189,62 +121,3 @@ Using the NFP PMD is not different to using other PMDs. Usual steps are:
-k option shows the device driver, if any, that devices are bound to.
Depending on the modules loaded at this point the new PCI devices may be
bound to nfp_netvf driver.
-
-#. **To install the uio kernel module (manually):** All major Linux
- distributions have support for this kernel module so it is straightforward
- to install it:
-
- .. code-block:: console
-
- modprobe uio
-
- The module should now be listed by the lsmod command.
-
-#. **To install the igb_uio kernel module (manually):** This module is part
- of DPDK sources and configured by default (CONFIG_RTE_EAL_IGB_UIO=y).
-
- .. code-block:: console
-
- modprobe igb_uio.ko
-
- The module should now be listed by the lsmod command.
-
- Depending on which NFP modules are loaded, it could be necessary to
- detach NFP devices from the nfp_netvf module. If this is the case the
- device needs to be unbound, for example:
-
- .. code-block:: console
-
- echo 0000:03:08.0 > /sys/bus/pci/devices/0000:03:08.0/driver/unbind
-
- lspci -d19ee: -k
-
- The output of lspci should now show that 0000:03:08.0 is not bound to
- any driver.
-
- The next step is to add the NFP PCI ID to the IGB UIO driver:
-
- .. code-block:: console
-
- echo 19ee 6003 > /sys/bus/pci/drivers/igb_uio/new_id
-
- And then to bind the device to the igb_uio driver:
-
- .. code-block:: console
-
- echo 0000:03:08.0 > /sys/bus/pci/drivers/igb_uio/bind
-
- lspci -d19ee: -k
-
- lspci should show that device bound to igb_uio driver.
-
-#. **Using scripts to install and bind modules:** DPDK provides scripts which are
- useful for installing the UIO modules and for binding the right device to those
- modules avoiding doing so manually:
-
- * **dpdk-setup.sh**
- * **dpdk-devbind.py**
-
- Configuration may be performed by running dpdk-setup.sh which invokes
- dpdk-devbind.py as needed. Executing dpdk-setup.sh will display a menu of
- configuration options.
diff --git a/doc/guides/nics/overview.rst b/doc/guides/nics/overview.rst
index 2c7f5eb9..757a3c90 100644
--- a/doc/guides/nics/overview.rst
+++ b/doc/guides/nics/overview.rst
@@ -50,28 +50,6 @@ Most of these differences are summarized below.
.. _table_net_pmd_features:
-.. raw:: html
-
- <style>
- table#id1 th {
- font-size: 80%;
- white-space: pre-wrap;
- text-align: center;
- vertical-align: top;
- padding: 2px;
- }
- table#id1 th:first-child {
- vertical-align: bottom;
- }
- table#id1 td {
- font-size: 70%;
- padding: 1px;
- }
- table#id1 td:first-child {
- padding-left: 1em;
- }
- </style>
-
.. include:: overview_table.txt
.. Note::
diff --git a/doc/guides/nics/pcap_ring.rst b/doc/guides/nics/pcap_ring.rst
index 79c95255..5e4f5f60 100644
--- a/doc/guides/nics/pcap_ring.rst
+++ b/doc/guides/nics/pcap_ring.rst
@@ -69,7 +69,7 @@ Device name and stream options must be separated by commas as shown below:
.. code-block:: console
- $RTE_TARGET/app/testpmd -c f -n 4 \
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 \
--vdev 'net_pcap0,stream_opt0=..,stream_opt1=..' \
--vdev='net_pcap1,stream_opt0=..'
@@ -122,7 +122,7 @@ Read packets from one pcap file and write them to another:
.. code-block:: console
- $RTE_TARGET/app/testpmd -c '0xf' -n 4 \
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 \
--vdev 'net_pcap0,rx_pcap=file_rx.pcap,tx_pcap=file_tx.pcap' \
-- --port-topology=chained
@@ -130,7 +130,7 @@ Read packets from a network interface and write them to a pcap file:
.. code-block:: console
- $RTE_TARGET/app/testpmd -c '0xf' -n 4 \
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 \
--vdev 'net_pcap0,rx_iface=eth0,tx_pcap=file_tx.pcap' \
-- --port-topology=chained
@@ -138,7 +138,7 @@ Read packets from a pcap file and write them to a network interface:
.. code-block:: console
- $RTE_TARGET/app/testpmd -c '0xf' -n 4 \
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 \
--vdev 'net_pcap0,rx_pcap=file_rx.pcap,tx_iface=eth1' \
-- --port-topology=chained
@@ -146,7 +146,7 @@ Forward packets through two network interfaces:
.. code-block:: console
- $RTE_TARGET/app/testpmd -c '0xf' -n 4 \
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 \
--vdev 'net_pcap0,iface=eth0' --vdev='net_pcap1;iface=eth1'
Using libpcap-based PMD with the testpmd Application
@@ -171,7 +171,7 @@ Otherwise, the first 512 packets from the input pcap file will be discarded by t
.. code-block:: console
- $RTE_TARGET/app/testpmd -c '0xf' -n 4 \
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 \
--vdev 'net_pcap0,rx_pcap=file_rx.pcap,tx_pcap=file_tx.pcap' \
-- --port-topology=chained --no-flush-rx
@@ -185,7 +185,7 @@ Multiple devices may be specified, separated by commas.
.. code-block:: console
- ./testpmd -c E -n 4 --vdev=net_ring0 --vdev=net_ring1 -- -i
+ ./testpmd -l 1-3 -n 4 --vdev=net_ring0 --vdev=net_ring1 -- -i
EAL: Detected lcore 1 as core 1 on socket 0
...
diff --git a/doc/guides/nics/qede.rst b/doc/guides/nics/qede.rst
index d22ecdd9..afe2df89 100644
--- a/doc/guides/nics/qede.rst
+++ b/doc/guides/nics/qede.rst
@@ -32,7 +32,7 @@ QEDE Poll Mode Driver
======================
The QEDE poll mode driver library (**librte_pmd_qede**) implements support
-for **QLogic FastLinQ QL4xxxx 25G/40G/100G CNA** family of adapters as well
+for **QLogic FastLinQ QL4xxxx 10G/25G/40G/50G/100G CNA** family of adapters as well
as their virtual functions (VF) in SR-IOV context. It is supported on
several standard Linux distros like RHEL7.x, SLES12.x and Ubuntu.
It is compile-tested under FreeBSD OS.
@@ -59,27 +59,29 @@ Supported Features
- MTU change
- Multiprocess aware
- Scatter-Gather
+- VXLAN tunneling offload
+- N-tuple filter and flow director (limited support)
+- LRO/TSO
Non-supported Features
----------------------
- SR-IOV PF
-- Tunneling offloads
-- LRO/TSO
+- GENEVE and NVGRE Tunneling offloads
- NPAR
Supported QLogic Adapters
-------------------------
-- QLogic FastLinQ QL4xxxx 10G/25G/40G/100G CNAs.
+- QLogic FastLinQ QL4xxxx 10G/25G/40G/50G/100G CNAs.
Prerequisites
-------------
-- Requires firmware version **8.10.x.** and management firmware
- version **8.10.x or higher**. Firmware may be available
+- Requires firmware version **8.18.x.** and management firmware
+ version **8.18.x or higher**. Firmware may be available
inbox in certain newer Linux distros under the standard directory
- ``E.g. /lib/firmware/qed/qed_init_values-8.10.9.0.bin``
+ ``E.g. /lib/firmware/qed/qed_init_values-8.18.9.0.bin``
- If the required firmware files are not available then visit
`QLogic Driver Download Center <http://driverdownloads.qlogic.com>`_.
@@ -118,72 +120,104 @@ enabling debugging options may affect system performance.
- ``CONFIG_RTE_LIBRTE_QEDE_FW`` (default **""**)
Gives absolute path of firmware file.
- ``Eg: "/lib/firmware/qed/qed_init_values_zipped-8.10.9.0.bin"``
+ ``Eg: "/lib/firmware/qed/qed_init_values_zipped-8.18.9.0.bin"``
Empty string indicates driver will pick up the firmware file
from the default location.
-Driver Compilation
-~~~~~~~~~~~~~~~~~~
+Driver compilation and testing
+------------------------------
-To compile QEDE PMD for Linux x86_64 gcc target, run the following ``make``
-command::
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
- cd <DPDK-source-directory>
- make config T=x86_64-native-linuxapp-gcc install
+SR-IOV: Prerequisites and Sample Application Notes
+--------------------------------------------------
-To compile QEDE PMD for Linux x86_64 clang target, run the following ``make``
-command::
+This section provides instructions to configure SR-IOV with Linux OS.
- cd <DPDK-source-directory>
- make config T=x86_64-native-linuxapp-clang install
+**Note**: librte_pmd_qede will be used to bind to SR-IOV VF device and Linux native kernel driver (QEDE) will function as SR-IOV PF driver. Requires PF driver to be 8.10.x.x or higher.
-To compile QEDE PMD for FreeBSD x86_64 clang target, run the following ``gmake``
-command::
+#. Verify SR-IOV and ARI capability is enabled on the adapter using ``lspci``:
- cd <DPDK-source-directory>
- gmake config T=x86_64-native-bsdapp-clang install
+ .. code-block:: console
-To compile QEDE PMD for FreeBSD x86_64 gcc target, run the following ``gmake``
-command::
+ lspci -s <slot> -vvv
- cd <DPDK-source-directory>
- gmake config T=x86_64-native-bsdapp-gcc install -Wl,-rpath=\
- /usr/local/lib/gcc49 CC=gcc49
+ Example output:
+ .. code-block:: console
-Sample Application Notes
-~~~~~~~~~~~~~~~~~~~~~~~~
+ [...]
+ Capabilities: [1b8 v1] Alternative Routing-ID Interpretation (ARI)
+ [...]
+ Capabilities: [1c0 v1] Single Root I/O Virtualization (SR-IOV)
+ [...]
+ Kernel driver in use: igb_uio
-This section demonstrates how to launch ``testpmd`` with QLogic 4xxxx
-devices managed by ``librte_pmd_qede`` in Linux operating system.
+#. Load the kernel module:
-#. Request huge pages:
+ .. code-block:: console
+
+ modprobe qede
+
+ Example output:
+
+ .. code-block:: console
+
+ systemd-udevd[4848]: renamed network interface eth0 to ens5f0
+ systemd-udevd[4848]: renamed network interface eth1 to ens5f1
+
+#. Bring up the PF ports:
.. code-block:: console
- echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages/ \
- nr_hugepages
+ ifconfig ens5f0 up
+ ifconfig ens5f1 up
+
+#. Create VF device(s):
+
+ Echo the number of VFs to be created into ``"sriov_numvfs"`` sysfs entry
+ of the parent PF.
-#. Load ``igb_uio`` driver:
+ Example output:
.. code-block:: console
- insmod ./x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+ echo 2 > /sys/devices/pci0000:00/0000:00:03.0/0000:81:00.0/sriov_numvfs
-#. Bind the QLogic 4xxxx adapters to ``igb_uio`` loaded in the
- previous step:
+
+#. Assign VF MAC address:
+
+ Assign MAC address to the VF using iproute2 utility. The syntax is::
+
+ ip link set <PF iface> vf <VF id> mac <macaddr>
+
+ Example output:
.. code-block:: console
- ./tools/dpdk-devbind.py --bind igb_uio 0000:84:00.0 0000:84:00.1 \
- 0000:84:00.2 0000:84:00.3
+ ip link set ens5f0 vf 0 mac 52:54:00:2f:9d:e8
+
-#. Start ``testpmd`` with basic parameters:
- (Enable QEDE_DEBUG_INFO=y to view informational messages)
+#. PCI Passthrough:
+
+ The VF devices may be passed through to the guest VM using ``virt-manager`` or
+ ``virsh``. QEDE PMD should be used to bind the VF devices in the guest VM
+ using the instructions from Driver compilation and testing section above.
+
+
+#. Running testpmd
+ (Enable QEDE_DEBUG_INFO=y to view informational messages):
+
+ Refer to the document
+ :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>` to run
+ ``testpmd`` application.
+
+ Example output:
.. code-block:: console
- testpmd -c 0xff1 -n 4 -- -i --nb-cores=8 --portmask=0xf --rxd=4096 \
+ testpmd -l 0,4-11 -n 4 -- -i --nb-cores=8 --portmask=0xf --rxd=4096 \
--txd=4096 --txfreet=4068 --enable-rx-cksum --rxq=4 --txq=4 \
--rss-ip --rss-udp
@@ -234,79 +268,3 @@ devices managed by ``librte_pmd_qede`` in Linux operating system.
Port 3 Link Up - speed 25000 Mbps - full-duplex
Done
testpmd>
-
-
-SR-IOV: Prerequisites and Sample Application Notes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This section provides instructions to configure SR-IOV with Linux OS.
-
-**Note**: librte_pmd_qede will be used to bind to SR-IOV VF device and Linux native kernel driver (QEDE) will function as SR-IOV PF driver. Requires PF driver to be 8.10.x.x or higher.
-
-#. Verify SR-IOV and ARI capability is enabled on the adapter using ``lspci``:
-
- .. code-block:: console
-
- lspci -s <slot> -vvv
-
- Example output:
-
- .. code-block:: console
-
- [...]
- Capabilities: [1b8 v1] Alternative Routing-ID Interpretation (ARI)
- [...]
- Capabilities: [1c0 v1] Single Root I/O Virtualization (SR-IOV)
- [...]
- Kernel driver in use: igb_uio
-
-#. Load the kernel module:
-
- .. code-block:: console
-
- modprobe qede
-
- Example output:
-
- .. code-block:: console
-
- systemd-udevd[4848]: renamed network interface eth0 to ens5f0
- systemd-udevd[4848]: renamed network interface eth1 to ens5f1
-
-#. Bring up the PF ports:
-
- .. code-block:: console
-
- ifconfig ens5f0 up
- ifconfig ens5f1 up
-
-#. Create VF device(s):
-
- Echo the number of VFs to be created into ``"sriov_numvfs"`` sysfs entry
- of the parent PF.
-
- Example output:
-
- .. code-block:: console
-
- echo 2 > /sys/devices/pci0000:00/0000:00:03.0/0000:81:00.0/sriov_numvfs
-
-
-#. Assign VF MAC address:
-
- Assign MAC address to the VF using iproute2 utility. The syntax is::
-
- ip link set <PF iface> vf <VF id> mac <macaddr>
-
- Example output:
-
- .. code-block:: console
-
- ip link set ens5f0 vf 0 mac 52:54:00:2f:9d:e8
-
-
-#. PCI Passthrough:
-
- The VF devices may be passed through to the guest VM using ``virt-manager`` or
- ``virsh``. QEDE PMD should be used to bind the VF devices in the guest VM
- using the instructions outlined in the Application notes above.
diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
new file mode 100644
index 00000000..5f825e9a
--- /dev/null
+++ b/doc/guides/nics/sfc_efx.rst
@@ -0,0 +1,277 @@
+.. BSD LICENSE
+ Copyright (c) 2016 Solarflare Communications Inc.
+ All rights reserved.
+
+ This software was jointly developed between OKTET Labs (under contract
+ for Solarflare) and Solarflare Communications, Inc.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Solarflare libefx-based Poll Mode Driver
+========================================
+
+The SFC EFX PMD (**librte_pmd_sfc_efx**) provides poll mode driver support
+for **Solarflare SFN7xxx and SFN8xxx** family of 10/40 Gbps adapters.
+SFC EFX PMD has support for the latest Linux and FreeBSD operating systems.
+
+More information can be found at `Solarflare Communications website
+<http://solarflare.com>`_.
+
+
+Features
+--------
+
+SFC EFX PMD has support for:
+
+- Multiple transmit and receive queues
+
+- Link state information including link status change interrupt
+
+- IPv4/IPv6 TCP/UDP transmit checksum offload
+
+- Port hardware statistics
+
+- Extended statistics (see Solarflare Server Adapter User's Guide for
+ the statistics description)
+
+- Basic flow control
+
+- MTU update
+
+- Jumbo frames up to 9K
+
+- Promiscuous mode
+
+- Allmulticast mode
+
+- TCP segmentation offload (TSO)
+
+- Multicast MAC filter
+
+- IPv4/IPv6 TCP/UDP receive checksum offload
+
+- Received packet type information
+
+- Receive side scaling (RSS)
+
+- RSS hash
+
+- Scattered Rx DMA for packet that are larger that a single Rx descriptor
+
+- Deferred receive and transmit queue start
+
+- Transmit VLAN insertion (if running firmware variant supports it)
+
+- Flow API
+
+
+Non-supported Features
+----------------------
+
+The features not yet supported include:
+
+- Receive queue interupts
+
+- Priority-based flow control
+
+- Loopback
+
+- Configurable RX CRC stripping (always stripped)
+
+- Header split on receive
+
+- VLAN filtering
+
+- VLAN stripping
+
+- LRO
+
+
+Limitations
+-----------
+
+Due to requirements on receive buffer alignment and usage of the receive
+buffer for the auxiliary packet information provided by the NIC up to
+extra 269 (14 bytes prefix plus up to 255 bytes for end padding) bytes may be
+required in the receive buffer.
+It should be taken into account when mbuf pool for receive is created.
+
+
+Flow API support
+----------------
+
+Supported attributes:
+
+- Ingress
+
+Supported pattern items:
+
+- VOID
+
+- ETH (exact match of source/destination addresses, individual/group match
+ of destination address, EtherType)
+
+- VLAN (exact match of VID, double-tagging is supported)
+
+- IPV4 (exact match of source/destination addresses,
+ IP transport protocol)
+
+- IPV6 (exact match of source/destination addresses,
+ IP transport protocol)
+
+- TCP (exact match of source/destination ports)
+
+- UDP (exact match of source/destination ports)
+
+Supported actions:
+
+- VOID
+
+- QUEUE
+
+Validating flow rules depends on the firmware variant.
+
+Ethernet destinaton individual/group match
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Ethernet item supports I/G matching, if only the corresponding bit is set
+in the mask of destination address. If destinaton address in the spec is
+multicast, it matches all multicast (and broadcast) packets, oherwise it
+matches unicast packets that are not filtered by other flow rules.
+
+
+Supported NICs
+--------------
+
+- Solarflare Flareon [Ultra] Server Adapters:
+
+ - Solarflare SFN8522 Dual Port SFP+ Server Adapter
+
+ - Solarflare SFN8542 Dual Port QSFP+ Server Adapter
+
+ - Solarflare SFN7002F Dual Port SFP+ Server Adapter
+
+ - Solarflare SFN7004F Quad Port SFP+ Server Adapter
+
+ - Solarflare SFN7042Q Dual Port QSFP+ Server Adapter
+
+ - Solarflare SFN7122F Dual Port SFP+ Server Adapter
+
+ - Solarflare SFN7124F Quad Port SFP+ Server Adapter
+
+ - Solarflare SFN7142Q Dual Port QSFP+ Server Adapter
+
+ - Solarflare SFN7322F Precision Time Synchronization Server Adapter
+
+
+Prerequisites
+-------------
+
+- Requires firmware version:
+
+ - SFN7xxx: **4.7.1.1001** or higher
+
+ - SFN8xxx: **6.0.2.1004** or higher
+
+Visit `Solarflare Support Downloads <https://support.solarflare.com>`_ to get
+Solarflare Utilities (either Linux or FreeBSD) with the latest firmware.
+Follow instructions from Solarflare Server Adapter User's Guide to
+update firmware and configure the adapter.
+
+
+Pre-Installation Configuration
+------------------------------
+
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following options can be modified in the ``.config`` file.
+Please note that enabling debugging options may affect system performance.
+
+- ``CONFIG_RTE_LIBRTE_SFC_EFX_PMD`` (default **y**)
+
+ Enable compilation of Solarflare libefx-based poll-mode driver.
+
+- ``CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG`` (default **n**)
+
+ Enable compilation of the extra run-time consistency checks.
+
+
+Per-Device Parameters
+~~~~~~~~~~~~~~~~~~~~~
+
+The following per-device parameters can be passed via EAL PCI device
+whitelist option like "-w 02:00.0,arg1=value1,...".
+
+Case-insensitive 1/y/yes/on or 0/n/no/off may be used to specify
+boolean parameters value.
+
+- ``rx_datapath`` [auto|efx|ef10] (default **auto**)
+
+ Choose receive datapath implementation.
+ **auto** allows the driver itself to make a choice based on firmware
+ features available and required by the datapath implementation.
+ **efx** chooses libefx-based datapath which supports Rx scatter.
+ **ef10** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which is
+ more efficient than libefx-based and provides richer packet type
+ classification, but lacks Rx scatter support.
+
+- ``tx_datapath`` [auto|efx|ef10|ef10_simple] (default **auto**)
+
+ Choose transmit datapath implementation.
+ **auto** allows the driver itself to make a choice based on firmware
+ features available and required by the datapath implementation.
+ **efx** chooses libefx-based datapath which supports VLAN insertion
+ (full-feature firmware variant only), TSO and multi-segment mbufs.
+ **ef10** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which is
+ more efficient than libefx-based but has no VLAN insertion and TSO
+ support yet.
+ **ef10_simple** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which
+ is even more faster then **ef10** but does not support multi-segment
+ mbufs.
+
+- ``perf_profile`` [auto|throughput|low-latency] (default **throughput**)
+
+ Choose hardware tunning to be optimized for either throughput or
+ low-latency.
+ **auto** allows NIC firmware to make a choice based on
+ installed licences and firmware variant configured using **sfboot**.
+
+- ``debug_init`` [bool] (default **n**)
+
+ Enable extra logging during device intialization and startup.
+
+- ``mcdi_logging`` [bool] (default **n**)
+
+ Enable extra logging of the communication with the NIC's management CPU.
+ The logging is done using RTE_LOG() with INFO level and PMD type.
+ The format is consumed by the Solarflare netlogdecode cross-platform tool.
+
+- ``stats_update_period_ms`` [long] (default **1000**)
+
+ Adjust period in milliseconds to update port hardware statistics.
+ The accepted range is 0 to 65535. The value of **0** may be used
+ to disable periodic statistics update. One should note that it's
+ only possible to set an arbitrary value on SFN8xxx provided that
+ firmware version is 6.2.1.1033 or higher, otherwise any positive
+ value will select a fixed update period of **1000** milliseconds
diff --git a/doc/guides/nics/szedata2.rst b/doc/guides/nics/szedata2.rst
index 741b4008..60080a9f 100644
--- a/doc/guides/nics/szedata2.rst
+++ b/doc/guides/nics/szedata2.rst
@@ -31,16 +31,16 @@
SZEDATA2 poll mode driver library
=================================
-The SZEDATA2 poll mode driver library implements support for cards from COMBO
-family (**COMBO-80G**, **COMBO-100G**).
-The SZEDATA2 PMD uses interface provided by libsze2 library to communicate
-with COMBO cards over sze2 layer.
+The SZEDATA2 poll mode driver library implements support for the Netcope
+FPGA Boards (**NFB-***), FPGA-based programmable NICs.
+The SZEDATA2 PMD uses interface provided by the libsze2 library to communicate
+with the NFB cards over the sze2 layer.
-More information about family of
-`COMBO cards <https://www.liberouter.org/technologies/cards/>`_
+More information about the
+`NFB cards <http://www.netcope.com/en/products/fpga-boards>`_
and used technology
-(`NetCOPE platform <https://www.liberouter.org/technologies/netcope/>`_) can be
-found on the `Liberouter website <https://www.liberouter.org/>`_.
+(`Netcope Development Kit <http://www.netcope.com/en/products/fpga-development-kit>`_)
+can be found on the `Netcope Technologies website <http://www.netcope.com/>`_.
.. note::
@@ -77,7 +77,7 @@ separately:
sharing of resources for user space applications.
Information about getting the dependencies can be found `here
-<https://www.liberouter.org/technologies/netcope/access-to-libsze2-library/>`_.
+<http://www.netcope.com/en/company/community-support/dpdk-libsze2>`_.
Configuration
-------------
@@ -117,7 +117,7 @@ transmit channel:
.. code-block:: console
- $RTE_TARGET/app/testpmd -c 0xf -n 2 \
+ $RTE_TARGET/app/testpmd -l 0-3 -n 2 \
-- --port-topology=chained --rxq=2 --txq=2 --nb-cores=2 -i -a
Example output:
diff --git a/doc/guides/nics/tap.rst b/doc/guides/nics/tap.rst
new file mode 100644
index 00000000..5c5ba535
--- /dev/null
+++ b/doc/guides/nics/tap.rst
@@ -0,0 +1,197 @@
+.. BSD LICENSE
+ Copyright(c) 2016 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Tun/Tap Poll Mode Driver
+========================
+
+The ``rte_eth_tap.c`` PMD creates a device using TUN/TAP interfaces on the
+local host. The PMD allows for DPDK and the host to communicate using a raw
+device interface on the host and in the DPDK application.
+
+The device created is a TAP device, which sends/receives packet in a raw
+format with a L2 header. The usage for a TAP PMD is for connectivity to the
+local host using a TAP interface. When the TAP PMD is initialized it will
+create a number of tap devices in the host accessed via ``ifconfig -a`` or
+``ip`` command. The commands can be used to assign and query the virtual like
+device.
+
+These TAP interfaces can be used with Wireshark or tcpdump or Pktgen-DPDK
+along with being able to be used as a network connection to the DPDK
+application. The method enable one or more interfaces is to use the
+``--vdev=net_tap0`` option on the DPDK application command line. Each
+``--vdev=net_tap1`` option give will create an interface named dtap0, dtap1,
+and so on.
+
+The interface name can be changed by adding the ``iface=foo0``, for example::
+
+ --vdev=net_tap0,iface=foo0 --vdev=net_tap1,iface=foo1, ...
+
+Also the speed of the interface can be changed from 10G to whatever number
+needed, but the interface does not enforce that speed, for example::
+
+ --vdev=net_tap0,iface=foo0,speed=25000
+
+It is possible to specify a remote netdevice to capture packets from by adding
+``remote=foo1``, for example::
+
+ --vdev=net_tap,iface=tap0,remote=foo1
+
+If a ``remote`` is set, the tap MAC address will be set to match the remote one
+just after netdevice creation. Using TC rules, traffic from the remote netdevice
+will be redirected to the tap. If the tap is in promiscuous mode, then all
+packets will be redirected. In allmulti mode, all multicast packets will be
+redirected.
+
+Using the remote feature is especially useful for capturing traffic from a
+netdevice that has no support in the DPDK. It is possible to add explicit
+rte_flow rules on the tap PMD to capture specific traffic (see next section for
+examples).
+
+After the DPDK application is started you can send and receive packets on the
+interface using the standard rx_burst/tx_burst APIs in DPDK. From the host
+point of view you can use any host tool like tcpdump, Wireshark, ping, Pktgen
+and others to communicate with the DPDK application. The DPDK application may
+not understand network protocols like IPv4/6, UDP or TCP unless the
+application has been written to understand these protocols.
+
+If you need the interface as a real network interface meaning running and has
+a valid IP address then you can do this with the following commands::
+
+ sudo ip link set dtap0 up; sudo ip addr add 192.168.0.250/24 dev dtap0
+ sudo ip link set dtap1 up; sudo ip addr add 192.168.1.250/24 dev dtap1
+
+Please change the IP addresses as you see fit.
+
+If routing is enabled on the host you can also communicate with the DPDK App
+over the internet via a standard socket layer application as long as you
+account for the protocol handing in the application.
+
+If you have a Network Stack in your DPDK application or something like it you
+can utilize that stack to handle the network protocols. Plus you would be able
+to address the interface using an IP address assigned to the internal
+interface.
+
+Flow API support
+----------------
+
+The tap PMD supports major flow API pattern items and actions, when running on
+linux kernels above 4.2 ("Flower" classifier required). Supported items:
+
+- eth: src and dst (with variable masks), and eth_type (0xffff mask).
+- vlan: vid, pcp, tpid, but not eid. (requires kernel 4.9)
+- ipv4/6: src and dst (with variable masks), and ip_proto (0xffff mask).
+- udp/tcp: src and dst port (0xffff) mask.
+
+Supported actions:
+
+- DROP
+- QUEUE
+- PASSTHRU
+
+It is generally not possible to provide a "last" item. However, if the "last"
+item, once masked, is identical to the masked spec, then it is supported.
+
+Only IPv4/6 and MAC addresses can use a variable mask. All other items need a
+full mask (exact match).
+
+As rules are translated to TC, it is possible to show them with something like::
+
+ tc -s filter show dev tap1 parent 1:
+
+Examples of testpmd flow rules
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Drop packets for destination IP 192.168.0.1::
+
+ testpmd> flow create 0 priority 1 ingress pattern eth / ipv4 dst is 1.1.1.1 \
+ / end actions drop / end
+
+Ensure packets from a given MAC address are received on a queue 2::
+
+ testpmd> flow create 0 priority 2 ingress pattern eth src is 06:05:04:03:02:01 \
+ / end actions queue index 2 / end
+
+Drop UDP packets in vlan 3::
+
+ testpmd> flow create 0 priority 3 ingress pattern eth / vlan vid is 3 / \
+ ipv4 proto is 17 / end actions drop / end
+
+Example
+-------
+
+The following is a simple example of using the TUN/TAP PMD with the Pktgen
+packet generator. It requires that the ``socat`` utility is installed on the
+test system.
+
+Build DPDK, then pull down Pktgen and build pktgen using the DPDK SDK/Target
+used to build the dpdk you pulled down.
+
+Run pktgen from the pktgen directory in a terminal with a commandline like the
+following::
+
+ sudo ./app/app/x86_64-native-linuxapp-gcc/app/pktgen -l 1-5 -n 4 \
+ --proc-type auto --log-level 8 --socket-mem 512,512 --file-prefix pg \
+ --vdev=net_tap0 --vdev=net_tap1 -b 05:00.0 -b 05:00.1 \
+ -b 04:00.0 -b 04:00.1 -b 04:00.2 -b 04:00.3 \
+ -b 81:00.0 -b 81:00.1 -b 81:00.2 -b 81:00.3 \
+ -b 82:00.0 -b 83:00.0 -- -T -P -m [2:3].0 -m [4:5].1 \
+ -f themes/black-yellow.theme
+
+.. Note:
+
+ Change the ``-b`` options to blacklist all of your physical ports. The
+ following command line is all one line.
+
+ Also, ``-f themes/black-yellow.theme`` is optional if the default colors
+ work on your system configuration. See the Pktgen docs for more
+ information.
+
+Verify with ``ifconfig -a`` command in a different xterm window, should have a
+``dtap0`` and ``dtap1`` interfaces created.
+
+Next set the links for the two interfaces to up via the commands below::
+
+ sudo ip link set dtap0 up; sudo ip addr add 192.168.0.250/24 dev dtap0
+ sudo ip link set dtap1 up; sudo ip addr add 192.168.1.250/24 dev dtap1
+
+Then use socat to create a loopback for the two interfaces::
+
+ sudo socat interface:dtap0 interface:dtap1
+
+Then on the Pktgen command line interface you can start sending packets using
+the commands ``start 0`` and ``start 1`` or you can start both at the same
+time with ``start all``. The command ``str`` is an alias for ``start all`` and
+``stp`` is an alias for ``stop all``.
+
+While running you should see the 64 byte counters increasing to verify the
+traffic is being looped back. You can use ``set all size XXX`` to change the
+size of the packets after you stop the traffic. Use pktgen ``help``
+command to see a list of all commands. You can also use the ``-f`` option to
+load commands at startup in command line or Lua script in pktgen.
diff --git a/doc/guides/nics/thunderx.rst b/doc/guides/nics/thunderx.rst
index 187c9a4a..4fa0039d 100644
--- a/doc/guides/nics/thunderx.rst
+++ b/doc/guides/nics/thunderx.rst
@@ -77,9 +77,8 @@ Config File Options
The following options can be modified in the ``config`` file.
Please note that enabling debugging options may affect system performance.
-- ``CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD`` (default ``n``)
+- ``CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD`` (default ``y``)
- By default it is enabled only for defconfig_arm64-thunderx-* config.
Toggle compilation of the ``librte_pmd_thunderx_nicvf`` driver.
- ``CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT`` (default ``n``)
@@ -102,95 +101,18 @@ Please note that enabling debugging options may affect system performance.
Toggle display of PF mailbox related run-time check messages
-Driver Compilation
-~~~~~~~~~~~~~~~~~~
-
-To compile the ThunderX NICVF PMD for Linux arm64 gcc target, run the
-following “make” command:
+Driver compilation and testing
+------------------------------
-.. code-block:: console
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
- cd <DPDK-source-directory>
- make config T=arm64-thunderx-linuxapp-gcc install
+To compile the ThunderX NICVF PMD for Linux arm64 gcc,
+use arm64-thunderx-linuxapp-gcc as target.
Linux
-----
-.. _thunderx_testpmd_example:
-
-Running testpmd
-~~~~~~~~~~~~~~~
-
-This section demonstrates how to launch ``testpmd`` with ThunderX NIC VF device
-managed by ``librte_pmd_thunderx_nicvf`` in the Linux operating system.
-
-#. Load ``vfio-pci`` driver:
-
- .. code-block:: console
-
- modprobe vfio-pci
-
- .. _thunderx_vfio_noiommu:
-
-#. Enable **VFIO-NOIOMMU** mode (optional):
-
- .. code-block:: console
-
- echo 1 > /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
-
- .. note::
-
- **VFIO-NOIOMMU** is required only when running in VM context and should not be enabled otherwise.
- See also :ref:`SR-IOV: Prerequisites and sample Application Notes <thunderx_sriov_example>`.
-
-#. Bind the ThunderX NIC VF device to ``vfio-pci`` loaded in the previous step:
-
- Setup VFIO permissions for regular users and then bind to ``vfio-pci``:
-
- .. code-block:: console
-
- ./tools/dpdk-devbind.py --bind vfio-pci 0002:01:00.2
-
-#. Start ``testpmd`` with basic parameters:
-
- .. code-block:: console
-
- ./arm64-thunderx-linuxapp-gcc/app/testpmd -c 0xf -n 4 -w 0002:01:00.2 \
- -- -i --disable-hw-vlan-filter --crc-strip --no-flush-rx \
- --port-topology=loop
-
- Example output:
-
- .. code-block:: console
-
- ...
-
- PMD: rte_nicvf_pmd_init(): librte_pmd_thunderx nicvf version 1.0
-
- ...
- EAL: probe driver: 177d:11 rte_nicvf_pmd
- EAL: using IOMMU type 1 (Type 1)
- EAL: PCI memory mapped at 0x3ffade50000
- EAL: Trying to map BAR 4 that contains the MSI-X table.
- Trying offsets: 0x40000000000:0x0000, 0x10000:0x1f0000
- EAL: PCI memory mapped at 0x3ffadc60000
- PMD: nicvf_eth_dev_init(): nicvf: device (177d:11) 2:1:0:2
- PMD: nicvf_eth_dev_init(): node=0 vf=1 mode=tns-bypass sqs=false
- loopback_supported=true
- PMD: nicvf_eth_dev_init(): Port 0 (177d:11) mac=a6:c6:d9:17:78:01
- Interactive-mode selected
- Configuring Port 0 (socket 0)
- ...
-
- PMD: nicvf_dev_configure(): Configured ethdev port0 hwcap=0x0
- Port 0: A6:C6:D9:17:78:01
- Checking link statuses...
- Port 0 Link Up - speed 10000 Mbps - full-duplex
- Done
- testpmd>
-
-.. _thunderx_sriov_example:
-
SR-IOV: Prerequisites and sample Application Notes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -249,58 +171,10 @@ This section provides instructions to configure SR-IOV with Linux OS.
Unless ``thunder-nicvf`` driver is in use make sure your kernel config includes ``CONFIG_THUNDER_NIC_VF`` setting.
-#. Verify PF/VF bind using ``dpdk-devbind.py``:
-
- .. code-block:: console
-
- ./tools/dpdk-devbind.py --status
-
- Example output:
-
- .. code-block:: console
-
- ...
- 0002:01:00.0 'Device a01e' if= drv=thunder-nic unused=vfio-pci
- 0002:01:00.1 'Device 0011' if=eth0 drv=thunder-nicvf unused=vfio-pci
- 0002:01:00.2 'Device 0011' if=eth1 drv=thunder-nicvf unused=vfio-pci
- ...
-
-#. Load ``vfio-pci`` driver:
-
- .. code-block:: console
-
- modprobe vfio-pci
-
-#. Bind VF devices to ``vfio-pci`` using ``dpdk-devbind.py``:
-
- .. code-block:: console
-
- ./tools/dpdk-devbind.py --bind vfio-pci 0002:01:00.1
- ./tools/dpdk-devbind.py --bind vfio-pci 0002:01:00.2
-
-#. Verify VF bind using ``dpdk-devbind.py``:
-
- .. code-block:: console
-
- ./tools/dpdk-devbind.py --status
-
- Example output:
-
- .. code-block:: console
-
- ...
- 0002:01:00.1 'Device 0011' drv=vfio-pci unused=
- 0002:01:00.2 'Device 0011' drv=vfio-pci unused=
- ...
- 0002:01:00.0 'Device a01e' if= drv=thunder-nic unused=vfio-pci
- ...
-
#. Pass VF device to VM context (PCIe Passthrough):
The VF devices may be passed through to the guest VM using qemu or
virt-manager or virsh etc.
- ``librte_pmd_thunderx_nicvf`` or ``thunder-nicvf`` should be used to bind
- the VF devices in the guest VM in :ref:`VFIO-NOIOMMU <thunderx_vfio_noiommu>` mode.
Example qemu guest launch command:
@@ -321,8 +195,55 @@ This section provides instructions to configure SR-IOV with Linux OS.
-serial stdio \
-mem-path /dev/huge
-#. Refer to section :ref:`Running testpmd <thunderx_testpmd_example>` for instruction
- how to launch ``testpmd`` application.
+#. Enable **VFIO-NOIOMMU** mode (optional):
+
+ .. code-block:: console
+
+ echo 1 > /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
+
+ .. note::
+
+ **VFIO-NOIOMMU** is required only when running in VM context and should not be enabled otherwise.
+
+#. Running testpmd:
+
+ Follow instructions available in the document
+ :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+ to run testpmd.
+
+ Example output:
+
+ .. code-block:: console
+
+ ./arm64-thunderx-linuxapp-gcc/app/testpmd -l 0-3 -n 4 -w 0002:01:00.2 \
+ -- -i --disable-hw-vlan-filter --disable-crc-strip --no-flush-rx \
+ --port-topology=loop
+
+ ...
+
+ PMD: rte_nicvf_pmd_init(): librte_pmd_thunderx nicvf version 1.0
+
+ ...
+ EAL: probe driver: 177d:11 rte_nicvf_pmd
+ EAL: using IOMMU type 1 (Type 1)
+ EAL: PCI memory mapped at 0x3ffade50000
+ EAL: Trying to map BAR 4 that contains the MSI-X table.
+ Trying offsets: 0x40000000000:0x0000, 0x10000:0x1f0000
+ EAL: PCI memory mapped at 0x3ffadc60000
+ PMD: nicvf_eth_dev_init(): nicvf: device (177d:11) 2:1:0:2
+ PMD: nicvf_eth_dev_init(): node=0 vf=1 mode=tns-bypass sqs=false
+ loopback_supported=true
+ PMD: nicvf_eth_dev_init(): Port 0 (177d:11) mac=a6:c6:d9:17:78:01
+ Interactive-mode selected
+ Configuring Port 0 (socket 0)
+ ...
+
+ PMD: nicvf_dev_configure(): Configured ethdev port0 hwcap=0x0
+ Port 0: A6:C6:D9:17:78:01
+ Checking link statuses...
+ Port 0 Link Up - speed 10000 Mbps - full-duplex
+ Done
+ testpmd>
Multiple Queue Set per DPDK port configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -352,7 +273,7 @@ driver' list, secondary VFs are on the remaining on the remaining part of the li
.. note::
The VNIC driver in the multiqueue setup works differently than other drivers like `ixgbe`.
- We need to bind separately each specific queue set device with the ``tools/dpdk-devbind.py`` utility.
+ We need to bind separately each specific queue set device with the ``usertools/dpdk-devbind.py`` utility.
.. note::
@@ -372,7 +293,7 @@ on a non-NUMA machine.
.. code-block:: console
- # tools/dpdk-devbind.py --status
+ # usertools/dpdk-devbind.py --status
Network devices using DPDK-compatible driver
============================================
@@ -416,17 +337,17 @@ We will choose four secondary queue sets from the ending of the list (0002:01:01
.. code-block:: console
- tools/dpdk-devbind.py -b vfio-pci 0002:01:00.2
- tools/dpdk-devbind.py -b vfio-pci 0002:01:00.3
+ usertools/dpdk-devbind.py -b vfio-pci 0002:01:00.2
+ usertools/dpdk-devbind.py -b vfio-pci 0002:01:00.3
#. Bind four primary VFs to the ``vfio-pci`` driver:
.. code-block:: console
- tools/dpdk-devbind.py -b vfio-pci 0002:01:01.7
- tools/dpdk-devbind.py -b vfio-pci 0002:01:02.0
- tools/dpdk-devbind.py -b vfio-pci 0002:01:02.1
- tools/dpdk-devbind.py -b vfio-pci 0002:01:02.2
+ usertools/dpdk-devbind.py -b vfio-pci 0002:01:01.7
+ usertools/dpdk-devbind.py -b vfio-pci 0002:01:02.0
+ usertools/dpdk-devbind.py -b vfio-pci 0002:01:02.1
+ usertools/dpdk-devbind.py -b vfio-pci 0002:01:02.2
The nicvf thunderx driver will make use of attached secondary VFs automatically during the interface configuration stage.
diff --git a/doc/guides/nics/vhost.rst b/doc/guides/nics/vhost.rst
index 6b30b54e..e651a166 100644
--- a/doc/guides/nics/vhost.rst
+++ b/doc/guides/nics/vhost.rst
@@ -92,7 +92,7 @@ This section demonstrates vhost PMD with testpmd DPDK sample application.
.. code-block:: console
- ./testpmd -c f -n 4 --vdev 'net_vhost0,iface=/tmp/sock0,queues=1' -- -i
+ ./testpmd -l 0-3 -n 4 --vdev 'net_vhost0,iface=/tmp/sock0,queues=1' -- -i
Other basic DPDK preparations like hugepage enabling here.
Please refer to the *DPDK Getting Started Guide* for detailed instructions.
diff --git a/doc/guides/nics/virtio.rst b/doc/guides/nics/virtio.rst
index 54310157..91bedea6 100644
--- a/doc/guides/nics/virtio.rst
+++ b/doc/guides/nics/virtio.rst
@@ -87,6 +87,8 @@ In this release, the virtio PMD driver provides the basic functionality of packe
* Virtio supports Link State interrupt.
+* Virtio supports Rx interrupt (so far, only support 1:1 mapping for queue/interrupt).
+
* Virtio supports software vlan stripping and inserting.
* Virtio supports using port IO to get PCI resource when uio/igb_uio module is not available.
@@ -128,7 +130,7 @@ Host2VM communication example
.. code-block:: console
- examples/kni/build/app/kni -c 0xf -n 4 -- -p 0x1 -P --config="(0,1,3)"
+ examples/kni/build/app/kni -l 0-3 -n 4 -- -p 0x1 -P --config="(0,1,3)"
This command generates one network device vEth0 for physical port.
If specify more physical ports, the generated network device will be vEth1, vEth2, and so on.
@@ -172,7 +174,7 @@ Host2VM communication example
modprobe uio
echo 512 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
modprobe uio_pci_generic
- python tools/dpdk-devbind.py -b uio_pci_generic 00:03.0
+ python usertools/dpdk-devbind.py -b uio_pci_generic 00:03.0
We use testpmd as the forwarding application in this example.
@@ -273,4 +275,62 @@ The corresponding callbacks are:
Example of using the vector version of the virtio poll mode driver in
``testpmd``::
- testpmd -c 0x7 -n 4 -- -i --txqflags=0xF01 --rxq=1 --txq=1 --nb-cores=1
+ testpmd -l 0-2 -n 4 -- -i --txqflags=0xF01 --rxq=1 --txq=1 --nb-cores=1
+
+
+Interrupt mode
+--------------
+
+.. _virtio_interrupt_mode:
+
+There are three kinds of interrupts from a virtio device over PCI bus: config
+interrupt, Rx interrupts, and Tx interrupts. Config interrupt is used for
+notification of device configuration changes, especially link status (lsc).
+Interrupt mode is translated into Rx interrupts in the context of DPDK.
+
+Prerequisites for Rx interrupts
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To support Rx interrupts,
+#. Check if guest kernel supports VFIO-NOIOMMU:
+
+ Linux started to support VFIO-NOIOMMU since 4.8.0. Make sure the guest
+ kernel is compiled with:
+
+ .. code-block:: console
+
+ CONFIG_VFIO_NOIOMMU=y
+
+#. Properly set msix vectors when starting VM:
+
+ Enable multi-queue when starting VM, and specify msix vectors in qemu
+ cmdline. (N+1) is the minimum, and (2N+2) is mostly recommended.
+
+ .. code-block:: console
+
+ $(QEMU) ... -device virtio-net-pci,mq=on,vectors=2N+2 ...
+
+#. In VM, insert vfio module in NOIOMMU mode:
+
+ .. code-block:: console
+
+ modprobe vfio enable_unsafe_noiommu_mode=1
+ modprobe vfio-pci
+
+#. In VM, bind the virtio device with vfio-pci:
+
+ .. code-block:: console
+
+ python usertools/dpdk-devbind.py -b vfio-pci 00:03.0
+
+Example
+~~~~~~~
+
+Here we use l3fwd-power as an example to show how to get started.
+
+ Example:
+
+ .. code-block:: console
+
+ $ l3fwd-power -l 0-1 -- -p 1 -P --config="(0,0,1)" \
+ --no-numa --parse-ptype
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index ca3f551b..4f98f28c 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -70,11 +70,11 @@ From the command line using the --vdev EAL option
--vdev 'cryptodev_aesni_mb_pmd0,max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0'
-Our using the rte_eal_vdev_init API within the application code.
+Our using the rte_vdev_init API within the application code.
.. code-block:: c
- rte_eal_vdev_init("cryptodev_aesni_mb_pmd",
+ rte_vdev_init("cryptodev_aesni_mb_pmd",
"max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0")
All virtual Crypto devices support the following initialization parameters:
diff --git a/doc/guides/prog_guide/dev_kit_build_system.rst b/doc/guides/prog_guide/dev_kit_build_system.rst
index 19de1563..ad032c5f 100644
--- a/doc/guides/prog_guide/dev_kit_build_system.rst
+++ b/doc/guides/prog_guide/dev_kit_build_system.rst
@@ -367,7 +367,7 @@ Variables that Can be Set/Overridden in a Makefile Only
* POSTCLEAN: A list of actions to be taken after cleaning. The user should use += to append data in this variable.
-* DEPDIR-y: Only used in the development kit framework to specify if the build of the current directory depends on build of another one.
+* DEPDIRS-$(DIR): Only used in the development kit framework to specify if the build of the current directory depends on build of another one.
This is needed to support parallel builds correctly.
Variables that can be Set/Overridden by the User on the Command Line Only
diff --git a/doc/guides/prog_guide/dev_kit_root_make_help.rst b/doc/guides/prog_guide/dev_kit_root_make_help.rst
index fb3520e1..d7c41064 100644
--- a/doc/guides/prog_guide/dev_kit_root_make_help.rst
+++ b/doc/guides/prog_guide/dev_kit_root_make_help.rst
@@ -152,34 +152,6 @@ Documentation Targets
Generate the guides documentation in pdf.
-
-Deps Targets
-------------
-
-* depdirs
-
- This target is implicitly called by make config.
- Typically, there is no need for a user to call it,
- except if DEPDIRS-y variables have been updated in Makefiles.
- It will generate the file $(RTE_OUTPUT)/.depdirs.
-
- Example:
-
- .. code-block:: console
-
- make depdirs O=mybuild
-
-* depgraph
-
- This command generates a dot graph of dependencies.
- It can be displayed to debug circular dependency issues, or just to understand the dependencies.
-
- Example:
-
- .. code-block:: console
-
- make depgraph O=mybuild > /tmp/graph.dot && dotty /tmp/ graph.dot
-
Misc Targets
------------
diff --git a/doc/guides/prog_guide/efd_lib.rst b/doc/guides/prog_guide/efd_lib.rst
new file mode 100644
index 00000000..3f90fa9a
--- /dev/null
+++ b/doc/guides/prog_guide/efd_lib.rst
@@ -0,0 +1,455 @@
+.. BSD LICENSE
+ Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+.. _Efd_Library:
+
+Elastic Flow Distributor Library
+================================
+
+Introduction
+------------
+
+In Data Centers today, clustering and scheduling of distributed workloads
+is a very common task. Many workloads require a deterministic
+partitioning of a flat key space among a cluster of machines. When a
+packet enters the cluster, the ingress node will direct the packet to
+its handling node. For example, data-centers with disaggregated storage
+use storage metadata tables to forward I/O requests to the correct back end
+storage cluster, stateful packet inspection will use match incoming
+flows to signatures in flow tables to send incoming packets to their
+intended deep packet inspection (DPI) devices, and so on.
+
+EFD is a distributor library that uses perfect hashing to determine a
+target/value for a given incoming flow key. It has the following
+advantages: first, because it uses perfect hashing it does not store the
+key itself and hence lookup performance is not dependent on the key
+size. Second, the target/value can be any arbitrary value hence the
+system designer and/or operator can better optimize service rates and
+inter-cluster network traffic locating. Third, since the storage
+requirement is much smaller than a hash-based flow table (i.e. better
+fit for CPU cache), EFD can scale to millions of flow keys. Finally,
+with the current optimized library implementation, performance is fully
+scalable with any number of CPU cores.
+
+Flow Based Distribution
+-----------------------
+
+Computation Based Schemes
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Flow distribution and/or load balancing can be simply done using a
+stateless computation, for instance using round-robin or a simple
+computation based on the flow key as an input. For example, a hash
+function can be used to direct a certain flow to a target based on
+the flow key (e.g. ``h(key) mod n``) where h(key) is the hash value of the
+flow key and n is the number of possible targets.
+
+.. _figure_efd1:
+
+.. figure:: img/efd_i1.*
+
+ Load Balancing Using Front End Node
+
+In this scheme (:numref:`figure_efd1`), the front end server/distributor/load balancer
+extracts the flow key from the input packet and applies a computation to determine where
+this flow should be directed. Intuitively, this scheme is very simple
+and requires no state to be kept at the front end node, and hence,
+storage requirements are minimum.
+
+.. _figure_efd2:
+
+.. figure:: img/efd_i2.*
+
+ Consistent Hashing
+
+A widely used flow distributor that belongs to the same category of
+computation-based schemes is ``consistent hashing``, shown in :numref:`figure_efd2`.
+Target destinations (shown in red) are hashed into the same space as the flow
+keys (shown in blue), and keys are mapped to the nearest target in a clockwise
+fashion. Dynamically adding and removing targets with consistent hashing
+requires only K/n keys to be remapped on average, where K is the number of
+keys, and n is the number of targets. In contrast, in a traditional hash-based
+scheme, a change in the number of targets causes nearly all keys to be
+remapped.
+
+Although computation-based schemes are simple and need very little
+storage requirement, they suffer from the drawback that the system
+designer/operator can’t fully control the target to assign a specific
+key, as this is dictated by the hash function.
+Deterministically co-locating of keys together (for example, to minimize
+inter-server traffic or to optimize for network traffic conditions,
+target load, etc.) is simply not possible.
+
+Flow-Table Based Schemes
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+When using a Flow-Table based scheme to handle flow distribution/load
+balancing, in contrast with computation-based schemes, the system designer
+has the flexibility of assigning a given flow to any given
+target. The flow table (e.g. DPDK RTE Hash Library) will simply store
+both the flow key and the target value.
+
+.. _figure_efd3:
+
+.. figure:: img/efd_i3.*
+
+ Table Based Flow Distribution
+
+As shown in :numref:`figure_efd3`, when doing a lookup, the flow-table
+is indexed with the hash of the flow key and the keys (more than one is possible,
+because of hash collision) stored in this index and corresponding values
+are retrieved. The retrieved key(s) is matched with the input flow key
+and if there is a match the value (target id) is returned.
+
+The drawback of using a hash table for flow distribution/load balancing
+is the storage requirement, since the flow table need to store keys,
+signatures and target values. This doesn't allow this scheme to scale to
+millions of flow keys. Large tables will usually not fit in
+the CPU cache, and hence, the lookup performance is degraded because of
+the latency to access the main memory.
+
+EFD Based Scheme
+~~~~~~~~~~~~~~~~
+
+EFD combines the advantages of both flow-table based and computation-based
+schemes. It doesn't require the large storage necessary for
+flow-table based schemes (because EFD doesn't store the key as explained
+below), and it supports any arbitrary value for any given key.
+
+.. _figure_efd4:
+
+.. figure:: img/efd_i4.*
+
+ Searching for Perfect Hash Function
+
+The basic idea of EFD is when a given key is to be inserted, a family of
+hash functions is searched until the correct hash function that maps the
+input key to the correct value is found, as shown in :numref:`figure_efd4`.
+However, rather than explicitly storing all keys and their associated values,
+EFD stores only indices of hash functions that map keys to values, and
+thereby consumes much less space than conventional flow-based tables.
+The lookup operation is very simple, similar to a computational-based
+scheme: given an input key the lookup operation is reduced to hashing
+that key with the correct hash function.
+
+.. _figure_efd5:
+
+.. figure:: img/efd_i5.*
+
+ Divide and Conquer for Millions of Keys
+
+Intuitively, finding a hash function that maps each of a large number
+(millions) of input keys to the correct output value is effectively
+impossible, as a result EFD, as shown in :numref:`figure_efd5`,
+breaks the problem into smaller pieces (divide and conquer).
+EFD divides the entire input key set into many small groups.
+Each group consists of approximately 20-28 keys (a configurable parameter
+for the library), then, for each small group, a brute force search to find
+a hash function that produces the correct outputs for each key in the group.
+
+It should be mentioned that, since the online lookup table for EFD
+doesn't store the key itself, the size of the EFD table is independent
+of the key size and hence EFD lookup performance which is almost
+constant irrespective of the length of the key which is a highly
+desirable feature especially for longer keys.
+
+In summary, EFD is a set separation data structure that supports millions of
+keys. It is used to distribute a given key to an intended target. By itself
+EFD is not a FIB data structure with an exact match the input flow key.
+
+.. _Efd_example:
+
+Example of EFD Library Usage
+----------------------------
+
+EFD can be used along the data path of many network functions and middleboxes.
+As previously mentioned, it can used as an index table for
+<key,value> pairs, meta-data for objects, a flow-level load balancer, etc.
+:numref:`figure_efd6` shows an example of using EFD as a flow-level load
+balancer, where flows are received at a front end server before being forwarded
+to the target back end server for processing. The system designer would
+deterministically co-locate flows together in order to minimize cross-server
+interaction.
+(For example, flows requesting certain webpage objects are co-located
+together, to minimize forwarding of common objects across servers).
+
+.. _figure_efd6:
+
+.. figure:: img/efd_i6.*
+
+ EFD as a Flow-Level Load Balancer
+
+As shown in :numref:`figure_efd6`, the front end server will have an EFD table that
+stores for each group what is the perfect hash index that satisfies the
+correct output. Because the table size is small and fits in cache (since
+keys are not stored), it sustains a large number of flows (N*X, where N
+is the maximum number of flows served by each back end server of the X
+possible targets).
+
+With an input flow key, the group id is computed (for example, using
+last few bits of CRC hash) and then the EFD table is indexed with the
+group id to retrieve the corresponding hash index to use. Once the index
+is retrieved the key is hashed using this hash function and the result
+will be the intended correct target where this flow is supposed to be
+processed.
+
+It should be noted that as a result of EFD not matching the exact key but
+rather distributing the flows to a target back end node based on the
+perfect hash index, a key that has not been inserted before
+will be distributed to a valid target. Hence, a local table which stores
+the flows served at each node is used and is
+exact matched with the input key to rule out new never seen before
+flows.
+
+.. _Efd_api:
+
+Library API Overview
+--------------------
+
+The EFD library API is created with a very similar semantics of a
+hash-index or a flow table. The application creates an EFD table for a
+given maximum number of flows, a function is called to insert a flow key
+with a specific target value, and another function is used to retrieve
+target values for a given individual flow key or a bulk of keys.
+
+EFD Table Create
+~~~~~~~~~~~~~~~~
+
+The function ``rte_efd_create()`` is used to create and return a pointer
+to an EFD table that is sized to hold up to num_flows key.
+The online version of the EFD table (the one that does
+not store the keys and is used for lookups) will be allocated and
+created in the last level cache (LLC) of the socket defined by the
+online_socket_bitmask, while the offline EFD table (the one that
+stores the keys and is used for key inserts and for computing the
+perfect hashing) is allocated and created in the LLC of the socket
+defined by offline_socket_bitmask. It should be noted, that for
+highest performance the socket id should match that where the thread is
+running, i.e. the online EFD lookup table should be created on the same
+socket as where the lookup thread is running.
+
+EFD Insert and Update
+~~~~~~~~~~~~~~~~~~~~~
+
+The EFD function to insert a key or update a key to a new value is
+``rte_efd_update()``. This function will update an existing key to
+a new value (target) if the key has already been inserted
+before, or will insert the <key,value> pair if this key has not been inserted
+before. It will return 0 upon success. It will return
+``EFD_UPDATE_WARN_GROUP_FULL (1)`` if the operation is insert, and the
+last available space in the key's group was just used. It will return
+``EFD_UPDATE_FAILED (2)`` when the insertion or update has failed (either it
+failed to find a suitable perfect hash or the group was full). The function
+will return ``EFD_UPDATE_NO_CHANGE (3)`` if there is no change to the EFD
+table (i.e, same value already exists).
+
+.. Note::
+
+ This function is not multi-thread safe and should only be called
+ from one thread.
+
+EFD Lookup
+~~~~~~~~~~
+
+To lookup a certain key in an EFD table, the function ``rte_efd_lookup()``
+is used to return the value associated with single key.
+As previously mentioned, if the key has been inserted, the correct value
+inserted is returned, if the key has not been inserted before,
+a ‘random’ value (based on hashing of the key) is returned.
+For better performance and to decrease the overhead of
+function calls per key, it is always recommended to use a bulk lookup
+function (simultaneous lookup of multiple keys) instead of a single key
+lookup function. ``rte_efd_lookup_bulk()`` is the bulk lookup function,
+that looks up num_keys simultaneously stored in the key_list and the
+corresponding return values will be returned in the value_list.
+
+.. Note::
+
+ This function is multi-thread safe, but there should not be other threads
+ writing in the EFD table, unless locks are used.
+
+EFD Delete
+~~~~~~~~~~
+
+To delete a certain key in an EFD table, the function
+``rte_efd_delete()`` can be used. The function returns zero upon success
+when the key has been found and deleted. Socket_id is the parameter to
+use to lookup the existing value, which is ideally the caller's socket id.
+The previous value associated with this key will be returned
+in the prev_value argument.
+
+.. Note::
+
+ This function is not multi-thread safe and should only be called
+ from one thread.
+
+.. _Efd_internals:
+
+Library Internals
+-----------------
+
+This section provides the brief high-level idea and an overview
+of the library internals to accompany the RFC. The intent of this
+section is to explain to readers the high-level implementation of
+insert, lookup and group rebalancing in the EFD library.
+
+Insert Function Internals
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As previously mentioned the EFD divides the whole set of keys into
+groups of a manageable size (e.g. 28 keys) and then searches for the
+perfect hash that satisfies the intended target value for each key. EFD
+stores two version of the <key,value> table:
+
+- Offline Version (in memory): Only used for the insertion/update
+ operation, which is less frequent than the lookup operation. In the
+ offline version the exact keys for each group is stored. When a new
+ key is added, the hash function is updated that will satisfy the
+ value for the new key together with the all old keys already inserted
+ in this group.
+
+- Online Version (in cache): Used for the frequent lookup operation. In
+ the online version, as previously mentioned, the keys are not stored
+ but rather only the hash index for each group.
+
+.. _figure_efd7:
+
+.. figure:: img/efd_i7.*
+
+ Group Assignment
+
+:numref:`figure_efd7` depicts the group assignment for 7 flow keys as an example.
+Given a flow key, a hash function (in our implementation CRC hash) is
+used to get the group id. As shown in the figure, the groups can be
+unbalanced. (We highlight group rebalancing further below).
+
+.. _figure_efd8:
+
+.. figure:: img/efd_i8.*
+
+ Perfect Hash Search - Assigned Keys & Target Value
+
+Focusing on one group that has four keys, :numref:`figure_efd8` depicts the search
+algorithm to find the perfect hash function. Assuming that the target
+value bit for the keys is as shown in the figure, then the online EFD
+table will store a 16 bit hash index and 16 bit lookup table per group
+per value bit.
+
+.. _figure_efd9:
+
+.. figure:: img/efd_i9.*
+
+ Perfect Hash Search - Satisfy Target Values
+
+For a given keyX, a hash function ``(h(keyX, seed1) + index * h(keyX, seed2))``
+is used to point to certain bit index in the 16bit lookup_table value,
+as shown in :numref:`figure_efd9`.
+The insert function will brute force search for all possible values for the
+hash index until a non conflicting lookup_table is found.
+
+.. _figure_efd10:
+
+.. figure:: img/efd_i10.*
+
+ Finding Hash Index for Conflict Free lookup_table
+
+For example, since both key3 and key7 have a target bit value of 1, it
+is okay if the hash function of both keys point to the same bit in the
+lookup table. A conflict will occur if a hash index is used that maps
+both Key4 and Key7 to the same index in the lookup_table,
+as shown in :numref:`figure_efd10`, since their target value bit are not the same.
+Once a hash index is found that produces a lookup_table with no
+contradictions, this index is stored for this group. This procedure is
+repeated for each bit of target value.
+
+Lookup Function Internals
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The design principle of EFD is that lookups are much more frequent than
+inserts, and hence, EFD's design optimizes for the lookups which are
+faster and much simpler than the slower insert procedure (inserts are
+slow, because of perfect hash search as previously discussed).
+
+.. _figure_efd11:
+
+.. figure:: img/efd_i11.*
+
+ EFD Lookup Operation
+
+:numref:`figure_efd11` depicts the lookup operation for EFD. Given an input key,
+the group id is computed (using CRC hash) and then the hash index for this
+group is retrieved from the EFD table. Using the retrieved hash index,
+the hash function ``h(key, seed1) + index *h(key, seed2)`` is used which will
+result in an index in the lookup_table, the bit corresponding to this
+index will be the target value bit. This procedure is repeated for each
+bit of the target value.
+
+Group Rebalancing Function Internals
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When discussing EFD inserts and lookups, the discussion is simplified by
+assuming that a group id is simply a result of hash function. However,
+since hashing in general is not perfect and will not always produce a
+uniform output, this simplified assumption will lead to unbalanced
+groups, i.e., some group will have more keys than other groups.
+Typically, and to minimize insert time with an increasing number of keys,
+it is preferable that all groups will have a balanced number of keys, so
+the brute force search for the perfect hash terminates with a valid hash
+index. In order to achieve this target, groups are rebalanced during
+runtime inserts, and keys are moved around from a busy group to a less
+crowded group as the more keys are inserted.
+
+.. _figure_efd12:
+
+.. figure:: img/efd_i12.*
+
+ Runtime Group Rebalancing
+
+:numref:`figure_efd12` depicts the high level idea of group rebalancing, given an
+input key the hash result is split into two parts a chunk id and 8-bit
+bin id. A chunk contains 64 different groups and 256 bins (i.e. for any
+given bin it can map to 4 distinct groups). When a key is inserted, the
+bin id is computed, for example in :numref:`figure_efd12` bin_id=2,
+and since each bin can be mapped to one of four different groups (2 bit storage),
+the four possible mappings are evaluated and the one that will result in a
+balanced key distribution across these four is selected the mapping result
+is stored in these two bits.
+
+
+.. _Efd_references:
+
+References
+-----------
+
+1- EFD is based on collaborative research work between Intel and
+Carnegie Mellon University (CMU), interested readers can refer to the paper
+“Scaling Up Clustered Network Appliances with ScaleBricks;” Dong Zhou et al.
+at SIGCOMM 2015 (`http://conferences.sigcomm.org/sigcomm/2015/pdf/papers/p241.pdf`)
+for more information.
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
index 10a10a88..fff1c063 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -178,8 +178,8 @@ The EAL also allows timed callbacks to be used in the same way as for NIC interr
.. note::
- In DPDK PMD, the only interrupts handled by the dedicated host thread are those for link status change,
- i.e. link up and link down notification.
+ In DPDK PMD, the only interrupts handled by the dedicated host thread are those for link status change
+ (link up and link down notification) and for sudden device removal.
+ RX Interrupt Event
@@ -207,6 +207,23 @@ The eth_dev driver takes responsibility to program the latter mapping.
The RX interrupt are controlled/enabled/disabled by ethdev APIs - 'rte_eth_dev_rx_intr_*'. They return failure if the PMD
hasn't support them yet. The intr_conf.rxq flag is used to turn on the capability of RX interrupt per device.
++ Device Removal Event
+
+This event is triggered by a device being removed at a bus level. Its
+underlying resources may have been made unavailable (i.e. PCI mappings
+unmapped). The PMD must make sure that on such occurrence, the application can
+still safely use its callbacks.
+
+This event can be subscribed to in the same way one would subscribe to a link
+status change event. The execution context is thus the same, i.e. it is the
+dedicated interrupt host thread.
+
+Considering this, it is likely that an application would want to close a
+device having emitted a Device Removal Event. In such case, calling
+``rte_eth_dev_close()`` can trigger it to unregister its own Device Removal Event
+callback. Care must be taken not to close the device from the interrupt handler
+context. It is necessary to reschedule such closing operation.
+
Blacklisting
~~~~~~~~~~~~
@@ -352,11 +369,6 @@ Known Issues
3. It MUST not be used by multi-producer/consumer pthreads, whose scheduling policies are SCHED_FIFO or SCHED_RR.
- ``RTE_RING_PAUSE_REP_COUNT`` is defined for rte_ring to reduce contention. It's mainly for case 2, a yield is issued after number of times pause repeat.
-
- It adds a sched_yield() syscall if the thread spins for too long while waiting on the other thread to finish its operations on the ring.
- This gives the preempted thread a chance to proceed and finish with the ring enqueue/dequeue operation.
-
+ rte_timer
Running ``rte_timer_manager()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
diff --git a/doc/guides/prog_guide/extend_dpdk.rst b/doc/guides/prog_guide/extend_dpdk.rst
index 51f0b5c2..73d81999 100644
--- a/doc/guides/prog_guide/extend_dpdk.rst
+++ b/doc/guides/prog_guide/extend_dpdk.rst
@@ -39,14 +39,14 @@ Example: Adding a New Library libfoo
To add a new library to the DPDK, proceed as follows:
-#. Add a new configuration option:
+#. Add a new configuration option:
.. code-block:: bash
for f in config/\*; do \
echo CONFIG_RTE_LIBFOO=y >> $f; done
-#. Create a new directory with sources:
+#. Create a new directory with sources:
.. code-block:: console
@@ -54,7 +54,7 @@ To add a new library to the DPDK, proceed as follows:
touch ${RTE_SDK}/lib/libfoo/foo.c
touch ${RTE_SDK}/lib/libfoo/foo.h
-#. Add a foo() function in libfoo.
+#. Add a foo() function in libfoo.
Definition is in foo.c:
@@ -71,7 +71,7 @@ To add a new library to the DPDK, proceed as follows:
extern void foo(void);
-#. Update lib/Makefile:
+#. Update lib/Makefile:
.. code-block:: console
@@ -79,7 +79,7 @@ To add a new library to the DPDK, proceed as follows:
# add:
# DIRS-$(CONFIG_RTE_LIBFOO) += libfoo
-#. Create a new Makefile for this library, for example, derived from mempool Makefile:
+#. Create a new Makefile for this library, for example, derived from mempool Makefile:
.. code-block:: console
@@ -91,11 +91,11 @@ To add a new library to the DPDK, proceed as follows:
# rte_mempool -> foo
-#. Update mk/DPDK.app.mk, and add -lfoo in LDLIBS variable when the option is enabled.
- This will automatically add this flag when linking a DPDK application.
+#. Update mk/DPDK.app.mk, and add -lfoo in LDLIBS variable when the option is enabled.
+ This will automatically add this flag when linking a DPDK application.
-#. Build the DPDK with the new library (we only show a specific target here):
+#. Build the DPDK with the new library (we only show a specific target here):
.. code-block:: console
@@ -104,7 +104,7 @@ To add a new library to the DPDK, proceed as follows:
make
-#. Check that the library is installed:
+#. Check that the library is installed:
.. code-block:: console
diff --git a/doc/guides/prog_guide/img/efd_i1.svg b/doc/guides/prog_guide/img/efd_i1.svg
new file mode 100644
index 00000000..7f8fcb3b
--- /dev/null
+++ b/doc/guides/prog_guide/img/efd_i1.svg
@@ -0,0 +1,130 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export efd_i1.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="3.25609in" height="3.375in"
+ viewBox="0 0 234.439 243" xml:space="preserve" color-interpolation-filters="sRGB" class="st10">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud v:nameU="msvSubprocessMaster" v:prompt="" v:val="VT4(Rectangle)"/>
+ <v:ud v:nameU="msvNoAutoConnect" v:val="VT0(1):26"/>
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
+ .st5 {marker-end:url(#mrkr5-12);stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:6}
+ .st6 {fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-opacity:1;stroke-width:0.70422535211268}
+ .st7 {stroke:#5b9bd5;stroke-dasharray:2.25,4.5;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.25}
+ .st8 {marker-end:url(#mrkr5-39);stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.5}
+ .st9 {fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-opacity:1;stroke-width:0.37313432835821}
+ .st10 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-12" class="st6" v:arrowType="5" v:arrowSize="2" v:setback="2.485" refX="-2.485" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-1.42,-1.42) "/>
+ </marker>
+ <marker id="mrkr5-39" class="st9" v:arrowType="5" v:arrowSize="2" v:setback="4.69" refX="-4.69" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-2.68,-2.68) "/>
+ </marker>
+ </defs>
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeOrder" v:val="VT0(0):26"/>
+ </v:userDefs>
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="shape2-1" v:mID="2" v:groupContext="shape" transform="translate(77.718,-113.348)">
+ <title>Square</title>
+ <desc>LB</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="18" cy="225" width="36" height="36"/>
+ <g id="shadow2-2" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="207" width="36" height="36" class="st2"/>
+ </g>
+ <rect x="0" y="207" width="36" height="36" class="st3"/>
+ <text x="13.18" y="228" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>LB</text> </g>
+ <g id="shape3-7" v:mID="3" v:groupContext="shape" transform="translate(37.0513,-131.348)">
+ <title>Sheet.3</title>
+ <path d="M0 243 L25.76 243" class="st5"/>
+ </g>
+ <g id="shape4-13" v:mID="4" v:groupContext="shape" transform="translate(167.718,-178.598)">
+ <title>Square.4</title>
+ <desc>Target 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="22.5" cy="220.5" width="45" height="45"/>
+ <g id="shadow4-14" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="198" width="45" height="45" class="st2"/>
+ </g>
+ <rect x="0" y="198" width="45" height="45" class="st3"/>
+ <text x="5.74" y="223.5" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Target 1</text> </g>
+ <g id="shape5-19" v:mID="5" v:groupContext="shape" transform="translate(167.718,-121.005)">
+ <title>Square.5</title>
+ <desc>Target 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="22.5" cy="220.5" width="45" height="45"/>
+ <g id="shadow5-20" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="198" width="45" height="45" class="st2"/>
+ </g>
+ <rect x="0" y="198" width="45" height="45" class="st3"/>
+ <text x="5.74" y="223.5" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Target 2</text> </g>
+ <g id="shape7-25" v:mID="7" v:groupContext="shape" transform="translate(167.718,-23.3478)">
+ <title>Square.7</title>
+ <desc>Target N</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="22.5" cy="220.5" width="45" height="45"/>
+ <g id="shadow7-26" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="198" width="45" height="45" class="st2"/>
+ </g>
+ <rect x="0" y="198" width="45" height="45" class="st3"/>
+ <text x="5.05" y="223.5" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Target N</text> </g>
+ <g id="shape8-31" v:mID="8" v:groupContext="shape" transform="translate(433.218,132.402) rotate(90)">
+ <title>Sheet.8</title>
+ <path d="M0 243 L34.59 243" class="st7"/>
+ </g>
+ <g id="shape9-34" v:mID="9" v:groupContext="shape" transform="translate(-78.4279,-37.1059) rotate(-52.2532)">
+ <title>Sheet.9</title>
+ <path d="M0 243 L81.18 243" class="st8"/>
+ </g>
+ <g id="shape11-40" v:mID="11" v:groupContext="shape" transform="translate(60.3469,-125.414) rotate(-12.6875)">
+ <title>Sheet.11</title>
+ <path d="M0 243 L48.32 243" class="st8"/>
+ </g>
+ <g id="shape12-45" v:mID="12" v:groupContext="shape" transform="translate(319.172,-18.1081) rotate(57.7244)">
+ <title>Sheet.12</title>
+ <path d="M0 243 L94.09 243" class="st8"/>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/efd_i10.svg b/doc/guides/prog_guide/img/efd_i10.svg
new file mode 100644
index 00000000..d26ec61e
--- /dev/null
+++ b/doc/guides/prog_guide/img/efd_i10.svg
@@ -0,0 +1,384 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export efd_i11.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="9.76715in" height="2.82917in"
+ viewBox="0 0 703.234 203.701" xml:space="preserve" color-interpolation-filters="sRGB" class="st15">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud v:nameU="msvSubprocessMaster" v:prompt="" v:val="VT4(Rectangle)"/>
+ <v:ud v:nameU="msvNoAutoConnect" v:val="VT0(1):26"/>
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {fill:#ffffff;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st2 {fill:none;stroke:#00aeef;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st3 {stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st4 {fill:#000000;font-family:Arial;font-size:0.998566em}
+ .st5 {fill:#0071c5;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st6 {fill:#000000;font-family:Arial;font-size:0.918686em;font-style:italic}
+ .st7 {fill:#000000;font-family:Arial;font-size:0.918686em}
+ .st8 {fill:#7e8d96;font-family:Arial;font-size:0.998566em;font-weight:bold}
+ .st9 {fill:#00b050;font-family:Arial;font-size:0.998566em;font-weight:bold}
+ .st10 {fill:#ff0000;font-family:Arial;font-size:0.998566em;font-weight:bold}
+ .st11 {fill:#004280;stroke:#004280;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0299855}
+ .st12 {fill:#ffffff;font-family:Arial;font-size:1.49785em}
+ .st13 {stroke:#004280;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st14 {fill:#004280;stroke:#004280;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0149927}
+ .st15 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeOrder" v:val="VT0(0):26"/>
+ </v:userDefs>
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="shape3-1" v:mID="3" v:groupContext="shape" transform="translate(19.0195,-93.4328)">
+ <title>Sheet.3</title>
+ <path d="M0 182.93 C0 180.64 1.87 178.78 4.16 178.78 L109.18 178.78 C111.47 178.78 113.33 180.64 113.33 182.93 L113.33
+ 199.55 C113.33 201.84 111.47 203.7 109.18 203.7 L4.16 203.7 C1.87 203.7 0 201.84 0 199.55 L0 182.93 Z"
+ class="st1"/>
+ </g>
+ <g id="shape4-3" v:mID="4" v:groupContext="shape" transform="translate(19.0195,-93.4328)">
+ <title>Sheet.4</title>
+ <path d="M0 182.93 C0 180.64 1.87 178.78 4.16 178.78 L109.18 178.78 C111.47 178.78 113.33 180.64 113.33 182.93 L113.33
+ 199.55 C113.33 201.84 111.47 203.7 109.18 203.7 L4.16 203.7 C1.87 203.7 0 201.84 0 199.55 L0 182.93 Z"
+ class="st2"/>
+ </g>
+ <g id="shape5-5" v:mID="5" v:groupContext="shape" transform="translate(19.0195,-96.9057)">
+ <title>Sheet.5</title>
+ <desc>Key1: Value = 0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="58.0575" cy="196.509" width="116.12" height="14.3829"/>
+ <path d="M116.11 189.32 L0 189.32 L0 203.7 L116.11 203.7 L116.11 189.32" class="st3"/>
+ <text x="15.59" y="200.1" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key1: Value = 0</text> </g>
+ <g id="shape6-9" v:mID="6" v:groupContext="shape" transform="translate(19.0195,-68.6284)">
+ <title>Sheet.6</title>
+ <path d="M0 182.93 C0 180.64 1.87 178.78 4.16 178.78 L109.18 178.78 C111.47 178.78 113.33 180.64 113.33 182.93 L113.33
+ 199.55 C113.33 201.84 111.47 203.7 109.18 203.7 L4.16 203.7 C1.87 203.7 0 201.84 0 199.55 L0 182.93 Z"
+ class="st1"/>
+ </g>
+ <g id="shape7-11" v:mID="7" v:groupContext="shape" transform="translate(19.0195,-68.6284)">
+ <title>Sheet.7</title>
+ <path d="M0 182.93 C0 180.64 1.87 178.78 4.16 178.78 L109.18 178.78 C111.47 178.78 113.33 180.64 113.33 182.93 L113.33
+ 199.55 C113.33 201.84 111.47 203.7 109.18 203.7 L4.16 203.7 C1.87 203.7 0 201.84 0 199.55 L0 182.93 Z"
+ class="st2"/>
+ </g>
+ <g id="shape8-13" v:mID="8" v:groupContext="shape" transform="translate(19.0195,-72.0832)">
+ <title>Sheet.8</title>
+ <desc>Key3: Value = 1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="58.0575" cy="196.509" width="116.12" height="14.3829"/>
+ <path d="M116.11 189.32 L0 189.32 L0 203.7 L116.11 203.7 L116.11 189.32" class="st3"/>
+ <text x="15.59" y="200.1" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key3: Value = 1</text> </g>
+ <g id="shape9-17" v:mID="9" v:groupContext="shape" transform="translate(19.0195,-43.5843)">
+ <title>Sheet.9</title>
+ <path d="M0 182.84 C-0 180.53 1.88 178.66 4.19 178.66 L109.15 178.66 C111.46 178.66 113.33 180.53 113.33 182.84 L113.33
+ 199.53 C113.33 201.84 111.46 203.7 109.15 203.7 L4.19 203.7 C1.88 203.7 0 201.84 0 199.53 L0 182.84 Z"
+ class="st1"/>
+ </g>
+ <g id="shape10-19" v:mID="10" v:groupContext="shape" transform="translate(19.0195,-43.5843)">
+ <title>Sheet.10</title>
+ <path d="M0 182.84 C-0 180.53 1.88 178.66 4.19 178.66 L109.15 178.66 C111.46 178.66 113.33 180.53 113.33 182.84 L113.33
+ 199.53 C113.33 201.84 111.46 203.7 109.15 203.7 L4.19 203.7 C1.88 203.7 0 201.84 0 199.53 L0 182.84 Z"
+ class="st2"/>
+ </g>
+ <g id="shape11-21" v:mID="11" v:groupContext="shape" transform="translate(19.0195,-47.1109)">
+ <title>Sheet.11</title>
+ <desc>Key4: Value = 0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="58.0575" cy="196.509" width="116.12" height="14.3829"/>
+ <path d="M116.11 189.32 L0 189.32 L0 203.7 L116.11 203.7 L116.11 189.32" class="st3"/>
+ <text x="15.59" y="200.1" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key4: Value = 0</text> </g>
+ <g id="shape12-25" v:mID="12" v:groupContext="shape" transform="translate(19.0195,-19.0195)">
+ <title>Sheet.12</title>
+ <path d="M0 182.84 C-0 180.53 1.88 178.66 4.19 178.66 L109.15 178.66 C111.46 178.66 113.33 180.53 113.33 182.84 L113.33
+ 199.53 C113.33 201.84 111.46 203.7 109.15 203.7 L4.19 203.7 C1.88 203.7 0 201.84 0 199.53 L0 182.84 Z"
+ class="st1"/>
+ </g>
+ <g id="shape13-27" v:mID="13" v:groupContext="shape" transform="translate(19.0195,-19.0195)">
+ <title>Sheet.13</title>
+ <path d="M0 182.84 C-0 180.53 1.88 178.66 4.19 178.66 L109.15 178.66 C111.46 178.66 113.33 180.53 113.33 182.84 L113.33
+ 199.53 C113.33 201.84 111.46 203.7 109.15 203.7 L4.19 203.7 C1.88 203.7 0 201.84 0 199.53 L0 182.84 Z"
+ class="st2"/>
+ </g>
+ <g id="shape14-29" v:mID="14" v:groupContext="shape" transform="translate(19.0195,-22.5475)">
+ <title>Sheet.14</title>
+ <desc>Key7: Value = 1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="58.0575" cy="196.509" width="116.12" height="14.3829"/>
+ <path d="M116.11 189.32 L0 189.32 L0 203.7 L116.11 203.7 L116.11 189.32" class="st3"/>
+ <text x="15.59" y="200.1" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key7: Value = 1</text> </g>
+ <g id="shape15-33" v:mID="15" v:groupContext="shape" transform="translate(141.656,-45.5615)">
+ <title>Sheet.15</title>
+ <path d="M0 169.01 L22.75 169.01 L22.75 157.45 L45.5 180.57 L22.75 203.7 L22.75 192.14 L0 192.14 L0 169.01 Z"
+ class="st5"/>
+ </g>
+ <g id="shape16-35" v:mID="16" v:groupContext="shape" transform="translate(193.22,-56.0464)">
+ <title>Sheet.16</title>
+ <path d="M0 182.84 C0 180.53 1.88 178.66 4.19 178.66 L96.55 178.66 C98.87 178.66 100.73 180.53 100.73 182.84 L100.73
+ 199.54 C100.73 201.84 98.87 203.7 96.55 203.7 L4.19 203.7 C1.88 203.7 0 201.84 0 199.54 L0 182.84 Z"
+ class="st1"/>
+ </g>
+ <g id="shape17-37" v:mID="17" v:groupContext="shape" transform="translate(193.22,-56.0464)">
+ <title>Sheet.17</title>
+ <path d="M0 182.84 C0 180.53 1.88 178.66 4.19 178.66 L96.55 178.66 C98.87 178.66 100.73 180.53 100.73 182.84 L100.73
+ 199.54 C100.73 201.84 98.87 203.7 96.55 203.7 L4.19 203.7 C1.88 203.7 0 201.84 0 199.54 L0 182.84 Z"
+ class="st2"/>
+ </g>
+ <g id="shape18-39" v:mID="18" v:groupContext="shape" transform="translate(228.157,-66.9545)">
+ <title>Sheet.18</title>
+ <desc>F</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="5.63538" cy="197.084" width="11.28" height="13.2327"/>
+ <path d="M11.27 190.47 L0 190.47 L0 203.7 L11.27 203.7 L11.27 190.47" class="st3"/>
+ <text x="2.27" y="200.39" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>F</text> </g>
+ <g id="shape19-43" v:mID="19" v:groupContext="shape" transform="translate(234.88,-66.9545)">
+ <title>Sheet.19</title>
+ <desc>(key,</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="17.261" cy="197.084" width="34.53" height="13.2327"/>
+ <path d="M34.52 190.47 L0 190.47 L0 203.7 L34.52 203.7 L34.52 190.47" class="st3"/>
+ <text x="5.32" y="200.39" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(key, </text> </g>
+ <g id="shape20-47" v:mID="20" v:groupContext="shape" transform="translate(198.215,-53.7734)">
+ <title>Sheet.20</title>
+ <desc>hash_index =</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="41.4128" cy="197.084" width="82.83" height="13.2327"/>
+ <path d="M82.83 190.47 L0 190.47 L0 203.7 L82.83 203.7 L82.83 190.47" class="st3"/>
+ <text x="8.47" y="200.39" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash_index = </text> </g>
+ <g id="shape21-51" v:mID="21" v:groupContext="shape" transform="translate(274.858,-53.7734)">
+ <title>Sheet.21</title>
+ <desc>i)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="5.28241" cy="197.084" width="10.57" height="13.2327"/>
+ <path d="M10.56 190.47 L0 190.47 L0 203.7 L10.56 203.7 L10.56 190.47" class="st3"/>
+ <text x="2.22" y="200.39" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>i)</text> </g>
+ <g id="shape22-55" v:mID="22" v:groupContext="shape" transform="translate(351.453,-93.7923)">
+ <title>Sheet.22</title>
+ <path d="M0 182.84 C0 180.53 1.88 178.66 4.19 178.66 L109.16 178.66 C111.47 178.66 113.33 180.53 113.33 182.84 L113.33
+ 199.54 C113.33 201.84 111.47 203.7 109.16 203.7 L4.19 203.7 C1.88 203.7 0 201.84 0 199.54 L0 182.84 Z"
+ class="st1"/>
+ </g>
+ <g id="shape23-57" v:mID="23" v:groupContext="shape" transform="translate(351.453,-93.7923)">
+ <title>Sheet.23</title>
+ <path d="M0 182.84 C0 180.53 1.88 178.66 4.19 178.66 L109.16 178.66 C111.47 178.66 113.33 180.53 113.33 182.84 L113.33
+ 199.54 C113.33 201.84 111.47 203.7 109.16 203.7 L4.19 203.7 C1.88 203.7 0 201.84 0 199.54 L0 182.84 Z"
+ class="st2"/>
+ </g>
+ <g id="shape24-59" v:mID="24" v:groupContext="shape" transform="translate(355.798,-97.3147)">
+ <title>Sheet.24</title>
+ <desc>Key1: Position 4</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="51.7083" cy="196.509" width="103.42" height="14.3829"/>
+ <path d="M103.42 189.32 L0 189.32 L0 203.7 L103.42 203.7 L103.42 189.32" class="st3"/>
+ <text x="8.41" y="200.1" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key1: Position 4</text> </g>
+ <g id="shape25-63" v:mID="25" v:groupContext="shape" transform="translate(351.453,-68.9879)">
+ <title>Sheet.25</title>
+ <path d="M0 182.94 C0 180.65 1.88 178.78 4.17 178.78 L109.18 178.78 C111.47 178.78 113.33 180.65 113.33 182.94 L113.33
+ 199.55 C113.33 201.84 111.47 203.7 109.18 203.7 L4.17 203.7 C1.88 203.7 0 201.84 0 199.55 L0 182.94 Z"
+ class="st1"/>
+ </g>
+ <g id="shape26-65" v:mID="26" v:groupContext="shape" transform="translate(351.453,-68.9879)">
+ <title>Sheet.26</title>
+ <path d="M0 182.94 C0 180.65 1.88 178.78 4.17 178.78 L109.18 178.78 C111.47 178.78 113.33 180.65 113.33 182.94 L113.33
+ 199.55 C113.33 201.84 111.47 203.7 109.18 203.7 L4.17 203.7 C1.88 203.7 0 201.84 0 199.55 L0 182.94 Z"
+ class="st2"/>
+ </g>
+ <g id="shape27-67" v:mID="27" v:groupContext="shape" transform="translate(355.798,-72.4921)">
+ <title>Sheet.27</title>
+ <desc>Key3: Position 6</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="51.7083" cy="196.509" width="103.42" height="14.3829"/>
+ <path d="M103.42 189.32 L0 189.32 L0 203.7 L103.42 203.7 L103.42 189.32" class="st3"/>
+ <text x="8.41" y="200.1" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key3: Position 6</text> </g>
+ <g id="shape28-71" v:mID="28" v:groupContext="shape" transform="translate(351.453,-44.0636)">
+ <title>Sheet.28</title>
+ <path d="M0 182.94 C0 180.65 1.88 178.78 4.17 178.78 L109.18 178.78 C111.47 178.78 113.33 180.65 113.33 182.94 L113.33
+ 199.55 C113.33 201.84 111.47 203.7 109.18 203.7 L4.17 203.7 C1.88 203.7 0 201.84 0 199.55 L0 182.94 Z"
+ class="st1"/>
+ </g>
+ <g id="shape29-73" v:mID="29" v:groupContext="shape" transform="translate(351.453,-44.0636)">
+ <title>Sheet.29</title>
+ <path d="M0 182.94 C0 180.65 1.88 178.78 4.17 178.78 L109.18 178.78 C111.47 178.78 113.33 180.65 113.33 182.94 L113.33
+ 199.55 C113.33 201.84 111.47 203.7 109.18 203.7 L4.17 203.7 C1.88 203.7 0 201.84 0 199.55 L0 182.94 Z"
+ class="st2"/>
+ </g>
+ <g id="shape30-75" v:mID="30" v:groupContext="shape" transform="translate(351.215,-47.5198)">
+ <title>Sheet.30</title>
+ <desc>Key4: Position 14</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="55.5403" cy="196.509" width="111.09" height="14.3829"/>
+ <path d="M111.08 189.32 L0 189.32 L0 203.7 L111.08 203.7 L111.08 189.32" class="st3"/>
+ <text x="8.91" y="200.1" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key4: Position 14</text> </g>
+ <g id="shape31-79" v:mID="31" v:groupContext="shape" transform="translate(351.453,-19.4988)">
+ <title>Sheet.31</title>
+ <path d="M0 182.94 C0 180.65 1.88 178.78 4.17 178.78 L109.18 178.78 C111.47 178.78 113.33 180.65 113.33 182.94 L113.33
+ 199.55 C113.33 201.84 111.47 203.7 109.18 203.7 L4.17 203.7 C1.88 203.7 0 201.84 0 199.55 L0 182.94 Z"
+ class="st1"/>
+ </g>
+ <g id="shape32-81" v:mID="32" v:groupContext="shape" transform="translate(351.453,-19.4988)">
+ <title>Sheet.32</title>
+ <path d="M0 182.94 C0 180.65 1.88 178.78 4.17 178.78 L109.18 178.78 C111.47 178.78 113.33 180.65 113.33 182.94 L113.33
+ 199.55 C113.33 201.84 111.47 203.7 109.18 203.7 L4.17 203.7 C1.88 203.7 0 201.84 0 199.55 L0 182.94 Z"
+ class="st2"/>
+ </g>
+ <g id="shape33-83" v:mID="33" v:groupContext="shape" transform="translate(351.215,-22.9565)">
+ <title>Sheet.33</title>
+ <desc>Key7: Position 14</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="55.5403" cy="196.509" width="111.09" height="14.3829"/>
+ <path d="M111.08 189.32 L0 189.32 L0 203.7 L111.08 203.7 L111.08 189.32" class="st3"/>
+ <text x="8.91" y="200.1" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key7: Position 14</text> </g>
+ <g id="shape34-87" v:mID="34" v:groupContext="shape" transform="translate(299.89,-46.0408)">
+ <title>Sheet.34</title>
+ <path d="M0 169.01 L22.75 169.01 L22.75 157.45 L45.5 180.57 L22.75 203.7 L22.75 192.14 L0 192.14 L0 169.01 Z"
+ class="st5"/>
+ </g>
+ <g id="shape35-89" v:mID="35" v:groupContext="shape" transform="translate(528.896,-117.518)">
+ <title>Sheet.35</title>
+ <path d="M0 182.94 C0 180.66 1.89 178.78 4.17 178.78 L137.64 178.78 C139.92 178.78 141.79 180.66 141.79 182.94 L141.79
+ 199.57 C141.79 201.84 139.92 203.7 137.64 203.7 L4.17 203.7 C1.89 203.7 0 201.84 0 199.57 L0 182.94 Z"
+ class="st1"/>
+ </g>
+ <g id="shape36-91" v:mID="36" v:groupContext="shape" transform="translate(528.896,-117.518)">
+ <title>Sheet.36</title>
+ <path d="M0 182.94 C0 180.66 1.89 178.78 4.17 178.78 L137.64 178.78 C139.92 178.78 141.79 180.66 141.79 182.94 L141.79
+ 199.57 C141.79 201.84 139.92 203.7 137.64 203.7 L4.17 203.7 C1.89 203.7 0 201.84 0 199.57 L0 182.94 Z"
+ class="st2"/>
+ </g>
+ <g id="shape37-93" v:mID="37" v:groupContext="shape" transform="translate(530.056,-121.017)">
+ <title>Sheet.37</title>
+ <desc>0000</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="19.1585" cy="196.509" width="38.32" height="14.3829"/>
+ <path d="M38.32 189.32 L0 189.32 L0 203.7 L38.32 203.7 L38.32 189.32" class="st3"/>
+ <text x="5.83" y="200.1" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0000 </text> </g>
+ <g id="shape38-97" v:mID="38" v:groupContext="shape" transform="translate(567.215,-121.017)">
+ <title>Sheet.38</title>
+ <desc>0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="5.7483" cy="196.509" width="11.5" height="14.3829"/>
+ <path d="M11.5 189.32 L0 189.32 L0 203.7 L11.5 203.7 L11.5 189.32" class="st3"/>
+ <text x="2.42" y="200.1" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0</text> </g>
+ <g id="shape39-101" v:mID="39" v:groupContext="shape" transform="translate(576.215,-121.017)">
+ <title>Sheet.39</title>
+ <desc>0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="5.7483" cy="196.509" width="11.5" height="14.3829"/>
+ <path d="M11.5 189.32 L0 189.32 L0 203.7 L11.5 203.7 L11.5 189.32" class="st3"/>
+ <text x="2.42" y="200.1" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0</text> </g>
+ <g id="shape40-105" v:mID="40" v:groupContext="shape" transform="translate(584.486,-121.017)">
+ <title>Sheet.40</title>
+ <desc>1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="5.7483" cy="196.509" width="11.5" height="14.3829"/>
+ <path d="M11.5 189.32 L0 189.32 L0 203.7 L11.5 203.7 L11.5 189.32" class="st3"/>
+ <text x="2.42" y="200.1" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>1</text> </g>
+ <g id="shape41-109" v:mID="41" v:groupContext="shape" transform="translate(588.646,-121.017)">
+ <title>Sheet.41</title>
+ <desc>0 0000 00</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="32.5687" cy="196.509" width="65.14" height="14.3829"/>
+ <path d="M65.14 189.32 L0 189.32 L0 203.7 L65.14 203.7 L65.14 189.32" class="st3"/>
+ <text x="5.91" y="200.1" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0 0000 00</text> </g>
+ <g id="shape42-113" v:mID="42" v:groupContext="shape" transform="translate(644.965,-121.017)">
+ <title>Sheet.42</title>
+ <desc>?</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="6.12511" cy="196.509" width="12.26" height="14.3829"/>
+ <path d="M12.25 189.32 L0 189.32 L0 203.7 L12.25 203.7 L12.25 189.32" class="st3"/>
+ <text x="2.47" y="200.1" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>?</text> </g>
+ <g id="shape43-117" v:mID="43" v:groupContext="shape" transform="translate(654.718,-121.017)">
+ <title>Sheet.43</title>
+ <desc>0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="5.7483" cy="196.509" width="11.5" height="14.3829"/>
+ <path d="M11.5 189.32 L0 189.32 L0 203.7 L11.5 203.7 L11.5 189.32" class="st3"/>
+ <text x="2.42" y="200.1" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0</text> </g>
+ <g id="shape44-121" v:mID="44" v:groupContext="shape" transform="translate(464.786,-105.296)">
+ <title>Sheet.44</title>
+ <path d="M0 203.7 L108.29 203.7 C108.86 203.7 109.31 203.22 109.31 202.68 L109.31 189.5 L107.27 189.5 L107.27 202.68
+ L108.29 201.66 L0 201.66 L0 203.7 ZM111.35 190.52 L108.29 184.41 L105.23 190.55 L111.35 190.52 Z"
+ class="st11"/>
+ </g>
+ <g id="shape45-123" v:mID="45" v:groupContext="shape" transform="translate(464.786,-80.4315)">
+ <title>Sheet.45</title>
+ <path d="M0 203.7 L123.63 203.7 C124.2 203.7 124.65 203.25 124.65 202.68 L124.65 164.28 L122.61 164.28 L122.61 202.68
+ L123.63 201.66 L0 201.66 L0 203.7 ZM126.69 165.3 L123.6 159.18 L120.57 165.33 L126.69 165.3 Z"
+ class="st11"/>
+ </g>
+ <g id="shape46-125" v:mID="46" v:groupContext="shape" transform="translate(464.786,-55.4772)">
+ <title>Sheet.46</title>
+ <path d="M0 203.7 L186.48 203.7 C186.75 203.7 186.99 203.61 187.2 203.4 C187.38 203.22 187.5 202.95 187.5 202.68 L187.41
+ 139.32 L185.37 139.32 L185.46 202.68 L186.48 201.66 L0 201.66 L0 203.7 ZM189.51 140.07 L185.94 134.23 L183.41
+ 140.61 L189.51 140.07 Z" class="st11"/>
+ </g>
+ <g id="shape47-127" v:mID="47" v:groupContext="shape" transform="translate(464.786,-30.9125)">
+ <title>Sheet.47</title>
+ <path d="M0 203.7 L186.48 203.7 C186.75 203.7 186.99 203.61 187.2 203.4 C187.38 203.22 187.5 202.95 187.5 202.68 L187.41
+ 114.76 L185.37 114.76 L185.46 202.68 L186.48 201.66 L0 201.66 L0 203.7 ZM189.51 115.51 L185.94 109.67 L183.41
+ 116.05 L189.51 115.51 Z" class="st11"/>
+ </g>
+ <g id="shape48-129" v:mID="48" v:groupContext="shape" transform="translate(442.996,-151.106)">
+ <title>Sheet.48</title>
+ <path d="M0 179.56 C0 176.89 2.19 174.7 4.86 174.7 L70.8 174.7 C73.47 174.7 75.64 176.89 75.64 179.56 L75.64 198.88 C75.64
+ 201.54 73.47 203.7 70.8 203.7 L4.86 203.7 C2.19 203.7 0 201.54 0 198.88 L0 179.56 Z" class="st5"/>
+ </g>
+ <g id="shape49-131" v:mID="49" v:groupContext="shape" transform="translate(443.529,-155.018)">
+ <title>Sheet.49</title>
+ <desc>Values</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="37.8175" cy="192.914" width="75.64" height="21.5726"/>
+ <path d="M75.64 182.13 L0 182.13 L0 203.7 L75.64 203.7 L75.64 182.13" class="st3"/>
+ <text x="10.34" y="198.31" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Values</text> </g>
+ <g id="shape50-135" v:mID="50" v:groupContext="shape" transform="translate(102.458,-122.192)">
+ <title>Sheet.50</title>
+ <path d="M0 203.7 C-0 199.21 0.62 195.55 1.37 195.55 L11.67 195.55 C12.42 195.55 13.03 191.9 13.03 187.4 C13.03 191.9
+ 13.64 195.55 14.39 195.55 L24.69 195.55 C25.44 195.55 26.05 199.21 26.05 203.7" class="st13"/>
+ </g>
+ <g id="shape51-138" v:mID="51" v:groupContext="shape" transform="translate(115.454,-137.5)">
+ <title>Sheet.51</title>
+ <path d="M0.2 203.7 L322.66 174.12 L322.48 172.1 L0 201.68 L0.2 203.7 L0.2 203.7 ZM321.84 176.24 L327.66 172.64 L321.28
+ 170.16 L321.84 176.24 L321.84 176.24 Z" class="st14"/>
+ </g>
+ <g id="shape52-140" v:mID="52" v:groupContext="shape" transform="translate(518.211,-142.473)">
+ <title>Sheet.52</title>
+ <path d="M0.99 176.74 L44.78 200.38 L43.82 202.17 L0 178.51 L0.99 176.74 L0.99 176.74 ZM44.87 198.1 L48.8 203.7 L41.96
+ 203.46 L44.87 198.1 L44.87 198.1 Z" class="st11"/>
+ </g>
+ <g id="shape53-142" v:mID="53" v:groupContext="shape" transform="translate(518.331,-141.963)">
+ <title>Sheet.53</title>
+ <path d="M0.75 176.17 L60.09 200.32 L59.34 202.2 L0 178.06 L0.75 176.17 L0.75 176.17 ZM59.91 198.04 L64.44 203.19 L57.6
+ 203.7 L59.91 198.04 L59.91 198.04 Z" class="st11"/>
+ </g>
+ <g id="shape54-144" v:mID="54" v:groupContext="shape" transform="translate(576.558,-153.706)">
+ <title>Sheet.54</title>
+ <path d="M0 177.04 C0 174.1 2.4 171.71 5.34 171.71 L101.51 171.71 C104.48 171.71 106.85 174.1 106.85 177.04 L106.85 198.37
+ C106.85 201.33 104.48 203.7 101.51 203.7 L5.34 203.7 C2.4 203.7 0 201.33 0 198.37 L0 177.04 Z" class="st1"/>
+ </g>
+ <g id="shape55-146" v:mID="55" v:groupContext="shape" transform="translate(577.365,-151.611)">
+ <title>Sheet.55</title>
+ <path d="M0 177.04 C0 174.1 2.4 171.71 5.34 171.71 L101.51 171.71 C104.48 171.71 106.85 174.1 106.85 177.04 L106.85 198.37
+ C106.85 201.33 104.48 203.7 101.51 203.7 L5.34 203.7 C2.4 203.7 0 201.33 0 198.37 L0 177.04 Z" class="st2"/>
+ </g>
+ <g id="shape56-148" v:mID="56" v:groupContext="shape" transform="translate(593.952,-167.894)">
+ <title>Sheet.56</title>
+ <desc>Lookup_table</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="43.2942" cy="196.509" width="86.59" height="14.3829"/>
+ <path d="M86.59 189.32 L0 189.32 L0 203.7 L86.59 203.7 L86.59 189.32" class="st3"/>
+ <text x="7.31" y="200.1" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Lookup_table</text> </g>
+ <g id="shape57-152" v:mID="57" v:groupContext="shape" transform="translate(608.239,-153.515)">
+ <title>Sheet.57</title>
+ <desc>(16 bits)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="26.8054" cy="196.509" width="53.62" height="14.3829"/>
+ <path d="M53.61 189.32 L0 189.32 L0 203.7 L53.61 203.7 L53.61 189.32" class="st3"/>
+ <text x="5.16" y="200.1" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(16 bits)</text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/efd_i11.svg b/doc/guides/prog_guide/img/efd_i11.svg
new file mode 100644
index 00000000..f2cc656b
--- /dev/null
+++ b/doc/guides/prog_guide/img/efd_i11.svg
@@ -0,0 +1,319 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export efd_i12.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="10.2783in" height="4.28958in"
+ viewBox="0 0 740.039 308.85" xml:space="preserve" color-interpolation-filters="sRGB" class="st21">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud v:nameU="msvSubprocessMaster" v:prompt="" v:val="VT4(Rectangle)"/>
+ <v:ud v:nameU="msvNoAutoConnect" v:val="VT0(1):26"/>
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {fill:#ffffff;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st2 {fill:none;stroke:#00aeef;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st3 {stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st4 {fill:#000000;font-family:Arial;font-size:0.998566em}
+ .st5 {fill:#0071c5;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st6 {stroke:#00b050;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st7 {stroke:#00aeef;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st8 {fill:#000000;font-family:Arial;font-size:0.918686em;font-weight:bold}
+ .st9 {fill:#00b050;font-size:1em}
+ .st10 {fill:#c00000;font-family:Arial;font-size:0.828804em;font-weight:bold}
+ .st11 {fill:#004280;stroke:#004280;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.00749637}
+ .st12 {fill:#ffffff;font-family:Arial;font-size:1.16833em}
+ .st13 {fill:#2e75b5;stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st14 {fill:#ffffff;font-family:Arial;font-size:1.16666em}
+ .st15 {font-size:1em}
+ .st16 {fill:none;stroke:none;stroke-width:0.25}
+ .st17 {fill:#000000;font-family:Calibri;font-size:1.00001em}
+ .st18 {marker-end:url(#mrkr5-121);stroke:#5b9bd5;stroke-dasharray:1.5,3;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.5}
+ .st19 {fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-opacity:1;stroke-width:0.37313432835821}
+ .st20 {fill:#000000;font-family:Calibri;font-size:1.16666em}
+ .st21 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-121" class="st19" v:arrowType="5" v:arrowSize="2" v:setback="4.69" refX="-4.69" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-2.68,-2.68) "/>
+ </marker>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeOrder" v:val="VT0(0):26"/>
+ </v:userDefs>
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="shape5-1" v:mID="5" v:groupContext="shape" transform="translate(36.0674,-256.878)">
+ <title>Sheet.5</title>
+ <path d="M0 291.88 C0 290 1.52 288.48 3.41 288.48 L68.51 288.48 C70.4 288.48 71.91 290 71.91 291.88 L71.91 305.46 C71.91
+ 307.33 70.4 308.85 68.51 308.85 L3.41 308.85 C1.52 308.85 0 307.33 0 305.46 L0 291.88 Z" class="st1"/>
+ </g>
+ <g id="shape6-3" v:mID="6" v:groupContext="shape" transform="translate(36.0674,-256.878)">
+ <title>Sheet.6</title>
+ <path d="M0 291.88 C0 290 1.52 288.48 3.41 288.48 L68.51 288.48 C70.4 288.48 71.91 290 71.91 291.88 L71.91 305.46 C71.91
+ 307.33 70.4 308.85 68.51 308.85 L3.41 308.85 C1.52 308.85 0 307.33 0 305.46 L0 291.88 Z" class="st2"/>
+ </g>
+ <g id="shape7-5" v:mID="7" v:groupContext="shape" transform="translate(61.6502,-258.089)">
+ <title>Sheet.7</title>
+ <desc>Key</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="13.7891" cy="301.658" width="27.58" height="14.3829"/>
+ <path d="M27.58 294.47 L0 294.47 L0 308.85 L27.58 308.85 L27.58 294.47" class="st3"/>
+ <text x="3.46" y="305.25" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key</text> </g>
+ <g id="shape8-9" v:mID="8" v:groupContext="shape" transform="translate(51.9748,-236.328)">
+ <title>Sheet.8</title>
+ <path d="M0 298.54 L9.81 298.54 L9.81 288.24 L29.44 288.24 L29.44 298.54 L39.26 298.54 L19.63 308.85 L0 298.54 Z"
+ class="st5"/>
+ </g>
+ <g id="shape9-11" v:mID="9" v:groupContext="shape" transform="translate(36.0674,-215.298)">
+ <title>Sheet.9</title>
+ <path d="M0 291.77 C0 289.89 1.54 288.36 3.42 288.36 L68.49 288.36 C70.38 288.36 71.91 289.89 71.91 291.77 L71.91 305.43
+ C71.91 307.32 70.38 308.85 68.49 308.85 L3.42 308.85 C1.54 308.85 0 307.32 0 305.43 L0 291.77 Z"
+ class="st1"/>
+ </g>
+ <g id="shape10-13" v:mID="10" v:groupContext="shape" transform="translate(36.0674,-215.298)">
+ <title>Sheet.10</title>
+ <path d="M0 291.77 C0 289.89 1.54 288.36 3.42 288.36 L68.49 288.36 C70.38 288.36 71.91 289.89 71.91 291.77 L71.91 305.43
+ C71.91 307.32 70.38 308.85 68.49 308.85 L3.42 308.85 C1.54 308.85 0 307.32 0 305.43 L0 291.77 Z"
+ class="st2"/>
+ </g>
+ <g id="shape11-15" v:mID="11" v:groupContext="shape" transform="translate(58.8889,-216.57)">
+ <title>Sheet.11</title>
+ <desc>hash</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="16.8573" cy="301.658" width="33.72" height="14.3829"/>
+ <path d="M33.71 294.47 L0 294.47 L0 308.85 L33.71 308.85 L33.71 294.47" class="st3"/>
+ <text x="3.86" y="305.25" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash</text> </g>
+ <g id="shape12-19" v:mID="12" v:groupContext="shape" transform="translate(27.3033,-174.437)">
+ <title>Sheet.12</title>
+ <path d="M0 292.58 C0 290.78 1.46 289.32 3.26 289.32 L87.15 289.32 C88.95 289.32 90.4 290.78 90.4 292.58 L90.4 305.6
+ C90.4 307.4 88.95 308.85 87.15 308.85 L3.26 308.85 C1.46 308.85 0 307.4 0 305.6 L0 292.58 Z" class="st1"/>
+ </g>
+ <g id="shape13-21" v:mID="13" v:groupContext="shape" transform="translate(27.3033,-174.437)">
+ <title>Sheet.13</title>
+ <path d="M0 292.58 C0 290.78 1.46 289.32 3.26 289.32 L87.15 289.32 C88.95 289.32 90.4 290.78 90.4 292.58 L90.4 305.6
+ C90.4 307.4 88.95 308.85 87.15 308.85 L3.26 308.85 C1.46 308.85 0 307.4 0 305.6 L0 292.58 Z" class="st2"/>
+ </g>
+ <g id="shape14-23" v:mID="14" v:groupContext="shape" transform="translate(36.0515,-175.256)">
+ <title>Sheet.14</title>
+ <desc>0x0102ABCD</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="43.6644" cy="301.658" width="87.33" height="14.3829"/>
+ <path d="M87.33 294.47 L0 294.47 L0 308.85 L87.33 308.85 L87.33 294.47" class="st3"/>
+ <text x="7.36" y="305.25" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0x0102ABCD</text> </g>
+ <g id="shape15-27" v:mID="15" v:groupContext="shape" transform="translate(51.9748,-194.029)">
+ <title>Sheet.15</title>
+ <path d="M0 298.48 L9.81 298.48 L9.81 288.12 L29.44 288.12 L29.44 298.48 L39.26 298.48 L19.63 308.85 L0 298.48 Z"
+ class="st5"/>
+ </g>
+ <g id="shape16-29" v:mID="16" v:groupContext="shape" transform="translate(48.9133,-159.818)">
+ <title>Sheet.16</title>
+ <path d="M26.41 296.87 C26.41 300.18 25.97 302.86 25.41 302.86 L14.21 302.86 C13.66 302.86 13.21 305.55 13.21 308.85
+ C13.21 305.55 12.76 302.86 12.21 302.86 L1.01 302.86 C0.45 302.86 0 300.18 0 296.87" class="st6"/>
+ </g>
+ <g id="shape17-32" v:mID="17" v:groupContext="shape" transform="translate(19.0195,-19.0195)">
+ <title>Sheet.17</title>
+ <path d="M0 196.93 L0 308.85 L145.15 308.85 L145.15 196.93 L0 196.93 L0 196.93 Z" class="st1"/>
+ </g>
+ <g id="shape18-34" v:mID="18" v:groupContext="shape" transform="translate(19.0195,-19.0195)">
+ <title>Sheet.18</title>
+ <path d="M0 196.93 L145.15 196.93 L145.15 308.85 L0 308.85 L0 196.93" class="st7"/>
+ </g>
+ <g id="shape19-37" v:mID="19" v:groupContext="shape" transform="translate(28.2638,-70.6655)">
+ <title>Sheet.19</title>
+ <path d="M0 280.69 C0 277.58 2.53 275.06 5.64 275.06 L124.14 275.06 C127.26 275.06 129.78 277.58 129.78 280.69 L129.78
+ 303.22 C129.78 306.33 127.26 308.85 124.14 308.85 L5.64 308.85 C2.53 308.85 0 306.33 0 303.22 L0 280.69
+ Z" class="st1"/>
+ </g>
+ <g id="shape20-39" v:mID="20" v:groupContext="shape" transform="translate(28.2638,-70.6655)">
+ <title>Sheet.20</title>
+ <path d="M0 280.69 C0 277.58 2.53 275.06 5.64 275.06 L124.14 275.06 C127.26 275.06 129.78 277.58 129.78 280.69 L129.78
+ 303.22 C129.78 306.33 127.26 308.85 124.14 308.85 L5.64 308.85 C2.53 308.85 0 306.33 0 303.22 L0 280.69
+ Z" class="st2"/>
+ </g>
+ <g id="shape21-41" v:mID="21" v:groupContext="shape" transform="translate(57.4514,-85.7513)">
+ <title>Sheet.21</title>
+ <desc>hash_index =</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="45.0133" cy="301.658" width="90.03" height="14.3829"/>
+ <path d="M90.03 294.47 L0 294.47 L0 308.85 L90.03 308.85 L90.03 294.47" class="st3"/>
+ <text x="9.2" y="305.25" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash_index = </text> </g>
+ <g id="shape22-45" v:mID="22" v:groupContext="shape" transform="translate(76.3001,-71.3719)">
+ <title>Sheet.22</title>
+ <desc>38123</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="21.0762" cy="301.658" width="42.16" height="14.3829"/>
+ <path d="M42.15 294.47 L0 294.47 L0 308.85 L42.15 308.85 L42.15 294.47" class="st3"/>
+ <text x="4.42" y="305.25" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>38123</text> </g>
+ <g id="shape23-49" v:mID="23" v:groupContext="shape" transform="translate(28.2638,-27.048)">
+ <title>Sheet.23</title>
+ <path d="M0 280.69 C0 277.59 2.54 275.06 5.64 275.06 L124.14 275.06 C127.26 275.06 129.78 277.59 129.78 280.69 L129.78
+ 303.22 C129.78 306.33 127.26 308.85 124.14 308.85 L5.64 308.85 C2.54 308.85 0 306.33 0 303.22 L0 280.69
+ Z" class="st1"/>
+ </g>
+ <g id="shape24-51" v:mID="24" v:groupContext="shape" transform="translate(28.2638,-27.048)">
+ <title>Sheet.24</title>
+ <path d="M0 280.69 C0 277.59 2.54 275.06 5.64 275.06 L124.14 275.06 C127.26 275.06 129.78 277.59 129.78 280.69 L129.78
+ 303.22 C129.78 306.33 127.26 308.85 124.14 308.85 L5.64 308.85 C2.54 308.85 0 306.33 0 303.22 L0 280.69
+ Z" class="st2"/>
+ </g>
+ <g id="shape25-53" v:mID="25" v:groupContext="shape" transform="translate(54.0924,-41.564)">
+ <title>Sheet.25</title>
+ <desc>lookup_table =</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="46.931" cy="301.658" width="93.87" height="14.3829"/>
+ <path d="M93.86 294.47 L0 294.47 L0 308.85 L93.86 308.85 L93.86 294.47" class="st3"/>
+ <text x="7.79" y="305.25" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>lookup_table =</text> </g>
+ <g id="shape26-57" v:mID="26" v:groupContext="shape" transform="translate(28.0195,-28.5506)">
+ <title>Sheet.26</title>
+ <desc>0110 1100 0101 1101</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="64.89" cy="302.233" width="129.79" height="13.2327"/>
+ <path d="M129.78 295.62 L0 295.62 L0 308.85 L129.78 308.85 L129.78 295.62" class="st3"/>
+ <text x="11.25" y="305.54" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0110 11<tspan
+ class="st9">0</tspan>0 0101 1101</text> </g>
+ <g id="shape27-62" v:mID="27" v:groupContext="shape" transform="translate(26.2461,-113.863)">
+ <title>Sheet.27</title>
+ <desc>Group ID: 0x0102</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="48.6286" cy="302.881" width="97.26" height="11.9384"/>
+ <path d="M97.26 296.91 L0 296.91 L0 308.85 L97.26 308.85 L97.26 296.91" class="st3"/>
+ <text x="7.73" y="305.86" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Group ID: 0x0102</text> </g>
+ <g id="shape28-66" v:mID="28" v:groupContext="shape" transform="translate(42.3703,-135.313)">
+ <title>Sheet.28</title>
+ <path d="M0 298.48 L9.84 298.48 L9.84 288.12 L29.53 288.12 L29.53 298.48 L39.38 298.48 L19.69 308.85 L0 298.48 Z"
+ class="st5"/>
+ </g>
+ <g id="shape29-68" v:mID="29" v:groupContext="shape" transform="translate(117.645,-244.476)">
+ <title>Sheet.29</title>
+ <path d="M0 274.07 L22.75 274.07 L22.75 262.48 L45.5 285.66 L22.75 308.85 L22.75 297.26 L0 297.26 L0 274.07 Z"
+ class="st5"/>
+ </g>
+ <g id="shape30-70" v:mID="30" v:groupContext="shape" transform="translate(169.209,-251.966)">
+ <title>Sheet.30</title>
+ <path d="M0 283.69 C0 280.91 2.27 278.65 5.04 278.65 L111.77 278.65 C114.56 278.65 116.81 280.91 116.81 283.69 L116.81
+ 303.82 C116.81 306.6 114.56 308.85 111.77 308.85 L5.04 308.85 C2.27 308.85 0 306.6 0 303.82 L0 283.69 Z"
+ class="st1"/>
+ </g>
+ <g id="shape31-72" v:mID="31" v:groupContext="shape" transform="translate(169.209,-251.966)">
+ <title>Sheet.31</title>
+ <path d="M0 283.69 C0 280.91 2.27 278.65 5.04 278.65 L111.77 278.65 C114.56 278.65 116.81 280.91 116.81 283.69 L116.81
+ 303.82 C116.81 306.6 114.56 308.85 111.77 308.85 L5.04 308.85 C2.27 308.85 0 306.6 0 303.82 L0 283.69 Z"
+ class="st2"/>
+ </g>
+ <g id="shape35-74" v:mID="35" v:groupContext="shape" transform="translate(291.966,-244.476)">
+ <title>Sheet.35</title>
+ <path d="M0 274.07 L22.69 274.07 L22.69 262.48 L45.38 285.66 L22.69 308.85 L22.69 297.26 L0 297.26 L0 274.07 Z"
+ class="st5"/>
+ </g>
+ <g id="shape36-76" v:mID="36" v:groupContext="shape" transform="translate(343.17,-254.482)">
+ <title>Sheet.36</title>
+ <path d="M0 288.09 C0 285.8 1.88 283.93 4.17 283.93 L109.18 283.93 C111.47 283.93 113.33 285.8 113.33 288.09 L113.33
+ 304.7 C113.33 306.99 111.47 308.85 109.18 308.85 L4.17 308.85 C1.88 308.85 0 306.99 0 304.7 L0 288.09 Z"
+ class="st1"/>
+ </g>
+ <g id="shape37-78" v:mID="37" v:groupContext="shape" transform="translate(343.17,-254.482)">
+ <title>Sheet.37</title>
+ <path d="M0 288.09 C0 285.8 1.88 283.93 4.17 283.93 L109.18 283.93 C111.47 283.93 113.33 285.8 113.33 288.09 L113.33
+ 304.7 C113.33 306.99 111.47 308.85 109.18 308.85 L4.17 308.85 C1.88 308.85 0 306.99 0 304.7 L0 288.09 Z"
+ class="st2"/>
+ </g>
+ <g id="shape38-80" v:mID="38" v:groupContext="shape" transform="translate(368.337,-257.958)">
+ <title>Sheet.38</title>
+ <desc>Position = 6</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="38.1131" cy="301.658" width="76.23" height="14.3829"/>
+ <path d="M76.23 294.47 L0 294.47 L0 308.85 L76.23 308.85 L76.23 294.47" class="st3"/>
+ <text x="6.64" y="305.25" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Position = 6</text> </g>
+ <g id="shape39-84" v:mID="39" v:groupContext="shape" transform="translate(158.044,-86.5202)">
+ <title>Sheet.39</title>
+ <path d="M0 308.85 L69.59 308.85 C70.16 308.85 70.62 308.39 70.62 307.83 L70.62 148.5 L68.57 148.5 L68.57 307.83 L69.59
+ 306.81 L0 306.81 L0 308.85 ZM72.66 149.52 L69.59 143.4 L66.53 149.52 L72.66 149.52 Z" class="st11"/>
+ </g>
+ <g id="shape41-86" v:mID="41" v:groupContext="shape" transform="translate(335.112,-199.647)">
+ <title>Sheet.41</title>
+ <desc>Apply the equation</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="71.2648" cy="300.436" width="142.53" height="16.8275"/>
+ <path d="M142.53 292.02 L0 292.02 L0 308.85 L142.53 308.85 L142.53 292.02" class="st3"/>
+ <text x="13.19" y="304.64" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Apply the equation </text> </g>
+ <g id="shape42-90" v:mID="42" v:groupContext="shape" transform="translate(341.115,-182.871)">
+ <title>Sheet.42</title>
+ <desc>to retrieve the bit</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="64.5256" cy="300.436" width="129.06" height="16.8275"/>
+ <path d="M129.05 292.02 L0 292.02 L0 308.85 L129.05 308.85 L129.05 292.02" class="st3"/>
+ <text x="12.31" y="304.64" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>to retrieve the bit </text> </g>
+ <g id="shape43-94" v:mID="43" v:groupContext="shape" transform="translate(349.999,-166.095)">
+ <title>Sheet.43</title>
+ <desc>position in the</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="54.2285" cy="300.436" width="108.46" height="16.8275"/>
+ <path d="M108.46 292.02 L0 292.02 L0 308.85 L108.46 308.85 L108.46 292.02" class="st3"/>
+ <text x="10.97" y="304.64" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>position in the </text> </g>
+ <g id="shape44-98" v:mID="44" v:groupContext="shape" transform="translate(353.361,-149.319)">
+ <title>Sheet.44</title>
+ <desc>lookup_table</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="47.9619" cy="300.436" width="95.93" height="16.8275"/>
+ <path d="M95.92 292.02 L0 292.02 L0 308.85 L95.92 308.85 L95.92 292.02" class="st3"/>
+ <text x="8.21" y="304.64" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>lookup_table</text> </g>
+ <g id="shape47-102" v:mID="47" v:groupContext="shape" transform="translate(115.17,255.2) rotate(-90)">
+ <title>1-D word balloon</title>
+ <desc>Retrieve the value “0&#39; from the specified location in the loo...</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="Scale" v:val="VT0(1):26"/>
+ <v:ud v:nameU="AntiScale" v:val="VT0(1):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="29.2016" cy="218.85" width="180" height="58.4032" transform="rotate(90)"/>
+ <path d="M0 308.85 L58.4 308.85 L58.4 128.85 L0 128.85 L0 204.67 L-11.87 38.85 L-7.09 233.03 L0 233.03 L0 308.85 Z"
+ class="st13"/>
+ <text x="136.98" y="-41.8" transform="rotate(90)" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Retrieve the value “0&#39; from <tspan
+ x="134.41" dy="1.2em" class="st15">the specified location in the </tspan><tspan x="181.1" dy="1.2em"
+ class="st15">lookup table</tspan></text> </g>
+ <g id="shape48-107" v:mID="48" v:groupContext="shape" transform="translate(169.209,-251.966)">
+ <title>Sheet.48</title>
+ <desc>F(Key, hash_index = 38123</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54.2285" cy="295.35" width="108.46" height="27"/>
+ <rect x="0" y="281.85" width="108.457" height="27" class="st16"/>
+ <text x="5.86" y="291.75" class="st17" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>F(Key, hash_index = <tspan
+ x="39.02" dy="1.2em" class="st15">38123</tspan></text> </g>
+ <g id="shape49-111" v:mID="49" v:groupContext="shape" transform="translate(553.962,99) rotate(90)">
+ <title>1-D word balloon.49</title>
+ <desc>Apply the equation to retrieve the bit position in the lookup...</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="Scale" v:val="VT0(1):26"/>
+ <v:ud v:nameU="AntiScale" v:val="VT0(1):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="29.2016" cy="218.85" width="180" height="58.4032" transform="rotate(-90)"/>
+ <path d="M0 308.85 L58.4 308.85 L58.4 128.85 L0 128.85 L0 204.67 L-51.13 299.85 L0 233.03 L0 308.85 Z" class="st13"/>
+ <text x="-284.62" y="16.6" transform="rotate(-90)" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Apply the equation to <tspan
+ x="-296.67" dy="1.2em" class="st15">retrieve the bit position in </tspan><tspan x="-270.22" dy="1.2em"
+ class="st15">the lookup</tspan>_table</text> </g>
+ <g id="shape50-116" v:mID="50" v:groupContext="shape" transform="translate(640.132,-104.709) rotate(44.1224)">
+ <title>Sheet.50</title>
+ <path d="M0 308.85 L54.13 308.85" class="st18"/>
+ </g>
+ <g id="shape51-122" v:mID="51" v:groupContext="shape" transform="translate(433.02,-122.267)">
+ <title>Sheet.51</title>
+ <desc>(Hash(key,seed1)+38123*hash(key,seed2))%16</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="144" cy="295.35" width="288" height="27"/>
+ <rect x="0" y="281.85" width="288" height="27" class="st2"/>
+ <text x="9.86" y="299.55" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(Hash(key,seed1)+38123*hash(key,seed2))%16</text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/efd_i12.svg b/doc/guides/prog_guide/img/efd_i12.svg
new file mode 100644
index 00000000..a309d582
--- /dev/null
+++ b/doc/guides/prog_guide/img/efd_i12.svg
@@ -0,0 +1,1008 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export efd_i13.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="10.2932in" height="5.27505in"
+ viewBox="0 0 741.108 379.804" xml:space="preserve" color-interpolation-filters="sRGB" class="st30">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud v:nameU="msvSubprocessMaster" v:prompt="" v:val="VT4(Rectangle)"/>
+ <v:ud v:nameU="msvNoAutoConnect" v:val="VT0(1):26"/>
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {stroke:#004280;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st2 {stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st3 {fill:#004280;font-family:Arial;font-size:0.828804em}
+ .st4 {fill:#ffffff;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st5 {stroke:#00aeef;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st6 {fill:#7030a0;font-family:Arial;font-size:0.828804em;font-weight:bold}
+ .st7 {fill:#d0d6d9;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st8 {fill:#006fc5;stroke:#006fc5;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.00749637}
+ .st9 {fill:#006fc5;stroke:#006fc5;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0149927}
+ .st10 {fill:#d0d6d9;stroke:#d0d6d9;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0299855}
+ .st11 {fill:#d0d6d9;stroke:#d0d6d9;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0149927}
+ .st12 {fill:#004280;font-family:Arial;font-size:0.828804em;font-weight:bold}
+ .st13 {fill:#00b050;font-family:Arial;font-size:0.828804em;font-weight:bold}
+ .st14 {fill:#ff0000;font-family:Arial;font-size:0.828804em;font-weight:bold}
+ .st15 {fill:#00b050;stroke:#00b050;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0299855}
+ .st16 {fill:#c00000;font-family:Arial;font-size:0.828804em;font-weight:bold}
+ .st17 {fill:#000000;font-family:Arial;font-size:0.828804em;font-weight:bold}
+ .st18 {fill:#7f6d00;font-family:Arial;font-size:0.828804em;font-weight:bold}
+ .st19 {fill:#ff0000;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st20 {fill:#7e8d96;font-family:Arial;font-size:0.828804em;font-weight:bold}
+ .st21 {fill:none;stroke:#00aeef;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st22 {fill:#000000;font-family:Arial;font-size:0.998566em}
+ .st23 {fill:#0071c5;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st24 {stroke:#ff0000;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st25 {fill:#ffffff;font-family:Arial;font-size:0.998566em}
+ .st26 {fill:#ff6600;stroke:#ff6600;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.00749637}
+ .st27 {fill:#004280;stroke:#004280;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0299855}
+ .st28 {fill:#ff0000;stroke:#ff0000;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0299855}
+ .st29 {fill:#ffffff;font-family:Arial;font-size:1.49785em}
+ .st30 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeOrder" v:val="VT0(0):26"/>
+ </v:userDefs>
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="shape3-1" v:mID="3" v:groupContext="shape" transform="translate(304.703,-329.32)">
+ <title>Sheet.3</title>
+ <path d="M0 379.8 C-0 375.37 0.6 371.78 1.35 371.78 L205.05 371.78 C205.78 371.78 206.38 368.18 206.38 363.75 C206.38
+ 368.18 206.98 371.78 207.73 371.78 L411.43 371.78 C412.15 371.78 412.75 375.37 412.75 379.8" class="st1"/>
+ </g>
+ <g id="shape4-4" v:mID="4" v:groupContext="shape" transform="translate(219.943,-329.32)">
+ <title>Sheet.4</title>
+ <path d="M0 379.8 C0 375.64 0.57 372.25 1.26 372.25 L29.77 372.25 C30.48 372.25 31.03 368.88 31.03 364.71 C31.03 368.88
+ 31.6 372.25 32.29 372.25 L60.81 372.25 C61.51 372.25 62.07 375.64 62.07 379.8" class="st1"/>
+ </g>
+ <g id="shape5-7" v:mID="5" v:groupContext="shape" transform="translate(241.175,-343.9)">
+ <title>Sheet.5</title>
+ <desc>Bins</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="12.7158" cy="373.835" width="25.44" height="11.9384"/>
+ <path d="M25.43 367.87 L0 367.87 L0 379.8 L25.43 379.8 L25.43 367.87" class="st2"/>
+ <text x="3.04" y="376.82" class="st3" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Bins</text> </g>
+ <g id="shape6-11" v:mID="6" v:groupContext="shape" transform="translate(496.212,-344.504)">
+ <title>Sheet.6</title>
+ <desc>Groups</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="20.3447" cy="373.835" width="40.69" height="11.9384"/>
+ <path d="M40.69 367.87 L0 367.87 L0 379.8 L40.69 379.8 L40.69 367.87" class="st2"/>
+ <text x="4.04" y="376.82" class="st3" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Groups</text> </g>
+ <g id="shape7-15" v:mID="7" v:groupContext="shape" transform="translate(131.823,-260.299)">
+ <title>Sheet.7</title>
+ <path d="M0 314.02 L0 379.8 L41.18 379.8 L41.18 314.02 L0 314.02 L0 314.02 Z" class="st4"/>
+ </g>
+ <g id="shape8-17" v:mID="8" v:groupContext="shape" transform="translate(131.823,-260.299)">
+ <title>Sheet.8</title>
+ <path d="M0 314.02 L41.18 314.02 L41.18 379.8 L0 379.8 L0 314.02" class="st5"/>
+ </g>
+ <g id="shape9-20" v:mID="9" v:groupContext="shape" transform="translate(134.706,-310.738)">
+ <title>Sheet.9</title>
+ <desc>0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0</text> </g>
+ <g id="shape10-24" v:mID="10" v:groupContext="shape" transform="translate(122.218,-329.32)">
+ <title>Sheet.10</title>
+ <path d="M0 379.8 C-0 375.64 0.57 372.25 1.26 372.25 L29.77 372.25 C30.47 372.25 31.03 368.88 31.03 364.71 C31.03 368.88
+ 31.6 372.25 32.29 372.25 L60.81 372.25 C61.51 372.25 62.07 375.64 62.07 379.8" class="st1"/>
+ </g>
+ <g id="shape11-27" v:mID="11" v:groupContext="shape" transform="translate(137.598,-343.9)">
+ <title>Sheet.11</title>
+ <desc>Chunks</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="20.9813" cy="373.835" width="41.97" height="11.9384"/>
+ <path d="M41.96 367.87 L0 367.87 L0 379.8 L41.96 379.8 L41.96 367.87" class="st2"/>
+ <text x="4.12" y="376.82" class="st3" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Chunks</text> </g>
+ <g id="shape12-31" v:mID="12" v:groupContext="shape" transform="translate(131.823,-195.232)">
+ <title>Sheet.12</title>
+ <path d="M0 314.02 L0 379.8 L41.18 379.8 L41.18 314.02 L0 314.02 L0 314.02 Z" class="st4"/>
+ </g>
+ <g id="shape13-33" v:mID="13" v:groupContext="shape" transform="translate(131.823,-195.232)">
+ <title>Sheet.13</title>
+ <path d="M0 314.02 L41.18 314.02 L41.18 379.8 L0 379.8 L0 314.02" class="st5"/>
+ </g>
+ <g id="shape14-36" v:mID="14" v:groupContext="shape" transform="translate(134.706,-245.682)">
+ <title>Sheet.14</title>
+ <desc>1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>1</text> </g>
+ <g id="shape15-40" v:mID="15" v:groupContext="shape" transform="translate(131.823,-130.525)">
+ <title>Sheet.15</title>
+ <path d="M0 314.02 L0 379.8 L41.18 379.8 L41.18 314.02 L0 314.02 L0 314.02 Z" class="st7"/>
+ </g>
+ <g id="shape16-42" v:mID="16" v:groupContext="shape" transform="translate(131.823,-130.525)">
+ <title>Sheet.16</title>
+ <path d="M0 314.02 L41.18 314.02 L41.18 379.8 L0 379.8 L0 314.02" class="st5"/>
+ </g>
+ <g id="shape17-45" v:mID="17" v:groupContext="shape" transform="translate(134.706,-180.952)">
+ <title>Sheet.17</title>
+ <desc>…</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="7.30974" cy="373.835" width="14.62" height="11.9384"/>
+ <path d="M14.62 367.87 L0 367.87 L0 379.8 L14.62 379.8 L14.62 367.87" class="st2"/>
+ <text x="2.34" y="376.82" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>…</text> </g>
+ <g id="shape18-49" v:mID="18" v:groupContext="shape" transform="translate(131.823,-65.4584)">
+ <title>Sheet.18</title>
+ <path d="M0 314.02 L0 379.8 L41.18 379.8 L41.18 314.02 L0 314.02 L0 314.02 Z" class="st4"/>
+ </g>
+ <g id="shape19-51" v:mID="19" v:groupContext="shape" transform="translate(131.823,-65.4584)">
+ <title>Sheet.19</title>
+ <path d="M0 314.02 L41.18 314.02 L41.18 379.8 L0 379.8 L0 314.02" class="st5"/>
+ </g>
+ <g id="shape20-54" v:mID="20" v:groupContext="shape" transform="translate(130.403,-115.896)">
+ <title>Sheet.20</title>
+ <desc>variable</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="24.7986" cy="373.835" width="49.6" height="11.9384"/>
+ <path d="M49.6 367.87 L0 367.87 L0 379.8 L49.6 379.8 L49.6 367.87" class="st2"/>
+ <text x="6" y="376.82" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>variable </text> </g>
+ <g id="shape21-58" v:mID="21" v:groupContext="shape" transform="translate(130.403,-103.913)">
+ <title>Sheet.21</title>
+ <desc># of</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="13.347" cy="373.835" width="26.7" height="11.9384"/>
+ <path d="M26.69 367.87 L0 367.87 L0 379.8 L26.69 379.8 L26.69 367.87" class="st2"/>
+ <text x="4.51" y="376.82" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/># of </text> </g>
+ <g id="shape22-62" v:mID="22" v:groupContext="shape" transform="translate(130.403,-91.93)">
+ <title>Sheet.22</title>
+ <desc>chunks</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="21.6122" cy="373.835" width="43.23" height="11.9384"/>
+ <path d="M43.22 367.87 L0 367.87 L0 379.8 L43.22 379.8 L43.22 367.87" class="st2"/>
+ <text x="4.2" y="376.82" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>chunks</text> </g>
+ <g id="shape23-66" v:mID="23" v:groupContext="shape" transform="translate(130.403,-79.9472)">
+ <title>Sheet.23</title>
+ <desc>(power</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="21.9251" cy="373.835" width="43.86" height="11.9384"/>
+ <path d="M43.85 367.87 L0 367.87 L0 379.8 L43.85 379.8 L43.85 367.87" class="st2"/>
+ <text x="5.62" y="376.82" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(power </text> </g>
+ <g id="shape24-70" v:mID="24" v:groupContext="shape" transform="translate(130.403,-67.9643)">
+ <title>Sheet.24</title>
+ <desc>of 2)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="13.6626" cy="373.835" width="27.33" height="11.9384"/>
+ <path d="M27.33 367.87 L0 367.87 L0 379.8 L27.33 379.8 L27.33 367.87" class="st2"/>
+ <text x="3.17" y="376.82" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>of 2)</text> </g>
+ <g id="shape25-74" v:mID="25" v:groupContext="shape" transform="translate(172.289,-260.838)">
+ <title>Sheet.25</title>
+ <path d="M1.43 379.8 L3.29 375.51 L1.85 374.9 L0 379.19 L1.43 379.8 L1.43 379.8 ZM3.9 374.08 L5.76 369.79 L4.32 369.18
+ L2.47 373.47 L3.9 374.08 L3.9 374.08 ZM6.37 368.36 L8.22 364.07 L6.79 363.45 L4.94 367.75 L6.37 368.36 L6.37
+ 368.36 ZM8.84 362.64 L10.69 358.35 L9.26 357.73 L7.41 362.02 L8.84 362.64 L8.84 362.64 ZM11.31 356.92 L13.16
+ 352.62 L11.73 352 L9.87 356.3 L11.31 356.92 L11.31 356.92 ZM13.78 351.19 L15.63 346.9 L14.2 346.28 L12.34
+ 350.57 L13.78 351.19 L13.78 351.19 ZM16.25 345.47 L18.1 341.17 L16.67 340.56 L14.81 344.85 L16.25 345.47
+ L16.25 345.47 ZM18.71 339.74 L20.57 335.45 L19.13 334.84 L17.28 339.13 L18.71 339.74 L18.71 339.74 ZM21.18
+ 334.02 L23.04 329.73 L21.6 329.12 L19.75 333.41 L21.18 334.02 L21.18 334.02 ZM23.65 328.3 L25.5 324.01 L24.07
+ 323.39 L22.22 327.68 L23.65 328.3 L23.65 328.3 ZM26.12 322.58 L27.97 318.28 L26.54 317.67 L24.69 321.96
+ L26.12 322.58 L26.12 322.58 ZM28.59 316.85 L29.44 314.87 L28.01 314.25 L27.16 316.24 L28.59 316.85 L28.59
+ 316.85 Z" class="st8"/>
+ </g>
+ <g id="shape26-76" v:mID="26" v:groupContext="shape" transform="translate(172.476,-20.463)">
+ <title>Sheet.26</title>
+ <path d="M1.55 203.84 L2.28 208.45 L0.74 208.7 L0 204.09 L1.55 203.84 L1.55 203.84 ZM2.52 209.99 L3.27 214.61 L1.73 214.86
+ L0.99 210.23 L2.52 209.99 L2.52 209.99 ZM3.51 216.15 L4.25 220.76 L2.7 221 L1.97 216.39 L3.51 216.15 L3.51
+ 216.15 ZM4.49 222.3 L5.24 226.92 L3.69 227.16 L2.96 222.54 L4.49 222.3 L4.49 222.3 ZM5.48 228.45 L6.21 233.07
+ L4.67 233.31 L3.93 228.7 L5.48 228.45 L5.48 228.45 ZM6.47 234.6 L7.2 239.22 L5.66 239.47 L4.92 234.86 L6.47
+ 234.6 L6.47 234.6 ZM7.44 240.76 L8.18 245.37 L6.65 245.63 L5.9 241 L7.44 240.76 L7.44 240.76 ZM8.43 246.91
+ L9.17 251.53 L7.62 251.77 L6.89 247.15 L8.43 246.91 L8.43 246.91 ZM9.41 253.07 L10.14 257.68 L8.61 257.92
+ L7.88 253.31 L9.41 253.07 L9.41 253.07 ZM10.4 259.21 L11.14 263.84 L9.59 264.08 L8.85 259.47 L10.4 259.21
+ L10.4 259.21 ZM11.38 265.37 L12.13 269.98 L10.58 270.24 L9.84 265.62 L11.38 265.37 L11.38 265.37 ZM12.37
+ 271.52 L13.1 276.14 L11.56 276.39 L10.82 271.76 L12.37 271.52 L12.37 271.52 ZM13.34 277.68 L14.09 282.29
+ L12.55 282.53 L11.81 277.92 L13.34 277.68 L13.34 277.68 ZM14.33 283.84 L15.07 288.45 L13.52 288.69 L12.79
+ 284.08 L14.33 283.84 L14.33 283.84 ZM15.32 289.99 L16.06 294.61 L14.51 294.85 L13.78 290.23 L15.32 289.99
+ L15.32 289.99 ZM16.3 296.13 L17.03 300.75 L15.5 301 L14.75 296.39 L16.3 296.13 L16.3 296.13 ZM17.29 302.29
+ L18.02 306.9 L16.48 307.16 L15.74 302.53 L17.29 302.29 L17.29 302.29 ZM18.26 308.45 L19 313.06 L17.47 313.3
+ L16.73 308.69 L18.26 308.45 L18.26 308.45 ZM19.25 314.6 L19.99 319.22 L18.44 319.46 L17.71 314.84 L19.25
+ 314.6 L19.25 314.6 ZM20.23 320.76 L20.96 325.37 L19.43 325.61 L18.7 321 L20.23 320.76 L20.23 320.76 ZM21.22
+ 326.9 L21.96 331.51 L20.41 331.77 L19.67 327.15 L21.22 326.9 L21.22 326.9 ZM22.2 333.06 L22.95 337.67 L21.4
+ 337.92 L20.66 333.31 L22.2 333.06 L22.2 333.06 ZM23.19 339.21 L23.92 343.83 L22.38 344.07 L21.64 339.45
+ L23.19 339.21 L23.19 339.21 ZM24.18 345.37 L24.91 349.98 L23.37 350.22 L22.63 345.61 L24.18 345.37 L24.18
+ 345.37 ZM25.15 351.52 L25.89 356.14 L24.36 356.38 L23.61 351.76 L25.15 351.52 L25.15 351.52 ZM26.14 357.67
+ L26.88 362.28 L25.33 362.53 L24.6 357.92 L26.14 357.67 L26.14 357.67 ZM27.12 363.82 L27.85 368.44 L26.32
+ 368.69 L25.59 364.08 L27.12 363.82 L27.12 363.82 ZM28.11 369.98 L28.84 374.59 L27.3 374.83 L26.56 370.22
+ L28.11 369.98 L28.11 369.98 ZM29.08 376.13 L29.64 379.55 L28.09 379.8 L27.55 376.37 L29.08 376.13 L29.08
+ 376.13 Z" class="st9"/>
+ </g>
+ <g id="shape27-78" v:mID="27" v:groupContext="shape" transform="translate(276.159,-233.368)">
+ <title>Sheet.27</title>
+ <path d="M0.45 294.85 L354.04 376.06 L353.59 378.04 L0 296.85 L0.45 294.85 L0.45 294.85 ZM353.5 373.84 L358.79 378.19
+ L352.12 379.8 L353.5 373.84 L353.5 373.84 Z" class="st10"/>
+ </g>
+ <g id="shape28-80" v:mID="28" v:groupContext="shape" transform="translate(275.859,-178.726)">
+ <title>Sheet.28</title>
+ <path d="M1.05 240.32 L231.44 376.33 L230.39 378.1 L0 242.09 L1.05 240.32 L1.05 240.32 ZM231.59 374.05 L235.31 379.8
+ L228.47 379.32 L231.59 374.05 L231.59 374.05 Z" class="st10"/>
+ </g>
+ <g id="shape29-82" v:mID="29" v:groupContext="shape" transform="translate(275.379,-87.6866)">
+ <title>Sheet.29</title>
+ <path d="M2 149.94 L50.05 374.61 L48.05 375.04 L0 150.38 L2 149.94 L2 149.94 ZM51.83 373.18 L50.12 379.8 L45.85 374.47
+ L51.83 373.18 L51.83 373.18 Z" class="st11"/>
+ </g>
+ <g id="shape30-84" v:mID="30" v:groupContext="shape" transform="translate(276.279,-177.108)">
+ <title>Sheet.30</title>
+ <path d="M0.21 353.74 L229.55 375.85 L229.34 377.89 L0 355.75 L0.21 353.74 L0.21 353.74 ZM228.71 373.72 L234.53 377.35
+ L228.14 379.8 L228.71 373.72 L228.71 373.72 Z" class="st10"/>
+ </g>
+ <g id="shape31-86" v:mID="31" v:groupContext="shape" transform="translate(275.919,-213.926)">
+ <title>Sheet.31</title>
+ <path d="M0.45 308.72 L312.65 376.06 L312.2 378.04 L0 310.72 L0.45 308.72 L0.45 308.72 ZM312.08 373.84 L317.43 378.13
+ L310.79 379.8 L312.08 373.84 L312.08 373.84 Z" class="st10"/>
+ </g>
+ <g id="shape32-88" v:mID="32" v:groupContext="shape" transform="translate(275.439,-143.377)">
+ <title>Sheet.32</title>
+ <path d="M1.4 238.41 L150.34 375.59 L148.96 377.09 L0 239.9 L1.4 238.41 L1.4 238.41 ZM150.98 373.41 L153.4 379.8 L146.83
+ 377.9 L150.98 373.41 L150.98 373.41 Z" class="st11"/>
+ </g>
+ <g id="shape33-90" v:mID="33" v:groupContext="shape" transform="translate(275.274,-108.821)">
+ <title>Sheet.33</title>
+ <path d="M1.73 236.53 L90.79 374.97 L89.08 376.07 L0 237.63 L1.73 236.53 L1.73 236.53 ZM91.96 373 L92.7 379.8 L86.82
+ 376.31 L91.96 373 L91.96 373 Z" class="st11"/>
+ </g>
+ <g id="shape34-92" v:mID="34" v:groupContext="shape" transform="translate(275.364,-124.069)">
+ <title>Sheet.34</title>
+ <path d="M1.55 251.66 L108.22 375.28 L106.67 376.61 L0 253 L1.55 251.66 L1.55 251.66 ZM109.1 373.18 L110.78 379.8 L104.46
+ 377.17 L109.1 373.18 L109.1 373.18 Z" class="st11"/>
+ </g>
+ <g id="shape35-94" v:mID="35" v:groupContext="shape" transform="translate(275.154,-87.7165)">
+ <title>Sheet.35</title>
+ <path d="M1.97 215.68 L49.85 374.64 L47.9 375.22 L0 216.27 L1.97 215.68 L1.97 215.68 ZM51.52 373.08 L50.35 379.8 L45.65
+ 374.83 L51.52 373.08 L51.52 373.08 Z" class="st11"/>
+ </g>
+ <g id="shape36-96" v:mID="36" v:groupContext="shape" transform="translate(276.009,-143.736)">
+ <title>Sheet.36</title>
+ <path d="M0.74 320.41 L147.92 376.36 L147.2 378.26 L0 322.32 L0.74 320.41 L0.74 320.41 ZM147.7 374.08 L152.34 379.11
+ L145.52 379.8 L147.7 374.08 L147.7 374.08 Z" class="st11"/>
+ </g>
+ <g id="shape37-98" v:mID="37" v:groupContext="shape" transform="translate(275.649,-108.821)">
+ <title>Sheet.37</title>
+ <path d="M1.46 285.74 L89.46 375.45 L88 376.87 L0 287.16 L1.46 285.74 L1.46 285.74 ZM90.21 373.29 L92.29 379.8 L85.82
+ 377.57 L90.21 373.29 L90.21 373.29 Z" class="st11"/>
+ </g>
+ <g id="shape38-100" v:mID="38" v:groupContext="shape" transform="translate(275.934,-108.686)">
+ <title>Sheet.38</title>
+ <path d="M0.89 335.24 L87.85 376.57 L86.97 378.41 L0 337.09 L0.89 335.24 L0.89 335.24 ZM87.81 374.29 L92.01 379.67 L85.16
+ 379.8 L87.81 374.29 L87.81 374.29 Z" class="st11"/>
+ </g>
+ <g id="shape39-102" v:mID="39" v:groupContext="shape" transform="translate(275.574,-89.454)">
+ <title>Sheet.39</title>
+ <path d="M1.61 316.29 L48.49 375.18 L46.88 376.45 L0 317.57 L1.61 316.29 L1.61 316.29 ZM49.45 373.11 L50.86 379.8 L44.65
+ 376.91 L49.45 373.11 L49.45 373.11 Z" class="st11"/>
+ </g>
+ <g id="shape40-104" v:mID="40" v:groupContext="shape" transform="translate(276.324,-141.744)">
+ <title>Sheet.40</title>
+ <path d="M0.11 368.21 L146.74 375.79 L146.62 377.83 L0 370.23 L0.11 368.21 L0.11 368.21 ZM145.82 373.71 L151.78 377.08
+ L145.51 379.8 L145.82 373.71 L145.82 373.71 Z" class="st11"/>
+ </g>
+ <g id="shape41-106" v:mID="41" v:groupContext="shape" transform="translate(230.508,-309.069)">
+ <title>Sheet.41</title>
+ <path d="M0 363.27 L0 379.8 L41.18 379.8 L41.18 363.27 L0 363.27 L0 363.27 Z" class="st4"/>
+ </g>
+ <g id="shape42-108" v:mID="42" v:groupContext="shape" transform="translate(230.508,-309.069)">
+ <title>Sheet.42</title>
+ <path d="M0 363.27 L41.18 363.27 L41.18 379.8 L0 379.8 L0 363.27" class="st5"/>
+ </g>
+ <g id="shape43-111" v:mID="43" v:groupContext="shape" transform="translate(233.39,-309.868)">
+ <title>Sheet.43</title>
+ <desc>0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0</text> </g>
+ <g id="shape44-115" v:mID="44" v:groupContext="shape" transform="translate(263.764,-309.869)">
+ <title>Sheet.44</title>
+ <desc>4</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>4</text> </g>
+ <g id="shape45-119" v:mID="45" v:groupContext="shape" transform="translate(230.508,-292.413)">
+ <title>Sheet.45</title>
+ <path d="M0 363.27 L0 379.8 L41.18 379.8 L41.18 363.27 L0 363.27 L0 363.27 Z" class="st7"/>
+ </g>
+ <g id="shape46-121" v:mID="46" v:groupContext="shape" transform="translate(230.508,-292.413)">
+ <title>Sheet.46</title>
+ <path d="M0 363.27 L41.18 363.27 L41.18 379.8 L0 379.8 L0 363.27" class="st5"/>
+ </g>
+ <g id="shape47-124" v:mID="47" v:groupContext="shape" transform="translate(233.39,-293.221)">
+ <title>Sheet.47</title>
+ <desc>1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>1</text> </g>
+ <g id="shape48-128" v:mID="48" v:groupContext="shape" transform="translate(230.508,-275.757)">
+ <title>Sheet.48</title>
+ <path d="M0 363.27 L0 379.8 L41.18 379.8 L41.18 363.27 L0 363.27 L0 363.27 Z" class="st4"/>
+ </g>
+ <g id="shape49-130" v:mID="49" v:groupContext="shape" transform="translate(230.508,-275.757)">
+ <title>Sheet.49</title>
+ <path d="M0 363.27 L41.18 363.27 L41.18 379.8 L0 379.8 L0 363.27" class="st5"/>
+ </g>
+ <g id="shape50-133" v:mID="50" v:groupContext="shape" transform="translate(233.39,-276.574)">
+ <title>Sheet.50</title>
+ <desc>2</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>2</text> </g>
+ <g id="shape51-137" v:mID="51" v:groupContext="shape" transform="translate(252.478,-276.574)">
+ <title>Sheet.51</title>
+ <desc>3</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>3</text> </g>
+ <g id="shape52-141" v:mID="52" v:groupContext="shape" transform="translate(258.001,-276.574)">
+ <title>Sheet.52</title>
+ <desc>+1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="8.11122" cy="373.835" width="16.23" height="11.9384"/>
+ <path d="M16.22 367.87 L0 367.87 L0 379.8 L16.22 379.8 L16.22 367.87" class="st2"/>
+ <text x="2.44" y="376.82" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>+1</text> </g>
+ <g id="shape53-145" v:mID="53" v:groupContext="shape" transform="translate(230.508,-259.7)">
+ <title>Sheet.53</title>
+ <path d="M0 363.27 L0 379.8 L41.18 379.8 L41.18 363.27 L0 363.27 L0 363.27 Z" class="st7"/>
+ </g>
+ <g id="shape54-147" v:mID="54" v:groupContext="shape" transform="translate(230.508,-259.7)">
+ <title>Sheet.54</title>
+ <path d="M0 363.27 L41.18 363.27 L41.18 379.8 L0 379.8 L0 363.27" class="st5"/>
+ </g>
+ <g id="shape55-150" v:mID="55" v:groupContext="shape" transform="translate(233.39,-260.497)">
+ <title>Sheet.55</title>
+ <desc>3</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>3</text> </g>
+ <g id="shape56-154" v:mID="56" v:groupContext="shape" transform="translate(230.508,-243.164)">
+ <title>Sheet.56</title>
+ <path d="M0 363.27 L0 379.8 L41.18 379.8 L41.18 363.27 L0 363.27 L0 363.27 Z" class="st4"/>
+ </g>
+ <g id="shape57-156" v:mID="57" v:groupContext="shape" transform="translate(230.508,-243.164)">
+ <title>Sheet.57</title>
+ <path d="M0 363.27 L41.18 363.27 L41.18 379.8 L0 379.8 L0 363.27" class="st5"/>
+ </g>
+ <g id="shape58-159" v:mID="58" v:groupContext="shape" transform="translate(233.39,-244.053)">
+ <title>Sheet.58</title>
+ <desc>4</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>4</text> </g>
+ <g id="shape59-163" v:mID="59" v:groupContext="shape" transform="translate(263.764,-244.053)">
+ <title>Sheet.59</title>
+ <desc>0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0</text> </g>
+ <g id="shape60-167" v:mID="60" v:groupContext="shape" transform="translate(230.508,-227.107)">
+ <title>Sheet.60</title>
+ <path d="M0 363.27 L0 379.8 L41.18 379.8 L41.18 363.27 L0 363.27 L0 363.27 Z" class="st7"/>
+ </g>
+ <g id="shape61-169" v:mID="61" v:groupContext="shape" transform="translate(230.508,-227.107)">
+ <title>Sheet.61</title>
+ <path d="M0 363.27 L41.18 363.27 L41.18 379.8 L0 379.8 L0 363.27" class="st5"/>
+ </g>
+ <g id="shape62-172" v:mID="62" v:groupContext="shape" transform="translate(233.39,-227.976)">
+ <title>Sheet.62</title>
+ <desc>5</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>5</text> </g>
+ <g id="shape63-176" v:mID="63" v:groupContext="shape" transform="translate(230.508,-210.211)">
+ <title>Sheet.63</title>
+ <path d="M0 363.27 L0 379.8 L41.18 379.8 L41.18 363.27 L0 363.27 L0 363.27 Z" class="st7"/>
+ </g>
+ <g id="shape64-178" v:mID="64" v:groupContext="shape" transform="translate(230.508,-210.211)">
+ <title>Sheet.64</title>
+ <path d="M0 363.27 L41.18 363.27 L41.18 379.8 L0 379.8 L0 363.27" class="st5"/>
+ </g>
+ <g id="shape65-181" v:mID="65" v:groupContext="shape" transform="translate(233.39,-211.085)">
+ <title>Sheet.65</title>
+ <desc>6</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>6</text> </g>
+ <g id="shape66-185" v:mID="66" v:groupContext="shape" transform="translate(230.508,-193.794)">
+ <title>Sheet.66</title>
+ <path d="M0 363.27 L0 379.8 L41.18 379.8 L41.18 363.27 L0 363.27 L0 363.27 Z" class="st4"/>
+ </g>
+ <g id="shape67-187" v:mID="67" v:groupContext="shape" transform="translate(230.508,-193.794)">
+ <title>Sheet.67</title>
+ <path d="M0 363.27 L41.18 363.27 L41.18 379.8 L0 379.8 L0 363.27" class="st5"/>
+ </g>
+ <g id="shape68-190" v:mID="68" v:groupContext="shape" transform="translate(233.39,-194.681)">
+ <title>Sheet.68</title>
+ <desc>7</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>7</text> </g>
+ <g id="shape69-194" v:mID="69" v:groupContext="shape" transform="translate(263.764,-194.681)">
+ <title>Sheet.69</title>
+ <desc>2</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>2</text> </g>
+ <g id="shape70-198" v:mID="70" v:groupContext="shape" transform="translate(230.508,-177.258)">
+ <title>Sheet.70</title>
+ <path d="M0 363.27 L0 379.8 L41.18 379.8 L41.18 363.27 L0 363.27 L0 363.27 Z" class="st7"/>
+ </g>
+ <g id="shape71-200" v:mID="71" v:groupContext="shape" transform="translate(230.508,-177.258)">
+ <title>Sheet.71</title>
+ <path d="M0 363.27 L41.18 363.27 L41.18 379.8 L0 379.8 L0 363.27" class="st5"/>
+ </g>
+ <g id="shape72-203" v:mID="72" v:groupContext="shape" transform="translate(233.39,-178.117)">
+ <title>Sheet.72</title>
+ <desc>8</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>8</text> </g>
+ <g id="shape73-207" v:mID="73" v:groupContext="shape" transform="translate(230.508,-160.602)">
+ <title>Sheet.73</title>
+ <path d="M0 363.15 L0 379.8 L41.18 379.8 L41.18 363.15 L0 363.15 L0 363.15 Z" class="st7"/>
+ </g>
+ <g id="shape74-209" v:mID="74" v:groupContext="shape" transform="translate(230.508,-160.602)">
+ <title>Sheet.74</title>
+ <path d="M0 363.15 L41.18 363.15 L41.18 379.8 L0 379.8 L0 363.15" class="st5"/>
+ </g>
+ <g id="shape75-212" v:mID="75" v:groupContext="shape" transform="translate(233.39,-161.505)">
+ <title>Sheet.75</title>
+ <desc>9</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>9</text> </g>
+ <g id="shape76-216" v:mID="76" v:groupContext="shape" transform="translate(230.508,-143.946)">
+ <title>Sheet.76</title>
+ <path d="M0 363.15 L0 379.8 L41.18 379.8 L41.18 363.15 L0 363.15 L0 363.15 Z" class="st4"/>
+ </g>
+ <g id="shape77-218" v:mID="77" v:groupContext="shape" transform="translate(230.508,-143.946)">
+ <title>Sheet.77</title>
+ <path d="M0 363.15 L41.18 363.15 L41.18 379.8 L0 379.8 L0 363.15" class="st5"/>
+ </g>
+ <g id="shape78-221" v:mID="78" v:groupContext="shape" transform="translate(233.39,-144.841)">
+ <title>Sheet.78</title>
+ <desc>10</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="7.95203" cy="373.835" width="15.91" height="11.9384"/>
+ <path d="M15.9 367.87 L0 367.87 L0 379.8 L15.9 379.8 L15.9 367.87" class="st2"/>
+ <text x="2.42" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>10</text> </g>
+ <g id="shape79-225" v:mID="79" v:groupContext="shape" transform="translate(263.764,-144.841)">
+ <title>Sheet.79</title>
+ <desc>1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>1</text> </g>
+ <g id="shape80-229" v:mID="80" v:groupContext="shape" transform="translate(230.508,-127.529)">
+ <title>Sheet.80</title>
+ <path d="M0 363.27 L0 379.8 L41.18 379.8 L41.18 363.27 L0 363.27 L0 363.27 Z" class="st7"/>
+ </g>
+ <g id="shape81-231" v:mID="81" v:groupContext="shape" transform="translate(230.508,-127.529)">
+ <title>Sheet.81</title>
+ <path d="M0 363.27 L41.18 363.27 L41.18 379.8 L0 379.8 L0 363.27" class="st5"/>
+ </g>
+ <g id="shape82-234" v:mID="82" v:groupContext="shape" transform="translate(233.39,-128.329)">
+ <title>Sheet.82</title>
+ <desc>11</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="7.95203" cy="373.835" width="15.91" height="11.9384"/>
+ <path d="M15.9 367.87 L0 367.87 L0 379.8 L15.9 379.8 L15.9 367.87" class="st2"/>
+ <text x="2.42" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>11</text> </g>
+ <g id="shape83-238" v:mID="83" v:groupContext="shape" transform="translate(230.508,-110.754)">
+ <title>Sheet.83</title>
+ <path d="M0 363.27 L0 379.8 L41.18 379.8 L41.18 363.27 L0 363.27 L0 363.27 Z" class="st7"/>
+ </g>
+ <g id="shape84-240" v:mID="84" v:groupContext="shape" transform="translate(230.508,-110.754)">
+ <title>Sheet.84</title>
+ <path d="M0 363.27 L41.18 363.27 L41.18 379.8 L0 379.8 L0 363.27" class="st5"/>
+ </g>
+ <g id="shape85-243" v:mID="85" v:groupContext="shape" transform="translate(233.39,-111.64)">
+ <title>Sheet.85</title>
+ <desc>12</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="7.95203" cy="373.835" width="15.91" height="11.9384"/>
+ <path d="M15.9 367.87 L0 367.87 L0 379.8 L15.9 379.8 L15.9 367.87" class="st2"/>
+ <text x="2.42" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>12</text> </g>
+ <g id="shape86-247" v:mID="86" v:groupContext="shape" transform="translate(230.508,-94.9362)">
+ <title>Sheet.86</title>
+ <path d="M0 363.27 L0 379.8 L41.18 379.8 L41.18 363.27 L0 363.27 L0 363.27 Z" class="st7"/>
+ </g>
+ <g id="shape87-249" v:mID="87" v:groupContext="shape" transform="translate(230.508,-94.9362)">
+ <title>Sheet.87</title>
+ <path d="M0 363.27 L41.18 363.27 L41.18 379.8 L0 379.8 L0 363.27" class="st5"/>
+ </g>
+ <g id="shape88-252" v:mID="88" v:groupContext="shape" transform="translate(233.39,-95.7375)">
+ <title>Sheet.88</title>
+ <desc>…</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="7.30974" cy="373.835" width="14.62" height="11.9384"/>
+ <path d="M14.62 367.87 L0 367.87 L0 379.8 L14.62 379.8 L14.62 367.87" class="st2"/>
+ <text x="2.34" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>…</text> </g>
+ <g id="shape89-256" v:mID="89" v:groupContext="shape" transform="translate(230.508,-78.999)">
+ <title>Sheet.89</title>
+ <path d="M0 363.27 L0 379.8 L41.18 379.8 L41.18 363.27 L0 363.27 L0 363.27 Z" class="st7"/>
+ </g>
+ <g id="shape90-258" v:mID="90" v:groupContext="shape" transform="translate(230.508,-78.999)">
+ <title>Sheet.90</title>
+ <path d="M0 363.27 L41.18 363.27 L41.18 379.8 L0 379.8 L0 363.27" class="st5"/>
+ </g>
+ <g id="shape91-261" v:mID="91" v:groupContext="shape" transform="translate(233.39,-79.8525)">
+ <title>Sheet.91</title>
+ <desc>255</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="11.1326" cy="373.835" width="22.27" height="11.9384"/>
+ <path d="M22.27 367.87 L0 367.87 L0 379.8 L22.27 379.8 L22.27 367.87" class="st2"/>
+ <text x="2.84" y="376.82" class="st12" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>255</text> </g>
+ <g id="shape92-265" v:mID="92" v:groupContext="shape" transform="translate(276.219,-250.503)">
+ <title>Sheet.92</title>
+ <path d="M0.33 311.98 L396.81 375.94 L396.48 377.95 L0 313.99 L0.33 311.98 L0.33 311.98 ZM396.12 373.75 L401.68 377.74
+ L395.16 379.8 L396.12 373.75 L396.12 373.75 Z" class="st15"/>
+ </g>
+ <g id="shape93-267" v:mID="93" v:groupContext="shape" transform="translate(275.859,-178.426)">
+ <title>Sheet.93</title>
+ <path d="M0.57 305.72 L230.93 376.21 L230.33 378.16 L0 307.67 L0.57 305.72 L0.57 305.72 ZM230.57 373.96 L235.52 378.67
+ L228.77 379.8 L230.57 373.96 L230.57 373.96 Z" class="st15"/>
+ </g>
+ <g id="shape94-269" v:mID="94" v:groupContext="shape" transform="translate(276.279,-151.285)">
+ <title>Sheet.94</title>
+ <path d="M0.21 379.8 L230.12 353.17 L229.88 351.14 L0 377.8 L0.21 379.8 L0.21 379.8 ZM229.34 355.3 L235.07 351.55 L228.65
+ 349.25 L229.34 355.3 L229.34 355.3 Z" class="st15"/>
+ </g>
+ <g id="shape95-271" v:mID="95" v:groupContext="shape" transform="translate(276.009,-232.679)">
+ <title>Sheet.95</title>
+ <path d="M0.27 327.47 L354.22 375.91 L353.95 377.92 L0 329.48 L0.27 327.47 L0.27 327.47 ZM353.5 373.75 L359.15 377.62
+ L352.66 379.8 L353.5 373.75 L353.5 373.75 Z" class="st10"/>
+ </g>
+ <g id="shape96-273" v:mID="96" v:groupContext="shape" transform="translate(276.279,-201.134)">
+ <title>Sheet.96</title>
+ <path d="M0.21 379.8 L353.86 348.14 L353.68 346.1 L0 377.77 L0.21 379.8 L0.21 379.8 ZM353.05 350.24 L358.88 346.64 L352.48
+ 344.16 L353.05 350.24 L353.05 350.24 Z" class="st15"/>
+ </g>
+ <g id="shape97-275" v:mID="97" v:groupContext="shape" transform="translate(346.482,-41.2531)">
+ <title>Sheet.97</title>
+ <path d="M0 314.02 L0 379.8 L41.18 379.8 L41.18 314.02 L0 314.02 L0 314.02 Z" class="st7"/>
+ </g>
+ <g id="shape98-277" v:mID="98" v:groupContext="shape" transform="translate(346.482,-41.2531)">
+ <title>Sheet.98</title>
+ <path d="M0 314.02 L41.18 314.02 L41.18 379.8 L0 379.8 L0 314.02" class="st5"/>
+ </g>
+ <g id="shape99-280" v:mID="99" v:groupContext="shape" transform="translate(349.371,-91.6514)">
+ <title>Sheet.99</title>
+ <desc>…</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="7.30974" cy="373.835" width="14.62" height="11.9384"/>
+ <path d="M14.62 367.87 L0 367.87 L0 379.8 L14.62 379.8 L14.62 367.87" class="st2"/>
+ <text x="2.34" y="376.82" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>…</text> </g>
+ <g id="shape100-284" v:mID="100" v:groupContext="shape" transform="translate(470.019,-94.337)">
+ <title>Sheet.100</title>
+ <path d="M0 314.02 L0 379.8 L41.18 379.8 L41.18 314.02 L0 314.02 L0 314.02 Z" class="st7"/>
+ </g>
+ <g id="shape101-286" v:mID="101" v:groupContext="shape" transform="translate(470.019,-94.337)">
+ <title>Sheet.101</title>
+ <path d="M0 314.02 L41.18 314.02 L41.18 379.8 L0 379.8 L0 314.02" class="st5"/>
+ </g>
+ <g id="shape102-289" v:mID="102" v:groupContext="shape" transform="translate(472.925,-144.778)">
+ <title>Sheet.102</title>
+ <desc>5</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>5</text> </g>
+ <g id="shape103-293" v:mID="103" v:groupContext="shape" transform="translate(511.558,-113.749)">
+ <title>Sheet.103</title>
+ <path d="M0 314.02 L0 379.8 L41.18 379.8 L41.18 314.02 L0 314.02 L0 314.02 Z" class="st4"/>
+ </g>
+ <g id="shape104-295" v:mID="104" v:groupContext="shape" transform="translate(511.558,-113.749)">
+ <title>Sheet.104</title>
+ <path d="M0 314.02 L41.18 314.02 L41.18 379.8 L0 379.8 L0 314.02" class="st5"/>
+ </g>
+ <g id="shape105-298" v:mID="105" v:groupContext="shape" transform="translate(514.441,-164.138)">
+ <title>Sheet.105</title>
+ <desc>4</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>4</text> </g>
+ <g id="shape106-302" v:mID="106" v:groupContext="shape" transform="translate(542.148,-164.138)">
+ <title>Sheet.106</title>
+ <desc>2</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>2</text> </g>
+ <g id="shape107-306" v:mID="107" v:groupContext="shape" transform="translate(542.148,-152.155)">
+ <title>Sheet.107</title>
+ <desc>4</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st17" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>4</text> </g>
+ <g id="shape108-310" v:mID="108" v:groupContext="shape" transform="translate(536.626,-140.172)">
+ <title>Sheet.108</title>
+ <desc>10</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="7.95203" cy="373.835" width="15.91" height="11.9384"/>
+ <path d="M15.9 367.87 L0 367.87 L0 379.8 L15.9 379.8 L15.9 367.87" class="st2"/>
+ <text x="2.42" y="376.82" class="st17" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>10</text> </g>
+ <g id="shape109-314" v:mID="109" v:groupContext="shape" transform="translate(514.201,-114.441)">
+ <title>Sheet.109</title>
+ <desc>1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>1</text> </g>
+ <g id="shape110-318" v:mID="110" v:groupContext="shape" transform="translate(519.723,-114.441)">
+ <title>Sheet.110</title>
+ <desc>+4</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="8.11122" cy="373.835" width="16.23" height="11.9384"/>
+ <path d="M16.22 367.87 L0 367.87 L0 379.8 L16.22 379.8 L16.22 367.87" class="st2"/>
+ <text x="2.44" y="376.82" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>+4</text> </g>
+ <g id="shape111-322" v:mID="111" v:groupContext="shape" transform="translate(552.257,-130.525)">
+ <title>Sheet.111</title>
+ <path d="M0 314.02 L0 379.8 L41.18 379.8 L41.18 314.02 L0 314.02 L0 314.02 Z" class="st7"/>
+ </g>
+ <g id="shape112-324" v:mID="112" v:groupContext="shape" transform="translate(552.257,-130.525)">
+ <title>Sheet.112</title>
+ <path d="M0 314.02 L41.18 314.02 L41.18 379.8 L0 379.8 L0 314.02" class="st5"/>
+ </g>
+ <g id="shape113-327" v:mID="113" v:groupContext="shape" transform="translate(555.203,-180.952)">
+ <title>Sheet.113</title>
+ <desc>3</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>3</text> </g>
+ <g id="shape114-331" v:mID="114" v:groupContext="shape" transform="translate(634.615,-169.11)">
+ <title>Sheet.114</title>
+ <path d="M0 313.9 L0 379.8 L41.18 379.8 L41.18 313.9 L0 313.9 L0 313.9 Z" class="st4"/>
+ </g>
+ <g id="shape115-333" v:mID="115" v:groupContext="shape" transform="translate(634.615,-169.11)">
+ <title>Sheet.115</title>
+ <path d="M0 313.9 L41.18 313.9 L41.18 379.8 L0 379.8 L0 313.9" class="st5"/>
+ </g>
+ <g id="shape116-336" v:mID="116" v:groupContext="shape" transform="translate(637.526,-219.595)">
+ <title>Sheet.116</title>
+ <desc>1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>1</text> </g>
+ <g id="shape117-340" v:mID="117" v:groupContext="shape" transform="translate(665.234,-219.595)">
+ <title>Sheet.117</title>
+ <desc>2</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>2</text> </g>
+ <g id="shape118-344" v:mID="118" v:groupContext="shape" transform="translate(665.2,-225.489)">
+ <title>Sheet.118</title>
+ <path d="M0 379.32 L0 379.8 L5.52 379.8 L5.52 379.32 L0 379.32 L0 379.32 Z" class="st19"/>
+ </g>
+ <g id="shape119-346" v:mID="119" v:groupContext="shape" transform="translate(665.234,-207.612)">
+ <title>Sheet.119</title>
+ <desc>7</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st17" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>7</text> </g>
+ <g id="shape120-350" v:mID="120" v:groupContext="shape" transform="translate(637.286,-169.898)">
+ <title>Sheet.120</title>
+ <desc>5</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>5</text> </g>
+ <g id="shape121-354" v:mID="121" v:groupContext="shape" transform="translate(642.809,-169.898)">
+ <title>Sheet.121</title>
+ <desc>-</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="3.49545" cy="373.835" width="7" height="11.9384"/>
+ <path d="M6.99 367.87 L0 367.87 L0 379.8 L6.99 379.8 L6.99 367.87" class="st2"/>
+ <text x="1.84" y="376.82" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>-</text> </g>
+ <g id="shape122-358" v:mID="122" v:groupContext="shape" transform="translate(646.17,-169.898)">
+ <title>Sheet.122</title>
+ <desc>3</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>3</text> </g>
+ <g id="shape123-362" v:mID="123" v:groupContext="shape" transform="translate(676.275,-186.725)">
+ <title>Sheet.123</title>
+ <path d="M0 314.02 L0 379.8 L41.18 379.8 L41.18 314.02 L0 314.02 L0 314.02 Z" class="st4"/>
+ </g>
+ <g id="shape124-364" v:mID="124" v:groupContext="shape" transform="translate(676.275,-186.725)">
+ <title>Sheet.124</title>
+ <path d="M0 314.02 L41.18 314.02 L41.18 379.8 L0 379.8 L0 314.02" class="st5"/>
+ </g>
+ <g id="shape125-367" v:mID="125" v:groupContext="shape" transform="translate(679.141,-237.17)">
+ <title>Sheet.125</title>
+ <desc>0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0</text> </g>
+ <g id="shape126-371" v:mID="126" v:groupContext="shape" transform="translate(706.849,-237.17)">
+ <title>Sheet.126</title>
+ <desc>0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st17" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0</text> </g>
+ <g id="shape127-375" v:mID="127" v:groupContext="shape" transform="translate(678.901,-187.474)">
+ <title>Sheet.127</title>
+ <desc>4</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>4</text> </g>
+ <g id="shape128-379" v:mID="128" v:groupContext="shape" transform="translate(304.943,-21.841)">
+ <title>Sheet.128</title>
+ <path d="M0 314.02 L0 379.8 L41.18 379.8 L41.18 314.02 L0 314.02 L0 314.02 Z" class="st4"/>
+ </g>
+ <g id="shape129-381" v:mID="129" v:groupContext="shape" transform="translate(304.943,-21.841)">
+ <title>Sheet.129</title>
+ <path d="M0 314.02 L41.18 314.02 L41.18 379.8 L0 379.8 L0 314.02" class="st5"/>
+ </g>
+ <g id="shape130-384" v:mID="130" v:groupContext="shape" transform="translate(307.855,-72.2917)">
+ <title>Sheet.130</title>
+ <desc>64</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="7.95203" cy="373.835" width="15.91" height="11.9384"/>
+ <path d="M15.9 367.87 L0 367.87 L0 379.8 L15.9 379.8 L15.9 367.87" class="st2"/>
+ <text x="2.42" y="376.82" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>64</text> </g>
+ <g id="shape131-388" v:mID="131" v:groupContext="shape" transform="translate(330.041,-72.2917)">
+ <title>Sheet.131</title>
+ <desc>96</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="7.95203" cy="373.835" width="15.91" height="11.9384"/>
+ <path d="M15.9 367.87 L0 367.87 L0 379.8 L15.9 379.8 L15.9 367.87" class="st2"/>
+ <text x="2.42" y="376.82" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>96</text> </g>
+ <g id="shape132-392" v:mID="132" v:groupContext="shape" transform="translate(307.616,-22.5952)">
+ <title>Sheet.132</title>
+ <desc>7</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>7</text> </g>
+ <g id="shape133-396" v:mID="133" v:groupContext="shape" transform="translate(428.72,-77.4413)">
+ <title>Sheet.133</title>
+ <path d="M0 314.02 L0 379.8 L41.18 379.8 L41.18 314.02 L0 314.02 L0 314.02 Z" class="st4"/>
+ </g>
+ <g id="shape134-398" v:mID="134" v:groupContext="shape" transform="translate(428.72,-77.4413)">
+ <title>Sheet.134</title>
+ <path d="M0 314.02 L41.18 314.02 L41.18 379.8 L0 379.8 L0 314.02" class="st5"/>
+ </g>
+ <g id="shape135-401" v:mID="135" v:groupContext="shape" transform="translate(431.648,-127.825)">
+ <title>Sheet.135</title>
+ <desc>6</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>6</text> </g>
+ <g id="shape136-405" v:mID="136" v:groupContext="shape" transform="translate(453.834,-127.825)">
+ <title>Sheet.136</title>
+ <desc>98</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="7.95203" cy="373.835" width="15.91" height="11.9384"/>
+ <path d="M15.9 367.87 L0 367.87 L0 379.8 L15.9 379.8 L15.9 367.87" class="st2"/>
+ <text x="2.42" y="376.82" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>98</text> </g>
+ <g id="shape137-409" v:mID="137" v:groupContext="shape" transform="translate(431.409,-78.1289)">
+ <title>Sheet.137</title>
+ <desc>5</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>5</text> </g>
+ <g id="shape138-413" v:mID="138" v:groupContext="shape" transform="translate(593.796,-149.818)">
+ <title>Sheet.138</title>
+ <path d="M0 313.9 L0 379.8 L41.18 379.8 L41.18 313.9 L0 313.9 L0 313.9 Z" class="st4"/>
+ </g>
+ <g id="shape139-415" v:mID="139" v:groupContext="shape" transform="translate(593.796,-149.818)">
+ <title>Sheet.139</title>
+ <path d="M0 313.9 L41.18 313.9 L41.18 379.8 L0 379.8 L0 313.9" class="st5"/>
+ </g>
+ <g id="shape140-418" v:mID="140" v:groupContext="shape" transform="translate(596.718,-200.312)">
+ <title>Sheet.140</title>
+ <desc>2</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>2</text> </g>
+ <g id="shape141-422" v:mID="141" v:groupContext="shape" transform="translate(618.904,-200.312)">
+ <title>Sheet.141</title>
+ <desc>99</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="7.95203" cy="373.835" width="15.91" height="11.9384"/>
+ <path d="M15.9 367.87 L0 367.87 L0 379.8 L15.9 379.8 L15.9 367.87" class="st2"/>
+ <text x="2.42" y="376.82" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>99</text> </g>
+ <g id="shape142-426" v:mID="142" v:groupContext="shape" transform="translate(596.478,-150.615)">
+ <title>Sheet.142</title>
+ <desc>9</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>9</text> </g>
+ <g id="shape143-430" v:mID="143" v:groupContext="shape" transform="translate(387.181,-58.0291)">
+ <title>Sheet.143</title>
+ <path d="M0 314.02 L0 379.8 L41.18 379.8 L41.18 314.02 L0 314.02 L0 314.02 Z" class="st4"/>
+ </g>
+ <g id="shape144-432" v:mID="144" v:groupContext="shape" transform="translate(387.181,-58.0291)">
+ <title>Sheet.144</title>
+ <path d="M0 314.02 L41.18 314.02 L41.18 379.8 L0 379.8 L0 314.02" class="st5"/>
+ </g>
+ <g id="shape145-435" v:mID="145" v:groupContext="shape" transform="translate(390.133,-108.466)">
+ <title>Sheet.145</title>
+ <desc>7</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>7</text> </g>
+ <g id="shape146-439" v:mID="146" v:groupContext="shape" transform="translate(412.318,-108.466)">
+ <title>Sheet.146</title>
+ <desc>97</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="7.95203" cy="373.835" width="15.91" height="11.9384"/>
+ <path d="M15.9 367.87 L0 367.87 L0 379.8 L15.9 379.8 L15.9 367.87" class="st2"/>
+ <text x="2.42" y="376.82" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>97</text> </g>
+ <g id="shape147-443" v:mID="147" v:groupContext="shape" transform="translate(389.893,-58.7692)">
+ <title>Sheet.147</title>
+ <desc>6</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.77151" cy="373.835" width="9.55" height="11.9384"/>
+ <path d="M9.54 367.87 L0 367.87 L0 379.8 L9.54 379.8 L9.54 367.87" class="st2"/>
+ <text x="2.01" y="376.82" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>6</text> </g>
+ <g id="shape148-447" v:mID="148" v:groupContext="shape" transform="translate(31.8163,-277.674)">
+ <title>Sheet.148</title>
+ <path d="M0 362.83 C0 360.95 1.52 359.43 3.41 359.43 L68.51 359.43 C70.4 359.43 71.91 360.95 71.91 362.83 L71.91 376.41
+ C71.91 378.28 70.4 379.8 68.51 379.8 L3.41 379.8 C1.52 379.8 0 378.28 0 376.41 L0 362.83 Z" class="st4"/>
+ </g>
+ <g id="shape149-449" v:mID="149" v:groupContext="shape" transform="translate(31.8163,-277.674)">
+ <title>Sheet.149</title>
+ <path d="M0 362.83 C0 360.95 1.52 359.43 3.41 359.43 L68.51 359.43 C70.4 359.43 71.91 360.95 71.91 362.83 L71.91 376.41
+ C71.91 378.28 70.4 379.8 68.51 379.8 L3.41 379.8 C1.52 379.8 0 378.28 0 376.41 L0 362.83 Z" class="st21"/>
+ </g>
+ <g id="shape150-451" v:mID="150" v:groupContext="shape" transform="translate(36,-278.851)">
+ <title>Sheet.150</title>
+ <desc>Insert key</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="35.613" cy="372.612" width="71.23" height="14.3829"/>
+ <path d="M71.23 365.42 L0 365.42 L0 379.8 L71.23 379.8 L71.23 365.42" class="st2"/>
+ <text x="9.64" y="376.21" class="st22" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Insert key </text> </g>
+ <g id="shape151-455" v:mID="151" v:groupContext="shape" transform="translate(47.7236,-257.004)">
+ <title>Sheet.151</title>
+ <path d="M0 369.44 L9.81 369.44 L9.81 359.07 L29.44 359.07 L29.44 369.44 L39.26 369.44 L19.63 379.8 L0 369.44 Z"
+ class="st23"/>
+ </g>
+ <g id="shape152-457" v:mID="152" v:groupContext="shape" transform="translate(31.8163,-236.094)">
+ <title>Sheet.152</title>
+ <path d="M0 362.73 C0 360.85 1.54 359.31 3.42 359.31 L68.49 359.31 C70.38 359.31 71.91 360.85 71.91 362.73 L71.91 376.39
+ C71.91 378.28 70.38 379.8 68.49 379.8 L3.42 379.8 C1.54 379.8 0 378.28 0 376.39 L0 362.73 Z" class="st4"/>
+ </g>
+ <g id="shape153-459" v:mID="153" v:groupContext="shape" transform="translate(31.8163,-236.094)">
+ <title>Sheet.153</title>
+ <path d="M0 362.73 C0 360.85 1.54 359.31 3.42 359.31 L68.49 359.31 C70.38 359.31 71.91 360.85 71.91 362.73 L71.91 376.39
+ C71.91 378.28 70.38 379.8 68.49 379.8 L3.42 379.8 C1.54 379.8 0 378.28 0 376.39 L0 362.73 Z" class="st21"/>
+ </g>
+ <g id="shape154-461" v:mID="154" v:groupContext="shape" transform="translate(54.6845,-237.332)">
+ <title>Sheet.154</title>
+ <desc>hash</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="16.8573" cy="372.612" width="33.72" height="14.3829"/>
+ <path d="M33.71 365.42 L0 365.42 L0 379.8 L33.71 379.8 L33.71 365.42" class="st2"/>
+ <text x="3.86" y="376.21" class="st22" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash</text> </g>
+ <g id="shape155-465" v:mID="155" v:groupContext="shape" transform="translate(23.0522,-195.232)">
+ <title>Sheet.155</title>
+ <path d="M0 363.53 C0 361.73 1.46 360.27 3.26 360.27 L87.15 360.27 C88.95 360.27 90.4 361.73 90.4 363.53 L90.4 376.55
+ C90.4 378.35 88.95 379.8 87.15 379.8 L3.26 379.8 C1.46 379.8 0 378.35 0 376.55 L0 363.53 Z" class="st4"/>
+ </g>
+ <g id="shape156-467" v:mID="156" v:groupContext="shape" transform="translate(23.0522,-195.232)">
+ <title>Sheet.156</title>
+ <path d="M0 363.53 C0 361.73 1.46 360.27 3.26 360.27 L87.15 360.27 C88.95 360.27 90.4 361.73 90.4 363.53 L90.4 376.55
+ C90.4 378.35 88.95 379.8 87.15 379.8 L3.26 379.8 C1.46 379.8 0 378.35 0 376.55 L0 363.53 Z" class="st21"/>
+ </g>
+ <g id="shape157-469" v:mID="157" v:groupContext="shape" transform="translate(27,-196.017)">
+ <title>Sheet.157</title>
+ <desc>0x0102ABCD</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="43.6644" cy="372.612" width="87.33" height="14.3829"/>
+ <path d="M87.33 365.42 L0 365.42 L0 379.8 L87.33 379.8 L87.33 365.42" class="st2"/>
+ <text x="7.36" y="376.21" class="st22" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0x0102ABCD</text> </g>
+ <g id="shape158-473" v:mID="158" v:groupContext="shape" transform="translate(47.7236,-214.824)">
+ <title>Sheet.158</title>
+ <path d="M0 369.5 L9.81 369.5 L9.81 359.19 L29.44 359.19 L29.44 369.5 L39.26 369.5 L19.63 379.8 L0 369.5 Z"
+ class="st23"/>
+ </g>
+ <g id="shape159-475" v:mID="159" v:groupContext="shape" transform="translate(49.8539,-181.212)">
+ <title>Sheet.159</title>
+ <path d="M11.89 368.42 C11.89 371.57 11.47 374.11 10.94 374.11 L6.9 374.11 C6.37 374.11 5.94 376.67 5.94 379.8 C5.94
+ 376.67 5.52 374.11 5 374.11 L0.95 374.11 C0.43 374.11 0 371.57 0 368.42" class="st24"/>
+ </g>
+ <g id="shape160-478" v:mID="160" v:groupContext="shape" transform="translate(64.2606,-180.973)">
+ <title>Sheet.160</title>
+ <path d="M9.54 368.54 C9.54 371.66 9.21 374.17 8.79 374.17 L5.53 374.17 C5.11 374.17 4.77 376.7 4.77 379.8 C4.77 376.7
+ 4.43 374.17 4.02 374.17 L0.76 374.17 C0.34 374.17 0 371.66 0 368.54" class="st24"/>
+ </g>
+ <g id="shape161-481" v:mID="161" v:groupContext="shape" transform="translate(18.19,-60.9649)">
+ <title>Sheet.161</title>
+ <path d="M0 354.74 C0 351.97 2.25 349.73 5.03 349.73 L10.77 349.73 L30.27 267.14 L26.92 349.73 L59.58 349.73 C62.35 349.73
+ 64.59 351.97 64.59 354.74 L64.59 354.74 L64.59 362.26 L64.59 374.8 C64.59 377.57 62.35 379.8 59.58 379.8
+ L26.92 379.8 L10.77 379.8 L10.77 379.8 L5.03 379.8 C2.25 379.8 0 377.57 0 374.8 L0 362.26 L0 354.74 L0 354.74
+ Z" class="st23"/>
+ </g>
+ <g id="shape162-483" v:mID="162" v:groupContext="shape" transform="translate(28.141,-66.9569)">
+ <title>Sheet.162</title>
+ <desc>chunk id</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="27.5794" cy="372.612" width="55.16" height="14.3829"/>
+ <path d="M55.16 365.42 L0 365.42 L0 379.8 L55.16 379.8 L55.16 365.42" class="st2"/>
+ <text x="5.26" y="376.21" class="st25" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>chunk id</text> </g>
+ <g id="shape163-487" v:mID="163" v:groupContext="shape" transform="translate(50.8451,-112.132)">
+ <title>Sheet.163</title>
+ <path d="M0 354.64 C0 351.87 2.27 349.61 5.04 349.61 L10.74 349.61 L16.27 313.66 L26.86 349.61 L59.43 349.61 C62.22 349.61
+ 64.47 351.87 64.47 354.64 L64.47 354.64 L64.47 362.19 L64.47 374.77 C64.47 377.56 62.22 379.8 59.43 379.8
+ L26.86 379.8 L10.74 379.8 L10.74 379.8 L5.04 379.8 C2.27 379.8 0 377.56 0 374.77 L0 362.19 L0 354.64 L0
+ 354.64 Z" class="st23"/>
+ </g>
+ <g id="shape164-489" v:mID="164" v:groupContext="shape" transform="translate(68.8168,-118.181)">
+ <title>Sheet.164</title>
+ <desc>bin id</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="18.3881" cy="372.612" width="36.78" height="14.3829"/>
+ <path d="M36.78 365.42 L0 365.42 L0 379.8 L36.78 379.8 L36.78 365.42" class="st2"/>
+ <text x="4.06" y="376.21" class="st25" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>bin id</text> </g>
+ <g id="shape165-493" v:mID="165" v:groupContext="shape" transform="translate(113.454,-225.085)">
+ <title>Sheet.165</title>
+ <path d="M0.01 375.68 L13.23 375.73 L13.22 377.77 L0 377.72 L0.01 375.68 L0.01 375.68 ZM12.22 373.69 L18.33 376.76 L12.2
+ 379.8 L12.22 373.69 L12.22 373.69 Z" class="st26"/>
+ </g>
+ <g id="shape166-495" v:mID="166" v:groupContext="shape" transform="translate(200.975,-280.969)">
+ <title>Sheet.166</title>
+ <path d="M0 375.73 L20.11 375.73 L20.11 377.77 L0 377.77 L0 375.73 L0 375.73 ZM19.09 373.69 L25.21 376.75 L19.09 379.8
+ L19.09 373.69 L19.09 373.69 Z" class="st26"/>
+ </g>
+ <g id="shape167-497" v:mID="167" v:groupContext="shape" transform="translate(275.739,-179.745)">
+ <title>Sheet.167</title>
+ <path d="M0.81 274.59 L231.38 376.48 L230.54 378.37 L0 276.48 L0.81 274.59 L0.81 274.59 ZM231.26 374.2 L235.64 379.47
+ L228.8 379.8 L231.26 374.2 L231.26 374.2 Z" class="st27"/>
+ </g>
+ <g id="shape168-499" v:mID="168" v:groupContext="shape" transform="translate(521.823,-96.8834)">
+ <title>Sheet.168</title>
+ <path d="M127.17 309.02 L127.17 378.79 C127.17 379.35 126.72 379.8 126.15 379.8 L3.06 379.8 C2.52 379.8 2.04 379.35 2.04
+ 378.79 L2.04 369.59 L4.08 369.59 L4.08 378.79 L3.06 377.77 L126.15 377.77 L125.13 378.79 L125.13 309.02
+ L127.17 309.02 ZM0 370.61 L3.06 364.5 L6.12 370.61 L0 370.61 Z" class="st28"/>
+ </g>
+ <g id="shape169-501" v:mID="169" v:groupContext="shape" transform="translate(478.603,-39.7553)">
+ <title>Sheet.169</title>
+ <path d="M0 347.57 C0 344.01 2.91 341.1 6.48 341.1 L237.86 341.1 C241.43 341.1 244.31 344.01 244.31 347.57 L244.31 373.36
+ C244.31 376.93 241.43 379.8 237.86 379.8 L6.48 379.8 C2.91 379.8 0 376.93 0 373.36 L0 347.57 Z"
+ class="st23"/>
+ </g>
+ <g id="shape170-503" v:mID="170" v:groupContext="shape" transform="translate(487.717,-45.5378)">
+ <title>Sheet.170</title>
+ <desc>Move bin from group 1 to 4</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="126.387" cy="369.018" width="252.78" height="21.5726"/>
+ <path d="M252.77 358.23 L0 358.23 L0 379.8 L252.77 379.8 L252.77 358.23" class="st2"/>
+ <text x="18.98" y="374.41" class="st29" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Move bin from group 1 to 4</text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/efd_i2.svg b/doc/guides/prog_guide/img/efd_i2.svg
new file mode 100644
index 00000000..a5f43f94
--- /dev/null
+++ b/doc/guides/prog_guide/img/efd_i2.svg
@@ -0,0 +1,280 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export efd_i2.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="2.85156in" height="2.98777in"
+ viewBox="0 0 205.313 215.12" xml:space="preserve" color-interpolation-filters="sRGB" class="st18">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud v:nameU="msvSubprocessMaster" v:prompt="" v:val="VT4(Rectangle)"/>
+ <v:ud v:nameU="msvNoAutoConnect" v:val="VT0(1):26"/>
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#deebf6;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st5 {fill:#ff0000;stroke:#c7c8c8;stroke-width:0.25}
+ .st6 {fill:none;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st7 {fill:none;stroke:#0070c0;stroke-width:1.5}
+ .st8 {marker-end:url(#mrkr5-91);stroke:#0070c0;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.5}
+ .st9 {fill:#0070c0;fill-opacity:1;stroke:#0070c0;stroke-opacity:1;stroke-width:0.37313432835821}
+ .st10 {fill:none;stroke:none;stroke-width:0.25}
+ .st11 {fill:#ff0000;font-family:Calibri;font-size:1.00001em;font-weight:bold}
+ .st12 {font-size:1em}
+ .st13 {marker-end:url(#mrkr5-101);stroke:#ff0000;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st14 {fill:#ff0000;fill-opacity:1;stroke:#ff0000;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st15 {fill:#5b9bd5;font-family:Calibri;font-size:1.00001em;font-weight:bold}
+ .st16 {marker-end:url(#mrkr5-110);stroke:#41719c;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st17 {fill:#41719c;fill-opacity:1;stroke:#41719c;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st18 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-91" class="st9" v:arrowType="5" v:arrowSize="2" v:setback="4.45" refX="-4.45" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-2.68,-2.68) "/>
+ </marker>
+ <marker id="mrkr5-101" class="st14" v:arrowType="5" v:arrowSize="2" v:setback="6.16" refX="-6.16" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ <marker id="mrkr5-110" class="st17" v:arrowType="5" v:arrowSize="2" v:setback="6.16" refX="-6.16" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ </defs>
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeOrder" v:val="VT0(0):26"/>
+ </v:userDefs>
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="shape2-1" v:mID="2" v:groupContext="shape" transform="translate(24.4044,-42.7174)">
+ <title>Circle</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow2-2" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 138.62 A76.5 76.5 0 0 1 153 138.62 A76.5 76.5 0 1 1 0 138.62 Z" class="st2"/>
+ </g>
+ <path d="M0 138.62 A76.5 76.5 0 0 1 153 138.62 A76.5 76.5 0 1 1 0 138.62 Z" class="st3"/>
+ </g>
+ <g id="shape3-6" v:mID="3" v:groupContext="shape" transform="translate(24.4044,-144.53)">
+ <title>Circle.3</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow3-7" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st4"/>
+ </g>
+ <g id="shape4-11" v:mID="4" v:groupContext="shape" transform="translate(21.0294,-102.342)">
+ <title>Circle.4</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow4-12" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st4"/>
+ </g>
+ <g id="shape5-16" v:mID="5" v:groupContext="shape" transform="translate(69.4044,-183.342)">
+ <title>Circle.5</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow5-17" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st4"/>
+ </g>
+ <g id="shape6-21" v:mID="6" v:groupContext="shape" transform="translate(117.217,-183.342)">
+ <title>Circle.6</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow6-22" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st5"/>
+ </g>
+ <g id="shape7-26" v:mID="7" v:groupContext="shape" transform="translate(171.217,-104.03)">
+ <title>Circle.7</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow7-27" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st5"/>
+ </g>
+ <g id="shape8-31" v:mID="8" v:groupContext="shape" transform="translate(109.904,-38.2174)">
+ <title>Circle.8</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow8-32" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st5"/>
+ </g>
+ <g id="shape9-36" v:mID="9" v:groupContext="shape" transform="translate(21.0294,-124.842)">
+ <title>Circle.9</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow9-37" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st5"/>
+ </g>
+ <g id="shape10-41" v:mID="10" v:groupContext="shape" transform="translate(147.029,-168.717)">
+ <title>Circle.10</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow10-42" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st4"/>
+ </g>
+ <g id="shape11-46" v:mID="11" v:groupContext="shape" transform="translate(138.029,-48.3424)">
+ <title>Circle.11</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow11-47" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st4"/>
+ </g>
+ <g id="shape12-51" v:mID="12" v:groupContext="shape" transform="translate(160.529,-74.2174)">
+ <title>Circle.12</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow12-52" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st4"/>
+ </g>
+ <g id="shape13-56" v:mID="13" v:groupContext="shape" transform="translate(40.7169,-57.3424)">
+ <title>Circle.13</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow13-57" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st4"/>
+ </g>
+ <g id="shape14-61" v:mID="14" v:groupContext="shape" transform="translate(42.4044,-168.717)">
+ <title>Circle.14</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow14-62" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st4"/>
+ </g>
+ <g id="shape15-66" v:mID="15" v:groupContext="shape" transform="translate(66.0294,-42.7174)">
+ <title>Circle.15</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow15-67" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st4"/>
+ </g>
+ <g id="shape16-71" v:mID="16" v:groupContext="shape" transform="translate(25.5294,-79.8424)">
+ <title>Circle.16</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow16-72" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st4"/>
+ </g>
+ <g id="shape17-76" v:mID="17" v:groupContext="shape" transform="translate(165.029,-143.405)">
+ <title>Circle.17</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow17-77" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st2"/>
+ </g>
+ <path d="M0 208.93 A6.1875 6.1875 0 1 1 12.37 208.93 A6.1875 6.1875 0 0 1 0 208.93 Z" class="st4"/>
+ </g>
+ <g id="shape18-81" v:mID="18" v:groupContext="shape" transform="translate(276.618,4.50201) rotate(45)">
+ <title>Ellipse</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow18-82" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,1.63935,1.1506)" class="st1">
+ <path d="M0 187.01 A14.7383 28.1086 0 1 1 29.48 187.01 A14.7383 28.1086 0 1 1 0 187.01 Z" class="st6"/>
+ </g>
+ <path d="M0 187.01 A14.7383 28.1086 0 1 1 29.48 187.01 A14.7383 28.1086 0 1 1 0 187.01 Z" class="st7"/>
+ </g>
+ <g id="shape19-86" v:mID="19" v:groupContext="shape" transform="translate(251.273,355.436) rotate(156.038)">
+ <title>Sheet.19</title>
+ <path d="M-0 215.12 A73.4538 31.2572 85.43 0 1 40.92 208.96 L41.1 209.27" class="st8"/>
+ </g>
+ <g id="shape20-92" v:mID="20" v:groupContext="shape" transform="translate(62.705,-78.7174)">
+ <title>Sheet.20</title>
+ <desc>Target Hashed Value</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="42.6994" cy="203.87" width="85.4" height="22.5"/>
+ <rect x="0" y="192.62" width="85.3987" height="22.5" class="st10"/>
+ <text x="6.73" y="200.27" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Target Hashed <tspan
+ x="28.48" dy="1.2em" class="st12">Value</tspan></text> </g>
+ <g id="shape21-96" v:mID="21" v:groupContext="shape" transform="translate(314.101,88.728) rotate(75.9638)">
+ <title>Sheet.21</title>
+ <path d="M0 215.12 L16.92 215.12" class="st13"/>
+ </g>
+ <g id="shape23-102" v:mID="23" v:groupContext="shape" transform="translate(60.4044,-138.342)">
+ <title>Sheet.23</title>
+ <desc>Keys</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="24.75" cy="203.87" width="49.5" height="22.5"/>
+ <rect x="0" y="192.62" width="49.5" height="22.5" class="st10"/>
+ <text x="13.21" y="207.47" class="st15" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Keys</text> </g>
+ <g id="shape24-105" v:mID="24" v:groupContext="shape" transform="translate(-125.293,114.034) rotate(-104.574)">
+ <title>Sheet.24</title>
+ <path d="M0 215.12 L22.9 215.12" class="st16"/>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/efd_i3.svg b/doc/guides/prog_guide/img/efd_i3.svg
new file mode 100644
index 00000000..ae229037
--- /dev/null
+++ b/doc/guides/prog_guide/img/efd_i3.svg
@@ -0,0 +1,634 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export efd_i3.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="6.56036in" height="5.44284in"
+ viewBox="0 0 472.346 391.884" xml:space="preserve" color-interpolation-filters="sRGB" class="st22">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
+ .st5 {marker-end:url(#mrkr5-24);stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st6 {fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st7 {fill:none;stroke:#2e75b5;stroke-width:1}
+ .st8 {fill:#5b9bd5;font-family:Calibri;font-size:1.00001em}
+ .st9 {font-size:1em}
+ .st10 {fill:none;stroke:none;stroke-width:1}
+ .st11 {fill:#feffff;font-family:Calibri;font-size:1.00001em;font-weight:bold}
+ .st12 {fill:#5b9bd5;fill-opacity:0.25;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.25}
+ .st13 {fill:#4f87bb;stroke:#40709c;stroke-width:0.75}
+ .st14 {fill:#feffff;font-family:Calibri;font-size:0.75em}
+ .st15 {fill:none;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st16 {fill:none;stroke:#2e75b5;stroke-width:2.25}
+ .st17 {stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st18 {fill:#305497;stroke:#2e75b5;stroke-width:1}
+ .st19 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:none}
+ .st20 {fill:#92d050;fill-opacity:0.3;stroke:none;stroke-width:0.25}
+ .st21 {fill:#feffff;font-family:Calibri;font-size:1.16666em}
+ .st22 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-24" class="st6" v:arrowType="5" v:arrowSize="2" v:setback="6.16" refX="-6.16" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ </defs>
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <v:layer v:name="Connector" v:index="0"/>
+ <g id="shape2-1" v:mID="2" v:groupContext="shape" transform="translate(111.25,-354.482)">
+ <title>Rectangle</title>
+ <desc>Packet Header</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="42.75" cy="382.884" width="85.5" height="18"/>
+ <g id="shadow2-2" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="85.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="85.5" height="18" class="st3"/>
+ <text x="13.24" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Packet Header</text> </g>
+ <g id="shape3-7" v:mID="3" v:groupContext="shape" transform="translate(192.25,-354.482)">
+ <title>Rectangle.3</title>
+ <desc>Payload</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="382.884" width="108" height="18"/>
+ <g id="shadow3-8" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="108" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="108" height="18" class="st3"/>
+ <text x="37.95" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Payload</text> </g>
+ <g id="shape4-13" v:mID="4" v:groupContext="shape" transform="translate(136,-311.232)">
+ <title>Rectangle.4</title>
+ <desc>Flow Key</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="27" cy="382.884" width="54" height="18"/>
+ <g id="shadow4-14" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="54" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="54" height="18" class="st3"/>
+ <text x="8.87" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Flow Key</text> </g>
+ <g id="shape5-19" v:mID="5" v:groupContext="shape" transform="translate(465.501,-160.057) rotate(59.7436)">
+ <title>Sheet.5</title>
+ <path d="M0 391.88 L25.1 391.88" class="st5"/>
+ </g>
+ <g id="shape8-25" v:mID="8" v:groupContext="shape" transform="translate(219.25,-320.169)">
+ <title>Sheet.8</title>
+ <desc>Fields of the packet are used to form a flow Key</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="67.5" cy="377.822" width="135" height="28.125"/>
+ <rect x="0" y="363.759" width="135" height="28.125" class="st7"/>
+ <text x="10.7" y="374.22" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Fields of the packet are <tspan
+ x="9.67" dy="1.2em" class="st9">used to form a flow Key</tspan></text> </g>
+ <g id="group13-29" transform="translate(120.25,-266.897)" v:mID="13" v:groupContext="group">
+ <title>Sheet.13</title>
+ <g id="shape11-30" v:mID="11" v:groupContext="shape" transform="translate(85.5,751.143) rotate(180)">
+ <title>Trapezoid</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow11-31" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,-0.345598,-1.97279)" class="st1">
+ <path d="M0 391.88 L85.5 391.88 L60.19 359.26 L25.31 359.26 L0 391.88 Z" class="st2"/>
+ </g>
+ <path d="M0 391.88 L85.5 391.88 L60.19 359.26 L25.31 359.26 L0 391.88 Z" class="st3"/>
+ </g>
+ <g id="shape12-35" v:mID="12" v:groupContext="shape" transform="translate(13.5,-6.525)">
+ <title>Sheet.12</title>
+ <desc>H(..)</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="27" cy="381.689" width="54" height="20.3906"/>
+ <rect x="0" y="371.494" width="54" height="20.3906" class="st10"/>
+ <text x="16.27" y="385.29" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>H(..)</text> </g>
+ </g>
+ <g id="shape14-38" v:mID="14" v:groupContext="shape" transform="translate(-229.872,96.3648) rotate(-90.0429)">
+ <title>Simple Arrow</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ArrowType" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <g id="shadow14-39" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,-1.97305,0.344122)" class="st1">
+ <path d="M0 391.88 L10.18 387.38 L10.18 389.63 L16.71 389.63 L16.71 391.88 L16.71 394.13 L10.18 394.13 L10.18 396.38
+ L0 391.88 Z" class="st12"/>
+ </g>
+ <path d="M0 391.88 L10.18 387.38 L10.18 389.63 L16.71 389.63 L16.71 391.88 L16.71 394.13 L10.18 394.13 L10.18 396.38
+ L0 391.88 Z" class="st13"/>
+ </g>
+ <g id="shape15-43" v:mID="15" v:groupContext="shape" transform="translate(212.5,-271.46)">
+ <title>Sheet.15</title>
+ <desc>Hash function is used to create a flow table index</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="67.5" cy="377.822" width="135" height="28.125"/>
+ <rect x="0" y="363.759" width="135" height="28.125" class="st7"/>
+ <text x="9.05" y="374.22" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Hash function is used to <tspan
+ x="7.39" dy="1.2em" class="st9">create a flow table index</tspan></text> </g>
+ <g id="shape58-47" v:mID="58" v:groupContext="shape" transform="translate(199,-221.397)">
+ <title>Rectangle.58</title>
+ <desc>Key 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="382.884" width="31.5" height="18"/>
+ <g id="shadow58-48" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ <text x="4.74" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key 1</text> </g>
+ <g id="shape59-53" v:mID="59" v:groupContext="shape" transform="translate(232.75,-221.397)">
+ <title>Rectangle.59</title>
+ <desc>Action 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="382.884" width="42.75" height="18"/>
+ <g id="shadow59-54" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="42.75" height="18" class="st3"/>
+ <text x="4.62" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action 1</text> </g>
+ <g id="shape60-59" v:mID="60" v:groupContext="shape" transform="translate(280,-221.397)">
+ <title>Rectangle.60</title>
+ <desc>Key 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="382.884" width="31.5" height="18"/>
+ <g id="shadow60-60" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ <text x="4.74" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key 2</text> </g>
+ <g id="shape61-65" v:mID="61" v:groupContext="shape" transform="translate(313.75,-221.397)">
+ <title>Rectangle.61</title>
+ <desc>Action 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="382.884" width="42.75" height="18"/>
+ <g id="shadow61-66" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="42.75" height="18" class="st3"/>
+ <text x="4.62" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action 2</text> </g>
+ <g id="shape62-71" v:mID="62" v:groupContext="shape" transform="translate(361,-221.397)">
+ <title>Rectangle.62</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow62-72" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape63-76" v:mID="63" v:groupContext="shape" transform="translate(394.75,-221.397)">
+ <title>Rectangle.63</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow63-77" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape64-81" v:mID="64" v:groupContext="shape" transform="translate(199,-198.897)">
+ <title>Rectangle.64</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow64-82" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape65-86" v:mID="65" v:groupContext="shape" transform="translate(232.75,-198.897)">
+ <title>Rectangle.65</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow65-87" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape66-91" v:mID="66" v:groupContext="shape" transform="translate(280,-198.897)">
+ <title>Rectangle.66</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow66-92" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape67-96" v:mID="67" v:groupContext="shape" transform="translate(313.75,-198.897)">
+ <title>Rectangle.67</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow67-97" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape68-101" v:mID="68" v:groupContext="shape" transform="translate(361,-198.897)">
+ <title>Rectangle.68</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow68-102" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape69-106" v:mID="69" v:groupContext="shape" transform="translate(394.75,-198.897)">
+ <title>Rectangle.69</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow69-107" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape70-111" v:mID="70" v:groupContext="shape" transform="translate(199,-162.897)">
+ <title>Rectangle.70</title>
+ <desc>Key x</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="382.884" width="31.5" height="18"/>
+ <g id="shadow70-112" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ <text x="5.11" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key x</text> </g>
+ <g id="shape71-117" v:mID="71" v:groupContext="shape" transform="translate(232.75,-162.897)">
+ <title>Rectangle.71</title>
+ <desc>Action x</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="382.884" width="42.75" height="18"/>
+ <g id="shadow71-118" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="42.75" height="18" class="st3"/>
+ <text x="4.99" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action x</text> </g>
+ <g id="shape72-123" v:mID="72" v:groupContext="shape" transform="translate(280,-162.897)">
+ <title>Rectangle.72</title>
+ <desc>Key y</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="382.884" width="31.5" height="18"/>
+ <g id="shadow72-124" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ <text x="5.01" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key y</text> </g>
+ <g id="shape73-129" v:mID="73" v:groupContext="shape" transform="translate(313.75,-162.897)">
+ <title>Rectangle.73</title>
+ <desc>Action y</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="382.884" width="42.75" height="18"/>
+ <g id="shadow73-130" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="42.75" height="18" class="st3"/>
+ <text x="4.89" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action y</text> </g>
+ <g id="shape74-135" v:mID="74" v:groupContext="shape" transform="translate(361,-162.897)">
+ <title>Rectangle.74</title>
+ <desc>Key z</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="382.884" width="31.5" height="18"/>
+ <g id="shadow74-136" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ <text x="5.3" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key z</text> </g>
+ <g id="shape75-141" v:mID="75" v:groupContext="shape" transform="translate(394.75,-162.897)">
+ <title>Rectangle.75</title>
+ <desc>Action z</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="382.884" width="42.75" height="18"/>
+ <g id="shadow75-142" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="42.75" height="18" class="st3"/>
+ <text x="5.18" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action z</text> </g>
+ <g id="shape76-147" v:mID="76" v:groupContext="shape" transform="translate(199,-126.397)">
+ <title>Rectangle.76</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow76-148" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape77-152" v:mID="77" v:groupContext="shape" transform="translate(232.75,-126.397)">
+ <title>Rectangle.77</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow77-153" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape78-157" v:mID="78" v:groupContext="shape" transform="translate(280,-126.397)">
+ <title>Rectangle.78</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow78-158" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape79-162" v:mID="79" v:groupContext="shape" transform="translate(313.75,-126.397)">
+ <title>Rectangle.79</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow79-163" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape80-167" v:mID="80" v:groupContext="shape" transform="translate(361,-126.397)">
+ <title>Rectangle.80</title>
+ <desc>Key N</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="382.884" width="31.5" height="18"/>
+ <g id="shadow80-168" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ <text x="5.21" y="385.58" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key N</text> </g>
+ <g id="shape81-173" v:mID="81" v:groupContext="shape" transform="translate(394.75,-126.397)">
+ <title>Rectangle.81</title>
+ <desc>Action N</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="382.884" width="42.75" height="18"/>
+ <g id="shadow81-174" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="42.75" height="18" class="st3"/>
+ <text x="5.67" y="385.58" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action N</text> </g>
+ <g id="shape82-179" v:mID="82" v:groupContext="shape" transform="translate(196.75,-117.397)">
+ <title>Rectangle.82</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow82-180" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="256.384" width="245.25" height="135.5" class="st15"/>
+ </g>
+ <rect x="0" y="256.384" width="245.25" height="135.5" class="st16"/>
+ </g>
+ <g id="shape83-184" v:mID="83" v:groupContext="shape" transform="translate(554.884,123.862) rotate(90)">
+ <title>Sheet.83</title>
+ <path d="M0 391.88 L99 391.88" class="st17"/>
+ </g>
+ <g id="shape84-187" v:mID="84" v:groupContext="shape" transform="translate(208,-248.397)">
+ <title>Sheet.84</title>
+ <desc>Load Balancing Flow Table</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="91.75" cy="386.259" width="183.5" height="11.25"/>
+ <rect x="0" y="380.634" width="183.5" height="11.25" class="st18"/>
+ <text x="26.14" y="389.86" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Load Balancing Flow Table</text> </g>
+ <g id="shape85-190" v:mID="85" v:groupContext="shape" transform="translate(190,-157.835)">
+ <title>Rectangle.85</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow85-191" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="363.759" width="261" height="28.125" class="st19"/>
+ </g>
+ <rect x="0" y="363.759" width="261" height="28.125" class="st20"/>
+ </g>
+ <g id="shape86-195" v:mID="86" v:groupContext="shape" transform="translate(163,-169.022)">
+ <title>Sheet.86</title>
+ <path d="M0 391.88 L18.76 391.88" class="st5"/>
+ </g>
+ <g id="shape87-200" v:mID="87" v:groupContext="shape" transform="translate(19,-198.107)">
+ <title>Sheet.87</title>
+ <desc>Hash value used to index Flow table</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="67.5" cy="377.822" width="135" height="28.125"/>
+ <rect x="0" y="363.759" width="135" height="28.125" class="st7"/>
+ <text x="6.79" y="374.22" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Hash value used to index <tspan
+ x="42.16" dy="1.2em" class="st9">Flow table</tspan></text> </g>
+ <g id="shape88-204" v:mID="88" v:groupContext="shape" transform="translate(551.381,21.2928) rotate(87.9001)">
+ <title>Sheet.88</title>
+ <path d="M0 391.88 L20.86 391.88" class="st5"/>
+ </g>
+ <g id="shape89-209" v:mID="89" v:groupContext="shape" transform="translate(494.785,297.309) rotate(131.987)">
+ <title>Sheet.89</title>
+ <path d="M0 391.88 L30.84 391.88" class="st5"/>
+ </g>
+ <g id="shape90-214" v:mID="90" v:groupContext="shape" transform="translate(228.25,-92.5847)">
+ <title>Rectangle.90</title>
+ <desc>Key x</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="382.884" width="31.5" height="18"/>
+ <g id="shadow90-215" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ <text x="5.11" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key x</text> </g>
+ <g id="shape91-220" v:mID="91" v:groupContext="shape" transform="translate(340.75,-92.5847)">
+ <title>Rectangle.91</title>
+ <desc>Key z</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="382.884" width="31.5" height="18"/>
+ <g id="shadow91-221" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ <text x="5.3" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key z</text> </g>
+ <g id="group96-226" transform="translate(253,-51.4597)" v:mID="96" v:groupContext="group">
+ <title>Sheet.96</title>
+ <g id="shape97-227" v:mID="97" v:groupContext="shape" transform="translate(85.5,751.143) rotate(180)">
+ <title>Trapezoid</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow97-228" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,-0.345598,-1.97279)" class="st1">
+ <path d="M0 391.88 L85.5 391.88 L60.19 359.26 L25.31 359.26 L0 391.88 Z" class="st2"/>
+ </g>
+ <path d="M0 391.88 L85.5 391.88 L60.19 359.26 L25.31 359.26 L0 391.88 Z" class="st3"/>
+ </g>
+ <g id="shape98-232" v:mID="98" v:groupContext="shape" transform="translate(13.5,-6.525)">
+ <title>Sheet.98</title>
+ <desc>Match</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="27" cy="381.689" width="54" height="20.3906"/>
+ <rect x="0" y="371.494" width="54" height="20.3906" class="st10"/>
+ <text x="10.98" y="385.29" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Match</text> </g>
+ </g>
+ <g id="shape99-235" v:mID="99" v:groupContext="shape" transform="translate(532.137,0.00916548) rotate(54.6508)">
+ <title>Sheet.99</title>
+ <path d="M0 391.88 L93.23 391.88" class="st5"/>
+ </g>
+ <g id="shape100-240" v:mID="100" v:groupContext="shape" transform="translate(683.134,224.487) rotate(90)">
+ <title>Sheet.100</title>
+ <path d="M0 391.88 L77.15 391.88" class="st5"/>
+ </g>
+ <g id="shape101-245" v:mID="101" v:groupContext="shape" transform="translate(692.213,476.024) rotate(129.078)">
+ <title>Sheet.101</title>
+ <path d="M0 391.88 L95.37 391.88" class="st5"/>
+ </g>
+ <g id="shape102-250" v:mID="102" v:groupContext="shape" transform="translate(293.5,-97.0847)">
+ <title>Rectangle.102</title>
+ <desc>Key y</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="382.884" width="31.5" height="18"/>
+ <g id="shadow102-251" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="31.5" height="18" class="st3"/>
+ <text x="5.01" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key y</text> </g>
+ <g id="shape103-256" v:mID="103" v:groupContext="shape" transform="translate(169.75,-55.9597)">
+ <title>Rectangle.103</title>
+ <desc>Flow Key</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="27" cy="382.884" width="54" height="18"/>
+ <g id="shadow103-257" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="54" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="54" height="18" class="st3"/>
+ <text x="8.87" y="385.88" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Flow Key</text> </g>
+ <g id="shape104-262" v:mID="104" v:groupContext="shape" transform="translate(226,-64.9597)">
+ <title>Sheet.104</title>
+ <path d="M0 391.88 L34.34 391.88" class="st5"/>
+ </g>
+ <g id="shape105-267" v:mID="105" v:groupContext="shape" transform="translate(54,-82.4597)">
+ <title>Sheet.105</title>
+ <desc>Retrieved keys are matched with input key</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="67.5" cy="377.822" width="135" height="28.125"/>
+ <rect x="0" y="363.759" width="135" height="28.125" class="st7"/>
+ <text x="22.51" y="374.22" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Retrieved keys are <tspan
+ x="9.83" dy="1.2em" class="st9">matched with input key</tspan></text> </g>
+ <g id="shape106-271" v:mID="106" v:groupContext="shape" transform="translate(271,-23.9597)">
+ <title>Rectangle.106</title>
+ <desc>Action</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="27" cy="382.884" width="54" height="18"/>
+ <g id="shadow106-272" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="373.884" width="54" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="373.884" width="54" height="18" class="st3"/>
+ <text x="8.67" y="387.08" class="st21" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action</text> </g>
+ <g id="shape111-277" v:mID="111" v:groupContext="shape" transform="translate(-94.8716,350.902) rotate(-90.0429)">
+ <title>Simple Arrow.111</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ArrowType" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <g id="shadow111-278" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,-1.97305,0.344122)" class="st1">
+ <path d="M0 391.88 L10.18 387.38 L10.18 389.63 L16.71 389.63 L16.71 391.88 L16.71 394.13 L10.18 394.13 L10.18 396.38
+ L0 391.88 Z" class="st12"/>
+ </g>
+ <path d="M0 391.88 L10.18 387.38 L10.18 389.63 L16.71 389.63 L16.71 391.88 L16.71 394.13 L10.18 394.13 L10.18 396.38
+ L0 391.88 Z" class="st13"/>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/efd_i4.svg b/doc/guides/prog_guide/img/efd_i4.svg
new file mode 100644
index 00000000..5be5ccd7
--- /dev/null
+++ b/doc/guides/prog_guide/img/efd_i4.svg
@@ -0,0 +1,203 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export efd_i4.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="2.78993in" height="1.78151in"
+ viewBox="0 0 200.875 128.269" xml:space="preserve" color-interpolation-filters="sRGB" class="st19">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud v:nameU="msvSubprocessMaster" v:prompt="" v:val="VT4(Rectangle)"/>
+ <v:ud v:nameU="msvNoAutoConnect" v:val="VT0(1):26"/>
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {fill:none;stroke:none;stroke-width:0.25}
+ .st2 {fill:#5b9bd5;font-family:Calibri;font-size:0.75em}
+ .st3 {font-size:1em}
+ .st4 {fill:#5b9bd5;font-family:Calibri;font-size:0.75em;font-weight:bold}
+ .st5 {fill:#deebf6;stroke:none;stroke-width:0.25}
+ .st6 {stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st7 {stroke:#5b9bd5;stroke-dasharray:0.75,1.5;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st8 {fill:#ff0000;font-size:1em}
+ .st9 {baseline-shift:-28.8834%;font-size:0.577667em}
+ .st10 {fill:#ff0000;font-family:Calibri;font-size:0.75em}
+ .st11 {fill:#5b9bd5;font-size:1em}
+ .st12 {visibility:visible}
+ .st13 {fill:#5b9bd5;fill-opacity:0.25;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.25}
+ .st14 {fill:url(#grad0-73);stroke:#40709c;stroke-width:0.75}
+ .st15 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
+ .st16 {fill:#00fefe;font-size:1em}
+ .st17 {fill:#00b050}
+ .st18 {stroke:#ff0000;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st19 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Patterns_And_Gradients">
+ <linearGradient id="grad0-73" x1="0" y1="0" x2="1" y2="0" gradientTransform="rotate(250 0.5 0.5)">
+ <stop offset="0" stop-color="#4f87bb" stop-opacity="1"/>
+ <stop offset="0.48" stop-color="#4f87bb" stop-opacity="1"/>
+ <stop offset="0.82" stop-color="#5b9bd5" stop-opacity="1"/>
+ </linearGradient>
+ </defs>
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeOrder" v:val="VT0(0):26"/>
+ </v:userDefs>
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="shape2-1" v:mID="2" v:groupContext="shape" transform="translate(18.25,-59.3478)">
+ <title>Sheet.2</title>
+ <desc>Key 1 Key 2 ... Key 28</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="18" cy="121.519" width="36" height="13.5"/>
+ <rect x="0" y="114.769" width="36" height="13.5" class="st1"/>
+ <text x="8.09" y="108.02" class="st2" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key 1<v:newlineChar/><tspan
+ x="8.09" dy="1.2em" class="st3">Key </tspan>2<v:newlineChar/><tspan x="14.59" dy="1.2em" class="st3">...<v:newlineChar/></tspan><tspan
+ x="5.81" dy="1.2em" class="st3">Key </tspan>28</text> </g>
+ <g id="shape9-7" v:mID="9" v:groupContext="shape" transform="translate(52,-91.9728)">
+ <title>Sheet.9</title>
+ <desc>Target Value</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="17.4375" cy="122.644" width="34.88" height="11.25"/>
+ <rect x="0" y="117.019" width="34.875" height="11.25" class="st1"/>
+ <text x="5.43" y="119.94" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Target <tspan x="6.77"
+ dy="1.2em" class="st3">Value</tspan></text> </g>
+ <g id="shape11-11" v:mID="11" v:groupContext="shape" transform="translate(52,-42.4728)">
+ <title>Sheet.11</title>
+ <desc>0 1 0</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="17.4375" cy="105.769" width="34.88" height="45"/>
+ <rect x="0" y="83.2689" width="34.875" height="45" class="st5"/>
+ <text x="15.16" y="92.27" class="st2" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0<v:newlineChar/><tspan
+ x="15.16" dy="1.2em" class="st3">1<v:newlineChar/><v:newlineChar/></tspan><tspan x="15.16" dy="2.4em"
+ class="st3">0</tspan></text> </g>
+ <g id="shape8-16" v:mID="8" v:groupContext="shape" transform="translate(180.269,21.6711) rotate(90)">
+ <title>Sheet.8</title>
+ <path d="M0 128.27 L69.75 128.27" class="st6"/>
+ </g>
+ <g id="shape10-19" v:mID="10" v:groupContext="shape" transform="translate(215.144,21.6711) rotate(90)">
+ <title>Sheet.10</title>
+ <path d="M0 128.27 L69.75 128.27" class="st6"/>
+ </g>
+ <g id="shape4-22" v:mID="4" v:groupContext="shape" transform="translate(22.75,-77.3478)">
+ <title>Sheet.4</title>
+ <path d="M0 128.27 L157.5 128.27" class="st7"/>
+ </g>
+ <g id="shape5-25" v:mID="5" v:groupContext="shape" transform="translate(23.875,-66.0978)">
+ <title>Sheet.5</title>
+ <path d="M0 128.27 L158.62 128.27" class="st7"/>
+ </g>
+ <g id="shape6-28" v:mID="6" v:groupContext="shape" transform="translate(22.75,-54.8478)">
+ <title>Sheet.6</title>
+ <path d="M0 128.27 L159.75 128.27" class="st7"/>
+ </g>
+ <g id="shape7-31" v:mID="7" v:groupContext="shape" transform="translate(22.75,-87.4728)">
+ <title>Sheet.7</title>
+ <path d="M0 128.27 L155.25 128.27" class="st6"/>
+ </g>
+ <g id="shape12-34" v:mID="12" v:groupContext="shape" transform="translate(91.9375,-42.4728)">
+ <title>Sheet.12</title>
+ <desc>0 0 0</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="8.4375" cy="105.769" width="16.88" height="45"/>
+ <rect x="0" y="83.2689" width="16.875" height="45" class="st1"/>
+ <text x="6.16" y="92.27" class="st2" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0<v:newlineChar/><tspan
+ x="6.16" dy="1.2em" class="st8">0<v:newlineChar/><v:newlineChar/></tspan><tspan x="6.16" dy="2.4em"
+ class="st3">0</tspan></text> </g>
+ <g id="shape26-39" v:mID="26" v:groupContext="shape" transform="translate(86.875,-88.5978)">
+ <title>Sheet.26</title>
+ <desc>H1(x)</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="14.0625" cy="122.644" width="28.13" height="11.25"/>
+ <rect x="0" y="117.019" width="28.125" height="11.25" class="st1"/>
+ <text x="5.03" y="125.34" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>H<tspan dy="-0.284em"
+ class="st9" v:baseFontSize="8">1</tspan><tspan dy="0.164em" class="st3">(</tspan>x)</text> </g>
+ <g id="shape27-44" v:mID="27" v:groupContext="shape" transform="translate(115,-42.4728)">
+ <title>Sheet.27</title>
+ <desc>1 1 0</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="8.4375" cy="105.769" width="16.88" height="45"/>
+ <rect x="0" y="83.2689" width="16.875" height="45" class="st1"/>
+ <text x="6.16" y="92.27" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>1<v:newlineChar/><tspan
+ x="6.16" dy="1.2em" class="st11">1<v:newlineChar/><v:newlineChar/></tspan><tspan x="6.16" dy="2.4em"
+ class="st11">0</tspan></text> </g>
+ <g id="shape28-49" v:mID="28" v:groupContext="shape" transform="translate(109.938,-88.5978)">
+ <title>Sheet.28</title>
+ <desc>H2(x)</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="14.0625" cy="122.644" width="28.13" height="11.25"/>
+ <rect x="0" y="117.019" width="28.125" height="11.25" class="st1"/>
+ <text x="5.03" y="125.34" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>H<tspan dy="-0.284em"
+ class="st9" v:baseFontSize="8">2</tspan><tspan dy="0.164em" class="st3">(</tspan>x)</text> </g>
+ <g id="shape29-54" v:mID="29" v:groupContext="shape" transform="translate(155.5,-42.4728)">
+ <title>Sheet.29</title>
+ <desc>0 1 0</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="8.4375" cy="105.769" width="16.88" height="45"/>
+ <rect x="0" y="83.2689" width="16.875" height="45" class="st1"/>
+ <text x="6.16" y="92.27" class="st2" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0<v:newlineChar/><tspan
+ x="6.16" dy="1.2em" class="st3">1<v:newlineChar/><v:newlineChar/></tspan><tspan x="6.16" dy="2.4em"
+ class="st3">0</tspan></text> </g>
+ <g id="shape30-59" v:mID="30" v:groupContext="shape" transform="translate(150.438,-88.5978)">
+ <title>Sheet.30</title>
+ <desc>Hm(x)</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="14.0625" cy="122.644" width="28.13" height="11.25"/>
+ <rect x="0" y="117.019" width="28.125" height="11.25" class="st1"/>
+ <text x="4.24" y="125.34" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>H<tspan dy="-0.284em"
+ class="st9" v:baseFontSize="8">m</tspan><tspan dy="0.164em" class="st3">(</tspan>x)</text> </g>
+ <g id="shape31-64" v:mID="31" v:groupContext="shape" transform="translate(130.188,-89.7228)">
+ <title>Sheet.31</title>
+ <desc>…..</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="14.0625" cy="122.644" width="28.13" height="11.25"/>
+ <rect x="0" y="117.019" width="28.125" height="11.25" class="st1"/>
+ <text x="8.46" y="125.34" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>…..</text> </g>
+ <g id="shape32-67" v:mID="32" v:groupContext="shape" transform="translate(34,-23.3478)">
+ <title>Sheet.32</title>
+ <desc>Store m for this group of keys</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="66.375" cy="122.644" width="132.75" height="11.25"/>
+ <g id="shadow32-68" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st12">
+ <rect x="0" y="117.019" width="132.75" height="11.25" class="st13"/>
+ </g>
+ <rect x="0" y="117.019" width="132.75" height="11.25" class="st14"/>
+ <text x="6.32" y="125.64" class="st15" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Store <tspan
+ class="st16">m</tspan> for this group of keys</text> </g>
+ <g id="shape36-76" v:mID="36" v:groupContext="shape" transform="translate(159.381,-100.964)">
+ <title>Sheet.36</title>
+ <path d="M3.45 125.81 L6.87 119.34 L7.99 120.16 L3.87 128.27 L0 124.35 L0.86 123.13 L3.45 125.81 Z" class="st17"/>
+ </g>
+ <g id="group44-79" transform="translate(97.5625,-100.086)" v:mID="44" v:groupContext="group">
+ <title>Sheet.44</title>
+ <g id="shape42-80" v:mID="42" v:groupContext="shape" transform="translate(85.4972,28.6255) rotate(41.8011)">
+ <title>Sheet.42</title>
+ <path d="M0 128.27 L6.04 128.27" class="st18"/>
+ </g>
+ <g id="shape43-83" v:mID="43" v:groupContext="shape" transform="translate(-87.9035,34.8564) rotate(-43.2597)">
+ <title>Sheet.43</title>
+ <path d="M0 128.27 L5.87 128.27" class="st18"/>
+ </g>
+ </g>
+ <g id="group45-86" transform="translate(120.625,-100.086)" v:mID="45" v:groupContext="group">
+ <title>Sheet.45</title>
+ <g id="shape46-87" v:mID="46" v:groupContext="shape" transform="translate(85.4972,28.6255) rotate(41.8011)">
+ <title>Sheet.46</title>
+ <path d="M0 128.27 L6.04 128.27" class="st18"/>
+ </g>
+ <g id="shape47-90" v:mID="47" v:groupContext="shape" transform="translate(-87.9035,34.8564) rotate(-43.2597)">
+ <title>Sheet.47</title>
+ <path d="M0 128.27 L5.87 128.27" class="st18"/>
+ </g>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/efd_i5.svg b/doc/guides/prog_guide/img/efd_i5.svg
new file mode 100644
index 00000000..b6540ba4
--- /dev/null
+++ b/doc/guides/prog_guide/img/efd_i5.svg
@@ -0,0 +1,183 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export efd_i5.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="8.34375in" height="2.86443in"
+ viewBox="0 0 600.75 206.239" xml:space="preserve" color-interpolation-filters="sRGB" class="st14">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud v:nameU="msvSubprocessMaster" v:prompt="" v:val="VT4(Rectangle)"/>
+ <v:ud v:nameU="msvNoAutoConnect" v:val="VT0(1):26"/>
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {fill:#feffff;font-family:Calibri;font-size:1.5em}
+ .st5 {fill:#feffff;font-family:Calibri;font-size:1.16666em}
+ .st6 {marker-end:url(#mrkr5-36);stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.5}
+ .st7 {fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-opacity:1;stroke-width:0.37313432835821}
+ .st8 {stroke:#5b9bd5;stroke-dasharray:1.5,3;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.5}
+ .st9 {fill:none;stroke:none;stroke-width:0.25}
+ .st10 {fill:#5b9bd5;font-family:Calibri;font-size:1.5em;font-weight:bold}
+ .st11 {baseline-shift:-32.4951%;font-size:0.649902em}
+ .st12 {fill:#deebf6;stroke:#0070c0;stroke-width:1}
+ .st13 {fill:#5b9bd5;font-family:Calibri;font-size:1.5em}
+ .st14 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-36" class="st7" v:arrowType="5" v:arrowSize="2" v:setback="4.69" refX="-4.69" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-2.68,-2.68) "/>
+ </marker>
+ </defs>
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeOrder" v:val="VT0(0):26"/>
+ </v:userDefs>
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="shape2-1" v:mID="2" v:groupContext="shape" transform="translate(93.0294,-158.5)">
+ <title>Rectangle</title>
+ <desc>All Keys</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="216" cy="192.739" width="432" height="27"/>
+ <g id="shadow2-2" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="179.239" width="432" height="27" class="st2"/>
+ </g>
+ <rect x="0" y="179.239" width="432" height="27" class="st3"/>
+ <text x="187.88" y="198.14" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>All Keys</text> </g>
+ <g id="shape3-7" v:mID="3" v:groupContext="shape" transform="translate(21.0294,-77.5)">
+ <title>Rectangle.3</title>
+ <desc>Group 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="188.239" width="108" height="36"/>
+ <g id="shadow3-8" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="170.239" width="108" height="36" class="st2"/>
+ </g>
+ <rect x="0" y="170.239" width="108" height="36" class="st3"/>
+ <text x="30.97" y="192.44" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Group 1</text> </g>
+ <g id="shape4-13" v:mID="4" v:groupContext="shape" transform="translate(156.029,-77.5)">
+ <title>Rectangle.4</title>
+ <desc>Group 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="188.239" width="108" height="36"/>
+ <g id="shadow4-14" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="170.239" width="108" height="36" class="st2"/>
+ </g>
+ <rect x="0" y="170.239" width="108" height="36" class="st3"/>
+ <text x="30.97" y="192.44" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Group 2</text> </g>
+ <g id="shape5-19" v:mID="5" v:groupContext="shape" transform="translate(291.029,-77.5)">
+ <title>Rectangle.5</title>
+ <desc>Group 3</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="188.239" width="108" height="36"/>
+ <g id="shadow5-20" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="170.239" width="108" height="36" class="st2"/>
+ </g>
+ <rect x="0" y="170.239" width="108" height="36" class="st3"/>
+ <text x="30.97" y="192.44" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Group 3</text> </g>
+ <g id="shape6-25" v:mID="6" v:groupContext="shape" transform="translate(471.029,-77.5)">
+ <title>Rectangle.6</title>
+ <desc>Group X</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="188.239" width="108" height="36"/>
+ <g id="shadow6-26" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="170.239" width="108" height="36" class="st2"/>
+ </g>
+ <rect x="0" y="170.239" width="108" height="36" class="st3"/>
+ <text x="30.88" y="192.44" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Group X</text> </g>
+ <g id="shape7-31" v:mID="7" v:groupContext="shape" transform="translate(359.05,247.819) rotate(165.964)">
+ <title>Sheet.7</title>
+ <path d="M0 206.24 L178.5 206.24" class="st6"/>
+ </g>
+ <g id="shape8-37" v:mID="8" v:groupContext="shape" transform="translate(428.903,215.562) rotate(144.462)">
+ <title>Sheet.8</title>
+ <path d="M0 206.24 L70.39 206.24" class="st6"/>
+ </g>
+ <g id="shape9-42" v:mID="9" v:groupContext="shape" transform="translate(470.075,-81.0976) rotate(51.3402)">
+ <title>Sheet.9</title>
+ <path d="M0 206.24 L50.59 206.24" class="st6"/>
+ </g>
+ <g id="shape10-47" v:mID="10" v:groupContext="shape" transform="translate(364.228,-150.976) rotate(15.5241)">
+ <title>Sheet.10</title>
+ <path d="M0 206.24 L161.1 206.24" class="st6"/>
+ </g>
+ <g id="shape11-52" v:mID="11" v:groupContext="shape" transform="translate(408.029,-95.5)">
+ <title>Sheet.11</title>
+ <path d="M0 206.24 L45 206.24" class="st8"/>
+ </g>
+ <g id="shape12-55" v:mID="12" v:groupContext="shape" transform="translate(48.0294,-50.5)">
+ <title>Sheet.12</title>
+ <desc>H7</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="22.5" cy="192.739" width="45" height="27"/>
+ <rect x="0" y="179.239" width="45" height="27" class="st9"/>
+ <text x="13.86" y="198.14" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>H<tspan
+ dy="-0.284em" class="st11" v:baseFontSize="18">7</tspan></text> </g>
+ <g id="shape13-59" v:mID="13" v:groupContext="shape" transform="translate(192.029,-50.5)">
+ <title>Sheet.13</title>
+ <desc>H267</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="22.5" cy="192.739" width="45" height="27"/>
+ <rect x="0" y="179.239" width="45" height="27" class="st9"/>
+ <text x="7.93" y="198.14" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>H<tspan dy="-0.284em"
+ class="st11" v:baseFontSize="18">267</tspan></text> </g>
+ <g id="shape14-63" v:mID="14" v:groupContext="shape" transform="translate(318.029,-50.5)">
+ <title>Sheet.14</title>
+ <desc>H46</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="22.5" cy="192.739" width="45" height="27"/>
+ <rect x="0" y="179.239" width="45" height="27" class="st9"/>
+ <text x="10.89" y="198.14" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>H<tspan
+ dy="-0.284em" class="st11" v:baseFontSize="18">46</tspan></text> </g>
+ <g id="shape15-67" v:mID="15" v:groupContext="shape" transform="translate(502.529,-50.5)">
+ <title>Sheet.15</title>
+ <desc>H132</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="22.5" cy="192.739" width="45" height="27"/>
+ <rect x="0" y="179.239" width="45" height="27" class="st9"/>
+ <text x="7.93" y="198.14" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>H<tspan dy="-0.284em"
+ class="st11" v:baseFontSize="18">132</tspan></text> </g>
+ <g id="shape16-71" v:mID="16" v:groupContext="shape" transform="translate(111.029,-19)">
+ <title>Sheet.16</title>
+ <desc>Store hash function index for each group of keys</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="189" cy="192.739" width="378" height="27"/>
+ <rect x="0" y="179.239" width="378" height="27" class="st12"/>
+ <text x="12.27" y="198.14" class="st13" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Store hash function index for each group of keys</text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/efd_i6.svg b/doc/guides/prog_guide/img/efd_i6.svg
new file mode 100644
index 00000000..9aee30bc
--- /dev/null
+++ b/doc/guides/prog_guide/img/efd_i6.svg
@@ -0,0 +1,1254 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export efd_i6.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="8.2496in" height="5.89673in"
+ viewBox="0 0 593.971 424.565" xml:space="preserve" color-interpolation-filters="sRGB" class="st27">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud v:nameU="msvSubprocessMaster" v:prompt="" v:val="VT4(Rectangle)"/>
+ <v:ud v:nameU="msvNoAutoConnect" v:val="VT0(1):26"/>
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
+ .st5 {fill:#feffff;font-family:Calibri;font-size:0.75em}
+ .st6 {fill:none;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st7 {fill:none;stroke:#2e75b5;stroke-width:2.25}
+ .st8 {fill:#305497;stroke:#2e75b5;stroke-width:1}
+ .st9 {fill:#feffff;font-family:Calibri;font-size:0.833336em;font-weight:bold}
+ .st10 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2)}
+ .st11 {fill:#5b9bd5}
+ .st12 {stroke:#c7c8c8;stroke-width:0.25}
+ .st13 {fill:#acccea;stroke:#c7c8c8;stroke-width:0.25}
+ .st14 {fill:#feffff;font-family:Calibri;font-size:1.00001em;font-weight:bold}
+ .st15 {fill:#ed7d31;stroke:#c7c8c8;stroke-width:0.25}
+ .st16 {fill:#deebf6;stroke:#c7c8c8;stroke-width:0.25}
+ .st17 {marker-end:url(#mrkr5-212);stroke:#ff0000;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st18 {fill:#ff0000;fill-opacity:1;stroke:#ff0000;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st19 {fill:none;stroke:#2e75b5;stroke-width:1}
+ .st20 {fill:#5b9bd5;font-family:Calibri;font-size:1.00001em}
+ .st21 {fill:none;stroke:none;stroke-width:0.25}
+ .st22 {font-size:1em}
+ .st23 {fill:#ffffff}
+ .st24 {stroke:#5b9bd5;stroke-dasharray:1.5,3;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.5}
+ .st25 {marker-end:url(#mrkr5-444);stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.5}
+ .st26 {fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-opacity:1;stroke-width:0.37313432835821}
+ .st27 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-212" class="st18" v:arrowType="5" v:arrowSize="2" v:setback="5.8" refX="-5.8" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ <marker id="mrkr5-444" class="st26" v:arrowType="5" v:arrowSize="2" v:setback="4.69" refX="-4.69" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-2.68,-2.68) "/>
+ </marker>
+ </defs>
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeOrder" v:val="VT0(0):26"/>
+ </v:userDefs>
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="shape3-1" v:mID="3" v:groupContext="shape" transform="translate(319.501,-335.688)">
+ <title>Rectangle.58</title>
+ <desc>Key 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow3-2" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="4.74" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key 1</text> </g>
+ <g id="shape4-7" v:mID="4" v:groupContext="shape" transform="translate(353.251,-335.688)">
+ <title>Rectangle.59</title>
+ <desc>Action 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow4-8" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.62" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action 1</text> </g>
+ <g id="shape5-13" v:mID="5" v:groupContext="shape" transform="translate(400.501,-335.688)">
+ <title>Rectangle.60</title>
+ <desc>Key 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow5-14" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="4.74" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key 2</text> </g>
+ <g id="shape6-19" v:mID="6" v:groupContext="shape" transform="translate(434.251,-335.688)">
+ <title>Rectangle.61</title>
+ <desc>Action 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow6-20" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.62" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action 2</text> </g>
+ <g id="shape7-25" v:mID="7" v:groupContext="shape" transform="translate(481.501,-335.688)">
+ <title>Rectangle.62</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow7-26" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape8-30" v:mID="8" v:groupContext="shape" transform="translate(515.251,-335.688)">
+ <title>Rectangle.63</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow8-31" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape9-35" v:mID="9" v:groupContext="shape" transform="translate(319.501,-313.188)">
+ <title>Rectangle.64</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow9-36" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape10-40" v:mID="10" v:groupContext="shape" transform="translate(353.251,-313.188)">
+ <title>Rectangle.65</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow10-41" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape11-45" v:mID="11" v:groupContext="shape" transform="translate(400.501,-313.188)">
+ <title>Rectangle.66</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow11-46" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape12-50" v:mID="12" v:groupContext="shape" transform="translate(434.251,-313.188)">
+ <title>Rectangle.67</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow12-51" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape13-55" v:mID="13" v:groupContext="shape" transform="translate(481.501,-313.188)">
+ <title>Rectangle.68</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow13-56" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape14-60" v:mID="14" v:groupContext="shape" transform="translate(515.251,-313.188)">
+ <title>Rectangle.69</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow14-61" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape15-65" v:mID="15" v:groupContext="shape" transform="translate(319.501,-277.188)">
+ <title>Rectangle.70</title>
+ <desc>Key x</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow15-66" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.11" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key x</text> </g>
+ <g id="shape16-71" v:mID="16" v:groupContext="shape" transform="translate(353.251,-277.188)">
+ <title>Rectangle.71</title>
+ <desc>Action x</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow16-72" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.99" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action x</text> </g>
+ <g id="shape17-77" v:mID="17" v:groupContext="shape" transform="translate(400.501,-277.188)">
+ <title>Rectangle.72</title>
+ <desc>Key y</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow17-78" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.01" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key y</text> </g>
+ <g id="shape18-83" v:mID="18" v:groupContext="shape" transform="translate(434.251,-277.188)">
+ <title>Rectangle.73</title>
+ <desc>Action y</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow18-84" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.89" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action y</text> </g>
+ <g id="shape19-89" v:mID="19" v:groupContext="shape" transform="translate(481.501,-277.188)">
+ <title>Rectangle.74</title>
+ <desc>Key z</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow19-90" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.3" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key z</text> </g>
+ <g id="shape20-95" v:mID="20" v:groupContext="shape" transform="translate(515.251,-277.188)">
+ <title>Rectangle.75</title>
+ <desc>Action z</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow20-96" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="5.18" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action z</text> </g>
+ <g id="shape21-101" v:mID="21" v:groupContext="shape" transform="translate(319.501,-240.687)">
+ <title>Rectangle.76</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow21-102" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape22-106" v:mID="22" v:groupContext="shape" transform="translate(353.251,-240.687)">
+ <title>Rectangle.77</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow22-107" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape23-111" v:mID="23" v:groupContext="shape" transform="translate(400.501,-240.687)">
+ <title>Rectangle.78</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow23-112" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape24-116" v:mID="24" v:groupContext="shape" transform="translate(434.251,-240.687)">
+ <title>Rectangle.79</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow24-117" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape25-121" v:mID="25" v:groupContext="shape" transform="translate(481.501,-240.687)">
+ <title>Rectangle.80</title>
+ <desc>Key N</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow25-122" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.21" y="418.26" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key N</text> </g>
+ <g id="shape26-127" v:mID="26" v:groupContext="shape" transform="translate(515.251,-240.687)">
+ <title>Rectangle.81</title>
+ <desc>Action N</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow26-128" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="5.67" y="418.26" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action N</text> </g>
+ <g id="shape27-133" v:mID="27" v:groupContext="shape" transform="translate(317.251,-231.687)">
+ <title>Rectangle.82</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow27-134" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="289.065" width="245.25" height="135.5" class="st6"/>
+ </g>
+ <rect x="0" y="289.065" width="245.25" height="135.5" class="st7"/>
+ </g>
+ <g id="shape28-138" v:mID="28" v:groupContext="shape" transform="translate(328.501,-362.688)">
+ <title>Sheet.28</title>
+ <desc>Local Table for N Specific Flows Serviced at Node 1</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="110.423" cy="418.94" width="220.85" height="11.25"/>
+ <rect x="0" y="413.315" width="220.846" height="11.25" class="st8"/>
+ <text x="5.77" y="421.94" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Local Table for N Specific Flows Serviced at Node 1</text> </g>
+ <g id="group34-141" transform="translate(66.0294,-165.569)" v:mID="34" v:groupContext="group">
+ <v:custProps>
+ <v:cp v:nameU="AssetNumber" v:lbl="Asset Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SerialNumber" v:lbl="Serial Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Location" v:lbl="Location" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Building" v:lbl="Building" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Room" v:lbl="Room" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Manufacturer" v:lbl="Manufacturer" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductNumber" v:lbl="Product Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="PartNumber" v:lbl="Part Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductDescription" v:lbl="Product Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Equipment" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkName" v:lbl="Network Name" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="IPAddress" v:lbl="IP Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SubnetMask" v:lbl="Subnet Mask" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="AdminInterface" v:lbl="Administrative Interface" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NumberofPorts" v:lbl="Number of Ports" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CommunityString" v:lbl="Community String" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkDescription" v:lbl="Network Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="MACAddress" v:lbl="MAC Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ShapeClass" v:lbl="ShapeClass" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Equipment)"/>
+ <v:cp v:nameU="ShapeType" v:lbl="ShapeType" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Device)"/>
+ <v:cp v:nameU="SubShapeType" v:lbl="SubShapeType" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Load balancer)"/>
+ </v:custProps>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ShapeClass" v:prompt="" v:val="VT0(5):26"/>
+ <v:ud v:nameU="SolSH" v:prompt="" v:val="VT15({BF0433D9-CD73-4EB5-8390-8653BE590246}):41"/>
+ <v:ud v:nameU="visLegendShape" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <title>Load balancer</title>
+ <g id="shape35-142" v:mID="35" v:groupContext="shape" transform="translate(0,-7.33146)">
+ <title>Sheet.35</title>
+ <g id="shadow35-143" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M54 367.23 L18 367.23 L0 377.86 L0 424.56 L72 424.56 L72 377.86 L54 367.23 Z" class="st10"/>
+ <path d="M0 377.86 L72 377.86" class="st6"/>
+ <path d="M54 367.23 L18 367.23 L0 377.86 L0 424.56 L72 424.56 L72 377.86 L54 367.23" class="st6"/>
+ </g>
+ <path d="M54 367.23 L18 367.23 L0 377.86 L0 424.56 L72 424.56 L72 377.86 L54 367.23 Z" class="st11"/>
+ <path d="M0 377.86 L72 377.86" class="st12"/>
+ <path d="M54 367.23 L18 367.23 L0 377.86 L0 424.56 L72 424.56 L72 377.86 L54 367.23" class="st12"/>
+ </g>
+ <g id="shape36-152" v:mID="36" v:groupContext="shape" transform="translate(8.03054,-12.9324)">
+ <title>Sheet.36</title>
+ <g id="shadow36-153" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M45.34 421.81 L41.2 422.66 L44.12 424.56 L49.68 423.16 L48.75 417.51 L45.8 415.59 L46.69 419.68 L36.97
+ 413.34 L35.6 415.45 L45.34 421.81 ZM50.83 405.36 L39.2 405.36 L39.2 407.88 L50.8 407.88 L47.82 410.83
+ L51.34 410.83 L55.21 406.61 L51.32 402.39 L47.83 402.39 L50.83 405.36 ZM46.49 392.01 L36.75 398.37
+ L38.13 400.48 L47.84 394.14 L46.96 398.23 L49.91 396.31 L50.84 390.66 L45.28 389.26 L42.36 391.16
+ L46.49 392.01 ZM27.71 397.16 C22.66 397.16 18.58 401.25 18.58 406.29 C18.58 411.33 22.66 415.42
+ 27.71 415.42 C32.75 415.42 36.84 411.33 36.84 406.29 C36.84 401.25 32.75 397.16 27.71 397.16 ZM27.71
+ 400.04 C31.15 400.04 33.96 402.84 33.96 406.29 C33.96 409.74 31.15 412.54 27.71 412.54 C24.26 412.54
+ 21.46 409.74 21.46 406.29 C21.46 402.84 24.26 400.04 27.71 400.04 ZM11.64 405.04 L0 405.04 L0 407.56
+ L11.6 407.56 L8.62 410.51 L12.14 410.51 L16.01 406.29 L12.12 402.07 L8.64 402.07 L11.64 405.04 Z"
+ class="st2"/>
+ </g>
+ <path d="M45.34 421.81 L41.2 422.66 L44.12 424.56 L49.68 423.16 L48.75 417.51 L45.8 415.59 L46.69 419.68 L36.97 413.34
+ L35.6 415.45 L45.34 421.81 ZM50.83 405.36 L39.2 405.36 L39.2 407.88 L50.8 407.88 L47.82 410.83 L51.34
+ 410.83 L55.21 406.61 L51.32 402.39 L47.83 402.39 L50.83 405.36 ZM46.49 392.01 L36.75 398.37 L38.13 400.48
+ L47.84 394.14 L46.96 398.23 L49.91 396.31 L50.84 390.66 L45.28 389.26 L42.36 391.16 L46.49 392.01 ZM27.71
+ 397.16 C22.66 397.16 18.58 401.25 18.58 406.29 C18.58 411.33 22.66 415.42 27.71 415.42 C32.75 415.42
+ 36.84 411.33 36.84 406.29 C36.84 401.25 32.75 397.16 27.71 397.16 ZM27.71 400.04 C31.15 400.04 33.96
+ 402.84 33.96 406.29 C33.96 409.74 31.15 412.54 27.71 412.54 C24.26 412.54 21.46 409.74 21.46 406.29
+ C21.46 402.84 24.26 400.04 27.71 400.04 ZM11.64 405.04 L0 405.04 L0 407.56 L11.6 407.56 L8.62 410.51
+ L12.14 410.51 L16.01 406.29 L12.12 402.07 L8.64 402.07 L11.64 405.04 Z" class="st13"/>
+ </g>
+ </g>
+ <g id="shape37-157" v:mID="37" v:groupContext="shape" transform="translate(21.0294,-45.4375)">
+ <title>Rectangle</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow37-158" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="336.433" width="135" height="88.1315" class="st2"/>
+ </g>
+ <rect x="0" y="336.433" width="135" height="88.1315" class="st3"/>
+ </g>
+ <g id="shape38-162" v:mID="38" v:groupContext="shape" transform="translate(34.693,-126.438)">
+ <title>Sheet.38</title>
+ <desc>EFD Table</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="49.3364" cy="415.565" width="98.68" height="18"/>
+ <rect x="0" y="406.565" width="98.6728" height="18" class="st8"/>
+ <text x="24.87" y="419.17" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>EFD Table</text> </g>
+ <g id="shape39-165" v:mID="39" v:groupContext="shape" transform="translate(30.0294,-99.4375)">
+ <title>Rectangle.39</title>
+ <desc>Group_id</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="26.9182" cy="415.565" width="53.84" height="18"/>
+ <g id="shadow39-166" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="53.8364" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="53.8364" height="18" class="st15"/>
+ <text x="7.87" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Group_id</text> </g>
+ <g id="shape40-171" v:mID="40" v:groupContext="shape" transform="translate(93.193,-99.4375)">
+ <title>Rectangle.40</title>
+ <desc>Hash index</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="26.9182" cy="415.565" width="53.84" height="18"/>
+ <g id="shadow40-172" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="53.8364" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="53.8364" height="18" class="st15"/>
+ <text x="4.64" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Hash index</text> </g>
+ <g id="shape41-177" v:mID="41" v:groupContext="shape" transform="translate(30.193,-82.4275)">
+ <title>Rectangle.41</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow41-178" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st16"/>
+ </g>
+ <g id="shape42-182" v:mID="42" v:groupContext="shape" transform="translate(30.193,-66.8125)">
+ <title>Rectangle.42</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow42-183" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st16"/>
+ </g>
+ <g id="shape43-187" v:mID="43" v:groupContext="shape" transform="translate(30.1112,-52.1875)">
+ <title>Rectangle.43</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow43-188" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st16"/>
+ </g>
+ <g id="shape44-192" v:mID="44" v:groupContext="shape" transform="translate(93.0294,-81.4375)">
+ <title>Rectangle.44</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow44-193" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st16"/>
+ </g>
+ <g id="shape45-197" v:mID="45" v:groupContext="shape" transform="translate(93.193,-66.8125)">
+ <title>Rectangle.45</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow45-198" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st16"/>
+ </g>
+ <g id="shape46-202" v:mID="46" v:groupContext="shape" transform="translate(93.193,-52.1875)">
+ <title>Rectangle.46</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow46-203" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st16"/>
+ </g>
+ <g id="shape47-207" v:mID="47" v:groupContext="shape" transform="translate(374.924,544.022) rotate(135)">
+ <title>Sheet.47</title>
+ <path d="M-0 417.75 A40.674 18.0151 -156.2 0 0 40.24 422.15 L40.49 421.89" class="st17"/>
+ </g>
+ <g id="shape48-213" v:mID="48" v:groupContext="shape" transform="translate(21.0294,-19)">
+ <title>Sheet.48</title>
+ <desc>Supports X*N Flows</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="67.5" cy="415.565" width="135" height="18"/>
+ <rect x="0" y="406.565" width="135" height="18" class="st19"/>
+ <text x="19.05" y="419.17" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Supports X*N Flows</text> </g>
+ <g id="shape49-216" v:mID="49" v:groupContext="shape" transform="translate(48.0294,-229.938)">
+ <title>Sheet.49</title>
+ <desc>Frontend Server or Load Balancer</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="400.94" width="108" height="47.25"/>
+ <rect x="0" y="377.315" width="108" height="47.25" class="st21"/>
+ <text x="14.56" y="397.34" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Frontend Server<v:newlineChar/><tspan
+ x="13.16" dy="1.2em" class="st22">or Load Balancer </tspan> </text> </g>
+ <g id="group51-220" transform="translate(223.876,-310.938)" v:mID="51" v:groupContext="group">
+ <v:custProps>
+ <v:cp v:nameU="AssetNumber" v:lbl="Asset Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SerialNumber" v:lbl="Serial Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Location" v:lbl="Location" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Building" v:lbl="Building" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Room" v:lbl="Room" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Manufacturer" v:lbl="Manufacturer" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductNumber" v:lbl="Product Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="PartNumber" v:lbl="Part Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductDescription" v:lbl="Product Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Equipment" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkName" v:lbl="Network Name" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="IPAddress" v:lbl="IP Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SubnetMask" v:lbl="Subnet Mask" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="AdminInterface" v:lbl="Administrative Interface" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NumberofPorts" v:lbl="Number of Ports" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CommunityString" v:lbl="Community String" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkDescription" v:lbl="Network Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="MACAddress" v:lbl="MAC Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CPU" v:lbl="CPU" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Memory" v:lbl="Memory" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="OperatingSystem" v:lbl="Operating System" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="HardDriveSize" v:lbl="Hard Drive Capacity" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Workstation" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Department" v:lbl="Department" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ShapeClass" v:lbl="ShapeClass" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Equipment)"/>
+ <v:cp v:nameU="ShapeType" v:lbl="ShapeType" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Server)"/>
+ <v:cp v:nameU="BelongsTo" v:lbl="Belongs To" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ </v:custProps>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ShapeClass" v:prompt="" v:val="VT0(5):26"/>
+ <v:ud v:nameU="SolSH" v:prompt="" v:val="VT15({BF0433D9-CD73-4EB5-8390-8653BE590246}):41"/>
+ <v:ud v:nameU="visLegendShape" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <title>Server</title>
+ <g id="shape52-221" v:mID="52" v:groupContext="shape" transform="translate(13.0183,0)">
+ <title>Sheet.52</title>
+ <g id="shadow52-222" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="352.565" width="45.9634" height="72" class="st2"/>
+ </g>
+ <rect x="0" y="352.565" width="45.9634" height="72" class="st3"/>
+ </g>
+ <g id="shape53-226" v:mID="53" v:groupContext="shape" transform="translate(47.371,-30.7354)">
+ <title>Sheet.53</title>
+ <g id="shadow53-227" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <ellipse cx="2.77848" cy="421.786" rx="2.77848" ry="2.77848" class="st2"/>
+ </g>
+ <ellipse cx="2.77848" cy="421.786" rx="2.77848" ry="2.77848" class="st13"/>
+ </g>
+ <g id="shape54-231" v:mID="54" v:groupContext="shape" transform="translate(30.51,-11.8022)">
+ <title>Sheet.54</title>
+ <v:userDefs>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(1)"/>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(#5b9bd5)"/>
+ </v:userDefs>
+ <g id="shadow54-232" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M-0 424.56 L22.42 424.56 L22.42 422.76 L-0 422.76 L-0 424.56 ZM-0 419.11 L22.42 419.11 L22.42 417.31
+ L-0 417.31 L-0 419.11 ZM-0 413.65 L22.42 413.65 L22.42 411.84 L-0 411.84 L-0 413.65 Z"
+ class="st10"/>
+ </g>
+ <path d="M-0 424.56 L22.42 424.56 L22.42 422.76 L-0 422.76 L-0 424.56 ZM-0 419.11 L22.42 419.11 L22.42 417.31 L-0
+ 417.31 L-0 419.11 ZM-0 413.65 L22.42 413.65 L22.42 411.84 L-0 411.84 L-0 413.65 Z" class="st23"/>
+ </g>
+ </g>
+ <g id="shape59-239" v:mID="59" v:groupContext="shape" transform="translate(277.876,-373.938)">
+ <title>Sheet.59</title>
+ <path d="M-0 424.56 A111.108 53.2538 42.31 0 1 93.83 421.21 L94.14 421.41" class="st17"/>
+ </g>
+ <g id="shape60-244" v:mID="60" v:groupContext="shape" transform="translate(205.876,-283.938)">
+ <title>Sheet.60</title>
+ <desc>Backend Server 1</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="408.124" width="108" height="32.8815"/>
+ <rect x="0" y="391.683" width="108" height="32.8815" class="st21"/>
+ <text x="11.93" y="411.72" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Backend Server 1</text> </g>
+ <g id="group61-247" transform="translate(223.876,-207.438)" v:mID="61" v:groupContext="group">
+ <v:custProps>
+ <v:cp v:nameU="AssetNumber" v:lbl="Asset Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SerialNumber" v:lbl="Serial Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Location" v:lbl="Location" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Building" v:lbl="Building" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Room" v:lbl="Room" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Manufacturer" v:lbl="Manufacturer" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductNumber" v:lbl="Product Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="PartNumber" v:lbl="Part Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductDescription" v:lbl="Product Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Equipment" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkName" v:lbl="Network Name" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="IPAddress" v:lbl="IP Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SubnetMask" v:lbl="Subnet Mask" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="AdminInterface" v:lbl="Administrative Interface" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NumberofPorts" v:lbl="Number of Ports" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CommunityString" v:lbl="Community String" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkDescription" v:lbl="Network Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="MACAddress" v:lbl="MAC Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CPU" v:lbl="CPU" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Memory" v:lbl="Memory" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="OperatingSystem" v:lbl="Operating System" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="HardDriveSize" v:lbl="Hard Drive Capacity" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Workstation" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Department" v:lbl="Department" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ShapeClass" v:lbl="ShapeClass" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Equipment)"/>
+ <v:cp v:nameU="ShapeType" v:lbl="ShapeType" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Server)"/>
+ <v:cp v:nameU="BelongsTo" v:lbl="Belongs To" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ </v:custProps>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ShapeClass" v:prompt="" v:val="VT0(5):26"/>
+ <v:ud v:nameU="SolSH" v:prompt="" v:val="VT15({BF0433D9-CD73-4EB5-8390-8653BE590246}):41"/>
+ <v:ud v:nameU="visLegendShape" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <title>Server.61</title>
+ <g id="shape62-248" v:mID="62" v:groupContext="shape" transform="translate(13.0183,0)">
+ <title>Sheet.62</title>
+ <g id="shadow62-249" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="352.565" width="45.9634" height="72" class="st2"/>
+ </g>
+ <rect x="0" y="352.565" width="45.9634" height="72" class="st3"/>
+ </g>
+ <g id="shape63-253" v:mID="63" v:groupContext="shape" transform="translate(47.371,-30.7354)">
+ <title>Sheet.63</title>
+ <g id="shadow63-254" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <ellipse cx="2.77848" cy="421.786" rx="2.77848" ry="2.77848" class="st2"/>
+ </g>
+ <ellipse cx="2.77848" cy="421.786" rx="2.77848" ry="2.77848" class="st13"/>
+ </g>
+ <g id="shape64-258" v:mID="64" v:groupContext="shape" transform="translate(30.51,-11.8022)">
+ <title>Sheet.64</title>
+ <v:userDefs>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(1)"/>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(#5b9bd5)"/>
+ </v:userDefs>
+ <g id="shadow64-259" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M-0 424.56 L22.42 424.56 L22.42 422.76 L-0 422.76 L-0 424.56 ZM-0 419.11 L22.42 419.11 L22.42 417.31
+ L-0 417.31 L-0 419.11 ZM-0 413.65 L22.42 413.65 L22.42 411.84 L-0 411.84 L-0 413.65 Z"
+ class="st10"/>
+ </g>
+ <path d="M-0 424.56 L22.42 424.56 L22.42 422.76 L-0 422.76 L-0 424.56 ZM-0 419.11 L22.42 419.11 L22.42 417.31 L-0
+ 417.31 L-0 419.11 ZM-0 413.65 L22.42 413.65 L22.42 411.84 L-0 411.84 L-0 413.65 Z" class="st23"/>
+ </g>
+ </g>
+ <g id="shape65-266" v:mID="65" v:groupContext="shape" transform="translate(205.876,-180.437)">
+ <title>Sheet.65</title>
+ <desc>Backend Server 2</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="408.124" width="108" height="32.8815"/>
+ <rect x="0" y="391.683" width="108" height="32.8815" class="st21"/>
+ <text x="11.93" y="411.72" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Backend Server 2</text> </g>
+ <g id="group66-269" transform="translate(219.029,-58.9375)" v:mID="66" v:groupContext="group">
+ <v:custProps>
+ <v:cp v:nameU="AssetNumber" v:lbl="Asset Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SerialNumber" v:lbl="Serial Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Location" v:lbl="Location" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Building" v:lbl="Building" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Room" v:lbl="Room" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Manufacturer" v:lbl="Manufacturer" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductNumber" v:lbl="Product Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="PartNumber" v:lbl="Part Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductDescription" v:lbl="Product Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Equipment" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkName" v:lbl="Network Name" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="IPAddress" v:lbl="IP Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SubnetMask" v:lbl="Subnet Mask" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="AdminInterface" v:lbl="Administrative Interface" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NumberofPorts" v:lbl="Number of Ports" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CommunityString" v:lbl="Community String" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkDescription" v:lbl="Network Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="MACAddress" v:lbl="MAC Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CPU" v:lbl="CPU" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Memory" v:lbl="Memory" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="OperatingSystem" v:lbl="Operating System" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="HardDriveSize" v:lbl="Hard Drive Capacity" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Workstation" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Department" v:lbl="Department" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ShapeClass" v:lbl="ShapeClass" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Equipment)"/>
+ <v:cp v:nameU="ShapeType" v:lbl="ShapeType" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Server)"/>
+ <v:cp v:nameU="BelongsTo" v:lbl="Belongs To" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ </v:custProps>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ShapeClass" v:prompt="" v:val="VT0(5):26"/>
+ <v:ud v:nameU="SolSH" v:prompt="" v:val="VT15({BF0433D9-CD73-4EB5-8390-8653BE590246}):41"/>
+ <v:ud v:nameU="visLegendShape" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <title>Server.66</title>
+ <g id="shape67-270" v:mID="67" v:groupContext="shape" transform="translate(13.0183,0)">
+ <title>Sheet.67</title>
+ <g id="shadow67-271" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="352.565" width="45.9634" height="72" class="st2"/>
+ </g>
+ <rect x="0" y="352.565" width="45.9634" height="72" class="st3"/>
+ </g>
+ <g id="shape68-275" v:mID="68" v:groupContext="shape" transform="translate(47.371,-30.7354)">
+ <title>Sheet.68</title>
+ <g id="shadow68-276" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <ellipse cx="2.77848" cy="421.786" rx="2.77848" ry="2.77848" class="st2"/>
+ </g>
+ <ellipse cx="2.77848" cy="421.786" rx="2.77848" ry="2.77848" class="st13"/>
+ </g>
+ <g id="shape69-280" v:mID="69" v:groupContext="shape" transform="translate(30.51,-11.8022)">
+ <title>Sheet.69</title>
+ <v:userDefs>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(1)"/>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(#5b9bd5)"/>
+ </v:userDefs>
+ <g id="shadow69-281" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M-0 424.56 L22.42 424.56 L22.42 422.76 L-0 422.76 L-0 424.56 ZM-0 419.11 L22.42 419.11 L22.42 417.31
+ L-0 417.31 L-0 419.11 ZM-0 413.65 L22.42 413.65 L22.42 411.84 L-0 411.84 L-0 413.65 Z"
+ class="st10"/>
+ </g>
+ <path d="M-0 424.56 L22.42 424.56 L22.42 422.76 L-0 422.76 L-0 424.56 ZM-0 419.11 L22.42 419.11 L22.42 417.31 L-0
+ 417.31 L-0 419.11 ZM-0 413.65 L22.42 413.65 L22.42 411.84 L-0 411.84 L-0 413.65 Z" class="st23"/>
+ </g>
+ </g>
+ <g id="shape70-288" v:mID="70" v:groupContext="shape" transform="translate(201.029,-26.056)">
+ <title>Sheet.70</title>
+ <desc>Backend Server X</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="408.124" width="108" height="32.8815"/>
+ <rect x="0" y="391.683" width="108" height="32.8815" class="st21"/>
+ <text x="11.86" y="411.72" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Backend Server X</text> </g>
+ <g id="shape71-291" v:mID="71" v:groupContext="shape" transform="translate(684.44,239.627) rotate(90)">
+ <title>Sheet.71</title>
+ <path d="M0 424.56 L45 424.56" class="st24"/>
+ </g>
+ <g id="shape72-294" v:mID="72" v:groupContext="shape" transform="translate(6.85967,-22.443) rotate(-38.1076)">
+ <title>Sheet.72</title>
+ <path d="M-0 424.56 A96.1331 44.4001 55.03 0 1 68.24 420.56 L68.51 420.79" class="st17"/>
+ </g>
+ <g id="shape73-299" v:mID="73" v:groupContext="shape" transform="translate(328.501,-135.937)">
+ <title>Rectangle.73</title>
+ <desc>Key 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow73-300" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="4.74" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key 1</text> </g>
+ <g id="shape74-305" v:mID="74" v:groupContext="shape" transform="translate(362.251,-135.937)">
+ <title>Rectangle.74</title>
+ <desc>Action 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow74-306" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.62" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action 1</text> </g>
+ <g id="shape75-311" v:mID="75" v:groupContext="shape" transform="translate(409.501,-135.937)">
+ <title>Rectangle.75</title>
+ <desc>Key 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow75-312" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="4.74" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key 2</text> </g>
+ <g id="shape76-317" v:mID="76" v:groupContext="shape" transform="translate(443.251,-135.937)">
+ <title>Rectangle.76</title>
+ <desc>Action 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow76-318" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.62" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action 2</text> </g>
+ <g id="shape77-323" v:mID="77" v:groupContext="shape" transform="translate(490.501,-135.937)">
+ <title>Rectangle.77</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow77-324" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape78-328" v:mID="78" v:groupContext="shape" transform="translate(524.251,-135.937)">
+ <title>Rectangle.78</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow78-329" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape79-333" v:mID="79" v:groupContext="shape" transform="translate(328.501,-113.437)">
+ <title>Rectangle.79</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow79-334" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape80-338" v:mID="80" v:groupContext="shape" transform="translate(362.251,-113.437)">
+ <title>Rectangle.80</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow80-339" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape81-343" v:mID="81" v:groupContext="shape" transform="translate(409.501,-113.437)">
+ <title>Rectangle.81</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow81-344" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape82-348" v:mID="82" v:groupContext="shape" transform="translate(443.251,-113.437)">
+ <title>Rectangle.82</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow82-349" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape83-353" v:mID="83" v:groupContext="shape" transform="translate(490.501,-113.437)">
+ <title>Rectangle.83</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow83-354" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape84-358" v:mID="84" v:groupContext="shape" transform="translate(524.251,-113.437)">
+ <title>Rectangle.84</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow84-359" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape85-363" v:mID="85" v:groupContext="shape" transform="translate(328.501,-77.4375)">
+ <title>Rectangle.85</title>
+ <desc>Key x</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow85-364" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.11" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key x</text> </g>
+ <g id="shape86-369" v:mID="86" v:groupContext="shape" transform="translate(362.251,-77.4375)">
+ <title>Rectangle.86</title>
+ <desc>Action x</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow86-370" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.99" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action x</text> </g>
+ <g id="shape87-375" v:mID="87" v:groupContext="shape" transform="translate(409.501,-77.4375)">
+ <title>Rectangle.87</title>
+ <desc>Key y</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow87-376" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.01" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key y</text> </g>
+ <g id="shape88-381" v:mID="88" v:groupContext="shape" transform="translate(443.251,-77.4375)">
+ <title>Rectangle.88</title>
+ <desc>Action y</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow88-382" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.89" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action y</text> </g>
+ <g id="shape89-387" v:mID="89" v:groupContext="shape" transform="translate(490.501,-77.4375)">
+ <title>Rectangle.89</title>
+ <desc>Key z</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow89-388" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.3" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key z</text> </g>
+ <g id="shape90-393" v:mID="90" v:groupContext="shape" transform="translate(524.251,-77.4375)">
+ <title>Rectangle.90</title>
+ <desc>Action z</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow90-394" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="5.18" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action z</text> </g>
+ <g id="shape91-399" v:mID="91" v:groupContext="shape" transform="translate(328.501,-40.9375)">
+ <title>Rectangle.91</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow91-400" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape92-404" v:mID="92" v:groupContext="shape" transform="translate(362.251,-40.9375)">
+ <title>Rectangle.92</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow92-405" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape93-409" v:mID="93" v:groupContext="shape" transform="translate(409.501,-40.9375)">
+ <title>Rectangle.93</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow93-410" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape94-414" v:mID="94" v:groupContext="shape" transform="translate(443.251,-40.9375)">
+ <title>Rectangle.94</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow94-415" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape95-419" v:mID="95" v:groupContext="shape" transform="translate(490.501,-40.9375)">
+ <title>Rectangle.95</title>
+ <desc>Key N</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow95-420" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.21" y="418.26" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key N</text> </g>
+ <g id="shape96-425" v:mID="96" v:groupContext="shape" transform="translate(524.251,-40.9375)">
+ <title>Rectangle.96</title>
+ <desc>Action N</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow96-426" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="5.67" y="418.26" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action N</text> </g>
+ <g id="shape97-431" v:mID="97" v:groupContext="shape" transform="translate(326.251,-31.9375)">
+ <title>Rectangle.97</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow97-432" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="289.065" width="245.25" height="135.5" class="st6"/>
+ </g>
+ <rect x="0" y="289.065" width="245.25" height="135.5" class="st7"/>
+ </g>
+ <g id="shape98-436" v:mID="98" v:groupContext="shape" transform="translate(337.501,-162.938)">
+ <title>Sheet.98</title>
+ <desc>Local Table for N Specific Flows Serviced at Node X</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="110.423" cy="418.94" width="220.85" height="11.25"/>
+ <rect x="0" y="413.315" width="220.846" height="11.25" class="st8"/>
+ <text x="5.55" y="421.94" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Local Table for N Specific Flows Serviced at Node X</text> </g>
+ <g id="shape99-439" v:mID="99" v:groupContext="shape" transform="translate(-204.342,-29.4449) rotate(-53.7462)">
+ <title>Sheet.99</title>
+ <path d="M0 424.56 L160.37 424.56" class="st25"/>
+ </g>
+ <g id="shape100-445" v:mID="100" v:groupContext="shape" transform="translate(-37.6568,-164.882) rotate(-24.444)">
+ <title>Sheet.100</title>
+ <path d="M0 424.56 L101.71 424.56" class="st25"/>
+ </g>
+ <g id="shape101-450" v:mID="101" v:groupContext="shape" transform="translate(464.049,-50.8578) rotate(50.099)">
+ <title>Sheet.101</title>
+ <path d="M0 424.56 L139.8 424.56" class="st25"/>
+ </g>
+ <g id="shape102-455" v:mID="102" v:groupContext="shape" transform="translate(372.376,-207.438)">
+ <title>Sheet.102</title>
+ <desc>Supports N Flows</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="67.5" cy="415.565" width="135" height="18"/>
+ <rect x="0" y="406.565" width="135" height="18" class="st19"/>
+ <text x="25.15" y="419.17" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Supports N Flows</text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/efd_i7.svg b/doc/guides/prog_guide/img/efd_i7.svg
new file mode 100644
index 00000000..98f80005
--- /dev/null
+++ b/doc/guides/prog_guide/img/efd_i7.svg
@@ -0,0 +1,790 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export efd_i8.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="10.6168in" height="4.81965in"
+ viewBox="0 0 764.409 347.015" xml:space="preserve" color-interpolation-filters="sRGB" class="st27">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud v:nameU="msvSubprocessMaster" v:prompt="" v:val="VT4(Rectangle)"/>
+ <v:ud v:nameU="msvNoAutoConnect" v:val="VT0(1):26"/>
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {fill:#ffffff;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st2 {fill:none;stroke:#00aeef;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st3 {stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st4 {fill:#000000;font-family:Intel Clear;font-size:0.998566em}
+ .st5 {fill:#0071c5;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st6 {stroke:#00b050;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st7 {stroke:#004280;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st8 {stroke:#ca8f02;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st9 {stroke:#00aeef;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st10 {fill:#c00000;font-family:Intel Clear;font-size:0.828804em;font-weight:bold}
+ .st11 {fill:#7f6d00;font-family:Intel Clear;font-size:0.828804em;font-weight:bold}
+ .st12 {fill:#00b050;stroke:#00b050;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0149927}
+ .st13 {fill:#004280;stroke:#004280;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0149927}
+ .st14 {fill:#00b050;stroke:#00b050;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0299855}
+ .st15 {fill:#ca8f02;stroke:#ca8f02;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.0299855}
+ .st16 {fill:#004280;font-family:Intel Clear;font-size:0.828804em}
+ .st17 {fill:#ffffff;font-family:Intel Clear;font-size:0.998566em}
+ .st18 {fill:#ffffff;font-family:Intel Clear;font-size:1.49785em}
+ .st19 {visibility:visible}
+ .st20 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st21 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st22 {fill:#feffff;font-family:Symbol;font-size:1.16666em}
+ .st23 {font-size:1em}
+ .st24 {font-family:Calibri;font-size:1em}
+ .st25 {fill:none;stroke:none;stroke-width:0.25}
+ .st26 {fill:#ffffff;font-family:Calibri;font-size:1.00001em}
+ .st27 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeOrder" v:val="VT0(0):26"/>
+ </v:userDefs>
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="shape3-1" v:mID="3" v:groupContext="shape" transform="translate(27.7836,-307.505)">
+ <title>Sheet.3</title>
+ <path d="M0 329.94 C-0 328.06 1.54 326.52 3.42 326.52 L68.49 326.52 C70.38 326.52 71.91 328.06 71.91 329.94 L71.91 343.6
+ C71.91 345.49 70.38 347.02 68.49 347.02 L3.42 347.02 C1.54 347.02 0 345.49 0 343.6 L0 329.94 Z"
+ class="st1"/>
+ </g>
+ <g id="shape4-3" v:mID="4" v:groupContext="shape" transform="translate(27.7836,-307.505)">
+ <title>Sheet.4</title>
+ <path d="M0 329.94 C-0 328.06 1.54 326.52 3.42 326.52 L68.49 326.52 C70.38 326.52 71.91 328.06 71.91 329.94 L71.91 343.6
+ C71.91 345.49 70.38 347.02 68.49 347.02 L3.42 347.02 C1.54 347.02 0 345.49 0 343.6 L0 329.94 Z"
+ class="st2"/>
+ </g>
+ <g id="shape5-5" v:mID="5" v:groupContext="shape" transform="translate(50.1544,-309.121)">
+ <title>Sheet.5</title>
+ <desc>Key1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="17.3237" cy="339.824" width="34.65" height="14.3829"/>
+ <path d="M34.65 332.63 L0 332.63 L0 347.02 L34.65 347.02 L34.65 332.63" class="st3"/>
+ <text x="3.72" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key1</text> </g>
+ <g id="shape6-9" v:mID="6" v:groupContext="shape" transform="translate(43.6909,-286.954)">
+ <title>Sheet.6</title>
+ <path d="M0 336.65 L9.81 336.65 L9.81 326.28 L29.44 326.28 L29.44 336.65 L39.26 336.65 L19.63 347.02 L0 336.65 Z"
+ class="st5"/>
+ </g>
+ <g id="shape7-11" v:mID="7" v:groupContext="shape" transform="translate(27.7836,-266.044)">
+ <title>Sheet.7</title>
+ <path d="M0 330.04 C0 328.16 1.52 326.64 3.41 326.64 L68.51 326.64 C70.4 326.64 71.91 328.16 71.91 330.04 L71.91 343.62
+ C71.91 345.49 70.4 347.02 68.51 347.02 L3.41 347.02 C1.52 347.02 0 345.49 0 343.62 L0 330.04 Z"
+ class="st1"/>
+ </g>
+ <g id="shape8-13" v:mID="8" v:groupContext="shape" transform="translate(27.7836,-266.044)">
+ <title>Sheet.8</title>
+ <path d="M0 330.04 C0 328.16 1.52 326.64 3.41 326.64 L68.51 326.64 C70.4 326.64 71.91 328.16 71.91 330.04 L71.91 343.62
+ C71.91 345.49 70.4 347.02 68.51 347.02 L3.41 347.02 C1.52 347.02 0 345.49 0 343.62 L0 330.04 Z"
+ class="st2"/>
+ </g>
+ <g id="shape9-15" v:mID="9" v:groupContext="shape" transform="translate(50.7572,-267.602)">
+ <title>Sheet.9</title>
+ <desc>hash</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="16.5866" cy="339.824" width="33.18" height="14.3829"/>
+ <path d="M33.17 332.63 L0 332.63 L0 347.02 L33.17 347.02 L33.17 332.63" class="st3"/>
+ <text x="3.63" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash</text> </g>
+ <g id="shape10-19" v:mID="10" v:groupContext="shape" transform="translate(19.0195,-225.183)">
+ <title>Sheet.10</title>
+ <path d="M0 330.74 C0 328.94 1.46 327.48 3.26 327.48 L87.15 327.48 C88.95 327.48 90.4 328.94 90.4 330.74 L90.4 343.76
+ C90.4 345.56 88.95 347.02 87.15 347.02 L3.26 347.02 C1.46 347.02 0 345.56 0 343.76 L0 330.74 Z"
+ class="st1"/>
+ </g>
+ <g id="shape11-21" v:mID="11" v:groupContext="shape" transform="translate(19.0195,-225.183)">
+ <title>Sheet.11</title>
+ <path d="M0 330.74 C0 328.94 1.46 327.48 3.26 327.48 L87.15 327.48 C88.95 327.48 90.4 328.94 90.4 330.74 L90.4 343.76
+ C90.4 345.56 88.95 347.02 87.15 347.02 L3.26 347.02 C1.46 347.02 0 345.56 0 343.76 L0 330.74 Z"
+ class="st2"/>
+ </g>
+ <g id="shape12-23" v:mID="12" v:groupContext="shape" transform="translate(28.0373,-226.287)">
+ <title>Sheet.12</title>
+ <desc>0x0102ABCD</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="43.3615" cy="339.824" width="86.73" height="14.3829"/>
+ <path d="M86.72 332.63 L0 332.63 L0 347.02 L86.72 347.02 L86.72 332.63" class="st3"/>
+ <text x="7.12" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0x0102ABCD</text> </g>
+ <g id="shape13-27" v:mID="13" v:groupContext="shape" transform="translate(43.6909,-244.775)">
+ <title>Sheet.13</title>
+ <path d="M0 336.71 L9.81 336.71 L9.81 326.4 L29.44 326.4 L29.44 336.71 L39.26 336.71 L19.63 347.02 L0 336.71 Z"
+ class="st5"/>
+ </g>
+ <g id="shape14-29" v:mID="14" v:groupContext="shape" transform="translate(40.7496,-210.444)">
+ <title>Sheet.14</title>
+ <path d="M26.29 334.91 C26.29 338.26 25.84 340.96 25.29 340.96 L14.16 340.96 C13.6 340.96 13.15 343.67 13.15 347.02 C13.15
+ 343.67 12.7 340.96 12.14 340.96 L1.01 340.96 C0.46 340.96 0 338.26 0 334.91" class="st6"/>
+ </g>
+ <g id="shape15-32" v:mID="15" v:groupContext="shape" transform="translate(125.629,-307.625)">
+ <title>Sheet.15</title>
+ <path d="M0 330.04 C0 328.16 1.52 326.64 3.41 326.64 L68.63 326.64 C70.51 326.64 72.03 328.16 72.03 330.04 L72.03 343.62
+ C72.03 345.49 70.51 347.02 68.63 347.02 L3.41 347.02 C1.52 347.02 0 345.49 0 343.62 L0 330.04 Z"
+ class="st1"/>
+ </g>
+ <g id="shape16-34" v:mID="16" v:groupContext="shape" transform="translate(125.629,-307.625)">
+ <title>Sheet.16</title>
+ <path d="M0 330.04 C0 328.16 1.52 326.64 3.41 326.64 L68.63 326.64 C70.51 326.64 72.03 328.16 72.03 330.04 L72.03 343.62
+ C72.03 345.49 70.51 347.02 68.63 347.02 L3.41 347.02 C1.52 347.02 0 345.49 0 343.62 L0 330.04 Z"
+ class="st2"/>
+ </g>
+ <g id="shape17-36" v:mID="17" v:groupContext="shape" transform="translate(148.034,-309.155)">
+ <title>Sheet.17</title>
+ <desc>Key2</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="17.3237" cy="339.824" width="34.65" height="14.3829"/>
+ <path d="M34.65 332.63 L0 332.63 L0 347.02 L34.65 347.02 L34.65 332.63" class="st3"/>
+ <text x="3.72" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key2</text> </g>
+ <g id="shape18-40" v:mID="18" v:groupContext="shape" transform="translate(141.536,-286.954)">
+ <title>Sheet.18</title>
+ <path d="M0 336.65 L9.81 336.65 L9.81 326.28 L29.44 326.28 L29.44 336.65 L39.26 336.65 L19.63 347.02 L0 336.65 Z"
+ class="st5"/>
+ </g>
+ <g id="shape19-42" v:mID="19" v:groupContext="shape" transform="translate(125.629,-266.044)">
+ <title>Sheet.19</title>
+ <path d="M0 329.94 C0 328.06 1.54 326.52 3.42 326.52 L68.61 326.52 C70.5 326.52 72.03 328.06 72.03 329.94 L72.03 343.6
+ C72.03 345.49 70.5 347.02 68.61 347.02 L3.42 347.02 C1.54 347.02 0 345.49 0 343.6 L0 329.94 Z" class="st1"/>
+ </g>
+ <g id="shape20-44" v:mID="20" v:groupContext="shape" transform="translate(125.629,-266.044)">
+ <title>Sheet.20</title>
+ <path d="M0 329.94 C0 328.06 1.54 326.52 3.42 326.52 L68.61 326.52 C70.5 326.52 72.03 328.06 72.03 329.94 L72.03 343.6
+ C72.03 345.49 70.5 347.02 68.61 347.02 L3.42 347.02 C1.54 347.02 0 345.49 0 343.6 L0 329.94 Z" class="st2"/>
+ </g>
+ <g id="shape21-46" v:mID="21" v:groupContext="shape" transform="translate(148.636,-267.636)">
+ <title>Sheet.21</title>
+ <desc>hash</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="16.5866" cy="339.824" width="33.18" height="14.3829"/>
+ <path d="M33.17 332.63 L0 332.63 L0 347.02 L33.17 347.02 L33.17 332.63" class="st3"/>
+ <text x="3.63" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash</text> </g>
+ <g id="shape22-50" v:mID="22" v:groupContext="shape" transform="translate(116.865,-225.183)">
+ <title>Sheet.22</title>
+ <path d="M0 330.74 C0 328.94 1.46 327.48 3.26 327.48 L87.15 327.48 C88.95 327.48 90.4 328.94 90.4 330.74 L90.4 343.76
+ C90.4 345.56 88.95 347.02 87.15 347.02 L3.26 347.02 C1.46 347.02 0 345.56 0 343.76 L0 330.74 Z"
+ class="st1"/>
+ </g>
+ <g id="shape23-52" v:mID="23" v:groupContext="shape" transform="translate(116.865,-225.183)">
+ <title>Sheet.23</title>
+ <path d="M0 330.74 C0 328.94 1.46 327.48 3.26 327.48 L87.15 327.48 C88.95 327.48 90.4 328.94 90.4 330.74 L90.4 343.76
+ C90.4 345.56 88.95 347.02 87.15 347.02 L3.26 347.02 C1.46 347.02 0 345.56 0 343.76 L0 330.74 Z"
+ class="st2"/>
+ </g>
+ <g id="shape24-54" v:mID="24" v:groupContext="shape" transform="translate(125.917,-226.322)">
+ <title>Sheet.24</title>
+ <desc>0x0103CDAB</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="43.3615" cy="339.824" width="86.73" height="14.3829"/>
+ <path d="M86.72 332.63 L0 332.63 L0 347.02 L86.72 347.02 L86.72 332.63" class="st3"/>
+ <text x="7.12" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0x0103CDAB</text> </g>
+ <g id="shape25-58" v:mID="25" v:groupContext="shape" transform="translate(141.536,-244.775)">
+ <title>Sheet.25</title>
+ <path d="M0 336.71 L9.81 336.71 L9.81 326.4 L29.44 326.4 L29.44 336.71 L39.26 336.71 L19.63 347.02 L0 336.71 Z"
+ class="st5"/>
+ </g>
+ <g id="shape26-60" v:mID="26" v:groupContext="shape" transform="translate(138.595,-210.444)">
+ <title>Sheet.26</title>
+ <path d="M26.29 334.91 C26.29 338.26 25.84 340.96 25.29 340.96 L14.16 340.96 C13.6 340.96 13.15 343.67 13.15 347.02 C13.15
+ 343.67 12.7 340.96 12.14 340.96 L1.01 340.96 C0.46 340.96 0 338.26 0 334.91" class="st7"/>
+ </g>
+ <g id="shape27-63" v:mID="27" v:groupContext="shape" transform="translate(221.793,-307.625)">
+ <title>Sheet.27</title>
+ <path d="M0 330.04 C0 328.17 1.53 326.64 3.41 326.64 L68.64 326.64 C70.52 326.64 72.03 328.17 72.03 330.04 L72.03 343.63
+ C72.03 345.5 70.52 347.02 68.64 347.02 L3.41 347.02 C1.53 347.02 0 345.5 0 343.63 L0 330.04 Z" class="st1"/>
+ </g>
+ <g id="shape28-65" v:mID="28" v:groupContext="shape" transform="translate(221.793,-307.625)">
+ <title>Sheet.28</title>
+ <path d="M0 330.04 C0 328.17 1.53 326.64 3.41 326.64 L68.64 326.64 C70.52 326.64 72.03 328.17 72.03 330.04 L72.03 343.63
+ C72.03 345.5 70.52 347.02 68.64 347.02 L3.41 347.02 C1.53 347.02 0 345.5 0 343.63 L0 330.04 Z" class="st2"/>
+ </g>
+ <g id="shape29-67" v:mID="29" v:groupContext="shape" transform="translate(244.237,-309.155)">
+ <title>Sheet.29</title>
+ <desc>Key3</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="17.3237" cy="339.824" width="34.65" height="14.3829"/>
+ <path d="M34.65 332.63 L0 332.63 L0 347.02 L34.65 347.02 L34.65 332.63" class="st3"/>
+ <text x="3.72" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key3</text> </g>
+ <g id="shape30-71" v:mID="30" v:groupContext="shape" transform="translate(237.701,-286.954)">
+ <title>Sheet.30</title>
+ <path d="M0 336.65 L9.84 336.65 L9.84 326.28 L29.53 326.28 L29.53 336.65 L39.38 336.65 L19.69 347.02 L0 336.65 Z"
+ class="st5"/>
+ </g>
+ <g id="shape31-73" v:mID="31" v:groupContext="shape" transform="translate(221.793,-266.044)">
+ <title>Sheet.31</title>
+ <path d="M0 329.94 C-0 328.07 1.55 326.52 3.42 326.52 L68.61 326.52 C70.5 326.52 72.03 328.07 72.03 329.94 L72.03 343.6
+ C72.03 345.49 70.5 347.02 68.61 347.02 L3.42 347.02 C1.55 347.02 0 345.49 0 343.6 L0 329.94 Z" class="st1"/>
+ </g>
+ <g id="shape32-75" v:mID="32" v:groupContext="shape" transform="translate(221.793,-266.044)">
+ <title>Sheet.32</title>
+ <path d="M0 329.94 C-0 328.07 1.55 326.52 3.42 326.52 L68.61 326.52 C70.5 326.52 72.03 328.07 72.03 329.94 L72.03 343.6
+ C72.03 345.49 70.5 347.02 68.61 347.02 L3.42 347.02 C1.55 347.02 0 345.49 0 343.6 L0 329.94 Z" class="st2"/>
+ </g>
+ <g id="shape33-77" v:mID="33" v:groupContext="shape" transform="translate(244.84,-267.636)">
+ <title>Sheet.33</title>
+ <desc>hash</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="16.5866" cy="339.824" width="33.18" height="14.3829"/>
+ <path d="M33.17 332.63 L0 332.63 L0 347.02 L33.17 347.02 L33.17 332.63" class="st3"/>
+ <text x="3.63" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash</text> </g>
+ <g id="shape34-81" v:mID="34" v:groupContext="shape" transform="translate(213.029,-225.183)">
+ <title>Sheet.34</title>
+ <path d="M0 330.75 C0 328.95 1.47 327.48 3.27 327.48 L87.27 327.48 C89.07 327.48 90.52 328.95 90.52 330.75 L90.52 343.76
+ C90.52 345.56 89.07 347.02 87.27 347.02 L3.27 347.02 C1.47 347.02 0 345.56 0 343.76 L0 330.75 Z"
+ class="st1"/>
+ </g>
+ <g id="shape35-83" v:mID="35" v:groupContext="shape" transform="translate(213.029,-225.183)">
+ <title>Sheet.35</title>
+ <path d="M0 330.75 C0 328.95 1.47 327.48 3.27 327.48 L87.27 327.48 C89.07 327.48 90.52 328.95 90.52 330.75 L90.52 343.76
+ C90.52 345.56 89.07 347.02 87.27 347.02 L3.27 347.02 C1.47 347.02 0 345.56 0 343.76 L0 330.75 Z"
+ class="st2"/>
+ </g>
+ <g id="shape36-85" v:mID="36" v:groupContext="shape" transform="translate(222.002,-226.322)">
+ <title>Sheet.36</title>
+ <desc>0x0102BAAD</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="43.4787" cy="339.824" width="86.96" height="14.3829"/>
+ <path d="M86.96 332.63 L0 332.63 L0 347.02 L86.96 347.02 L86.96 332.63" class="st3"/>
+ <text x="7.13" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0x0102BAAD</text> </g>
+ <g id="shape37-89" v:mID="37" v:groupContext="shape" transform="translate(237.701,-244.775)">
+ <title>Sheet.37</title>
+ <path d="M0 336.71 L9.84 336.71 L9.84 326.4 L29.53 326.4 L29.53 336.71 L39.38 336.71 L19.69 347.02 L0 336.71 Z"
+ class="st5"/>
+ </g>
+ <g id="shape38-91" v:mID="38" v:groupContext="shape" transform="translate(234.759,-210.444)">
+ <title>Sheet.38</title>
+ <path d="M26.41 334.91 C26.41 338.26 25.96 340.96 25.41 340.96 L14.22 340.96 C13.66 340.96 13.21 343.67 13.21 347.02
+ C13.21 343.67 12.76 340.96 12.2 340.96 L1.01 340.96 C0.46 340.96 0 338.26 0 334.91" class="st6"/>
+ </g>
+ <g id="shape39-94" v:mID="39" v:groupContext="shape" transform="translate(319.759,-307.625)">
+ <title>Sheet.39</title>
+ <path d="M0 330.04 C0 328.17 1.53 326.64 3.41 326.64 L68.52 326.64 C70.4 326.64 71.91 328.17 71.91 330.04 L71.91 343.63
+ C71.91 345.5 70.4 347.02 68.52 347.02 L3.41 347.02 C1.53 347.02 0 345.5 0 343.63 L0 330.04 Z" class="st1"/>
+ </g>
+ <g id="shape40-96" v:mID="40" v:groupContext="shape" transform="translate(319.759,-307.625)">
+ <title>Sheet.40</title>
+ <path d="M0 330.04 C0 328.17 1.53 326.64 3.41 326.64 L68.52 326.64 C70.4 326.64 71.91 328.17 71.91 330.04 L71.91 343.63
+ C71.91 345.5 70.4 347.02 68.52 347.02 L3.41 347.02 C1.53 347.02 0 345.5 0 343.63 L0 330.04 Z" class="st2"/>
+ </g>
+ <g id="shape41-98" v:mID="41" v:groupContext="shape" transform="translate(342.125,-309.155)">
+ <title>Sheet.41</title>
+ <desc>Key4</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="17.3237" cy="339.824" width="34.65" height="14.3829"/>
+ <path d="M34.65 332.63 L0 332.63 L0 347.02 L34.65 347.02 L34.65 332.63" class="st3"/>
+ <text x="3.72" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key4</text> </g>
+ <g id="shape42-102" v:mID="42" v:groupContext="shape" transform="translate(335.666,-286.954)">
+ <title>Sheet.42</title>
+ <path d="M0 336.65 L9.81 336.65 L9.81 326.28 L29.44 326.28 L29.44 336.65 L39.26 336.65 L19.63 347.02 L0 336.65 Z"
+ class="st5"/>
+ </g>
+ <g id="shape43-104" v:mID="43" v:groupContext="shape" transform="translate(319.759,-266.044)">
+ <title>Sheet.43</title>
+ <path d="M0 329.94 C0 328.07 1.55 326.52 3.42 326.52 L68.49 326.52 C70.38 326.52 71.91 328.07 71.91 329.94 L71.91 343.6
+ C71.91 345.49 70.38 347.02 68.49 347.02 L3.42 347.02 C1.55 347.02 0 345.49 0 343.6 L0 329.94 Z"
+ class="st1"/>
+ </g>
+ <g id="shape44-106" v:mID="44" v:groupContext="shape" transform="translate(319.759,-266.044)">
+ <title>Sheet.44</title>
+ <path d="M0 329.94 C0 328.07 1.55 326.52 3.42 326.52 L68.49 326.52 C70.38 326.52 71.91 328.07 71.91 329.94 L71.91 343.6
+ C71.91 345.49 70.38 347.02 68.49 347.02 L3.42 347.02 C1.55 347.02 0 345.49 0 343.6 L0 329.94 Z"
+ class="st2"/>
+ </g>
+ <g id="shape45-108" v:mID="45" v:groupContext="shape" transform="translate(342.728,-267.636)">
+ <title>Sheet.45</title>
+ <desc>hash</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="16.5866" cy="339.824" width="33.18" height="14.3829"/>
+ <path d="M33.17 332.63 L0 332.63 L0 347.02 L33.17 347.02 L33.17 332.63" class="st3"/>
+ <text x="3.63" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash</text> </g>
+ <g id="shape46-112" v:mID="46" v:groupContext="shape" transform="translate(310.995,-225.183)">
+ <title>Sheet.46</title>
+ <path d="M0 330.75 C0 328.95 1.47 327.48 3.27 327.48 L87.15 327.48 C88.95 327.48 90.4 328.95 90.4 330.75 L90.4 343.76
+ C90.4 345.56 88.95 347.02 87.15 347.02 L3.27 347.02 C1.47 347.02 0 345.56 0 343.76 L0 330.75 Z"
+ class="st1"/>
+ </g>
+ <g id="shape47-114" v:mID="47" v:groupContext="shape" transform="translate(310.995,-225.183)">
+ <title>Sheet.47</title>
+ <path d="M0 330.75 C0 328.95 1.47 327.48 3.27 327.48 L87.15 327.48 C88.95 327.48 90.4 328.95 90.4 330.75 L90.4 343.76
+ C90.4 345.56 88.95 347.02 87.15 347.02 L3.27 347.02 C1.47 347.02 0 345.56 0 343.76 L0 330.75 Z"
+ class="st2"/>
+ </g>
+ <g id="shape48-116" v:mID="48" v:groupContext="shape" transform="translate(321.689,-226.322)">
+ <title>Sheet.48</title>
+ <desc>0x0104BEEF</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="41.4183" cy="339.824" width="82.84" height="14.3829"/>
+ <path d="M82.84 332.63 L0 332.63 L0 347.02 L82.84 347.02 L82.84 332.63" class="st3"/>
+ <text x="6.87" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0x0104BEEF</text> </g>
+ <g id="shape49-120" v:mID="49" v:groupContext="shape" transform="translate(335.666,-244.775)">
+ <title>Sheet.49</title>
+ <path d="M0 336.71 L9.81 336.71 L9.81 326.4 L29.44 326.4 L29.44 336.71 L39.26 336.71 L19.63 347.02 L0 336.71 Z"
+ class="st5"/>
+ </g>
+ <g id="shape50-122" v:mID="50" v:groupContext="shape" transform="translate(332.725,-210.444)">
+ <title>Sheet.50</title>
+ <path d="M26.29 334.91 C26.29 338.27 25.84 340.96 25.29 340.96 L14.17 340.96 C13.61 340.96 13.15 343.67 13.15 347.02
+ C13.15 343.67 12.7 340.96 12.14 340.96 L1.02 340.96 C0.47 340.96 0 338.27 0 334.91" class="st6"/>
+ </g>
+ <g id="shape51-125" v:mID="51" v:groupContext="shape" transform="translate(416.884,-307.625)">
+ <title>Sheet.51</title>
+ <path d="M0 330.04 C0 328.17 1.53 326.64 3.41 326.64 L68.52 326.64 C70.4 326.64 71.91 328.17 71.91 330.04 L71.91 343.63
+ C71.91 345.5 70.4 347.02 68.52 347.02 L3.41 347.02 C1.53 347.02 0 345.5 0 343.63 L0 330.04 Z" class="st1"/>
+ </g>
+ <g id="shape52-127" v:mID="52" v:groupContext="shape" transform="translate(416.884,-307.625)">
+ <title>Sheet.52</title>
+ <path d="M0 330.04 C0 328.17 1.53 326.64 3.41 326.64 L68.52 326.64 C70.4 326.64 71.91 328.17 71.91 330.04 L71.91 343.63
+ C71.91 345.5 70.4 347.02 68.52 347.02 L3.41 347.02 C1.53 347.02 0 345.5 0 343.63 L0 330.04 Z" class="st2"/>
+ </g>
+ <g id="shape53-129" v:mID="53" v:groupContext="shape" transform="translate(439.255,-309.155)">
+ <title>Sheet.53</title>
+ <desc>Key5</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="17.3237" cy="339.824" width="34.65" height="14.3829"/>
+ <path d="M34.65 332.63 L0 332.63 L0 347.02 L34.65 347.02 L34.65 332.63" class="st3"/>
+ <text x="3.72" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key5</text> </g>
+ <g id="shape54-133" v:mID="54" v:groupContext="shape" transform="translate(432.791,-286.954)">
+ <title>Sheet.54</title>
+ <path d="M0 336.65 L9.81 336.65 L9.81 326.28 L29.44 326.28 L29.44 336.65 L39.26 336.65 L19.63 347.02 L0 336.65 Z"
+ class="st5"/>
+ </g>
+ <g id="shape55-135" v:mID="55" v:groupContext="shape" transform="translate(416.884,-266.044)">
+ <title>Sheet.55</title>
+ <path d="M0 329.94 C0 328.07 1.55 326.52 3.42 326.52 L68.49 326.52 C70.38 326.52 71.91 328.07 71.91 329.94 L71.91 343.6
+ C71.91 345.49 70.38 347.02 68.49 347.02 L3.42 347.02 C1.55 347.02 0 345.49 0 343.6 L0 329.94 Z"
+ class="st1"/>
+ </g>
+ <g id="shape56-137" v:mID="56" v:groupContext="shape" transform="translate(416.884,-266.044)">
+ <title>Sheet.56</title>
+ <path d="M0 329.94 C0 328.07 1.55 326.52 3.42 326.52 L68.49 326.52 C70.38 326.52 71.91 328.07 71.91 329.94 L71.91 343.6
+ C71.91 345.49 70.38 347.02 68.49 347.02 L3.42 347.02 C1.55 347.02 0 345.49 0 343.6 L0 329.94 Z"
+ class="st2"/>
+ </g>
+ <g id="shape57-139" v:mID="57" v:groupContext="shape" transform="translate(439.858,-267.636)">
+ <title>Sheet.57</title>
+ <desc>hash</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="16.5866" cy="339.824" width="33.18" height="14.3829"/>
+ <path d="M33.17 332.63 L0 332.63 L0 347.02 L33.17 347.02 L33.17 332.63" class="st3"/>
+ <text x="3.63" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash</text> </g>
+ <g id="shape58-143" v:mID="58" v:groupContext="shape" transform="translate(408.12,-225.183)">
+ <title>Sheet.58</title>
+ <path d="M0 330.75 C0 328.95 1.47 327.48 3.27 327.48 L87.15 327.48 C88.95 327.48 90.4 328.95 90.4 330.75 L90.4 343.76
+ C90.4 345.56 88.95 347.02 87.15 347.02 L3.27 347.02 C1.47 347.02 0 345.56 0 343.76 L0 330.75 Z"
+ class="st1"/>
+ </g>
+ <g id="shape59-145" v:mID="59" v:groupContext="shape" transform="translate(408.12,-225.183)">
+ <title>Sheet.59</title>
+ <path d="M0 330.75 C0 328.95 1.47 327.48 3.27 327.48 L87.15 327.48 C88.95 327.48 90.4 328.95 90.4 330.75 L90.4 343.76
+ C90.4 345.56 88.95 347.02 87.15 347.02 L3.27 347.02 C1.47 347.02 0 345.56 0 343.76 L0 330.75 Z"
+ class="st2"/>
+ </g>
+ <g id="shape60-147" v:mID="60" v:groupContext="shape" transform="translate(416.778,-226.322)">
+ <title>Sheet.60</title>
+ <desc>0x0103DABD</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="43.7817" cy="339.824" width="87.57" height="14.3829"/>
+ <path d="M87.56 332.63 L0 332.63 L0 347.02 L87.56 347.02 L87.56 332.63" class="st3"/>
+ <text x="7.17" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0x0103DABD</text> </g>
+ <g id="shape61-151" v:mID="61" v:groupContext="shape" transform="translate(432.791,-244.775)">
+ <title>Sheet.61</title>
+ <path d="M0 336.71 L9.81 336.71 L9.81 326.4 L29.44 326.4 L29.44 336.71 L39.26 336.71 L19.63 347.02 L0 336.71 Z"
+ class="st5"/>
+ </g>
+ <g id="shape62-153" v:mID="62" v:groupContext="shape" transform="translate(429.85,-210.444)">
+ <title>Sheet.62</title>
+ <path d="M26.29 334.91 C26.29 338.27 25.84 340.96 25.29 340.96 L14.17 340.96 C13.61 340.96 13.15 343.67 13.15 347.02
+ C13.15 343.67 12.7 340.96 12.14 340.96 L1.02 340.96 C0.47 340.96 0 338.27 0 334.91" class="st7"/>
+ </g>
+ <g id="shape63-156" v:mID="63" v:groupContext="shape" transform="translate(514.489,-307.625)">
+ <title>Sheet.63</title>
+ <path d="M0 330.06 C-0 328.17 1.53 326.64 3.42 326.64 L68.64 326.64 C70.53 326.64 72.03 328.17 72.03 330.06 L72.03 343.63
+ C72.03 345.52 70.53 347.02 68.64 347.02 L3.42 347.02 C1.53 347.02 0 345.52 0 343.63 L0 330.06 Z"
+ class="st1"/>
+ </g>
+ <g id="shape64-158" v:mID="64" v:groupContext="shape" transform="translate(514.489,-307.625)">
+ <title>Sheet.64</title>
+ <path d="M0 330.06 C-0 328.17 1.53 326.64 3.42 326.64 L68.64 326.64 C70.53 326.64 72.03 328.17 72.03 330.06 L72.03 343.63
+ C72.03 345.52 70.53 347.02 68.64 347.02 L3.42 347.02 C1.53 347.02 0 345.52 0 343.63 L0 330.06 Z"
+ class="st2"/>
+ </g>
+ <g id="shape65-160" v:mID="65" v:groupContext="shape" transform="translate(536.883,-309.19)">
+ <title>Sheet.65</title>
+ <desc>Key6</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="17.3237" cy="339.824" width="34.65" height="14.3829"/>
+ <path d="M34.65 332.63 L0 332.63 L0 347.02 L34.65 347.02 L34.65 332.63" class="st3"/>
+ <text x="3.72" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key6</text> </g>
+ <g id="shape66-164" v:mID="66" v:groupContext="shape" transform="translate(530.396,-287.074)">
+ <title>Sheet.66</title>
+ <path d="M0 336.71 L9.81 336.71 L9.81 326.4 L29.44 326.4 L29.44 336.71 L39.26 336.71 L19.63 347.02 L0 336.71 Z"
+ class="st5"/>
+ </g>
+ <g id="shape67-166" v:mID="67" v:groupContext="shape" transform="translate(514.489,-266.044)">
+ <title>Sheet.67</title>
+ <path d="M0 329.94 C0 328.08 1.56 326.52 3.42 326.52 L68.61 326.52 C70.5 326.52 72.03 328.08 72.03 329.94 L72.03 343.6
+ C72.03 345.49 70.5 347.02 68.61 347.02 L3.42 347.02 C1.56 347.02 0 345.49 0 343.6 L0 329.94 Z" class="st1"/>
+ </g>
+ <g id="shape68-168" v:mID="68" v:groupContext="shape" transform="translate(514.489,-266.044)">
+ <title>Sheet.68</title>
+ <path d="M0 329.94 C0 328.08 1.56 326.52 3.42 326.52 L68.61 326.52 C70.5 326.52 72.03 328.08 72.03 329.94 L72.03 343.6
+ C72.03 345.49 70.5 347.02 68.61 347.02 L3.42 347.02 C1.56 347.02 0 345.49 0 343.6 L0 329.94 Z" class="st2"/>
+ </g>
+ <g id="shape69-170" v:mID="69" v:groupContext="shape" transform="translate(537.486,-267.671)">
+ <title>Sheet.69</title>
+ <desc>hash</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="16.5866" cy="339.824" width="33.18" height="14.3829"/>
+ <path d="M33.17 332.63 L0 332.63 L0 347.02 L33.17 347.02 L33.17 332.63" class="st3"/>
+ <text x="3.63" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash</text> </g>
+ <g id="shape70-174" v:mID="70" v:groupContext="shape" transform="translate(505.725,-225.183)">
+ <title>Sheet.70</title>
+ <path d="M0 330.75 C0 328.95 1.47 327.48 3.27 327.48 L87.16 327.48 C88.96 327.48 90.4 328.95 90.4 330.75 L90.4 343.78
+ C90.4 345.58 88.96 347.02 87.16 347.02 L3.27 347.02 C1.47 347.02 0 345.58 0 343.78 L0 330.75 Z"
+ class="st1"/>
+ </g>
+ <g id="shape71-176" v:mID="71" v:groupContext="shape" transform="translate(505.725,-225.183)">
+ <title>Sheet.71</title>
+ <path d="M0 330.75 C0 328.95 1.47 327.48 3.27 327.48 L87.16 327.48 C88.96 327.48 90.4 328.95 90.4 330.75 L90.4 343.78
+ C90.4 345.58 88.96 347.02 87.16 347.02 L3.27 347.02 C1.47 347.02 0 345.58 0 343.78 L0 330.75 Z"
+ class="st2"/>
+ </g>
+ <g id="shape72-178" v:mID="72" v:groupContext="shape" transform="translate(514.766,-226.356)">
+ <title>Sheet.72</title>
+ <desc>0x0102ADCB</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="43.3615" cy="339.824" width="86.73" height="14.3829"/>
+ <path d="M86.72 332.63 L0 332.63 L0 347.02 L86.72 347.02 L86.72 332.63" class="st3"/>
+ <text x="7.12" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0x0102ADCB</text> </g>
+ <g id="shape73-182" v:mID="73" v:groupContext="shape" transform="translate(530.396,-244.775)">
+ <title>Sheet.73</title>
+ <path d="M0 336.65 L9.81 336.65 L9.81 326.28 L29.44 326.28 L29.44 336.65 L39.26 336.65 L19.63 347.02 L0 336.65 Z"
+ class="st5"/>
+ </g>
+ <g id="shape74-184" v:mID="74" v:groupContext="shape" transform="translate(527.455,-210.564)">
+ <title>Sheet.74</title>
+ <path d="M26.29 335.03 C26.29 338.36 25.87 341.02 25.3 341.02 L14.17 341.02 C13.6 341.02 13.15 343.72 13.15 347.02 C13.15
+ 343.72 12.73 341.02 12.16 341.02 L1.02 341.02 C0.45 341.02 0 338.36 0 335.03" class="st6"/>
+ </g>
+ <g id="shape75-187" v:mID="75" v:groupContext="shape" transform="translate(610.653,-307.505)">
+ <title>Sheet.75</title>
+ <path d="M0 329.94 C0 328.08 1.56 326.52 3.42 326.52 L68.61 326.52 C70.5 326.52 72.03 328.08 72.03 329.94 L72.03 343.6
+ C72.03 345.49 70.5 347.02 68.61 347.02 L3.42 347.02 C1.56 347.02 0 345.49 0 343.6 L0 329.94 Z" class="st1"/>
+ </g>
+ <g id="shape76-189" v:mID="76" v:groupContext="shape" transform="translate(610.653,-307.505)">
+ <title>Sheet.76</title>
+ <path d="M0 329.94 C0 328.08 1.56 326.52 3.42 326.52 L68.61 326.52 C70.5 326.52 72.03 328.08 72.03 329.94 L72.03 343.6
+ C72.03 345.49 70.5 347.02 68.61 347.02 L3.42 347.02 C1.56 347.02 0 345.49 0 343.6 L0 329.94 Z" class="st2"/>
+ </g>
+ <g id="shape77-191" v:mID="77" v:groupContext="shape" transform="translate(633.086,-309.121)">
+ <title>Sheet.77</title>
+ <desc>Key7</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="17.3237" cy="339.824" width="34.65" height="14.3829"/>
+ <path d="M34.65 332.63 L0 332.63 L0 347.02 L34.65 347.02 L34.65 332.63" class="st3"/>
+ <text x="3.72" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key7</text> </g>
+ <g id="shape78-195" v:mID="78" v:groupContext="shape" transform="translate(626.561,-286.954)">
+ <title>Sheet.78</title>
+ <path d="M0 336.65 L9.84 336.65 L9.84 326.28 L29.53 326.28 L29.53 336.65 L39.38 336.65 L19.69 347.02 L0 336.65 Z"
+ class="st5"/>
+ </g>
+ <g id="shape79-197" v:mID="79" v:groupContext="shape" transform="translate(610.653,-266.044)">
+ <title>Sheet.79</title>
+ <path d="M0 330.06 C-0 328.17 1.53 326.64 3.42 326.64 L68.64 326.64 C70.53 326.64 72.03 328.17 72.03 330.06 L72.03 343.63
+ C72.03 345.52 70.53 347.02 68.64 347.02 L3.42 347.02 C1.53 347.02 0 345.52 0 343.63 L0 330.06 Z"
+ class="st1"/>
+ </g>
+ <g id="shape80-199" v:mID="80" v:groupContext="shape" transform="translate(610.653,-266.044)">
+ <title>Sheet.80</title>
+ <path d="M0 330.06 C-0 328.17 1.53 326.64 3.42 326.64 L68.64 326.64 C70.53 326.64 72.03 328.17 72.03 330.06 L72.03 343.63
+ C72.03 345.52 70.53 347.02 68.64 347.02 L3.42 347.02 C1.53 347.02 0 345.52 0 343.63 L0 330.06 Z"
+ class="st2"/>
+ </g>
+ <g id="shape81-201" v:mID="81" v:groupContext="shape" transform="translate(633.689,-267.602)">
+ <title>Sheet.81</title>
+ <desc>hash</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="16.5866" cy="339.824" width="33.18" height="14.3829"/>
+ <path d="M33.17 332.63 L0 332.63 L0 347.02 L33.17 347.02 L33.17 332.63" class="st3"/>
+ <text x="3.63" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash</text> </g>
+ <g id="shape82-205" v:mID="82" v:groupContext="shape" transform="translate(601.889,-225.183)">
+ <title>Sheet.82</title>
+ <path d="M0 330.75 C0 328.95 1.47 327.48 3.27 327.48 L87.28 327.48 C89.08 327.48 90.52 328.95 90.52 330.75 L90.52 343.78
+ C90.52 345.58 89.08 347.02 87.28 347.02 L3.27 347.02 C1.47 347.02 0 345.58 0 343.78 L0 330.75 Z"
+ class="st1"/>
+ </g>
+ <g id="shape83-207" v:mID="83" v:groupContext="shape" transform="translate(601.889,-225.183)">
+ <title>Sheet.83</title>
+ <path d="M0 330.75 C0 328.95 1.47 327.48 3.27 327.48 L87.28 327.48 C89.08 327.48 90.52 328.95 90.52 330.75 L90.52 343.78
+ C90.52 345.58 89.08 347.02 87.28 347.02 L3.27 347.02 C1.47 347.02 0 345.58 0 343.78 L0 330.75 Z"
+ class="st2"/>
+ </g>
+ <g id="shape84-209" v:mID="84" v:groupContext="shape" transform="translate(610.969,-226.287)">
+ <title>Sheet.84</title>
+ <desc>0x0104DBCA</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="43.3615" cy="339.824" width="86.73" height="14.3829"/>
+ <path d="M86.72 332.63 L0 332.63 L0 347.02 L86.72 347.02 L86.72 332.63" class="st3"/>
+ <text x="7.12" y="343.42" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0x0104DBCA</text> </g>
+ <g id="shape85-213" v:mID="85" v:groupContext="shape" transform="translate(626.561,-244.775)">
+ <title>Sheet.85</title>
+ <path d="M0 336.71 L9.84 336.71 L9.84 326.4 L29.53 326.4 L29.53 336.71 L39.38 336.71 L19.69 347.02 L0 336.71 Z"
+ class="st5"/>
+ </g>
+ <g id="shape86-215" v:mID="86" v:groupContext="shape" transform="translate(623.619,-210.444)">
+ <title>Sheet.86</title>
+ <path d="M26.41 334.91 C26.41 338.27 25.96 340.96 25.42 340.96 L14.23 340.96 C13.69 340.96 13.21 343.69 13.21 347.02
+ C13.21 343.69 12.76 340.96 12.22 340.96 L1.02 340.96 C0.48 340.96 0 338.27 0 334.91" class="st8"/>
+ </g>
+ <g id="shape87-218" v:mID="87" v:groupContext="shape" transform="translate(242.323,-81.6288)">
+ <title>Sheet.87</title>
+ <path d="M0 281.23 L0 347.02 L41.18 347.02 L41.18 281.23 L0 281.23 L0 281.23 Z" class="st1"/>
+ </g>
+ <g id="shape88-220" v:mID="88" v:groupContext="shape" transform="translate(247.009,-81.6288)">
+ <title>Sheet.88</title>
+ <path d="M0 281.23 L41.18 281.23 L41.18 347.02 L0 347.02 L0 281.23" class="st9"/>
+ </g>
+ <g id="shape89-223" v:mID="89" v:groupContext="shape" transform="translate(245.254,-132.398)">
+ <title>Sheet.89</title>
+ <desc>0x0102</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="21.3211" cy="341.046" width="42.65" height="11.9384"/>
+ <path d="M42.64 335.08 L0 335.08 L0 347.02 L42.64 347.02 L42.64 335.08" class="st3"/>
+ <text x="4" y="344.03" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0x0102</text> </g>
+ <g id="shape90-227" v:mID="90" v:groupContext="shape" transform="translate(245.015,-82.7016)">
+ <title>Sheet.90</title>
+ <desc>4</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.79425" cy="341.046" width="9.59" height="11.9384"/>
+ <path d="M9.59 335.08 L0 335.08 L0 347.02 L9.59 347.02 L9.59 335.08" class="st3"/>
+ <text x="1.84" y="344.03" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>4</text> </g>
+ <g id="shape91-231" v:mID="91" v:groupContext="shape" transform="translate(336.326,-81.6288)">
+ <title>Sheet.91</title>
+ <path d="M0 281.23 L0 347.02 L41.18 347.02 L41.18 281.23 L0 281.23 L0 281.23 Z" class="st1"/>
+ </g>
+ <g id="shape92-233" v:mID="92" v:groupContext="shape" transform="translate(339.598,-81.6288)">
+ <title>Sheet.92</title>
+ <path d="M0 281.23 L41.18 281.23 L41.18 347.02 L0 347.02 L0 281.23" class="st9"/>
+ </g>
+ <g id="shape93-236" v:mID="93" v:groupContext="shape" transform="translate(339.264,-132.398)">
+ <title>Sheet.93</title>
+ <desc>0x0103</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="21.3211" cy="341.046" width="42.65" height="11.9384"/>
+ <path d="M42.64 335.08 L0 335.08 L0 347.02 L42.64 347.02 L42.64 335.08" class="st3"/>
+ <text x="4" y="344.03" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0x0103</text> </g>
+ <g id="shape94-240" v:mID="94" v:groupContext="shape" transform="translate(339.024,-82.7016)">
+ <title>Sheet.94</title>
+ <desc>2</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.79425" cy="341.046" width="9.59" height="11.9384"/>
+ <path d="M9.59 335.08 L0 335.08 L0 347.02 L9.59 347.02 L9.59 335.08" class="st3"/>
+ <text x="1.84" y="344.03" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>2</text> </g>
+ <g id="shape95-244" v:mID="95" v:groupContext="shape" transform="translate(438.598,-81.5089)">
+ <title>Sheet.95</title>
+ <path d="M0 281.23 L0 347.02 L41.18 347.02 L41.18 281.23 L0 281.23 L0 281.23 Z" class="st1"/>
+ </g>
+ <g id="shape96-246" v:mID="96" v:groupContext="shape" transform="translate(438.598,-81.5089)">
+ <title>Sheet.96</title>
+ <path d="M0 281.23 L41.18 281.23 L41.18 347.02 L0 347.02 L0 281.23" class="st9"/>
+ </g>
+ <g id="shape97-249" v:mID="97" v:groupContext="shape" transform="translate(437.81,-132.27)">
+ <title>Sheet.97</title>
+ <desc>0x0104</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="21.3211" cy="341.046" width="42.65" height="11.9384"/>
+ <path d="M42.64 335.08 L0 335.08 L0 347.02 L42.64 347.02 L42.64 335.08" class="st3"/>
+ <text x="4" y="344.03" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>0x0104</text> </g>
+ <g id="shape98-253" v:mID="98" v:groupContext="shape" transform="translate(437.57,-82.5735)">
+ <title>Sheet.98</title>
+ <desc>1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="4.79425" cy="341.046" width="9.59" height="11.9384"/>
+ <path d="M9.59 335.08 L0 335.08 L0 347.02 L9.59 347.02 L9.59 335.08" class="st3"/>
+ <text x="1.84" y="344.03" class="st11" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>1</text> </g>
+ <g id="shape99-257" v:mID="99" v:groupContext="shape" transform="translate(53.5505,-147.924)">
+ <title>Sheet.99</title>
+ <path d="M0.59 283.52 L206.27 343.39 L205.7 345.34 L0 285.48 L0.59 283.52 L0.59 283.52 ZM205.85 341.14 L210.88 345.79
+ L204.14 347.02 L205.85 341.14 L205.85 341.14 Z" class="st12"/>
+ </g>
+ <g id="shape100-259" v:mID="100" v:groupContext="shape" transform="translate(151.516,-147.924)">
+ <title>Sheet.100</title>
+ <path d="M0.59 283.52 L202.41 343.41 L201.83 345.35 L0 285.48 L0.59 283.52 L0.59 283.52 ZM202.01 341.16 L207.01 345.83
+ L200.27 347.02 L202.01 341.16 L202.01 341.16 Z" class="st13"/>
+ </g>
+ <g id="shape101-261" v:mID="101" v:groupContext="shape" transform="translate(246.975,-147.37)">
+ <title>Sheet.101</title>
+ <path d="M2 283.72 L15.77 341.83 L13.79 342.3 L0 284.18 L2 283.72 L2 283.72 ZM17.53 340.36 L15.97 347.02 L11.57 341.77
+ L17.53 340.36 L17.53 340.36 Z" class="st12"/>
+ </g>
+ <g id="shape102-263" v:mID="102" v:groupContext="shape" transform="translate(262.972,-147.37)">
+ <title>Sheet.102</title>
+ <path d="M82.31 283.13 L3.45 343.12 L4.68 344.74 L83.54 284.76 L82.31 283.13 L82.31 283.13 ZM3.02 340.89 L0 347.02 L6.74
+ 345.74 L3.02 340.89 L3.02 340.89 Z" class="st12"/>
+ </g>
+ <g id="shape103-265" v:mID="103" v:groupContext="shape" transform="translate(358.537,-149.107)">
+ <title>Sheet.103</title>
+ <path d="M83.92 284.85 L3.53 343.2 L4.73 344.84 L85.12 286.5 L83.92 284.85 L83.92 284.85 ZM3.15 340.95 L0 347.02 L6.75
+ 345.89 L3.15 340.95 L3.15 340.95 Z" class="st13"/>
+ </g>
+ <g id="shape104-267" v:mID="104" v:groupContext="shape" transform="translate(264.413,-147.534)">
+ <title>Sheet.104</title>
+ <path d="M275.95 283 L4.77 343.27 L5.22 345.25 L276.37 285 L275.95 283 L275.95 283 ZM5.31 341.05 L0 345.37 L6.66 347.02
+ L5.31 341.05 L5.31 341.05 Z" class="st14"/>
+ </g>
+ <g id="shape105-269" v:mID="105" v:groupContext="shape" transform="translate(456.982,-148.103)">
+ <title>Sheet.105</title>
+ <path d="M179.48 283.72 L4.5 343.48 L5.16 345.43 L180.14 285.66 L179.48 283.72 L179.48 283.72 ZM4.8 341.23 L0 346.12
+ L6.81 347.02 L4.8 341.23 L4.8 341.23 Z" class="st15"/>
+ </g>
+ <g id="shape106-271" v:mID="106" v:groupContext="shape" transform="translate(335.628,-18)">
+ <title>Sheet.106</title>
+ <path d="M0 309.64 C0 305.52 2.99 302.16 6.65 302.16 L14.2 302.16 L8.01 284.85 L35.48 302.16 L78.47 302.16 C82.15 302.16
+ 85.12 305.52 85.12 309.64 L85.12 309.64 L85.12 320.85 L85.12 339.54 C85.12 343.68 82.15 347.02 78.47 347.02
+ L35.48 347.02 L14.2 347.02 L14.2 347.02 L6.65 347.02 C2.99 347.02 0 343.68 0 339.54 L0 320.85 L0 309.64
+ L0 309.64 Z" class="st5"/>
+ </g>
+ <g id="shape109-273" v:mID="109" v:groupContext="shape" transform="translate(157.564,-62.4234)">
+ <title>Sheet.109</title>
+ <path d="M16.21 347.02 C11.74 347.02 8.1 346.42 8.1 345.67 L8.1 303.49 C8.1 302.75 4.49 302.14 0 302.14 C4.49 302.14
+ 8.1 301.54 8.1 300.79 L8.1 258.61 C8.1 257.88 11.74 257.26 16.21 257.26" class="st7"/>
+ </g>
+ <g id="shape110-276" v:mID="110" v:groupContext="shape" transform="translate(113.844,-100.157)">
+ <title>Sheet.110</title>
+ <desc>Groups</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="20.2175" cy="341.046" width="40.44" height="11.9384"/>
+ <path d="M40.44 335.08 L0 335.08 L0 347.02 L40.44 347.02 L40.44 335.08" class="st3"/>
+ <text x="3.85" y="344.03" class="st16" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Groups</text> </g>
+ <g id="shape111-280" v:mID="111" v:groupContext="shape" transform="translate(196.718,-76.2186)">
+ <title>Sheet.111</title>
+ <path d="M0 331.97 C0 330.32 2.27 328.96 5.04 328.96 L37.61 328.96 L60.43 284.85 L53.72 328.96 L59.43 328.96 C62.22 328.96
+ 64.47 330.32 64.47 331.97 L64.47 331.97 L64.47 336.48 L64.47 344.01 C64.47 345.67 62.22 347.02 59.43 347.02
+ L53.72 347.02 L37.61 347.02 L37.61 347.02 L5.04 347.02 C2.27 347.02 0 345.67 0 344.01 L0 336.48 L0 331.97
+ L0 331.97 Z" class="st5"/>
+ </g>
+ <g id="shape112-282" v:mID="112" v:groupContext="shape" transform="translate(196.65,-80.2991)">
+ <title>Sheet.112</title>
+ <desc>group id</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="27.7691" cy="339.824" width="55.54" height="14.3829"/>
+ <path d="M55.54 332.63 L0 332.63 L0 347.02 L55.54 347.02 L55.54 332.63" class="st3"/>
+ <text x="5.09" y="343.42" class="st17" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>group id</text> </g>
+ <g id="shape114-286" v:mID="114" v:groupContext="shape" transform="translate(506.433,-128.007)">
+ <title>Sheet.114</title>
+ <desc>-</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="6.63728" cy="336.229" width="13.28" height="21.5726"/>
+ <path d="M13.27 325.44 L0 325.44 L0 347.02 L13.27 347.02 L13.27 325.44" class="st3"/>
+ <text x="3.06" y="341.62" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>-</text> </g>
+ <g id="shape115-290" v:mID="115" v:groupContext="shape" transform="translate(529.004,-128.007)">
+ <title>Sheet.115</title>
+ <desc>Keys separated into</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="97.1729" cy="336.229" width="194.35" height="21.5726"/>
+ <path d="M194.35 325.44 L0 325.44 L0 347.02 L194.35 347.02 L194.35 325.44" class="st3"/>
+ <text x="17.06" y="341.62" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Keys separated into </text> </g>
+ <g id="shape116-294" v:mID="116" v:groupContext="shape" transform="translate(529.004,-106.438)">
+ <title>Sheet.116</title>
+ <desc>groups based on</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="83.1587" cy="336.229" width="166.32" height="21.5726"/>
+ <path d="M166.32 325.44 L0 325.44 L0 347.02 L166.32 347.02 L166.32 325.44" class="st3"/>
+ <text x="15.23" y="341.62" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>groups based on </text> </g>
+ <g id="shape117-298" v:mID="117" v:groupContext="shape" transform="translate(529.004,-84.869)">
+ <title>Sheet.117</title>
+ <desc>some bits from hash</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="97.731" cy="336.229" width="195.47" height="21.5726"/>
+ <path d="M195.46 325.44 L0 325.44 L0 347.02 L195.46 347.02 L195.46 325.44" class="st3"/>
+ <text x="14.94" y="341.62" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>some bits from hash</text> </g>
+ <g id="shape118-302" v:mID="118" v:groupContext="shape" transform="translate(506.433,-63.2999)">
+ <title>Sheet.118</title>
+ <desc>-</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="6.63728" cy="336.229" width="13.28" height="21.5726"/>
+ <path d="M13.27 325.44 L0 325.44 L0 347.02 L13.27 347.02 L13.27 325.44" class="st3"/>
+ <text x="3.06" y="341.62" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>-</text> </g>
+ <g id="shape119-306" v:mID="119" v:groupContext="shape" transform="translate(529.004,-63.2999)">
+ <title>Sheet.119</title>
+ <desc>Groups contain a</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="84.2539" cy="336.229" width="168.51" height="21.5726"/>
+ <path d="M168.51 325.44 L0 325.44 L0 347.02 L168.51 347.02 L168.51 325.44" class="st3"/>
+ <text x="15.38" y="341.62" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Groups contain a </text> </g>
+ <g id="shape120-310" v:mID="120" v:groupContext="shape" transform="translate(529.004,-41.7308)">
+ <title>Sheet.120</title>
+ <desc>small number of</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="81.4635" cy="336.229" width="162.93" height="21.5726"/>
+ <path d="M162.93 325.44 L0 325.44 L0 347.02 L162.93 347.02 L162.93 325.44" class="st3"/>
+ <text x="15.01" y="341.62" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>small number of </text> </g>
+ <g id="shape121-314" v:mID="121" v:groupContext="shape" transform="translate(529.004,-20.1617)">
+ <title>Sheet.121</title>
+ <desc>keys (&#60;28)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="50.4481" cy="336.229" width="100.9" height="21.5726"/>
+ <path d="M100.9 325.44 L0 325.44 L0 347.02 L100.9 347.02 L100.9 325.44" class="st3"/>
+ <text x="8.77" y="341.62" class="st18" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>keys (&#60;28)</text> </g>
+ <g id="shape122-318" v:mID="122" v:groupContext="shape" transform="translate(19.1996,-146.276)">
+ <title>Sheet.122</title>
+ <path d="M0 310.17 C-0 306.1 3.62 302.8 8.07 302.8 L14.46 302.8 L29.68 282.28 L36.14 302.8 L78.65 302.8 C83.11 302.8
+ 86.72 306.1 86.72 310.17 L86.72 310.17 L86.72 321.22 L86.72 339.65 C86.72 343.72 83.11 347.02 78.65 347.02
+ L36.14 347.02 L14.46 347.02 L14.46 347.02 L8.07 347.02 C3.62 347.02 0 343.72 0 339.65 L0 321.22 L0 310.17
+ L0 310.17 Z" class="st5"/>
+ </g>
+ <g id="shape123-320" v:mID="123" v:groupContext="shape" transform="translate(41.9777,-174.053)">
+ <title>Sheet.123</title>
+ <desc>Group</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="22.8289" cy="339.824" width="45.66" height="14.3829"/>
+ <path d="M45.66 332.63 L0 332.63 L0 347.02 L45.66 347.02 L45.66 332.63" class="st3"/>
+ <text x="5.9" y="343.42" class="st17" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Group </text> </g>
+ <g id="shape124-324" v:mID="124" v:groupContext="shape" transform="translate(34.4142,-159.674)">
+ <title>Sheet.124</title>
+ <desc>Identifier</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="31.5173" cy="339.824" width="63.04" height="14.3829"/>
+ <path d="M63.03 332.63 L0 332.63 L0 347.02 L63.03 347.02 L63.03 332.63" class="st3"/>
+ <text x="7.04" y="343.42" class="st17" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Identifier </text> </g>
+ <g id="shape125-328" v:mID="125" v:groupContext="shape" transform="translate(28.7716,-145.295)">
+ <title>Sheet.125</title>
+ <desc>(simplified)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="36.2165" cy="339.824" width="72.44" height="14.3829"/>
+ <path d="M72.43 332.63 L0 332.63 L0 347.02 L72.43 347.02 L72.43 332.63" class="st3"/>
+ <text x="6.19" y="343.42" class="st17" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(simplified)</text> </g>
+ <g id="shape127-332" v:mID="127" v:groupContext="shape" transform="translate(517.688,-71.2991)">
+ <title>Sheet.127</title>
+ <desc>Keys separated into groups based on some bits from hash. Grou...</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="112.5" cy="302.139" width="225" height="89.7513"/>
+ <g id="shadow127-333" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st19">
+ <rect x="0" y="257.264" width="225" height="89.7513" class="st20"/>
+ </g>
+ <rect x="0" y="257.264" width="225" height="89.7513" class="st21"/>
+ <text x="4" y="281.09" class="st22" v:langID="1033"><v:paragraph v:indentFirst="-18" v:indentLeft="18" v:bullet="1"/><v:tabList/><tspan
+ class="st23" v:isBullet="true">·</tspan> <tspan class="st24">Keys separated into groups based </tspan><tspan
+ x="22" dy="1.204em" class="st24">on some bits from hash</tspan><tspan class="st24">.<v:newlineChar/></tspan><tspan
+ x="4" dy="1.211em" class="st23" v:isBullet="true">·</tspan> <tspan class="st24">Groups contain a small number of </tspan><tspan
+ x="22" dy="1.204em" class="st24">keys </tspan><tspan class="st24">(</tspan><tspan class="st24">&#60;</tspan><tspan
+ class="st24">28</tspan><tspan class="st24">)</tspan></text> </g>
+ <g id="shape129-349" v:mID="129" v:groupContext="shape" transform="translate(336.326,-26.2991)">
+ <title>Sheet.129</title>
+ <desc>Total # of keys in group so far</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="39.6784" cy="333.515" width="79.36" height="27"/>
+ <rect x="0" y="320.015" width="79.3567" height="27" class="st25"/>
+ <text x="4.5" y="329.92" class="st26" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Total # of keys <tspan
+ x="4.39" dy="1.2em" class="st23">in group so far</tspan></text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/efd_i8.svg b/doc/guides/prog_guide/img/efd_i8.svg
new file mode 100644
index 00000000..d0fd463a
--- /dev/null
+++ b/doc/guides/prog_guide/img/efd_i8.svg
@@ -0,0 +1,182 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export efd_i9.svg Page-2 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="4.98372in" height="2.08442in"
+ viewBox="0 0 358.828 150.078" xml:space="preserve" color-interpolation-filters="sRGB" class="st8">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud v:nameU="msvSubprocessMaster" v:prompt="" v:val="VT4(Rectangle)"/>
+ <v:ud v:nameU="msvNoAutoConnect" v:val="VT0(1):26"/>
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {fill:#ffffff;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st2 {stroke:#00aeef;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st3 {fill:none;stroke:#00aeef;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st4 {stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st5 {fill:#000000;font-family:Intel Clear;font-size:0.998566em}
+ .st6 {fill:#c00000;font-family:Intel Clear;font-size:0.828804em;font-weight:bold}
+ .st7 {fill:#0071c5;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st8 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <g v:mID="4" v:index="2" v:groupContext="foregroundPage">
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeOrder" v:val="VT0(0):26"/>
+ </v:userDefs>
+ <title>Page-2</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="shape4-1" v:mID="4" v:groupContext="shape" transform="translate(206.306,-19.0195)">
+ <title>Sheet.4</title>
+ <path d="M0 38.04 L0 150.08 L133.5 150.08 L133.5 38.04 L0 38.04 L0 38.04 Z" class="st1"/>
+ </g>
+ <g id="shape5-3" v:mID="5" v:groupContext="shape" transform="translate(206.306,-19.0195)">
+ <title>Sheet.5</title>
+ <path d="M0 38.04 L133.5 38.04 L133.5 150.08 L0 150.08 L0 38.04" class="st2"/>
+ </g>
+ <g id="shape6-6" v:mID="6" v:groupContext="shape" transform="translate(215.55,-70.7853)">
+ <title>Sheet.6</title>
+ <path d="M0 121.92 C0 118.82 2.54 116.29 5.64 116.29 L110.69 116.29 C113.81 116.29 116.33 118.82 116.33 121.92 L116.33
+ 144.45 C116.33 147.56 113.81 150.08 110.69 150.08 L5.64 150.08 C2.54 150.08 0 147.56 0 144.45 L0 121.92
+ Z" class="st1"/>
+ </g>
+ <g id="shape7-8" v:mID="7" v:groupContext="shape" transform="translate(215.55,-70.7853)">
+ <title>Sheet.7</title>
+ <path d="M0 121.92 C0 118.82 2.54 116.29 5.64 116.29 L110.69 116.29 C113.81 116.29 116.33 118.82 116.33 121.92 L116.33
+ 144.45 C116.33 147.56 113.81 150.08 110.69 150.08 L5.64 150.08 C2.54 150.08 0 147.56 0 144.45 L0 121.92
+ Z" class="st3"/>
+ </g>
+ <g id="shape8-10" v:mID="8" v:groupContext="shape" transform="translate(242.756,-86.1914)">
+ <title>Sheet.8</title>
+ <desc>hash_index</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="36.9951" cy="142.887" width="74" height="14.3829"/>
+ <path d="M73.99 135.7 L0 135.7 L0 150.08 L73.99 150.08 L73.99 135.7" class="st4"/>
+ <text x="6.29" y="146.48" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash_index</text> </g>
+ <g id="shape9-14" v:mID="9" v:groupContext="shape" transform="translate(229.67,-71.812)">
+ <title>Sheet.9</title>
+ <desc>(integer, 16 bits)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="52.0635" cy="142.887" width="104.13" height="14.3829"/>
+ <path d="M104.13 135.7 L0 135.7 L0 150.08 L104.13 150.08 L104.13 135.7" class="st4"/>
+ <text x="8.25" y="146.48" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(integer, 16 bits)</text> </g>
+ <g id="shape10-18" v:mID="10" v:groupContext="shape" transform="translate(215.55,-27.1678)">
+ <title>Sheet.10</title>
+ <path d="M0 121.92 C0 118.82 2.54 116.29 5.64 116.29 L110.69 116.29 C113.81 116.29 116.33 118.82 116.33 121.92 L116.33
+ 144.45 C116.33 147.56 113.81 150.08 110.69 150.08 L5.64 150.08 C2.54 150.08 0 147.56 0 144.45 L0 121.92
+ Z" class="st1"/>
+ </g>
+ <g id="shape11-20" v:mID="11" v:groupContext="shape" transform="translate(215.55,-27.1678)">
+ <title>Sheet.11</title>
+ <path d="M0 121.92 C0 118.82 2.54 116.29 5.64 116.29 L110.69 116.29 C113.81 116.29 116.33 118.82 116.33 121.92 L116.33
+ 144.45 C116.33 147.56 113.81 150.08 110.69 150.08 L5.64 150.08 C2.54 150.08 0 147.56 0 144.45 L0 121.92
+ Z" class="st3"/>
+ </g>
+ <g id="shape12-22" v:mID="12" v:groupContext="shape" transform="translate(237.836,-42.6033)">
+ <title>Sheet.12</title>
+ <desc>lookup_table</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="42.693" cy="142.887" width="85.39" height="14.3829"/>
+ <path d="M85.39 135.7 L0 135.7 L0 150.08 L85.39 150.08 L85.39 135.7" class="st4"/>
+ <text x="7.03" y="146.48" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>lookup_table</text> </g>
+ <g id="shape13-26" v:mID="13" v:groupContext="shape" transform="translate(251.643,-28.2239)">
+ <title>Sheet.13</title>
+ <desc>(16 bits)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="26.9562" cy="142.887" width="53.92" height="14.3829"/>
+ <path d="M53.91 135.7 L0 135.7 L0 150.08 L53.91 150.08 L53.91 135.7" class="st4"/>
+ <text x="4.98" y="146.48" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(16 bits)</text> </g>
+ <g id="shape14-30" v:mID="14" v:groupContext="shape" transform="translate(213.473,-114.303)">
+ <title>Sheet.14</title>
+ <desc>Group ID: 0x0102</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="47.976" cy="144.109" width="95.96" height="11.9384"/>
+ <path d="M95.95 138.14 L0 138.14 L0 150.08 L95.95 150.08 L95.95 138.14" class="st4"/>
+ <text x="7.47" y="147.09" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Group ID: 0x0102</text> </g>
+ <g id="shape15-34" v:mID="15" v:groupContext="shape" transform="translate(19.0195,-99.4242)">
+ <title>Sheet.15</title>
+ <path d="M0 129.31 C0 127.02 1.87 125.15 4.16 125.15 L109.18 125.15 C111.47 125.15 113.33 127.02 113.33 129.31 L113.33
+ 145.93 C113.33 148.22 111.47 150.08 109.18 150.08 L4.16 150.08 C1.87 150.08 0 148.22 0 145.93 L0 129.31
+ Z" class="st1"/>
+ </g>
+ <g id="shape16-36" v:mID="16" v:groupContext="shape" transform="translate(19.0195,-99.4242)">
+ <title>Sheet.16</title>
+ <path d="M0 129.31 C0 127.02 1.87 125.15 4.16 125.15 L109.18 125.15 C111.47 125.15 113.33 127.02 113.33 129.31 L113.33
+ 145.93 C113.33 148.22 111.47 150.08 109.18 150.08 L4.16 150.08 C1.87 150.08 0 148.22 0 145.93 L0 129.31
+ Z" class="st3"/>
+ </g>
+ <g id="shape17-38" v:mID="17" v:groupContext="shape" transform="translate(33.9485,-103.285)">
+ <title>Sheet.17</title>
+ <desc>Key1: Value = 0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="49.6862" cy="142.887" width="99.38" height="14.3829"/>
+ <path d="M99.37 135.7 L0 135.7 L0 150.08 L99.37 150.08 L99.37 135.7" class="st4"/>
+ <text x="7.94" y="146.48" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key1: Value = 0</text> </g>
+ <g id="shape18-42" v:mID="18" v:groupContext="shape" transform="translate(19.0195,-74.6198)">
+ <title>Sheet.18</title>
+ <path d="M0 129.31 C0 127.02 1.87 125.15 4.16 125.15 L109.18 125.15 C111.47 125.15 113.33 127.02 113.33 129.31 L113.33
+ 145.93 C113.33 148.22 111.47 150.08 109.18 150.08 L4.16 150.08 C1.87 150.08 0 148.22 0 145.93 L0 129.31
+ Z" class="st1"/>
+ </g>
+ <g id="shape19-44" v:mID="19" v:groupContext="shape" transform="translate(19.0195,-74.6198)">
+ <title>Sheet.19</title>
+ <path d="M0 129.31 C0 127.02 1.87 125.15 4.16 125.15 L109.18 125.15 C111.47 125.15 113.33 127.02 113.33 129.31 L113.33
+ 145.93 C113.33 148.22 111.47 150.08 109.18 150.08 L4.16 150.08 C1.87 150.08 0 148.22 0 145.93 L0 129.31
+ Z" class="st3"/>
+ </g>
+ <g id="shape20-46" v:mID="20" v:groupContext="shape" transform="translate(33.9485,-78.4626)">
+ <title>Sheet.20</title>
+ <desc>Key3: Value = 1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="49.6862" cy="142.887" width="99.38" height="14.3829"/>
+ <path d="M99.37 135.7 L0 135.7 L0 150.08 L99.37 150.08 L99.37 135.7" class="st4"/>
+ <text x="7.94" y="146.48" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key3: Value = 1</text> </g>
+ <g id="shape21-50" v:mID="21" v:groupContext="shape" transform="translate(19.0195,-49.5757)">
+ <title>Sheet.21</title>
+ <path d="M0 129.21 C0 126.91 1.88 125.03 4.19 125.03 L109.15 125.03 C111.46 125.03 113.33 126.91 113.33 129.21 L113.33
+ 145.91 C113.33 148.21 111.46 150.08 109.15 150.08 L4.19 150.08 C1.88 150.08 0 148.21 0 145.91 L0 129.21
+ Z" class="st1"/>
+ </g>
+ <g id="shape22-52" v:mID="22" v:groupContext="shape" transform="translate(19.0195,-49.5757)">
+ <title>Sheet.22</title>
+ <path d="M0 129.21 C0 126.91 1.88 125.03 4.19 125.03 L109.15 125.03 C111.46 125.03 113.33 126.91 113.33 129.21 L113.33
+ 145.91 C113.33 148.21 111.46 150.08 109.15 150.08 L4.19 150.08 C1.88 150.08 0 148.21 0 145.91 L0 129.21
+ Z" class="st3"/>
+ </g>
+ <g id="shape23-54" v:mID="23" v:groupContext="shape" transform="translate(33.9485,-53.4903)">
+ <title>Sheet.23</title>
+ <desc>Key4: Value = 0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="49.6862" cy="142.887" width="99.38" height="14.3829"/>
+ <path d="M99.37 135.7 L0 135.7 L0 150.08 L99.37 150.08 L99.37 135.7" class="st4"/>
+ <text x="7.94" y="146.48" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key4: Value = 0</text> </g>
+ <g id="shape24-58" v:mID="24" v:groupContext="shape" transform="translate(19.0195,-25.0109)">
+ <title>Sheet.24</title>
+ <path d="M0 129.21 C0 126.91 1.88 125.03 4.19 125.03 L109.15 125.03 C111.46 125.03 113.33 126.91 113.33 129.21 L113.33
+ 145.91 C113.33 148.21 111.46 150.08 109.15 150.08 L4.19 150.08 C1.88 150.08 0 148.21 0 145.91 L0 129.21
+ Z" class="st1"/>
+ </g>
+ <g id="shape25-60" v:mID="25" v:groupContext="shape" transform="translate(19.0195,-25.0109)">
+ <title>Sheet.25</title>
+ <path d="M0 129.21 C0 126.91 1.88 125.03 4.19 125.03 L109.15 125.03 C111.46 125.03 113.33 126.91 113.33 129.21 L113.33
+ 145.91 C113.33 148.21 111.46 150.08 109.15 150.08 L4.19 150.08 C1.88 150.08 0 148.21 0 145.91 L0 129.21
+ Z" class="st3"/>
+ </g>
+ <g id="shape26-62" v:mID="26" v:groupContext="shape" transform="translate(33.9485,-28.927)">
+ <title>Sheet.26</title>
+ <desc>Key7: Value = 1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="49.6862" cy="142.887" width="99.38" height="14.3829"/>
+ <path d="M99.37 135.7 L0 135.7 L0 150.08 L99.37 150.08 L99.37 135.7" class="st4"/>
+ <text x="7.94" y="146.48" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key7: Value = 1</text> </g>
+ <g id="shape27-66" v:mID="27" v:groupContext="shape" transform="translate(141.536,-51.5529)">
+ <title>Sheet.27</title>
+ <path d="M0 115.39 L22.75 115.39 L22.75 103.82 L45.5 126.95 L22.75 150.08 L22.75 138.51 L0 138.51 L0 115.39 Z"
+ class="st7"/>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/efd_i9.svg b/doc/guides/prog_guide/img/efd_i9.svg
new file mode 100644
index 00000000..b2e385d1
--- /dev/null
+++ b/doc/guides/prog_guide/img/efd_i9.svg
@@ -0,0 +1,390 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export efd_i10.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="9.8125in" height="3.76365in"
+ viewBox="0 0 706.5 270.983" xml:space="preserve" color-interpolation-filters="sRGB" class="st9">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud v:nameU="msvSubprocessMaster" v:prompt="" v:val="VT4(Rectangle)"/>
+ <v:ud v:nameU="msvNoAutoConnect" v:val="VT0(1):26"/>
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {fill:#ffffff;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st2 {fill:none;stroke:#00aeef;stroke-linecap:round;stroke-linejoin:round;stroke-width:2.03901}
+ .st3 {stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st4 {fill:#000000;font-family:Arial;font-size:0.998566em}
+ .st5 {fill:#000000;font-family:Arial;font-size:0.918686em}
+ .st6 {fill:#0071c5;stroke:none;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.75}
+ .st7 {fill:#ffffff;font-family:Arial;font-size:0.998566em}
+ .st8 {fill:#ffffff;font-family:Arial;font-size:1.49785em}
+ .st9 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeOrder" v:val="VT0(0):26"/>
+ </v:userDefs>
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="shape68-1" v:mID="68" v:groupContext="shape" transform="translate(196.523,-158.978)">
+ <title>Sheet.68</title>
+ <path d="M0 250.22 C0 247.95 1.89 246.06 4.17 246.06 L317.25 246.06 C319.53 246.06 321.39 247.95 321.39 250.22 L321.39
+ 266.85 C321.39 269.13 319.53 270.98 317.25 270.98 L4.17 270.98 C1.89 270.98 0 269.13 0 266.85 L0 250.22
+ Z" class="st1"/>
+ </g>
+ <g id="shape69-3" v:mID="69" v:groupContext="shape" transform="translate(196.523,-158.978)">
+ <title>Sheet.69</title>
+ <path d="M0 250.22 C0 247.95 1.89 246.06 4.17 246.06 L317.25 246.06 C319.53 246.06 321.39 247.95 321.39 250.22 L321.39
+ 266.85 C321.39 269.13 319.53 270.98 317.25 270.98 L4.17 270.98 C1.89 270.98 0 269.13 0 266.85 L0 250.22
+ Z" class="st2"/>
+ </g>
+ <g id="shape70-5" v:mID="70" v:groupContext="shape" transform="translate(186.139,-162.437)">
+ <title>Sheet.70</title>
+ <desc>(hash(key, seed1) + hash_index *</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="106.671" cy="263.792" width="213.35" height="14.3829"/>
+ <path d="M213.34 256.6 L0 256.6 L0 270.98 L213.34 270.98 L213.34 256.6" class="st3"/>
+ <text x="17.24" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>(hash(key, seed1) + hash_index * </text> </g>
+ <g id="shape71-9" v:mID="71" v:groupContext="shape" transform="translate(381.48,-162.845)">
+ <title>Sheet.71</title>
+ <desc>hash(key</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="27.4843" cy="264.367" width="54.97" height="13.2327"/>
+ <path d="M54.97 257.75 L0 257.75 L0 270.98 L54.97 270.98 L54.97 257.75" class="st3"/>
+ <text x="5.12" y="267.67" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash(key</text> </g>
+ <g id="shape72-13" v:mID="72" v:groupContext="shape" transform="translate(424.755,-162.437)">
+ <title>Sheet.72</title>
+ <desc>, seed2)) % 16</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="46.7254" cy="263.792" width="93.46" height="14.3829"/>
+ <path d="M93.45 256.6 L0 256.6 L0 270.98 L93.45 270.98 L93.45 256.6" class="st3"/>
+ <text x="7.76" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>, seed2)) % 16</text> </g>
+ <g id="shape73-17" v:mID="73" v:groupContext="shape" transform="translate(524.094,-148.373)">
+ <title>Sheet.73</title>
+ <path d="M0 236.29 L22.75 236.29 L22.75 224.73 L45.5 247.86 L22.75 270.98 L22.75 259.42 L0 259.42 L0 236.29 Z"
+ class="st6"/>
+ </g>
+ <g id="shape74-19" v:mID="74" v:groupContext="shape" transform="translate(574.148,-217.574)">
+ <title>Sheet.74</title>
+ <path d="M0 244.83 C-0 241.95 2.37 239.59 5.25 239.59 L108.11 239.59 C110.99 239.59 113.33 241.95 113.33 244.83 L113.33
+ 265.77 C113.33 268.65 110.99 270.98 108.11 270.98 L5.25 270.98 C2.37 270.98 0 268.65 0 265.77 L0 244.83
+ Z" class="st1"/>
+ </g>
+ <g id="shape75-21" v:mID="75" v:groupContext="shape" transform="translate(574.148,-217.574)">
+ <title>Sheet.75</title>
+ <path d="M0 244.83 C-0 241.95 2.37 239.59 5.25 239.59 L108.11 239.59 C110.99 239.59 113.33 241.95 113.33 244.83 L113.33
+ 265.77 C113.33 268.65 110.99 270.98 108.11 270.98 L5.25 270.98 C2.37 270.98 0 268.65 0 265.77 L0 244.83
+ Z" class="st2"/>
+ </g>
+ <g id="shape76-23" v:mID="76" v:groupContext="shape" transform="translate(584.296,-231.499)">
+ <title>Sheet.76</title>
+ <desc>lookup_table</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="40.993" cy="263.792" width="81.99" height="14.3829"/>
+ <path d="M81.99 256.6 L0 256.6 L0 270.98 L81.99 270.98 L81.99 256.6" class="st3"/>
+ <text x="7.01" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>lookup_table</text> </g>
+ <g id="shape77-27" v:mID="77" v:groupContext="shape" transform="translate(655.369,-231.499)">
+ <title>Sheet.77</title>
+ <desc>bit</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="11.1076" cy="263.792" width="22.22" height="14.3829"/>
+ <path d="M22.22 256.6 L0 256.6 L0 270.98 L22.22 270.98 L22.22 256.6" class="st3"/>
+ <text x="4.78" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>bit </text> </g>
+ <g id="shape78-31" v:mID="78" v:groupContext="shape" transform="translate(588.858,-217.12)">
+ <title>Sheet.78</title>
+ <desc>index for key1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="44.8113" cy="263.792" width="89.63" height="14.3829"/>
+ <path d="M89.62 256.6 L0 256.6 L0 270.98 L89.62 270.98 L89.62 256.6" class="st3"/>
+ <text x="7.51" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>index for key1</text> </g>
+ <g id="shape79-35" v:mID="79" v:groupContext="shape" transform="translate(573.548,-178.869)">
+ <title>Sheet.79</title>
+ <path d="M0 244.83 C-0 241.95 2.37 239.59 5.25 239.59 L108.11 239.59 C110.99 239.59 113.33 241.95 113.33 244.83 L113.33
+ 265.77 C113.33 268.65 110.99 270.98 108.11 270.98 L5.25 270.98 C2.37 270.98 0 268.65 0 265.77 L0 244.83
+ Z" class="st1"/>
+ </g>
+ <g id="shape80-37" v:mID="80" v:groupContext="shape" transform="translate(573.548,-178.869)">
+ <title>Sheet.80</title>
+ <path d="M0 244.83 C-0 241.95 2.37 239.59 5.25 239.59 L108.11 239.59 C110.99 239.59 113.33 241.95 113.33 244.83 L113.33
+ 265.77 C113.33 268.65 110.99 270.98 108.11 270.98 L5.25 270.98 C2.37 270.98 0 268.65 0 265.77 L0 244.83
+ Z" class="st2"/>
+ </g>
+ <g id="shape81-39" v:mID="81" v:groupContext="shape" transform="translate(584.296,-192.768)">
+ <title>Sheet.81</title>
+ <desc>lookup_table</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="40.993" cy="263.792" width="81.99" height="14.3829"/>
+ <path d="M81.99 256.6 L0 256.6 L0 270.98 L81.99 270.98 L81.99 256.6" class="st3"/>
+ <text x="7.01" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>lookup_table</text> </g>
+ <g id="shape82-43" v:mID="82" v:groupContext="shape" transform="translate(655.369,-192.768)">
+ <title>Sheet.82</title>
+ <desc>bit</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="11.1076" cy="263.792" width="22.22" height="14.3829"/>
+ <path d="M22.22 256.6 L0 256.6 L0 270.98 L22.22 270.98 L22.22 256.6" class="st3"/>
+ <text x="4.78" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>bit </text> </g>
+ <g id="shape83-47" v:mID="83" v:groupContext="shape" transform="translate(588.858,-178.388)">
+ <title>Sheet.83</title>
+ <desc>index for key3</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="44.8113" cy="263.792" width="89.63" height="14.3829"/>
+ <path d="M89.62 256.6 L0 256.6 L0 270.98 L89.62 270.98 L89.62 256.6" class="st3"/>
+ <text x="7.51" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>index for key3</text> </g>
+ <g id="shape84-51" v:mID="84" v:groupContext="shape" transform="translate(574.148,-139.326)">
+ <title>Sheet.84</title>
+ <path d="M0 244.83 C-0 241.95 2.37 239.59 5.25 239.59 L108.11 239.59 C110.99 239.59 113.33 241.95 113.33 244.83 L113.33
+ 265.77 C113.33 268.65 110.99 270.98 108.11 270.98 L5.25 270.98 C2.37 270.98 0 268.65 0 265.77 L0 244.83
+ Z" class="st1"/>
+ </g>
+ <g id="shape85-53" v:mID="85" v:groupContext="shape" transform="translate(574.148,-139.326)">
+ <title>Sheet.85</title>
+ <path d="M0 244.83 C-0 241.95 2.37 239.59 5.25 239.59 L108.11 239.59 C110.99 239.59 113.33 241.95 113.33 244.83 L113.33
+ 265.77 C113.33 268.65 110.99 270.98 108.11 270.98 L5.25 270.98 C2.37 270.98 0 268.65 0 265.77 L0 244.83
+ Z" class="st2"/>
+ </g>
+ <g id="shape86-55" v:mID="86" v:groupContext="shape" transform="translate(584.296,-153.227)">
+ <title>Sheet.86</title>
+ <desc>lookup_table</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="40.993" cy="263.792" width="81.99" height="14.3829"/>
+ <path d="M81.99 256.6 L0 256.6 L0 270.98 L81.99 270.98 L81.99 256.6" class="st3"/>
+ <text x="7.01" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>lookup_table</text> </g>
+ <g id="shape87-59" v:mID="87" v:groupContext="shape" transform="translate(655.369,-153.227)">
+ <title>Sheet.87</title>
+ <desc>bit</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="11.1076" cy="263.792" width="22.22" height="14.3829"/>
+ <path d="M22.22 256.6 L0 256.6 L0 270.98 L22.22 270.98 L22.22 256.6" class="st3"/>
+ <text x="4.78" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>bit </text> </g>
+ <g id="shape88-63" v:mID="88" v:groupContext="shape" transform="translate(588.858,-138.848)">
+ <title>Sheet.88</title>
+ <desc>index for key4</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="44.8113" cy="263.792" width="89.63" height="14.3829"/>
+ <path d="M89.62 256.6 L0 256.6 L0 270.98 L89.62 270.98 L89.62 256.6" class="st3"/>
+ <text x="7.51" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>index for key4</text> </g>
+ <g id="shape89-67" v:mID="89" v:groupContext="shape" transform="translate(574.148,-100.622)">
+ <title>Sheet.89</title>
+ <path d="M0 244.83 C-0 241.95 2.37 239.59 5.25 239.59 L108.11 239.59 C110.99 239.59 113.33 241.95 113.33 244.83 L113.33
+ 265.77 C113.33 268.65 110.99 270.98 108.11 270.98 L5.25 270.98 C2.37 270.98 0 268.65 0 265.77 L0 244.83
+ Z" class="st1"/>
+ </g>
+ <g id="shape90-69" v:mID="90" v:groupContext="shape" transform="translate(574.148,-100.622)">
+ <title>Sheet.90</title>
+ <path d="M0 244.83 C-0 241.95 2.37 239.59 5.25 239.59 L108.11 239.59 C110.99 239.59 113.33 241.95 113.33 244.83 L113.33
+ 265.77 C113.33 268.65 110.99 270.98 108.11 270.98 L5.25 270.98 C2.37 270.98 0 268.65 0 265.77 L0 244.83
+ Z" class="st2"/>
+ </g>
+ <g id="shape91-71" v:mID="91" v:groupContext="shape" transform="translate(584.296,-114.496)">
+ <title>Sheet.91</title>
+ <desc>lookup_table</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="40.993" cy="263.792" width="81.99" height="14.3829"/>
+ <path d="M81.99 256.6 L0 256.6 L0 270.98 L81.99 270.98 L81.99 256.6" class="st3"/>
+ <text x="7.01" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>lookup_table</text> </g>
+ <g id="shape92-75" v:mID="92" v:groupContext="shape" transform="translate(655.369,-114.496)">
+ <title>Sheet.92</title>
+ <desc>bit</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="11.1076" cy="263.792" width="22.22" height="14.3829"/>
+ <path d="M22.22 256.6 L0 256.6 L0 270.98 L22.22 270.98 L22.22 256.6" class="st3"/>
+ <text x="4.78" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>bit </text> </g>
+ <g id="shape93-79" v:mID="93" v:groupContext="shape" transform="translate(588.858,-100.117)">
+ <title>Sheet.93</title>
+ <desc>index for key7</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="44.8113" cy="263.792" width="89.63" height="14.3829"/>
+ <path d="M89.62 256.6 L0 256.6 L0 270.98 L89.62 270.98 L89.62 256.6" class="st3"/>
+ <text x="7.51" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>index for key7</text> </g>
+ <g id="shape94-83" v:mID="94" v:groupContext="shape" transform="translate(205.227,-191.137)">
+ <title>Sheet.94</title>
+ <path d="M0 217.76 C0 213 3.87 209.14 8.64 209.14 L14.53 209.14 L14.53 209.14 L36.32 209.14 L78.52 209.14 C83.3 209.14
+ 87.16 213 87.16 217.76 L87.16 239.33 L87.16 239.33 L87.16 252.27 L87.16 252.27 C87.16 257.05 83.3 260.9
+ 78.52 260.9 L36.32 260.9 L18.46 270.98 L14.53 260.9 L8.64 260.9 C3.87 260.9 0 257.05 0 252.27 L0 239.33
+ L0 239.33 L0 217.76 Z" class="st6"/>
+ </g>
+ <g id="shape95-85" v:mID="95" v:groupContext="shape" transform="translate(214.98,-225.215)">
+ <title>Sheet.95</title>
+ <desc>CRC32 (32</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="38.2947" cy="263.792" width="76.59" height="14.3829"/>
+ <path d="M76.59 256.6 L0 256.6 L0 270.98 L76.59 270.98 L76.59 256.6" class="st3"/>
+ <text x="8.33" y="267.39" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>CRC32 (32 </text> </g>
+ <g id="shape96-89" v:mID="96" v:groupContext="shape" transform="translate(222.123,-210.835)">
+ <title>Sheet.96</title>
+ <desc>bit output)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="32.5584" cy="263.792" width="65.12" height="14.3829"/>
+ <path d="M65.12 256.6 L0 256.6 L0 270.98 L65.12 270.98 L65.12 256.6" class="st3"/>
+ <text x="5.91" y="267.39" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>bit output)</text> </g>
+ <g id="shape97-93" v:mID="97" v:groupContext="shape" transform="translate(305.473,-188.366)">
+ <title>Sheet.97</title>
+ <path d="M0 226.84 C0 223.28 2.9 220.39 6.47 220.39 L21.37 220.39 L21.37 220.39 L53.42 220.39 L121.77 220.39 C125.34
+ 220.39 128.22 223.28 128.22 226.84 L128.22 242.97 L128.22 242.97 L128.22 252.65 L128.22 252.65 C128.22 256.21
+ 125.34 259.09 121.77 259.09 L53.42 259.09 L38.73 270.98 L21.37 259.09 L6.47 259.09 C2.9 259.09 0 256.21
+ 0 252.65 L0 242.97 L0 242.97 L0 226.84 Z" class="st6"/>
+ </g>
+ <g id="shape98-95" v:mID="98" v:groupContext="shape" transform="translate(318.48,-217.733)">
+ <title>Sheet.98</title>
+ <desc>Goal: Find a valid</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="57.4478" cy="263.792" width="114.9" height="14.3829"/>
+ <path d="M114.9 256.6 L0 256.6 L0 270.98 L114.9 270.98 L114.9 256.6" class="st3"/>
+ <text x="10.82" y="267.39" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Goal: Find a valid </text> </g>
+ <g id="shape99-99" v:mID="99" v:groupContext="shape" transform="translate(339.077,-203.354)">
+ <title>Sheet.99</title>
+ <desc>hash_index</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="37.1611" cy="263.792" width="74.33" height="14.3829"/>
+ <path d="M74.32 256.6 L0 256.6 L0 270.98 L74.32 270.98 L74.32 256.6" class="st3"/>
+ <text x="6.51" y="267.39" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>hash_index</text> </g>
+ <g id="shape100-103" v:mID="100" v:groupContext="shape" transform="translate(438.135,-185.939)">
+ <title>Sheet.100</title>
+ <path d="M0 217.36 C0 213.8 2.91 210.89 6.48 210.89 L21.37 210.89 L21.37 210.89 L53.42 210.89 L121.77 210.89 C125.34
+ 210.89 128.22 213.8 128.22 217.36 L128.22 233.48 L128.22 233.48 L128.22 243.15 L128.22 243.15 C128.22 246.72
+ 125.34 249.59 121.77 249.59 L53.42 249.59 L54.75 270.98 L21.37 249.59 L6.48 249.59 C2.91 249.59 0 246.72
+ 0 243.15 L0 233.48 L0 233.48 L0 217.36 Z" class="st6"/>
+ </g>
+ <g id="shape101-105" v:mID="101" v:groupContext="shape" transform="translate(448.763,-224.802)">
+ <title>Sheet.101</title>
+ <desc>Lookup Table has</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="58.6085" cy="263.792" width="117.22" height="14.3829"/>
+ <path d="M117.22 256.6 L0 256.6 L0 270.98 L117.22 270.98 L117.22 256.6" class="st3"/>
+ <text x="10.98" y="267.39" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Lookup Table has </text> </g>
+ <g id="shape102-109" v:mID="102" v:groupContext="shape" transform="translate(484.549,-210.423)">
+ <title>Sheet.102</title>
+ <desc>16 bits</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="22.2166" cy="263.792" width="44.44" height="14.3829"/>
+ <path d="M44.43 256.6 L0 256.6 L0 270.98 L44.43 270.98 L44.43 256.6" class="st3"/>
+ <text x="4.56" y="267.39" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>16 bits</text> </g>
+ <g id="shape103-113" v:mID="103" v:groupContext="shape" transform="translate(369.583,-90.8555)">
+ <title>Sheet.103</title>
+ <path d="M0 227.76 C0 222.98 3.89 219.1 8.67 219.1 L14.53 219.1 L34.47 205.09 L36.32 219.1 L78.5 219.1 C83.29 219.1 87.16
+ 222.98 87.16 227.76 L87.16 227.76 L87.16 240.73 L87.16 262.34 C87.16 267.12 83.29 270.98 78.5 270.98 L36.32
+ 270.98 L14.53 270.98 L14.53 270.98 L8.67 270.98 C3.89 270.98 0 267.12 0 262.34 L0 240.73 L0 227.76 L0 227.76
+ Z" class="st6"/>
+ </g>
+ <g id="shape104-115" v:mID="104" v:groupContext="shape" transform="translate(383.264,-114.932)">
+ <title>Sheet.104</title>
+ <desc>CRC32 (32</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="38.2947" cy="263.792" width="76.59" height="14.3829"/>
+ <path d="M76.59 256.6 L0 256.6 L0 270.98 L76.59 270.98 L76.59 256.6" class="st3"/>
+ <text x="8.33" y="267.39" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>CRC32 (32 </text> </g>
+ <g id="shape105-119" v:mID="105" v:groupContext="shape" transform="translate(386.505,-100.553)">
+ <title>Sheet.105</title>
+ <desc>bit output)</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="32.5584" cy="263.792" width="65.12" height="14.3829"/>
+ <path d="M65.12 256.6 L0 256.6 L0 270.98 L65.12 270.98 L65.12 256.6" class="st3"/>
+ <text x="5.91" y="267.39" class="st7" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>bit output)</text> </g>
+ <g id="shape106-123" v:mID="106" v:groupContext="shape" transform="translate(313.397,-18)">
+ <title>Sheet.106</title>
+ <path d="M0 226.35 C0 221.43 4.02 217.42 8.94 217.42 L347.02 217.42 C351.97 217.42 355.96 221.43 355.96 226.35 L355.96
+ 262.06 C355.96 267 351.97 270.98 347.02 270.98 L8.94 270.98 C4.02 270.98 0 267 0 262.06 L0 226.35 Z"
+ class="st6"/>
+ </g>
+ <g id="shape107-125" v:mID="107" v:groupContext="shape" transform="translate(313.98,-41.963)">
+ <title>Sheet.107</title>
+ <desc>Goal is to find a hash_index that produces</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="177.75" cy="260.197" width="355.5" height="21.5726"/>
+ <path d="M355.5 249.41 L0 249.41 L0 270.98 L355.5 270.98 L355.5 249.41" class="st3"/>
+ <text x="9.88" y="265.59" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Goal is to find a hash_index that produces </text> </g>
+ <g id="shape108-129" v:mID="108" v:groupContext="shape" transform="translate(318.48,-20.3939)">
+ <title>Sheet.108</title>
+ <desc>a lookup_table with no contradictions</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="175.5" cy="260.197" width="351" height="21.5726"/>
+ <path d="M351 249.41 L0 249.41 L0 270.98 L351 270.98 L351 249.41" class="st3"/>
+ <text x="28.12" y="265.59" class="st8" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>a lookup_table with no contradictions</text> </g>
+ <g id="shape109-133" v:mID="109" v:groupContext="shape" transform="translate(18,-196.244)">
+ <title>Sheet.109</title>
+ <path d="M0 250.22 C0 247.92 1.87 246.06 4.16 246.06 L109.18 246.06 C111.47 246.06 113.33 247.92 113.33 250.22 L113.33
+ 266.83 C113.33 269.13 111.47 270.98 109.18 270.98 L4.16 270.98 C1.87 270.98 0 269.13 0 266.83 L0 250.22
+ Z" class="st1"/>
+ </g>
+ <g id="shape110-135" v:mID="110" v:groupContext="shape" transform="translate(29.8201,-196.244)">
+ <title>Sheet.110</title>
+ <path d="M0 250.22 C-0 247.92 1.67 246.06 3.73 246.06 L97.79 246.06 C99.85 246.06 101.51 247.92 101.51 250.22 L101.51
+ 266.83 C101.51 269.13 99.85 270.98 97.79 270.98 L3.73 270.98 C1.67 270.98 0 269.13 0 266.83 L0 250.22 Z"
+ class="st2"/>
+ </g>
+ <g id="shape111-137" v:mID="111" v:groupContext="shape" transform="translate(32.5663,-199.746)">
+ <title>Sheet.111</title>
+ <desc>Key1: Value = 0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="50.7562" cy="263.792" width="101.52" height="14.3829"/>
+ <path d="M101.51 256.6 L0 256.6 L0 270.98 L101.51 270.98 L101.51 256.6" class="st3"/>
+ <text x="8.29" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key1: Value = 0</text> </g>
+ <g id="shape112-141" v:mID="112" v:groupContext="shape" transform="translate(18,-171.44)">
+ <title>Sheet.112</title>
+ <path d="M0 250.22 C0 247.92 1.87 246.06 4.16 246.06 L109.18 246.06 C111.47 246.06 113.33 247.92 113.33 250.22 L113.33
+ 266.83 C113.33 269.13 111.47 270.98 109.18 270.98 L4.16 270.98 C1.87 270.98 0 269.13 0 266.83 L0 250.22
+ Z" class="st1"/>
+ </g>
+ <g id="shape113-143" v:mID="113" v:groupContext="shape" transform="translate(29.8201,-171.44)">
+ <title>Sheet.113</title>
+ <path d="M0 250.22 C0 247.92 1.67 246.06 3.73 246.06 L97.79 246.06 C99.85 246.06 101.51 247.92 101.51 250.22 L101.51
+ 266.83 C101.51 269.13 99.85 270.98 97.79 270.98 L3.73 270.98 C1.67 270.98 0 269.13 0 266.83 L0 250.22 Z"
+ class="st2"/>
+ </g>
+ <g id="shape114-145" v:mID="114" v:groupContext="shape" transform="translate(32.5663,-174.923)">
+ <title>Sheet.114</title>
+ <desc>Key3: Value = 1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="50.7562" cy="263.792" width="101.52" height="14.3829"/>
+ <path d="M101.51 256.6 L0 256.6 L0 270.98 L101.51 270.98 L101.51 256.6" class="st3"/>
+ <text x="8.29" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key3: Value = 1</text> </g>
+ <g id="shape115-149" v:mID="115" v:groupContext="shape" transform="translate(18,-146.396)">
+ <title>Sheet.115</title>
+ <path d="M0 250.12 C0 247.81 1.88 245.94 4.19 245.94 L109.15 245.94 C111.46 245.94 113.33 247.81 113.33 250.12 L113.33
+ 266.81 C113.33 269.12 111.46 270.98 109.15 270.98 L4.19 270.98 C1.88 270.98 0 269.12 0 266.81 L0 250.12
+ Z" class="st1"/>
+ </g>
+ <g id="shape116-151" v:mID="116" v:groupContext="shape" transform="translate(29.8201,-146.396)">
+ <title>Sheet.116</title>
+ <path d="M0 250.12 C0 247.81 1.68 245.94 3.75 245.94 L97.77 245.94 C99.84 245.94 101.51 247.81 101.51 250.12 L101.51
+ 266.81 C101.51 269.12 99.84 270.98 97.77 270.98 L3.75 270.98 C1.68 270.98 0 269.12 0 266.81 L0 250.12 Z"
+ class="st2"/>
+ </g>
+ <g id="shape117-153" v:mID="117" v:groupContext="shape" transform="translate(32.5663,-149.951)">
+ <title>Sheet.117</title>
+ <desc>Key4: Value = 0</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="50.7562" cy="263.792" width="101.52" height="14.3829"/>
+ <path d="M101.51 256.6 L0 256.6 L0 270.98 L101.51 270.98 L101.51 256.6" class="st3"/>
+ <text x="8.29" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key4: Value = 0</text> </g>
+ <g id="shape118-157" v:mID="118" v:groupContext="shape" transform="translate(18,-121.831)">
+ <title>Sheet.118</title>
+ <path d="M0 250.12 C0 247.81 1.88 245.94 4.19 245.94 L109.15 245.94 C111.46 245.94 113.33 247.81 113.33 250.12 L113.33
+ 266.81 C113.33 269.12 111.46 270.98 109.15 270.98 L4.19 270.98 C1.88 270.98 0 269.12 0 266.81 L0 250.12
+ Z" class="st1"/>
+ </g>
+ <g id="shape119-159" v:mID="119" v:groupContext="shape" transform="translate(29.8201,-121.831)">
+ <title>Sheet.119</title>
+ <path d="M0 250.12 C0 247.81 1.68 245.94 3.75 245.94 L97.77 245.94 C99.84 245.94 101.51 247.81 101.51 250.12 L101.51
+ 266.81 C101.51 269.12 99.84 270.98 97.77 270.98 L3.75 270.98 C1.68 270.98 0 269.12 0 266.81 L0 250.12 Z"
+ class="st2"/>
+ </g>
+ <g id="shape120-161" v:mID="120" v:groupContext="shape" transform="translate(32.5663,-125.388)">
+ <title>Sheet.120</title>
+ <desc>Key7: Value = 1</desc>
+ <v:textBlock v:margins="rect(0,0,0,0)"/>
+ <v:textRect cx="50.7562" cy="263.792" width="101.52" height="14.3829"/>
+ <path d="M101.51 256.6 L0 256.6 L0 270.98 L101.51 270.98 L101.51 256.6" class="st3"/>
+ <text x="8.29" y="267.39" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key7: Value = 1</text> </g>
+ <g id="shape121-165" v:mID="121" v:groupContext="shape" transform="translate(140.517,-148.373)">
+ <title>Sheet.121</title>
+ <path d="M0 236.29 L22.75 236.29 L22.75 224.73 L45.5 247.86 L22.75 270.98 L22.75 259.42 L0 259.42 L0 236.29 Z"
+ class="st6"/>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index e5a50a88..ef5a02ad 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -42,10 +42,12 @@ Programmer's Guide
mempool_lib
mbuf_lib
poll_mode_drv
+ rte_flow
cryptodev_lib
link_bonding_poll_mode_drv_lib
timer_lib
hash_lib
+ efd_lib
lpm_lib
lpm6_lib
packet_distrib_lib
@@ -60,6 +62,7 @@ Programmer's Guide
packet_classif_access_ctrl
packet_framework
vhost_lib
+ metrics_lib
port_hotplug_framework
source_org
dev_kit_build_system
@@ -125,10 +128,6 @@ Programmer's Guide
:numref:`figure_pkt_flow_kni` :ref:`figure_pkt_flow_kni`
-:numref:`figure_vhost_net_arch2` :ref:`figure_vhost_net_arch2`
-
-:numref:`figure_kni_traffic_flow` :ref:`figure_kni_traffic_flow`
-
:numref:`figure_pkt_proc_pipeline_qos` :ref:`figure_pkt_proc_pipeline_qos`
@@ -166,6 +165,28 @@ Programmer's Guide
:numref:`figure_figure39` :ref:`figure_figure39`
+:numref:`figure_efd1` :ref:`figure_efd1`
+
+:numref:`figure_efd2` :ref:`figure_efd2`
+
+:numref:`figure_efd3` :ref:`figure_efd3`
+
+:numref:`figure_efd4` :ref:`figure_efd4`
+
+:numref:`figure_efd5` :ref:`figure_efd5`
+
+:numref:`figure_efd6` :ref:`figure_efd6`
+
+:numref:`figure_efd7` :ref:`figure_efd7`
+
+:numref:`figure_efd8` :ref:`figure_efd8`
+
+:numref:`figure_efd9` :ref:`figure_efd9`
+
+:numref:`figure_efd10` :ref:`figure_efd10`
+
+:numref:`figure_efd11` :ref:`figure_efd11`
+
**Tables**
diff --git a/doc/guides/prog_guide/intro.rst b/doc/guides/prog_guide/intro.rst
index d6daab37..9fe1f0c3 100644
--- a/doc/guides/prog_guide/intro.rst
+++ b/doc/guides/prog_guide/intro.rst
@@ -44,7 +44,7 @@ Documentation Roadmap
The following is a list of DPDK documents in the suggested reading order:
-* **Release Notes** (this document): Provides release-specific information, including supported features,
+* **Release Notes** : Provides release-specific information, including supported features,
limitations, fixed issues, known issues and so on.
Also, provides the answers to frequently asked questions in FAQ format.
diff --git a/doc/guides/prog_guide/kernel_nic_interface.rst b/doc/guides/prog_guide/kernel_nic_interface.rst
index eb16e2e3..6f7fd287 100644
--- a/doc/guides/prog_guide/kernel_nic_interface.rst
+++ b/doc/guides/prog_guide/kernel_nic_interface.rst
@@ -168,114 +168,3 @@ The application handlers can be registered upon interface creation or explicitly
This provides flexibility in multiprocess scenarios
(where the KNI is created in the primary process but the callbacks are handled in the secondary one).
The constraint is that a single process can register and handle the requests.
-
-KNI Working as a Kernel vHost Backend
--------------------------------------
-
-vHost is a kernel module usually working as the backend of virtio (a para- virtualization driver framework)
-to accelerate the traffic from the guest to the host.
-The DPDK Kernel NIC interface provides the ability to hookup vHost traffic into userspace DPDK application.
-Together with the DPDK PMD virtio, it significantly improves the throughput between guest and host.
-In the scenario where DPDK is running as fast path in the host, kni-vhost is an efficient path for the traffic.
-
-Overview
-~~~~~~~~
-
-vHost-net has three kinds of real backend implementations. They are: 1) tap, 2) macvtap and 3) RAW socket.
-The main idea behind kni-vhost is making the KNI work as a RAW socket, attaching it as the backend instance of vHost-net.
-It is using the existing interface with vHost-net, so it does not require any kernel hacking,
-and is fully-compatible with the kernel vhost module.
-As vHost is still taking responsibility for communicating with the front-end virtio,
-it naturally supports both legacy virtio -net and the DPDK PMD virtio.
-There is a little penalty that comes from the non-polling mode of vhost.
-However, it scales throughput well when using KNI in multi-thread mode.
-
-.. _figure_vhost_net_arch2:
-
-.. figure:: img/vhost_net_arch.*
-
- vHost-net Architecture Overview
-
-
-Packet Flow
-~~~~~~~~~~~
-
-There is only a minor difference from the original KNI traffic flows.
-On transmit side, vhost kthread calls the RAW socket's ops sendmsg and it puts the packets into the KNI transmit FIFO.
-On the receive side, the kni kthread gets packets from the KNI receive FIFO, puts them into the queue of the raw socket,
-and wakes up the task in vhost kthread to begin receiving.
-All the packet copying, irrespective of whether it is on the transmit or receive side,
-happens in the context of vhost kthread.
-Every vhost-net device is exposed to a front end virtio device in the guest.
-
-.. _figure_kni_traffic_flow:
-
-.. figure:: img/kni_traffic_flow.*
-
- KNI Traffic Flow
-
-
-Sample Usage
-~~~~~~~~~~~~
-
-Before starting to use KNI as the backend of vhost, the CONFIG_RTE_KNI_VHOST configuration option must be turned on.
-Otherwise, by default, KNI will not enable its backend support capability.
-
-Of course, as a prerequisite, the vhost/vhost-net kernel CONFIG should be chosen before compiling the kernel.
-
-#. Compile the DPDK and insert uio_pci_generic/igb_uio kernel modules as normal.
-
-#. Insert the KNI kernel module:
-
- .. code-block:: console
-
- insmod ./rte_kni.ko
-
- If using KNI in multi-thread mode, use the following command line:
-
- .. code-block:: console
-
- insmod ./rte_kni.ko kthread_mode=multiple
-
-#. Running the KNI sample application:
-
- .. code-block:: console
-
- examples/kni/build/app/kni -c -0xf0 -n 4 -- -p 0x3 -P --config="(0,4,6),(1,5,7)"
-
- This command runs the kni sample application with two physical ports.
- Each port pins two forwarding cores (ingress/egress) in user space.
-
-#. Assign a raw socket to vhost-net during qemu-kvm startup.
- The DPDK does not provide a script to do this since it is easy for the user to customize.
- The following shows the key steps to launch qemu-kvm with kni-vhost:
-
- .. code-block:: bash
-
- #!/bin/bash
- echo 1 > /sys/class/net/vEth0/sock_en
- fd=`cat /sys/class/net/vEth0/sock_fd`
- qemu-kvm \
- -name vm1 -cpu host -m 2048 -smp 1 -hda /opt/vm-fc16.img \
- -netdev tap,fd=$fd,id=hostnet1,vhost=on \
- -device virti-net-pci,netdev=hostnet1,id=net1,bus=pci.0,addr=0x4
-
-It is simple to enable raw socket using sysfs sock_en and get raw socket fd using sock_fd under the KNI device node.
-
-Then, using the qemu-kvm command with the -netdev option to assign such raw socket fd as vhost's backend.
-
-.. note::
-
- The key word tap must exist as qemu-kvm now only supports vhost with a tap backend, so here we cheat qemu-kvm by an existing fd.
-
-Compatibility Configure Option
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-There is a CONFIG_RTE_KNI_VHOST_VNET_HDR_EN configuration option in DPDK configuration file.
-By default, it set to n, which means do not turn on the virtio net header,
-which is used to support additional features (such as, csum offload, vlan offload, generic-segmentation and so on),
-since the kni-vhost does not yet support those features.
-
-Even if the option is turned on, kni-vhost will ignore the information that the header contains.
-When working with legacy virtio on the guest, it is better to turn off unsupported offload features using ethtool -K.
-Otherwise, there may be problems such as an incorrect L4 checksum error.
diff --git a/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst b/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst
index 65813c9e..1ef231df 100644
--- a/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst
+++ b/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst
@@ -356,7 +356,7 @@ Using Link Bonding Devices from the EAL Command Line
Link bonding devices can be created at application startup time using the
``--vdev`` EAL command line option. The device name must start with the
-net_bond prefix followed by numbers or letters. The name must be unique for
+net_bonding prefix followed by numbers or letters. The name must be unique for
each device. Each device can have multiple options arranged in a comma
separated list. Multiple devices definitions can be arranged by calling the
``--vdev`` option multiple times.
@@ -365,7 +365,7 @@ Device names and bonding options must be separated by commas as shown below:
.. code-block:: console
- $RTE_TARGET/app/testpmd -c f -n 4 --vdev 'net_bond0,bond_opt0=..,bond opt1=..'--vdev 'net_bond1,bond _opt0=..,bond_opt1=..'
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 --vdev 'net_bonding0,bond_opt0=..,bond opt1=..'--vdev 'net_bonding1,bond _opt0=..,bond_opt1=..'
Link Bonding EAL Options
^^^^^^^^^^^^^^^^^^^^^^^^
@@ -373,7 +373,7 @@ Link Bonding EAL Options
There are multiple ways of definitions that can be assessed and combined as
long as the following two rules are respected:
-* A unique device name, in the format of net_bondX is provided,
+* A unique device name, in the format of net_bondingX is provided,
where X can be any combination of numbers and/or letters,
and the name is no greater than 32 characters long.
@@ -465,22 +465,22 @@ Create a bonded device in round robin mode with two slaves specified by their PC
.. code-block:: console
- $RTE_TARGET/app/testpmd -c '0xf' -n 4 --vdev 'net_bond0,mode=0, slave=0000:00a:00.01,slave=0000:004:00.00' -- --port-topology=chained
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=0, slave=0000:00a:00.01,slave=0000:004:00.00' -- --port-topology=chained
Create a bonded device in round robin mode with two slaves specified by their PCI address and an overriding MAC address:
.. code-block:: console
- $RTE_TARGET/app/testpmd -c '0xf' -n 4 --vdev 'net_bond0,mode=0, slave=0000:00a:00.01,slave=0000:004:00.00,mac=00:1e:67:1d:fd:1d' -- --port-topology=chained
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=0, slave=0000:00a:00.01,slave=0000:004:00.00,mac=00:1e:67:1d:fd:1d' -- --port-topology=chained
Create a bonded device in active backup mode with two slaves specified, and a primary slave specified by their PCI addresses:
.. code-block:: console
- $RTE_TARGET/app/testpmd -c '0xf' -n 4 --vdev 'net_bond0,mode=1, slave=0000:00a:00.01,slave=0000:004:00.00,primary=0000:00a:00.01' -- --port-topology=chained
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=1, slave=0000:00a:00.01,slave=0000:004:00.00,primary=0000:00a:00.01' -- --port-topology=chained
Create a bonded device in balance mode with two slaves specified by their PCI addresses, and a transmission policy of layer 3 + 4 forwarding:
.. code-block:: console
- $RTE_TARGET/app/testpmd -c '0xf' -n 4 --vdev 'net_bond0,mode=2, slave=0000:00a:00.01,slave=0000:004:00.00,xmit_policy=l34' -- --port-topology=chained
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 --vdev 'net_bonding0,mode=2, slave=0000:00a:00.01,slave=0000:004:00.00,xmit_policy=l34' -- --port-topology=chained
diff --git a/doc/guides/prog_guide/lpm6_lib.rst b/doc/guides/prog_guide/lpm6_lib.rst
index 0aea5c5c..f7915073 100644
--- a/doc/guides/prog_guide/lpm6_lib.rst
+++ b/doc/guides/prog_guide/lpm6_lib.rst
@@ -53,7 +53,7 @@ several thousand IPv6 rules, but the number can vary depending on the case.
An LPM prefix is represented by a pair of parameters (128-bit key, depth), with depth in the range of 1 to 128.
An LPM rule is represented by an LPM prefix and some user data associated with the prefix.
The prefix serves as the unique identifier for the LPM rule.
-In this implementation, the user data is 1-byte long and is called "next hop",
+In this implementation, the user data is 21-bits long and is called "next hop",
which corresponds to its main use of storing the ID of the next hop in a routing table entry.
The main methods exported for the LPM component are:
diff --git a/doc/guides/prog_guide/mbuf_lib.rst b/doc/guides/prog_guide/mbuf_lib.rst
index 8e616826..6e73fc5a 100644
--- a/doc/guides/prog_guide/mbuf_lib.rst
+++ b/doc/guides/prog_guide/mbuf_lib.rst
@@ -175,8 +175,8 @@ a vxlan-encapsulated tcp packet:
set out_ip checksum to 0 in the packet
set out_udp checksum to pseudo header using rte_ipv4_phdr_cksum()
- This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM
- and DEV_TX_OFFLOAD_UDP_CKSUM.
+ This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM
+ and DEV_TX_OFFLOAD_UDP_CKSUM.
- calculate checksum of in_ip::
@@ -228,8 +228,8 @@ a vxlan-encapsulated tcp packet:
set in_ip checksum to 0 in the packet
set in_tcp checksum to pseudo header using rte_ipv4_phdr_cksum()
- This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM,
- DEV_TX_OFFLOAD_UDP_CKSUM and DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM.
+ This is supported on hardware advertising DEV_TX_OFFLOAD_IPV4_CKSUM,
+ DEV_TX_OFFLOAD_UDP_CKSUM and DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM.
The list of flags and their precise meaning is described in the mbuf API
documentation (rte_mbuf.h). Also refer to the testpmd source code
@@ -253,7 +253,8 @@ Similarly, whenever the indirect buffer is detached, the reference counter on th
If the resulting reference counter is equal to 0, the direct buffer is freed since it is no longer in use.
There are a few things to remember when dealing with indirect buffers.
-First of all, it is not possible to attach an indirect buffer to another indirect buffer.
+First of all, an indirect buffer is never attached to another indirect buffer.
+Attempting to attach buffer A to indirect buffer B that is attached to C, makes rte_pktmbuf_attach() automatically attach A to C, effectively cloning B.
Secondly, for a buffer to become indirect, its reference counter must be equal to 1,
that is, it must not be already referenced by another indirect buffer.
Finally, it is not possible to reattach an indirect buffer to the direct buffer (unless it is detached first).
diff --git a/doc/guides/prog_guide/metrics_lib.rst b/doc/guides/prog_guide/metrics_lib.rst
new file mode 100644
index 00000000..702c29d9
--- /dev/null
+++ b/doc/guides/prog_guide/metrics_lib.rst
@@ -0,0 +1,299 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+.. _Metrics_Library:
+
+Metrics Library
+===============
+
+The Metrics library implements a mechanism by which *producers* can
+publish numeric information for later querying by *consumers*. In
+practice producers will typically be other libraries or primary
+processes, whereas consumers will typically be applications.
+
+Metrics themselves are statistics that are not generated by PMDs. Metric
+information is populated using a push model, where producers update the
+values contained within the metric library by calling an update function
+on the relevant metrics. Consumers receive metric information by querying
+the central metric data, which is held in shared memory.
+
+For each metric, a separate value is maintained for each port id, and
+when publishing metric values the producers need to specify which port is
+being updated. In addition there is a special id ``RTE_METRICS_GLOBAL``
+that is intended for global statistics that are not associated with any
+individual device. Since the metrics library is self-contained, the only
+restriction on port numbers is that they are less than ``RTE_MAX_ETHPORTS``
+- there is no requirement for the ports to actually exist.
+
+Initialising the library
+------------------------
+
+Before the library can be used, it has to be initialized by calling
+``rte_metrics_init()`` which sets up the metric store in shared memory.
+This is where producers will publish metric information to, and where
+consumers will query it from.
+
+.. code-block:: c
+
+ rte_metrics_init(rte_socket_id());
+
+This function **must** be called from a primary process, but otherwise
+producers and consumers can be in either primary or secondary processes.
+
+Registering metrics
+-------------------
+
+Metrics must first be *registered*, which is the way producers declare
+the names of the metrics they will be publishing. Registration can either
+be done individually, or a set of metrics can be registered as a group.
+Individual registration is done using ``rte_metrics_reg_name()``:
+
+.. code-block:: c
+
+ id_1 = rte_metrics_reg_name("mean_bits_in");
+ id_2 = rte_metrics_reg_name("mean_bits_out");
+ id_3 = rte_metrics_reg_name("peak_bits_in");
+ id_4 = rte_metrics_reg_name("peak_bits_out");
+
+or alternatively, a set of metrics can be registered together using
+``rte_metrics_reg_names()``:
+
+.. code-block:: c
+
+ const char * const names[] = {
+ "mean_bits_in", "mean_bits_out",
+ "peak_bits_in", "peak_bits_out",
+ };
+ id_set = rte_metrics_reg_names(&names[0], 4);
+
+If the return value is negative, it means registration failed. Otherwise
+the return value is the *key* for the metric, which is used when updating
+values. A table mapping together these key values and the metrics' names
+can be obtained using ``rte_metrics_get_names()``.
+
+Updating metric values
+----------------------
+
+Once registered, producers can update the metric for a given port using
+the ``rte_metrics_update_value()`` function. This uses the metric key
+that is returned when registering the metric, and can also be looked up
+using ``rte_metrics_get_names()``.
+
+.. code-block:: c
+
+ rte_metrics_update_value(port_id, id_1, values[0]);
+ rte_metrics_update_value(port_id, id_2, values[1]);
+ rte_metrics_update_value(port_id, id_3, values[2]);
+ rte_metrics_update_value(port_id, id_4, values[3]);
+
+if metrics were registered as a single set, they can either be updated
+individually using ``rte_metrics_update_value()``, or updated together
+using the ``rte_metrics_update_values()`` function:
+
+.. code-block:: c
+
+ rte_metrics_update_value(port_id, id_set, values[0]);
+ rte_metrics_update_value(port_id, id_set + 1, values[1]);
+ rte_metrics_update_value(port_id, id_set + 2, values[2]);
+ rte_metrics_update_value(port_id, id_set + 3, values[3]);
+
+ rte_metrics_update_values(port_id, id_set, values, 4);
+
+Note that ``rte_metrics_update_values()`` cannot be used to update
+metric values from *multiple* *sets*, as there is no guarantee two
+sets registered one after the other have contiguous id values.
+
+Querying metrics
+----------------
+
+Consumers can obtain metric values by querying the metrics library using
+the ``rte_metrics_get_values()`` function that return an array of
+``struct rte_metric_value``. Each entry within this array contains a metric
+value and its associated key. A key-name mapping can be obtained using the
+``rte_metrics_get_names()`` function that returns an array of
+``struct rte_metric_name`` that is indexed by the key. The following will
+print out all metrics for a given port:
+
+.. code-block:: c
+
+ void print_metrics() {
+ struct rte_metric_name *names;
+ int len;
+
+ len = rte_metrics_get_names(NULL, 0);
+ if (len < 0) {
+ printf("Cannot get metrics count\n");
+ return;
+ }
+ if (len == 0) {
+ printf("No metrics to display (none have been registered)\n");
+ return;
+ }
+ metrics = malloc(sizeof(struct rte_metric_value) * len);
+ names = malloc(sizeof(struct rte_metric_name) * len);
+ if (metrics == NULL || names == NULL) {
+ printf("Cannot allocate memory\n");
+ free(metrics);
+ free(names);
+ return;
+ }
+ ret = rte_metrics_get_values(port_id, metrics, len);
+ if (ret < 0 || ret > len) {
+ printf("Cannot get metrics values\n");
+ free(metrics);
+ free(names);
+ return;
+ }
+ printf("Metrics for port %i:\n", port_id);
+ for (i = 0; i < len; i++)
+ printf(" %s: %"PRIu64"\n",
+ names[metrics[i].key].name, metrics[i].value);
+ free(metrics);
+ free(names);
+ }
+
+
+Bit-rate statistics library
+---------------------------
+
+The bit-rate library calculates the exponentially-weighted moving
+average and peak bit-rates for each active port (i.e. network device).
+These statistics are reported via the metrics library using the
+following names:
+
+ - ``mean_bits_in``: Average inbound bit-rate
+ - ``mean_bits_out``: Average outbound bit-rate
+ - ``ewma_bits_in``: Average inbound bit-rate (EWMA smoothed)
+ - ``ewma_bits_out``: Average outbound bit-rate (EWMA smoothed)
+ - ``peak_bits_in``: Peak inbound bit-rate
+ - ``peak_bits_out``: Peak outbound bit-rate
+
+Once initialised and clocked at the appropriate frequency, these
+statistics can be obtained by querying the metrics library.
+
+Initialization
+~~~~~~~~~~~~~~
+
+Before the library can be used, it has to be initialised by calling
+``rte_stats_bitrate_create()``, which will return a bit-rate
+calculation object. Since the bit-rate library uses the metrics library
+to report the calculated statistics, the bit-rate library then needs to
+register the calculated statistics with the metrics library. This is
+done using the helper function ``rte_stats_bitrate_reg()``.
+
+.. code-block:: c
+
+ struct rte_stats_bitrates *bitrate_data;
+
+ bitrate_data = rte_stats_bitrate_create();
+ if (bitrate_data == NULL)
+ rte_exit(EXIT_FAILURE, "Could not allocate bit-rate data.\n");
+ rte_stats_bitrate_reg(bitrate_data);
+
+Controlling the sampling rate
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Since the library works by periodic sampling but does not use an
+internal thread, the application has to periodically call
+``rte_stats_bitrate_calc()``. The frequency at which this function
+is called should be the intended sampling rate required for the
+calculated statistics. For instance if per-second statistics are
+desired, this function should be called once a second.
+
+.. code-block:: c
+
+ tics_datum = rte_rdtsc();
+ tics_per_1sec = rte_get_timer_hz();
+
+ while( 1 ) {
+ /* ... */
+ tics_current = rte_rdtsc();
+ if (tics_current - tics_datum >= tics_per_1sec) {
+ /* Periodic bitrate calculation */
+ for (idx_port = 0; idx_port < cnt_ports; idx_port++)
+ rte_stats_bitrate_calc(bitrate_data, idx_port);
+ tics_datum = tics_current;
+ }
+ /* ... */
+ }
+
+
+Latency statistics library
+--------------------------
+
+The latency statistics library calculates the latency of packet
+processing by a DPDK application, reporting the minimum, average,
+and maximum nano-seconds that packet processing takes, as well as
+the jitter in processing delay. These statistics are then reported
+via the metrics library using the following names:
+
+ - ``min_latency_ns``: Minimum processing latency (nano-seconds)
+ - ``avg_latency_ns``: Average processing latency (nano-seconds)
+ - ``mac_latency_ns``: Maximum processing latency (nano-seconds)
+ - ``jitter_ns``: Variance in processing latency (nano-seconds)
+
+Once initialised and clocked at the appropriate frequency, these
+statistics can be obtained by querying the metrics library.
+
+Initialization
+~~~~~~~~~~~~~~
+
+Before the library can be used, it has to be initialised by calling
+``rte_latencystats_init()``.
+
+.. code-block:: c
+
+ lcoreid_t latencystats_lcore_id = -1;
+
+ int ret = rte_latencystats_init(1, NULL);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "Could not allocate latency data.\n");
+
+
+Triggering statistic updates
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_latencystats_update()`` function needs to be called
+periodically so that latency statistics can be updated.
+
+.. code-block:: c
+
+ if (latencystats_lcore_id == rte_lcore_id())
+ rte_latencystats_update();
+
+Library shutdown
+~~~~~~~~~~~~~~~~
+
+When finished, ``rte_latencystats_uninit()`` needs to be called to
+de-initialise the latency library.
+
+.. code-block:: c
+
+ rte_latencystats_uninit();
diff --git a/doc/guides/prog_guide/multi_proc_support.rst b/doc/guides/prog_guide/multi_proc_support.rst
index 2a996ae8..9a9dca7f 100644
--- a/doc/guides/prog_guide/multi_proc_support.rst
+++ b/doc/guides/prog_guide/multi_proc_support.rst
@@ -173,7 +173,7 @@ Some of these are documented below:
so it is recommended that it be disabled only when absolutely necessary,
and only when the implications of this change have been understood.
-* All DPDK processes running as a single application and using shared memory must have distinct coremask arguments.
+* All DPDK processes running as a single application and using shared memory must have distinct coremask/corelist arguments.
It is not possible to have a primary and secondary instance, or two secondary instances,
using any of the same logical cores.
Attempting to do so can cause corruption of memory pool caches, among other issues.
diff --git a/doc/guides/prog_guide/packet_classif_access_ctrl.rst b/doc/guides/prog_guide/packet_classif_access_ctrl.rst
index 5fd3d348..a6bee9ba 100644
--- a/doc/guides/prog_guide/packet_classif_access_ctrl.rst
+++ b/doc/guides/prog_guide/packet_classif_access_ctrl.rst
@@ -329,8 +329,9 @@ When creating a set of rules, for each rule, additional information must be supp
Each set could be assigned its own category and by combining them into a single database,
one lookup returns a result for each of the four sets.
-* **userdata**: A user-defined field that could be any value except zero.
+* **userdata**: A user-defined value.
For each category, a successful match returns the userdata field of the highest priority matched rule.
+ When no rules match, returned value is zero.
.. note::
diff --git a/doc/guides/prog_guide/packet_distrib_lib.rst b/doc/guides/prog_guide/packet_distrib_lib.rst
index b5bdabbf..ee2b1a66 100644
--- a/doc/guides/prog_guide/packet_distrib_lib.rst
+++ b/doc/guides/prog_guide/packet_distrib_lib.rst
@@ -42,6 +42,10 @@ The model of operation is shown in the diagram below.
Packet Distributor mode of operation
+There are two modes of operation of the API in the distributor library,
+one which sends one packet at a time to workers using 32-bits for flow_id,
+and an optimized mode which sends bursts of up to 8 packets at a time to workers, using 15 bits of flow_id.
+The mode is selected by the type field in the ``rte_distributor_create()`` function.
Distributor Core Operation
--------------------------
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index bf3ea9fd..4987f70a 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -200,8 +200,8 @@ Ethernet* flow control (pause frame) can be configured on the individual port.
Refer to the testpmd source code for details.
Also, L4 (UDP/TCP/ SCTP) checksum offload by the NIC can be enabled for an individual packet as long as the packet mbuf is set up correctly. See `Hardware Offload`_ for details.
-Configuration of Transmit and Receive Queues
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Configuration of Transmit Queues
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Each transmit queue is independently configured with the following information:
@@ -249,6 +249,42 @@ One descriptor in the TX ring is used as a sentinel to avoid a hardware race con
When configuring for DCB operation, at port initialization, both the number of transmit queues and the number of receive queues must be set to 128.
+Free Tx mbuf on Demand
+~~~~~~~~~~~~~~~~~~~~~~
+
+Many of the drivers do not release the mbuf back to the mempool, or local cache,
+immediately after the packet has been transmitted.
+Instead, they leave the mbuf in their Tx ring and
+either perform a bulk release when the ``tx_rs_thresh`` has been crossed
+or free the mbuf when a slot in the Tx ring is needed.
+
+An application can request the driver to release used mbufs with the ``rte_eth_tx_done_cleanup()`` API.
+This API requests the driver to release mbufs that are no longer in use,
+independent of whether or not the ``tx_rs_thresh`` has been crossed.
+There are two scenarios when an application may want the mbuf released immediately:
+
+* When a given packet needs to be sent to multiple destination interfaces
+ (either for Layer 2 flooding or Layer 3 multi-cast).
+ One option is to make a copy of the packet or a copy of the header portion that needs to be manipulated.
+ A second option is to transmit the packet and then poll the ``rte_eth_tx_done_cleanup()`` API
+ until the reference count on the packet is decremented.
+ Then the same packet can be transmitted to the next destination interface.
+ The application is still responsible for managing any packet manipulations needed
+ between the different destination interfaces, but a packet copy can be avoided.
+ This API is independent of whether the packet was transmitted or dropped,
+ only that the mbuf is no longer in use by the interface.
+
+* Some applications are designed to make multiple runs, like a packet generator.
+ For performance reasons and consistency between runs,
+ the application may want to reset back to an initial state
+ between each run, where all mbufs are returned to the mempool.
+ In this case, it can call the ``rte_eth_tx_done_cleanup()`` API
+ for each destination interface it has been using
+ to request it to release of all its used mbufs.
+
+To determine if a driver supports this API, check for the *Free Tx mbuf on demand* feature
+in the *Network Interface Controller Drivers* document.
+
Hardware Offload
~~~~~~~~~~~~~~~~
@@ -298,24 +334,21 @@ The Ethernet device API exported by the Ethernet PMDs is described in the *DPDK
Extended Statistics API
~~~~~~~~~~~~~~~~~~~~~~~
-The extended statistics API allows each individual PMD to expose a unique set
-of statistics. Accessing these from application programs is done via two
-functions:
-
-* ``rte_eth_xstats_get``: Fills in an array of ``struct rte_eth_xstat``
- with extended statistics.
-* ``rte_eth_xstats_get_names``: Fills in an array of
- ``struct rte_eth_xstat_name`` with extended statistic name lookup
- information.
-
-Each ``struct rte_eth_xstat`` contains an identifier and value pair, and
-each ``struct rte_eth_xstat_name`` contains a string. Each identifier
-within the ``struct rte_eth_xstat`` lookup array must have a corresponding
-entry in the ``struct rte_eth_xstat_name`` lookup array. Within the latter
-the index of the entry is the identifier the string is associated with.
-These identifiers, as well as the number of extended statistic exposed, must
-remain constant during runtime. Note that extended statistic identifiers are
+The extended statistics API allows a PMD to expose all statistics that are
+available to it, including statistics that are unique to the device.
+Each statistic has three properties ``name``, ``id`` and ``value``:
+
+* ``name``: A human readable string formatted by the scheme detailed below.
+* ``id``: An integer that represents only that statistic.
+* ``value``: A unsigned 64-bit integer that is the value of the statistic.
+
+Note that extended statistic identifiers are
driver-specific, and hence might not be the same for different ports.
+The API consists of various ``rte_eth_xstats_*()`` functions, and allows an
+application to be flexible in how it retrieves statistics.
+
+Scheme for Human Readable Names
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A naming scheme exists for the strings exposed to clients of the API. This is
to allow scraping of the API for statistics of interest. The naming scheme uses
@@ -356,3 +389,139 @@ Some additions in the metadata scheme are as follows:
An example where queue numbers are used is as follows: ``tx_q7_bytes`` which
indicates this statistic applies to queue number 7, and represents the number
of transmitted bytes on that queue.
+
+API Design
+^^^^^^^^^^
+
+The xstats API uses the ``name``, ``id``, and ``value`` to allow performant
+lookup of specific statistics. Performant lookup means two things;
+
+* No string comparisons with the ``name`` of the statistic in fast-path
+* Allow requesting of only the statistics of interest
+
+The API ensures these requirements are met by mapping the ``name`` of the
+statistic to a unique ``id``, which is used as a key for lookup in the fast-path.
+The API allows applications to request an array of ``id`` values, so that the
+PMD only performs the required calculations. Expected usage is that the
+application scans the ``name`` of each statistic, and caches the ``id``
+if it has an interest in that statistic. On the fast-path, the integer can be used
+to retrieve the actual ``value`` of the statistic that the ``id`` represents.
+
+API Functions
+^^^^^^^^^^^^^
+
+The API is built out of a small number of functions, which can be used to
+retrieve the number of statistics and the names, IDs and values of those
+statistics.
+
+* ``rte_eth_xstats_get_names_by_id()``: returns the names of the statistics. When given a
+ ``NULL`` parameter the function returns the number of statistics that are available.
+
+* ``rte_eth_xstats_get_id_by_name()``: Searches for the statistic ID that matches
+ ``xstat_name``. If found, the ``id`` integer is set.
+
+* ``rte_eth_xstats_get_by_id()``: Fills in an array of ``uint64_t`` values
+ with matching the provided ``ids`` array. If the ``ids`` array is NULL, it
+ returns all statistics that are available.
+
+
+Application Usage
+^^^^^^^^^^^^^^^^^
+
+Imagine an application that wants to view the dropped packet count. If no
+packets are dropped, the application does not read any other metrics for
+performance reasons. If packets are dropped, the application has a particular
+set of statistics that it requests. This "set" of statistics allows the app to
+decide what next steps to perform. The following code-snippets show how the
+xstats API can be used to achieve this goal.
+
+First step is to get all statistics names and list them:
+
+.. code-block:: c
+
+ struct rte_eth_xstat_name *xstats_names;
+ uint64_t *values;
+ int len, i;
+
+ /* Get number of stats */
+ len = rte_eth_xstats_get_names_by_id(port_id, NULL, NULL, 0);
+ if (len < 0) {
+ printf("Cannot get xstats count\n");
+ goto err;
+ }
+
+ xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * len);
+ if (xstats_names == NULL) {
+ printf("Cannot allocate memory for xstat names\n");
+ goto err;
+ }
+
+ /* Retrieve xstats names, passing NULL for IDs to return all statistics */
+ if (len != rte_eth_xstats_get_names_by_id(port_id, xstats_names, NULL, len)) {
+ printf("Cannot get xstat names\n");
+ goto err;
+ }
+
+ values = malloc(sizeof(values) * len);
+ if (values == NULL) {
+ printf("Cannot allocate memory for xstats\n");
+ goto err;
+ }
+
+ /* Getting xstats values */
+ if (len != rte_eth_xstats_get_by_id(port_id, NULL, values, len)) {
+ printf("Cannot get xstat values\n");
+ goto err;
+ }
+
+ /* Print all xstats names and values */
+ for (i = 0; i < len; i++) {
+ printf("%s: %"PRIu64"\n", xstats_names[i].name, values[i]);
+ }
+
+The application has access to the names of all of the statistics that the PMD
+exposes. The application can decide which statistics are of interest, cache the
+ids of those statistics by looking up the name as follows:
+
+.. code-block:: c
+
+ uint64_t id;
+ uint64_t value;
+ const char *xstat_name = "rx_errors";
+
+ if(!rte_eth_xstats_get_id_by_name(port_id, xstat_name, &id)) {
+ rte_eth_xstats_get_by_id(port_id, &id, &value, 1);
+ printf("%s: %"PRIu64"\n", xstat_name, value);
+ }
+ else {
+ printf("Cannot find xstats with a given name\n");
+ goto err;
+ }
+
+The API provides flexibility to the application so that it can look up multiple
+statistics using an array containing multiple ``id`` numbers. This reduces the
+function call overhead of retrieving statistics, and makes lookup of multiple
+statistics simpler for the application.
+
+.. code-block:: c
+
+ #define APP_NUM_STATS 4
+ /* application cached these ids previously; see above */
+ uint64_t ids_array[APP_NUM_STATS] = {3,4,7,21};
+ uint64_t value_array[APP_NUM_STATS];
+
+ /* Getting multiple xstats values from array of IDs */
+ rte_eth_xstats_get_by_id(port_id, ids_array, value_array, APP_NUM_STATS);
+
+ uint32_t i;
+ for(i = 0; i < APP_NUM_STATS; i++) {
+ printf("%d: %"PRIu64"\n", ids_array[i], value_array[i]);
+ }
+
+
+This array lookup API for xstats allows the application create multiple
+"groups" of statistics, and look up the values of those IDs using a single API
+call. As an end result, the application is able to achieve its goal of
+monitoring a single statistic ("rx_errors" in this case), and if that shows
+packets being dropped, it can easily retrieve a "set" of statistics using the
+IDs array parameter to ``rte_eth_xstats_get_by_id`` function.
diff --git a/doc/guides/prog_guide/port_hotplug_framework.rst b/doc/guides/prog_guide/port_hotplug_framework.rst
index 6e4436e5..ee765773 100644
--- a/doc/guides/prog_guide/port_hotplug_framework.rst
+++ b/doc/guides/prog_guide/port_hotplug_framework.rst
@@ -102,9 +102,9 @@ Limitations
* The framework can only be enabled with Linux. BSD is not supported.
* To detach a port, the port should be backed by a device that igb_uio
- manages. VFIO is not supported.
+ or VFIO manages.
* Not all PMDs support detaching feature.
To know whether a PMD can support detaching, search for the
- "RTE_PCI_DRV_DETACHABLE" flag in PMD implementation. If the flag is
+ "RTE_ETH_DEV_DETACHABLE" flag in rte_eth_dev::data::dev_flags. If the flag is
defined in the PMD, detaching is supported.
diff --git a/doc/guides/prog_guide/ring_lib.rst b/doc/guides/prog_guide/ring_lib.rst
index 9f697538..b31ab7a4 100644
--- a/doc/guides/prog_guide/ring_lib.rst
+++ b/doc/guides/prog_guide/ring_lib.rst
@@ -102,21 +102,6 @@ Name
A ring is identified by a unique name.
It is not possible to create two rings with the same name (rte_ring_create() returns NULL if this is attempted).
-Water Marking
-~~~~~~~~~~~~~
-
-The ring can have a high water mark (threshold).
-Once an enqueue operation reaches the high water mark, the producer is notified, if the water mark is configured.
-
-This mechanism can be used, for example, to exert a back pressure on I/O to inform the LAN to PAUSE.
-
-Debug
-~~~~~
-
-When debug is enabled (CONFIG_RTE_LIBRTE_RING_DEBUG is set),
-the library stores some per-ring statistic counters about the number of enqueues/dequeues.
-These statistics are per-core to avoid concurrent accesses or atomic operations.
-
Use Cases
---------
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
new file mode 100644
index 00000000..b587ba99
--- /dev/null
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -0,0 +1,2101 @@
+.. BSD LICENSE
+ Copyright 2016 6WIND S.A.
+ Copyright 2016 Mellanox.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of 6WIND S.A. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+.. _Generic_flow_API:
+
+Generic flow API (rte_flow)
+===========================
+
+Overview
+--------
+
+This API provides a generic means to configure hardware to match specific
+ingress or egress traffic, alter its fate and query related counters
+according to any number of user-defined rules.
+
+It is named *rte_flow* after the prefix used for all its symbols, and is
+defined in ``rte_flow.h``.
+
+- Matching can be performed on packet data (protocol headers, payload) and
+ properties (e.g. associated physical port, virtual device function ID).
+
+- Possible operations include dropping traffic, diverting it to specific
+ queues, to virtual/physical device functions or ports, performing tunnel
+ offloads, adding marks and so on.
+
+It is slightly higher-level than the legacy filtering framework which it
+encompasses and supersedes (including all functions and filter types) in
+order to expose a single interface with an unambiguous behavior that is
+common to all poll-mode drivers (PMDs).
+
+Several methods to migrate existing applications are described in `API
+migration`_.
+
+Flow rule
+---------
+
+Description
+~~~~~~~~~~~
+
+A flow rule is the combination of attributes with a matching pattern and a
+list of actions. Flow rules form the basis of this API.
+
+Flow rules can have several distinct actions (such as counting,
+encapsulating, decapsulating before redirecting packets to a particular
+queue, etc.), instead of relying on several rules to achieve this and having
+applications deal with hardware implementation details regarding their
+order.
+
+Support for different priority levels on a rule basis is provided, for
+example in order to force a more specific rule to come before a more generic
+one for packets matched by both. However hardware support for more than a
+single priority level cannot be guaranteed. When supported, the number of
+available priority levels is usually low, which is why they can also be
+implemented in software by PMDs (e.g. missing priority levels may be
+emulated by reordering rules).
+
+In order to remain as hardware-agnostic as possible, by default all rules
+are considered to have the same priority, which means that the order between
+overlapping rules (when a packet is matched by several filters) is
+undefined.
+
+PMDs may refuse to create overlapping rules at a given priority level when
+they can be detected (e.g. if a pattern matches an existing filter).
+
+Thus predictable results for a given priority level can only be achieved
+with non-overlapping rules, using perfect matching on all protocol layers.
+
+Flow rules can also be grouped, the flow rule priority is specific to the
+group they belong to. All flow rules in a given group are thus processed
+either before or after another group.
+
+Support for multiple actions per rule may be implemented internally on top
+of non-default hardware priorities, as a result both features may not be
+simultaneously available to applications.
+
+Considering that allowed pattern/actions combinations cannot be known in
+advance and would result in an impractically large number of capabilities to
+expose, a method is provided to validate a given rule from the current
+device configuration state.
+
+This enables applications to check if the rule types they need is supported
+at initialization time, before starting their data path. This method can be
+used anytime, its only requirement being that the resources needed by a rule
+should exist (e.g. a target RX queue should be configured first).
+
+Each defined rule is associated with an opaque handle managed by the PMD,
+applications are responsible for keeping it. These can be used for queries
+and rules management, such as retrieving counters or other data and
+destroying them.
+
+To avoid resource leaks on the PMD side, handles must be explicitly
+destroyed by the application before releasing associated resources such as
+queues and ports.
+
+The following sections cover:
+
+- **Attributes** (represented by ``struct rte_flow_attr``): properties of a
+ flow rule such as its direction (ingress or egress) and priority.
+
+- **Pattern item** (represented by ``struct rte_flow_item``): part of a
+ matching pattern that either matches specific packet data or traffic
+ properties. It can also describe properties of the pattern itself, such as
+ inverted matching.
+
+- **Matching pattern**: traffic properties to look for, a combination of any
+ number of items.
+
+- **Actions** (represented by ``struct rte_flow_action``): operations to
+ perform whenever a packet is matched by a pattern.
+
+Attributes
+~~~~~~~~~~
+
+Attribute: Group
+^^^^^^^^^^^^^^^^
+
+Flow rules can be grouped by assigning them a common group number. Lower
+values have higher priority. Group 0 has the highest priority.
+
+Although optional, applications are encouraged to group similar rules as
+much as possible to fully take advantage of hardware capabilities
+(e.g. optimized matching) and work around limitations (e.g. a single pattern
+type possibly allowed in a given group).
+
+Note that support for more than a single group is not guaranteed.
+
+Attribute: Priority
+^^^^^^^^^^^^^^^^^^^
+
+A priority level can be assigned to a flow rule. Like groups, lower values
+denote higher priority, with 0 as the maximum.
+
+A rule with priority 0 in group 8 is always matched after a rule with
+priority 8 in group 0.
+
+Group and priority levels are arbitrary and up to the application, they do
+not need to be contiguous nor start from 0, however the maximum number
+varies between devices and may be affected by existing flow rules.
+
+If a packet is matched by several rules of a given group for a given
+priority level, the outcome is undefined. It can take any path, may be
+duplicated or even cause unrecoverable errors.
+
+Note that support for more than a single priority level is not guaranteed.
+
+Attribute: Traffic direction
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Flow rules can apply to inbound and/or outbound traffic (ingress/egress).
+
+Several pattern items and actions are valid and can be used in both
+directions. At least one direction must be specified.
+
+Specifying both directions at once for a given rule is not recommended but
+may be valid in a few cases (e.g. shared counters).
+
+Pattern item
+~~~~~~~~~~~~
+
+Pattern items fall in two categories:
+
+- Matching protocol headers and packet data (ANY, RAW, ETH, VLAN, IPV4,
+ IPV6, ICMP, UDP, TCP, SCTP, VXLAN, MPLS, GRE and so on), usually
+ associated with a specification structure.
+
+- Matching meta-data or affecting pattern processing (END, VOID, INVERT, PF,
+ VF, PORT and so on), often without a specification structure.
+
+Item specification structures are used to match specific values among
+protocol fields (or item properties). Documentation describes for each item
+whether they are associated with one and their type name if so.
+
+Up to three structures of the same type can be set for a given item:
+
+- ``spec``: values to match (e.g. a given IPv4 address).
+
+- ``last``: upper bound for an inclusive range with corresponding fields in
+ ``spec``.
+
+- ``mask``: bit-mask applied to both ``spec`` and ``last`` whose purpose is
+ to distinguish the values to take into account and/or partially mask them
+ out (e.g. in order to match an IPv4 address prefix).
+
+Usage restrictions and expected behavior:
+
+- Setting either ``mask`` or ``last`` without ``spec`` is an error.
+
+- Field values in ``last`` which are either 0 or equal to the corresponding
+ values in ``spec`` are ignored; they do not generate a range. Nonzero
+ values lower than those in ``spec`` are not supported.
+
+- Setting ``spec`` and optionally ``last`` without ``mask`` causes the PMD
+ to use the default mask defined for that item (defined as
+ ``rte_flow_item_{name}_mask`` constants).
+
+- Not setting any of them (assuming item type allows it) is equivalent to
+ providing an empty (zeroed) ``mask`` for broad (nonspecific) matching.
+
+- ``mask`` is a simple bit-mask applied before interpreting the contents of
+ ``spec`` and ``last``, which may yield unexpected results if not used
+ carefully. For example, if for an IPv4 address field, ``spec`` provides
+ *10.1.2.3*, ``last`` provides *10.3.4.5* and ``mask`` provides
+ *255.255.0.0*, the effective range becomes *10.1.0.0* to *10.3.255.255*.
+
+Example of an item specification matching an Ethernet header:
+
+.. _table_rte_flow_pattern_item_example:
+
+.. table:: Ethernet item
+
+ +----------+----------+--------------------+
+ | Field | Subfield | Value |
+ +==========+==========+====================+
+ | ``spec`` | ``src`` | ``00:01:02:03:04`` |
+ | +----------+--------------------+
+ | | ``dst`` | ``00:2a:66:00:01`` |
+ | +----------+--------------------+
+ | | ``type`` | ``0x22aa`` |
+ +----------+----------+--------------------+
+ | ``last`` | unspecified |
+ +----------+----------+--------------------+
+ | ``mask`` | ``src`` | ``00:ff:ff:ff:00`` |
+ | +----------+--------------------+
+ | | ``dst`` | ``00:00:00:00:ff`` |
+ | +----------+--------------------+
+ | | ``type`` | ``0x0000`` |
+ +----------+----------+--------------------+
+
+Non-masked bits stand for any value (shown as ``?`` below), Ethernet headers
+with the following properties are thus matched:
+
+- ``src``: ``??:01:02:03:??``
+- ``dst``: ``??:??:??:??:01``
+- ``type``: ``0x????``
+
+Matching pattern
+~~~~~~~~~~~~~~~~
+
+A pattern is formed by stacking items starting from the lowest protocol
+layer to match. This stacking restriction does not apply to meta items which
+can be placed anywhere in the stack without affecting the meaning of the
+resulting pattern.
+
+Patterns are terminated by END items.
+
+Examples:
+
+.. _table_rte_flow_tcpv4_as_l4:
+
+.. table:: TCPv4 as L4
+
+ +-------+----------+
+ | Index | Item |
+ +=======+==========+
+ | 0 | Ethernet |
+ +-------+----------+
+ | 1 | IPv4 |
+ +-------+----------+
+ | 2 | TCP |
+ +-------+----------+
+ | 3 | END |
+ +-------+----------+
+
+|
+
+.. _table_rte_flow_tcpv6_in_vxlan:
+
+.. table:: TCPv6 in VXLAN
+
+ +-------+------------+
+ | Index | Item |
+ +=======+============+
+ | 0 | Ethernet |
+ +-------+------------+
+ | 1 | IPv4 |
+ +-------+------------+
+ | 2 | UDP |
+ +-------+------------+
+ | 3 | VXLAN |
+ +-------+------------+
+ | 4 | Ethernet |
+ +-------+------------+
+ | 5 | IPv6 |
+ +-------+------------+
+ | 6 | TCP |
+ +-------+------------+
+ | 7 | END |
+ +-------+------------+
+
+|
+
+.. _table_rte_flow_tcpv4_as_l4_meta:
+
+.. table:: TCPv4 as L4 with meta items
+
+ +-------+----------+
+ | Index | Item |
+ +=======+==========+
+ | 0 | VOID |
+ +-------+----------+
+ | 1 | Ethernet |
+ +-------+----------+
+ | 2 | VOID |
+ +-------+----------+
+ | 3 | IPv4 |
+ +-------+----------+
+ | 4 | TCP |
+ +-------+----------+
+ | 5 | VOID |
+ +-------+----------+
+ | 6 | VOID |
+ +-------+----------+
+ | 7 | END |
+ +-------+----------+
+
+The above example shows how meta items do not affect packet data matching
+items, as long as those remain stacked properly. The resulting matching
+pattern is identical to "TCPv4 as L4".
+
+.. _table_rte_flow_udpv6_anywhere:
+
+.. table:: UDPv6 anywhere
+
+ +-------+------+
+ | Index | Item |
+ +=======+======+
+ | 0 | IPv6 |
+ +-------+------+
+ | 1 | UDP |
+ +-------+------+
+ | 2 | END |
+ +-------+------+
+
+If supported by the PMD, omitting one or several protocol layers at the
+bottom of the stack as in the above example (missing an Ethernet
+specification) enables looking up anywhere in packets.
+
+It is unspecified whether the payload of supported encapsulations
+(e.g. VXLAN payload) is matched by such a pattern, which may apply to inner,
+outer or both packets.
+
+.. _table_rte_flow_invalid_l3:
+
+.. table:: Invalid, missing L3
+
+ +-------+----------+
+ | Index | Item |
+ +=======+==========+
+ | 0 | Ethernet |
+ +-------+----------+
+ | 1 | UDP |
+ +-------+----------+
+ | 2 | END |
+ +-------+----------+
+
+The above pattern is invalid due to a missing L3 specification between L2
+(Ethernet) and L4 (UDP). Doing so is only allowed at the bottom and at the
+top of the stack.
+
+Meta item types
+~~~~~~~~~~~~~~~
+
+They match meta-data or affect pattern processing instead of matching packet
+data directly, most of them do not need a specification structure. This
+particularity allows them to be specified anywhere in the stack without
+causing any side effect.
+
+Item: ``END``
+^^^^^^^^^^^^^
+
+End marker for item lists. Prevents further processing of items, thereby
+ending the pattern.
+
+- Its numeric value is 0 for convenience.
+- PMD support is mandatory.
+- ``spec``, ``last`` and ``mask`` are ignored.
+
+.. _table_rte_flow_item_end:
+
+.. table:: END
+
+ +----------+---------+
+ | Field | Value |
+ +==========+=========+
+ | ``spec`` | ignored |
+ +----------+---------+
+ | ``last`` | ignored |
+ +----------+---------+
+ | ``mask`` | ignored |
+ +----------+---------+
+
+Item: ``VOID``
+^^^^^^^^^^^^^^
+
+Used as a placeholder for convenience. It is ignored and simply discarded by
+PMDs.
+
+- PMD support is mandatory.
+- ``spec``, ``last`` and ``mask`` are ignored.
+
+.. _table_rte_flow_item_void:
+
+.. table:: VOID
+
+ +----------+---------+
+ | Field | Value |
+ +==========+=========+
+ | ``spec`` | ignored |
+ +----------+---------+
+ | ``last`` | ignored |
+ +----------+---------+
+ | ``mask`` | ignored |
+ +----------+---------+
+
+One usage example for this type is generating rules that share a common
+prefix quickly without reallocating memory, only by updating item types:
+
+.. _table_rte_flow_item_void_example:
+
+.. table:: TCP, UDP or ICMP as L4
+
+ +-------+--------------------+
+ | Index | Item |
+ +=======+====================+
+ | 0 | Ethernet |
+ +-------+--------------------+
+ | 1 | IPv4 |
+ +-------+------+------+------+
+ | 2 | UDP | VOID | VOID |
+ +-------+------+------+------+
+ | 3 | VOID | TCP | VOID |
+ +-------+------+------+------+
+ | 4 | VOID | VOID | ICMP |
+ +-------+------+------+------+
+ | 5 | END |
+ +-------+--------------------+
+
+Item: ``INVERT``
+^^^^^^^^^^^^^^^^
+
+Inverted matching, i.e. process packets that do not match the pattern.
+
+- ``spec``, ``last`` and ``mask`` are ignored.
+
+.. _table_rte_flow_item_invert:
+
+.. table:: INVERT
+
+ +----------+---------+
+ | Field | Value |
+ +==========+=========+
+ | ``spec`` | ignored |
+ +----------+---------+
+ | ``last`` | ignored |
+ +----------+---------+
+ | ``mask`` | ignored |
+ +----------+---------+
+
+Usage example, matching non-TCPv4 packets only:
+
+.. _table_rte_flow_item_invert_example:
+
+.. table:: Anything but TCPv4
+
+ +-------+----------+
+ | Index | Item |
+ +=======+==========+
+ | 0 | INVERT |
+ +-------+----------+
+ | 1 | Ethernet |
+ +-------+----------+
+ | 2 | IPv4 |
+ +-------+----------+
+ | 3 | TCP |
+ +-------+----------+
+ | 4 | END |
+ +-------+----------+
+
+Item: ``PF``
+^^^^^^^^^^^^
+
+Matches packets addressed to the physical function of the device.
+
+If the underlying device function differs from the one that would normally
+receive the matched traffic, specifying this item prevents it from reaching
+that device unless the flow rule contains a `Action: PF`_. Packets are not
+duplicated between device instances by default.
+
+- Likely to return an error or never match any traffic if applied to a VF
+ device.
+- Can be combined with any number of `Item: VF`_ to match both PF and VF
+ traffic.
+- ``spec``, ``last`` and ``mask`` must not be set.
+
+.. _table_rte_flow_item_pf:
+
+.. table:: PF
+
+ +----------+-------+
+ | Field | Value |
+ +==========+=======+
+ | ``spec`` | unset |
+ +----------+-------+
+ | ``last`` | unset |
+ +----------+-------+
+ | ``mask`` | unset |
+ +----------+-------+
+
+Item: ``VF``
+^^^^^^^^^^^^
+
+Matches packets addressed to a virtual function ID of the device.
+
+If the underlying device function differs from the one that would normally
+receive the matched traffic, specifying this item prevents it from reaching
+that device unless the flow rule contains a `Action: VF`_. Packets are not
+duplicated between device instances by default.
+
+- Likely to return an error or never match any traffic if this causes a VF
+ device to match traffic addressed to a different VF.
+- Can be specified multiple times to match traffic addressed to several VF
+ IDs.
+- Can be combined with a PF item to match both PF and VF traffic.
+- Default ``mask`` matches any VF ID.
+
+.. _table_rte_flow_item_vf:
+
+.. table:: VF
+
+ +----------+----------+---------------------------+
+ | Field | Subfield | Value |
+ +==========+==========+===========================+
+ | ``spec`` | ``id`` | destination VF ID |
+ +----------+----------+---------------------------+
+ | ``last`` | ``id`` | upper range value |
+ +----------+----------+---------------------------+
+ | ``mask`` | ``id`` | zeroed to match any VF ID |
+ +----------+----------+---------------------------+
+
+Item: ``PORT``
+^^^^^^^^^^^^^^
+
+Matches packets coming from the specified physical port of the underlying
+device.
+
+The first PORT item overrides the physical port normally associated with the
+specified DPDK input port (port_id). This item can be provided several times
+to match additional physical ports.
+
+Note that physical ports are not necessarily tied to DPDK input ports
+(port_id) when those are not under DPDK control. Possible values are
+specific to each device, they are not necessarily indexed from zero and may
+not be contiguous.
+
+As a device property, the list of allowed values as well as the value
+associated with a port_id should be retrieved by other means.
+
+- Default ``mask`` matches any port index.
+
+.. _table_rte_flow_item_port:
+
+.. table:: PORT
+
+ +----------+-----------+--------------------------------+
+ | Field | Subfield | Value |
+ +==========+===========+================================+
+ | ``spec`` | ``index`` | physical port index |
+ +----------+-----------+--------------------------------+
+ | ``last`` | ``index`` | upper range value |
+ +----------+-----------+--------------------------------+
+ | ``mask`` | ``index`` | zeroed to match any port index |
+ +----------+-----------+--------------------------------+
+
+Data matching item types
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Most of these are basically protocol header definitions with associated
+bit-masks. They must be specified (stacked) from lowest to highest protocol
+layer to form a matching pattern.
+
+The following list is not exhaustive, new protocols will be added in the
+future.
+
+Item: ``ANY``
+^^^^^^^^^^^^^
+
+Matches any protocol in place of the current layer, a single ANY may also
+stand for several protocol layers.
+
+This is usually specified as the first pattern item when looking for a
+protocol anywhere in a packet.
+
+- Default ``mask`` stands for any number of layers.
+
+.. _table_rte_flow_item_any:
+
+.. table:: ANY
+
+ +----------+----------+--------------------------------------+
+ | Field | Subfield | Value |
+ +==========+==========+======================================+
+ | ``spec`` | ``num`` | number of layers covered |
+ +----------+----------+--------------------------------------+
+ | ``last`` | ``num`` | upper range value |
+ +----------+----------+--------------------------------------+
+ | ``mask`` | ``num`` | zeroed to cover any number of layers |
+ +----------+----------+--------------------------------------+
+
+Example for VXLAN TCP payload matching regardless of outer L3 (IPv4 or IPv6)
+and L4 (UDP) both matched by the first ANY specification, and inner L3 (IPv4
+or IPv6) matched by the second ANY specification:
+
+.. _table_rte_flow_item_any_example:
+
+.. table:: TCP in VXLAN with wildcards
+
+ +-------+------+----------+----------+-------+
+ | Index | Item | Field | Subfield | Value |
+ +=======+======+==========+==========+=======+
+ | 0 | Ethernet |
+ +-------+------+----------+----------+-------+
+ | 1 | ANY | ``spec`` | ``num`` | 2 |
+ +-------+------+----------+----------+-------+
+ | 2 | VXLAN |
+ +-------+------------------------------------+
+ | 3 | Ethernet |
+ +-------+------+----------+----------+-------+
+ | 4 | ANY | ``spec`` | ``num`` | 1 |
+ +-------+------+----------+----------+-------+
+ | 5 | TCP |
+ +-------+------------------------------------+
+ | 6 | END |
+ +-------+------------------------------------+
+
+Item: ``RAW``
+^^^^^^^^^^^^^
+
+Matches a byte string of a given length at a given offset.
+
+Offset is either absolute (using the start of the packet) or relative to the
+end of the previous matched item in the stack, in which case negative values
+are allowed.
+
+If search is enabled, offset is used as the starting point. The search area
+can be delimited by setting limit to a nonzero value, which is the maximum
+number of bytes after offset where the pattern may start.
+
+Matching a zero-length pattern is allowed, doing so resets the relative
+offset for subsequent items.
+
+- This type does not support ranges (``last`` field).
+- Default ``mask`` matches all fields exactly.
+
+.. _table_rte_flow_item_raw:
+
+.. table:: RAW
+
+ +----------+--------------+-------------------------------------------------+
+ | Field | Subfield | Value |
+ +==========+==============+=================================================+
+ | ``spec`` | ``relative`` | look for pattern after the previous item |
+ | +--------------+-------------------------------------------------+
+ | | ``search`` | search pattern from offset (see also ``limit``) |
+ | +--------------+-------------------------------------------------+
+ | | ``reserved`` | reserved, must be set to zero |
+ | +--------------+-------------------------------------------------+
+ | | ``offset`` | absolute or relative offset for ``pattern`` |
+ | +--------------+-------------------------------------------------+
+ | | ``limit`` | search area limit for start of ``pattern`` |
+ | +--------------+-------------------------------------------------+
+ | | ``length`` | ``pattern`` length |
+ | +--------------+-------------------------------------------------+
+ | | ``pattern`` | byte string to look for |
+ +----------+--------------+-------------------------------------------------+
+ | ``last`` | if specified, either all 0 or with the same values as ``spec`` |
+ +----------+----------------------------------------------------------------+
+ | ``mask`` | bit-mask applied to ``spec`` values with usual behavior |
+ +----------+----------------------------------------------------------------+
+
+Example pattern looking for several strings at various offsets of a UDP
+payload, using combined RAW items:
+
+.. _table_rte_flow_item_raw_example:
+
+.. table:: UDP payload matching
+
+ +-------+------+----------+--------------+-------+
+ | Index | Item | Field | Subfield | Value |
+ +=======+======+==========+==============+=======+
+ | 0 | Ethernet |
+ +-------+----------------------------------------+
+ | 1 | IPv4 |
+ +-------+----------------------------------------+
+ | 2 | UDP |
+ +-------+------+----------+--------------+-------+
+ | 3 | RAW | ``spec`` | ``relative`` | 1 |
+ | | | +--------------+-------+
+ | | | | ``search`` | 1 |
+ | | | +--------------+-------+
+ | | | | ``offset`` | 10 |
+ | | | +--------------+-------+
+ | | | | ``limit`` | 0 |
+ | | | +--------------+-------+
+ | | | | ``length`` | 3 |
+ | | | +--------------+-------+
+ | | | | ``pattern`` | "foo" |
+ +-------+------+----------+--------------+-------+
+ | 4 | RAW | ``spec`` | ``relative`` | 1 |
+ | | | +--------------+-------+
+ | | | | ``search`` | 0 |
+ | | | +--------------+-------+
+ | | | | ``offset`` | 20 |
+ | | | +--------------+-------+
+ | | | | ``limit`` | 0 |
+ | | | +--------------+-------+
+ | | | | ``length`` | 3 |
+ | | | +--------------+-------+
+ | | | | ``pattern`` | "bar" |
+ +-------+------+----------+--------------+-------+
+ | 5 | RAW | ``spec`` | ``relative`` | 1 |
+ | | | +--------------+-------+
+ | | | | ``search`` | 0 |
+ | | | +--------------+-------+
+ | | | | ``offset`` | -29 |
+ | | | +--------------+-------+
+ | | | | ``limit`` | 0 |
+ | | | +--------------+-------+
+ | | | | ``length`` | 3 |
+ | | | +--------------+-------+
+ | | | | ``pattern`` | "baz" |
+ +-------+------+----------+--------------+-------+
+ | 6 | END |
+ +-------+----------------------------------------+
+
+This translates to:
+
+- Locate "foo" at least 10 bytes deep inside UDP payload.
+- Locate "bar" after "foo" plus 20 bytes.
+- Locate "baz" after "bar" minus 29 bytes.
+
+Such a packet may be represented as follows (not to scale)::
+
+ 0 >= 10 B == 20 B
+ | |<--------->| |<--------->|
+ | | | | |
+ |-----|------|-----|-----|-----|-----|-----------|-----|------|
+ | ETH | IPv4 | UDP | ... | baz | foo | ......... | bar | .... |
+ |-----|------|-----|-----|-----|-----|-----------|-----|------|
+ | |
+ |<--------------------------->|
+ == 29 B
+
+Note that matching subsequent pattern items would resume after "baz", not
+"bar" since matching is always performed after the previous item of the
+stack.
+
+Item: ``ETH``
+^^^^^^^^^^^^^
+
+Matches an Ethernet header.
+
+- ``dst``: destination MAC.
+- ``src``: source MAC.
+- ``type``: EtherType.
+- Default ``mask`` matches destination and source addresses only.
+
+Item: ``VLAN``
+^^^^^^^^^^^^^^
+
+Matches an 802.1Q/ad VLAN tag.
+
+- ``tpid``: tag protocol identifier.
+- ``tci``: tag control information.
+- Default ``mask`` matches TCI only.
+
+Item: ``IPV4``
+^^^^^^^^^^^^^^
+
+Matches an IPv4 header.
+
+Note: IPv4 options are handled by dedicated pattern items.
+
+- ``hdr``: IPv4 header definition (``rte_ip.h``).
+- Default ``mask`` matches source and destination addresses only.
+
+Item: ``IPV6``
+^^^^^^^^^^^^^^
+
+Matches an IPv6 header.
+
+Note: IPv6 options are handled by dedicated pattern items.
+
+- ``hdr``: IPv6 header definition (``rte_ip.h``).
+- Default ``mask`` matches source and destination addresses only.
+
+Item: ``ICMP``
+^^^^^^^^^^^^^^
+
+Matches an ICMP header.
+
+- ``hdr``: ICMP header definition (``rte_icmp.h``).
+- Default ``mask`` matches ICMP type and code only.
+
+Item: ``UDP``
+^^^^^^^^^^^^^
+
+Matches a UDP header.
+
+- ``hdr``: UDP header definition (``rte_udp.h``).
+- Default ``mask`` matches source and destination ports only.
+
+Item: ``TCP``
+^^^^^^^^^^^^^
+
+Matches a TCP header.
+
+- ``hdr``: TCP header definition (``rte_tcp.h``).
+- Default ``mask`` matches source and destination ports only.
+
+Item: ``SCTP``
+^^^^^^^^^^^^^^
+
+Matches a SCTP header.
+
+- ``hdr``: SCTP header definition (``rte_sctp.h``).
+- Default ``mask`` matches source and destination ports only.
+
+Item: ``VXLAN``
+^^^^^^^^^^^^^^^
+
+Matches a VXLAN header (RFC 7348).
+
+- ``flags``: normally 0x08 (I flag).
+- ``rsvd0``: reserved, normally 0x000000.
+- ``vni``: VXLAN network identifier.
+- ``rsvd1``: reserved, normally 0x00.
+- Default ``mask`` matches VNI only.
+
+Item: ``E_TAG``
+^^^^^^^^^^^^^^^
+
+Matches an IEEE 802.1BR E-Tag header.
+
+- ``tpid``: tag protocol identifier (0x893F)
+- ``epcp_edei_in_ecid_b``: E-Tag control information (E-TCI), E-PCP (3b),
+ E-DEI (1b), ingress E-CID base (12b).
+- ``rsvd_grp_ecid_b``: reserved (2b), GRP (2b), E-CID base (12b).
+- ``in_ecid_e``: ingress E-CID ext.
+- ``ecid_e``: E-CID ext.
+- Default ``mask`` simultaneously matches GRP and E-CID base.
+
+Item: ``NVGRE``
+^^^^^^^^^^^^^^^
+
+Matches a NVGRE header (RFC 7637).
+
+- ``c_k_s_rsvd0_ver``: checksum (1b), undefined (1b), key bit (1b),
+ sequence number (1b), reserved 0 (9b), version (3b). This field must have
+ value 0x2000 according to RFC 7637.
+- ``protocol``: protocol type (0x6558).
+- ``tni``: virtual subnet ID.
+- ``flow_id``: flow ID.
+- Default ``mask`` matches TNI only.
+
+Item: ``MPLS``
+^^^^^^^^^^^^^^
+
+Matches a MPLS header.
+
+- ``label_tc_s_ttl``: label, TC, Bottom of Stack and TTL.
+- Default ``mask`` matches label only.
+
+Item: ``GRE``
+^^^^^^^^^^^^^^
+
+Matches a GRE header.
+
+- ``c_rsvd0_ver``: checksum, reserved 0 and version.
+- ``protocol``: protocol type.
+- Default ``mask`` matches protocol only.
+
+Actions
+~~~~~~~
+
+Each possible action is represented by a type. Some have associated
+configuration structures. Several actions combined in a list can be affected
+to a flow rule. That list is not ordered.
+
+They fall in three categories:
+
+- Terminating actions (such as QUEUE, DROP, RSS, PF, VF) that prevent
+ processing matched packets by subsequent flow rules, unless overridden
+ with PASSTHRU.
+
+- Non-terminating actions (PASSTHRU, DUP) that leave matched packets up for
+ additional processing by subsequent flow rules.
+
+- Other non-terminating meta actions that do not affect the fate of packets
+ (END, VOID, MARK, FLAG, COUNT).
+
+When several actions are combined in a flow rule, they should all have
+different types (e.g. dropping a packet twice is not possible).
+
+Only the last action of a given type is taken into account. PMDs still
+perform error checking on the entire list.
+
+Like matching patterns, action lists are terminated by END items.
+
+*Note that PASSTHRU is the only action able to override a terminating rule.*
+
+Example of action that redirects packets to queue index 10:
+
+.. _table_rte_flow_action_example:
+
+.. table:: Queue action
+
+ +-----------+-------+
+ | Field | Value |
+ +===========+=======+
+ | ``index`` | 10 |
+ +-----------+-------+
+
+Action lists examples, their order is not significant, applications must
+consider all actions to be performed simultaneously:
+
+.. _table_rte_flow_count_and_drop:
+
+.. table:: Count and drop
+
+ +-------+--------+
+ | Index | Action |
+ +=======+========+
+ | 0 | COUNT |
+ +-------+--------+
+ | 1 | DROP |
+ +-------+--------+
+ | 2 | END |
+ +-------+--------+
+
+|
+
+.. _table_rte_flow_mark_count_redirect:
+
+.. table:: Mark, count and redirect
+
+ +-------+--------+-----------+-------+
+ | Index | Action | Field | Value |
+ +=======+========+===========+=======+
+ | 0 | MARK | ``mark`` | 0x2a |
+ +-------+--------+-----------+-------+
+ | 1 | COUNT |
+ +-------+--------+-----------+-------+
+ | 2 | QUEUE | ``queue`` | 10 |
+ +-------+--------+-----------+-------+
+ | 3 | END |
+ +-------+----------------------------+
+
+|
+
+.. _table_rte_flow_redirect_queue_5:
+
+.. table:: Redirect to queue 5
+
+ +-------+--------+-----------+-------+
+ | Index | Action | Field | Value |
+ +=======+========+===========+=======+
+ | 0 | DROP |
+ +-------+--------+-----------+-------+
+ | 1 | QUEUE | ``queue`` | 5 |
+ +-------+--------+-----------+-------+
+ | 2 | END |
+ +-------+----------------------------+
+
+In the above example, considering both actions are performed simultaneously,
+the end result is that only QUEUE has any effect.
+
+.. _table_rte_flow_redirect_queue_3:
+
+.. table:: Redirect to queue 3
+
+ +-------+--------+-----------+-------+
+ | Index | Action | Field | Value |
+ +=======+========+===========+=======+
+ | 0 | QUEUE | ``queue`` | 5 |
+ +-------+--------+-----------+-------+
+ | 1 | VOID |
+ +-------+--------+-----------+-------+
+ | 2 | QUEUE | ``queue`` | 3 |
+ +-------+--------+-----------+-------+
+ | 3 | END |
+ +-------+----------------------------+
+
+As previously described, only the last action of a given type found in the
+list is taken into account. The above example also shows that VOID is
+ignored.
+
+Action types
+~~~~~~~~~~~~
+
+Common action types are described in this section. Like pattern item types,
+this list is not exhaustive as new actions will be added in the future.
+
+Action: ``END``
+^^^^^^^^^^^^^^^
+
+End marker for action lists. Prevents further processing of actions, thereby
+ending the list.
+
+- Its numeric value is 0 for convenience.
+- PMD support is mandatory.
+- No configurable properties.
+
+.. _table_rte_flow_action_end:
+
+.. table:: END
+
+ +---------------+
+ | Field |
+ +===============+
+ | no properties |
+ +---------------+
+
+Action: ``VOID``
+^^^^^^^^^^^^^^^^
+
+Used as a placeholder for convenience. It is ignored and simply discarded by
+PMDs.
+
+- PMD support is mandatory.
+- No configurable properties.
+
+.. _table_rte_flow_action_void:
+
+.. table:: VOID
+
+ +---------------+
+ | Field |
+ +===============+
+ | no properties |
+ +---------------+
+
+Action: ``PASSTHRU``
+^^^^^^^^^^^^^^^^^^^^
+
+Leaves packets up for additional processing by subsequent flow rules. This
+is the default when a rule does not contain a terminating action, but can be
+specified to force a rule to become non-terminating.
+
+- No configurable properties.
+
+.. _table_rte_flow_action_passthru:
+
+.. table:: PASSTHRU
+
+ +---------------+
+ | Field |
+ +===============+
+ | no properties |
+ +---------------+
+
+Example to copy a packet to a queue and continue processing by subsequent
+flow rules:
+
+.. _table_rte_flow_action_passthru_example:
+
+.. table:: Copy to queue 8
+
+ +-------+--------+-----------+-------+
+ | Index | Action | Field | Value |
+ +=======+========+===========+=======+
+ | 0 | PASSTHRU |
+ +-------+--------+-----------+-------+
+ | 1 | QUEUE | ``queue`` | 8 |
+ +-------+--------+-----------+-------+
+ | 2 | END |
+ +-------+----------------------------+
+
+Action: ``MARK``
+^^^^^^^^^^^^^^^^
+
+Attaches an integer value to packets and sets ``PKT_RX_FDIR`` and
+``PKT_RX_FDIR_ID`` mbuf flags.
+
+This value is arbitrary and application-defined. Maximum allowed value
+depends on the underlying implementation. It is returned in the
+``hash.fdir.hi`` mbuf field.
+
+.. _table_rte_flow_action_mark:
+
+.. table:: MARK
+
+ +--------+--------------------------------------+
+ | Field | Value |
+ +========+======================================+
+ | ``id`` | integer value to return with packets |
+ +--------+--------------------------------------+
+
+Action: ``FLAG``
+^^^^^^^^^^^^^^^^
+
+Flags packets. Similar to `Action: MARK`_ without a specific value; only
+sets the ``PKT_RX_FDIR`` mbuf flag.
+
+- No configurable properties.
+
+.. _table_rte_flow_action_flag:
+
+.. table:: FLAG
+
+ +---------------+
+ | Field |
+ +===============+
+ | no properties |
+ +---------------+
+
+Action: ``QUEUE``
+^^^^^^^^^^^^^^^^^
+
+Assigns packets to a given queue index.
+
+- Terminating by default.
+
+.. _table_rte_flow_action_queue:
+
+.. table:: QUEUE
+
+ +-----------+--------------------+
+ | Field | Value |
+ +===========+====================+
+ | ``index`` | queue index to use |
+ +-----------+--------------------+
+
+Action: ``DROP``
+^^^^^^^^^^^^^^^^
+
+Drop packets.
+
+- No configurable properties.
+- Terminating by default.
+- PASSTHRU overrides this action if both are specified.
+
+.. _table_rte_flow_action_drop:
+
+.. table:: DROP
+
+ +---------------+
+ | Field |
+ +===============+
+ | no properties |
+ +---------------+
+
+Action: ``COUNT``
+^^^^^^^^^^^^^^^^^
+
+Enables counters for this rule.
+
+These counters can be retrieved and reset through ``rte_flow_query()``, see
+``struct rte_flow_query_count``.
+
+- Counters can be retrieved with ``rte_flow_query()``.
+- No configurable properties.
+
+.. _table_rte_flow_action_count:
+
+.. table:: COUNT
+
+ +---------------+
+ | Field |
+ +===============+
+ | no properties |
+ +---------------+
+
+Query structure to retrieve and reset flow rule counters:
+
+.. _table_rte_flow_query_count:
+
+.. table:: COUNT query
+
+ +---------------+-----+-----------------------------------+
+ | Field | I/O | Value |
+ +===============+=====+===================================+
+ | ``reset`` | in | reset counter after query |
+ +---------------+-----+-----------------------------------+
+ | ``hits_set`` | out | ``hits`` field is set |
+ +---------------+-----+-----------------------------------+
+ | ``bytes_set`` | out | ``bytes`` field is set |
+ +---------------+-----+-----------------------------------+
+ | ``hits`` | out | number of hits for this rule |
+ +---------------+-----+-----------------------------------+
+ | ``bytes`` | out | number of bytes through this rule |
+ +---------------+-----+-----------------------------------+
+
+Action: ``DUP``
+^^^^^^^^^^^^^^^
+
+Duplicates packets to a given queue index.
+
+This is normally combined with QUEUE, however when used alone, it is
+actually similar to QUEUE + PASSTHRU.
+
+- Non-terminating by default.
+
+.. _table_rte_flow_action_dup:
+
+.. table:: DUP
+
+ +-----------+------------------------------------+
+ | Field | Value |
+ +===========+====================================+
+ | ``index`` | queue index to duplicate packet to |
+ +-----------+------------------------------------+
+
+Action: ``RSS``
+^^^^^^^^^^^^^^^
+
+Similar to QUEUE, except RSS is additionally performed on packets to spread
+them among several queues according to the provided parameters.
+
+Note: RSS hash result is stored in the ``hash.rss`` mbuf field which
+overlaps ``hash.fdir.lo``. Since `Action: MARK`_ sets the ``hash.fdir.hi``
+field only, both can be requested simultaneously.
+
+- Terminating by default.
+
+.. _table_rte_flow_action_rss:
+
+.. table:: RSS
+
+ +--------------+------------------------------+
+ | Field | Value |
+ +==============+==============================+
+ | ``rss_conf`` | RSS parameters |
+ +--------------+------------------------------+
+ | ``num`` | number of entries in queue[] |
+ +--------------+------------------------------+
+ | ``queue[]`` | queue indices to use |
+ +--------------+------------------------------+
+
+Action: ``PF``
+^^^^^^^^^^^^^^
+
+Redirects packets to the physical function (PF) of the current device.
+
+- No configurable properties.
+- Terminating by default.
+
+.. _table_rte_flow_action_pf:
+
+.. table:: PF
+
+ +---------------+
+ | Field |
+ +===============+
+ | no properties |
+ +---------------+
+
+Action: ``VF``
+^^^^^^^^^^^^^^
+
+Redirects packets to a virtual function (VF) of the current device.
+
+Packets matched by a VF pattern item can be redirected to their original VF
+ID instead of the specified one. This parameter may not be available and is
+not guaranteed to work properly if the VF part is matched by a prior flow
+rule or if packets are not addressed to a VF in the first place.
+
+- Terminating by default.
+
+.. _table_rte_flow_action_vf:
+
+.. table:: VF
+
+ +--------------+--------------------------------+
+ | Field | Value |
+ +==============+================================+
+ | ``original`` | use original VF ID if possible |
+ +--------------+--------------------------------+
+ | ``vf`` | VF ID to redirect packets to |
+ +--------------+--------------------------------+
+
+Negative types
+~~~~~~~~~~~~~~
+
+All specified pattern items (``enum rte_flow_item_type``) and actions
+(``enum rte_flow_action_type``) use positive identifiers.
+
+The negative space is reserved for dynamic types generated by PMDs during
+run-time. PMDs may encounter them as a result but must not accept negative
+identifiers they are not aware of.
+
+A method to generate them remains to be defined.
+
+Planned types
+~~~~~~~~~~~~~
+
+Pattern item types will be added as new protocols are implemented.
+
+Variable headers support through dedicated pattern items, for example in
+order to match specific IPv4 options and IPv6 extension headers would be
+stacked after IPv4/IPv6 items.
+
+Other action types are planned but are not defined yet. These include the
+ability to alter packet data in several ways, such as performing
+encapsulation/decapsulation of tunnel headers.
+
+Rules management
+----------------
+
+A rather simple API with few functions is provided to fully manage flow
+rules.
+
+Each created flow rule is associated with an opaque, PMD-specific handle
+pointer. The application is responsible for keeping it until the rule is
+destroyed.
+
+Flows rules are represented by ``struct rte_flow`` objects.
+
+Validation
+~~~~~~~~~~
+
+Given that expressing a definite set of device capabilities is not
+practical, a dedicated function is provided to check if a flow rule is
+supported and can be created.
+
+.. code-block:: c
+
+ int
+ rte_flow_validate(uint8_t port_id,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+The flow rule is validated for correctness and whether it could be accepted
+by the device given sufficient resources. The rule is checked against the
+current device mode and queue configuration. The flow rule may also
+optionally be validated against existing flow rules and device resources.
+This function has no effect on the target device.
+
+The returned value is guaranteed to remain valid only as long as no
+successful calls to ``rte_flow_create()`` or ``rte_flow_destroy()`` are made
+in the meantime and no device parameter affecting flow rules in any way are
+modified, due to possible collisions or resource limitations (although in
+such cases ``EINVAL`` should not be returned).
+
+Arguments:
+
+- ``port_id``: port identifier of Ethernet device.
+- ``attr``: flow rule attributes.
+- ``pattern``: pattern specification (list terminated by the END pattern
+ item).
+- ``actions``: associated actions (list terminated by the END action).
+- ``error``: perform verbose error reporting if not NULL. PMDs initialize
+ this structure in case of error only.
+
+Return values:
+
+- 0 if flow rule is valid and can be created. A negative errno value
+ otherwise (``rte_errno`` is also set), the following errors are defined.
+- ``-ENOSYS``: underlying device does not support this functionality.
+- ``-EINVAL``: unknown or invalid rule specification.
+- ``-ENOTSUP``: valid but unsupported rule specification (e.g. partial
+ bit-masks are unsupported).
+- ``EEXIST``: collision with an existing rule. Only returned if device
+ supports flow rule collision checking and there was a flow rule
+ collision. Not receiving this return code is no guarantee that creating
+ the rule will not fail due to a collision.
+- ``ENOMEM``: not enough memory to execute the function, or if the device
+ supports resource validation, resource limitation on the device.
+- ``-EBUSY``: action cannot be performed due to busy device resources, may
+ succeed if the affected queues or even the entire port are in a stopped
+ state (see ``rte_eth_dev_rx_queue_stop()`` and ``rte_eth_dev_stop()``).
+
+Creation
+~~~~~~~~
+
+Creating a flow rule is similar to validating one, except the rule is
+actually created and a handle returned.
+
+.. code-block:: c
+
+ struct rte_flow *
+ rte_flow_create(uint8_t port_id,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action *actions[],
+ struct rte_flow_error *error);
+
+Arguments:
+
+- ``port_id``: port identifier of Ethernet device.
+- ``attr``: flow rule attributes.
+- ``pattern``: pattern specification (list terminated by the END pattern
+ item).
+- ``actions``: associated actions (list terminated by the END action).
+- ``error``: perform verbose error reporting if not NULL. PMDs initialize
+ this structure in case of error only.
+
+Return values:
+
+A valid handle in case of success, NULL otherwise and ``rte_errno`` is set
+to the positive version of one of the error codes defined for
+``rte_flow_validate()``.
+
+Destruction
+~~~~~~~~~~~
+
+Flow rules destruction is not automatic, and a queue or a port should not be
+released if any are still attached to them. Applications must take care of
+performing this step before releasing resources.
+
+.. code-block:: c
+
+ int
+ rte_flow_destroy(uint8_t port_id,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+
+
+Failure to destroy a flow rule handle may occur when other flow rules depend
+on it, and destroying it would result in an inconsistent state.
+
+This function is only guaranteed to succeed if handles are destroyed in
+reverse order of their creation.
+
+Arguments:
+
+- ``port_id``: port identifier of Ethernet device.
+- ``flow``: flow rule handle to destroy.
+- ``error``: perform verbose error reporting if not NULL. PMDs initialize
+ this structure in case of error only.
+
+Return values:
+
+- 0 on success, a negative errno value otherwise and ``rte_errno`` is set.
+
+Flush
+~~~~~
+
+Convenience function to destroy all flow rule handles associated with a
+port. They are released as with successive calls to ``rte_flow_destroy()``.
+
+.. code-block:: c
+
+ int
+ rte_flow_flush(uint8_t port_id,
+ struct rte_flow_error *error);
+
+In the unlikely event of failure, handles are still considered destroyed and
+no longer valid but the port must be assumed to be in an inconsistent state.
+
+Arguments:
+
+- ``port_id``: port identifier of Ethernet device.
+- ``error``: perform verbose error reporting if not NULL. PMDs initialize
+ this structure in case of error only.
+
+Return values:
+
+- 0 on success, a negative errno value otherwise and ``rte_errno`` is set.
+
+Query
+~~~~~
+
+Query an existing flow rule.
+
+This function allows retrieving flow-specific data such as counters. Data
+is gathered by special actions which must be present in the flow rule
+definition.
+
+.. code-block:: c
+
+ int
+ rte_flow_query(uint8_t port_id,
+ struct rte_flow *flow,
+ enum rte_flow_action_type action,
+ void *data,
+ struct rte_flow_error *error);
+
+Arguments:
+
+- ``port_id``: port identifier of Ethernet device.
+- ``flow``: flow rule handle to query.
+- ``action``: action type to query.
+- ``data``: pointer to storage for the associated query data type.
+- ``error``: perform verbose error reporting if not NULL. PMDs initialize
+ this structure in case of error only.
+
+Return values:
+
+- 0 on success, a negative errno value otherwise and ``rte_errno`` is set.
+
+Verbose error reporting
+-----------------------
+
+The defined *errno* values may not be accurate enough for users or
+application developers who want to investigate issues related to flow rules
+management. A dedicated error object is defined for this purpose:
+
+.. code-block:: c
+
+ enum rte_flow_error_type {
+ RTE_FLOW_ERROR_TYPE_NONE, /**< No error. */
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, /**< Cause unspecified. */
+ RTE_FLOW_ERROR_TYPE_HANDLE, /**< Flow rule (handle). */
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, /**< Group field. */
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, /**< Priority field. */
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, /**< Ingress field. */
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, /**< Egress field. */
+ RTE_FLOW_ERROR_TYPE_ATTR, /**< Attributes structure. */
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM, /**< Pattern length. */
+ RTE_FLOW_ERROR_TYPE_ITEM, /**< Specific pattern item. */
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, /**< Number of actions. */
+ RTE_FLOW_ERROR_TYPE_ACTION, /**< Specific action. */
+ };
+
+ struct rte_flow_error {
+ enum rte_flow_error_type type; /**< Cause field and error types. */
+ const void *cause; /**< Object responsible for the error. */
+ const char *message; /**< Human-readable error message. */
+ };
+
+Error type ``RTE_FLOW_ERROR_TYPE_NONE`` stands for no error, in which case
+remaining fields can be ignored. Other error types describe the type of the
+object pointed by ``cause``.
+
+If non-NULL, ``cause`` points to the object responsible for the error. For a
+flow rule, this may be a pattern item or an individual action.
+
+If non-NULL, ``message`` provides a human-readable error message.
+
+This object is normally allocated by applications and set by PMDs in case of
+error, the message points to a constant string which does not need to be
+freed by the application, however its pointer can be considered valid only
+as long as its associated DPDK port remains configured. Closing the
+underlying device or unloading the PMD invalidates it.
+
+Caveats
+-------
+
+- DPDK does not keep track of flow rules definitions or flow rule objects
+ automatically. Applications may keep track of the former and must keep
+ track of the latter. PMDs may also do it for internal needs, however this
+ must not be relied on by applications.
+
+- Flow rules are not maintained between successive port initializations. An
+ application exiting without releasing them and restarting must re-create
+ them from scratch.
+
+- API operations are synchronous and blocking (``EAGAIN`` cannot be
+ returned).
+
+- There is no provision for reentrancy/multi-thread safety, although nothing
+ should prevent different devices from being configured at the same
+ time. PMDs may protect their control path functions accordingly.
+
+- Stopping the data path (TX/RX) should not be necessary when managing flow
+ rules. If this cannot be achieved naturally or with workarounds (such as
+ temporarily replacing the burst function pointers), an appropriate error
+ code must be returned (``EBUSY``).
+
+- PMDs, not applications, are responsible for maintaining flow rules
+ configuration when stopping and restarting a port or performing other
+ actions which may affect them. They can only be destroyed explicitly by
+ applications.
+
+For devices exposing multiple ports sharing global settings affected by flow
+rules:
+
+- All ports under DPDK control must behave consistently, PMDs are
+ responsible for making sure that existing flow rules on a port are not
+ affected by other ports.
+
+- Ports not under DPDK control (unaffected or handled by other applications)
+ are user's responsibility. They may affect existing flow rules and cause
+ undefined behavior. PMDs aware of this may prevent flow rules creation
+ altogether in such cases.
+
+PMD interface
+-------------
+
+The PMD interface is defined in ``rte_flow_driver.h``. It is not subject to
+API/ABI versioning constraints as it is not exposed to applications and may
+evolve independently.
+
+It is currently implemented on top of the legacy filtering framework through
+filter type *RTE_ETH_FILTER_GENERIC* that accepts the single operation
+*RTE_ETH_FILTER_GET* to return PMD-specific *rte_flow* callbacks wrapped
+inside ``struct rte_flow_ops``.
+
+This overhead is temporarily necessary in order to keep compatibility with
+the legacy filtering framework, which should eventually disappear.
+
+- PMD callbacks implement exactly the interface described in `Rules
+ management`_, except for the port ID argument which has already been
+ converted to a pointer to the underlying ``struct rte_eth_dev``.
+
+- Public API functions do not process flow rules definitions at all before
+ calling PMD functions (no basic error checking, no validation
+ whatsoever). They only make sure these callbacks are non-NULL or return
+ the ``ENOSYS`` (function not supported) error.
+
+This interface additionally defines the following helper functions:
+
+- ``rte_flow_ops_get()``: get generic flow operations structure from a
+ port.
+
+- ``rte_flow_error_set()``: initialize generic flow error structure.
+
+More will be added over time.
+
+Device compatibility
+--------------------
+
+No known implementation supports all the described features.
+
+Unsupported features or combinations are not expected to be fully emulated
+in software by PMDs for performance reasons. Partially supported features
+may be completed in software as long as hardware performs most of the work
+(such as queue redirection and packet recognition).
+
+However PMDs are expected to do their best to satisfy application requests
+by working around hardware limitations as long as doing so does not affect
+the behavior of existing flow rules.
+
+The following sections provide a few examples of such cases and describe how
+PMDs should handle them, they are based on limitations built into the
+previous APIs.
+
+Global bit-masks
+~~~~~~~~~~~~~~~~
+
+Each flow rule comes with its own, per-layer bit-masks, while hardware may
+support only a single, device-wide bit-mask for a given layer type, so that
+two IPv4 rules cannot use different bit-masks.
+
+The expected behavior in this case is that PMDs automatically configure
+global bit-masks according to the needs of the first flow rule created.
+
+Subsequent rules are allowed only if their bit-masks match those, the
+``EEXIST`` error code should be returned otherwise.
+
+Unsupported layer types
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Many protocols can be simulated by crafting patterns with the `Item: RAW`_
+type.
+
+PMDs can rely on this capability to simulate support for protocols with
+headers not directly recognized by hardware.
+
+``ANY`` pattern item
+~~~~~~~~~~~~~~~~~~~~
+
+This pattern item stands for anything, which can be difficult to translate
+to something hardware would understand, particularly if followed by more
+specific types.
+
+Consider the following pattern:
+
+.. _table_rte_flow_unsupported_any:
+
+.. table:: Pattern with ANY as L3
+
+ +-------+-----------------------+
+ | Index | Item |
+ +=======+=======================+
+ | 0 | ETHER |
+ +-------+-----+---------+-------+
+ | 1 | ANY | ``num`` | ``1`` |
+ +-------+-----+---------+-------+
+ | 2 | TCP |
+ +-------+-----------------------+
+ | 3 | END |
+ +-------+-----------------------+
+
+Knowing that TCP does not make sense with something other than IPv4 and IPv6
+as L3, such a pattern may be translated to two flow rules instead:
+
+.. _table_rte_flow_unsupported_any_ipv4:
+
+.. table:: ANY replaced with IPV4
+
+ +-------+--------------------+
+ | Index | Item |
+ +=======+====================+
+ | 0 | ETHER |
+ +-------+--------------------+
+ | 1 | IPV4 (zeroed mask) |
+ +-------+--------------------+
+ | 2 | TCP |
+ +-------+--------------------+
+ | 3 | END |
+ +-------+--------------------+
+
+|
+
+.. _table_rte_flow_unsupported_any_ipv6:
+
+.. table:: ANY replaced with IPV6
+
+ +-------+--------------------+
+ | Index | Item |
+ +=======+====================+
+ | 0 | ETHER |
+ +-------+--------------------+
+ | 1 | IPV6 (zeroed mask) |
+ +-------+--------------------+
+ | 2 | TCP |
+ +-------+--------------------+
+ | 3 | END |
+ +-------+--------------------+
+
+Note that as soon as a ANY rule covers several layers, this approach may
+yield a large number of hidden flow rules. It is thus suggested to only
+support the most common scenarios (anything as L2 and/or L3).
+
+Unsupported actions
+~~~~~~~~~~~~~~~~~~~
+
+- When combined with `Action: QUEUE`_, packet counting (`Action: COUNT`_)
+ and tagging (`Action: MARK`_ or `Action: FLAG`_) may be implemented in
+ software as long as the target queue is used by a single rule.
+
+- A rule specifying both `Action: DUP`_ + `Action: QUEUE`_ may be translated
+ to two hidden rules combining `Action: QUEUE`_ and `Action: PASSTHRU`_.
+
+- When a single target queue is provided, `Action: RSS`_ can also be
+ implemented through `Action: QUEUE`_.
+
+Flow rules priority
+~~~~~~~~~~~~~~~~~~~
+
+While it would naturally make sense, flow rules cannot be assumed to be
+processed by hardware in the same order as their creation for several
+reasons:
+
+- They may be managed internally as a tree or a hash table instead of a
+ list.
+- Removing a flow rule before adding another one can either put the new rule
+ at the end of the list or reuse a freed entry.
+- Duplication may occur when packets are matched by several rules.
+
+For overlapping rules (particularly in order to use `Action: PASSTHRU`_)
+predictable behavior is only guaranteed by using different priority levels.
+
+Priority levels are not necessarily implemented in hardware, or may be
+severely limited (e.g. a single priority bit).
+
+For these reasons, priority levels may be implemented purely in software by
+PMDs.
+
+- For devices expecting flow rules to be added in the correct order, PMDs
+ may destroy and re-create existing rules after adding a new one with
+ a higher priority.
+
+- A configurable number of dummy or empty rules can be created at
+ initialization time to save high priority slots for later.
+
+- In order to save priority levels, PMDs may evaluate whether rules are
+ likely to collide and adjust their priority accordingly.
+
+Future evolutions
+-----------------
+
+- A device profile selection function which could be used to force a
+ permanent profile instead of relying on its automatic configuration based
+ on existing flow rules.
+
+- A method to optimize *rte_flow* rules with specific pattern items and
+ action types generated on the fly by PMDs. DPDK should assign negative
+ numbers to these in order to not collide with the existing types. See
+ `Negative types`_.
+
+- Adding specific egress pattern items and actions as described in
+ `Attribute: Traffic direction`_.
+
+- Optional software fallback when PMDs are unable to handle requested flow
+ rules so applications do not have to implement their own.
+
+API migration
+-------------
+
+Exhaustive list of deprecated filter types (normally prefixed with
+*RTE_ETH_FILTER_*) found in ``rte_eth_ctrl.h`` and methods to convert them
+to *rte_flow* rules.
+
+``MACVLAN`` to ``ETH`` → ``VF``, ``PF``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+*MACVLAN* can be translated to a basic `Item: ETH`_ flow rule with a
+terminating `Action: VF`_ or `Action: PF`_.
+
+.. _table_rte_flow_migration_macvlan:
+
+.. table:: MACVLAN conversion
+
+ +--------------------------+---------+
+ | Pattern | Actions |
+ +===+=====+==========+=====+=========+
+ | 0 | ETH | ``spec`` | any | VF, |
+ | | +----------+-----+ PF |
+ | | | ``last`` | N/A | |
+ | | +----------+-----+ |
+ | | | ``mask`` | any | |
+ +---+-----+----------+-----+---------+
+ | 1 | END | END |
+ +---+----------------------+---------+
+
+``ETHERTYPE`` to ``ETH`` → ``QUEUE``, ``DROP``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+*ETHERTYPE* is basically an `Item: ETH`_ flow rule with a terminating
+`Action: QUEUE`_ or `Action: DROP`_.
+
+.. _table_rte_flow_migration_ethertype:
+
+.. table:: ETHERTYPE conversion
+
+ +--------------------------+---------+
+ | Pattern | Actions |
+ +===+=====+==========+=====+=========+
+ | 0 | ETH | ``spec`` | any | QUEUE, |
+ | | +----------+-----+ DROP |
+ | | | ``last`` | N/A | |
+ | | +----------+-----+ |
+ | | | ``mask`` | any | |
+ +---+-----+----------+-----+---------+
+ | 1 | END | END |
+ +---+----------------------+---------+
+
+``FLEXIBLE`` to ``RAW`` → ``QUEUE``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+*FLEXIBLE* can be translated to one `Item: RAW`_ pattern with a terminating
+`Action: QUEUE`_ and a defined priority level.
+
+.. _table_rte_flow_migration_flexible:
+
+.. table:: FLEXIBLE conversion
+
+ +--------------------------+---------+
+ | Pattern | Actions |
+ +===+=====+==========+=====+=========+
+ | 0 | RAW | ``spec`` | any | QUEUE |
+ | | +----------+-----+ |
+ | | | ``last`` | N/A | |
+ | | +----------+-----+ |
+ | | | ``mask`` | any | |
+ +---+-----+----------+-----+---------+
+ | 1 | END | END |
+ +---+----------------------+---------+
+
+``SYN`` to ``TCP`` → ``QUEUE``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+*SYN* is a `Item: TCP`_ rule with only the ``syn`` bit enabled and masked,
+and a terminating `Action: QUEUE`_.
+
+Priority level can be set to simulate the high priority bit.
+
+.. _table_rte_flow_migration_syn:
+
+.. table:: SYN conversion
+
+ +-----------------------------------+---------+
+ | Pattern | Actions |
+ +===+======+==========+=============+=========+
+ | 0 | ETH | ``spec`` | unset | QUEUE |
+ | | +----------+-------------+ |
+ | | | ``last`` | unset | |
+ | | +----------+-------------+ |
+ | | | ``mask`` | unset | |
+ +---+------+----------+-------------+---------+
+ | 1 | IPV4 | ``spec`` | unset | END |
+ | | +----------+-------------+ |
+ | | | ``mask`` | unset | |
+ | | +----------+-------------+ |
+ | | | ``mask`` | unset | |
+ +---+------+----------+---------+---+ |
+ | 2 | TCP | ``spec`` | ``syn`` | 1 | |
+ | | +----------+---------+---+ |
+ | | | ``mask`` | ``syn`` | 1 | |
+ +---+------+----------+---------+---+ |
+ | 3 | END | |
+ +---+-------------------------------+---------+
+
+``NTUPLE`` to ``IPV4``, ``TCP``, ``UDP`` → ``QUEUE``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+*NTUPLE* is similar to specifying an empty L2, `Item: IPV4`_ as L3 with
+`Item: TCP`_ or `Item: UDP`_ as L4 and a terminating `Action: QUEUE`_.
+
+A priority level can be specified as well.
+
+.. _table_rte_flow_migration_ntuple:
+
+.. table:: NTUPLE conversion
+
+ +-----------------------------+---------+
+ | Pattern | Actions |
+ +===+======+==========+=======+=========+
+ | 0 | ETH | ``spec`` | unset | QUEUE |
+ | | +----------+-------+ |
+ | | | ``last`` | unset | |
+ | | +----------+-------+ |
+ | | | ``mask`` | unset | |
+ +---+------+----------+-------+---------+
+ | 1 | IPV4 | ``spec`` | any | END |
+ | | +----------+-------+ |
+ | | | ``last`` | unset | |
+ | | +----------+-------+ |
+ | | | ``mask`` | any | |
+ +---+------+----------+-------+ |
+ | 2 | TCP, | ``spec`` | any | |
+ | | UDP +----------+-------+ |
+ | | | ``last`` | unset | |
+ | | +----------+-------+ |
+ | | | ``mask`` | any | |
+ +---+------+----------+-------+ |
+ | 3 | END | |
+ +---+-------------------------+---------+
+
+``TUNNEL`` to ``ETH``, ``IPV4``, ``IPV6``, ``VXLAN`` (or other) → ``QUEUE``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+*TUNNEL* matches common IPv4 and IPv6 L3/L4-based tunnel types.
+
+In the following table, `Item: ANY`_ is used to cover the optional L4.
+
+.. _table_rte_flow_migration_tunnel:
+
+.. table:: TUNNEL conversion
+
+ +-------------------------------------------------------+---------+
+ | Pattern | Actions |
+ +===+==========================+==========+=============+=========+
+ | 0 | ETH | ``spec`` | any | QUEUE |
+ | | +----------+-------------+ |
+ | | | ``last`` | unset | |
+ | | +----------+-------------+ |
+ | | | ``mask`` | any | |
+ +---+--------------------------+----------+-------------+---------+
+ | 1 | IPV4, IPV6 | ``spec`` | any | END |
+ | | +----------+-------------+ |
+ | | | ``last`` | unset | |
+ | | +----------+-------------+ |
+ | | | ``mask`` | any | |
+ +---+--------------------------+----------+-------------+ |
+ | 2 | ANY | ``spec`` | any | |
+ | | +----------+-------------+ |
+ | | | ``last`` | unset | |
+ | | +----------+---------+---+ |
+ | | | ``mask`` | ``num`` | 0 | |
+ +---+--------------------------+----------+---------+---+ |
+ | 3 | VXLAN, GENEVE, TEREDO, | ``spec`` | any | |
+ | | NVGRE, GRE, ... +----------+-------------+ |
+ | | | ``last`` | unset | |
+ | | +----------+-------------+ |
+ | | | ``mask`` | any | |
+ +---+--------------------------+----------+-------------+ |
+ | 4 | END | |
+ +---+---------------------------------------------------+---------+
+
+``FDIR`` to most item types → ``QUEUE``, ``DROP``, ``PASSTHRU``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+*FDIR* is more complex than any other type, there are several methods to
+emulate its functionality. It is summarized for the most part in the table
+below.
+
+A few features are intentionally not supported:
+
+- The ability to configure the matching input set and masks for the entire
+ device, PMDs should take care of it automatically according to the
+ requested flow rules.
+
+ For example if a device supports only one bit-mask per protocol type,
+ source/address IPv4 bit-masks can be made immutable by the first created
+ rule. Subsequent IPv4 or TCPv4 rules can only be created if they are
+ compatible.
+
+ Note that only protocol bit-masks affected by existing flow rules are
+ immutable, others can be changed later. They become mutable again after
+ the related flow rules are destroyed.
+
+- Returning four or eight bytes of matched data when using flex bytes
+ filtering. Although a specific action could implement it, it conflicts
+ with the much more useful 32 bits tagging on devices that support it.
+
+- Side effects on RSS processing of the entire device. Flow rules that
+ conflict with the current device configuration should not be
+ allowed. Similarly, device configuration should not be allowed when it
+ affects existing flow rules.
+
+- Device modes of operation. "none" is unsupported since filtering cannot be
+ disabled as long as a flow rule is present.
+
+- "MAC VLAN" or "tunnel" perfect matching modes should be automatically set
+ according to the created flow rules.
+
+- Signature mode of operation is not defined but could be handled through a
+ specific item type if needed.
+
+.. _table_rte_flow_migration_fdir:
+
+.. table:: FDIR conversion
+
+ +----------------------------------------+-----------------------+
+ | Pattern | Actions |
+ +===+===================+==========+=====+=======================+
+ | 0 | ETH, RAW | ``spec`` | any | QUEUE, DROP, PASSTHRU |
+ | | +----------+-----+ |
+ | | | ``last`` | N/A | |
+ | | +----------+-----+ |
+ | | | ``mask`` | any | |
+ +---+-------------------+----------+-----+-----------------------+
+ | 1 | IPV4, IPv6 | ``spec`` | any | MARK |
+ | | +----------+-----+ |
+ | | | ``last`` | N/A | |
+ | | +----------+-----+ |
+ | | | ``mask`` | any | |
+ +---+-------------------+----------+-----+-----------------------+
+ | 2 | TCP, UDP, SCTP | ``spec`` | any | END |
+ | | +----------+-----+ |
+ | | | ``last`` | N/A | |
+ | | +----------+-----+ |
+ | | | ``mask`` | any | |
+ +---+-------------------+----------+-----+ |
+ | 3 | VF, PF (optional) | ``spec`` | any | |
+ | | +----------+-----+ |
+ | | | ``last`` | N/A | |
+ | | +----------+-----+ |
+ | | | ``mask`` | any | |
+ +---+-------------------+----------+-----+ |
+ | 4 | END | |
+ +---+------------------------------------+-----------------------+
+
+``HASH``
+~~~~~~~~
+
+There is no counterpart to this filter type because it translates to a
+global device setting instead of a pattern item. Device settings are
+automatically set according to the created flow rules.
+
+``L2_TUNNEL`` to ``VOID`` → ``VXLAN`` (or others)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All packets are matched. This type alters incoming packets to encapsulate
+them in a chosen tunnel type, optionally redirect them to a VF as well.
+
+The destination pool for tag based forwarding can be emulated with other
+flow rules using `Action: DUP`_.
+
+.. _table_rte_flow_migration_l2tunnel:
+
+.. table:: L2_TUNNEL conversion
+
+ +---------------------------+--------------------+
+ | Pattern | Actions |
+ +===+======+==========+=====+====================+
+ | 0 | VOID | ``spec`` | N/A | VXLAN, GENEVE, ... |
+ | | | | | |
+ | | | | | |
+ | | +----------+-----+ |
+ | | | ``last`` | N/A | |
+ | | +----------+-----+ |
+ | | | ``mask`` | N/A | |
+ | | | | | |
+ +---+------+----------+-----+--------------------+
+ | 1 | END | VF (optional) |
+ +---+ +--------------------+
+ | 2 | | END |
+ +---+-----------------------+--------------------+
diff --git a/doc/guides/prog_guide/source_org.rst b/doc/guides/prog_guide/source_org.rst
index d9c140f7..d5d01f38 100644
--- a/doc/guides/prog_guide/source_org.rst
+++ b/doc/guides/prog_guide/source_org.rst
@@ -140,7 +140,6 @@ The examples directory contains sample applications that show how libraries can
examples
+-- cmdline # Example of using the cmdline library
- +-- dpdk_qat # Sample integration with Intel QuickAssist
+-- exception_path # Sending packets to and from Linux TAP device
+-- helloworld # Basic Hello World example
+-- ip_reassembly # Example showing IP reassembly
diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index 4f997d47..59792907 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -53,7 +53,7 @@ vhost library should be able to:
Vhost API Overview
------------------
-The following is an overview of the Vhost API functions:
+The following is an overview of some key Vhost API functions:
* ``rte_vhost_driver_register(path, flags)``
@@ -95,7 +95,7 @@ The following is an overview of the Vhost API functions:
* for VM2NIC case, the ``nb_tx_desc`` has to be small enough: <= 64 if virtio
indirect feature is not enabled and <= 128 if it is enabled.
- The is because when dequeue zero copy is enabled, guest Tx used vring will
+ This is because when dequeue zero copy is enabled, guest Tx used vring will
be updated only when corresponding mbuf is freed. Thus, the nb_tx_desc
has to be small enough so that the PMD driver will run out of available
Tx descriptors and free mbufs timely. Otherwise, guest Tx vring would be
@@ -110,13 +110,13 @@ The following is an overview of the Vhost API functions:
of those segments, thus the fewer the segments, the quicker we will get
the mapping. NOTE: we may speed it by using tree searching in future.
-* ``rte_vhost_driver_session_start()``
+* ``rte_vhost_driver_set_features(path, features)``
- This function starts the vhost session loop to handle vhost messages. It
- starts an infinite loop, therefore it should be called in a dedicated
- thread.
+ This function sets the feature bits the vhost-user driver supports. The
+ vhost-user driver could be vhost-user net, yet it could be something else,
+ say, vhost-user SCSI.
-* ``rte_vhost_driver_callback_register(virtio_net_device_ops)``
+* ``rte_vhost_driver_callback_register(path, vhost_device_ops)``
This function registers a set of callbacks, to let DPDK applications take
the appropriate action when some events happen. The following events are
@@ -124,12 +124,12 @@ The following is an overview of the Vhost API functions:
* ``new_device(int vid)``
- This callback is invoked when a virtio net device becomes ready. ``vid``
- is the virtio net device ID.
+ This callback is invoked when a virtio device becomes ready. ``vid``
+ is the vhost device ID.
* ``destroy_device(int vid)``
- This callback is invoked when a virtio net device shuts down (or when the
+ This callback is invoked when a virtio device shuts down (or when the
vhost connection is broken).
* ``vring_state_changed(int vid, uint16_t queue_id, int enable)``
@@ -137,20 +137,30 @@ The following is an overview of the Vhost API functions:
This callback is invoked when a specific queue's state is changed, for
example to enabled or disabled.
-* ``rte_vhost_enqueue_burst(vid, queue_id, pkts, count)``
-
- Transmits (enqueues) ``count`` packets from host to guest.
-
-* ``rte_vhost_dequeue_burst(vid, queue_id, mbuf_pool, pkts, count)``
+ * ``features_changed(int vid, uint64_t features)``
- Receives (dequeues) ``count`` packets from guest, and stored them at ``pkts``.
+ This callback is invoked when the features is changed. For example,
+ ``VHOST_F_LOG_ALL`` will be set/cleared at the start/end of live
+ migration, respectively.
-* ``rte_vhost_feature_disable/rte_vhost_feature_enable(feature_mask)``
+* ``rte_vhost_driver_disable/enable_features(path, features))``
This function disables/enables some features. For example, it can be used to
disable mergeable buffers and TSO features, which both are enabled by
default.
+* ``rte_vhost_driver_start(path)``
+
+ This function triggers the vhost-user negotiation. It should be invoked at
+ the end of initializing a vhost-user driver.
+
+* ``rte_vhost_enqueue_burst(vid, queue_id, pkts, count)``
+
+ Transmits (enqueues) ``count`` packets from host to guest.
+
+* ``rte_vhost_dequeue_burst(vid, queue_id, mbuf_pool, pkts, count)``
+
+ Receives (dequeues) ``count`` packets from guest, and stored them at ``pkts``.
Vhost-user Implementations
--------------------------
diff --git a/doc/guides/prog_guide/writing_efficient_code.rst b/doc/guides/prog_guide/writing_efficient_code.rst
index 78d2afa9..8223acee 100644
--- a/doc/guides/prog_guide/writing_efficient_code.rst
+++ b/doc/guides/prog_guide/writing_efficient_code.rst
@@ -124,7 +124,7 @@ The code algorithm that dequeues messages may be something similar to the follow
while (1) {
/* Process as many elements as can be dequeued. */
- count = rte_ring_dequeue_burst(ring, obj_table, MAX_BULK);
+ count = rte_ring_dequeue_burst(ring, obj_table, MAX_BULK, NULL);
if (unlikely(count == 0))
continue;
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 2d17bc6e..ba9b5a21 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -8,66 +8,123 @@ API and ABI deprecation notices are to be posted here.
Deprecation Notices
-------------------
+* eal: the following functions are deprecated starting from 17.05 and will
+ be removed in 17.08:
+
+ - ``rte_set_log_level``, replaced by ``rte_log_set_global_level``
+ - ``rte_get_log_level``, replaced by ``rte_log_get_global_level``
+ - ``rte_set_log_type``, replaced by ``rte_log_set_level``
+ - ``rte_get_log_type``, replaced by ``rte_log_get_level``
+
+* devargs: An ABI change is planned for 17.08 for the structure ``rte_devargs``.
+ The current version is dependent on bus-specific device identifier, which will
+ be made generic and abstracted, in order to make the EAL bus-agnostic.
+
+ Accompanying this evolution, device command line parameters will thus support
+ explicit bus definition in a device declaration.
+
* igb_uio: iomem mapping and sysfs files created for iomem and ioport in
igb_uio will be removed, because we are able to detect these from what Linux
has exposed, like the way we have done with uio-pci-generic. This change
- targets release 17.02.
-
-* ABI/API changes are planned for 17.02: ``rte_device``, ``rte_driver`` will be
- impacted because of introduction of a new ``rte_bus`` hierarchy. This would
- also impact the way devices are identified by EAL. A bus-device-driver model
- will be introduced providing a hierarchical view of devices.
-
-* ``eth_driver`` is planned to be removed in 17.02. This currently serves as
- a placeholder for PMDs to register themselves. Changes for ``rte_bus`` will
- provide a way to handle device initialization currently being done in
- ``eth_driver``.
-
-* In 17.02 ABI changes are planned: the ``rte_eth_dev`` structure will be
- extended with new function pointer ``tx_pkt_prepare`` allowing verification
- and processing of packet burst to meet HW specific requirements before
- transmit. Also new fields will be added to the ``rte_eth_desc_lim`` structure:
- ``nb_seg_max`` and ``nb_mtu_seg_max`` providing information about number of
- segments limit to be transmitted by device for TSO/non-TSO packets.
-
-* In 17.02 ABI change is planned: the ``rte_eth_dev_info`` structure
- will be extended with a new member ``fw_version`` in order to store
- the NIC firmware version.
-
-* ethdev: an API change is planned for 17.02 for the function
- ``_rte_eth_dev_callback_process``. In 17.02 the function will return an ``int``
- instead of ``void`` and a fourth parameter ``void *ret_param`` will be added.
+ targets release 17.05.
-* ethdev: for 17.02 it is planned to deprecate the following five functions
- and move them in ixgbe:
+* The VDEV subsystem will be converted as driver of the new bus model.
+ It may imply some EAL API changes in 17.08.
- ``rte_eth_dev_set_vf_rxmode``
+* The struct ``rte_pci_driver`` is planned to be removed from
+ ``rte_cryptodev_driver`` and ``rte_eventdev_driver`` in 17.08.
- ``rte_eth_dev_set_vf_rx``
+* ethdev: An API change is planned for 17.08 for the function
+ ``_rte_eth_dev_callback_process``. In 17.08 the function will return an ``int``
+ instead of ``void`` and a fourth parameter ``void *ret_param`` will be added.
+
+* ethdev: for 17.08 it is planned to deprecate the following nine rte_eth_dev_*
+ functions and move them into the ixgbe PMD:
+
+ ``rte_eth_dev_bypass_init``, ``rte_eth_dev_bypass_state_set``,
+ ``rte_eth_dev_bypass_state_show``, ``rte_eth_dev_bypass_event_store``,
+ ``rte_eth_dev_bypass_event_show``, ``rte_eth_dev_wd_timeout_store``,
+ ``rte_eth_dev_bypass_wd_timeout_show``, ``rte_eth_dev_bypass_ver_show``,
+ ``rte_eth_dev_bypass_wd_reset``.
- ``rte_eth_dev_set_vf_tx``
+ The following fields will be removed from ``struct eth_dev_ops``:
- ``rte_eth_dev_set_vf_vlan_filter``
+ ``bypass_init_t``, ``bypass_state_set_t``, ``bypass_state_show_t``,
+ ``bypass_event_set_t``, ``bypass_event_show_t``, ``bypass_wd_timeout_set_t``,
+ ``bypass_wd_timeout_show_t``, ``bypass_ver_show_t``, ``bypass_wd_reset_t``.
- ``rte_eth_set_vf_rate_limit``
+ The functions will be renamed to the following, and moved to the ``ixgbe`` PMD:
-* ABI changes are planned for 17.02 in the ``rte_mbuf`` structure: some fields
- may be reordered to facilitate the writing of ``data_off``, ``refcnt``, and
- ``nb_segs`` in one operation, because some platforms have an overhead if the
- store address is not naturally aligned. Other mbuf fields, such as the
- ``port`` field, may be moved or removed as part of this mbuf work. A
- ``timestamp`` will also be added.
+ ``rte_pmd_ixgbe_bypass_init``, ``rte_pmd_ixgbe_bypass_state_set``,
+ ``rte_pmd_ixgbe_bypass_state_show``, ``rte_pmd_ixgbe_bypass_event_set``,
+ ``rte_pmd_ixgbe_bypass_event_show``, ``rte_pmd_ixgbe_bypass_wd_timeout_set``,
+ ``rte_pmd_ixgbe_bypass_wd_timeout_show``, ``rte_pmd_ixgbe_bypass_ver_show``,
+ ``rte_pmd_ixgbe_bypass_wd_reset``.
* The mbuf flags PKT_RX_VLAN_PKT and PKT_RX_QINQ_PKT are deprecated and
are respectively replaced by PKT_RX_VLAN_STRIPPED and
PKT_RX_QINQ_STRIPPED, that are better described. The old flags and
- their behavior will be kept until 16.11 and will be removed in 17.02.
-
-* mempool: The functions ``rte_mempool_count`` and ``rte_mempool_free_count``
- will be removed in 17.02.
- They are replaced by ``rte_mempool_avail_count`` and
- ``rte_mempool_in_use_count`` respectively.
-
-* mempool: The functions for single/multi producer/consumer are deprecated
- and will be removed in 17.02.
- It is replaced by ``rte_mempool_generic_get/put`` functions.
+ their behavior will be kept until 17.05 and will be removed in 17.08.
+
+* ethdev: Tx offloads will no longer be enabled by default in 17.08.
+ Instead, the ``rte_eth_txmode`` structure will be extended with
+ bit field to enable each Tx offload.
+ Besides of making the Rx/Tx configuration API more consistent for the
+ application, PMDs will be able to provide a better out of the box performance.
+ As part of the work, ``ETH_TXQ_FLAGS_NO*`` will be superseded as well.
+
+* ethdev: the legacy filter API, including
+ ``rte_eth_dev_filter_supported()``, ``rte_eth_dev_filter_ctrl()`` as well
+ as filter types MACVLAN, ETHERTYPE, FLEXIBLE, SYN, NTUPLE, TUNNEL, FDIR,
+ HASH and L2_TUNNEL, is superseded by the generic flow API (rte_flow) in
+ PMDs that implement the latter.
+ Target release for removal of the legacy API will be defined once most
+ PMDs have switched to rte_flow.
+
+* cryptodev: All PMD names definitions will be moved to the individual PMDs
+ in 17.08.
+
+* cryptodev: The following changes will be done in in 17.08:
+
+ - the device type enumeration ``rte_cryptodev_type`` will be removed
+ - the following structures will be changed: ``rte_cryptodev_session``,
+ ``rte_cryptodev_sym_session``, ``rte_cryptodev_info``, ``rte_cryptodev``
+ - the function ``rte_cryptodev_count_devtype`` will be replaced by
+ ``rte_cryptodev_device_count_by_driver``
+
+* cryptodev: API changes are planned for 17.08 for the sessions management
+ to make it agnostic to the underlying devices, removing coupling with
+ crypto PMDs, so a single session can be used on multiple devices.
+
+ - ``struct rte_cryptodev_sym_session``, dev_id, dev_type will be removed,
+ _private field changed to the indirect array of private data pointers of
+ all supported devices
+
+ An API of followed functions will be changed to allow operate on multiple
+ devices with one session:
+
+ - ``rte_cryptodev_sym_session_create``
+ - ``rte_cryptodev_sym_session_free``
+ - ``rte_cryptodev_sym_session_pool_create``
+
+ While dev_id will not be stored in the ``struct rte_cryptodev_sym_session``,
+ directly, the change of followed API is required:
+
+ - ``rte_cryptodev_queue_pair_attach_sym_session``
+ - ``rte_cryptodev_queue_pair_detach_sym_session``
+
+* cryptodev: the structures ``rte_crypto_op``, ``rte_crypto_sym_op``
+ and ``rte_crypto_sym_xform`` will be restructured in 17.08,
+ for correctness and improvement.
+
+* crypto/scheduler: the following two functions are deprecated starting
+ from 17.05 and will be removed in 17.08:
+
+ - ``rte_crpytodev_scheduler_mode_get``, replaced by ``rte_cryptodev_scheduler_mode_get``
+ - ``rte_crpytodev_scheduler_mode_set``, replaced by ``rte_cryptodev_scheduler_mode_set``
+
+* librte_table: The ``key_mask`` parameter will be added to all the hash tables
+ that currently do not have it, as well as to the hash compute function prototype.
+ The non-"do-sig" versions of the hash tables will be removed
+ (including the ``signature_offset`` parameter)
+ and the "do-sig" versions renamed accordingly.
diff --git a/doc/guides/rel_notes/index.rst b/doc/guides/rel_notes/index.rst
index 7e51b2c7..c4d243cd 100644
--- a/doc/guides/rel_notes/index.rst
+++ b/doc/guides/rel_notes/index.rst
@@ -36,6 +36,8 @@ Release Notes
:numbered:
rel_description
+ release_17_05
+ release_17_02
release_16_11
release_16_07
release_16_04
diff --git a/doc/guides/rel_notes/known_issues.rst b/doc/guides/rel_notes/known_issues.rst
index 3cd42376..3f6d8cb5 100644
--- a/doc/guides/rel_notes/known_issues.rst
+++ b/doc/guides/rel_notes/known_issues.rst
@@ -640,3 +640,105 @@ I40e VF may not receive packets in the promiscuous mode
**Driver/Module**:
Poll Mode Driver (PMD).
+
+
+uio pci generic module bind failed in X710/XL710/XXV710
+-------------------------------------------------------
+
+**Description**:
+ The ``uio_pci_generic`` module is not supported by XL710, since the errata of XL710
+ states that the Interrupt Status bit is not implemented. The errata is the item #71
+ from the `xl710 controller spec
+ <http://www.intel.com/content/www/us/en/embedded/products/networking/xl710-10-40-controller-spec-update.html>`_.
+ The hw limitation is the same as other X710/XXV710 NICs.
+
+**Implication**:
+ When use ``--bind=uio_pci_generic``, the ``uio_pci_generic`` module probes device and check the Interrupt
+ Status bit. Since it is not supported by X710/XL710/XXV710, it return a *failed* value. The statement
+ that these products don’t support INTx masking, is indicated in the related `linux kernel commit
+ <https://git.kernel.org/cgit/linux/kernel/git/stable/linux-stable.git/commit/drivers/pci/quirks.c?id=8bcf4525c5d43306c5fd07e132bc8650e3491aec>`_.
+
+**Resolution/Workaround**:
+ Do not bind the ``uio_pci_generic`` module in X710/XL710/XXV710 NICs.
+
+**Affected Environment/Platform**:
+ All.
+
+**Driver/Module**:
+ Poll Mode Driver (PMD).
+
+
+virtio tx_burst() function cannot do TSO on shared packets
+----------------------------------------------------------
+
+**Description**:
+ The standard TX function of virtio driver does not manage shared
+ packets properly when doing TSO. These packets should be read-only
+ but the driver modifies them.
+
+ When doing TSO, the virtio standard expects that the L4 checksum is
+ set to the pseudo header checksum in the packet data, which is
+ different than the DPDK API. The driver patches the L4 checksum to
+ conform to the virtio standard, but this solution is invalid when
+ dealing with shared packets (clones), because the packet data should
+ not be modified.
+
+**Implication**:
+ In this situation, the shared data will be modified by the driver,
+ potentially causing race conditions with the other users of the mbuf
+ data.
+
+**Resolution/Workaround**:
+ The workaround in the application is to ensure that the network
+ headers in the packet data are not shared.
+
+**Affected Environment/Platform**:
+ Virtual machines running a virtio driver.
+
+**Driver/Module**:
+ Poll Mode Driver (PMD).
+
+
+igb uio legacy mode can not be used in X710/XL710/XXV710
+--------------------------------------------------------
+
+**Description**:
+ X710/XL710/XXV710 NICs lack support for indicating INTx is asserted via the interrupt
+ bit in the PCI status register. Linux delected them from INTx support table. The related
+ `commit <https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/commit/drivers/pci/quirks.c?id=8bcf4525c5d43306c5fd07e132bc8650e3491aec>`_.
+
+**Implication**:
+ When insmod ``igb_uio`` with ``intr_mode=legacy`` and test link status interrupt. Since
+ INTx interrupt is not supported by X710/XL710/XXV710, it will cause Input/Output error
+ when reading file descriptor.
+
+**Resolution/Workaround**:
+ Do not bind ``igb_uio`` with legacy mode in X710/XL710/XXV710 NICs, or do not use kernel
+ version >4.7 when you bind ``igb_uio`` with legacy mode.
+
+**Affected Environment/Platform**:
+ ALL.
+
+**Driver/Module**:
+ Poll Mode Driver (PMD).
+
+
+igb_uio can not be used when running l3fwd-power
+------------------------------------------------
+
+**Description**:
+ Link Status Change(LSC) interrupt and packet receiving interrupt are all enabled in l3fwd-power
+ APP. Because of UIO only support one interrupt, so these two kinds of interrupt need to share
+ one, and the receiving interrupt have the higher priority, so can't get the right link status.
+
+**Implication**:
+ When insmod ``igb_uio`` and running l3fwd-power APP, link status getting doesn't work properly.
+
+**Resolution/Workaround**:
+ Use vfio-pci when LSC and packet receiving interrupt enabled.
+
+**Affected Environment/Platform**:
+ ALL.
+
+**Driver/Module**:
+ ``igb_uio`` module.
diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst
index fbf2e368..8c9ec65c 100644
--- a/doc/guides/rel_notes/release_16_11.rst
+++ b/doc/guides/rel_notes/release_16_11.rst
@@ -598,113 +598,3 @@ Tested OSes
* Ubuntu 14.04
* Ubuntu 15.04
* Ubuntu 16.04
-
-Fixes in 16.11 LTS Release
---------------------------
-
-16.11.1
-~~~~~~~
-
-* app/test: fix symmetric session free in crypto perf tests
-* app/testpmd: fix check for invalid ports
-* app/testpmd: fix static build link ordering
-* crypto/aesni_gcm: fix IV size in capabilities
-* crypto/aesni_gcm: fix J0 padding bytes
-* crypto/aesni_mb: fix incorrect crypto session
-* crypto/openssl: fix extra bytes written at end of data
-* crypto/openssl: fix indentation in guide
-* crypto/qat: fix IV size in capabilities
-* crypto/qat: fix to avoid buffer overwrite in OOP case
-* cryptodev: fix crash on null dereference
-* cryptodev: fix loop in device query
-* devargs: reset driver name pointer on parsing failure
-* drivers/crypto: fix different auth/cipher keys
-* ethdev: check maximum number of queues for statistics
-* ethdev: fix extended statistics name index
-* ethdev: fix port data mismatched in multiple process model
-* ethdev: fix port lookup if none
-* ethdev: remove invalid function from version map
-* examples/ethtool: fix driver information
-* examples/ethtool: fix querying non-PCI devices
-* examples/ip_pipeline: fix coremask limitation
-* examples/ip_pipeline: fix parsing of pass-through pipeline
-* examples/l2fwd-crypto: fix overflow
-* examples/vhost: fix calculation of mbuf count
-* examples/vhost: fix lcore initialization
-* mempool: fix API documentation
-* mempool: fix stack handler dequeue
-* net/af_packet: fix fd use after free
-* net/bnx2x: fix Rx mode configuration
-* net/cxgbe/base: initialize variable before reading EEPROM
-* net/cxgbe: fix parenthesis on bitwise operation
-* net/ena: fix setting host attributes
-* net/enic: fix hardcoding of some flow director masks
-* net/enic: fix memory leak with oversized Tx packets
-* net/enic: remove unnecessary function parameter attributes
-* net/i40e: enable auto link update for 25G
-* net/i40e: fix Rx checksum flag
-* net/i40e: fix TC bandwidth definition
-* net/i40e: fix VF reset flow
-* net/i40e: fix checksum flag in x86 vector Rx
-* net/i40e: fix crash in close
-* net/i40e: fix deletion of all macvlan filters
-* net/i40e: fix ethertype filter on X722
-* net/i40e: fix link update delay
-* net/i40e: fix logging for Tx free threshold check
-* net/i40e: fix segment number in reassemble process
-* net/i40e: fix wrong return value when handling PF message
-* net/i40e: fix xstats value mapping
-* net/i40evf: fix casting between structs
-* net/i40evf: fix reporting of imissed packets
-* net/ixgbe: fix blocked interrupts
-* net/ixgbe: fix received packets number for ARM
-* net/ixgbe: fix received packets number for ARM NEON
-* net/ixgbevf: fix max packet length
-* net/mlx5: fix RSS hash result for flows
-* net/mlx5: fix Rx packet validation and type
-* net/mlx5: fix Tx doorbell
-* net/mlx5: fix endianness in Tx completion queue
-* net/mlx5: fix inconsistent link status
-* net/mlx5: fix leak when starvation occurs
-* net/mlx5: fix link status query
-* net/mlx5: fix memory leak when parsing device params
-* net/mlx5: fix missing inline attributes
-* net/mlx5: fix updating total length of multi-packet send
-* net/mlx: fix IPv4 and IPv6 packet type
-* net/nfp: fix VLAN offload flags check
-* net/nfp: fix typo in Tx offload capabilities
-* net/pcap: fix timestamps in output pcap file
-* net/qede/base: fix FreeBSD build
-* net/qede: add vendor/device id info
-* net/qede: fix PF fastpath status block index
-* net/qede: fix filtering code
-* net/qede: fix function declaration
-* net/qede: fix per queue statisitics
-* net/qede: fix resource leak
-* net/vhost: fix socket file deleted on stop
-* net/vhost: fix unix socket not removed as closing
-* net/virtio-user: fix not properly reset device
-* net/virtio-user: fix wrongly get/set features
-* net/virtio: fix build without virtio-user
-* net/virtio: fix crash when number of virtio devices > 1
-* net/virtio: fix multiple process support
-* net/virtio: fix performance regression due to TSO
-* net/virtio: fix rewriting LSC flag
-* net/virtio: fix wrong Rx/Tx method for secondary process
-* net/virtio: optimize header reset on any layout
-* net/virtio: store IO port info locally
-* net/virtio: store PCI operators pointer locally
-* net/vmxnet3: fix Rx deadlock
-* pci: fix check of mknod
-* pmdinfogen: fix endianness with cross-compilation
-* pmdinfogen: fix null dereference
-* sched: fix crash when freeing port
-* usertools: fix active interface detection when binding
-* vdev: fix detaching with alias
-* vfio: fix file descriptor leak in multi-process
-* vhost: allow many vhost-user ports
-* vhost: do not GSO when no header is present
-* vhost: fix dead loop in enqueue path
-* vhost: fix guest/host physical address mapping
-* vhost: fix long stall of negotiation
-* vhost: fix memory leak
diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
new file mode 100644
index 00000000..357965ac
--- /dev/null
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -0,0 +1,692 @@
+DPDK Release 17.02
+==================
+
+.. **Read this first.**
+
+ The text below explains how to update the release notes.
+
+ Use proper spelling, capitalization and punctuation in all sections.
+
+ Variable and config names should be quoted as fixed width text: ``LIKE_THIS``.
+
+ Build the docs and view the output file to ensure the changes are correct::
+
+ make doc-guides-html
+
+ firefox build/doc/html/guides/rel_notes/release_17_02.html
+
+
+New Features
+------------
+
+.. This section should contain new features added in this release. Sample format:
+
+ * **Add a title in the past tense with a full stop.**
+
+ Add a short 1-2 sentence description in the past tense. The description
+ should be enough to allow someone scanning the release notes to understand
+ the new feature.
+
+ If the feature adds a lot of sub-features you can use a bullet list like this.
+
+ * Added feature foo to do something.
+ * Enhanced feature bar to do something else.
+
+ Refer to the previous release notes for examples.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **Added support for representing buses in EAL**
+
+ The ``rte_bus`` structure was introduced into the EAL. This allows for
+ devices to be represented by buses they are connected to. A new bus can be
+ added to DPDK by extending the ``rte_bus`` structure and implementing the
+ scan and probe functions. Once a new bus is registered using the provided
+ APIs, new devices can be detected and initialized using bus scan and probe
+ callbacks.
+
+ With this change, devices other than PCI or VDEV type can be represented
+ in the DPDK framework.
+
+* **Added generic EAL API for I/O device memory read/write operations.**
+
+ This API introduces 8 bit, 16 bit, 32 bit and 64 bit I/O device
+ memory read/write operations along with "relaxed" versions.
+
+ Weakly-ordered architectures like ARM need an additional I/O barrier for
+ device memory read/write access over PCI bus. By introducing the EAL
+ abstraction for I/O device memory read/write access, the drivers can access
+ I/O device memory in an architecture-agnostic manner. The relaxed version
+ does not have an additional I/O memory barrier, which is useful in accessing
+ the device registers of integrated controllers which is implicitly strongly
+ ordered with respect to memory access.
+
+* **Added generic flow API (rte_flow).**
+
+ This API provides a generic means to configure hardware to match specific
+ ingress or egress traffic, alter its behavior and query related counters
+ according to any number of user-defined rules.
+
+ In order to expose a single interface with an unambiguous behavior that is
+ common to all poll-mode drivers (PMDs) the ``rte_flow`` API is slightly
+ higher-level than the legacy filtering framework, which it encompasses and
+ supersedes (including all functions and filter types) .
+
+ See the :ref:`Generic flow API <Generic_flow_API>` documentation for more
+ information.
+
+* **Added firmware version get API.**
+
+ Added a new function ``rte_eth_dev_fw_version_get()`` to fetch the firmware
+ version for a given device.
+
+* **Added APIs for MACsec offload support to the ixgbe PMD.**
+
+ Six new APIs have been added to the ixgbe PMD for MACsec offload support.
+ The declarations for the APIs can be found in ``rte_pmd_ixgbe.h``.
+
+* **Added I219 NICs support.**
+
+ Added support for I219 Intel 1GbE NICs.
+
+* **Added VF Daemon (VFD) for i40e. - EXPERIMENTAL**
+
+ This is an EXPERIMENTAL feature to enhance the capability of the DPDK PF as
+ many VF management features are not currently supported by the kernel PF
+ driver. Some new private APIs are implemented directly in the PMD without an
+ abstraction layer. They can be used directly by some users who have the
+ need.
+
+ The new APIs to control VFs directly from PF include:
+
+ * Set VF MAC anti-spoofing.
+ * Set VF VLAN anti-spoofing.
+ * Set TX loopback.
+ * Set VF unicast promiscuous mode.
+ * Set VF multicast promiscuous mode.
+ * Set VF MTU.
+ * Get/reset VF stats.
+ * Set VF MAC address.
+ * Set VF VLAN stripping.
+ * Vf VLAN insertion.
+ * Set VF broadcast mode.
+ * Set VF VLAN tag.
+ * Set VF VLAN filter.
+
+ VFD also includes VF to PF mailbox message management from an application.
+ When the PF receives mailbox messages from the VF the PF should call the
+ callback provided by the application to know if they're permitted to be
+ processed.
+
+ As an EXPERIMENTAL feature, please be aware it can be changed or even
+ removed without prior notice.
+
+* **Updated the i40e base driver.**
+
+ Updated the i40e base driver, including the following changes:
+
+ * Replace existing legacy ``memcpy()`` calls with ``i40e_memcpy()`` calls.
+ * Use ``BIT()`` macro instead of bit fields.
+ * Add clear all WoL filters implementation.
+ * Add broadcast promiscuous control per VLAN.
+ * Remove unused ``X722_SUPPORT`` and ``I40E_NDIS_SUPPORT`` macros.
+
+* **Updated the enic driver.**
+
+ * Set new Rx checksum flags in mbufs to indicate unknown, good or bad checksums.
+ * Fix set/remove of MAC addresses. Allow up to 64 addresses per device.
+ * Enable TSO on outer headers.
+
+* **Added Solarflare libefx-based network PMD.**
+
+ Added a new network PMD which supports Solarflare SFN7xxx and SFN8xxx family
+ of 10/40 Gbps adapters.
+
+* **Updated the mlx4 driver.**
+
+ * Addressed a few bugs.
+
+* **Added support for Mellanox ConnectX-5 adapters (mlx5).**
+
+ Added support for Mellanox ConnectX-5 family of 10/25/40/50/100 Gbps
+ adapters to the existing mlx5 PMD.
+
+* **Updated the mlx5 driver.**
+
+ * Improve Tx performance by using vector logic.
+ * Improve RSS balancing when number of queues is not a power of two.
+ * Generic flow API support for Ethernet, IPv4, IPv4, UDP, TCP, VLAN and
+ VXLAN pattern items with DROP and QUEUE actions.
+ * Support for extended statistics.
+ * Addressed several data path bugs.
+ * As of MLNX_OFED 4.0-1.0.1.0, the Toeplitz RSS hash function is not
+ symmetric anymore for consistency with other PMDs.
+
+* **virtio-user with vhost-kernel as another exceptional path.**
+
+ Previously, we upstreamed a virtual device, virtio-user with vhost-user as
+ the backend as a way of enabling IPC (Inter-Process Communication) and user
+ space container networking.
+
+ Virtio-user with vhost-kernel as the backend is a solution for the exception
+ path, such as KNI, which exchanges packets with the kernel networking stack.
+ This solution is very promising in:
+
+ * Maintenance: vhost and vhost-net (kernel) is an upstreamed and extensively
+ used kernel module.
+ * Features: vhost-net is designed to be a networking solution, which has
+ lots of networking related features, like multi-queue, TSO, multi-seg
+ mbuf, etc.
+ * Performance: similar to KNI, this solution would use one or more
+ kthreads to send/receive packets from user space DPDK applications,
+ which has little impact on user space polling thread (except that
+ it might enter into kernel space to wake up those kthreads if
+ necessary).
+
+* **Added virtio Rx interrupt support.**
+
+ Added a feature to enable Rx interrupt mode for virtio pci net devices as
+ bound to VFIO (noiommu mode) and driven by virtio PMD.
+
+ With this feature, the virtio PMD can switch between polling mode and
+ interrupt mode, to achieve best performance, and at the same time save
+ power. It can work on both legacy and modern virtio devices. In this mode,
+ each ``rxq`` is mapped with an excluded MSIx interrupt.
+
+ See the :ref:`Virtio Interrupt Mode <virtio_interrupt_mode>` documentation
+ for more information.
+
+* **Added ARMv8 crypto PMD.**
+
+ A new crypto PMD has been added, which provides combined mode cryptographic
+ operations optimized for ARMv8 processors. The driver can be used to enhance
+ performance in processing chained operations such as cipher + HMAC.
+
+* **Updated the QAT PMD.**
+
+ The QAT PMD has been updated with additional support for:
+
+ * DES algorithm.
+ * Scatter-gather list (SGL) support.
+
+* **Updated the AESNI MB PMD.**
+
+ * The Intel(R) Multi Buffer Crypto for IPsec library used in
+ AESNI MB PMD has been moved to a new repository, in GitHub.
+ * Support has been added for single operations (cipher only and
+ authentication only).
+
+* **Updated the AES-NI GCM PMD.**
+
+ The AES-NI GCM PMD was migrated from the Multi Buffer library to the ISA-L
+ library. The migration entailed adding additional support for:
+
+ * GMAC algorithm.
+ * 256-bit cipher key.
+ * Session-less mode.
+ * Out-of place processing
+ * Scatter-gather support for chained mbufs (only out-of place and destination
+ mbuf must be contiguous)
+
+* **Added crypto performance test application.**
+
+ Added a new performance test application for measuring performance
+ parameters of PMDs available in the crypto tree.
+
+* **Added Elastic Flow Distributor library (rte_efd).**
+
+ Added a new library which uses perfect hashing to determine a target/value
+ for a given incoming flow key.
+
+ The library does not store the key itself for lookup operations, and
+ therefore, lookup performance is not dependent on the key size. Also, the
+ target/value can be any arbitrary value (8 bits by default). Finally, the
+ storage requirement is much smaller than a hash-based flow table and
+ therefore, it can better fit in CPU cache and scale to millions of flow
+ keys.
+
+ See the :ref:`Elastic Flow Distributor Library <Efd_Library>` documentation in
+ the Programmers Guide document, for more information.
+
+
+Resolved Issues
+---------------
+
+.. This section should contain bug fixes added to the relevant sections. Sample format:
+
+ * **code/section Fixed issue in the past tense with a full stop.**
+
+ Add a short 1-2 sentence description of the resolved issue in the past tense.
+ The title should contain the code/lib section like a commit message.
+ Add the entries in alphabetic order in the relevant sections below.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+Drivers
+~~~~~~~
+
+* **net/virtio: Fixed multiple process support.**
+
+ Fixed a few regressions introduced in recent releases that break the virtio
+ multiple process support.
+
+
+Examples
+~~~~~~~~
+
+* **examples/ethtool: Fixed crash with non-PCI devices.**
+
+ Fixed issue where querying a non-PCI device was dereferencing non-existent
+ PCI data resulting in a segmentation fault.
+
+
+
+API Changes
+-----------
+
+.. This section should contain API changes. Sample format:
+
+ * Add a short 1-2 sentence description of the API change. Use fixed width
+ quotes for ``rte_function_names`` or ``rte_struct_names``. Use the past tense.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **Moved five APIs for VF management from the ethdev to the ixgbe PMD.**
+
+ The following five APIs for VF management from the PF have been removed from
+ the ethdev, renamed, and added to the ixgbe PMD::
+
+ rte_eth_dev_set_vf_rate_limit()
+ rte_eth_dev_set_vf_rx()
+ rte_eth_dev_set_vf_rxmode()
+ rte_eth_dev_set_vf_tx()
+ rte_eth_dev_set_vf_vlan_filter()
+
+ The API's have been renamed to the following::
+
+ rte_pmd_ixgbe_set_vf_rate_limit()
+ rte_pmd_ixgbe_set_vf_rx()
+ rte_pmd_ixgbe_set_vf_rxmode()
+ rte_pmd_ixgbe_set_vf_tx()
+ rte_pmd_ixgbe_set_vf_vlan_filter()
+
+ The declarations for the API’s can be found in ``rte_pmd_ixgbe.h``.
+
+
+ABI Changes
+-----------
+
+.. This section should contain ABI changes. Sample format:
+
+ * Add a short 1-2 sentence description of the ABI change that was announced in
+ the previous releases and made in this release. Use fixed width quotes for
+ ``rte_function_names`` or ``rte_struct_names``. Use the past tense.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+
+
+Shared Library Versions
+-----------------------
+
+.. Update any library version updated in this release and prepend with a ``+``
+ sign, like this:
+
+ librte_acl.so.2
+ + librte_cfgfile.so.2
+ librte_cmdline.so.2
+
+ This section is a comment. do not overwrite or remove it.
+ =========================================================
+
+
+The libraries prepended with a plus sign were incremented in this version.
+
+.. code-block:: diff
+
+ librte_acl.so.2
+ librte_cfgfile.so.2
+ librte_cmdline.so.2
+ librte_cryptodev.so.2
+ librte_distributor.so.1
+ librte_eal.so.3
+ + librte_ethdev.so.6
+ librte_hash.so.2
+ librte_ip_frag.so.1
+ librte_jobstats.so.1
+ librte_kni.so.2
+ librte_kvargs.so.1
+ librte_lpm.so.2
+ librte_mbuf.so.2
+ librte_mempool.so.2
+ librte_meter.so.1
+ librte_net.so.1
+ librte_pdump.so.1
+ librte_pipeline.so.3
+ librte_pmd_bond.so.1
+ librte_pmd_ring.so.2
+ librte_port.so.3
+ librte_power.so.1
+ librte_reorder.so.1
+ librte_ring.so.1
+ librte_sched.so.1
+ librte_table.so.2
+ librte_timer.so.1
+ librte_vhost.so.3
+
+
+Tested Platforms
+----------------
+
+.. This section should contain a list of platforms that were tested with this release.
+
+ The format is:
+
+ * <vendor> platform with <vendor> <type of devices> combinations
+
+ * List of CPU
+ * List of OS
+ * List of devices
+ * Other relevant details...
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+This release has been tested with the below list of CPU/device/firmware/OS.
+Each section describes a different set of combinations.
+
+* Intel(R) platforms with Mellanox(R) NICs combinations
+
+ * Platform details
+
+ * Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz
+ * Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+ * Intel(R) Xeon(R) CPU E5-2697 v3 @ 2.60GHz
+
+ * OS:
+
+ * CentOS 7.0
+ * Fedora 23
+ * Fedora 24
+ * FreeBSD 10.3
+ * Red Hat Enterprise Linux 7.2
+ * SUSE Enterprise Linux 12
+ * Ubuntu 14.04 LTS
+ * Ubuntu 15.10
+ * Ubuntu 16.04 LTS
+ * Wind River Linux 8
+
+ * MLNX_OFED: 4.0-1.0.1.0
+
+ * NICs:
+
+ * Mellanox(R) ConnectX(R)-3 Pro 40G MCX354A-FCC_Ax (2x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1007
+ * Firmware version: 2.40.5030
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4111A-XCAT (1x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4111A-ACAT (1x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX4131A-BCAT/MCX413A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX415A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX4131A-GCAT/MCX413A-GCAT (1x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX414A-BCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-GCAT/MCX416A-BCAT/MCX416A-GCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-CCAT (1x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 100G MCX416A-CCAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 Lx 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 Lx 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.18.1000
+
+ * Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1017
+ * Firmware version: 16.18.1000
+
+ * Mellanox(R) ConnectX-5 Ex EN 100G MCX516A-CDAT (2x100G)
+
+ * Host interface: PCI Express 4.0 x16
+ * Device ID: 15b3:1019
+ * Firmware version: 16.18.1000
+
+* IBM(R) Power8(R) with Mellanox(R) NICs combinations
+
+ * Machine:
+
+ * Processor: POWER8E (raw), AltiVec supported
+
+ * type-model: 8247-22L
+ * Firmware FW810.21 (SV810_108)
+
+ * OS: Ubuntu 16.04 LTS PPC le
+
+ * MLNX_OFED: 4.0-1.0.1.0
+
+ * NICs:
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4111A-XCAT (1x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4111A-ACAT (1x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX4131A-BCAT/MCX413A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX415A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX4131A-GCAT/MCX413A-GCAT (1x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX414A-BCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-GCAT/MCX416A-BCAT/MCX416A-GCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-CCAT (1x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 100G MCX416A-CCAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 Lx 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.18.1000
+
+ * Mellanox(R) ConnectX(R)-4 Lx 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.18.1000
+
+ * Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1017
+ * Firmware version: 16.18.1000
+
+* Intel(R) platforms with Intel(R) NICs combinations
+
+ * Platform details
+
+ * Intel(R) Atom(TM) CPU C2758 @ 2.40GHz
+ * Intel(R) Xeon(R) CPU D-1540 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU E5-4667 v3 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+ * Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz
+ * Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz
+ * Intel(R) Xeon(R) CPU E5-2658 v2 @ 2.40GHz
+
+ * OS:
+
+ * CentOS 7.2
+ * Fedora 25
+ * FreeBSD 11
+ * Red Hat Enterprise Linux Server release 7.3
+ * SUSE Enterprise Linux 12
+ * Wind River Linux 8
+ * Ubuntu 16.04
+ * Ubuntu 16.10
+
+ * NICs:
+
+ * Intel(R) 82599ES 10 Gigabit Ethernet Controller
+
+ * Firmware version: 0x61bf0001
+ * Device id (pf/vf): 8086:10fb / 8086:10ed
+ * Driver version: 4.0.1-k (ixgbe)
+
+ * Intel(R) Corporation Ethernet Connection X552/X557-AT 10GBASE-T
+
+ * Firmware version: 0x800001cf
+ * Device id (pf/vf): 8086:15ad / 8086:15a8
+ * Driver version: 4.2.5 (ixgbe)
+
+ * Intel(R) Ethernet Converged Network Adapter X710-DA4 (4x10G)
+
+ * Firmware version: 5.05
+ * Device id (pf/vf): 8086:1572 / 8086:154c
+ * Driver version: 1.5.23 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter X710-DA2 (2x10G)
+
+ * Firmware version: 5.05
+ * Device id (pf/vf): 8086:1572 / 8086:154c
+ * Driver version: 1.5.23 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter XL710-QDA1 (1x40G)
+
+ * Firmware version: 5.05
+ * Device id (pf/vf): 8086:1584 / 8086:154c
+ * Driver version: 1.5.23 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter XL710-QDA2 (2X40G)
+
+ * Firmware version: 5.05
+ * Device id (pf/vf): 8086:1583 / 8086:154c
+ * Driver version: 1.5.23 (i40e)
+
+ * Intel(R) Corporation I350 Gigabit Network Connection
+
+ * Firmware version: 1.48, 0x800006e7
+ * Device id (pf/vf): 8086:1521 / 8086:1520
+ * Driver version: 5.2.13-k (igb)
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
new file mode 100644
index 00000000..cbdb4065
--- /dev/null
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -0,0 +1,829 @@
+DPDK Release 17.05
+==================
+
+.. **Read this first.**
+
+ The text in the sections below explains how to update the release notes.
+
+ Use proper spelling, capitalization and punctuation in all sections.
+
+ Variable and config names should be quoted as fixed width text:
+ ``LIKE_THIS``.
+
+ Build the docs and view the output file to ensure the changes are correct::
+
+ make doc-guides-html
+
+ xdg-open build/doc/html/guides/rel_notes/release_17_05.html
+
+
+New Features
+------------
+
+.. This section should contain new features added in this release. Sample
+ format:
+
+ * **Add a title in the past tense with a full stop.**
+
+ Add a short 1-2 sentence description in the past tense. The description
+ should be enough to allow someone scanning the release notes to
+ understand the new feature.
+
+ If the feature adds a lot of sub-features you can use a bullet list like
+ this:
+
+ * Added feature foo to do something.
+ * Enhanced feature bar to do something else.
+
+ Refer to the previous release notes for examples.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **Reorganized mbuf structure.**
+
+ The mbuf structure has been reorganized as follows:
+
+ * Align fields to facilitate the writing of ``data_off``, ``refcnt``, and
+ ``nb_segs`` in one operation.
+ * Use 2 bytes for port and number of segments.
+ * Move the sequence number to the second cache line.
+ * Add a timestamp field.
+ * Set default value for ``refcnt``, ``next`` and ``nb_segs`` at mbuf free.
+
+* **Added mbuf raw free API.**
+
+ Moved ``rte_mbuf_raw_free()`` and ``rte_pktmbuf_prefree_seg()`` functions to
+ the public API.
+
+* **Added free Tx mbuf on demand API.**
+
+ Added a new function ``rte_eth_tx_done_cleanup()`` which allows an
+ application to request the driver to release mbufs that are no longer in use
+ from a Tx ring, independent of whether or not the ``tx_rs_thresh`` has been
+ crossed.
+
+* **Added device removal interrupt.**
+
+ Added a new ethdev event ``RTE_ETH_DEV_INTR_RMV`` to signify
+ the sudden removal of a device.
+ This event can be advertised by PCI drivers and enabled accordingly.
+
+* **Added EAL dynamic log framework.**
+
+ Added new APIs to dynamically register named log types, and control
+ the level of each type independently.
+
+* **Added descriptor status ethdev API.**
+
+ Added a new API to get the status of a descriptor.
+
+ For Rx, it is almost similar to the ``rx_descriptor_done`` API, except
+ it differentiates descriptors which are held by the driver and not
+ returned to the hardware. For Tx, it is a new API.
+
+* **Increased number of next hops for LPM IPv6 to 2^21.**
+
+ The next_hop field has been extended from 8 bits to 21 bits for IPv6.
+
+* **Added VFIO hotplug support.**
+
+ Added hotplug support for VFIO in addition to the existing UIO support.
+
+* **Added PowerPC support to pci probing for vfio-pci devices.**
+
+ Enabled sPAPR IOMMU based pci probing for vfio-pci devices.
+
+* **Kept consistent PMD batching behavior.**
+
+ Removed the limit of fm10k/i40e/ixgbe Tx burst size and vhost Rx/Tx burst size
+ in order to support the same policy of "make an best effort to Rx/Tx pkts"
+ for PMDs.
+
+* **Updated the ixgbe base driver.**
+
+ Updated the ixgbe base driver, including the following changes:
+
+ * Add link block check for KR.
+ * Complete HW initialization even if SFP is not present.
+ * Add VF xcast promiscuous mode.
+
+* **Added PowerPC support for i40e and its vector PMD.**
+
+ Enabled i40e PMD and its vector PMD by default in PowerPC.
+
+* **Added VF max bandwidth setting in i40e.**
+
+ Enabled capability to set the max bandwidth for a VF in i40e.
+
+* **Added VF TC min and max bandwidth setting in i40e.**
+
+ Enabled capability to set the min and max allocated bandwidth for a TC on a
+ VF in i40.
+
+* **Added TC strict priority mode setting on i40e.**
+
+ There are 2 Tx scheduling modes supported for TCs by i40e HW: round robin
+ mode and strict priority mode. By default the round robin mode is used. It
+ is now possible to change the Tx scheduling mode for a TC. This is a global
+ setting on a physical port.
+
+* **Added i40e dynamic device personalization support.**
+
+ * Added dynamic device personalization processing to i40e firmware.
+
+* **Added Cloud Filter for QinQ steering to i40e.**
+
+ * Added a QinQ cloud filter on the i40e PMD, for steering traffic to a VM
+ using both VLAN tags. Note, this feature is not supported in Vector Mode.
+
+* **Updated mlx5 PMD.**
+
+ Updated the mlx5 driver, including the following changes:
+
+ * Added Generic flow API support for classification according to ether type.
+ * Extended Generic flow API support for classification of IPv6 flow
+ according to Vtc flow, Protocol and Hop limit.
+ * Added Generic flow API support for FLAG action.
+ * Added Generic flow API support for RSS action.
+ * Added support for TSO for non-tunneled and VXLAN packets.
+ * Added support for hardware Tx checksum offloads for VXLAN packets.
+ * Added support for user space Rx interrupt mode.
+ * Improved ConnectX-5 single core and maximum performance.
+
+* **Updated mlx4 PMD.**
+
+ Updated the mlx4 driver, including the following changes:
+
+ * Added support for Generic flow API basic flow items and actions.
+ * Added support for device removal event.
+
+* **Updated the sfc_efx driver.**
+
+ * Added Generic Flow API support for Ethernet, VLAN, IPv4, IPv6, UDP and TCP
+ pattern items with QUEUE action for ingress traffic.
+
+ * Added support for virtual functions (VFs).
+
+* **Added LiquidIO network PMD.**
+
+ Added poll mode driver support for Cavium LiquidIO II server adapter VFs.
+
+* **Added Atomic Rules Arkville PMD.**
+
+ Added a new poll mode driver for the Arkville family of
+ devices from Atomic Rules. The net/ark PMD supports line-rate
+ agnostic, multi-queue data movement on Arkville core FPGA instances.
+
+* **Added support for NXP DPAA2 - FSLMC bus.**
+
+ Added the new bus "fslmc" driver for NXP DPAA2 devices. See the
+ "Network Interface Controller Drivers" document for more details of this new
+ driver.
+
+* **Added support for NXP DPAA2 Network PMD.**
+
+ Added the new "dpaa2" net driver for NXP DPAA2 devices. See the
+ "Network Interface Controller Drivers" document for more details of this new
+ driver.
+
+* **Added support for the Wind River Systems AVP PMD.**
+
+ Added a new networking driver for the AVP device type. Theses devices are
+ specific to the Wind River Systems virtualization platforms.
+
+* **Added vmxnet3 version 3 support.**
+
+ Added support for vmxnet3 version 3 which includes several
+ performance enhancements such as configurable Tx data ring, Receive
+ Data Ring, and the ability to register memory regions.
+
+* **Updated the TAP driver.**
+
+ Updated the TAP PMD to:
+
+ * Support MTU modification.
+ * Support packet type for Rx.
+ * Support segmented packets on Rx and Tx.
+ * Speed up Rx on TAP when no packets are available.
+ * Support capturing traffic from another netdevice.
+ * Dynamically change link status when the underlying interface state changes.
+ * Added Generic Flow API support for Ethernet, VLAN, IPv4, IPv6, UDP and
+ TCP pattern items with DROP, QUEUE and PASSTHRU actions for ingress
+ traffic.
+
+* **Added MTU feature support to Virtio and Vhost.**
+
+ Implemented new Virtio MTU feature in Vhost and Virtio:
+
+ * Add ``rte_vhost_mtu_get()`` API to Vhost library.
+ * Enable Vhost PMD's MTU get feature.
+ * Get max MTU value from host in Virtio PMD
+
+* **Added interrupt mode support for virtio-user.**
+
+ Implemented Rxq interrupt mode and LSC support for virtio-user as a virtual
+ device. Supported cases:
+
+ * Rxq interrupt for virtio-user + vhost-user as the backend.
+ * Rxq interrupt for virtio-user + vhost-kernel as the backend.
+ * LSC interrupt for virtio-user + vhost-user as the backend.
+
+* **Added event driven programming model library (rte_eventdev).**
+
+ This API introduces an event driven programming model.
+
+ In a polling model, lcores poll ethdev ports and associated
+ Rx queues directly to look for a packet. By contrast in an event
+ driven model, lcores call the scheduler that selects packets for
+ them based on programmer-specified criteria. The Eventdev library
+ adds support for an event driven programming model, which offers
+ applications automatic multicore scaling, dynamic load balancing,
+ pipelining, packet ingress order maintenance and
+ synchronization services to simplify application packet processing.
+
+ By introducing an event driven programming model, DPDK can support
+ both polling and event driven programming models for packet processing,
+ and applications are free to choose whatever model
+ (or combination of the two) best suits their needs.
+
+* **Added Software Eventdev PMD.**
+
+ Added support for the software eventdev PMD. The software eventdev is a
+ software based scheduler device that implements the eventdev API. This
+ PMD allows an application to configure a pipeline using the eventdev
+ library, and run the scheduling workload on a CPU core.
+
+* **Added Cavium OCTEONTX Eventdev PMD.**
+
+ Added the new octeontx ssovf eventdev driver for OCTEONTX devices. See the
+ "Event Device Drivers" document for more details on this new driver.
+
+* **Added information metrics library.**
+
+ Added a library that allows information metrics to be added and updated
+ by producers, typically other libraries, for later retrieval by
+ consumers such as applications. It is intended to provide a
+ reporting mechanism that is independent of other libraries such
+ as ethdev.
+
+* **Added bit-rate calculation library.**
+
+ Added a library that can be used to calculate device bit-rates. Calculated
+ bitrates are reported using the metrics library.
+
+* **Added latency stats library.**
+
+ Added a library that measures packet latency. The collected statistics are
+ jitter and latency. For latency the minimum, average, and maximum is
+ measured.
+
+* **Added NXP DPAA2 SEC crypto PMD.**
+
+ A new "dpaa2_sec" hardware based crypto PMD for NXP DPAA2 devices has been
+ added. See the "Crypto Device Drivers" document for more details on this
+ driver.
+
+* **Updated the Cryptodev Scheduler PMD.**
+
+ * Added a packet-size based distribution mode, which distributes the enqueued
+ crypto operations among two slaves, based on their data lengths.
+ * Added fail-over scheduling mode, which enqueues crypto operations to a
+ primary slave first. Then, any operation that cannot be enqueued is
+ enqueued to a secondary slave.
+ * Added mode specific option support, so each scheduling mode can
+ now be configured individually by the new API.
+
+* **Updated the QAT PMD.**
+
+ The QAT PMD has been updated with additional support for:
+
+ * AES DOCSIS BPI algorithm.
+ * DES DOCSIS BPI algorithm.
+ * ZUC EEA3/EIA3 algorithms.
+
+* **Updated the AESNI MB PMD.**
+
+ The AESNI MB PMD has been updated with additional support for:
+
+ * AES DOCSIS BPI algorithm.
+
+* **Updated the OpenSSL PMD.**
+
+ The OpenSSL PMD has been updated with additional support for:
+
+ * DES DOCSIS BPI algorithm.
+
+
+Resolved Issues
+---------------
+
+.. This section should contain bug fixes added to the relevant
+ sections. Sample format:
+
+ * **code/section Fixed issue in the past tense with a full stop.**
+
+ Add a short 1-2 sentence description of the resolved issue in the past
+ tense.
+
+ The title should contain the code/lib section like a commit message.
+
+ Add the entries in alphabetic order in the relevant sections below.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+
+* **l2fwd-keepalive: Fixed unclean shutdowns.**
+
+ Added clean shutdown to l2fwd-keepalive so that it can free up
+ stale resources used for inter-process communication.
+
+
+Known Issues
+------------
+
+.. This section should contain new known issues in this release. Sample format:
+
+ * **Add title in present tense with full stop.**
+
+ Add a short 1-2 sentence description of the known issue in the present
+ tense. Add information on any known workarounds.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **LSC interrupt doesn't work for virtio-user + vhost-kernel.**
+
+ LSC interrupt cannot be detected when setting the backend, tap device,
+ up/down as we fail to find a way to monitor such event.
+
+
+API Changes
+-----------
+
+.. This section should contain API changes. Sample format:
+
+ * Add a short 1-2 sentence description of the API change. Use fixed width
+ quotes for ``rte_function_names`` or ``rte_struct_names``. Use the past
+ tense.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* The LPM ``next_hop`` field is extended from 8 bits to 21 bits for IPv6
+ while keeping ABI compatibility.
+
+* **Reworked rte_ring library.**
+
+ The rte_ring library has been reworked and updated. The following changes
+ have been made to it:
+
+ * Removed the build-time setting ``CONFIG_RTE_RING_SPLIT_PROD_CONS``.
+ * Removed the build-time setting ``CONFIG_RTE_LIBRTE_RING_DEBUG``.
+ * Removed the build-time setting ``CONFIG_RTE_RING_PAUSE_REP_COUNT``.
+ * Removed the function ``rte_ring_set_water_mark`` as part of a general
+ removal of watermarks support in the library.
+ * Added an extra parameter to the burst/bulk enqueue functions to
+ return the number of free spaces in the ring after enqueue. This can
+ be used by an application to implement its own watermark functionality.
+ * Added an extra parameter to the burst/bulk dequeue functions to return
+ the number elements remaining in the ring after dequeue.
+ * Changed the return value of the enqueue and dequeue bulk functions to
+ match that of the burst equivalents. In all cases, ring functions which
+ operate on multiple packets now return the number of elements enqueued
+ or dequeued, as appropriate. The updated functions are:
+
+ - ``rte_ring_mp_enqueue_bulk``
+ - ``rte_ring_sp_enqueue_bulk``
+ - ``rte_ring_enqueue_bulk``
+ - ``rte_ring_mc_dequeue_bulk``
+ - ``rte_ring_sc_dequeue_bulk``
+ - ``rte_ring_dequeue_bulk``
+
+ NOTE: the above functions all have different parameters as well as
+ different return values, due to the other listed changes above. This
+ means that all instances of the functions in existing code will be
+ flagged by the compiler. The return value usage should be checked
+ while fixing the compiler error due to the extra parameter.
+
+* **Reworked rte_vhost library.**
+
+ The rte_vhost library has been reworked to make it generic enough so that
+ the user could build other vhost-user drivers on top of it. To achieve this
+ the following changes have been made:
+
+ * The following vhost-pmd APIs are removed:
+
+ * ``rte_eth_vhost_feature_disable``
+ * ``rte_eth_vhost_feature_enable``
+ * ``rte_eth_vhost_feature_get``
+
+ * The vhost API ``rte_vhost_driver_callback_register(ops)`` is reworked to
+ be per vhost-user socket file. Thus, it takes one more argument:
+ ``rte_vhost_driver_callback_register(path, ops)``.
+
+ * The vhost API ``rte_vhost_get_queue_num`` is deprecated, instead,
+ ``rte_vhost_get_vring_num`` should be used.
+
+ * The following macros are removed in ``rte_virtio_net.h``
+
+ * ``VIRTIO_RXQ``
+ * ``VIRTIO_TXQ``
+ * ``VIRTIO_QNUM``
+
+ * The following net specific header files are removed in ``rte_virtio_net.h``
+
+ * ``linux/virtio_net.h``
+ * ``sys/socket.h``
+ * ``linux/if.h``
+ * ``rte_ether.h``
+
+ * The vhost struct ``virtio_net_device_ops`` is renamed to
+ ``vhost_device_ops``
+
+ * The vhost API ``rte_vhost_driver_session_start`` is removed. Instead,
+ ``rte_vhost_driver_start`` should be used, and there is no need to create
+ a thread to call it.
+
+ * The vhost public header file ``rte_virtio_net.h`` is renamed to
+ ``rte_vhost.h``
+
+
+ABI Changes
+-----------
+
+.. This section should contain ABI changes. Sample format:
+
+ * Add a short 1-2 sentence description of the ABI change that was announced
+ in the previous releases and made in this release. Use fixed width quotes
+ for ``rte_function_names`` or ``rte_struct_names``. Use the past tense.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **Reorganized the mbuf structure.**
+
+ The order and size of the fields in the ``mbuf`` structure changed,
+ as described in the `New Features`_ section.
+
+* The ``rte_cryptodev_info.sym`` structure has a new field ``max_nb_sessions_per_qp``
+ to support drivers which may support a limited number of sessions per queue_pair.
+
+
+Removed Items
+-------------
+
+.. This section should contain removed items in this release. Sample format:
+
+ * Add a short 1-2 sentence description of the removed item in the past
+ tense.
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* KNI vhost support has been removed.
+
+* The dpdk_qat sample application has been removed.
+
+Shared Library Versions
+-----------------------
+
+.. Update any library version updated in this release and prepend with a ``+``
+ sign, like this:
+
+ librte_acl.so.2
+ + librte_cfgfile.so.2
+ librte_cmdline.so.2
+
+ This section is a comment. do not overwrite or remove it.
+ =========================================================
+
+
+The libraries prepended with a plus sign were incremented in this version.
+
+.. code-block:: diff
+
+ librte_acl.so.2
+ + librte_bitratestats.so.1
+ librte_cfgfile.so.2
+ librte_cmdline.so.2
+ librte_cryptodev.so.2
+ librte_distributor.so.1
+ + librte_eal.so.4
+ librte_ethdev.so.6
+ librte_hash.so.2
+ librte_ip_frag.so.1
+ librte_jobstats.so.1
+ librte_kni.so.2
+ librte_kvargs.so.1
+ + librte_latencystats.so.1
+ librte_lpm.so.2
+ + librte_mbuf.so.3
+ librte_mempool.so.2
+ librte_meter.so.1
+ + librte_metrics.so.1
+ librte_net.so.1
+ librte_pdump.so.1
+ librte_pipeline.so.3
+ librte_pmd_bond.so.1
+ librte_pmd_ring.so.2
+ librte_port.so.3
+ librte_power.so.1
+ librte_reorder.so.1
+ librte_ring.so.1
+ librte_sched.so.1
+ librte_table.so.2
+ librte_timer.so.1
+ librte_vhost.so.3
+
+
+Tested Platforms
+----------------
+
+.. This section should contain a list of platforms that were tested with this
+ release.
+
+ The format is:
+
+ * <vendor> platform with <vendor> <type of devices> combinations
+
+ * List of CPU
+ * List of OS
+ * List of devices
+ * Other relevant details...
+
+ This section is a comment. do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* Intel(R) platforms with Intel(R) NICs combinations
+
+ * CPU
+
+ * Intel(R) Atom(TM) CPU C2758 @ 2.40GHz
+ * Intel(R) Xeon(R) CPU D-1540 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU E5-4667 v3 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+ * Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz
+ * Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz
+ * Intel(R) Xeon(R) CPU E5-2658 v2 @ 2.40GHz
+ * Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
+
+ * OS:
+
+ * CentOS 7.2
+ * Fedora 25
+ * FreeBSD 11
+ * Red Hat Enterprise Linux Server release 7.3
+ * SUSE Enterprise Linux 12
+ * Wind River Linux 8
+ * Ubuntu 16.04
+ * Ubuntu 16.10
+
+ * NICs:
+
+ * Intel(R) 82599ES 10 Gigabit Ethernet Controller
+
+ * Firmware version: 0x61bf0001
+ * Device id (pf/vf): 8086:10fb / 8086:10ed
+ * Driver version: 4.0.1-k (ixgbe)
+
+ * Intel(R) Corporation Ethernet Connection X552/X557-AT 10GBASE-T
+
+ * Firmware version: 0x800001cf
+ * Device id (pf/vf): 8086:15ad / 8086:15a8
+ * Driver version: 4.2.5 (ixgbe)
+
+ * Intel(R) Ethernet Converged Network Adapter X710-DA4 (4x10G)
+
+ * Firmware version: 5.05
+ * Device id (pf/vf): 8086:1572 / 8086:154c
+ * Driver version: 1.5.23 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter X710-DA2 (2x10G)
+
+ * Firmware version: 5.05
+ * Device id (pf/vf): 8086:1572 / 8086:154c
+ * Driver version: 1.5.23 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter XL710-QDA1 (1x40G)
+
+ * Firmware version: 5.05
+ * Device id (pf/vf): 8086:1584 / 8086:154c
+ * Driver version: 1.5.23 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter XL710-QDA2 (2X40G)
+
+ * Firmware version: 5.05
+ * Device id (pf/vf): 8086:1583 / 8086:154c
+ * Driver version: 1.5.23 (i40e)
+
+ * Intel(R) Corporation I350 Gigabit Network Connection
+
+ * Firmware version: 1.48, 0x800006e7
+ * Device id (pf/vf): 8086:1521 / 8086:1520
+ * Driver version: 5.2.13-k (igb)
+
+* Intel(R) platforms with Mellanox(R) NICs combinations
+
+ * Platform details:
+
+ * Intel(R) Xeon(R) CPU E5-2697A v4 @ 2.60GHz
+ * Intel(R) Xeon(R) CPU E5-2697 v3 @ 2.60GHz
+ * Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+ * Intel(R) Xeon(R) CPU E5-2640 @ 2.50GHz
+
+ * OS:
+
+ * Red Hat Enterprise Linux Server release 7.3 (Maipo)
+ * Red Hat Enterprise Linux Server release 7.2 (Maipo)
+ * Ubuntu 16.10
+ * Ubuntu 16.04
+ * Ubuntu 14.04
+
+ * MLNX_OFED: 4.0-2.0.0.0
+
+ * NICs:
+
+ * Mellanox(R) ConnectX(R)-3 Pro 40G MCX354A-FCC_Ax (2x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1007
+ * Firmware version: 2.40.5030
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4111A-XCAT (1x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4111A-ACAT (1x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX4131A-BCAT/MCX413A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX415A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX4131A-GCAT/MCX413A-GCAT (1x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX414A-BCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-GCAT/MCX416A-BCAT/MCX416A-GCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-CCAT (1x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 100G MCX416A-CCAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 Lx 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 Lx 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.18.2000
+
+ * Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1017
+ * Firmware version: 16.19.1200
+
+ * Mellanox(R) ConnectX-5 Ex EN 100G MCX516A-CDAT (2x100G)
+
+ * Host interface: PCI Express 4.0 x16
+ * Device ID: 15b3:1019
+ * Firmware version: 16.19.1200
+
+* IBM(R) Power8(R) with Mellanox(R) NICs combinations
+
+ * Platform details:
+
+ * Processor: POWER8E (raw), AltiVec supported
+ * type-model: 8247-22L
+ * Firmware FW810.21 (SV810_108)
+
+ * OS: Ubuntu 16.04 LTS PPC le
+
+ * MLNX_OFED: 4.0-2.0.0.0
+
+ * NICs:
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4111A-XCAT (1x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4111A-ACAT (1x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX4131A-BCAT/MCX413A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX415A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX4131A-GCAT/MCX413A-GCAT (1x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX414A-BCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-GCAT/MCX416A-BCAT/MCX416A-GCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-CCAT (1x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
+
+ * Mellanox(R) ConnectX(R)-4 100G MCX416A-CCAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.18.2000
diff --git a/doc/guides/sample_app_ug/cmd_line.rst b/doc/guides/sample_app_ug/cmd_line.rst
index 02a295ff..36c7971c 100644
--- a/doc/guides/sample_app_ug/cmd_line.rst
+++ b/doc/guides/sample_app_ug/cmd_line.rst
@@ -96,7 +96,7 @@ To run the application in linuxapp environment, issue the following command:
.. code-block:: console
- $ ./build/cmdline -c f -n 4
+ $ ./build/cmdline -l 0-3 -n 4
Refer to the *DPDK Getting Started Guide* for general information on running applications
and the Environment Abstraction Layer (EAL) options.
diff --git a/doc/guides/sample_app_ug/dist_app.rst b/doc/guides/sample_app_ug/dist_app.rst
index e242748f..1cae4739 100644
--- a/doc/guides/sample_app_ug/dist_app.rst
+++ b/doc/guides/sample_app_ug/dist_app.rst
@@ -96,7 +96,7 @@ Running the Application
.. code-block:: console
- $ ./build/distributor_app -c 0x4003fe -n 4 -- -p f
+ $ ./build/distributor_app -l 1-9,22 -n 4 -- -p f
#. Refer to the DPDK Getting Started Guide for general information on running
applications and the Environment Abstraction Layer (EAL) options.
@@ -104,33 +104,35 @@ Running the Application
Explanation
-----------
-The distributor application consists of three types of threads: a receive
-thread (lcore_rx()), a set of worker threads(lcore_worker())
-and a transmit thread(lcore_tx()). How these threads work together is shown
-in :numref:`figure_dist_app` below. The main() function launches threads of these three types.
-Each thread has a while loop which will be doing processing and which is
-terminated only upon SIGINT or ctrl+C. The receive and transmit threads
-communicate using a software ring (rte_ring structure).
-
-The receive thread receives the packets using rte_eth_rx_burst() and gives
-them to the distributor (using rte_distributor_process() API) which will
-be called in context of the receive thread itself. The distributor distributes
-the packets to workers threads based on the tagging of the packet -
-indicated by the hash field in the mbuf. For IP traffic, this field is
-automatically filled by the NIC with the "usr" hash value for the packet,
-which works as a per-flow tag.
+The distributor application consists of four types of threads: a receive
+thread (``lcore_rx()``), a distributor thread (``lcore_dist()``), a set of
+worker threads (``lcore_worker()``), and a transmit thread(``lcore_tx()``).
+How these threads work together is shown in :numref:`figure_dist_app` below.
+The ``main()`` function launches threads of these four types. Each thread
+has a while loop which will be doing processing and which is terminated
+only upon SIGINT or ctrl+C.
+
+The receive thread receives the packets using ``rte_eth_rx_burst()`` and will
+enqueue them to an rte_ring. The distributor thread will dequeue the packets
+from the ring and assign them to workers (using ``rte_distributor_process()`` API).
+This assignment is based on the tag (or flow ID) of the packet - indicated by
+the hash field in the mbuf. For IP traffic, this field is automatically filled
+by the NIC with the "usr" hash value for the packet, which works as a per-flow
+tag. The distributor thread communicates with the worker threads using a
+cache-line swapping mechanism, passing up to 8 mbuf pointers at a time
+(one cache line) to each worker.
More than one worker thread can exist as part of the application, and these
worker threads do simple packet processing by requesting packets from
the distributor, doing a simple XOR operation on the input port mbuf field
(to indicate the output port which will be used later for packet transmission)
-and then finally returning the packets back to the distributor in the RX thread.
+and then finally returning the packets back to the distributor thread.
-Meanwhile, the receive thread will call the distributor api
-rte_distributor_returned_pkts() to get the packets processed, and will enqueue
-them to a ring for transfer to the TX thread for transmission on the output port.
-The transmit thread will dequeue the packets from the ring and transmit them on
-the output port specified in packet mbuf.
+The distributor thread will then call the distributor api
+``rte_distributor_returned_pkts()`` to get the processed packets, and will enqueue
+them to another rte_ring for transfer to the TX thread for transmission on the
+output port. The transmit thread will dequeue the packets from the ring and
+transmit them on the output port specified in packet mbuf.
Users who wish to terminate the running of the application have to press ctrl+C
(or send SIGINT to the app). Upon this signal, a signal handler provided
@@ -153,8 +155,10 @@ the line "#define DEBUG" defined in start of the application in main.c to enable
Statistics
----------
-Upon SIGINT (or) ctrl+C, the print_stats() function displays the count of packets
-processed at the different stages in the application.
+The main function will print statistics on the console every second. These
+statistics include the number of packets enqueued and dequeued at each stage
+in the application, and also key statistics per worker, including how many
+packets of each burst size (1-8) were sent to each worker thread.
Application Initialization
--------------------------
diff --git a/doc/guides/sample_app_ug/ethtool.rst b/doc/guides/sample_app_ug/ethtool.rst
index 4d1697e8..67797954 100644
--- a/doc/guides/sample_app_ug/ethtool.rst
+++ b/doc/guides/sample_app_ug/ethtool.rst
@@ -47,7 +47,7 @@ To compile the application:
.. code-block:: console
export RTE_SDK=/path/to/rte_sdk
- cd ${RTE_SD}/examples/ethtool
+ cd ${RTE_SDK}/examples/ethtool
#. Set the target (a default target is used if not specified). For example:
diff --git a/doc/guides/sample_app_ug/exception_path.rst b/doc/guides/sample_app_ug/exception_path.rst
index 161b6e0f..e505fb32 100644
--- a/doc/guides/sample_app_ug/exception_path.rst
+++ b/doc/guides/sample_app_ug/exception_path.rst
@@ -102,7 +102,7 @@ Refer to the *DPDK Getting Started Guide* for general information on running app
and the Environment Abstraction Layer (EAL) options.
The number of bits set in each bitmask must be the same.
-The coremask -c parameter of the EAL options should include IN_CORES and OUT_CORES.
+The coremask -c or the corelist -l parameter of the EAL options should include IN_CORES and OUT_CORES.
The same bit must not be set in IN_CORES and OUT_CORES.
The affinities between ports and cores are set beginning with the least significant bit of each mask, that is,
the port represented by the lowest bit in PORTMASK is read from by the core represented by the lowest bit in IN_CORES,
@@ -112,7 +112,7 @@ For example to run the application with two ports and four cores:
.. code-block:: console
- ./build/exception_path -c f -n 4 -- -p 3 -i 3 -o c
+ ./build/exception_path -l 0-3 -n 4 -- -p 3 -i 3 -o c
Getting Statistics
~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/sample_app_ug/hello_world.rst b/doc/guides/sample_app_ug/hello_world.rst
index 72e1e115..f4753af0 100644
--- a/doc/guides/sample_app_ug/hello_world.rst
+++ b/doc/guides/sample_app_ug/hello_world.rst
@@ -65,7 +65,7 @@ To run the example in a linuxapp environment:
.. code-block:: console
- $ ./build/helloworld -c f -n 4
+ $ ./build/helloworld -l 0-3 -n 4
Refer to *DPDK Getting Started Guide* for general information on running applications
and the Environment Abstraction Layer (EAL) options.
diff --git a/doc/guides/sample_app_ug/img/dist_app.svg b/doc/guides/sample_app_ug/img/dist_app.svg
index 4714c7db..944f4377 100644
--- a/doc/guides/sample_app_ug/img/dist_app.svg
+++ b/doc/guides/sample_app_ug/img/dist_app.svg
@@ -1,8 +1,7 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-
<!--
# BSD LICENSE
-# Copyright (c) <2014>, Intel Corporation
+# Copyright (c) <2014-2017>, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -47,8 +46,8 @@
height="379.53668"
id="svg4090"
version="1.1"
- inkscape:version="0.48.5 r10040"
- sodipodi:docname="New document 2">
+ inkscape:version="0.92.1 r15371"
+ sodipodi:docname="dist_app.svg">
<defs
id="defs4092">
<marker
@@ -200,8 +199,8 @@
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1"
- inkscape:cx="339.92174"
- inkscape:cy="120.32038"
+ inkscape:cx="401.32873"
+ inkscape:cy="130.13572"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
@@ -210,8 +209,8 @@
fit-margin-right="0"
fit-margin-bottom="0"
inkscape:window-width="1920"
- inkscape:window-height="1017"
- inkscape:window-x="-8"
+ inkscape:window-height="1137"
+ inkscape:window-x="1912"
inkscape:window-y="-8"
inkscape:window-maximized="1" />
<metadata
@@ -222,7 +221,7 @@
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
+ <dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
@@ -232,40 +231,33 @@
id="layer1"
transform="translate(-35.078263,-28.308125)">
<rect
- style="fill:none;stroke:#000000;stroke-width:0.99999988;stroke-opacity:0.98412697"
+ style="fill:none;stroke:#000000;stroke-width:0.81890059;stroke-opacity:0.98412697"
id="rect10443"
- width="152.9641"
- height="266.92566"
- x="122.95611"
- y="34.642567" />
- <rect
- style="fill:none;stroke:#000000;stroke-width:1;stroke-opacity:0.98412697"
- id="rect10445"
- width="124.71397"
- height="46.675529"
- x="435.7746"
- y="28.808125" />
+ width="152.96732"
+ height="178.99617"
+ x="124.50176"
+ y="128.95552" />
<rect
style="fill:none;stroke:#000000;stroke-width:0.99999988;stroke-opacity:0.98412697"
id="rect10445-2"
width="124.71397"
height="46.675529"
- x="435.42999"
- y="103.92654" />
+ x="437.00507"
+ y="133.06113" />
<rect
style="fill:none;stroke:#000000;stroke-width:0.99999988;stroke-opacity:0.98412697"
id="rect10445-0"
width="124.71397"
height="46.675529"
x="436.80811"
- y="178.31572" />
+ y="193.87207" />
<rect
style="fill:none;stroke:#000000;stroke-width:0.99999988;stroke-opacity:0.98412697"
id="rect10445-9"
width="124.71397"
height="46.675529"
x="436.80811"
- y="246.87038" />
+ y="256.06277" />
<rect
style="fill:none;stroke:#000000;stroke-width:0.99999988;stroke-opacity:0.98412697"
id="rect10445-7"
@@ -274,203 +266,241 @@
x="135.7057"
y="360.66928" />
<path
- style="fill:none;stroke:#000000;stroke-width:0.99200004;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-opacity:1;stroke-dasharray:none;marker-start:url(#Arrow1Mstart)"
- d="M 277.293,44.129101 433.02373,43.388655"
- id="path10486"
- inkscape:connector-type="polyline"
- inkscape:connector-curvature="3" />
- <path
- style="fill:none;stroke:#000000;stroke-width:0.99200004;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-opacity:1;stroke-dasharray:none;marker-start:url(#Arrow1Mstart)"
- d="m 277.83855,110.78109 155.73073,-0.74044"
+ style="fill:none;stroke:#000000;stroke-width:0.99566948;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow1Mstart)"
+ d="M 278.89497,147.51907 436.5713,146.78234"
id="path10486-2"
inkscape:connector-type="polyline"
inkscape:connector-curvature="3" />
<path
- style="fill:none;stroke:#000000;stroke-width:0.99200004;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-opacity:1;stroke-dasharray:none;marker-start:url(#Arrow1Mstart)"
- d="m 278.48623,189.32721 155.73073,-0.74042"
+ style="fill:none;stroke:#000000;stroke-width:0.99290925;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow1Mstart)"
+ d="m 279.37092,206.8834 156.80331,-0.73671"
id="path10486-1"
inkscape:connector-type="polyline"
inkscape:connector-curvature="3" />
<path
- style="fill:none;stroke:#000000;stroke-width:0.99200004;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-opacity:1;stroke-dasharray:none;marker-start:url(#Arrow1Mstart)"
- d="m 278.48623,255.19448 155.73073,-0.74043"
+ style="fill:none;stroke:#000000;stroke-width:0.99379504;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow1Mstart)"
+ d="m 279.19738,270.88669 157.15478,-0.73638"
id="path10486-4"
inkscape:connector-type="polyline"
inkscape:connector-curvature="3" />
<path
- style="fill:none;stroke:#000000;stroke-width:0.99200004;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-opacity:1;stroke-dasharray:none;marker-start:none;marker-mid:none;marker-end:url(#Arrow1Mend)"
- d="M 277.11852,66.041829 432.84924,65.301384"
- id="path10486-0"
- inkscape:connector-type="polyline"
- inkscape:connector-curvature="3" />
- <path
- style="fill:none;stroke:#000000;stroke-width:0.99200004;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-opacity:1;stroke-dasharray:none;marker-start:none;marker-mid:none;marker-end:url(#Arrow1Mend)"
- d="M 277.46746,136.71727 433.1982,135.97682"
+ style="fill:none;stroke:#000000;stroke-width:0.99820405;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:none;marker-mid:none;marker-end:url(#Arrow1Mend)"
+ d="m 277.17846,166.20347 158.11878,-0.73842"
id="path10486-0-4"
inkscape:connector-type="polyline"
inkscape:connector-curvature="3" />
<path
- style="fill:none;stroke:#000000;stroke-width:0.99200004;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-opacity:1;stroke-dasharray:none;marker-start:none;marker-mid:none;marker-end:url(#Arrow1Mend)"
- d="m 276.77843,210.37709 155.73073,-0.74044"
+ style="fill:none;stroke:#000000;stroke-width:0.99410033;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:none;marker-mid:none;marker-end:url(#Arrow1Mend)"
+ d="m 277.47049,225.92925 157.32298,-0.73606"
id="path10486-0-7"
inkscape:connector-type="polyline"
inkscape:connector-curvature="3" />
<path
- style="fill:none;stroke:#000000;stroke-width:0.99200004;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-opacity:1;stroke-dasharray:none;marker-start:none;marker-mid:none;marker-end:url(#Arrow1Mend)"
- d="M 277.46746,282.5783 433.1982,281.83785"
+ style="fill:none;stroke:#000000;stroke-width:0.99566948;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:none;marker-mid:none;marker-end:url(#Arrow1Mend)"
+ d="M 277.70474,289.26714 435.38107,288.5304"
id="path10486-0-77"
inkscape:connector-type="polyline"
inkscape:connector-curvature="3" />
<text
xml:space="preserve"
- style="font-size:9.32312489px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="348.03241"
- y="34.792767"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="345.02322"
+ y="134.82103"
id="text11995"
- sodipodi:linespacing="125%"
- transform="scale(0.93992342,1.0639165)"><tspan
+ transform="scale(0.93992339,1.0639165)"><tspan
sodipodi:role="line"
id="tspan11997"
- x="348.03241"
- y="34.792767">Request packet</tspan></text>
+ x="345.02322"
+ y="134.82103"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif">Request burst</tspan></text>
<text
xml:space="preserve"
- style="font-size:9.32312489px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="349.51935"
- y="74.044792"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="346.38663"
+ y="164.76628"
id="text11995-7"
- sodipodi:linespacing="125%"
- transform="scale(0.93992342,1.0639165)"><tspan
+ transform="scale(0.93992339,1.0639165)"><tspan
sodipodi:role="line"
id="tspan11997-3"
- x="349.51935"
- y="74.044792">Mbuf pointer</tspan></text>
+ x="346.38663"
+ y="164.76628"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif">Mbuf Pointers</tspan></text>
<text
xml:space="preserve"
- style="font-size:9.32312489px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="504.26611"
- y="52.165989"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="502.36844"
+ y="151.66222"
id="text11995-7-3"
- sodipodi:linespacing="125%"
- transform="scale(0.93992342,1.0639165)"><tspan
+ transform="scale(0.93992339,1.0639165)"><tspan
sodipodi:role="line"
id="tspan11997-3-5"
- x="504.26611"
- y="52.165989">WorkerThread1</tspan></text>
+ x="502.36844"
+ y="151.66222"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif">WorkerThread1</tspan></text>
<text
xml:space="preserve"
- style="font-size:9.32312489px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="501.65793"
- y="121.54361"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="499.40103"
+ y="207.94502"
id="text11995-7-3-9"
- sodipodi:linespacing="125%"
- transform="scale(0.93992342,1.0639165)"><tspan
+ transform="scale(0.93992339,1.0639165)"><tspan
sodipodi:role="line"
id="tspan11997-3-5-9"
- x="501.65793"
- y="121.54361">WorkerThread2</tspan></text>
+ x="499.40103"
+ y="207.94502"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif">WorkerThread2</tspan></text>
<text
xml:space="preserve"
- style="font-size:9.32312489px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="499.45868"
- y="191.46367"
- id="text11995-7-3-8"
- sodipodi:linespacing="125%"
- transform="scale(0.93992342,1.0639165)"><tspan
- sodipodi:role="line"
- id="tspan11997-3-5-1"
- x="499.45868"
- y="191.46367">WorkerThread3</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:9.32312489px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
x="500.1918"
- y="257.9563"
+ y="266.59644"
id="text11995-7-3-82"
- sodipodi:linespacing="125%"
- transform="scale(0.93992342,1.0639165)"><tspan
+ transform="scale(0.9399234,1.0639165)"><tspan
sodipodi:role="line"
id="tspan11997-3-5-6"
x="500.1918"
- y="257.9563">WorkerThreadN</tspan></text>
+ y="266.59644"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif">WorkerThreadN</tspan></text>
<text
xml:space="preserve"
- style="font-size:9.32312489px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
x="193.79703"
y="362.85193"
id="text11995-7-3-6"
- sodipodi:linespacing="125%"
transform="scale(0.93992342,1.0639165)"><tspan
sodipodi:role="line"
id="tspan11997-3-5-0"
x="193.79703"
- y="362.85193">TX thread</tspan></text>
+ y="362.85193"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif">TX thread</tspan></text>
<text
xml:space="preserve"
- style="font-size:9.32312489px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="162.2476"
- y="142.79382"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="175.78905"
+ y="207.26257"
id="text11995-7-3-3"
- sodipodi:linespacing="125%"
- transform="scale(0.93992342,1.0639165)"><tspan
+ transform="scale(0.9399234,1.0639165)"><tspan
sodipodi:role="line"
id="tspan11997-3-5-8"
- x="162.2476"
- y="142.79382">RX thread &amp; Distributor</tspan></text>
+ x="175.78905"
+ y="207.26257"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif">Distributor Thread</tspan></text>
<path
- style="fill:none;stroke:#000000;stroke-width:0.75945646;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-opacity:1;stroke-dasharray:none;marker-start:none;marker-mid:none;marker-end:url(#Arrow1Mend)"
- d="m 35.457991,109.77995 85.546359,-0.79004"
+ style="fill:none;stroke:#000000;stroke-width:0.75945646;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:none;marker-mid:none;marker-end:url(#Arrow1Mend)"
+ d="m 49.600127,54.625621 85.546363,-0.79004"
id="path10486-0-4-5"
inkscape:connector-type="polyline"
inkscape:connector-curvature="3" />
<path
- style="fill:none;stroke:#000000;stroke-width:0.75945646;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-opacity:1;stroke-dasharray:none;marker-start:none;marker-mid:none;marker-end:url(#Arrow1Mend)"
+ style="fill:none;stroke:#000000;stroke-width:0.75945646;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:1;marker-start:none;marker-mid:none;marker-end:url(#Arrow1Mend)"
d="m 135.70569,384.00706 -85.546361,0.79003"
id="path10486-0-4-5-7"
inkscape:connector-type="polyline"
inkscape:connector-curvature="3" />
<text
xml:space="preserve"
- style="font-size:9.32312489px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="58.296661"
- y="96.037407"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="73.342712"
+ y="44.196564"
id="text11995-7-8"
- sodipodi:linespacing="125%"
- transform="scale(0.93992342,1.0639165)"><tspan
+ transform="scale(0.9399234,1.0639165)"><tspan
sodipodi:role="line"
id="tspan11997-3-3"
- x="58.296661"
- y="96.037407">Mbufs In</tspan></text>
+ x="73.342712"
+ y="44.196564"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif">Mbufs In</tspan></text>
<text
xml:space="preserve"
- style="font-size:9.32312489px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
x="83.4814"
y="352.62543"
id="text11995-7-8-5"
- sodipodi:linespacing="125%"
transform="scale(0.93992342,1.0639165)"><tspan
sodipodi:role="line"
id="tspan11997-3-3-1"
x="83.4814"
- y="352.62543">Mbufs Out</tspan></text>
+ y="352.62543"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif">Mbufs Out</tspan></text>
<path
- style="fill:none;stroke:#000000;stroke-width:1.05720723;stroke-miterlimit:3;stroke-opacity:0.98412697;stroke-dasharray:none"
- d="m 171.68192,303.16236 0.21464,30.4719 -8.6322,0.40574 -11.33877,0.1956 25.75778,14.79103 23.25799,11.11792 18.87014,-7.32926 31.83305,-17.26495 -10.75831,-0.32986 -10.37586,-0.44324 -0.22443,-31.54093 z"
+ style="fill:none;stroke:#000000;stroke-width:1.01068497;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:0.98412697"
+ d="m 171.68192,308.06701 0.21464,27.84908 -8.6322,0.37082 -11.33877,0.17876 25.75778,13.51792 23.25799,10.16096 18.87014,-6.69841 31.83305,-15.77889 -10.75831,-0.30147 -10.37586,-0.40509 -0.22443,-28.8261 z"
id="path12188"
inkscape:connector-curvature="0"
- inkscape:transform-center-y="7.6863474"
+ inkscape:transform-center-y="7.0247597"
sodipodi:nodetypes="cccccccccccc" />
<text
xml:space="preserve"
- style="font-size:9.32312489px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
x="193.68871"
y="309.26349"
id="text11995-7-3-6-2"
- sodipodi:linespacing="125%"
transform="scale(0.93992342,1.0639165)"><tspan
sodipodi:role="line"
x="193.68871"
y="309.26349"
- id="tspan12214">SW Ring</tspan></text>
+ id="tspan12214"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif">SW Ring</tspan></text>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1.02106845;stroke-miterlimit:3;stroke-dasharray:none;stroke-opacity:0.98412697"
+ d="m 173.27214,75.568236 0.21464,28.424254 -8.6322,0.37848 -11.33877,0.18245 25.75778,13.79709 23.25799,10.37083 18.87013,-6.83675 31.83305,-16.10478 -10.75831,-0.30769 -10.37586,-0.41345 -0.22443,-29.421453 z"
+ id="path12188-5"
+ inkscape:connector-curvature="0"
+ inkscape:transform-center-y="7.1698404"
+ sodipodi:nodetypes="cccccccccccc" />
+ <rect
+ style="fill:none;stroke:#000000;stroke-width:0.99999988;stroke-opacity:0.98412697"
+ id="rect10445-7-7"
+ width="124.71397"
+ height="46.675529"
+ x="138.18427"
+ y="28.832333" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="190.80019"
+ y="51.17778"
+ id="text11995-7-3-6-6"
+ transform="scale(0.93992339,1.0639165)"><tspan
+ sodipodi:role="line"
+ id="tspan11997-3-5-0-4"
+ x="190.80019"
+ y="51.17778"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif">RX thread</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="196.38097"
+ y="90.224785"
+ id="text11995-7-3-6-2-9"
+ transform="scale(0.93992339,1.0639165)"><tspan
+ sodipodi:role="line"
+ x="196.38097"
+ y="90.224785"
+ id="tspan12214-8"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif">SW Ring</tspan></text>
+ <rect
+ style="fill:none;stroke:#000000;stroke-width:0.99999988;stroke-opacity:0.98412697"
+ id="rect10445-7-7-5"
+ width="124.71397"
+ height="46.675529"
+ x="327.86566"
+ y="29.009106" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;line-height:0%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ x="387.27209"
+ y="45.36227"
+ id="text11995-7-3-6-6-3"
+ transform="scale(0.93992339,1.0639165)"><tspan
+ sodipodi:role="line"
+ id="tspan11997-3-5-0-4-4"
+ x="387.27209"
+ y="45.36227"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif">Stats thread</tspan><tspan
+ sodipodi:role="line"
+ x="387.27209"
+ y="57.016178"
+ style="font-size:9.32312489px;line-height:1.25;font-family:sans-serif"
+ id="tspan165">(to console)</tspan></text>
</g>
</svg>
diff --git a/doc/guides/sample_app_ug/img/server_node_efd.svg b/doc/guides/sample_app_ug/img/server_node_efd.svg
new file mode 100644
index 00000000..9aee30bc
--- /dev/null
+++ b/doc/guides/sample_app_ug/img/server_node_efd.svg
@@ -0,0 +1,1254 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export efd_i6.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="8.2496in" height="5.89673in"
+ viewBox="0 0 593.971 424.565" xml:space="preserve" color-interpolation-filters="sRGB" class="st27">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false">
+ <v:userDefs>
+ <v:ud v:nameU="msvSubprocessMaster" v:prompt="" v:val="VT4(Rectangle)"/>
+ <v:ud v:nameU="msvNoAutoConnect" v:val="VT0(1):26"/>
+ </v:userDefs>
+ </v:documentProperties>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
+ .st5 {fill:#feffff;font-family:Calibri;font-size:0.75em}
+ .st6 {fill:none;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st7 {fill:none;stroke:#2e75b5;stroke-width:2.25}
+ .st8 {fill:#305497;stroke:#2e75b5;stroke-width:1}
+ .st9 {fill:#feffff;font-family:Calibri;font-size:0.833336em;font-weight:bold}
+ .st10 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2)}
+ .st11 {fill:#5b9bd5}
+ .st12 {stroke:#c7c8c8;stroke-width:0.25}
+ .st13 {fill:#acccea;stroke:#c7c8c8;stroke-width:0.25}
+ .st14 {fill:#feffff;font-family:Calibri;font-size:1.00001em;font-weight:bold}
+ .st15 {fill:#ed7d31;stroke:#c7c8c8;stroke-width:0.25}
+ .st16 {fill:#deebf6;stroke:#c7c8c8;stroke-width:0.25}
+ .st17 {marker-end:url(#mrkr5-212);stroke:#ff0000;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st18 {fill:#ff0000;fill-opacity:1;stroke:#ff0000;stroke-opacity:1;stroke-width:0.28409090909091}
+ .st19 {fill:none;stroke:#2e75b5;stroke-width:1}
+ .st20 {fill:#5b9bd5;font-family:Calibri;font-size:1.00001em}
+ .st21 {fill:none;stroke:none;stroke-width:0.25}
+ .st22 {font-size:1em}
+ .st23 {fill:#ffffff}
+ .st24 {stroke:#5b9bd5;stroke-dasharray:1.5,3;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.5}
+ .st25 {marker-end:url(#mrkr5-444);stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.5}
+ .st26 {fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-opacity:1;stroke-width:0.37313432835821}
+ .st27 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-212" class="st18" v:arrowType="5" v:arrowSize="2" v:setback="5.8" refX="-5.8" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-3.52,-3.52) "/>
+ </marker>
+ <marker id="mrkr5-444" class="st26" v:arrowType="5" v:arrowSize="2" v:setback="4.69" refX="-4.69" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-2.68,-2.68) "/>
+ </marker>
+ </defs>
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <v:userDefs>
+ <v:ud v:nameU="msvThemeOrder" v:val="VT0(0):26"/>
+ </v:userDefs>
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <g id="shape3-1" v:mID="3" v:groupContext="shape" transform="translate(319.501,-335.688)">
+ <title>Rectangle.58</title>
+ <desc>Key 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow3-2" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="4.74" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key 1</text> </g>
+ <g id="shape4-7" v:mID="4" v:groupContext="shape" transform="translate(353.251,-335.688)">
+ <title>Rectangle.59</title>
+ <desc>Action 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow4-8" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.62" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action 1</text> </g>
+ <g id="shape5-13" v:mID="5" v:groupContext="shape" transform="translate(400.501,-335.688)">
+ <title>Rectangle.60</title>
+ <desc>Key 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow5-14" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="4.74" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key 2</text> </g>
+ <g id="shape6-19" v:mID="6" v:groupContext="shape" transform="translate(434.251,-335.688)">
+ <title>Rectangle.61</title>
+ <desc>Action 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow6-20" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.62" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action 2</text> </g>
+ <g id="shape7-25" v:mID="7" v:groupContext="shape" transform="translate(481.501,-335.688)">
+ <title>Rectangle.62</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow7-26" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape8-30" v:mID="8" v:groupContext="shape" transform="translate(515.251,-335.688)">
+ <title>Rectangle.63</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow8-31" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape9-35" v:mID="9" v:groupContext="shape" transform="translate(319.501,-313.188)">
+ <title>Rectangle.64</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow9-36" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape10-40" v:mID="10" v:groupContext="shape" transform="translate(353.251,-313.188)">
+ <title>Rectangle.65</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow10-41" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape11-45" v:mID="11" v:groupContext="shape" transform="translate(400.501,-313.188)">
+ <title>Rectangle.66</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow11-46" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape12-50" v:mID="12" v:groupContext="shape" transform="translate(434.251,-313.188)">
+ <title>Rectangle.67</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow12-51" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape13-55" v:mID="13" v:groupContext="shape" transform="translate(481.501,-313.188)">
+ <title>Rectangle.68</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow13-56" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape14-60" v:mID="14" v:groupContext="shape" transform="translate(515.251,-313.188)">
+ <title>Rectangle.69</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow14-61" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape15-65" v:mID="15" v:groupContext="shape" transform="translate(319.501,-277.188)">
+ <title>Rectangle.70</title>
+ <desc>Key x</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow15-66" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.11" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key x</text> </g>
+ <g id="shape16-71" v:mID="16" v:groupContext="shape" transform="translate(353.251,-277.188)">
+ <title>Rectangle.71</title>
+ <desc>Action x</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow16-72" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.99" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action x</text> </g>
+ <g id="shape17-77" v:mID="17" v:groupContext="shape" transform="translate(400.501,-277.188)">
+ <title>Rectangle.72</title>
+ <desc>Key y</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow17-78" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.01" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key y</text> </g>
+ <g id="shape18-83" v:mID="18" v:groupContext="shape" transform="translate(434.251,-277.188)">
+ <title>Rectangle.73</title>
+ <desc>Action y</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow18-84" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.89" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action y</text> </g>
+ <g id="shape19-89" v:mID="19" v:groupContext="shape" transform="translate(481.501,-277.188)">
+ <title>Rectangle.74</title>
+ <desc>Key z</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow19-90" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.3" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key z</text> </g>
+ <g id="shape20-95" v:mID="20" v:groupContext="shape" transform="translate(515.251,-277.188)">
+ <title>Rectangle.75</title>
+ <desc>Action z</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow20-96" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="5.18" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action z</text> </g>
+ <g id="shape21-101" v:mID="21" v:groupContext="shape" transform="translate(319.501,-240.687)">
+ <title>Rectangle.76</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow21-102" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape22-106" v:mID="22" v:groupContext="shape" transform="translate(353.251,-240.687)">
+ <title>Rectangle.77</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow22-107" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape23-111" v:mID="23" v:groupContext="shape" transform="translate(400.501,-240.687)">
+ <title>Rectangle.78</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow23-112" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape24-116" v:mID="24" v:groupContext="shape" transform="translate(434.251,-240.687)">
+ <title>Rectangle.79</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow24-117" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape25-121" v:mID="25" v:groupContext="shape" transform="translate(481.501,-240.687)">
+ <title>Rectangle.80</title>
+ <desc>Key N</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow25-122" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.21" y="418.26" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key N</text> </g>
+ <g id="shape26-127" v:mID="26" v:groupContext="shape" transform="translate(515.251,-240.687)">
+ <title>Rectangle.81</title>
+ <desc>Action N</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow26-128" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="5.67" y="418.26" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action N</text> </g>
+ <g id="shape27-133" v:mID="27" v:groupContext="shape" transform="translate(317.251,-231.687)">
+ <title>Rectangle.82</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow27-134" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="289.065" width="245.25" height="135.5" class="st6"/>
+ </g>
+ <rect x="0" y="289.065" width="245.25" height="135.5" class="st7"/>
+ </g>
+ <g id="shape28-138" v:mID="28" v:groupContext="shape" transform="translate(328.501,-362.688)">
+ <title>Sheet.28</title>
+ <desc>Local Table for N Specific Flows Serviced at Node 1</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="110.423" cy="418.94" width="220.85" height="11.25"/>
+ <rect x="0" y="413.315" width="220.846" height="11.25" class="st8"/>
+ <text x="5.77" y="421.94" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Local Table for N Specific Flows Serviced at Node 1</text> </g>
+ <g id="group34-141" transform="translate(66.0294,-165.569)" v:mID="34" v:groupContext="group">
+ <v:custProps>
+ <v:cp v:nameU="AssetNumber" v:lbl="Asset Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SerialNumber" v:lbl="Serial Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Location" v:lbl="Location" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Building" v:lbl="Building" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Room" v:lbl="Room" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Manufacturer" v:lbl="Manufacturer" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductNumber" v:lbl="Product Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="PartNumber" v:lbl="Part Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductDescription" v:lbl="Product Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Equipment" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkName" v:lbl="Network Name" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="IPAddress" v:lbl="IP Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SubnetMask" v:lbl="Subnet Mask" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="AdminInterface" v:lbl="Administrative Interface" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NumberofPorts" v:lbl="Number of Ports" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CommunityString" v:lbl="Community String" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkDescription" v:lbl="Network Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="MACAddress" v:lbl="MAC Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ShapeClass" v:lbl="ShapeClass" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Equipment)"/>
+ <v:cp v:nameU="ShapeType" v:lbl="ShapeType" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Device)"/>
+ <v:cp v:nameU="SubShapeType" v:lbl="SubShapeType" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Load balancer)"/>
+ </v:custProps>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ShapeClass" v:prompt="" v:val="VT0(5):26"/>
+ <v:ud v:nameU="SolSH" v:prompt="" v:val="VT15({BF0433D9-CD73-4EB5-8390-8653BE590246}):41"/>
+ <v:ud v:nameU="visLegendShape" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <title>Load balancer</title>
+ <g id="shape35-142" v:mID="35" v:groupContext="shape" transform="translate(0,-7.33146)">
+ <title>Sheet.35</title>
+ <g id="shadow35-143" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M54 367.23 L18 367.23 L0 377.86 L0 424.56 L72 424.56 L72 377.86 L54 367.23 Z" class="st10"/>
+ <path d="M0 377.86 L72 377.86" class="st6"/>
+ <path d="M54 367.23 L18 367.23 L0 377.86 L0 424.56 L72 424.56 L72 377.86 L54 367.23" class="st6"/>
+ </g>
+ <path d="M54 367.23 L18 367.23 L0 377.86 L0 424.56 L72 424.56 L72 377.86 L54 367.23 Z" class="st11"/>
+ <path d="M0 377.86 L72 377.86" class="st12"/>
+ <path d="M54 367.23 L18 367.23 L0 377.86 L0 424.56 L72 424.56 L72 377.86 L54 367.23" class="st12"/>
+ </g>
+ <g id="shape36-152" v:mID="36" v:groupContext="shape" transform="translate(8.03054,-12.9324)">
+ <title>Sheet.36</title>
+ <g id="shadow36-153" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M45.34 421.81 L41.2 422.66 L44.12 424.56 L49.68 423.16 L48.75 417.51 L45.8 415.59 L46.69 419.68 L36.97
+ 413.34 L35.6 415.45 L45.34 421.81 ZM50.83 405.36 L39.2 405.36 L39.2 407.88 L50.8 407.88 L47.82 410.83
+ L51.34 410.83 L55.21 406.61 L51.32 402.39 L47.83 402.39 L50.83 405.36 ZM46.49 392.01 L36.75 398.37
+ L38.13 400.48 L47.84 394.14 L46.96 398.23 L49.91 396.31 L50.84 390.66 L45.28 389.26 L42.36 391.16
+ L46.49 392.01 ZM27.71 397.16 C22.66 397.16 18.58 401.25 18.58 406.29 C18.58 411.33 22.66 415.42
+ 27.71 415.42 C32.75 415.42 36.84 411.33 36.84 406.29 C36.84 401.25 32.75 397.16 27.71 397.16 ZM27.71
+ 400.04 C31.15 400.04 33.96 402.84 33.96 406.29 C33.96 409.74 31.15 412.54 27.71 412.54 C24.26 412.54
+ 21.46 409.74 21.46 406.29 C21.46 402.84 24.26 400.04 27.71 400.04 ZM11.64 405.04 L0 405.04 L0 407.56
+ L11.6 407.56 L8.62 410.51 L12.14 410.51 L16.01 406.29 L12.12 402.07 L8.64 402.07 L11.64 405.04 Z"
+ class="st2"/>
+ </g>
+ <path d="M45.34 421.81 L41.2 422.66 L44.12 424.56 L49.68 423.16 L48.75 417.51 L45.8 415.59 L46.69 419.68 L36.97 413.34
+ L35.6 415.45 L45.34 421.81 ZM50.83 405.36 L39.2 405.36 L39.2 407.88 L50.8 407.88 L47.82 410.83 L51.34
+ 410.83 L55.21 406.61 L51.32 402.39 L47.83 402.39 L50.83 405.36 ZM46.49 392.01 L36.75 398.37 L38.13 400.48
+ L47.84 394.14 L46.96 398.23 L49.91 396.31 L50.84 390.66 L45.28 389.26 L42.36 391.16 L46.49 392.01 ZM27.71
+ 397.16 C22.66 397.16 18.58 401.25 18.58 406.29 C18.58 411.33 22.66 415.42 27.71 415.42 C32.75 415.42
+ 36.84 411.33 36.84 406.29 C36.84 401.25 32.75 397.16 27.71 397.16 ZM27.71 400.04 C31.15 400.04 33.96
+ 402.84 33.96 406.29 C33.96 409.74 31.15 412.54 27.71 412.54 C24.26 412.54 21.46 409.74 21.46 406.29
+ C21.46 402.84 24.26 400.04 27.71 400.04 ZM11.64 405.04 L0 405.04 L0 407.56 L11.6 407.56 L8.62 410.51
+ L12.14 410.51 L16.01 406.29 L12.12 402.07 L8.64 402.07 L11.64 405.04 Z" class="st13"/>
+ </g>
+ </g>
+ <g id="shape37-157" v:mID="37" v:groupContext="shape" transform="translate(21.0294,-45.4375)">
+ <title>Rectangle</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow37-158" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="336.433" width="135" height="88.1315" class="st2"/>
+ </g>
+ <rect x="0" y="336.433" width="135" height="88.1315" class="st3"/>
+ </g>
+ <g id="shape38-162" v:mID="38" v:groupContext="shape" transform="translate(34.693,-126.438)">
+ <title>Sheet.38</title>
+ <desc>EFD Table</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="49.3364" cy="415.565" width="98.68" height="18"/>
+ <rect x="0" y="406.565" width="98.6728" height="18" class="st8"/>
+ <text x="24.87" y="419.17" class="st14" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>EFD Table</text> </g>
+ <g id="shape39-165" v:mID="39" v:groupContext="shape" transform="translate(30.0294,-99.4375)">
+ <title>Rectangle.39</title>
+ <desc>Group_id</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="26.9182" cy="415.565" width="53.84" height="18"/>
+ <g id="shadow39-166" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="53.8364" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="53.8364" height="18" class="st15"/>
+ <text x="7.87" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Group_id</text> </g>
+ <g id="shape40-171" v:mID="40" v:groupContext="shape" transform="translate(93.193,-99.4375)">
+ <title>Rectangle.40</title>
+ <desc>Hash index</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="26.9182" cy="415.565" width="53.84" height="18"/>
+ <g id="shadow40-172" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="53.8364" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="53.8364" height="18" class="st15"/>
+ <text x="4.64" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Hash index</text> </g>
+ <g id="shape41-177" v:mID="41" v:groupContext="shape" transform="translate(30.193,-82.4275)">
+ <title>Rectangle.41</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow41-178" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st16"/>
+ </g>
+ <g id="shape42-182" v:mID="42" v:groupContext="shape" transform="translate(30.193,-66.8125)">
+ <title>Rectangle.42</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow42-183" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st16"/>
+ </g>
+ <g id="shape43-187" v:mID="43" v:groupContext="shape" transform="translate(30.1112,-52.1875)">
+ <title>Rectangle.43</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow43-188" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st16"/>
+ </g>
+ <g id="shape44-192" v:mID="44" v:groupContext="shape" transform="translate(93.0294,-81.4375)">
+ <title>Rectangle.44</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow44-193" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st16"/>
+ </g>
+ <g id="shape45-197" v:mID="45" v:groupContext="shape" transform="translate(93.193,-66.8125)">
+ <title>Rectangle.45</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow45-198" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st16"/>
+ </g>
+ <g id="shape46-202" v:mID="46" v:groupContext="shape" transform="translate(93.193,-52.1875)">
+ <title>Rectangle.46</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow46-203" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st2"/>
+ </g>
+ <rect x="0" y="413.315" width="53.8364" height="11.25" class="st16"/>
+ </g>
+ <g id="shape47-207" v:mID="47" v:groupContext="shape" transform="translate(374.924,544.022) rotate(135)">
+ <title>Sheet.47</title>
+ <path d="M-0 417.75 A40.674 18.0151 -156.2 0 0 40.24 422.15 L40.49 421.89" class="st17"/>
+ </g>
+ <g id="shape48-213" v:mID="48" v:groupContext="shape" transform="translate(21.0294,-19)">
+ <title>Sheet.48</title>
+ <desc>Supports X*N Flows</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="67.5" cy="415.565" width="135" height="18"/>
+ <rect x="0" y="406.565" width="135" height="18" class="st19"/>
+ <text x="19.05" y="419.17" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Supports X*N Flows</text> </g>
+ <g id="shape49-216" v:mID="49" v:groupContext="shape" transform="translate(48.0294,-229.938)">
+ <title>Sheet.49</title>
+ <desc>Frontend Server or Load Balancer</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="400.94" width="108" height="47.25"/>
+ <rect x="0" y="377.315" width="108" height="47.25" class="st21"/>
+ <text x="14.56" y="397.34" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Frontend Server<v:newlineChar/><tspan
+ x="13.16" dy="1.2em" class="st22">or Load Balancer </tspan> </text> </g>
+ <g id="group51-220" transform="translate(223.876,-310.938)" v:mID="51" v:groupContext="group">
+ <v:custProps>
+ <v:cp v:nameU="AssetNumber" v:lbl="Asset Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SerialNumber" v:lbl="Serial Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Location" v:lbl="Location" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Building" v:lbl="Building" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Room" v:lbl="Room" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Manufacturer" v:lbl="Manufacturer" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductNumber" v:lbl="Product Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="PartNumber" v:lbl="Part Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductDescription" v:lbl="Product Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Equipment" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkName" v:lbl="Network Name" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="IPAddress" v:lbl="IP Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SubnetMask" v:lbl="Subnet Mask" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="AdminInterface" v:lbl="Administrative Interface" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NumberofPorts" v:lbl="Number of Ports" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CommunityString" v:lbl="Community String" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkDescription" v:lbl="Network Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="MACAddress" v:lbl="MAC Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CPU" v:lbl="CPU" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Memory" v:lbl="Memory" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="OperatingSystem" v:lbl="Operating System" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="HardDriveSize" v:lbl="Hard Drive Capacity" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Workstation" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Department" v:lbl="Department" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ShapeClass" v:lbl="ShapeClass" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Equipment)"/>
+ <v:cp v:nameU="ShapeType" v:lbl="ShapeType" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Server)"/>
+ <v:cp v:nameU="BelongsTo" v:lbl="Belongs To" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ </v:custProps>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ShapeClass" v:prompt="" v:val="VT0(5):26"/>
+ <v:ud v:nameU="SolSH" v:prompt="" v:val="VT15({BF0433D9-CD73-4EB5-8390-8653BE590246}):41"/>
+ <v:ud v:nameU="visLegendShape" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <title>Server</title>
+ <g id="shape52-221" v:mID="52" v:groupContext="shape" transform="translate(13.0183,0)">
+ <title>Sheet.52</title>
+ <g id="shadow52-222" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="352.565" width="45.9634" height="72" class="st2"/>
+ </g>
+ <rect x="0" y="352.565" width="45.9634" height="72" class="st3"/>
+ </g>
+ <g id="shape53-226" v:mID="53" v:groupContext="shape" transform="translate(47.371,-30.7354)">
+ <title>Sheet.53</title>
+ <g id="shadow53-227" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <ellipse cx="2.77848" cy="421.786" rx="2.77848" ry="2.77848" class="st2"/>
+ </g>
+ <ellipse cx="2.77848" cy="421.786" rx="2.77848" ry="2.77848" class="st13"/>
+ </g>
+ <g id="shape54-231" v:mID="54" v:groupContext="shape" transform="translate(30.51,-11.8022)">
+ <title>Sheet.54</title>
+ <v:userDefs>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(1)"/>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(#5b9bd5)"/>
+ </v:userDefs>
+ <g id="shadow54-232" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M-0 424.56 L22.42 424.56 L22.42 422.76 L-0 422.76 L-0 424.56 ZM-0 419.11 L22.42 419.11 L22.42 417.31
+ L-0 417.31 L-0 419.11 ZM-0 413.65 L22.42 413.65 L22.42 411.84 L-0 411.84 L-0 413.65 Z"
+ class="st10"/>
+ </g>
+ <path d="M-0 424.56 L22.42 424.56 L22.42 422.76 L-0 422.76 L-0 424.56 ZM-0 419.11 L22.42 419.11 L22.42 417.31 L-0
+ 417.31 L-0 419.11 ZM-0 413.65 L22.42 413.65 L22.42 411.84 L-0 411.84 L-0 413.65 Z" class="st23"/>
+ </g>
+ </g>
+ <g id="shape59-239" v:mID="59" v:groupContext="shape" transform="translate(277.876,-373.938)">
+ <title>Sheet.59</title>
+ <path d="M-0 424.56 A111.108 53.2538 42.31 0 1 93.83 421.21 L94.14 421.41" class="st17"/>
+ </g>
+ <g id="shape60-244" v:mID="60" v:groupContext="shape" transform="translate(205.876,-283.938)">
+ <title>Sheet.60</title>
+ <desc>Backend Server 1</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="408.124" width="108" height="32.8815"/>
+ <rect x="0" y="391.683" width="108" height="32.8815" class="st21"/>
+ <text x="11.93" y="411.72" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Backend Server 1</text> </g>
+ <g id="group61-247" transform="translate(223.876,-207.438)" v:mID="61" v:groupContext="group">
+ <v:custProps>
+ <v:cp v:nameU="AssetNumber" v:lbl="Asset Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SerialNumber" v:lbl="Serial Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Location" v:lbl="Location" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Building" v:lbl="Building" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Room" v:lbl="Room" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Manufacturer" v:lbl="Manufacturer" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductNumber" v:lbl="Product Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="PartNumber" v:lbl="Part Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductDescription" v:lbl="Product Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Equipment" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkName" v:lbl="Network Name" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="IPAddress" v:lbl="IP Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SubnetMask" v:lbl="Subnet Mask" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="AdminInterface" v:lbl="Administrative Interface" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NumberofPorts" v:lbl="Number of Ports" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CommunityString" v:lbl="Community String" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkDescription" v:lbl="Network Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="MACAddress" v:lbl="MAC Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CPU" v:lbl="CPU" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Memory" v:lbl="Memory" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="OperatingSystem" v:lbl="Operating System" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="HardDriveSize" v:lbl="Hard Drive Capacity" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Workstation" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Department" v:lbl="Department" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ShapeClass" v:lbl="ShapeClass" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Equipment)"/>
+ <v:cp v:nameU="ShapeType" v:lbl="ShapeType" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Server)"/>
+ <v:cp v:nameU="BelongsTo" v:lbl="Belongs To" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ </v:custProps>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ShapeClass" v:prompt="" v:val="VT0(5):26"/>
+ <v:ud v:nameU="SolSH" v:prompt="" v:val="VT15({BF0433D9-CD73-4EB5-8390-8653BE590246}):41"/>
+ <v:ud v:nameU="visLegendShape" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <title>Server.61</title>
+ <g id="shape62-248" v:mID="62" v:groupContext="shape" transform="translate(13.0183,0)">
+ <title>Sheet.62</title>
+ <g id="shadow62-249" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="352.565" width="45.9634" height="72" class="st2"/>
+ </g>
+ <rect x="0" y="352.565" width="45.9634" height="72" class="st3"/>
+ </g>
+ <g id="shape63-253" v:mID="63" v:groupContext="shape" transform="translate(47.371,-30.7354)">
+ <title>Sheet.63</title>
+ <g id="shadow63-254" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <ellipse cx="2.77848" cy="421.786" rx="2.77848" ry="2.77848" class="st2"/>
+ </g>
+ <ellipse cx="2.77848" cy="421.786" rx="2.77848" ry="2.77848" class="st13"/>
+ </g>
+ <g id="shape64-258" v:mID="64" v:groupContext="shape" transform="translate(30.51,-11.8022)">
+ <title>Sheet.64</title>
+ <v:userDefs>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(1)"/>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(#5b9bd5)"/>
+ </v:userDefs>
+ <g id="shadow64-259" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M-0 424.56 L22.42 424.56 L22.42 422.76 L-0 422.76 L-0 424.56 ZM-0 419.11 L22.42 419.11 L22.42 417.31
+ L-0 417.31 L-0 419.11 ZM-0 413.65 L22.42 413.65 L22.42 411.84 L-0 411.84 L-0 413.65 Z"
+ class="st10"/>
+ </g>
+ <path d="M-0 424.56 L22.42 424.56 L22.42 422.76 L-0 422.76 L-0 424.56 ZM-0 419.11 L22.42 419.11 L22.42 417.31 L-0
+ 417.31 L-0 419.11 ZM-0 413.65 L22.42 413.65 L22.42 411.84 L-0 411.84 L-0 413.65 Z" class="st23"/>
+ </g>
+ </g>
+ <g id="shape65-266" v:mID="65" v:groupContext="shape" transform="translate(205.876,-180.437)">
+ <title>Sheet.65</title>
+ <desc>Backend Server 2</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="408.124" width="108" height="32.8815"/>
+ <rect x="0" y="391.683" width="108" height="32.8815" class="st21"/>
+ <text x="11.93" y="411.72" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Backend Server 2</text> </g>
+ <g id="group66-269" transform="translate(219.029,-58.9375)" v:mID="66" v:groupContext="group">
+ <v:custProps>
+ <v:cp v:nameU="AssetNumber" v:lbl="Asset Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SerialNumber" v:lbl="Serial Number" v:prompt="" v:type="0" v:format="" v:sortKey="Asset"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Location" v:lbl="Location" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Building" v:lbl="Building" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Room" v:lbl="Room" v:prompt="" v:type="0" v:format="" v:sortKey="Asset" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Manufacturer" v:lbl="Manufacturer" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductNumber" v:lbl="Product Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="PartNumber" v:lbl="Part Number" v:prompt="" v:type="0" v:format="" v:sortKey="Equipment"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ProductDescription" v:lbl="Product Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Equipment" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkName" v:lbl="Network Name" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="IPAddress" v:lbl="IP Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="SubnetMask" v:lbl="Subnet Mask" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="AdminInterface" v:lbl="Administrative Interface" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NumberofPorts" v:lbl="Number of Ports" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CommunityString" v:lbl="Community String" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="NetworkDescription" v:lbl="Network Description" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Network" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="MACAddress" v:lbl="MAC Address" v:prompt="" v:type="0" v:format="" v:sortKey="Network"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="CPU" v:lbl="CPU" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Memory" v:lbl="Memory" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="OperatingSystem" v:lbl="Operating System" v:prompt="" v:type="0" v:format="" v:sortKey="Workstation"
+ v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="HardDriveSize" v:lbl="Hard Drive Capacity" v:prompt="" v:type="0" v:format=""
+ v:sortKey="Workstation" v:invis="false" v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="Department" v:lbl="Department" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="false"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ <v:cp v:nameU="ShapeClass" v:lbl="ShapeClass" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Equipment)"/>
+ <v:cp v:nameU="ShapeType" v:lbl="ShapeType" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0" v:val="VT4(Server)"/>
+ <v:cp v:nameU="BelongsTo" v:lbl="Belongs To" v:prompt="" v:type="0" v:format="" v:sortKey="" v:invis="true"
+ v:ask="false" v:langID="1033" v:cal="0"/>
+ </v:custProps>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ShapeClass" v:prompt="" v:val="VT0(5):26"/>
+ <v:ud v:nameU="SolSH" v:prompt="" v:val="VT15({BF0433D9-CD73-4EB5-8390-8653BE590246}):41"/>
+ <v:ud v:nameU="visLegendShape" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <title>Server.66</title>
+ <g id="shape67-270" v:mID="67" v:groupContext="shape" transform="translate(13.0183,0)">
+ <title>Sheet.67</title>
+ <g id="shadow67-271" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="352.565" width="45.9634" height="72" class="st2"/>
+ </g>
+ <rect x="0" y="352.565" width="45.9634" height="72" class="st3"/>
+ </g>
+ <g id="shape68-275" v:mID="68" v:groupContext="shape" transform="translate(47.371,-30.7354)">
+ <title>Sheet.68</title>
+ <g id="shadow68-276" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <ellipse cx="2.77848" cy="421.786" rx="2.77848" ry="2.77848" class="st2"/>
+ </g>
+ <ellipse cx="2.77848" cy="421.786" rx="2.77848" ry="2.77848" class="st13"/>
+ </g>
+ <g id="shape69-280" v:mID="69" v:groupContext="shape" transform="translate(30.51,-11.8022)">
+ <title>Sheet.69</title>
+ <v:userDefs>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(1)"/>
+ <v:ud v:nameU="SurroundingRegionColor" v:prompt="" v:val="VT5(#5b9bd5)"/>
+ </v:userDefs>
+ <g id="shadow69-281" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M-0 424.56 L22.42 424.56 L22.42 422.76 L-0 422.76 L-0 424.56 ZM-0 419.11 L22.42 419.11 L22.42 417.31
+ L-0 417.31 L-0 419.11 ZM-0 413.65 L22.42 413.65 L22.42 411.84 L-0 411.84 L-0 413.65 Z"
+ class="st10"/>
+ </g>
+ <path d="M-0 424.56 L22.42 424.56 L22.42 422.76 L-0 422.76 L-0 424.56 ZM-0 419.11 L22.42 419.11 L22.42 417.31 L-0
+ 417.31 L-0 419.11 ZM-0 413.65 L22.42 413.65 L22.42 411.84 L-0 411.84 L-0 413.65 Z" class="st23"/>
+ </g>
+ </g>
+ <g id="shape70-288" v:mID="70" v:groupContext="shape" transform="translate(201.029,-26.056)">
+ <title>Sheet.70</title>
+ <desc>Backend Server X</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="54" cy="408.124" width="108" height="32.8815"/>
+ <rect x="0" y="391.683" width="108" height="32.8815" class="st21"/>
+ <text x="11.86" y="411.72" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Backend Server X</text> </g>
+ <g id="shape71-291" v:mID="71" v:groupContext="shape" transform="translate(684.44,239.627) rotate(90)">
+ <title>Sheet.71</title>
+ <path d="M0 424.56 L45 424.56" class="st24"/>
+ </g>
+ <g id="shape72-294" v:mID="72" v:groupContext="shape" transform="translate(6.85967,-22.443) rotate(-38.1076)">
+ <title>Sheet.72</title>
+ <path d="M-0 424.56 A96.1331 44.4001 55.03 0 1 68.24 420.56 L68.51 420.79" class="st17"/>
+ </g>
+ <g id="shape73-299" v:mID="73" v:groupContext="shape" transform="translate(328.501,-135.937)">
+ <title>Rectangle.73</title>
+ <desc>Key 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow73-300" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="4.74" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key 1</text> </g>
+ <g id="shape74-305" v:mID="74" v:groupContext="shape" transform="translate(362.251,-135.937)">
+ <title>Rectangle.74</title>
+ <desc>Action 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow74-306" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.62" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action 1</text> </g>
+ <g id="shape75-311" v:mID="75" v:groupContext="shape" transform="translate(409.501,-135.937)">
+ <title>Rectangle.75</title>
+ <desc>Key 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow75-312" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="4.74" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key 2</text> </g>
+ <g id="shape76-317" v:mID="76" v:groupContext="shape" transform="translate(443.251,-135.937)">
+ <title>Rectangle.76</title>
+ <desc>Action 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow76-318" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.62" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action 2</text> </g>
+ <g id="shape77-323" v:mID="77" v:groupContext="shape" transform="translate(490.501,-135.937)">
+ <title>Rectangle.77</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow77-324" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape78-328" v:mID="78" v:groupContext="shape" transform="translate(524.251,-135.937)">
+ <title>Rectangle.78</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow78-329" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape79-333" v:mID="79" v:groupContext="shape" transform="translate(328.501,-113.437)">
+ <title>Rectangle.79</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow79-334" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape80-338" v:mID="80" v:groupContext="shape" transform="translate(362.251,-113.437)">
+ <title>Rectangle.80</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow80-339" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape81-343" v:mID="81" v:groupContext="shape" transform="translate(409.501,-113.437)">
+ <title>Rectangle.81</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow81-344" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape82-348" v:mID="82" v:groupContext="shape" transform="translate(443.251,-113.437)">
+ <title>Rectangle.82</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow82-349" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape83-353" v:mID="83" v:groupContext="shape" transform="translate(490.501,-113.437)">
+ <title>Rectangle.83</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow83-354" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape84-358" v:mID="84" v:groupContext="shape" transform="translate(524.251,-113.437)">
+ <title>Rectangle.84</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow84-359" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape85-363" v:mID="85" v:groupContext="shape" transform="translate(328.501,-77.4375)">
+ <title>Rectangle.85</title>
+ <desc>Key x</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow85-364" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.11" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key x</text> </g>
+ <g id="shape86-369" v:mID="86" v:groupContext="shape" transform="translate(362.251,-77.4375)">
+ <title>Rectangle.86</title>
+ <desc>Action x</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow86-370" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.99" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action x</text> </g>
+ <g id="shape87-375" v:mID="87" v:groupContext="shape" transform="translate(409.501,-77.4375)">
+ <title>Rectangle.87</title>
+ <desc>Key y</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow87-376" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.01" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key y</text> </g>
+ <g id="shape88-381" v:mID="88" v:groupContext="shape" transform="translate(443.251,-77.4375)">
+ <title>Rectangle.88</title>
+ <desc>Action y</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow88-382" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="4.89" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action y</text> </g>
+ <g id="shape89-387" v:mID="89" v:groupContext="shape" transform="translate(490.501,-77.4375)">
+ <title>Rectangle.89</title>
+ <desc>Key z</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow89-388" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.3" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key z</text> </g>
+ <g id="shape90-393" v:mID="90" v:groupContext="shape" transform="translate(524.251,-77.4375)">
+ <title>Rectangle.90</title>
+ <desc>Action z</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow90-394" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="5.18" y="418.56" class="st4" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action z</text> </g>
+ <g id="shape91-399" v:mID="91" v:groupContext="shape" transform="translate(328.501,-40.9375)">
+ <title>Rectangle.91</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow91-400" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape92-404" v:mID="92" v:groupContext="shape" transform="translate(362.251,-40.9375)">
+ <title>Rectangle.92</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow92-405" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape93-409" v:mID="93" v:groupContext="shape" transform="translate(409.501,-40.9375)">
+ <title>Rectangle.93</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow93-410" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ </g>
+ <g id="shape94-414" v:mID="94" v:groupContext="shape" transform="translate(443.251,-40.9375)">
+ <title>Rectangle.94</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow94-415" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ </g>
+ <g id="shape95-419" v:mID="95" v:groupContext="shape" transform="translate(490.501,-40.9375)">
+ <title>Rectangle.95</title>
+ <desc>Key N</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="15.75" cy="415.565" width="31.5" height="18"/>
+ <g id="shadow95-420" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="31.5" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="31.5" height="18" class="st3"/>
+ <text x="5.21" y="418.26" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Key N</text> </g>
+ <g id="shape96-425" v:mID="96" v:groupContext="shape" transform="translate(524.251,-40.9375)">
+ <title>Rectangle.96</title>
+ <desc>Action N</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="21.375" cy="415.565" width="42.75" height="18"/>
+ <g id="shadow96-426" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="406.565" width="42.75" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="406.565" width="42.75" height="18" class="st3"/>
+ <text x="5.67" y="418.26" class="st5" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Action N</text> </g>
+ <g id="shape97-431" v:mID="97" v:groupContext="shape" transform="translate(326.251,-31.9375)">
+ <title>Rectangle.97</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <g id="shadow97-432" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="289.065" width="245.25" height="135.5" class="st6"/>
+ </g>
+ <rect x="0" y="289.065" width="245.25" height="135.5" class="st7"/>
+ </g>
+ <g id="shape98-436" v:mID="98" v:groupContext="shape" transform="translate(337.501,-162.938)">
+ <title>Sheet.98</title>
+ <desc>Local Table for N Specific Flows Serviced at Node X</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="110.423" cy="418.94" width="220.85" height="11.25"/>
+ <rect x="0" y="413.315" width="220.846" height="11.25" class="st8"/>
+ <text x="5.55" y="421.94" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Local Table for N Specific Flows Serviced at Node X</text> </g>
+ <g id="shape99-439" v:mID="99" v:groupContext="shape" transform="translate(-204.342,-29.4449) rotate(-53.7462)">
+ <title>Sheet.99</title>
+ <path d="M0 424.56 L160.37 424.56" class="st25"/>
+ </g>
+ <g id="shape100-445" v:mID="100" v:groupContext="shape" transform="translate(-37.6568,-164.882) rotate(-24.444)">
+ <title>Sheet.100</title>
+ <path d="M0 424.56 L101.71 424.56" class="st25"/>
+ </g>
+ <g id="shape101-450" v:mID="101" v:groupContext="shape" transform="translate(464.049,-50.8578) rotate(50.099)">
+ <title>Sheet.101</title>
+ <path d="M0 424.56 L139.8 424.56" class="st25"/>
+ </g>
+ <g id="shape102-455" v:mID="102" v:groupContext="shape" transform="translate(372.376,-207.438)">
+ <title>Sheet.102</title>
+ <desc>Supports N Flows</desc>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="67.5" cy="415.565" width="135" height="18"/>
+ <rect x="0" y="406.565" width="135" height="18" class="st19"/>
+ <text x="25.15" y="419.17" class="st20" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Supports N Flows</text> </g>
+ </g>
+</svg>
diff --git a/doc/guides/sample_app_ug/index.rst b/doc/guides/sample_app_ug/index.rst
index 775e2f70..02611ef8 100644
--- a/doc/guides/sample_app_ug/index.rst
+++ b/doc/guides/sample_app_ug/index.rst
@@ -57,10 +57,10 @@ Sample Applications User Guides
l3_forward_virtual
link_status_intr
load_balancer
+ server_node_efd
multi_process
qos_metering
qos_scheduler
- intel_quickassist
quota_watermark
timer
packet_ordering
@@ -108,8 +108,6 @@ Sample Applications User Guides
:numref:`figure_qos_sched_app_arch` :ref:`figure_qos_sched_app_arch`
-:numref:`figure_quickassist_block_diagram` :ref:`figure_quickassist_block_diagram`
-
:numref:`figure_pipeline_overview` :ref:`figure_pipeline_overview`
:numref:`figure_ring_pipeline_perf_setup` :ref:`figure_ring_pipeline_perf_setup`
@@ -132,6 +130,8 @@ Sample Applications User Guides
:numref:`figure_ptpclient_highlevel` :ref:`figure_ptpclient_highlevel`
+:numref:`figure_efd_sample_app_overview` :ref:`figure_efd_sample_app_overview`
+
**Tables**
:numref:`table_qos_metering_1` :ref:`table_qos_metering_1`
diff --git a/doc/guides/sample_app_ug/intel_quickassist.rst b/doc/guides/sample_app_ug/intel_quickassist.rst
deleted file mode 100644
index 04d1593d..00000000
--- a/doc/guides/sample_app_ug/intel_quickassist.rst
+++ /dev/null
@@ -1,221 +0,0 @@
-.. BSD LICENSE
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Intel Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Intel® QuickAssist Technology Sample Application
-================================================
-
-This sample application demonstrates the use of the cryptographic operations provided
-by the Intel® QuickAssist Technology from within the DPDK environment.
-Therefore, building and running this application requires having both the DPDK and
-the QuickAssist Technology Software Library installed, as well as at least one
-Intel® QuickAssist Technology hardware device present in the system.
-
-For this sample application, there is a dependency on either of:
-
-* Intel® Communications Chipset 8900 to 8920 Series Software for Linux* package
-
-* Intel® Communications Chipset 8925 to 8955 Series Software for Linux* package
-
-Overview
---------
-
-An overview of the application is provided in :numref:`figure_quickassist_block_diagram`.
-For simplicity, only two NIC ports and one Intel® QuickAssist Technology device are shown in this diagram,
-although the number of NIC ports and Intel® QuickAssist Technology devices can be different.
-
-.. _figure_quickassist_block_diagram:
-
-.. figure:: img/quickassist_block_diagram.*
-
- Intel® QuickAssist Technology Application Block Diagram
-
-
-The application allows the configuration of the following items:
-
-* Number of NIC ports
-
-* Number of logical cores (lcores)
-
-* Mapping of NIC RX queues to logical cores
-
-Each lcore communicates with every cryptographic acceleration engine in the system through a pair of dedicated input - output queues.
-Each lcore has a dedicated NIC TX queue with every NIC port in the system.
-Therefore, each lcore reads packets from its NIC RX queues and cryptographic accelerator output queues and
-writes packets to its NIC TX queues and cryptographic accelerator input queues.
-
-Each incoming packet that is read from a NIC RX queue is either directly forwarded to its destination NIC TX port (forwarding path)
-or first sent to one of the Intel® QuickAssist Technology devices for either encryption or decryption
-before being sent out on its destination NIC TX port (cryptographic path).
-
-The application supports IPv4 input packets only.
-For each input packet, the decision between the forwarding path and
-the cryptographic path is taken at the classification stage based on the value of the IP source address field read from the input packet.
-Assuming that the IP source address is A.B.C.D, then if:
-
-* D = 0: the forwarding path is selected (the packet is forwarded out directly)
-
-* D = 1: the cryptographic path for encryption is selected (the packet is first encrypted and then forwarded out)
-
-* D = 2: the cryptographic path for decryption is selected (the packet is first decrypted and then forwarded out)
-
-For the cryptographic path cases (D = 1 or D = 2), byte C specifies the cipher algorithm and
-byte B the cryptographic hash algorithm to be used for the current packet.
-Byte A is not used and can be any value.
-The cipher and cryptographic hash algorithms supported by this application are listed in the crypto.h header file.
-
-For each input packet, the destination NIC TX port is decided at the forwarding stage (executed after the cryptographic stage,
-if enabled for the packet) by looking at the RX port index of the dst_ports[ ] array,
-which was initialized at startup, being the outport the adjacent enabled port.
-For example, if ports 1,3,5 and 6 are enabled, for input port 1, outport port will be 3 and vice versa,
-and for input port 5, output port will be 6 and vice versa.
-
-For the cryptographic path, it is the payload of the IPv4 packet that is encrypted or decrypted.
-
-Setup
-~~~~~
-
-Building and running this application requires having both the DPDK package and
-the QuickAssist Technology Software Library installed,
-as well as at least one Intel® QuickAssist Technology hardware device present in the system.
-
-For more details on how to build and run DPDK and Intel® QuickAssist Technology applications,
-please refer to the following documents:
-
-* *DPDK Getting Started Guide*
-
-* Intel® Communications Chipset 8900 to 8920 Series Software for Linux* Getting Started Guide (440005)
-
-* Intel® Communications Chipset 8925 to 8955 Series Software for Linux* Getting Started Guide (523128)
-
-For more details on the actual platforms used to validate this application, as well as performance numbers,
-please refer to the Test Report, which is accessible by contacting your Intel representative.
-
-Building the Application
-------------------------
-
-Steps to build the application:
-
-#. Set up the following environment variables:
-
- .. code-block:: console
-
- export RTE_SDK=<Absolute path to the DPDK installation folder>
- export ICP_ROOT=<Absolute path to the Intel QAT installation folder>
-
-#. Set the target (a default target is used if not specified). For example:
-
- .. code-block:: console
-
- export RTE_TARGET=x86_64-native-linuxapp-gcc
-
- Refer to the *DPDK Getting Started Guide* for possible RTE_TARGET values.
-
-#. Build the application:
-
- .. code-block:: console
-
- cd ${RTE_SDK}/examples/dpdk_qat
- make
-
-Running the Application
------------------------
-
-Intel® QuickAssist Technology Configuration Files
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The Intel® QuickAssist Technology configuration files used by the application are located in the config_files folder in the application folder.
-There following sets of configuration files are included in the DPDK package:
-
-* Stargo CRB (single CPU socket): located in the stargo folder
-
- * dh89xxcc_qa_dev0.conf
-
-* Shumway CRB (dual CPU socket): located in the shumway folder
-
- * dh89xxcc_qa_dev0.conf
-
- * dh89xxcc_qa_dev1.conf
-
-* Coleto Creek: located in the coleto folder
-
- * dh895xcc_qa_dev0.conf
-
-The relevant configuration file(s) must be copied to the /etc/ directory.
-
-Please note that any change to these configuration files requires restarting the Intel®
-QuickAssist Technology driver using the following command:
-
-.. code-block:: console
-
- # service qat_service restart
-
-Refer to the following documents for information on the Intel® QuickAssist Technology configuration files:
-
-* Intel® Communications Chipset 8900 to 8920 Series Software Programmer's Guide
-
-* Intel® Communications Chipset 8925 to 8955 Series Software Programmer's Guide
-
-* Intel® Communications Chipset 8900 to 8920 Series Software for Linux* Getting Started Guide.
-
-* Intel® Communications Chipset 8925 to 8955 Series Software for Linux* Getting Started Guide.
-
-Traffic Generator Setup and Application Startup
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The application has a number of command line options:
-
- dpdk_qat [EAL options] -- -p PORTMASK [--no-promisc] [--config '(port,queue,lcore)[,(port,queue,lcore)]']
-
-where,
-
-* -p PORTMASK: Hexadecimal bitmask of ports to configure
-
-* --no-promisc: Disables promiscuous mode for all ports,
- so that only packets with the Ethernet MAC destination address set to the Ethernet address of the port are accepted.
- By default promiscuous mode is enabled so that packets are accepted regardless of the packet's Ethernet MAC destination address.
-
-* --config'(port,queue,lcore)[,(port,queue,lcore)]': determines which queues from which ports are mapped to which cores.
-
-Refer to the :doc:`l3_forward` for more detailed descriptions of the --config command line option.
-
-As an example, to run the application with two ports and two cores,
-which are using different Intel® QuickAssist Technology execution engines,
-performing AES-CBC-128 encryption with AES-XCBC-MAC-96 hash, the following settings can be used:
-
-* Traffic generator source IP address: 0.9.6.1
-
-* Command line:
-
- .. code-block:: console
-
- ./build/dpdk_qat -c 0xff -n 2 -- -p 0x3 --config '(0,0,1),(1,0,2)'
-
-Refer to the *DPDK Test Report* for more examples of traffic generator setup and the application startup command lines.
-If no errors are generated in response to the startup commands, the application is running correctly.
diff --git a/doc/guides/sample_app_ug/ip_frag.rst b/doc/guides/sample_app_ug/ip_frag.rst
index 0c8da194..bd1fe2dc 100644
--- a/doc/guides/sample_app_ug/ip_frag.rst
+++ b/doc/guides/sample_app_ug/ip_frag.rst
@@ -111,7 +111,7 @@ To run the example in linuxapp environment with 2 lcores (2,4) over 2 ports(0,2)
.. code-block:: console
- ./build/ip_fragmentation -c 0x14 -n 3 -- -p 5
+ ./build/ip_fragmentation -l 2,4 -n 3 -- -p 5
EAL: coremask set to 14
EAL: Detected lcore 0 on socket 0
EAL: Detected lcore 1 on socket 1
@@ -140,7 +140,7 @@ To run the example in linuxapp environment with 1 lcore (4) over 2 ports(0,2) wi
.. code-block:: console
- ./build/ip_fragmentation -c 0x10 -n 3 -- -p 5 -q 2
+ ./build/ip_fragmentation -l 4 -n 3 -- -p 5 -q 2
To test the application, flows should be set up in the flow generator that match the values in the
l3fwd_ipv4_route_array and/or l3fwd_ipv6_route_array table.
diff --git a/doc/guides/sample_app_ug/ip_reassembly.rst b/doc/guides/sample_app_ug/ip_reassembly.rst
index 3c5cc708..cc9e5911 100644
--- a/doc/guides/sample_app_ug/ip_reassembly.rst
+++ b/doc/guides/sample_app_ug/ip_reassembly.rst
@@ -102,7 +102,7 @@ To run the example in linuxapp environment with 2 lcores (2,4) over 2 ports(0,2)
.. code-block:: console
- ./build/ip_reassembly -c 0x14 -n 3 -- -p 5
+ ./build/ip_reassembly -l 2,4 -n 3 -- -p 5
EAL: coremask set to 14
EAL: Detected lcore 0 on socket 0
EAL: Detected lcore 1 on socket 1
@@ -133,7 +133,7 @@ To run the example in linuxapp environment with 1 lcore (4) over 2 ports(0,2) wi
.. code-block:: console
- ./build/ip_reassembly -c 0x10 -n 3 -- -p 5 -q 2
+ ./build/ip_reassembly -l 4 -n 3 -- -p 5 -q 2
To test the application, flows should be set up in the flow generator that match the values in the
l3fwd_ipv4_route_array and/or l3fwd_ipv6_route_array table.
diff --git a/doc/guides/sample_app_ug/ipv4_multicast.rst b/doc/guides/sample_app_ug/ipv4_multicast.rst
index 72da8c42..3e30f509 100644
--- a/doc/guides/sample_app_ug/ipv4_multicast.rst
+++ b/doc/guides/sample_app_ug/ipv4_multicast.rst
@@ -113,7 +113,7 @@ Typically, to run the IPv4 Multicast sample application, issue the following com
.. code-block:: console
- ./build/ipv4_multicast -c 0x00f -n 3 -- -p 0x3 -q 1
+ ./build/ipv4_multicast -l 0-3 -n 3 -- -p 0x3 -q 1
In this command:
@@ -145,12 +145,12 @@ Memory pools for indirect buffers are initialized differently from the memory po
.. code-block:: c
- packet_pool = rte_mempool_create("packet_pool", NB_PKT_MBUF, PKT_MBUF_SIZE, 32, sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0);
-
- header_pool = rte_mempool_create("header_pool", NB_HDR_MBUF, HDR_MBUF_SIZE, 32, 0, NULL, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0);
- clone_pool = rte_mempool_create("clone_pool", NB_CLONE_MBUF,
- CLONE_MBUF_SIZE, 32, 0, NULL, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0);
+ packet_pool = rte_pktmbuf_pool_create("packet_pool", NB_PKT_MBUF, 32,
+ 0, PKT_MBUF_DATA_SIZE, rte_socket_id());
+ header_pool = rte_pktmbuf_pool_create("header_pool", NB_HDR_MBUF, 32,
+ 0, HDR_MBUF_DATA_SIZE, rte_socket_id());
+ clone_pool = rte_pktmbuf_pool_create("clone_pool", NB_CLONE_MBUF, 32,
+ 0, 0, rte_socket_id());
The reason for this is because indirect buffers are not supposed to hold any packet data and
therefore can be initialized with lower amount of reserved memory for each buffer.
diff --git a/doc/guides/sample_app_ug/keep_alive.rst b/doc/guides/sample_app_ug/keep_alive.rst
index 38973779..fe908206 100644
--- a/doc/guides/sample_app_ug/keep_alive.rst
+++ b/doc/guides/sample_app_ug/keep_alive.rst
@@ -114,7 +114,7 @@ To run the application in linuxapp environment with 4 lcores, 16 ports
.. code-block:: console
- ./build/l2fwd-keepalive -c f -n 4 -- -q 8 -p ffff -K 10
+ ./build/l2fwd-keepalive -l 0-3 -n 4 -- -q 8 -p ffff -K 10
Refer to the *DPDK Getting Started Guide* for general information on
running applications and the Environment Abstraction Layer (EAL)
diff --git a/doc/guides/sample_app_ug/kernel_nic_interface.rst b/doc/guides/sample_app_ug/kernel_nic_interface.rst
index 2ae9b702..619a7b52 100644
--- a/doc/guides/sample_app_ug/kernel_nic_interface.rst
+++ b/doc/guides/sample_app_ug/kernel_nic_interface.rst
@@ -180,7 +180,7 @@ Where:
Refer to *DPDK Getting Started Guide* for general information on running applications and the Environment Abstraction Layer (EAL) options.
-The -c coremask parameter of the EAL options should include the lcores indicated by the lcore_rx and lcore_tx,
+The -c coremask or -l corelist parameter of the EAL options should include the lcores indicated by the lcore_rx and lcore_tx,
but does not need to include lcores indicated by lcore_kthread as they are used to pin the kernel thread on.
The -p PORTMASK parameter should include the ports indicated by the port in --config, neither more nor less.
@@ -199,7 +199,7 @@ and one lcore of kernel thread for each port:
.. code-block:: console
- ./build/kni -c 0xf0 -n 4 -- -P -p 0x3 -config="(0,4,6,8),(1,5,7,9)"
+ ./build/kni -l 4-7 -n 4 -- -P -p 0x3 -config="(0,4,6,8),(1,5,7,9)"
KNI Operations
--------------
diff --git a/doc/guides/sample_app_ug/l2_forward_cat.rst b/doc/guides/sample_app_ug/l2_forward_cat.rst
index 285c3c74..b0c2e109 100644
--- a/doc/guides/sample_app_ug/l2_forward_cat.rst
+++ b/doc/guides/sample_app_ug/l2_forward_cat.rst
@@ -108,13 +108,13 @@ To run the example in a ``linuxapp`` environment and enable CAT on cpus 0-2:
.. code-block:: console
- ./build/l2fwd-cat -c 2 -n 4 -- --l3ca="0x3@(0-2)"
+ ./build/l2fwd-cat -l 1 -n 4 -- --l3ca="0x3@(0-2)"
or to enable CAT and CDP on cpus 1,3:
.. code-block:: console
- ./build/l2fwd-cat -c 2 -n 4 -- --l3ca="(0x00C00,0x00300)@(1,3)"
+ ./build/l2fwd-cat -l 1 -n 4 -- --l3ca="(0x00C00,0x00300)@(1,3)"
If CDP is not supported it will fail with following error message:
@@ -242,4 +242,4 @@ relevant CPUs via ``pqos_l3ca_assoc_set(...)`` calls.
``atexit(...)`` is used to register ``cat_exit(...)`` to be called on
a clean exit. ``cat_exit(...)`` performs a simple CAT clean-up, by associating
-COS 0 to all involved CPUs via ``pqos_l3ca_assoc_set(...)`` calls. \ No newline at end of file
+COS 0 to all involved CPUs via ``pqos_l3ca_assoc_set(...)`` calls.
diff --git a/doc/guides/sample_app_ug/l2_forward_crypto.rst b/doc/guides/sample_app_ug/l2_forward_crypto.rst
index 723376c5..d6df36b5 100644
--- a/doc/guides/sample_app_ug/l2_forward_crypto.rst
+++ b/doc/guides/sample_app_ug/l2_forward_crypto.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2016 Intel Corporation. All rights reserved.
+ Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -89,7 +89,7 @@ The application requires a number of command line options:
[--cipher_key_random_size SIZE] [--iv IV] [--iv_random_size SIZE] /
[--auth_algo ALGO] [--auth_op GENERATE/VERIFY] [--auth_key KEY] /
[--auth_key_random_size SIZE] [--aad AAD] [--aad_random_size SIZE] /
- [--digest size SIZE] [--sessionless]
+ [--digest size SIZE] [--sessionless] [--cryptodev_mask MASK]
where,
@@ -113,7 +113,7 @@ where,
(default is Cipher->Hash)
-* cipher_algo: select the ciphering algorithm (default is AES CBC)
+* cipher_algo: select the ciphering algorithm (default is aes-cbc)
* cipher_op: select the ciphering operation to perform: ENCRYPT or DECRYPT
@@ -133,7 +133,7 @@ where,
Note that if --iv is used, this will be ignored.
-* auth_algo: select the authentication algorithm (default is SHA1-HMAC)
+* auth_algo: select the authentication algorithm (default is sha1-hmac)
* cipher_op: select the authentication operation to perform: GENERATE or VERIFY
@@ -157,6 +157,11 @@ where,
* sessionless: no crypto session will be created.
+* cryptodev_mask: A hexadecimal bitmask of the cryptodevs to be used by the
+ application.
+
+ (default is all cryptodevs).
+
The application requires that crypto devices capable of performing
the specified crypto operation are available on application initialization.
@@ -167,11 +172,11 @@ To run the application in linuxapp environment with 2 lcores, 2 ports and 2 cryp
.. code-block:: console
- $ ./build/l2fwd-crypto -c 0x3 -n 4 --vdev "cryptodev_aesni_mb_pmd" \
+ $ ./build/l2fwd-crypto -l 0-1 -n 4 --vdev "cryptodev_aesni_mb_pmd" \
--vdev "cryptodev_aesni_mb_pmd" -- -p 0x3 --chain CIPHER_HASH \
- --cipher_op ENCRYPT --cipher_algo AES_CBC \
+ --cipher_op ENCRYPT --cipher_algo aes-cbc \
--cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f \
- --auth_op GENERATE --auth_algo AES_XCBC_MAC \
+ --auth_op GENERATE --auth_algo aes-xcbc-mac \
--auth_key 10:11:12:13:14:15:16:17:18:19:1a:1b:1c:1d:1e:1f
Refer to the *DPDK Getting Started Guide* for general information on running applications
diff --git a/doc/guides/sample_app_ug/l2_forward_job_stats.rst b/doc/guides/sample_app_ug/l2_forward_job_stats.rst
index 2444e36e..1029cc8f 100644
--- a/doc/guides/sample_app_ug/l2_forward_job_stats.rst
+++ b/doc/guides/sample_app_ug/l2_forward_job_stats.rst
@@ -140,7 +140,7 @@ thousands separator printing, issue the command:
.. code-block:: console
- $ ./build/l2fwd-jobstats -c f -n 4 -- -q 8 -p ffff -l
+ $ ./build/l2fwd-jobstats -l 0-3 -n 4 -- -q 8 -p ffff -l
Refer to the *DPDK Getting Started Guide* for general information on running applications
and the Environment Abstraction Layer (EAL) options.
@@ -193,36 +193,25 @@ and the application to store network packet data:
.. code-block:: c
/* create the mbuf pool */
- l2fwd_pktmbuf_pool =
- rte_mempool_create("mbuf_pool", NB_MBUF,
- MBUF_SIZE, 32,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
+ MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
The rte_mempool is a generic structure used to handle pools of objects.
-In this case, it is necessary to create a pool that will be used by the driver,
-which expects to have some reserved space in the mempool structure,
-sizeof(struct rte_pktmbuf_pool_private) bytes.
-The number of allocated pkt mbufs is NB_MBUF, with a size of MBUF_SIZE each.
-A per-lcore cache of 32 mbufs is kept.
+In this case, it is necessary to create a pool that will be used by the driver.
+The number of allocated pkt mbufs is NB_MBUF, with a data room size of
+RTE_MBUF_DEFAULT_BUF_SIZE each.
+A per-lcore cache of MEMPOOL_CACHE_SIZE mbufs is kept.
The memory is allocated in rte_socket_id() socket,
but it is possible to extend this code to allocate one mbuf pool per socket.
-Two callback pointers are also given to the rte_mempool_create() function:
-
-* The first callback pointer is to rte_pktmbuf_pool_init() and is used
- to initialize the private data of the mempool, which is needed by the driver.
- This function is provided by the mbuf API, but can be copied and extended by the developer.
-
-* The second callback pointer given to rte_mempool_create() is the mbuf initializer.
- The default is used, that is, rte_pktmbuf_init(), which is provided in the rte_mbuf library.
- If a more complex application wants to extend the rte_pktmbuf structure for its own needs,
- a new function derived from rte_pktmbuf_init( ) can be created.
+The rte_pktmbuf_pool_create() function uses the default mbuf pool and mbuf
+initializers, respectively rte_pktmbuf_pool_init() and rte_pktmbuf_init().
+An advanced application may want to use the mempool API to create the
+mbuf pool with more control.
Driver Initialization
~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/sample_app_ug/l2_forward_real_virtual.rst b/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
index cf15d1c3..f579c8fe 100644
--- a/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
+++ b/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
@@ -150,7 +150,7 @@ updating enabled, issue the command:
.. code-block:: console
- $ ./build/l2fwd -c f -n 4 -- -q 8 -p ffff
+ $ ./build/l2fwd -l 0-3 -n 4 -- -q 8 -p ffff
Refer to the *DPDK Getting Started Guide* for general information on running applications
and the Environment Abstraction Layer (EAL) options.
@@ -207,31 +207,25 @@ and the application to store network packet data:
/* create the mbuf pool */
- l2fwd_pktmbuf_pool = rte_mempool_create("mbuf_pool", NB_MBUF, MBUF_SIZE, 32, sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, SOCKET0, 0);
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
+ MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
if (l2fwd_pktmbuf_pool == NULL)
rte_panic("Cannot init mbuf pool\n");
The rte_mempool is a generic structure used to handle pools of objects.
-In this case, it is necessary to create a pool that will be used by the driver,
-which expects to have some reserved space in the mempool structure,
-sizeof(struct rte_pktmbuf_pool_private) bytes.
-The number of allocated pkt mbufs is NB_MBUF, with a size of MBUF_SIZE each.
+In this case, it is necessary to create a pool that will be used by the driver.
+The number of allocated pkt mbufs is NB_MBUF, with a data room size of
+RTE_MBUF_DEFAULT_BUF_SIZE each.
A per-lcore cache of 32 mbufs is kept.
The memory is allocated in NUMA socket 0,
but it is possible to extend this code to allocate one mbuf pool per socket.
-Two callback pointers are also given to the rte_mempool_create() function:
-
-* The first callback pointer is to rte_pktmbuf_pool_init() and is used
- to initialize the private data of the mempool, which is needed by the driver.
- This function is provided by the mbuf API, but can be copied and extended by the developer.
-
-* The second callback pointer given to rte_mempool_create() is the mbuf initializer.
- The default is used, that is, rte_pktmbuf_init(), which is provided in the rte_mbuf library.
- If a more complex application wants to extend the rte_pktmbuf structure for its own needs,
- a new function derived from rte_pktmbuf_init( ) can be created.
+The rte_pktmbuf_pool_create() function uses the default mbuf pool and mbuf
+initializers, respectively rte_pktmbuf_pool_init() and rte_pktmbuf_init().
+An advanced application may want to use the mempool API to create the
+mbuf pool with more control.
.. _l2_fwd_app_dvr_init:
@@ -244,7 +238,7 @@ in the *DPDK Programmer's Guide* - Rel 1.4 EAR and the *DPDK API Reference*.
.. code-block:: c
- if (rte_eal_pci_probe() < 0)
+ if (rte_pci_probe() < 0)
rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
nb_ports = rte_eth_dev_count();
@@ -285,7 +279,7 @@ Observe that:
* rte_igb_pmd_init_all() simultaneously registers the driver as a PCI driver and as an Ethernet* Poll Mode Driver.
-* rte_eal_pci_probe() parses the devices on the PCI bus and initializes recognized devices.
+* rte_pci_probe() parses the devices on the PCI bus and initializes recognized devices.
The next step is to configure the RX and TX queues.
For each port, there is only one RX queue (only one lcore is able to poll a given port).
diff --git a/doc/guides/sample_app_ug/l3_forward.rst b/doc/guides/sample_app_ug/l3_forward.rst
index ab916b97..6a6b8fbe 100644
--- a/doc/guides/sample_app_ug/l3_forward.rst
+++ b/doc/guides/sample_app_ug/l3_forward.rst
@@ -129,43 +129,33 @@ Where,
* ``--parse-ptype:`` Optional, set to use software to analyze packet type. Without this option, hardware will check the packet type.
-For example, consider a dual processor socket platform where cores 0-7 and 16-23 appear on socket 0, while cores 8-15 and 24-31 appear on socket 1.
-Let's say that the programmer wants to use memory from both NUMA nodes, the platform has only two ports, one connected to each NUMA node,
-and the programmer wants to use two cores from each processor socket to do the packet processing.
+For example, consider a dual processor socket platform with 8 physical cores, where cores 0-7 and 16-23 appear on socket 0,
+while cores 8-15 and 24-31 appear on socket 1.
-To enable L3 forwarding between two ports, using two cores, cores 1 and 2, from each processor,
-while also taking advantage of local memory access by optimizing around NUMA, the programmer must enable two queues from each port,
-pin to the appropriate cores and allocate memory from the appropriate NUMA node. This is achieved using the following command:
+To enable L3 forwarding between two ports, assuming that both ports are in the same socket, using two cores, cores 1 and 2,
+(which are in the same socket too), use the following command:
.. code-block:: console
- ./build/l3fwd -c 606 -n 4 -- -p 0x3 --config="(0,0,1),(0,1,2),(1,0,9),(1,1,10)"
+ ./build/l3fwd -l 1,2 -n 4 -- -p 0x3 --config="(0,0,1),(1,0,2)"
In this command:
-* The -c option enables cores 0, 1, 2, 3
+* The -l option enables cores 1, 2
* The -p option enables ports 0 and 1
-* The --config option enables two queues on each port and maps each (port,queue) pair to a specific core.
- Logic to enable multiple RX queues using RSS and to allocate memory from the correct NUMA nodes
- is included in the application and is done transparently.
+* The --config option enables one queue on each port and maps each (port,queue) pair to a specific core.
The following table shows the mapping in this example:
+----------+-----------+-----------+-------------------------------------+
| **Port** | **Queue** | **lcore** | **Description** |
| | | | |
+----------+-----------+-----------+-------------------------------------+
-| 0 | 0 | 0 | Map queue 0 from port 0 to lcore 0. |
+| 0 | 0 | 1 | Map queue 0 from port 0 to lcore 1. |
| | | | |
+----------+-----------+-----------+-------------------------------------+
-| 0 | 1 | 2 | Map queue 1 from port 0 to lcore 2. |
-| | | | |
-+----------+-----------+-----------+-------------------------------------+
-| 1 | 0 | 1 | Map queue 0 from port 1 to lcore 1. |
-| | | | |
-+----------+-----------+-----------+-------------------------------------+
-| 1 | 1 | 3 | Map queue 1 from port 1 to lcore 3. |
+| 1 | 0 | 2 | Map queue 0 from port 1 to lcore 2. |
| | | | |
+----------+-----------+-----------+-------------------------------------+
diff --git a/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst b/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst
index 4049e019..a6aa4fb1 100644
--- a/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst
+++ b/doc/guides/sample_app_ug/l3_forward_access_ctrl.rst
@@ -306,48 +306,35 @@ where,
* --no-numa: optional, disables numa awareness
-As an example, consider a dual processor socket platform where cores 0, 2, 4, 6, 8 and 10 appear on socket 0,
-while cores 1, 3, 5, 7, 9 and 11 appear on socket 1.
-Let's say that the user wants to use memory from both NUMA nodes,
-the platform has only two ports and the user wants to use two cores from each processor socket to do the packet processing.
+For example, consider a dual processor socket platform with 8 physical cores, where cores 0-7 and 16-23 appear on socket 0,
+while cores 8-15 and 24-31 appear on socket 1.
-To enable L3 forwarding between two ports, using two cores from each processor,
-while also taking advantage of local memory access by optimizing around NUMA,
-the user must enable two queues from each port,
-pin to the appropriate cores and allocate memory from the appropriate NUMA node.
-This is achieved using the following command:
+To enable L3 forwarding between two ports, assuming that both ports are in the same socket, using two cores, cores 1 and 2,
+(which are in the same socket too), use the following command:
.. code-block:: console
- ./build/l3fwd-acl -c f -n 4 -- -p 0x3 --config="(0,0,0),(0,1,2),(1,0,1),(1,1,3)" --rule_ipv4="./rule_ipv4.db" -- rule_ipv6="./rule_ipv6.db" --scalar
+ ./build/l3fwd-acl -l 1,2 -n 4 -- -p 0x3 --config="(0,0,1),(1,0,2)" --rule_ipv4="./rule_ipv4.db" -- rule_ipv6="./rule_ipv6.db" --scalar
In this command:
-* The -c option enables cores 0, 1, 2, 3
+* The -l option enables cores 1, 2
* The -p option enables ports 0 and 1
-* The --config option enables two queues on each port and maps each (port,queue) pair to a specific core.
- Logic to enable multiple RX queues using RSS and to allocate memory from the correct NUMA nodes is included in the application
- and is done transparently.
+* The --config option enables one queue on each port and maps each (port,queue) pair to a specific core.
The following table shows the mapping in this example:
- +----------+------------+-----------+------------------------------------------------+
- | **Port** | **Queue** | **lcore** | **Description** |
- | | | | |
- +==========+============+===========+================================================+
- | 0 | 0 | 0 | Map queue 0 from port 0 to lcore 0. |
- | | | | |
- +----------+------------+-----------+------------------------------------------------+
- | 0 | 1 | 2 | Map queue 1 from port 0 to lcore 2. |
- | | | | |
- +----------+------------+-----------+------------------------------------------------+
- | 1 | 0 | 1 | Map queue 0 from port 1 to lcore 1. |
- | | | | |
- +----------+------------+-----------+------------------------------------------------+
- | 1 | 1 | 3 | Map queue 1 from port 1 to lcore 3. |
- | | | | |
- +----------+------------+-----------+------------------------------------------------+
+ +----------+------------+-----------+-------------------------------------+
+ | **Port** | **Queue** | **lcore** | **Description** |
+ | | | | |
+ +==========+============+===========+=====================================+
+ | 0 | 0 | 1 | Map queue 0 from port 0 to lcore 1. |
+ | | | | |
+ +----------+------------+-----------+-------------------------------------+
+ | 1 | 0 | 2 | Map queue 0 from port 1 to lcore 2. |
+ | | | | |
+ +----------+------------+-----------+-------------------------------------+
* The --rule_ipv4 option specifies the reading of IPv4 rules sets from the ./ rule_ipv4.db file.
diff --git a/doc/guides/sample_app_ug/l3_forward_virtual.rst b/doc/guides/sample_app_ug/l3_forward_virtual.rst
index fa04722e..5f9d8948 100644
--- a/doc/guides/sample_app_ug/l3_forward_virtual.rst
+++ b/doc/guides/sample_app_ug/l3_forward_virtual.rst
@@ -110,40 +110,33 @@ where,
* --no-numa: optional, disables numa awareness
-For example, consider a dual processor socket platform where cores 0,2,4,6, 8, and 10 appear on socket 0,
-while cores 1,3,5,7,9, and 11 appear on socket 1.
-Let's say that the programmer wants to use memory from both NUMA nodes,
-the platform has only two ports and the programmer wants to use one core from each processor socket to do the packet processing
-since only one Rx/Tx queue pair can be used in virtualization mode.
+For example, consider a dual processor socket platform with 8 physical cores, where cores 0-7 and 16-23 appear on socket 0,
+while cores 8-15 and 24-31 appear on socket 1.
-To enable L3 forwarding between two ports, using one core from each processor,
-while also taking advantage of local memory accesses by optimizing around NUMA,
-the programmer can pin to the appropriate cores and allocate memory from the appropriate NUMA node.
-This is achieved using the following command:
+To enable L3 forwarding between two ports, assuming that both ports are in the same socket, using two cores, cores 1 and 2,
+(which are in the same socket too), use the following command:
.. code-block:: console
- ./build/l3fwd-vf -c 0x03 -n 3 -- -p 0x3 --config="(0,0,0),(1,0,1)"
+ ./build/l3fwd-vf -l 1,2 -n 4 -- -p 0x3 --config="(0,0,1),(1,0,2)"
In this command:
-* The -c option enables cores 0 and 1
+* The -l option enables cores 1 and 2
* The -p option enables ports 0 and 1
* The --config option enables one queue on each port and maps each (port,queue) pair to a specific core.
- Logic to enable multiple RX queues using RSS and to allocate memory from the correct NUMA nodes
- is included in the application and is done transparently.
The following table shows the mapping in this example:
+----------+-----------+-----------+------------------------------------+
| **Port** | **Queue** | **lcore** | **Description** |
| | | | |
+==========+===========+===========+====================================+
- | 0 | 0 | 0 | Map queue 0 from port 0 to lcore 0 |
+ | 0 | 0 | 1 | Map queue 0 from port 0 to lcore 1 |
| | | | |
+----------+-----------+-----------+------------------------------------+
- | 1 | 1 | 1 | Map queue 0 from port 1 to lcore 1 |
+ | 1 | 0 | 2 | Map queue 0 from port 1 to lcore 2 |
| | | | |
+----------+-----------+-----------+------------------------------------+
diff --git a/doc/guides/sample_app_ug/link_status_intr.rst b/doc/guides/sample_app_ug/link_status_intr.rst
index 779df97d..f9af4749 100644
--- a/doc/guides/sample_app_ug/link_status_intr.rst
+++ b/doc/guides/sample_app_ug/link_status_intr.rst
@@ -104,7 +104,7 @@ issue the command:
.. code-block:: console
- $ ./build/link_status_interrupt -c f -n 4-- -q 8 -p ffff
+ $ ./build/link_status_interrupt -l 0-3 -n 4-- -q 8 -p ffff
Refer to the *DPDK Getting Started Guide* for general information on running applications
and the Environment Abstraction Layer (EAL) options.
@@ -138,7 +138,7 @@ To fully understand this code, it is recommended to study the chapters that rela
.. code-block:: c
- if (rte_eal_pci_probe() < 0)
+ if (rte_pci_probe() < 0)
rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
nb_ports = rte_eth_dev_count();
@@ -171,7 +171,7 @@ To fully understand this code, it is recommended to study the chapters that rela
Observe that:
-* rte_eal_pci_probe() parses the devices on the PCI bus and initializes recognized devices.
+* rte_pci_probe() parses the devices on the PCI bus and initializes recognized devices.
The next step is to configure the RX and TX queues.
For each port, there is only one RX queue (only one lcore is able to poll a given port).
diff --git a/doc/guides/sample_app_ug/load_balancer.rst b/doc/guides/sample_app_ug/load_balancer.rst
index fdd8cbd3..e101a5f2 100644
--- a/doc/guides/sample_app_ug/load_balancer.rst
+++ b/doc/guides/sample_app_ug/load_balancer.rst
@@ -178,7 +178,7 @@ Example:
.. code-block:: console
- ./load_balancer -c 0xf8 -n 4 -- --rx "(0,0,3),(1,0,3)" --tx "(0,3),(1,3)" --w "4,5,6,7" --lpm "1.0.0.0/24=>0; 1.0.1.0/24=>1;" --pos-lb 29
+ ./load_balancer -l 3-7 -n 4 -- --rx "(0,0,3),(1,0,3)" --tx "(0,3),(1,3)" --w "4,5,6,7" --lpm "1.0.0.0/24=>0; 1.0.1.0/24=>1;" --pos-lb 29
There is a single I/O lcore (lcore 3) that handles RX and TX for two NIC ports (ports 0 and 1) that
handles packets to/from four worker lcores (lcores 4, 5, 6 and 7) that
diff --git a/doc/guides/sample_app_ug/multi_process.rst b/doc/guides/sample_app_ug/multi_process.rst
index 35714905..d4df2fa7 100644
--- a/doc/guides/sample_app_ug/multi_process.rst
+++ b/doc/guides/sample_app_ug/multi_process.rst
@@ -82,11 +82,11 @@ Running the Application
^^^^^^^^^^^^^^^^^^^^^^^
To run the application, start one copy of the simple_mp binary in one terminal,
-passing at least two cores in the coremask, as follows:
+passing at least two cores in the coremask/corelist, as follows:
.. code-block:: console
- ./build/simple_mp -c 3 -n 4 --proc-type=primary
+ ./build/simple_mp -l 0-1 -n 4 --proc-type=primary
For the first DPDK process run, the proc-type flag can be omitted or set to auto,
since all DPDK processes will default to being a primary instance,
@@ -95,7 +95,7 @@ The process should start successfully and display a command prompt as follows:
.. code-block:: console
- $ ./build/simple_mp -c 3 -n 4 --proc-type=primary
+ $ ./build/simple_mp -l 0-1 -n 4 --proc-type=primary
EAL: coremask set to 3
EAL: Detected lcore 0 on socket 0
EAL: Detected lcore 1 on socket 0
@@ -119,11 +119,11 @@ The process should start successfully and display a command prompt as follows:
simple_mp >
To run the secondary process to communicate with the primary process,
-again run the same binary setting at least two cores in the coremask:
+again run the same binary setting at least two cores in the coremask/corelist:
.. code-block:: console
- ./build/simple_mp -c C -n 4 --proc-type=secondary
+ ./build/simple_mp -l 2-3 -n 4 --proc-type=secondary
When running a secondary process such as that shown above, the proc-type parameter can again be specified as auto.
However, omitting the parameter altogether will cause the process to try and start as a primary rather than secondary process.
@@ -229,10 +229,10 @@ the following commands can be used (assuming run as root):
.. code-block:: console
- # ./build/symmetric_mp -c 2 -n 4 --proc-type=auto -- -p 3 --num-procs=4 --proc-id=0
- # ./build/symmetric_mp -c 4 -n 4 --proc-type=auto -- -p 3 --num-procs=4 --proc-id=1
- # ./build/symmetric_mp -c 8 -n 4 --proc-type=auto -- -p 3 --num-procs=4 --proc-id=2
- # ./build/symmetric_mp -c 10 -n 4 --proc-type=auto -- -p 3 --num-procs=4 --proc-id=3
+ # ./build/symmetric_mp -l 1 -n 4 --proc-type=auto -- -p 3 --num-procs=4 --proc-id=0
+ # ./build/symmetric_mp -l 2 -n 4 --proc-type=auto -- -p 3 --num-procs=4 --proc-id=1
+ # ./build/symmetric_mp -l 3 -n 4 --proc-type=auto -- -p 3 --num-procs=4 --proc-id=2
+ # ./build/symmetric_mp -l 4 -n 4 --proc-type=auto -- -p 3 --num-procs=4 --proc-id=3
.. note::
@@ -254,7 +254,7 @@ How the Application Works
^^^^^^^^^^^^^^^^^^^^^^^^^
The initialization calls in both the primary and secondary instances are the same for the most part,
-calling the rte_eal_init(), 1 G and 10 G driver initialization and then rte_eal_pci_probe() functions.
+calling the rte_eal_init(), 1 G and 10 G driver initialization and then rte_pci_probe() functions.
Thereafter, the initialization done depends on whether the process is configured as a primary or secondary instance.
In the primary instance, a memory pool is created for the packet mbufs and the network ports to be used are initialized -
@@ -318,8 +318,8 @@ In addition to the EAL parameters, the application- specific parameters are:
.. note::
- In the server process, a single thread, the master thread, that is, the lowest numbered lcore in the coremask, performs all packet I/O.
- If a coremask is specified with more than a single lcore bit set in it,
+ In the server process, a single thread, the master thread, that is, the lowest numbered lcore in the coremask/corelist, performs all packet I/O.
+ If a coremask/corelist is specified with more than a single lcore bit set in it,
an additional lcore will be used for a thread to periodically print packet count statistics.
Since the server application stores configuration data in shared memory, including the network ports to be used,
@@ -329,9 +329,9 @@ the following commands could be used:
.. code-block:: console
- # ./mp_server/build/mp_server -c 6 -n 4 -- -p 3 -n 2
- # ./mp_client/build/mp_client -c 8 -n 4 --proc-type=auto -- -n 0
- # ./mp_client/build/mp_client -c 10 -n 4 --proc-type=auto -- -n 1
+ # ./mp_server/build/mp_server -l 1-2 -n 4 -- -p 3 -n 2
+ # ./mp_client/build/mp_client -l 3 -n 4 --proc-type=auto -- -n 0
+ # ./mp_client/build/mp_client -l 4 -n 4 --proc-type=auto -- -n 1
.. note::
@@ -524,7 +524,7 @@ The command is as follows:
.. code-block:: console
- #./build/l2fwd_fork -c 1c -n 4 -- -p 3 -f
+ #./build/l2fwd_fork -l 2-4 -n 4 -- -p 3 -f
This example provides another -f option to specify the use of floating process.
If not specified, the example will use a pinned process to perform the L2 forwarding task.
diff --git a/doc/guides/sample_app_ug/netmap_compatibility.rst b/doc/guides/sample_app_ug/netmap_compatibility.rst
index 41f05186..030fd980 100644
--- a/doc/guides/sample_app_ug/netmap_compatibility.rst
+++ b/doc/guides/sample_app_ug/netmap_compatibility.rst
@@ -104,7 +104,7 @@ Porting Netmap applications typically involves two major steps:
Since the ``compat_netmap`` functions have the same signature as the usual libc calls, the change is trivial in most cases.
-The usual DPDK initialization code involving ``rte_eal_init()`` and ``rte_eal_pci_probe()``
+The usual DPDK initialization code involving ``rte_eal_init()`` and ``rte_pci_probe()``
has to be added to the Netmap application in the same way it is used in all other DPDK sample applications.
Please refer to the *DPDK Programmer's Guide* and example source code for details about initialization.
diff --git a/doc/guides/sample_app_ug/performance_thread.rst b/doc/guides/sample_app_ug/performance_thread.rst
index d7d9b084..a55a6246 100644
--- a/doc/guides/sample_app_ug/performance_thread.rst
+++ b/doc/guides/sample_app_ug/performance_thread.rst
@@ -107,6 +107,7 @@ The application has a number of command line options::
--tx(lcore,thread)[,(lcore,thread)]
[--enable-jumbo] [--max-pkt-len PKTLEN]] [--no-numa]
[--hash-entry-num] [--ipv6] [--no-lthreads] [--stat-lcore lcore]
+ [--parse-ptype]
Where:
@@ -142,6 +143,9 @@ Where:
* ``--stat-lcore``: optional, run CPU load stats collector on the specified
lcore.
+* ``--parse-ptype:`` optional, set to use software to analyze packet type.
+ Without this option, hardware will check the packet type.
+
The parameters of the ``--rx`` and ``--tx`` options are:
* ``--rx`` parameters
@@ -183,14 +187,14 @@ in ``--rx/--tx`` are used to affinitize threads to the selected scheduler.
For example, the following places every l-thread on different lcores::
- l3fwd-thread -c ff -n 2 -- -P -p 3 \
+ l3fwd-thread -l 0-7 -n 2 -- -P -p 3 \
--rx="(0,0,0,0)(1,0,1,1)" \
--tx="(2,0)(3,1)"
The following places RX l-threads on lcore 0 and TX l-threads on lcore 1 and 2
and so on::
- l3fwd-thread -c ff -n 2 -- -P -p 3 \
+ l3fwd-thread -l 0-7 -n 2 -- -P -p 3 \
--rx="(0,0,0,0)(1,0,0,1)" \
--tx="(1,0)(2,1)"
@@ -206,7 +210,7 @@ place every RX and TX thread on different lcores.
For example, the following places every EAL thread on different lcores::
- l3fwd-thread -c ff -n 2 -- -P -p 3 \
+ l3fwd-thread -l 0-7 -n 2 -- -P -p 3 \
--rx="(0,0,0,0)(1,0,1,1)" \
--tx="(2,0)(3,1)" \
--no-lthreads
@@ -218,7 +222,7 @@ parameter is used.
The following places RX EAL threads on lcore 0 and TX EAL threads on lcore 1
and 2 and so on::
- l3fwd-thread -c ff -n 2 --lcores="(0,1)@0,(2,3)@1" -- -P -p 3 \
+ l3fwd-thread -l 0-7 -n 2 --lcores="(0,1)@0,(2,3)@1" -- -P -p 3 \
--rx="(0,0,0,0)(1,0,1,1)" \
--tx="(2,0)(3,1)" \
--no-lthreads
@@ -232,13 +236,13 @@ and its corresponding EAL threads command line can be realized as follows:
a) Start every thread on different scheduler (1:1)::
- l3fwd-thread -c ff -n 2 -- -P -p 3 \
+ l3fwd-thread -l 0-7 -n 2 -- -P -p 3 \
--rx="(0,0,0,0)(1,0,1,1)" \
--tx="(2,0)(3,1)"
EAL thread equivalent::
- l3fwd-thread -c ff -n 2 -- -P -p 3 \
+ l3fwd-thread -l 0-7 -n 2 -- -P -p 3 \
--rx="(0,0,0,0)(1,0,1,1)" \
--tx="(2,0)(3,1)" \
--no-lthreads
@@ -247,13 +251,13 @@ b) Start all threads on one core (N:1).
Start 4 L-threads on lcore 0::
- l3fwd-thread -c ff -n 2 -- -P -p 3 \
+ l3fwd-thread -l 0-7 -n 2 -- -P -p 3 \
--rx="(0,0,0,0)(1,0,0,1)" \
--tx="(0,0)(0,1)"
Start 4 EAL threads on cpu-set 0::
- l3fwd-thread -c ff -n 2 --lcores="(0-3)@0" -- -P -p 3 \
+ l3fwd-thread -l 0-7 -n 2 --lcores="(0-3)@0" -- -P -p 3 \
--rx="(0,0,0,0)(1,0,0,1)" \
--tx="(2,0)(3,1)" \
--no-lthreads
@@ -262,14 +266,14 @@ c) Start threads on different cores (N:M).
Start 2 L-threads for RX on lcore 0, and 2 L-threads for TX on lcore 1::
- l3fwd-thread -c ff -n 2 -- -P -p 3 \
+ l3fwd-thread -l 0-7 -n 2 -- -P -p 3 \
--rx="(0,0,0,0)(1,0,0,1)" \
--tx="(1,0)(1,1)"
Start 2 EAL threads for RX on cpu-set 0, and 2 EAL threads for TX on
cpu-set 1::
- l3fwd-thread -c ff -n 2 --lcores="(0-1)@0,(2-3)@1" -- -P -p 3 \
+ l3fwd-thread -l 0-7 -n 2 --lcores="(0-1)@0,(2-3)@1" -- -P -p 3 \
--rx="(0,0,0,0)(1,0,1,1)" \
--tx="(2,0)(3,1)" \
--no-lthreads
diff --git a/doc/guides/sample_app_ug/ptpclient.rst b/doc/guides/sample_app_ug/ptpclient.rst
index 6e425b79..d3ef1d11 100644
--- a/doc/guides/sample_app_ug/ptpclient.rst
+++ b/doc/guides/sample_app_ug/ptpclient.rst
@@ -119,7 +119,7 @@ To run the example in a ``linuxapp`` environment:
.. code-block:: console
- ./build/ptpclient -c 2 -n 4 -- -p 0x1 -T 0
+ ./build/ptpclient -l 1 -n 4 -- -p 0x1 -T 0
Refer to *DPDK Getting Started Guide* for general information on running
applications and the Environment Abstraction Layer (EAL) options.
@@ -171,15 +171,8 @@ used by the application:
.. code-block:: c
- mbuf_pool = rte_mempool_create("MBUF_POOL",
- NUM_MBUFS * nb_ports,
- MBUF_SIZE,
- MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- rte_socket_id(),
- 0);
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
+ MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
Mbufs are the packet buffer structure used by DPDK. They are explained in
detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.
diff --git a/doc/guides/sample_app_ug/qos_scheduler.rst b/doc/guides/sample_app_ug/qos_scheduler.rst
index 7f9e9259..a6654cb5 100644
--- a/doc/guides/sample_app_ug/qos_scheduler.rst
+++ b/doc/guides/sample_app_ug/qos_scheduler.rst
@@ -289,7 +289,7 @@ The following is an example command with a single packet flow configuration:
.. code-block:: console
- ./qos_sched -c a2 -n 4 -- --pfc "3,2,5,7" --cfg ./profile.cfg
+ ./qos_sched -l 1,5,7 -n 4 -- --pfc "3,2,5,7" --cfg ./profile.cfg
This example uses a single packet flow configuration which creates one RX thread on lcore 5 reading
from port 3 and a worker thread on lcore 7 writing to port 2.
@@ -298,12 +298,12 @@ Another example with 2 packet flow configurations using different ports but shar
.. code-block:: console
- ./qos_sched -c c6 -n 4 -- --pfc "3,2,2,6,7" --pfc "1,0,2,6,7" --cfg ./profile.cfg
+ ./qos_sched -l 1,2,6,7 -n 4 -- --pfc "3,2,2,6,7" --pfc "1,0,2,6,7" --cfg ./profile.cfg
Note that independent cores for the packet flow configurations for each of the RX, WT and TX thread are also supported,
providing flexibility to balance the work.
-The EAL coremask is constrained to contain the default mastercore 1 and the RX, WT and TX cores only.
+The EAL coremask/corelist is constrained to contain the default mastercore 1 and the RX, WT and TX cores only.
Explanation
-----------
diff --git a/doc/guides/sample_app_ug/quota_watermark.rst b/doc/guides/sample_app_ug/quota_watermark.rst
index c56683aa..2c3a4320 100644
--- a/doc/guides/sample_app_ug/quota_watermark.rst
+++ b/doc/guides/sample_app_ug/quota_watermark.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -31,11 +31,13 @@
Quota and Watermark Sample Application
======================================
-The Quota and Watermark sample application is a simple example of packet processing using Data Plane Development Kit (DPDK) that
-showcases the use of a quota as the maximum number of packets enqueue/dequeue at a time and low and high watermarks
-to signal low and high ring usage respectively.
+The Quota and Watermark sample application is a simple example of packet
+processing using Data Plane Development Kit (DPDK) that showcases the use
+of a quota as the maximum number of packets enqueue/dequeue at a time and
+low and high thresholds, or watermarks, to signal low and high ring usage
+respectively.
-Additionally, it shows how ring watermarks can be used to feedback congestion notifications to data producers by
+Additionally, it shows how the thresholds can be used to feedback congestion notifications to data producers by
temporarily stopping processing overloaded rings and sending Ethernet flow control frames.
This sample application is split in two parts:
@@ -64,7 +66,7 @@ each stage of which being connected by rings, as shown in :numref:`figure_pipeli
An adjustable quota value controls how many packets are being moved through the pipeline per enqueue and dequeue.
-Adjustable watermark values associated with the rings control a back-off mechanism that
+Adjustable threshold values associated with the rings control a back-off mechanism that
tries to prevent the pipeline from being overloaded by:
* Stopping enqueuing on rings for which the usage has crossed the high watermark threshold
@@ -136,7 +138,7 @@ issue the following command:
.. code-block:: console
- ./qw/build/qw -c f -n 4 -- -p 5
+ ./qw/build/qw -l 0-3 -n 4 -- -p 5
Refer to the *DPDK Getting Started Guide* for general information on running applications and
the Environment Abstraction Layer (EAL) options.
@@ -157,7 +159,7 @@ To run the application in a linuxapp environment on logical core 0, issue the fo
.. code-block:: console
- ./qwctl/build/qwctl -c 1 -n 4 --proc-type=secondary
+ ./qwctl/build/qwctl -l 0 -n 4 --proc-type=secondary
Refer to the *DPDK Getting Started* Guide for general information on running applications and
the Environment Abstraction Layer (EAL) options.
@@ -202,9 +204,9 @@ Then, a call to init_dpdk(), defined in init.c, is made to initialize the poll m
/* Bind the drivers to usable devices */
- ret = rte_eal_pci_probe();
+ ret = rte_pci_probe();
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eal_pci_probe(): error %d\n", ret);
+ rte_exit(EXIT_FAILURE, "rte_pci_probe(): error %d\n", ret);
if (rte_eth_dev_count() < 2)
rte_exit(EXIT_FAILURE, "Not enough Ethernet port available\n");
@@ -216,25 +218,26 @@ in the *DPDK Getting Started Guide* and the *DPDK API Reference*.
Shared Variables Setup
^^^^^^^^^^^^^^^^^^^^^^
-The quota and low_watermark shared variables are put into an rte_memzone using a call to setup_shared_variables():
+The quota and high and low watermark shared variables are put into an rte_memzone using a call to setup_shared_variables():
.. code-block:: c
void
setup_shared_variables(void)
{
- const struct rte_memzone *qw_memzone;
-
- qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME, 2 * sizeof(int), rte_socket_id(), RTE_MEMZONE_2MB);
+ const struct rte_memzone *qw_memzone;
- if (qw_memzone == NULL)
- rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+ qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME,
+ 3 * sizeof(int), rte_socket_id(), 0);
+ if (qw_memzone == NULL)
+ rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
- quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + sizeof(int);
- }
+ quota = qw_memzone->addr;
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
+ }
-These two variables are initialized to a default value in main() and
+These three variables are initialized to a default value in main() and
can be changed while qw is running using the qwctl control program.
Application Arguments
@@ -254,32 +257,24 @@ It contains a set of mbuf objects that are used by the driver and the applicatio
.. code-block:: c
/* Create a pool of mbuf to store packets */
-
- mbuf_pool = rte_mempool_create("mbuf_pool", MBUF_PER_POOL, MBUF_SIZE, 32, sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0);
+ mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
+ MBUF_DATA_SIZE, rte_socket_id());
if (mbuf_pool == NULL)
rte_panic("%s\n", rte_strerror(rte_errno));
The rte_mempool is a generic structure used to handle pools of objects.
-In this case, it is necessary to create a pool that will be used by the driver,
-which expects to have some reserved space in the mempool structure, sizeof(struct rte_pktmbuf_pool_private) bytes.
+In this case, it is necessary to create a pool that will be used by the driver.
-The number of allocated pkt mbufs is MBUF_PER_POOL, with a size of MBUF_SIZE each.
+The number of allocated pkt mbufs is MBUF_PER_POOL, with a data room size
+of MBUF_DATA_SIZE each.
A per-lcore cache of 32 mbufs is kept.
The memory is allocated in on the master lcore's socket, but it is possible to extend this code to allocate one mbuf pool per socket.
-Two callback pointers are also given to the rte_mempool_create() function:
-
-* The first callback pointer is to rte_pktmbuf_pool_init() and is used to initialize the private data of the mempool,
- which is needed by the driver.
- This function is provided by the mbuf API, but can be copied and extended by the developer.
-
-* The second callback pointer given to rte_mempool_create() is the mbuf initializer.
-
-The default is used, that is, rte_pktmbuf_init(), which is provided in the rte_mbuf library.
-If a more complex application wants to extend the rte_pktmbuf structure for its own needs,
-a new function derived from rte_pktmbuf_init() can be created.
+The rte_pktmbuf_pool_create() function uses the default mbuf pool and mbuf
+initializers, respectively rte_pktmbuf_pool_init() and rte_pktmbuf_init().
+An advanced application may want to use the mempool API to create the
+mbuf pool with more control.
Ports Configuration and Pairing
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -357,27 +352,37 @@ This is done using the following code:
/* Process each port round robin style */
for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
- if (!is_bit_set(port_id, portmask))
- continue;
-
- ring = rings[lcore_id][port_id];
-
- if (ring_state[port_id] != RING_READY) {
- if (rte_ring_count(ring) > *low_watermark)
- continue;
- else
- ring_state[port_id] = RING_READY;
- }
-
- /* Enqueue received packets on the RX ring */
-
- nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts, *quota);
-
- ret = rte_ring_enqueue_bulk(ring, (void *) pkts, nb_rx_pkts);
- if (ret == -EDQUOT) {
- ring_state[port_id] = RING_OVERLOADED;
- send_pause_frame(port_id, 1337);
- }
+ if (!is_bit_set(port_id, portmask))
+ continue;
+
+ ring = rings[lcore_id][port_id];
+
+ if (ring_state[port_id] != RING_READY) {
+ if (rte_ring_count(ring) > *low_watermark)
+ continue;
+ else
+ ring_state[port_id] = RING_READY;
+ }
+
+ /* Enqueue received packets on the RX ring */
+ nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
+ (uint16_t) *quota);
+ ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
+ nb_rx_pkts, &free);
+ if (RING_SIZE - free > *high_watermark) {
+ ring_state[port_id] = RING_OVERLOADED;
+ send_pause_frame(port_id, 1337);
+ }
+
+ if (ret == 0) {
+
+ /*
+ * Return mbufs to the pool,
+ * effectively dropping packets
+ */
+ for (i = 0; i < nb_rx_pkts; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
}
For each port in the port mask, the corresponding ring's pointer is fetched into ring and that ring's state is checked:
@@ -398,30 +403,40 @@ This thread is running on most of the logical cores to create and arbitrarily lo
previous_lcore_id = get_previous_lcore_id(lcore_id);
for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
- if (!is_bit_set(port_id, portmask))
- continue;
-
- tx = rings[lcore_id][port_id];
- rx = rings[previous_lcore_id][port_id];
- if (ring_state[port_id] != RING_READY) {
- if (rte_ring_count(tx) > *low_watermark)
- continue;
- else
- ring_state[port_id] = RING_READY;
- }
-
- /* Dequeue up to quota mbuf from rx */
-
- nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts, *quota);
-
- if (unlikely(nb_dq_pkts < 0))
- continue;
-
- /* Enqueue them on tx */
-
- ret = rte_ring_enqueue_bulk(tx, pkts, nb_dq_pkts);
- if (ret == -EDQUOT)
- ring_state[port_id] = RING_OVERLOADED;
+ if (!is_bit_set(port_id, portmask))
+ continue;
+
+ tx = rings[lcore_id][port_id];
+ rx = rings[previous_lcore_id][port_id];
+
+ if (ring_state[port_id] != RING_READY) {
+ if (rte_ring_count(tx) > *low_watermark)
+ continue;
+ else
+ ring_state[port_id] = RING_READY;
+ }
+
+ /* Dequeue up to quota mbuf from rx */
+ nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
+ *quota, NULL);
+ if (unlikely(nb_dq_pkts < 0))
+ continue;
+
+ /* Enqueue them on tx */
+ ret = rte_ring_enqueue_bulk(tx, pkts,
+ nb_dq_pkts, &free);
+ if (RING_SIZE - free > *high_watermark)
+ ring_state[port_id] = RING_OVERLOADED;
+
+ if (ret == 0) {
+
+ /*
+ * Return mbufs to the pool,
+ * effectively dropping packets
+ */
+ for (i = 0; i < nb_dq_pkts; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
}
The thread's logic works mostly like receive_stage(),
@@ -490,5 +505,6 @@ low_watermark from the rte_memzone previously created by qw.
quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + sizeof(int);
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}
diff --git a/doc/guides/sample_app_ug/rxtx_callbacks.rst b/doc/guides/sample_app_ug/rxtx_callbacks.rst
index 9df57ed5..8d1bb86f 100644
--- a/doc/guides/sample_app_ug/rxtx_callbacks.rst
+++ b/doc/guides/sample_app_ug/rxtx_callbacks.rst
@@ -85,7 +85,7 @@ To run the example in a ``linuxapp`` environment:
.. code-block:: console
- ./build/rxtx_callbacks -c 2 -n 4
+ ./build/rxtx_callbacks -l 1 -n 4
Refer to *DPDK Getting Started Guide* for general information on running
applications and the Environment Abstraction Layer (EAL) options.
diff --git a/doc/guides/sample_app_ug/server_node_efd.rst b/doc/guides/sample_app_ug/server_node_efd.rst
new file mode 100644
index 00000000..c2a5f20a
--- /dev/null
+++ b/doc/guides/sample_app_ug/server_node_efd.rst
@@ -0,0 +1,494 @@
+.. BSD LICENSE
+ Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Server-Node EFD Sample Application
+==================================
+
+This sample application demonstrates the use of EFD library as a flow-level
+load balancer, for more information about the EFD Library please refer to the
+DPDK programmer's guide.
+
+This sample application is a variant of the
+:ref:`client-server sample application <multi_process_app>`
+where a specific target node is specified for every and each flow
+(not in a round-robin fashion as the original load balancing sample application).
+
+Overview
+--------
+
+The architecture of the EFD flow-based load balancer sample application is
+presented in the following figure.
+
+.. _figure_efd_sample_app_overview:
+
+.. figure:: img/server_node_efd.*
+
+ Using EFD as a Flow-Level Load Balancer
+
+As shown in :numref:`figure_efd_sample_app_overview`,
+the sample application consists of a front-end node (server)
+using the EFD library to create a load-balancing table for flows,
+for each flow a target backend worker node is specified. The EFD table does not
+store the flow key (unlike a regular hash table), and hence, it can
+individually load-balance millions of flows (number of targets * maximum number
+of flows fit in a flow table per target) while still fitting in CPU cache.
+
+It should be noted that although they are referred to as nodes, the frontend
+server and worker nodes are processes running on the same platform.
+
+Front-end Server
+~~~~~~~~~~~~~~~~
+
+Upon initializing, the frontend server node (process) creates a flow
+distributor table (based on the EFD library) which is populated with flow
+information and its intended target node.
+
+The sample application assigns a specific target node_id (process) for each of
+the IP destination addresses as follows:
+
+.. code-block:: c
+
+ node_id = i % num_nodes; /* Target node id is generated */
+ ip_dst = rte_cpu_to_be_32(i); /* Specific ip destination address is
+ assigned to this target node */
+
+then the pair of <key,target> is inserted into the flow distribution table.
+
+The main loop of the server process receives a burst of packets, then for
+each packet, a flow key (IP destination address) is extracted. The flow
+distributor table is looked up and the target node id is returned. Packets are
+then enqueued to the specified target node id.
+
+It should be noted that flow distributor table is not a membership test table.
+I.e. if the key has already been inserted the target node id will be correct,
+but for new keys the flow distributor table will return a value (which can be
+valid).
+
+Backend Worker Nodes
+~~~~~~~~~~~~~~~~~~~~
+
+Upon initializing, the worker node (process) creates a flow table (a regular
+hash table that stores the key default size 1M flows) which is populated with
+only the flow information that is serviced at this node. This flow key is
+essential to point out new keys that have not been inserted before.
+
+The worker node's main loop is simply receiving packets then doing a hash table
+lookup. If a match occurs then statistics are updated for flows serviced by
+this node. If no match is found in the local hash table then this indicates
+that this is a new flow, which is dropped.
+
+
+Compiling the Application
+-------------------------
+
+The sequence of steps used to build the application is:
+
+#. Export the required environment variables:
+
+ .. code-block:: console
+
+ export RTE_SDK=/path/to/rte_sdk
+ export RTE_TARGET=x86_64-native-linuxapp-gcc
+
+#. Build the application executable file:
+
+ .. code-block:: console
+
+ cd ${RTE_SDK}/examples/server_node_efd/
+ make
+
+ For more details on how to build the DPDK libraries and sample
+ applications,
+ please refer to the *DPDK Getting Started Guide.*
+
+
+Running the Application
+-----------------------
+
+The application has two binaries to be run: the front-end server
+and the back-end node.
+
+The frontend server (server) has the following command line options::
+
+ ./server [EAL options] -- -p PORTMASK -n NUM_NODES -f NUM_FLOWS
+
+Where,
+
+* ``-p PORTMASK:`` Hexadecimal bitmask of ports to configure
+* ``-n NUM_NODES:`` Number of back-end nodes that will be used
+* ``-f NUM_FLOWS:`` Number of flows to be added in the EFD table (1 million, by default)
+
+The back-end node (node) has the following command line options::
+
+ ./node [EAL options] -- -n NODE_ID
+
+Where,
+
+* ``-n NODE_ID:`` Node ID, which cannot be equal or higher than NUM_MODES
+
+
+First, the server app must be launched, with the number of nodes that will be run.
+Once it has been started, the node instances can be run, with different NODE_ID.
+These instances have to be run as secondary processes, with ``--proc-type=secondary``
+in the EAL options, which will attach to the primary process memory, and therefore,
+they can access the queues created by the primary process to distribute packets.
+
+To successfully run the application, the command line used to start the
+application has to be in sync with the traffic flows configured on the traffic
+generator side.
+
+For examples of application command lines and traffic generator flows, please
+refer to the DPDK Test Report. For more details on how to set up and run the
+sample applications provided with DPDK package, please refer to the
+:ref:`DPDK Getting Started Guide for Linux <linux_gsg>` and
+:ref:`DPDK Getting Started Guide for FreeBSD <freebsd_gsg>`.
+
+
+Explanation
+-----------
+
+As described in previous sections, there are two processes in this example.
+
+The first process, the front-end server, creates and populates the EFD table,
+which is used to distribute packets to nodes, which the number of flows
+specified in the command line (1 million, by default).
+
+
+.. code-block:: c
+
+ static void
+ create_efd_table(void)
+ {
+ uint8_t socket_id = rte_socket_id();
+
+ /* create table */
+ efd_table = rte_efd_create("flow table", num_flows * 2, sizeof(uint32_t),
+ 1 << socket_id, socket_id);
+
+ if (efd_table == NULL)
+ rte_exit(EXIT_FAILURE, "Problem creating the flow table\n");
+ }
+
+ static void
+ populate_efd_table(void)
+ {
+ unsigned int i;
+ int32_t ret;
+ uint32_t ip_dst;
+ uint8_t socket_id = rte_socket_id();
+ uint64_t node_id;
+
+ /* Add flows in table */
+ for (i = 0; i < num_flows; i++) {
+ node_id = i % num_nodes;
+
+ ip_dst = rte_cpu_to_be_32(i);
+ ret = rte_efd_update(efd_table, socket_id,
+ (void *)&ip_dst, (efd_value_t)node_id);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Unable to add entry %u in "
+ "EFD table\n", i);
+ }
+
+ printf("EFD table: Adding 0x%x keys\n", num_flows);
+ }
+
+After initialization, packets are received from the enabled ports, and the IPv4
+address from the packets is used as a key to look up in the EFD table,
+which tells the node where the packet has to be distributed.
+
+.. code-block:: c
+
+ static void
+ process_packets(uint32_t port_num __rte_unused, struct rte_mbuf *pkts[],
+ uint16_t rx_count, unsigned int socket_id)
+ {
+ uint16_t i;
+ uint8_t node;
+ efd_value_t data[EFD_BURST_MAX];
+ const void *key_ptrs[EFD_BURST_MAX];
+
+ struct ipv4_hdr *ipv4_hdr;
+ uint32_t ipv4_dst_ip[EFD_BURST_MAX];
+
+ for (i = 0; i < rx_count; i++) {
+ /* Handle IPv4 header.*/
+ ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
+ key_ptrs[i] = (void *)&ipv4_dst_ip[i];
+ }
+
+ rte_efd_lookup_bulk(efd_table, socket_id, rx_count,
+ (const void **) key_ptrs, data);
+ for (i = 0; i < rx_count; i++) {
+ node = (uint8_t) ((uintptr_t)data[i]);
+
+ if (node >= num_nodes) {
+ /*
+ * Node is out of range, which means that
+ * flow has not been inserted
+ */
+ flow_dist_stats.drop++;
+ rte_pktmbuf_free(pkts[i]);
+ } else {
+ flow_dist_stats.distributed++;
+ enqueue_rx_packet(node, pkts[i]);
+ }
+ }
+
+ for (i = 0; i < num_nodes; i++)
+ flush_rx_queue(i);
+ }
+
+The burst of packets received is enqueued in temporary buffers (per node),
+and enqueued in the shared ring between the server and the node.
+After this, a new burst of packets is received and this process is
+repeated infinitely.
+
+.. code-block:: c
+
+ static void
+ flush_rx_queue(uint16_t node)
+ {
+ uint16_t j;
+ struct node *cl;
+
+ if (cl_rx_buf[node].count == 0)
+ return;
+
+ cl = &nodes[node];
+ if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
+ cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count){
+ for (j = 0; j < cl_rx_buf[node].count; j++)
+ rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
+ cl->stats.rx_drop += cl_rx_buf[node].count;
+ } else
+ cl->stats.rx += cl_rx_buf[node].count;
+
+ cl_rx_buf[node].count = 0;
+ }
+
+The second process, the back-end node, receives the packets from the shared
+ring with the server and send them out, if they belong to the node.
+
+At initialization, it attaches to the server process memory, to have
+access to the shared ring, parameters and statistics.
+
+.. code-block:: c
+
+ rx_ring = rte_ring_lookup(get_rx_queue_name(node_id));
+ if (rx_ring == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot get RX ring - "
+ "is server process running?\n");
+
+ mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
+ if (mp == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");
+
+ mz = rte_memzone_lookup(MZ_SHARED_INFO);
+ if (mz == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
+ info = mz->addr;
+ tx_stats = &(info->tx_stats[node_id]);
+ filter_stats = &(info->filter_stats[node_id]);
+
+Then, the hash table that contains the flows that will be handled
+by the node is created and populated.
+
+.. code-block:: c
+
+ static struct rte_hash *
+ create_hash_table(const struct shared_info *info)
+ {
+ uint32_t num_flows_node = info->num_flows / info->num_nodes;
+ char name[RTE_HASH_NAMESIZE];
+ struct rte_hash *h;
+
+ /* create table */
+ struct rte_hash_parameters hash_params = {
+ .entries = num_flows_node * 2, /* table load = 50% */
+ .key_len = sizeof(uint32_t), /* Store IPv4 dest IP address */
+ .socket_id = rte_socket_id(),
+ .hash_func_init_val = 0,
+ };
+
+ snprintf(name, sizeof(name), "hash_table_%d", node_id);
+ hash_params.name = name;
+ h = rte_hash_create(&hash_params);
+
+ if (h == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Problem creating the hash table for node %d\n",
+ node_id);
+ return h;
+ }
+
+ static void
+ populate_hash_table(const struct rte_hash *h, const struct shared_info *info)
+ {
+ unsigned int i;
+ int32_t ret;
+ uint32_t ip_dst;
+ uint32_t num_flows_node = 0;
+ uint64_t target_node;
+
+ /* Add flows in table */
+ for (i = 0; i < info->num_flows; i++) {
+ target_node = i % info->num_nodes;
+ if (target_node != node_id)
+ continue;
+
+ ip_dst = rte_cpu_to_be_32(i);
+
+ ret = rte_hash_add_key(h, (void *) &ip_dst);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Unable to add entry %u "
+ "in hash table\n", i);
+ else
+ num_flows_node++;
+
+ }
+
+ printf("Hash table: Adding 0x%x keys\n", num_flows_node);
+ }
+
+After initialization, packets are dequeued from the shared ring
+(from the server) and, like in the server process,
+the IPv4 address from the packets is used as a key to look up in the hash table.
+If there is a hit, packet is stored in a buffer, to be eventually transmitted
+in one of the enabled ports. If key is not there, packet is dropped, since the
+flow is not handled by the node.
+
+.. code-block:: c
+
+ static inline void
+ handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
+ {
+ struct ipv4_hdr *ipv4_hdr;
+ uint32_t ipv4_dst_ip[PKT_READ_SIZE];
+ const void *key_ptrs[PKT_READ_SIZE];
+ unsigned int i;
+ int32_t positions[PKT_READ_SIZE] = {0};
+
+ for (i = 0; i < num_packets; i++) {
+ /* Handle IPv4 header.*/
+ ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
+ key_ptrs[i] = &ipv4_dst_ip[i];
+ }
+ /* Check if packets belongs to any flows handled by this node */
+ rte_hash_lookup_bulk(h, key_ptrs, num_packets, positions);
+
+ for (i = 0; i < num_packets; i++) {
+ if (likely(positions[i] >= 0)) {
+ filter_stats->passed++;
+ transmit_packet(bufs[i]);
+ } else {
+ filter_stats->drop++;
+ /* Drop packet, as flow is not handled by this node */
+ rte_pktmbuf_free(bufs[i]);
+ }
+ }
+ }
+
+Finally, note that both processes updates statistics, such as transmitted, received
+and dropped packets, which are shown and refreshed by the server app.
+
+.. code-block:: c
+
+ static void
+ do_stats_display(void)
+ {
+ unsigned int i, j;
+ const char clr[] = {27, '[', '2', 'J', '\0'};
+ const char topLeft[] = {27, '[', '1', ';', '1', 'H', '\0'};
+ uint64_t port_tx[RTE_MAX_ETHPORTS], port_tx_drop[RTE_MAX_ETHPORTS];
+ uint64_t node_tx[MAX_NODES], node_tx_drop[MAX_NODES];
+
+ /* to get TX stats, we need to do some summing calculations */
+ memset(port_tx, 0, sizeof(port_tx));
+ memset(port_tx_drop, 0, sizeof(port_tx_drop));
+ memset(node_tx, 0, sizeof(node_tx));
+ memset(node_tx_drop, 0, sizeof(node_tx_drop));
+
+ for (i = 0; i < num_nodes; i++) {
+ const struct tx_stats *tx = &info->tx_stats[i];
+
+ for (j = 0; j < info->num_ports; j++) {
+ const uint64_t tx_val = tx->tx[info->id[j]];
+ const uint64_t drop_val = tx->tx_drop[info->id[j]];
+
+ port_tx[j] += tx_val;
+ port_tx_drop[j] += drop_val;
+ node_tx[i] += tx_val;
+ node_tx_drop[i] += drop_val;
+ }
+ }
+
+ /* Clear screen and move to top left */
+ printf("%s%s", clr, topLeft);
+
+ printf("PORTS\n");
+ printf("-----\n");
+ for (i = 0; i < info->num_ports; i++)
+ printf("Port %u: '%s'\t", (unsigned int)info->id[i],
+ get_printable_mac_addr(info->id[i]));
+ printf("\n\n");
+ for (i = 0; i < info->num_ports; i++) {
+ printf("Port %u - rx: %9"PRIu64"\t"
+ "tx: %9"PRIu64"\n",
+ (unsigned int)info->id[i], info->rx_stats.rx[i],
+ port_tx[i]);
+ }
+
+ printf("\nSERVER\n");
+ printf("-----\n");
+ printf("distributed: %9"PRIu64", drop: %9"PRIu64"\n",
+ flow_dist_stats.distributed, flow_dist_stats.drop);
+
+ printf("\nNODES\n");
+ printf("-------\n");
+ for (i = 0; i < num_nodes; i++) {
+ const unsigned long long rx = nodes[i].stats.rx;
+ const unsigned long long rx_drop = nodes[i].stats.rx_drop;
+ const struct filter_stats *filter = &info->filter_stats[i];
+
+ printf("Node %2u - rx: %9llu, rx_drop: %9llu\n"
+ " tx: %9"PRIu64", tx_drop: %9"PRIu64"\n"
+ " filter_passed: %9"PRIu64", "
+ "filter_drop: %9"PRIu64"\n",
+ i, rx, rx_drop, node_tx[i], node_tx_drop[i],
+ filter->passed, filter->drop);
+ }
+
+ printf("\n");
+ }
diff --git a/doc/guides/sample_app_ug/skeleton.rst b/doc/guides/sample_app_ug/skeleton.rst
index e832c132..c39b0332 100644
--- a/doc/guides/sample_app_ug/skeleton.rst
+++ b/doc/guides/sample_app_ug/skeleton.rst
@@ -74,7 +74,7 @@ To run the example in a ``linuxapp`` environment:
.. code-block:: console
- ./build/basicfwd -c 2 -n 4
+ ./build/basicfwd -l 1 -n 4
Refer to *DPDK Getting Started Guide* for general information on running
applications and the Environment Abstraction Layer (EAL) options.
diff --git a/doc/guides/sample_app_ug/tep_termination.rst b/doc/guides/sample_app_ug/tep_termination.rst
index 88e08cf9..087823b1 100644
--- a/doc/guides/sample_app_ug/tep_termination.rst
+++ b/doc/guides/sample_app_ug/tep_termination.rst
@@ -169,7 +169,7 @@ Running the Sample Code
.. code-block:: console
- user@target:~$ ./build/app/tep_termination -c f -n 4 --huge-dir /mnt/huge --
+ user@target:~$ ./build/app/tep_termination -l 0-3 -n 4 --huge-dir /mnt/huge --
-p 0x1 --dev-basename tep-termination --nb-devices 4
--udp-port 4789 --filter-type 1
@@ -191,7 +191,7 @@ The default value is 2.
.. code-block:: console
- user@target:~$ ./build/app/tep_termination -c f -n 4 --huge-dir /mnt/huge --
+ user@target:~$ ./build/app/tep_termination -l 0-3 -n 4 --huge-dir /mnt/huge --
--nb-devices 2
**Tunneling UDP port.**
@@ -201,7 +201,7 @@ The default value is 4789.
.. code-block:: console
- user@target:~$ ./build/app/tep_termination -c f -n 4 --huge-dir /mnt/huge --
+ user@target:~$ ./build/app/tep_termination -l 0-3 -n 4 --huge-dir /mnt/huge --
--nb-devices 2 --udp-port 4789
**Filter Type.**
@@ -212,7 +212,7 @@ The default value is 1, which means the filter type of inner MAC and tenant ID i
.. code-block:: console
- user@target:~$ ./build/app/tep_termination -c f -n 4 --huge-dir /mnt/huge --
+ user@target:~$ ./build/app/tep_termination -l 0-3 -n 4 --huge-dir /mnt/huge --
--nb-devices 2 --udp-port 4789 --filter-type 1
**TX Checksum.**
@@ -222,7 +222,7 @@ The default value is 0, which means the checksum offload is disabled.
.. code-block:: console
- user@target:~$ ./build/app/tep_termination -c f -n 4 --huge-dir /mnt/huge --
+ user@target:~$ ./build/app/tep_termination -l 0-3 -n 4 --huge-dir /mnt/huge --
--nb-devices 2 --tx-checksum
**TCP segment size.**
@@ -232,7 +232,7 @@ The default value is 0, which means TSO offload is disabled.
.. code-block:: console
- user@target:~$ ./build/app/tep_termination -c f -n 4 --huge-dir /mnt/huge --
+ user@target:~$ ./build/app/tep_termination -l 0-3 -n 4 --huge-dir /mnt/huge --
--tx-checksum --tso-segsz 800
**Decapsulation option.**
@@ -242,7 +242,7 @@ The default value is 1.
.. code-block:: console
- user@target:~$ ./build/app/tep_termination -c f -n 4 --huge-dir /mnt/huge --
+ user@target:~$ ./build/app/tep_termination -l 0-3 -n 4 --huge-dir /mnt/huge --
--nb-devices 4 --udp-port 4789 --decap 1
**Encapsulation option.**
@@ -252,7 +252,7 @@ The default value is 1.
.. code-block:: console
- user@target:~$ ./build/app/tep_termination -c f -n 4 --huge-dir /mnt/huge --
+ user@target:~$ ./build/app/tep_termination -l 0-3 -n 4 --huge-dir /mnt/huge --
--nb-devices 4 --udp-port 4789 --encap 1
diff --git a/doc/guides/sample_app_ug/test_pipeline.rst b/doc/guides/sample_app_ug/test_pipeline.rst
index fd288139..95c7e0fc 100644
--- a/doc/guides/sample_app_ug/test_pipeline.rst
+++ b/doc/guides/sample_app_ug/test_pipeline.rst
@@ -90,7 +90,7 @@ The application execution command line is:
./test-pipeline [EAL options] -- -p PORTMASK --TABLE_TYPE
-The -c EAL CPU core mask option has to contain exactly 3 CPU cores.
+The -c or -l EAL CPU coremask/corelist option has to contain exactly 3 CPU cores.
The first CPU core in the core mask is assigned for core A, the second for core B and the third for core C.
The PORTMASK parameter must contain 2 or 4 ports.
diff --git a/doc/guides/sample_app_ug/timer.rst b/doc/guides/sample_app_ug/timer.rst
index e4de359d..00b69532 100644
--- a/doc/guides/sample_app_ug/timer.rst
+++ b/doc/guides/sample_app_ug/timer.rst
@@ -65,7 +65,7 @@ To run the example in linuxapp environment:
.. code-block:: console
- $ ./build/timer -c f -n 4
+ $ ./build/timer -l 0-3 -n 4
Refer to the *DPDK Getting Started Guide* for general information on running applications and
the Environment Abstraction Layer (EAL) options.
diff --git a/doc/guides/sample_app_ug/vhost.rst b/doc/guides/sample_app_ug/vhost.rst
index 1f6d0d96..a2a3909e 100644
--- a/doc/guides/sample_app_ug/vhost.rst
+++ b/doc/guides/sample_app_ug/vhost.rst
@@ -75,7 +75,7 @@ Start the vswitch example
.. code-block:: console
- ./vhost-switch -c f -n 4 --socket-mem 1024 \
+ ./vhost-switch -l 0-3 -n 4 --socket-mem 1024 \
-- --socket-file /tmp/sock0 --client \
...
@@ -115,13 +115,13 @@ could be done by:
.. code-block:: console
modprobe uio_pci_generic
- $RTE_SDK/tools/dpdk-devbind.py -b=uio_pci_generic 0000:00:04.0
+ $RTE_SDK/usertools/dpdk-devbind.py -b=uio_pci_generic 0000:00:04.0
Then start testpmd for packet forwarding testing.
.. code-block:: console
- ./x86_64-native-gcc/app/testpmd -c 0x3 -- -i
+ ./x86_64-native-gcc/app/testpmd -l 0-1 -- -i
> start tx_first
Inject packets
diff --git a/doc/guides/sample_app_ug/vm_power_management.rst b/doc/guides/sample_app_ug/vm_power_management.rst
index aa02c156..05c26b03 100644
--- a/doc/guides/sample_app_ug/vm_power_management.rst
+++ b/doc/guides/sample_app_ug/vm_power_management.rst
@@ -220,7 +220,7 @@ on cores 0 & 1 on a system with 4 memory channels:
.. code-block:: console
- ./build/vm_power_mgr -c 0x3 -n 4
+ ./build/vm_power_mgr -l 0-1 -n 4
After successful initialization the user is presented with VM Power Manager CLI:
@@ -342,7 +342,7 @@ for example to run on cores 0,1,2,3 on a system with 4 memory channels:
.. code-block:: console
- ./build/guest_vm_power_mgr -c 0xf -n 4
+ ./build/guest_vm_power_mgr -l 0-3 -n 4
After successful initialization the user is presented with VM Power Manager Guest CLI:
diff --git a/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst b/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst
index bf55fdad..70e1d19e 100644
--- a/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst
+++ b/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst
@@ -114,7 +114,7 @@ To run the example in a linuxapp environment:
.. code-block:: console
- user@target:~$ ./build/vmdq_dcb -c f -n 4 -- -p 0x3 --nb-pools 32 --nb-tcs 4
+ user@target:~$ ./build/vmdq_dcb -l 0-3 -n 4 -- -p 0x3 --nb-pools 32 --nb-tcs 4
Refer to the *DPDK Getting Started Guide* for general information on running applications and
the Environment Abstraction Layer (EAL) options.
diff --git a/doc/guides/testpmd_app_ug/index.rst b/doc/guides/testpmd_app_ug/index.rst
index 1c39a177..61a91218 100644
--- a/doc/guides/testpmd_app_ug/index.rst
+++ b/doc/guides/testpmd_app_ug/index.rst
@@ -28,6 +28,8 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.. _testpmd_ug:
+
Testpmd Application User Guide
==============================
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index d7c51209..2a43214e 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -72,7 +72,7 @@ See the DPDK Getting Started Guides for more information on these options.
* ``-b, --pci-blacklist domain:bus:devid.func``
- Blacklist a PCI devise to prevent EAL from using it. Multiple -b options are allowed.
+ Blacklist a PCI device to prevent EAL from using it. Multiple -b options are allowed.
* ``-d LIB.so``
@@ -165,7 +165,7 @@ They must be separated from the EAL options, shown in the previous section, with
.. code-block:: console
- sudo ./testpmd -c 0xF -n 4 -- -i --portmask=0x1 --nb-cores=2
+ sudo ./testpmd -l 0-3 -n 4 -- -i --portmask=0x1 --nb-cores=2
The commandline options are:
@@ -211,7 +211,12 @@ The commandline options are:
* ``--numa``
- Enable NUMA-aware allocation of RX/TX rings and of RX memory buffers (mbufs).
+ Enable NUMA-aware allocation of RX/TX rings and of RX memory buffers
+ (mbufs). [Default setting]
+
+* ``--no-numa``
+
+ Disable NUMA-aware allocation of RX/TX rings and of RX memory buffers (mbufs).
* ``--port-numa-config=(port,socket)[,(port,socket)]``
@@ -281,9 +286,9 @@ The commandline options are:
In perfect filter mode, when a rule is added with queue = -1, the packet will be enqueued into the RX drop-queue.
If the drop-queue does not exist, the packet is dropped. The default value is N=127.
-* ``--crc-strip``
+* ``--disable-crc-strip``
- Enable hardware CRC stripping.
+ Disable hardware CRC stripping.
* ``--enable-lro``
@@ -460,3 +465,25 @@ The commandline options are:
* ``--disable-link-check``
Disable check on link status when starting/stopping ports.
+
+* ``--no-lsc-interrupt``
+
+ Disable LSC interrupts for all ports, even those supporting it.
+
+* ``--no-rmv-interrupt``
+
+ Disable RMV interrupts for all ports, even those supporting it.
+
+* ``--bitrate-stats=N``
+
+ Set the logical core N to perform bitrate calculation.
+
+* ``--print-event <unknown|intr_lsc|queue_state|intr_reset|vf_mbox|macsec|intr_rmv|all>``
+
+ Enable printing the occurrence of the designated event. Using all will
+ enable all of them.
+
+* ``--mask-event <unknown|intr_lsc|queue_state|intr_reset|vf_mbox|macsec|intr_rmv|all>``
+
+ Disable printing the occurrence of the designated event. Using all will
+ disable all of them.
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index f1c269a3..0e50c107 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -51,10 +51,10 @@ If you type a partial command and hit ``<TAB>`` you get a list of the available
testpmd> show port <TAB>
- info [Mul-choice STRING]: show|clear port info|stats|xstats|fdir|stat_qmap|dcb_tc X
- info [Mul-choice STRING]: show|clear port info|stats|xstats|fdir|stat_qmap|dcb_tc all
- stats [Mul-choice STRING]: show|clear port info|stats|xstats|fdir|stat_qmap|dcb_tc X
- stats [Mul-choice STRING]: show|clear port info|stats|xstats|fdir|stat_qmap|dcb_tc all
+ info [Mul-choice STRING]: show|clear port info|stats|xstats|fdir|stat_qmap|dcb_tc|cap X
+ info [Mul-choice STRING]: show|clear port info|stats|xstats|fdir|stat_qmap|dcb_tc|cap all
+ stats [Mul-choice STRING]: show|clear port info|stats|xstats|fdir|stat_qmap|dcb_tc|cap X
+ stats [Mul-choice STRING]: show|clear port info|stats|xstats|fdir|stat_qmap|dcb_tc|cap all
...
@@ -86,6 +86,61 @@ These are divided into sections and can be accessed using help, help section or
help all : All of the above sections.
+Command File Functions
+----------------------
+
+To facilitate loading large number of commands or to avoid cutting and pasting where not
+practical or possible testpmd supports alternative methods for executing commands.
+
+* If started with the ``--cmdline-file=FILENAME`` command line argument testpmd
+ will execute all CLI commands contained within the file immediately before
+ starting packet forwarding or entering interactive mode.
+
+.. code-block:: console
+
+ ./testpmd -n4 -r2 ... -- -i --cmdline-file=/home/ubuntu/flow-create-commands.txt
+ Interactive-mode selected
+ CLI commands to be read from /home/ubuntu/flow-create-commands.txt
+ Configuring Port 0 (socket 0)
+ Port 0: 7C:FE:90:CB:74:CE
+ Configuring Port 1 (socket 0)
+ Port 1: 7C:FE:90:CB:74:CA
+ Checking link statuses...
+ Port 0 Link Up - speed 10000 Mbps - full-duplex
+ Port 1 Link Up - speed 10000 Mbps - full-duplex
+ Done
+ Flow rule #0 created
+ Flow rule #1 created
+ ...
+ ...
+ Flow rule #498 created
+ Flow rule #499 created
+ Read all CLI commands from /home/ubuntu/flow-create-commands.txt
+ testpmd>
+
+
+* At run-time additional commands can be loaded in bulk by invoking the ``load FILENAME``
+ command.
+
+.. code-block:: console
+
+ testpmd> load /home/ubuntu/flow-create-commands.txt
+ Flow rule #0 created
+ Flow rule #1 created
+ ...
+ ...
+ Flow rule #498 created
+ Flow rule #499 created
+ Read all CLI commands from /home/ubuntu/flow-create-commands.txt
+ testpmd>
+
+
+In all cases output from any included command will be displayed as standard output.
+Execution will continue until the end of the file is reached regardless of
+whether any errors occur. The end user must examine the output to determine if
+any failures occurred.
+
+
Control Functions
-----------------
@@ -131,7 +186,7 @@ show port
Display information for a given port or all ports::
- testpmd> show port (info|stats|xstats|fdir|stat_qmap|dcb_tc) (port_id|all)
+ testpmd> show port (info|stats|xstats|fdir|stat_qmap|dcb_tc|cap) (port_id|all)
The available information categories are:
@@ -147,6 +202,8 @@ The available information categories are:
* ``dcb_tc``: DCB information such as TC mapping.
+* ``cap``: Supported offload capabilities.
+
For example:
.. code-block:: console
@@ -323,6 +380,19 @@ For example::
testpmd> read txd 0 0 4
0x00000001 - 0x24C3C440 / 0x000F0000 - 0x2330003C
+show vf stats
+~~~~~~~~~~~~~
+
+Display VF statistics::
+
+ testpmd> show vf stats (port_id) (vf_id)
+
+clear vf stats
+~~~~~~~~~~~~~~
+
+Reset VF statistics::
+
+ testpmd> clear vf stats (port_id) (vf_id)
Configuration Functions
-----------------------
@@ -507,6 +577,45 @@ Set mac antispoof for a VF from the PF::
testpmd> set vf mac antispoof (port_id) (vf_id) (on|off)
+set macsec offload
+~~~~~~~~~~~~~~~~~~
+
+Enable/disable MACsec offload::
+
+ testpmd> set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)
+ testpmd> set macsec offload (port_id) off
+
+set macsec sc
+~~~~~~~~~~~~~
+
+Configure MACsec secure connection (SC)::
+
+ testpmd> set macsec sc (tx|rx) (port_id) (mac) (pi)
+
+.. note::
+
+ The pi argument is ignored for tx.
+ Check the NIC Datasheet for hardware limits.
+
+set macsec sa
+~~~~~~~~~~~~~
+
+Configure MACsec secure association (SA)::
+
+ testpmd> set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)
+
+.. note::
+
+ The IDX value must be 0 or 1.
+ Check the NIC Datasheet for hardware limits.
+
+set broadcast mode (for VF)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set broadcast mode for a VF from the PF::
+
+ testpmd> set vf broadcast (port_id) (vf_id) (on|off)
+
vlan set strip
~~~~~~~~~~~~~~
@@ -535,6 +644,13 @@ Set VLAN insert for a VF from the PF::
testpmd> set vf vlan insert (port_id) (vf_id) (vlan_id)
+vlan set tag (for VF)
+~~~~~~~~~~~~~~~~~~~~~
+
+Set VLAN tag for a VF from the PF::
+
+ testpmd> set vf vlan tag (port_id) (vf_id) (on|off)
+
vlan set antispoof (for VF)
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -789,6 +905,13 @@ Add an alternative MAC address for a VF to a port::
testpmd> mac_add add port (port_id) vf (vf_id) (XX:XX:XX:XX:XX:XX)
+mac_addr set
+~~~~~~~~~~~~
+
+Set the default MAC address for a port::
+
+ testpmd> mac_addr set (port_id) (XX:XX:XX:XX:XX:XX)
+
mac_addr set (for VF)
~~~~~~~~~~~~~~~~~~~~~
@@ -820,6 +943,59 @@ Set the allmulti mode for a port or for all ports::
Same as the ifconfig (8) option. Controls how multicast packets are handled.
+set promisc (for VF)
+~~~~~~~~~~~~~~~~~~~~
+
+Set the unicast promiscuous mode for a VF from PF.
+It's supported by Intel i40e NICs now.
+In promiscuous mode packets are not dropped if they aren't for the specified MAC address::
+
+ testpmd> set vf promisc (port_id) (vf_id) (on|off)
+
+set allmulticast (for VF)
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set the multicast promiscuous mode for a VF from PF.
+It's supported by Intel i40e NICs now.
+In promiscuous mode packets are not dropped if they aren't for the specified MAC address::
+
+ testpmd> set vf allmulti (port_id) (vf_id) (on|off)
+
+set tx max bandwidth (for VF)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set TX max absolute bandwidth (Mbps) for a VF from PF::
+
+ testpmd> set vf tx max-bandwidth (port_id) (vf_id) (max_bandwidth)
+
+set tc tx min bandwidth (for VF)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set all TCs' TX min relative bandwidth (%) for a VF from PF::
+
+ testpmd> set vf tc tx min-bandwidth (port_id) (vf_id) (bw1, bw2, ...)
+
+set tc tx max bandwidth (for VF)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set a TC's TX max absolute bandwidth (Mbps) for a VF from PF::
+
+ testpmd> set vf tc tx max-bandwidth (port_id) (vf_id) (tc_no) (max_bandwidth)
+
+set tc strict link priority mode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set some TCs' strict link priority mode on a physical port::
+
+ testpmd> set tx strict-link-priority (port_id) (tc_bitmap)
+
+set tc tx min bandwidth
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Set all TCs' TX min relative bandwidth (%) globally for all PF and VFs::
+
+ testpmd> set tc tx min-bandwidth (port_id) (bw1, bw2, ...)
+
set flow_ctrl rx
~~~~~~~~~~~~~~~~
@@ -841,7 +1017,7 @@ Where:
* ``mac_ctrl_frame_fwd``: Enable receiving MAC control frames.
-* ``autoneg``: Change the auto-negotiation para mete.
+* ``autoneg``: Change the auto-negotiation parameter.
set pfc_ctrl rx
~~~~~~~~~~~~~~~
@@ -1035,6 +1211,42 @@ Add an E-tag forwarding filter on a port::
Delete an E-tag forwarding filter on a port::
testpmd> E-tag set filter del e-tag-id (value) port (port_id)
+ptype mapping
+~~~~~~~~~~~~~
+
+List all items from the ptype mapping table::
+
+ testpmd> ptype mapping get (port_id) (valid_only)
+
+Where:
+
+* ``valid_only``: A flag indicates if only list valid items(=1) or all itemss(=0).
+
+Replace a specific or a group of software defined ptype with a new one::
+
+ testpmd> ptype mapping replace (port_id) (target) (mask) (pkt_type)
+
+where:
+
+* ``target``: A specific software ptype or a mask to represent a group of software ptypes.
+
+* ``mask``: A flag indicate if "target" is a specific software ptype(=0) or a ptype mask(=1).
+
+* ``pkt_type``: The new software ptype to replace the old ones.
+
+Update hardware defined ptype to software defined packet type mapping table::
+
+ testpmd> ptype mapping update (port_id) (hw_ptype) (sw_ptype)
+
+where:
+
+* ``hw_ptype``: hardware ptype as the index of the ptype mapping table.
+
+* ``sw_ptype``: software ptype as the value of the ptype mapping table.
+
+Reset ptype mapping table::
+
+ testpmd> ptype mapping reset (port_id)
Port Functions
--------------
@@ -1061,7 +1273,7 @@ For example, to move a pci device using ixgbe under DPDK management:
.. code-block:: console
# Check the status of the available devices.
- ./tools/dpdk-devbind.py --status
+ ./usertools/dpdk-devbind.py --status
Network devices using DPDK-compatible driver
============================================
@@ -1073,11 +1285,11 @@ For example, to move a pci device using ixgbe under DPDK management:
# Bind the device to igb_uio.
- sudo ./tools/dpdk-devbind.py -b igb_uio 0000:0a:00.0
+ sudo ./usertools/dpdk-devbind.py -b igb_uio 0000:0a:00.0
# Recheck the status of the devices.
- ./tools/dpdk-devbind.py --status
+ ./usertools/dpdk-devbind.py --status
Network devices using DPDK-compatible driver
============================================
0000:0a:00.0 '82599ES 10-Gigabit' drv=igb_uio unused=
@@ -1180,9 +1392,9 @@ For example, to move a pci device under kernel management:
.. code-block:: console
- sudo ./tools/dpdk-devbind.py -b ixgbe 0000:0a:00.0
+ sudo ./usertools/dpdk-devbind.py -b ixgbe 0000:0a:00.0
- ./tools/dpdk-devbind.py --status
+ ./usertools/dpdk-devbind.py --status
Network devices using DPDK-compatible driver
============================================
@@ -1257,9 +1469,9 @@ Set hardware CRC stripping on or off for all ports::
testpmd> port config all crc-strip (on|off)
-CRC stripping is off by default.
+CRC stripping is on by default.
-The ``on`` option is equivalent to the ``--crc-strip`` command-line option.
+The ``off`` option is equivalent to the ``--disable-crc-strip`` command-line option.
port config - scatter
~~~~~~~~~~~~~~~~~~~~~~~
@@ -1631,6 +1843,9 @@ Filter Functions
This section details the available filter functions that are available.
+Note these functions interface the deprecated legacy filtering framework,
+superseded by *rte_flow*. See `Flow rules management`_.
+
ethertype_filter
~~~~~~~~~~~~~~~~~~~~
@@ -2041,3 +2256,690 @@ Set different GRE key length for input set::
For example to set GRE key length for input set to 4 bytes on port 0::
testpmd> global_config 0 gre-key-len 4
+
+
+.. _testpmd_rte_flow:
+
+Flow rules management
+---------------------
+
+Control of the generic flow API (*rte_flow*) is fully exposed through the
+``flow`` command (validation, creation, destruction and queries).
+
+Considering *rte_flow* overlaps with all `Filter Functions`_, using both
+features simultaneously may cause undefined side-effects and is therefore
+not recommended.
+
+``flow`` syntax
+~~~~~~~~~~~~~~~
+
+Because the ``flow`` command uses dynamic tokens to handle the large number
+of possible flow rules combinations, its behavior differs slightly from
+other commands, in particular:
+
+- Pressing *?* or the *<tab>* key displays contextual help for the current
+ token, not that of the entire command.
+
+- Optional and repeated parameters are supported (provided they are listed
+ in the contextual help).
+
+The first parameter stands for the operation mode. Possible operations and
+their general syntax are described below. They are covered in detail in the
+following sections.
+
+- Check whether a flow rule can be created::
+
+ flow validate {port_id}
+ [group {group_id}] [priority {level}] [ingress] [egress]
+ pattern {item} [/ {item} [...]] / end
+ actions {action} [/ {action} [...]] / end
+
+- Create a flow rule::
+
+ flow create {port_id}
+ [group {group_id}] [priority {level}] [ingress] [egress]
+ pattern {item} [/ {item} [...]] / end
+ actions {action} [/ {action} [...]] / end
+
+- Destroy specific flow rules::
+
+ flow destroy {port_id} rule {rule_id} [...]
+
+- Destroy all flow rules::
+
+ flow flush {port_id}
+
+- Query an existing flow rule::
+
+ flow query {port_id} {rule_id} {action}
+
+- List existing flow rules sorted by priority, filtered by group
+ identifiers::
+
+ flow list {port_id} [group {group_id}] [...]
+
+Validating flow rules
+~~~~~~~~~~~~~~~~~~~~~
+
+``flow validate`` reports whether a flow rule would be accepted by the
+underlying device in its current state but stops short of creating it. It is
+bound to ``rte_flow_validate()``::
+
+ flow validate {port_id}
+ [group {group_id}] [priority {level}] [ingress] [egress]
+ pattern {item} [/ {item} [...]] / end
+ actions {action} [/ {action} [...]] / end
+
+If successful, it will show::
+
+ Flow rule validated
+
+Otherwise it will show an error message of the form::
+
+ Caught error type [...] ([...]): [...]
+
+This command uses the same parameters as ``flow create``, their format is
+described in `Creating flow rules`_.
+
+Check whether redirecting any Ethernet packet received on port 0 to RX queue
+index 6 is supported::
+
+ testpmd> flow validate 0 ingress pattern eth / end
+ actions queue index 6 / end
+ Flow rule validated
+ testpmd>
+
+Port 0 does not support TCPv6 rules::
+
+ testpmd> flow validate 0 ingress pattern eth / ipv6 / tcp / end
+ actions drop / end
+ Caught error type 9 (specific pattern item): Invalid argument
+ testpmd>
+
+Creating flow rules
+~~~~~~~~~~~~~~~~~~~
+
+``flow create`` validates and creates the specified flow rule. It is bound
+to ``rte_flow_create()``::
+
+ flow create {port_id}
+ [group {group_id}] [priority {level}] [ingress] [egress]
+ pattern {item} [/ {item} [...]] / end
+ actions {action} [/ {action} [...]] / end
+
+If successful, it will return a flow rule ID usable with other commands::
+
+ Flow rule #[...] created
+
+Otherwise it will show an error message of the form::
+
+ Caught error type [...] ([...]): [...]
+
+Parameters describe in the following order:
+
+- Attributes (*group*, *priority*, *ingress*, *egress* tokens).
+- A matching pattern, starting with the *pattern* token and terminated by an
+ *end* pattern item.
+- Actions, starting with the *actions* token and terminated by an *end*
+ action.
+
+These translate directly to *rte_flow* objects provided as-is to the
+underlying functions.
+
+The shortest valid definition only comprises mandatory tokens::
+
+ testpmd> flow create 0 pattern end actions end
+
+Note that PMDs may refuse rules that essentially do nothing such as this
+one.
+
+**All unspecified object values are automatically initialized to 0.**
+
+Attributes
+^^^^^^^^^^
+
+These tokens affect flow rule attributes (``struct rte_flow_attr``) and are
+specified before the ``pattern`` token.
+
+- ``group {group id}``: priority group.
+- ``priority {level}``: priority level within group.
+- ``ingress``: rule applies to ingress traffic.
+- ``egress``: rule applies to egress traffic.
+
+Each instance of an attribute specified several times overrides the previous
+value as shown below (group 4 is used)::
+
+ testpmd> flow create 0 group 42 group 24 group 4 [...]
+
+Note that once enabled, ``ingress`` and ``egress`` cannot be disabled.
+
+While not specifying a direction is an error, some rules may allow both
+simultaneously.
+
+Most rules affect RX therefore contain the ``ingress`` token::
+
+ testpmd> flow create 0 ingress pattern [...]
+
+Matching pattern
+^^^^^^^^^^^^^^^^
+
+A matching pattern starts after the ``pattern`` token. It is made of pattern
+items and is terminated by a mandatory ``end`` item.
+
+Items are named after their type (*RTE_FLOW_ITEM_TYPE_* from ``enum
+rte_flow_item_type``).
+
+The ``/`` token is used as a separator between pattern items as shown
+below::
+
+ testpmd> flow create 0 ingress pattern eth / ipv4 / udp / end [...]
+
+Note that protocol items like these must be stacked from lowest to highest
+layer to make sense. For instance, the following rule is either invalid or
+unlikely to match any packet::
+
+ testpmd> flow create 0 ingress pattern eth / udp / ipv4 / end [...]
+
+More information on these restrictions can be found in the *rte_flow*
+documentation.
+
+Several items support additional specification structures, for example
+``ipv4`` allows specifying source and destination addresses as follows::
+
+ testpmd> flow create 0 ingress pattern eth / ipv4 src is 10.1.1.1
+ dst is 10.2.0.0 / end [...]
+
+This rule matches all IPv4 traffic with the specified properties.
+
+In this example, ``src`` and ``dst`` are field names of the underlying
+``struct rte_flow_item_ipv4`` object. All item properties can be specified
+in a similar fashion.
+
+The ``is`` token means that the subsequent value must be matched exactly,
+and assigns ``spec`` and ``mask`` fields in ``struct rte_flow_item``
+accordingly. Possible assignment tokens are:
+
+- ``is``: match value perfectly (with full bit-mask).
+- ``spec``: match value according to configured bit-mask.
+- ``last``: specify upper bound to establish a range.
+- ``mask``: specify bit-mask with relevant bits set to one.
+- ``prefix``: generate bit-mask from a prefix length.
+
+These yield identical results::
+
+ ipv4 src is 10.1.1.1
+
+::
+
+ ipv4 src spec 10.1.1.1 src mask 255.255.255.255
+
+::
+
+ ipv4 src spec 10.1.1.1 src prefix 32
+
+::
+
+ ipv4 src is 10.1.1.1 src last 10.1.1.1 # range with a single value
+
+::
+
+ ipv4 src is 10.1.1.1 src last 0 # 0 disables range
+
+Inclusive ranges can be defined with ``last``::
+
+ ipv4 src is 10.1.1.1 src last 10.2.3.4 # 10.1.1.1 to 10.2.3.4
+
+Note that ``mask`` affects both ``spec`` and ``last``::
+
+ ipv4 src is 10.1.1.1 src last 10.2.3.4 src mask 255.255.0.0
+ # matches 10.1.0.0 to 10.2.255.255
+
+Properties can be modified multiple times::
+
+ ipv4 src is 10.1.1.1 src is 10.1.2.3 src is 10.2.3.4 # matches 10.2.3.4
+
+::
+
+ ipv4 src is 10.1.1.1 src prefix 24 src prefix 16 # matches 10.1.0.0/16
+
+Pattern items
+^^^^^^^^^^^^^
+
+This section lists supported pattern items and their attributes, if any.
+
+- ``end``: end list of pattern items.
+
+- ``void``: no-op pattern item.
+
+- ``invert``: perform actions when pattern does not match.
+
+- ``any``: match any protocol for the current layer.
+
+ - ``num {unsigned}``: number of layers covered.
+
+- ``pf``: match packets addressed to the physical function.
+
+- ``vf``: match packets addressed to a virtual function ID.
+
+ - ``id {unsigned}``: destination VF ID.
+
+- ``port``: device-specific physical port index to use.
+
+ - ``index {unsigned}``: physical port index.
+
+- ``raw``: match an arbitrary byte string.
+
+ - ``relative {boolean}``: look for pattern after the previous item.
+ - ``search {boolean}``: search pattern from offset (see also limit).
+ - ``offset {integer}``: absolute or relative offset for pattern.
+ - ``limit {unsigned}``: search area limit for start of pattern.
+ - ``pattern {string}``: byte string to look for.
+
+- ``eth``: match Ethernet header.
+
+ - ``dst {MAC-48}``: destination MAC.
+ - ``src {MAC-48}``: source MAC.
+ - ``type {unsigned}``: EtherType.
+
+- ``vlan``: match 802.1Q/ad VLAN tag.
+
+ - ``tpid {unsigned}``: tag protocol identifier.
+ - ``tci {unsigned}``: tag control information.
+ - ``pcp {unsigned}``: priority code point.
+ - ``dei {unsigned}``: drop eligible indicator.
+ - ``vid {unsigned}``: VLAN identifier.
+
+- ``ipv4``: match IPv4 header.
+
+ - ``tos {unsigned}``: type of service.
+ - ``ttl {unsigned}``: time to live.
+ - ``proto {unsigned}``: next protocol ID.
+ - ``src {ipv4 address}``: source address.
+ - ``dst {ipv4 address}``: destination address.
+
+- ``ipv6``: match IPv6 header.
+
+ - ``tc {unsigned}``: traffic class.
+ - ``flow {unsigned}``: flow label.
+ - ``proto {unsigned}``: protocol (next header).
+ - ``hop {unsigned}``: hop limit.
+ - ``src {ipv6 address}``: source address.
+ - ``dst {ipv6 address}``: destination address.
+
+- ``icmp``: match ICMP header.
+
+ - ``type {unsigned}``: ICMP packet type.
+ - ``code {unsigned}``: ICMP packet code.
+
+- ``udp``: match UDP header.
+
+ - ``src {unsigned}``: UDP source port.
+ - ``dst {unsigned}``: UDP destination port.
+
+- ``tcp``: match TCP header.
+
+ - ``src {unsigned}``: TCP source port.
+ - ``dst {unsigned}``: TCP destination port.
+
+- ``sctp``: match SCTP header.
+
+ - ``src {unsigned}``: SCTP source port.
+ - ``dst {unsigned}``: SCTP destination port.
+ - ``tag {unsigned}``: validation tag.
+ - ``cksum {unsigned}``: checksum.
+
+- ``vxlan``: match VXLAN header.
+
+ - ``vni {unsigned}``: VXLAN identifier.
+
+- ``e_tag``: match IEEE 802.1BR E-Tag header.
+
+ - ``grp_ecid_b {unsigned}``: GRP and E-CID base.
+
+- ``nvgre``: match NVGRE header.
+
+ - ``tni {unsigned}``: virtual subnet ID.
+
+- ``mpls``: match MPLS header.
+
+ - ``label {unsigned}``: MPLS label.
+
+- ``gre``: match GRE header.
+
+ - ``protocol {unsigned}``: protocol type.
+
+Actions list
+^^^^^^^^^^^^
+
+A list of actions starts after the ``actions`` token in the same fashion as
+`Matching pattern`_; actions are separated by ``/`` tokens and the list is
+terminated by a mandatory ``end`` action.
+
+Actions are named after their type (*RTE_FLOW_ACTION_TYPE_* from ``enum
+rte_flow_action_type``).
+
+Dropping all incoming UDPv4 packets can be expressed as follows::
+
+ testpmd> flow create 0 ingress pattern eth / ipv4 / udp / end
+ actions drop / end
+
+Several actions have configurable properties which must be specified when
+there is no valid default value. For example, ``queue`` requires a target
+queue index.
+
+This rule redirects incoming UDPv4 traffic to queue index 6::
+
+ testpmd> flow create 0 ingress pattern eth / ipv4 / udp / end
+ actions queue index 6 / end
+
+While this one could be rejected by PMDs (unspecified queue index)::
+
+ testpmd> flow create 0 ingress pattern eth / ipv4 / udp / end
+ actions queue / end
+
+As defined by *rte_flow*, the list is not ordered, all actions of a given
+rule are performed simultaneously. These are equivalent::
+
+ queue index 6 / void / mark id 42 / end
+
+::
+
+ void / mark id 42 / queue index 6 / end
+
+All actions in a list should have different types, otherwise only the last
+action of a given type is taken into account::
+
+ queue index 4 / queue index 5 / queue index 6 / end # will use queue 6
+
+::
+
+ drop / drop / drop / end # drop is performed only once
+
+::
+
+ mark id 42 / queue index 3 / mark id 24 / end # mark will be 24
+
+Considering they are performed simultaneously, opposite and overlapping
+actions can sometimes be combined when the end result is unambiguous::
+
+ drop / queue index 6 / end # drop has no effect
+
+::
+
+ drop / dup index 6 / end # same as above
+
+::
+
+ queue index 6 / rss queues 6 7 8 / end # queue has no effect
+
+::
+
+ drop / passthru / end # drop has no effect
+
+Note that PMDs may still refuse such combinations.
+
+Actions
+^^^^^^^
+
+This section lists supported actions and their attributes, if any.
+
+- ``end``: end list of actions.
+
+- ``void``: no-op action.
+
+- ``passthru``: let subsequent rule process matched packets.
+
+- ``mark``: attach 32 bit value to packets.
+
+ - ``id {unsigned}``: 32 bit value to return with packets.
+
+- ``flag``: flag packets.
+
+- ``queue``: assign packets to a given queue index.
+
+ - ``index {unsigned}``: queue index to use.
+
+- ``drop``: drop packets (note: passthru has priority).
+
+- ``count``: enable counters for this rule.
+
+- ``dup``: duplicate packets to a given queue index.
+
+ - ``index {unsigned}``: queue index to duplicate packets to.
+
+- ``rss``: spread packets among several queues.
+
+ - ``queues [{unsigned} [...]] end``: queue indices to use.
+
+- ``pf``: redirect packets to physical device function.
+
+- ``vf``: redirect packets to virtual device function.
+
+ - ``original {boolean}``: use original VF ID if possible.
+ - ``id {unsigned}``: VF ID to redirect packets to.
+
+Destroying flow rules
+~~~~~~~~~~~~~~~~~~~~~
+
+``flow destroy`` destroys one or more rules from their rule ID (as returned
+by ``flow create``), this command calls ``rte_flow_destroy()`` as many
+times as necessary::
+
+ flow destroy {port_id} rule {rule_id} [...]
+
+If successful, it will show::
+
+ Flow rule #[...] destroyed
+
+It does not report anything for rule IDs that do not exist. The usual error
+message is shown when a rule cannot be destroyed::
+
+ Caught error type [...] ([...]): [...]
+
+``flow flush`` destroys all rules on a device and does not take extra
+arguments. It is bound to ``rte_flow_flush()``::
+
+ flow flush {port_id}
+
+Any errors are reported as above.
+
+Creating several rules and destroying them::
+
+ testpmd> flow create 0 ingress pattern eth / ipv6 / end
+ actions queue index 2 / end
+ Flow rule #0 created
+ testpmd> flow create 0 ingress pattern eth / ipv4 / end
+ actions queue index 3 / end
+ Flow rule #1 created
+ testpmd> flow destroy 0 rule 0 rule 1
+ Flow rule #1 destroyed
+ Flow rule #0 destroyed
+ testpmd>
+
+The same result can be achieved using ``flow flush``::
+
+ testpmd> flow create 0 ingress pattern eth / ipv6 / end
+ actions queue index 2 / end
+ Flow rule #0 created
+ testpmd> flow create 0 ingress pattern eth / ipv4 / end
+ actions queue index 3 / end
+ Flow rule #1 created
+ testpmd> flow flush 0
+ testpmd>
+
+Non-existent rule IDs are ignored::
+
+ testpmd> flow create 0 ingress pattern eth / ipv6 / end
+ actions queue index 2 / end
+ Flow rule #0 created
+ testpmd> flow create 0 ingress pattern eth / ipv4 / end
+ actions queue index 3 / end
+ Flow rule #1 created
+ testpmd> flow destroy 0 rule 42 rule 10 rule 2
+ testpmd>
+ testpmd> flow destroy 0 rule 0
+ Flow rule #0 destroyed
+ testpmd>
+
+Querying flow rules
+~~~~~~~~~~~~~~~~~~~
+
+``flow query`` queries a specific action of a flow rule having that
+ability. Such actions collect information that can be reported using this
+command. It is bound to ``rte_flow_query()``::
+
+ flow query {port_id} {rule_id} {action}
+
+If successful, it will display either the retrieved data for known actions
+or the following message::
+
+ Cannot display result for action type [...] ([...])
+
+Otherwise, it will complain either that the rule does not exist or that some
+error occurred::
+
+ Flow rule #[...] not found
+
+::
+
+ Caught error type [...] ([...]): [...]
+
+Currently only the ``count`` action is supported. This action reports the
+number of packets that hit the flow rule and the total number of bytes. Its
+output has the following format::
+
+ count:
+ hits_set: [...] # whether "hits" contains a valid value
+ bytes_set: [...] # whether "bytes" contains a valid value
+ hits: [...] # number of packets
+ bytes: [...] # number of bytes
+
+Querying counters for TCPv6 packets redirected to queue 6::
+
+ testpmd> flow create 0 ingress pattern eth / ipv6 / tcp / end
+ actions queue index 6 / count / end
+ Flow rule #4 created
+ testpmd> flow query 0 4 count
+ count:
+ hits_set: 1
+ bytes_set: 0
+ hits: 386446
+ bytes: 0
+ testpmd>
+
+Listing flow rules
+~~~~~~~~~~~~~~~~~~
+
+``flow list`` lists existing flow rules sorted by priority and optionally
+filtered by group identifiers::
+
+ flow list {port_id} [group {group_id}] [...]
+
+This command only fails with the following message if the device does not
+exist::
+
+ Invalid port [...]
+
+Output consists of a header line followed by a short description of each
+flow rule, one per line. There is no output at all when no flow rules are
+configured on the device::
+
+ ID Group Prio Attr Rule
+ [...] [...] [...] [...] [...]
+
+``Attr`` column flags:
+
+- ``i`` for ``ingress``.
+- ``e`` for ``egress``.
+
+Creating several flow rules and listing them::
+
+ testpmd> flow create 0 ingress pattern eth / ipv4 / end
+ actions queue index 6 / end
+ Flow rule #0 created
+ testpmd> flow create 0 ingress pattern eth / ipv6 / end
+ actions queue index 2 / end
+ Flow rule #1 created
+ testpmd> flow create 0 priority 5 ingress pattern eth / ipv4 / udp / end
+ actions rss queues 6 7 8 end / end
+ Flow rule #2 created
+ testpmd> flow list 0
+ ID Group Prio Attr Rule
+ 0 0 0 i- ETH IPV4 => QUEUE
+ 1 0 0 i- ETH IPV6 => QUEUE
+ 2 0 5 i- ETH IPV4 UDP => RSS
+ testpmd>
+
+Rules are sorted by priority (i.e. group ID first, then priority level)::
+
+ testpmd> flow list 1
+ ID Group Prio Attr Rule
+ 0 0 0 i- ETH => COUNT
+ 6 0 500 i- ETH IPV6 TCP => DROP COUNT
+ 5 0 1000 i- ETH IPV6 ICMP => QUEUE
+ 1 24 0 i- ETH IPV4 UDP => QUEUE
+ 4 24 10 i- ETH IPV4 TCP => DROP
+ 3 24 20 i- ETH IPV4 => DROP
+ 2 24 42 i- ETH IPV4 UDP => QUEUE
+ 7 63 0 i- ETH IPV6 UDP VXLAN => MARK QUEUE
+ testpmd>
+
+Output can be limited to specific groups::
+
+ testpmd> flow list 1 group 0 group 63
+ ID Group Prio Attr Rule
+ 0 0 0 i- ETH => COUNT
+ 6 0 500 i- ETH IPV6 TCP => DROP COUNT
+ 5 0 1000 i- ETH IPV6 ICMP => QUEUE
+ 7 63 0 i- ETH IPV6 UDP VXLAN => MARK QUEUE
+ testpmd>
+
+Sample QinQ flow rules
+~~~~~~~~~~~~~~~~~~~~~~
+
+Before creating QinQ rule(s) the following commands should be issued to enable QinQ::
+
+ testpmd> port stop 0
+ testpmd> vlan set qinq on 0
+
+The above command sets the inner and outer TPID's to 0x8100.
+
+To change the TPID's the following commands should be used::
+
+ testpmd> vlan set outer tpid 0xa100 0
+ testpmd> vlan set inner tpid 0x9100 0
+ testpmd> port start 0
+
+Validate and create a QinQ rule on port 0 to steer traffic to a VF queue in a VM.
+
+::
+
+ testpmd> flow validate 0 ingress pattern eth / vlan tci is 123 /
+ vlan tci is 456 / end actions vf id 1 / queue index 0 / end
+ Flow rule #0 validated
+
+ testpmd> flow create 0 ingress pattern eth / vlan tci is 4 /
+ vlan tci is 456 / end actions vf id 123 / queue index 0 / end
+ Flow rule #0 created
+
+ testpmd> flow list 0
+ ID Group Prio Attr Rule
+ 0 0 0 i- ETH VLAN VLAN=>VF QUEUE
+
+Validate and create a QinQ rule on port 0 to steer traffic to a queue on the host.
+
+::
+
+ testpmd> flow validate 0 ingress pattern eth / vlan tci is 321 /
+ vlan tci is 654 / end actions pf / queue index 0 / end
+ Flow rule #1 validated
+
+ testpmd> flow create 0 ingress pattern eth / vlan tci is 321 /
+ vlan tci is 654 / end actions pf / queue index 1 / end
+ Flow rule #1 created
+
+ testpmd> flow list 0
+ ID Group Prio Attr Rule
+ 0 0 0 i- ETH VLAN VLAN=>VF QUEUE
+ 1 0 0 i- ETH VLAN VLAN=>PF QUEUE
+
diff --git a/doc/guides/tools/cryptoperf.rst b/doc/guides/tools/cryptoperf.rst
new file mode 100644
index 00000000..2d225d56
--- /dev/null
+++ b/doc/guides/tools/cryptoperf.rst
@@ -0,0 +1,426 @@
+.. BSD LICENSE
+ Copyright(c) 2016 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+dpdk-test-crypto-perf Application
+=================================
+
+The ``dpdk-test-crypto-perf`` tool is a Data Plane Development Kit (DPDK)
+utility that allows measuring performance parameters of PMDs available in the
+crypto tree. There are available two measurement types: throughput and latency.
+User can use multiply cores to run tests on but only
+one type of crypto PMD can be measured during single application
+execution. Cipher parameters, type of device, type of operation and
+chain mode have to be specified in the command line as application
+parameters. These parameters are checked using device capabilities
+structure.
+
+Limitations
+-----------
+On hardware devices the cycle-count doesn't always represent the actual offload
+cost. The cycle-count only represents the offload cost when the hardware
+accelerator is not fully loaded, when loaded the cpu cycles freed up by the
+offload are still consumed by the test tool and included in the cycle-count.
+These cycles are consumed by retries and inefficient API calls enqueuing and
+dequeuing smaller bursts than specified by the cmdline parameter. This results
+in a larger cycle-count measurement and should not be interpreted as an offload
+cost measurement.
+
+On hardware devices the throughput measurement is not necessarily the maximum
+possible for the device, e.g. it may be necessary to use multiple cores to keep
+the hardware accelerator fully loaded and so measure maximum throughput.
+
+Compiling the Application
+-------------------------
+
+**Step 1: PMD setting**
+
+The ``dpdk-test-crypto-perf`` tool depends on crypto device drivers PMD which
+are disabled by default in the build configuration file ``common_base``.
+The crypto device drivers PMD which should be tested can be enabled by setting::
+
+ CONFIG_RTE_LIBRTE_PMD_<name>=y
+
+Setting example for open ssl PMD::
+
+ CONFIG_RTE_LIBRTE_PMD_OPENSSL=y
+
+**Step 2: Linearization setting**
+
+It is possible linearized input segmented packets just before crypto operation
+for devices which doesn't support scatter-gather, and allows to measure
+performance also for this use case.
+
+To set on the linearization options add below definition to the
+``cperf_ops.h`` file::
+
+ #define CPERF_LINEARIZATION_ENABLE
+
+**Step 3: Build the application**
+
+Execute the ``dpdk-setup.sh`` script to build the DPDK library together with the
+``dpdk-test-crypto-perf`` applcation.
+
+Initially, the user must select a DPDK target to choose the correct target type
+and compiler options to use when building the libraries.
+The user must have all libraries, modules, updates and compilers installed
+in the system prior to this,
+as described in the earlier chapters in this Getting Started Guide.
+
+Running the Application
+-----------------------
+
+The tool application has a number of command line options:
+
+.. code-block:: console
+
+ dpdk-test-crypto-perf [EAL Options] -- [Application Options]
+
+EAL Options
+~~~~~~~~~~~
+
+The following are the EAL command-line options that can be used in conjunction
+with the ``dpdk-test-crypto-perf`` applcation.
+See the DPDK Getting Started Guides for more information on these options.
+
+* ``-c <COREMASK>`` or ``-l <CORELIST>``
+
+ Set the hexadecimal bitmask of the cores to run on. The corelist is a
+ list cores to use.
+
+* ``-w <PCI>``
+
+ Add a PCI device in white list.
+
+* ``--vdev <driver><id>``
+
+ Add a virtual device.
+
+Appication Options
+~~~~~~~~~~~~~~~~~~
+
+The following are the appication command-line options:
+
+* ``--ptest type``
+
+ Set test type, where ``type`` is one of the following::
+
+ throughput
+ latency
+ verify
+
+* ``--silent``
+
+ Disable options dump.
+
+* ``--pool-sz <n>``
+
+ Set the number of mbufs to be allocated in the mbuf pool.
+
+* ``--total-ops <n>``
+
+ Set the number of total operations performed.
+
+* ``--burst-sz <n>``
+
+ Set the number of packets per burst.
+
+ This can be set as:
+ * Single value (i.e. ``--burst-sz 16``)
+ * Range of values, using the following structure ``min:inc:max``,
+ where ``min`` is minimum size, ``inc`` is the increment size and ``max``
+ is the maximum size (i.e. ``--burst-sz 16:2:32``)
+ * List of values, up to 32 values, separated in commas (i.e. ``--burst-sz 16,24,32``)
+
+* ``--buffer-sz <n>``
+
+ Set the size of single packet (plaintext or ciphertext in it).
+
+ This can be set as:
+ * Single value (i.e. ``--buffer-sz 16``)
+ * Range of values, using the following structure ``min:inc:max``,
+ where ``min`` is minimum size, ``inc`` is the increment size and ``max``
+ is the maximum size (i.e. ``--buffer-sz 16:2:32``)
+ * List of values, up to 32 values, separated in commas (i.e. ``--buffer-sz 32,64,128``)
+
+
+* ``--segments-nb <n>``
+
+ Set the number of segments per packet.
+
+* ``--devtype <name>``
+
+ Set device type, where ``name`` is one of the following::
+
+ crypto_null
+ crypto_aesni_mb
+ crypto_aesni_gcm
+ crypto_openssl
+ crypto_qat
+ crypto_snow3g
+ crypto_kasumi
+ crypto_zuc
+
+* ``--optype <name>``
+
+ Set operation type, where ``name`` is one of the following::
+
+ cipher-only
+ auth-only
+ cipher-then-auth
+ auth-then-cipher
+ aead
+
+ For GCM/CCM algorithms you should use aead flag.
+
+* ``--sessionless``
+
+ Enable session-less crypto operations mode.
+
+* ``--out-of-place``
+
+ Enable out-of-place crypto operations mode.
+
+* ``--test-file <name>``
+
+ Set test vector file path. See the Test Vector File chapter.
+
+* ``--test-name <name>``
+
+ Set specific test name section in the test vector file.
+
+* ``--cipher-algo <name>``
+
+ Set cipher algorithm name, where ``name`` is one of the following::
+
+ 3des-cbc
+ 3des-ecb
+ 3des-ctr
+ aes-cbc
+ aes-ccm
+ aes-ctr
+ aes-ecb
+ aes-gcm
+ aes-f8
+ aes-xts
+ arc4
+ null
+ kasumi-f8
+ snow3g-uea2
+ zuc-eea3
+
+* ``--cipher-op <mode>``
+
+ Set cipher operation mode, where ``mode`` is one of the following::
+
+ encrypt
+ decrypt
+
+* ``--cipher-key-sz <n>``
+
+ Set the size of cipher key.
+
+* ``--cipher-iv-sz <n>``
+
+ Set the size of cipher iv.
+
+* ``--auth-algo <name>``
+
+ Set authentication algorithm name, where ``name`` is one
+ of the following::
+
+ 3des-cbc
+ aes-cbc-mac
+ aes-ccm
+ aes-cmac
+ aes-gcm
+ aes-gmac
+ aes-xcbc-mac
+ md5
+ md5-hmac
+ sha1
+ sha1-hmac
+ sha2-224
+ sha2-224-hmac
+ sha2-256
+ sha2-256-hmac
+ sha2-384
+ sha2-384-hmac
+ sha2-512
+ sha2-512-hmac
+ kasumi-f9
+ snow3g-uia2
+ zuc-eia3
+
+* ``--auth-op <mode>``
+
+ Set authentication operation mode, where ``mode`` is one of
+ the following::
+
+ verify
+ generate
+
+* ``--auth-key-sz <n>``
+
+ Set the size of authentication key.
+
+* ``--auth-digest-sz <n>``
+
+ Set the size of authentication digest.
+
+* ``--auth-aad-sz <n>``
+
+ Set the size of authentication aad.
+
+* ``--csv-friendly``
+
+ Enable test result output CSV friendly rather than human friendly.
+
+Test Vector File
+~~~~~~~~~~~~~~~~
+
+The test vector file is a text file contain information about test vectors.
+The file is made of the sections. The first section doesn't have header.
+It contain global information used in each test variant vectors -
+typicaly information about plaintext, ciphertext, cipher key, aut key,
+initial vector. All other sections begin header.
+The sections contain particular information typicaly digest.
+
+**Format of the file:**
+
+Each line beginig with sign '#' contain comment and it is ignored by parser::
+
+ # <comment>
+
+Header line is just name in square bracket::
+
+ [<section name>]
+
+Data line contain information tocken then sign '=' and
+a string of bytes in C byte array format::
+
+ <tocken> = <C byte array>
+
+**Tockens list:**
+
+* ``plaintext``
+
+ Original plaintext to be crypted.
+
+* ``ciphertext``
+
+ Encrypted plaintext string.
+
+* ``cipher_key``
+
+ Key used in cipher operation.
+
+* ``auth_key``
+
+ Key used in auth operation.
+
+* ``iv``
+
+ Initial vector.
+
+* ``aad``
+
+ Additional data.
+
+* ``digest``
+
+ Digest string.
+
+Examples
+--------
+
+Call application for performance throughput test of single Aesni MB PMD
+for cipher encryption aes-cbc and auth generation sha1-hmac,
+one milion operations, burst size 32, packet size 64::
+
+ dpdk-test-crypto-perf -l 6-7 --vdev crypto_aesni_mb_pmd -w 0000:00:00.0 --
+ --ptest throughput --devtype crypto_aesni_mb --optype cipher-then-auth
+ --cipher-algo aes-cbc --cipher-op encrypt --cipher-key-sz 16 --auth-algo
+ sha1-hmac --auth-op generate --auth-key-sz 64 --auth-digest-sz 12
+ --total-ops 10000000 --burst-sz 32 --buffer-sz 64
+
+Call application for performance latency test of two Aesni MB PMD executed
+on two cores for cipher encryption aes-cbc, ten operations in silent mode::
+
+ dpdk-test-crypto-perf -l 4-7 --vdev crypto_aesni_mb_pmd1
+ --vdev crypto_aesni_mb_pmd2 -w 0000:00:00.0 -- --devtype crypto_aesni_mb
+ --cipher-algo aes-cbc --cipher-key-sz 16 --cipher-iv-sz 16
+ --cipher-op encrypt --optype cipher-only --silent
+ --ptest latency --total-ops 10
+
+Call application for verification test of single open ssl PMD
+for cipher encryption aes-gcm and auth generation aes-gcm,ten operations
+in silent mode, test vector provide in file "test_aes_gcm.data"
+with packet verification::
+
+ dpdk-test-crypto-perf -l 4-7 --vdev crypto_openssl -w 0000:00:00.0 --
+ --devtype crypto_openssl --cipher-algo aes-gcm --cipher-key-sz 16
+ --cipher-iv-sz 16 --cipher-op encrypt --auth-algo aes-gcm --auth-key-sz 16
+ --auth-digest-sz 16 --auth-aad-sz 16 --auth-op generate --optype aead
+ --silent --ptest verify --total-ops 10
+ --test-file test_aes_gcm.data
+
+Test vector file for cipher algorithm aes cbc 256 with authorization sha::
+
+ # Global Section
+ plaintext =
+ 0xff, 0xca, 0xfb, 0xf1, 0x38, 0x20, 0x2f, 0x7b, 0x24, 0x98, 0x26, 0x7d, 0x1d, 0x9f, 0xb3, 0x93,
+ 0xd9, 0xef, 0xbd, 0xad, 0x4e, 0x40, 0xbd, 0x60, 0xe9, 0x48, 0x59, 0x90, 0x67, 0xd7, 0x2b, 0x7b,
+ 0x8a, 0xe0, 0x4d, 0xb0, 0x70, 0x38, 0xcc, 0x48, 0x61, 0x7d, 0xee, 0xd6, 0x35, 0x49, 0xae, 0xb4,
+ 0xaf, 0x6b, 0xdd, 0xe6, 0x21, 0xc0, 0x60, 0xce, 0x0a, 0xf4, 0x1c, 0x2e, 0x1c, 0x8d, 0xe8, 0x7b
+ ciphertext =
+ 0x77, 0xF9, 0xF7, 0x7A, 0xA3, 0xCB, 0x68, 0x1A, 0x11, 0x70, 0xD8, 0x7A, 0xB6, 0xE2, 0x37, 0x7E,
+ 0xD1, 0x57, 0x1C, 0x8E, 0x85, 0xD8, 0x08, 0xBF, 0x57, 0x1F, 0x21, 0x6C, 0xAD, 0xAD, 0x47, 0x1E,
+ 0x0D, 0x6B, 0x79, 0x39, 0x15, 0x4E, 0x5B, 0x59, 0x2D, 0x76, 0x87, 0xA6, 0xD6, 0x47, 0x8F, 0x82,
+ 0xB8, 0x51, 0x91, 0x32, 0x60, 0xCB, 0x97, 0xDE, 0xBE, 0xF0, 0xAD, 0xFC, 0x23, 0x2E, 0x22, 0x02
+ cipher_key =
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2, 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A,
+ 0xd0, 0xe7, 0x4b, 0xfb, 0x5d, 0xe5, 0x0c, 0xe7, 0x6f, 0x21, 0xb5, 0x52, 0x2a, 0xbb, 0xc7, 0xf7
+ auth_key =
+ 0xaf, 0x96, 0x42, 0xf1, 0x8c, 0x50, 0xdc, 0x67, 0x1a, 0x43, 0x47, 0x62, 0xc7, 0x04, 0xab, 0x05,
+ 0xf5, 0x0c, 0xe7, 0xa2, 0xa6, 0x23, 0xd5, 0x3d, 0x95, 0xd8, 0xcd, 0x86, 0x79, 0xf5, 0x01, 0x47,
+ 0x4f, 0xf9, 0x1d, 0x9d, 0x36, 0xf7, 0x68, 0x1a, 0x64, 0x44, 0x58, 0x5d, 0xe5, 0x81, 0x15, 0x2a,
+ 0x41, 0xe4, 0x0e, 0xaa, 0x1f, 0x04, 0x21, 0xff, 0x2c, 0xf3, 0x73, 0x2b, 0x48, 0x1e, 0xd2, 0xf7
+ iv =
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ # Section sha 1 hmac buff 32
+ [sha1_hmac_buff_32]
+ digest =
+ 0x36, 0xCA, 0x49, 0x6A, 0xE3, 0x54, 0xD8, 0x4F, 0x0B, 0x76, 0xD8, 0xAA, 0x78, 0xEB, 0x9D, 0x65,
+ 0x2C, 0xCA, 0x1F, 0x97
+ # Section sha 256 hmac buff 32
+ [sha256_hmac_buff_32]
+ digest =
+ 0x1C, 0xB2, 0x3D, 0xD1, 0xF9, 0xC7, 0x6C, 0x49, 0x2E, 0xDA, 0x94, 0x8B, 0xF1, 0xCF, 0x96, 0x43,
+ 0x67, 0x50, 0x39, 0x76, 0xB5, 0xA1, 0xCE, 0xA1, 0xD7, 0x77, 0x10, 0x07, 0x43, 0x37, 0x05, 0xB4
diff --git a/doc/guides/tools/index.rst b/doc/guides/tools/index.rst
index 513221ca..6dc5d202 100644
--- a/doc/guides/tools/index.rst
+++ b/doc/guides/tools/index.rst
@@ -39,4 +39,5 @@ DPDK Tools User Guides
pdump
pmdinfo
devbind
+ cryptoperf
diff --git a/doc/guides/tools/proc_info.rst b/doc/guides/tools/proc_info.rst
index baaf977f..fd17e278 100644
--- a/doc/guides/tools/proc_info.rst
+++ b/doc/guides/tools/proc_info.rst
@@ -56,7 +56,7 @@ The stats parameter controls the printing of generic port statistics. If no
port mask is specified stats are printed for all DPDK ports.
**--xstats**
-The stats parameter controls the printing of extended port statistics. If no
+The xstats parameter controls the printing of extended port statistics. If no
port mask is specified xstats are printed for all DPDK ports.
**--stats-reset**
diff --git a/doc/guides/xen/pkt_switch.rst b/doc/guides/xen/pkt_switch.rst
index a45841b9..717a04b2 100644
--- a/doc/guides/xen/pkt_switch.rst
+++ b/doc/guides/xen/pkt_switch.rst
@@ -323,7 +323,7 @@ Building and Running the Switching Backend
.. code-block:: console
modprobe uio_pci_generic
- python tools/dpdk-devbind.py -b uio_pci_generic 0000:09:00:00.0
+ python usertools/dpdk-devbind.py -b uio_pci_generic 0000:09:00:00.0
In this case, 0000:09:00.0 is the PCI address for the NIC controller.
@@ -331,7 +331,7 @@ Building and Running the Switching Backend
.. code-block:: console
- examples/vhost_xen/build/vhost-switch -c f -n 3 --xen-dom0 -- -p1
+ examples/vhost_xen/build/vhost-switch -l 0-3 -n 3 --xen-dom0 -- -p1
.. note::
@@ -409,7 +409,7 @@ Building and Running the Front End
.. code-block:: console
- ./x86_64-native-linuxapp-gcc/app/testpmd -c f -n 4 --vdev="net_xenvirt0,mac=00:00:00:00:00:11"
+ ./x86_64-native-linuxapp-gcc/app/testpmd -l 0-3 -n 4 --vdev="net_xenvirt0,mac=00:00:00:00:00:11"
testpmd>set fwd mac
testpmd>start
@@ -430,7 +430,7 @@ Run TestPMD in a guest VM:
.. code-block:: console
- ./x86_64-native-linuxapp-gcc/app/testpmd -c f -n 4 --vdev="net_xenvirt0,mac=00:00:00:00:00:11" -- -i --eth-peer=0,00:00:00:00:00:22
+ ./x86_64-native-linuxapp-gcc/app/testpmd -l 0-3 -n 4 --vdev="net_xenvirt0,mac=00:00:00:00:00:11" -- -i --eth-peer=0,00:00:00:00:00:22
testpmd> set fwd mac
testpmd> start
@@ -453,13 +453,13 @@ Run TestPMD in guest VM1:
.. code-block:: console
- ./x86_64-native-linuxapp-gcc/app/testpmd -c f -n 4 --vdev="net_xenvirt0,mac=00:00:00:00:00:11" -- -i --eth-peer=0,00:00:00:00:00:22 -- -i
+ ./x86_64-native-linuxapp-gcc/app/testpmd -l 0-3 -n 4 --vdev="net_xenvirt0,mac=00:00:00:00:00:11" -- -i --eth-peer=0,00:00:00:00:00:22 -- -i
Run TestPMD in guest VM2:
.. code-block:: console
- ./x86_64-native-linuxapp-gcc/app/testpmd -c f -n 4 --vdev="net_xenvirt0,mac=00:00:00:00:00:22" -- -i --eth-peer=0,00:00:00:00:00:33
+ ./x86_64-native-linuxapp-gcc/app/testpmd -l 0-3 -n 4 --vdev="net_xenvirt0,mac=00:00:00:00:00:22" -- -i --eth-peer=0,00:00:00:00:00:33
Configure a packet stream in the packet generator, and set the destination MAC address to 00:00:00:00:00:11 and VLAN to 1000.
The packets received in Virtio in guest VM1 will be forwarded to Virtio in guest VM2 and
diff --git a/drivers/Makefile b/drivers/Makefile
index 81c03a85..a04a01fb 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -31,7 +31,13 @@
include $(RTE_SDK)/mk/rte.vars.mk
+DIRS-y += bus
+DIRS-y += mempool
+DEPDIRS-mempool := bus
DIRS-y += net
+DEPDIRS-net := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
+DEPDIRS-crypto := mempool
+DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/mk/internal/rte.depdirs-pre.mk b/drivers/bus/Makefile
index 8825db03..1e5b281c 100644
--- a/mk/internal/rte.depdirs-pre.mk
+++ b/drivers/bus/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# Copyright(c) 2016 NXP. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -13,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Intel Corporation nor the names of its
+# * Neither the name of NXP nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
@@ -29,4 +29,11 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# nothing
+include $(RTE_SDK)/mk/rte.vars.mk
+
+core-libs := librte_eal librte_mbuf librte_mempool librte_ring librte_ether
+
+DIRS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc
+DEPDIRS-fslmc = $(core-libs)
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/bus/fslmc/Makefile b/drivers/bus/fslmc/Makefile
new file mode 100644
index 00000000..973d279b
--- /dev/null
+++ b/drivers/bus/fslmc/Makefile
@@ -0,0 +1,75 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 NXP.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_bus_fslmc.a
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
+CONFIG_RTE_LIBRTE_FSLMC_BUS = $(CONFIG_RTE_LIBRTE_DPAA2_PMD)
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
+CFLAGS += -O0 -g
+CFLAGS += "-Wno-error"
+else
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_bus_fslmc_version.map
+
+# library version
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
+ qbman/qbman_portal.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
+ mc/dpbp.c \
+ mc/dpio.c \
+ mc/mc_sys.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpio.c
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpbp.c
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc_vfio.c
+SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc_bus.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
new file mode 100644
index 00000000..b24642dd
--- /dev/null
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -0,0 +1,141 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <dirent.h>
+
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_devargs.h>
+#include <rte_memcpy.h>
+#include <rte_ethdev.h>
+
+#include "rte_fslmc.h"
+#include "fslmc_vfio.h"
+
+#define FSLMC_BUS_LOG(level, fmt, args...) \
+ RTE_LOG(level, EAL, "%s(): " fmt "\n", __func__, ##args)
+
+struct rte_fslmc_bus rte_fslmc_bus;
+
+static int
+rte_fslmc_scan(void)
+{
+ int ret;
+
+ ret = fslmc_vfio_setup_group();
+ if (ret) {
+ FSLMC_BUS_LOG(ERR, "fslmc: Unable to setup VFIO");
+ return ret;
+ }
+
+ ret = fslmc_vfio_process_group();
+ if (ret) {
+ FSLMC_BUS_LOG(ERR, "fslmc: Unable to setup devices");
+ return -1;
+ }
+
+ RTE_LOG(INFO, EAL, "fslmc: Bus scan completed\n");
+ return 0;
+}
+
+static int
+rte_fslmc_match(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ if (dpaa2_drv->drv_type == dpaa2_dev->dev_type)
+ return 0;
+
+ return 1;
+}
+
+static int
+rte_fslmc_probe(void)
+{
+ int ret = 0;
+ struct rte_dpaa2_device *dev;
+ struct rte_dpaa2_driver *drv;
+
+ TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
+ TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) {
+ ret = rte_fslmc_match(drv, dev);
+ if (ret)
+ continue;
+
+ if (!drv->probe)
+ continue;
+
+ ret = drv->probe(drv, dev);
+ if (ret)
+ FSLMC_BUS_LOG(ERR, "Unable to probe.\n");
+ break;
+ }
+ }
+ return ret;
+}
+
+/*register a fslmc bus based dpaa2 driver */
+void
+rte_fslmc_driver_register(struct rte_dpaa2_driver *driver)
+{
+ RTE_VERIFY(driver);
+
+ TAILQ_INSERT_TAIL(&rte_fslmc_bus.driver_list, driver, next);
+ /* Update Bus references */
+ driver->fslmc_bus = &rte_fslmc_bus;
+}
+
+/*un-register a fslmc bus based dpaa2 driver */
+void
+rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver)
+{
+ struct rte_fslmc_bus *fslmc_bus;
+
+ fslmc_bus = driver->fslmc_bus;
+
+ TAILQ_REMOVE(&fslmc_bus->driver_list, driver, next);
+ /* Update Bus references */
+ driver->fslmc_bus = NULL;
+}
+
+struct rte_fslmc_bus rte_fslmc_bus = {
+ .bus = {
+ .scan = rte_fslmc_scan,
+ .probe = rte_fslmc_probe,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_fslmc_bus.driver_list),
+};
+
+RTE_REGISTER_BUS(FSLMC_BUS_NAME, rte_fslmc_bus.bus);
diff --git a/drivers/bus/fslmc/fslmc_logs.h b/drivers/bus/fslmc/fslmc_logs.h
new file mode 100644
index 00000000..a890e6c3
--- /dev/null
+++ b/drivers/bus/fslmc/fslmc_logs.h
@@ -0,0 +1,76 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FSLMC_LOGS_H_
+#define _FSLMC_LOGS_H_
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _FSLMC_LOGS_H_ */
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
new file mode 100644
index 00000000..5d4ac67c
--- /dev/null
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -0,0 +1,642 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/vfs.h>
+#include <libgen.h>
+#include <dirent.h>
+#include <sys/eventfd.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <rte_bus.h>
+
+#include "rte_fslmc.h"
+#include "fslmc_vfio.h"
+
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+#define VFIO_MAX_CONTAINERS 1
+
+#define FSLMC_VFIO_LOG(level, fmt, args...) \
+ RTE_LOG(level, EAL, "%s(): " fmt "\n", __func__, ##args)
+
+/** Pathname of FSL-MC devices directory. */
+#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"
+
+/* Number of VFIO containers & groups with in */
+static struct fslmc_vfio_group vfio_groups[VFIO_MAX_GRP];
+static struct fslmc_vfio_container vfio_containers[VFIO_MAX_CONTAINERS];
+static int container_device_fd;
+static uint32_t *msi_intr_vaddr;
+void *(*rte_mcp_ptr_list);
+static uint32_t mcp_id;
+static int is_dma_done;
+
+static int vfio_connect_container(struct fslmc_vfio_group *vfio_group)
+{
+ struct fslmc_vfio_container *container;
+ int i, fd, ret;
+
+ /* Try connecting to vfio container if already created */
+ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
+ container = &vfio_containers[i];
+ if (!ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER,
+ &container->fd)) {
+ FSLMC_VFIO_LOG(INFO, "Container pre-exists with"
+ " FD[0x%x] for this group",
+ container->fd);
+ vfio_group->container = container;
+ return 0;
+ }
+ }
+
+ /* Opens main vfio file descriptor which represents the "container" */
+ fd = vfio_get_container_fd();
+ if (fd < 0) {
+ FSLMC_VFIO_LOG(ERR, "Failed to open VFIO container");
+ return -errno;
+ }
+
+ /* Check whether support for SMMU type IOMMU present or not */
+ if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
+ /* Connect group to container */
+ ret = ioctl(vfio_group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
+ if (ret) {
+ FSLMC_VFIO_LOG(ERR, "Failed to setup group container");
+ close(fd);
+ return -errno;
+ }
+
+ ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
+ if (ret) {
+ FSLMC_VFIO_LOG(ERR, "Failed to setup VFIO iommu");
+ close(fd);
+ return -errno;
+ }
+ } else {
+ FSLMC_VFIO_LOG(ERR, "No supported IOMMU available");
+ close(fd);
+ return -EINVAL;
+ }
+
+ container = NULL;
+ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
+ if (vfio_containers[i].used)
+ continue;
+ FSLMC_VFIO_LOG(DEBUG, "Unused container at index %d", i);
+ container = &vfio_containers[i];
+ }
+ if (!container) {
+ FSLMC_VFIO_LOG(ERR, "No free container found");
+ close(fd);
+ return -ENOMEM;
+ }
+
+ container->used = 1;
+ container->fd = fd;
+ container->group_list[container->index] = vfio_group;
+ vfio_group->container = container;
+ container->index++;
+ return 0;
+}
+
+static int vfio_map_irq_region(struct fslmc_vfio_group *group)
+{
+ int ret;
+ unsigned long *vaddr = NULL;
+ struct vfio_iommu_type1_dma_map map = {
+ .argsz = sizeof(map),
+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+ .vaddr = 0x6030000,
+ .iova = 0x6030000,
+ .size = 0x1000,
+ };
+
+ vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
+ PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
+ if (vaddr == MAP_FAILED) {
+ FSLMC_VFIO_LOG(ERR, "Unable to map region (errno = %d)", errno);
+ return -errno;
+ }
+
+ msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64);
+ map.vaddr = (unsigned long)vaddr;
+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map);
+ if (ret == 0)
+ return 0;
+
+ FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA fails (errno = %d)", errno);
+ return -errno;
+}
+
+int vfio_dmamap_mem_region(uint64_t vaddr,
+ uint64_t iova,
+ uint64_t size)
+{
+ struct fslmc_vfio_group *group;
+ struct vfio_iommu_type1_dma_map dma_map = {
+ .argsz = sizeof(dma_map),
+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+ };
+
+ dma_map.vaddr = vaddr;
+ dma_map.size = size;
+ dma_map.iova = iova;
+
+ /* SET DMA MAP for IOMMU */
+ group = &vfio_groups[0];
+ if (ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map)) {
+ FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA (errno = %d)", errno);
+ return -1;
+ }
+ return 0;
+}
+
+int rte_fslmc_vfio_dmamap(void)
+{
+ int ret;
+ struct fslmc_vfio_group *group;
+ struct vfio_iommu_type1_dma_map dma_map = {
+ .argsz = sizeof(struct vfio_iommu_type1_dma_map),
+ .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+ };
+
+ int i;
+ const struct rte_memseg *memseg;
+
+ if (is_dma_done)
+ return 0;
+ is_dma_done = 1;
+
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+ memseg = rte_eal_get_physmem_layout();
+ if (memseg == NULL) {
+ FSLMC_VFIO_LOG(ERR, "Cannot get physical layout.");
+ return -ENODEV;
+ }
+
+ if (memseg[i].addr == NULL && memseg[i].len == 0)
+ break;
+
+ dma_map.size = memseg[i].len;
+ dma_map.vaddr = memseg[i].addr_64;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ dma_map.iova = memseg[i].phys_addr;
+#else
+ dma_map.iova = dma_map.vaddr;
+#endif
+
+ /* SET DMA MAP for IOMMU */
+ group = &vfio_groups[0];
+
+ if (!group->container) {
+ FSLMC_VFIO_LOG(ERR, "Container is not connected ");
+ return -1;
+ }
+
+ FSLMC_VFIO_LOG(DEBUG, "-->Initial SHM Virtual ADDR %llX",
+ dma_map.vaddr);
+ FSLMC_VFIO_LOG(DEBUG, "-----> DMA size 0x%llX\n", dma_map.size);
+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA,
+ &dma_map);
+ if (ret) {
+ FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA API"
+ "(errno = %d)", errno);
+ return ret;
+ }
+ FSLMC_VFIO_LOG(DEBUG, "-----> dma_map.vaddr = 0x%llX",
+ dma_map.vaddr);
+ }
+
+ /* TODO - This is a W.A. as VFIO currently does not add the mapping of
+ * the interrupt region to SMMU. This should be removed once the
+ * support is added in the Kernel.
+ */
+ vfio_map_irq_region(group);
+
+ return 0;
+}
+
+static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
+{
+ int64_t v_addr = (int64_t)MAP_FAILED;
+ int32_t ret, mc_fd;
+
+ struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };
+
+ /* getting the mcp object's fd*/
+ mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
+ if (mc_fd < 0) {
+ FSLMC_VFIO_LOG(ERR, "error in VFIO get device %s fd from group"
+ " %d", mcp_obj, group->fd);
+ return v_addr;
+ }
+
+ /* getting device info*/
+ ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info);
+ if (ret < 0) {
+ FSLMC_VFIO_LOG(ERR, "error in VFIO getting DEVICE_INFO");
+ goto MC_FAILURE;
+ }
+
+ /* getting device region info*/
+ ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
+ if (ret < 0) {
+ FSLMC_VFIO_LOG(ERR, "error in VFIO getting REGION_INFO");
+ goto MC_FAILURE;
+ }
+
+ FSLMC_VFIO_LOG(DEBUG, "region offset = %llx , region size = %llx",
+ reg_info.offset, reg_info.size);
+
+ v_addr = (uint64_t)mmap(NULL, reg_info.size,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ mc_fd, reg_info.offset);
+
+MC_FAILURE:
+ close(mc_fd);
+
+ return v_addr;
+}
+
+static inline int
+dpaa2_compare_dpaa2_dev(const struct rte_dpaa2_device *dev,
+ const struct rte_dpaa2_device *dev2)
+{
+ /*not the same family device */
+ if (dev->dev_type != DPAA2_MC_DPNI_DEVID ||
+ dev->dev_type != DPAA2_MC_DPSECI_DEVID)
+ return -1;
+
+ if (dev->object_id == dev2->object_id)
+ return 0;
+ else
+ return 1;
+}
+
+static void
+fslmc_bus_add_device(struct rte_dpaa2_device *dev)
+{
+ struct rte_fslmc_device_list *dev_l;
+
+ dev_l = &rte_fslmc_bus.device_list;
+
+ /* device is valid, add in list (sorted) */
+ if (TAILQ_EMPTY(dev_l)) {
+ TAILQ_INSERT_TAIL(dev_l, dev, next);
+ } else {
+ struct rte_dpaa2_device *dev2;
+ int ret;
+
+ TAILQ_FOREACH(dev2, dev_l, next) {
+ ret = dpaa2_compare_dpaa2_dev(dev, dev2);
+ if (ret <= 0)
+ continue;
+
+ TAILQ_INSERT_BEFORE(dev2, dev, next);
+ return;
+ }
+
+ TAILQ_INSERT_TAIL(dev_l, dev, next);
+ }
+}
+
+/* Following function shall fetch total available list of MC devices
+ * from VFIO container & populate private list of devices and other
+ * data structures
+ */
+int fslmc_vfio_process_group(void)
+{
+ struct fslmc_vfio_device *vdev;
+ struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
+ char *temp_obj, *object_type, *mcp_obj, *dev_name;
+ int32_t object_id, i, dev_fd, ret;
+ DIR *d;
+ struct dirent *dir;
+ char path[PATH_MAX];
+ int64_t v_addr;
+ int ndev_count;
+ int dpio_count = 0, dpbp_count = 0;
+ struct fslmc_vfio_group *group = &vfio_groups[0];
+ static int process_once;
+
+ /* if already done once */
+ if (process_once) {
+ FSLMC_VFIO_LOG(DEBUG, "Already scanned once - re-scan "
+ "not supported");
+ return 0;
+ }
+ process_once = 0;
+
+ sprintf(path, "/sys/kernel/iommu_groups/%d/devices", group->groupid);
+
+ d = opendir(path);
+ if (!d) {
+ FSLMC_VFIO_LOG(ERR, "Unable to open directory %s", path);
+ return -1;
+ }
+
+ /*Counting the number of devices in a group and getting the mcp ID*/
+ ndev_count = 0;
+ mcp_obj = NULL;
+ while ((dir = readdir(d)) != NULL) {
+ if (dir->d_type == DT_LNK) {
+ ndev_count++;
+ if (!strncmp("dpmcp", dir->d_name, 5)) {
+ if (mcp_obj)
+ free(mcp_obj);
+ mcp_obj = malloc(sizeof(dir->d_name));
+ if (!mcp_obj) {
+ FSLMC_VFIO_LOG(ERR, "mcp obj:Unable to"
+ " allocate memory");
+ closedir(d);
+ return -ENOMEM;
+ }
+ strcpy(mcp_obj, dir->d_name);
+ temp_obj = strtok(dir->d_name, ".");
+ temp_obj = strtok(NULL, ".");
+ sscanf(temp_obj, "%d", &mcp_id);
+ }
+ }
+ }
+ closedir(d);
+ d = NULL;
+ if (!mcp_obj) {
+ FSLMC_VFIO_LOG(ERR, "DPAA2 MCP Object not Found");
+ return -ENODEV;
+ }
+ RTE_LOG(INFO, EAL, "fslmc: DPRC contains = %d devices\n", ndev_count);
+
+ /* Allocate the memory depends upon number of objects in a group*/
+ group->vfio_device = (struct fslmc_vfio_device *)malloc(ndev_count *
+ sizeof(struct fslmc_vfio_device));
+ if (!(group->vfio_device)) {
+ FSLMC_VFIO_LOG(ERR, "vfio device: Unable to allocate memory\n");
+ free(mcp_obj);
+ return -ENOMEM;
+ }
+
+ /* Allocate memory for MC Portal list */
+ rte_mcp_ptr_list = malloc(sizeof(void *) * 1);
+ if (!rte_mcp_ptr_list) {
+ FSLMC_VFIO_LOG(ERR, "portal list: Unable to allocate memory!");
+ free(mcp_obj);
+ goto FAILURE;
+ }
+
+ v_addr = vfio_map_mcp_obj(group, mcp_obj);
+ free(mcp_obj);
+ if (v_addr == (int64_t)MAP_FAILED) {
+ FSLMC_VFIO_LOG(ERR, "Error mapping region (errno = %d)", errno);
+ goto FAILURE;
+ }
+
+ FSLMC_VFIO_LOG(DEBUG, "DPAA2 MC has VIR_ADD = %ld", v_addr);
+
+ rte_mcp_ptr_list[0] = (void *)v_addr;
+
+ d = opendir(path);
+ if (!d) {
+ FSLMC_VFIO_LOG(ERR, "Unable to open %s Directory", path);
+ goto FAILURE;
+ }
+
+ i = 0;
+ FSLMC_VFIO_LOG(DEBUG, "DPAA2 - Parsing devices:");
+ /* Parsing each object and initiating them*/
+ while ((dir = readdir(d)) != NULL) {
+ if (dir->d_type != DT_LNK)
+ continue;
+ if (!strncmp("dprc", dir->d_name, 4) ||
+ !strncmp("dpmcp", dir->d_name, 5))
+ continue;
+ dev_name = malloc(sizeof(dir->d_name));
+ if (!dev_name) {
+ FSLMC_VFIO_LOG(ERR, "name: Unable to allocate memory");
+ goto FAILURE;
+ }
+ strcpy(dev_name, dir->d_name);
+ object_type = strtok(dir->d_name, ".");
+ temp_obj = strtok(NULL, ".");
+ sscanf(temp_obj, "%d", &object_id);
+ FSLMC_VFIO_LOG(DEBUG, " - %s ", dev_name);
+
+ /* getting the device fd*/
+ dev_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, dev_name);
+ if (dev_fd < 0) {
+ FSLMC_VFIO_LOG(ERR, "VFIO_GROUP_GET_DEVICE_FD error"
+ " Device fd: %s, Group: %d",
+ dev_name, group->fd);
+ free(dev_name);
+ goto FAILURE;
+ }
+
+ free(dev_name);
+ vdev = &group->vfio_device[group->object_index++];
+ vdev->fd = dev_fd;
+ vdev->index = i;
+ i++;
+ /* Get Device inofrmation */
+ if (ioctl(vdev->fd, VFIO_DEVICE_GET_INFO, &device_info)) {
+ FSLMC_VFIO_LOG(ERR, "DPAA2 VFIO_DEVICE_GET_INFO fail");
+ goto FAILURE;
+ }
+ if (!strcmp(object_type, "dpni") ||
+ !strcmp(object_type, "dpseci")) {
+ struct rte_dpaa2_device *dev;
+
+ dev = malloc(sizeof(struct rte_dpaa2_device));
+ if (dev == NULL)
+ return -1;
+
+ memset(dev, 0, sizeof(*dev));
+ /* store hw_id of dpni/dpseci device */
+ dev->object_id = object_id;
+ dev->dev_type = (strcmp(object_type, "dpseci")) ?
+ DPAA2_MC_DPNI_DEVID : DPAA2_MC_DPSECI_DEVID;
+
+ FSLMC_VFIO_LOG(DEBUG, "DPAA2: Added [%s-%d]\n",
+ object_type, object_id);
+
+ fslmc_bus_add_device(dev);
+ }
+ if (!strcmp(object_type, "dpio")) {
+ ret = dpaa2_create_dpio_device(vdev,
+ &device_info,
+ object_id);
+ if (!ret)
+ dpio_count++;
+ }
+ if (!strcmp(object_type, "dpbp")) {
+ ret = dpaa2_create_dpbp_device(object_id);
+ if (!ret)
+ dpbp_count++;
+ }
+ }
+ closedir(d);
+
+ ret = dpaa2_affine_qbman_swp();
+ if (ret)
+ FSLMC_VFIO_LOG(DEBUG, "Error in affining qbman swp %d", ret);
+
+ FSLMC_VFIO_LOG(DEBUG, "DPAA2: Added dpbp_count = %d dpio_count=%d\n",
+ dpbp_count, dpio_count);
+ return 0;
+
+FAILURE:
+ if (d)
+ closedir(d);
+ if (rte_mcp_ptr_list) {
+ free(rte_mcp_ptr_list);
+ rte_mcp_ptr_list = NULL;
+ }
+
+ free(group->vfio_device);
+ group->vfio_device = NULL;
+ return -1;
+}
+
+int fslmc_vfio_setup_group(void)
+{
+ struct fslmc_vfio_group *group = NULL;
+ int groupid;
+ int ret, i;
+ char *container;
+ struct vfio_group_status status = { .argsz = sizeof(status) };
+
+ /* if already done once */
+ if (container_device_fd)
+ return 0;
+
+ container = getenv("DPRC");
+
+ if (container == NULL) {
+ FSLMC_VFIO_LOG(ERR, "VFIO container not set in env DPRC");
+ return -EOPNOTSUPP;
+ }
+
+ /* get group number */
+ ret = vfio_get_group_no(SYSFS_FSL_MC_DEVICES, container, &groupid);
+ if (ret == 0) {
+ RTE_LOG(WARNING, EAL, "%s not managed by VFIO, skipping\n",
+ container);
+ return -EOPNOTSUPP;
+ }
+
+ /* if negative, something failed */
+ if (ret < 0)
+ return ret;
+
+ FSLMC_VFIO_LOG(DEBUG, "VFIO iommu group id = %d", groupid);
+
+ /* Check if group already exists */
+ for (i = 0; i < VFIO_MAX_GRP; i++) {
+ group = &vfio_groups[i];
+ if (group->groupid == groupid) {
+ FSLMC_VFIO_LOG(ERR, "groupid already exists %d",
+ groupid);
+ return 0;
+ }
+ }
+
+ /* get the actual group fd */
+ ret = vfio_get_group_fd(groupid);
+ if (ret < 0)
+ return ret;
+ group->fd = ret;
+
+ /*
+ * at this point, we know that this group is viable (meaning,
+ * all devices are either bound to VFIO or not bound to anything)
+ */
+
+ ret = ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status);
+ if (ret) {
+ FSLMC_VFIO_LOG(ERR, " VFIO error getting group status");
+ close(group->fd);
+ return ret;
+ }
+
+ if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
+ FSLMC_VFIO_LOG(ERR, "VFIO group not viable");
+ close(group->fd);
+ return -EPERM;
+ }
+ /* Since Group is VIABLE, Store the groupid */
+ group->groupid = groupid;
+
+ /* check if group does not have a container yet */
+ if (!(status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
+ /* Now connect this IOMMU group to given container */
+ ret = vfio_connect_container(group);
+ if (ret) {
+ FSLMC_VFIO_LOG(ERR, "VFIO error connecting container"
+ " with groupid %d", groupid);
+ close(group->fd);
+ return ret;
+ }
+ }
+
+ /* Get Device information */
+ ret = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, container);
+ if (ret < 0) {
+ FSLMC_VFIO_LOG(ERR, "VFIO error getting device %s fd from"
+ " group %d", container, group->groupid);
+ return ret;
+ }
+ container_device_fd = ret;
+ FSLMC_VFIO_LOG(DEBUG, "VFIO Container FD is [0x%X]",
+ container_device_fd);
+
+ return 0;
+}
diff --git a/drivers/bus/fslmc/fslmc_vfio.h b/drivers/bus/fslmc/fslmc_vfio.h
new file mode 100644
index 00000000..53dd0b74
--- /dev/null
+++ b/drivers/bus/fslmc/fslmc_vfio.h
@@ -0,0 +1,82 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FSLMC_VFIO_H_
+#define _FSLMC_VFIO_H_
+
+#include "eal_vfio.h"
+
+#define DPAA2_VENDOR_ID 0x1957
+#define DPAA2_MC_DPNI_DEVID 7
+#define DPAA2_MC_DPSECI_DEVID 3
+
+#define VFIO_MAX_GRP 1
+
+typedef struct fslmc_vfio_device {
+ int fd; /* fslmc root container device ?? */
+ int index; /*index of child object */
+ struct fslmc_vfio_device *child; /* Child object */
+} fslmc_vfio_device;
+
+typedef struct fslmc_vfio_group {
+ int fd; /* /dev/vfio/"groupid" */
+ int groupid;
+ struct fslmc_vfio_container *container;
+ int object_index;
+ struct fslmc_vfio_device *vfio_device;
+} fslmc_vfio_group;
+
+typedef struct fslmc_vfio_container {
+ int fd; /* /dev/vfio/vfio */
+ int used;
+ int index; /* index in group list */
+ struct fslmc_vfio_group *group_list[VFIO_MAX_GRP];
+} fslmc_vfio_container;
+
+int vfio_dmamap_mem_region(
+ uint64_t vaddr,
+ uint64_t iova,
+ uint64_t size);
+
+int fslmc_vfio_setup_group(void);
+int fslmc_vfio_process_group(void);
+int rte_fslmc_vfio_dmamap(void);
+
+/* create dpio device */
+int dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
+ struct vfio_device_info *obj_info,
+ int object_id);
+
+int dpaa2_create_dpbp_device(int dpbp_id);
+
+#endif /* _FSLMC_VFIO_H_ */
diff --git a/drivers/bus/fslmc/mc/dpbp.c b/drivers/bus/fslmc/mc/dpbp.c
new file mode 100644
index 00000000..12f9180a
--- /dev/null
+++ b/drivers/bus/fslmc/mc/dpbp.c
@@ -0,0 +1,261 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpbp.h>
+#include <fsl_dpbp_cmd.h>
+
+int dpbp_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpbp_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ DPBP_CMD_OPEN(cmd, dpbp_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return err;
+}
+
+int dpbp_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpbp_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpbp_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ (void)(cfg); /* unused */
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+
+ return 0;
+}
+
+int dpbp_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ /* set object id to destroy */
+ CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpbp_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpbp_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpbp_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPBP_RSP_IS_ENABLED(cmd, *en);
+
+ return 0;
+}
+
+int dpbp_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpbp_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPBP_RSP_GET_ATTRIBUTES(cmd, attr);
+
+ return 0;
+}
+
+
+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ DPBP_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+
+ return 0;
+}
+
+int dpbp_get_num_free_bufs(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint32_t *num_free_bufs)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_FREE_BUFFERS_NUM,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPBP_RSP_GET_NUM_FREE_BUFS(cmd, *num_free_bufs);
+
+ return 0;
+}
diff --git a/drivers/bus/fslmc/mc/dpio.c b/drivers/bus/fslmc/mc/dpio.c
new file mode 100644
index 00000000..d84232a3
--- /dev/null
+++ b/drivers/bus/fslmc/mc/dpio.c
@@ -0,0 +1,279 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpio.h>
+#include <fsl_dpio_cmd.h>
+
+int dpio_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpio_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ DPIO_CMD_OPEN(cmd, dpio_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return 0;
+}
+
+int dpio_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpio_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpio_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ DPIO_CMD_CREATE(cmd, cfg);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+
+ return 0;
+}
+
+int dpio_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ /* set object id to destroy */
+ CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpio_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpio_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpio_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPIO_RSP_IS_ENABLED(cmd, *en);
+
+ return 0;
+}
+
+int dpio_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpio_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPIO_RSP_GET_ATTRIBUTES(cmd, attr);
+
+ return 0;
+}
+
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t sdest)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST,
+ cmd_flags,
+ token);
+ DPIO_CMD_SET_STASHING_DEST(cmd, sdest);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t *sdest)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPIO_RSP_GET_STASHING_DEST(cmd, *sdest);
+
+ return 0;
+}
+
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ DPIO_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+
+ return 0;
+}
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp.h b/drivers/bus/fslmc/mc/fsl_dpbp.h
new file mode 100644
index 00000000..d14e25d1
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpbp.h
@@ -0,0 +1,241 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPBP_H
+#define __FSL_DPBP_H
+
+/* Data Path Buffer Pool API
+ * Contains initialization APIs and runtime control APIs for DPBP
+ */
+
+struct fsl_mc_io;
+
+/**
+ * dpbp_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpbp_id: DPBP unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpbp_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpbp_id,
+ uint16_t *token);
+
+/**
+ * dpbp_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpbp_cfg - Structure representing DPBP configuration
+ * @options: place holder
+ */
+struct dpbp_cfg {
+ uint32_t options;
+};
+
+/**
+ * dpbp_create() - Create the DPBP object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: returned object id
+ *
+ * Create the DPBP object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpbp_cfg *cfg,
+ uint32_t *obj_id);
+
+/**
+ * dpbp_destroy() - Destroy the DPBP object and release all its resources.
+ * @dprc_token: Parent container token; '0' for default container
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpbp_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+/**
+ * dpbp_enable() - Enable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpbp_disable() - Disable the DPBP.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpbp_is_enabled() - Check if the DPBP is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+/**
+ * dpbp_reset() - Reset the DPBP, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpbp_attr - Structure representing DPBP attributes
+ * @id: DPBP object ID
+ * @bpid: Hardware buffer pool ID; should be used as an argument in
+ * acquire/release operations on buffers
+ */
+struct dpbp_attr {
+ int id;
+ uint16_t bpid;
+};
+
+/**
+ * dpbp_get_attributes - Retrieve DPBP attributes.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpbp_attr *attr);
+
+/**
+ * dpbp_get_api_version() - Get buffer pool API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path buffer pool API
+ * @minor_ver: Minor version of data path buffer pool API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+/**
+ * dpbp_get_num_free_bufs() - Get number of free buffers in the buffer pool
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @num_free_bufs: Number of free buffers
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_num_free_bufs(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint32_t *num_free_bufs);
+
+#endif /* __FSL_DPBP_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
new file mode 100644
index 00000000..1428dc6e
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpbp_cmd.h
@@ -0,0 +1,88 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPBP_CMD_H
+#define _FSL_DPBP_CMD_H
+
+/* DPBP Version */
+#define DPBP_VER_MAJOR 3
+#define DPBP_VER_MINOR 2
+
+/* Command IDs */
+#define DPBP_CMDID_CLOSE 0x8001
+#define DPBP_CMDID_OPEN 0x8041
+#define DPBP_CMDID_CREATE 0x9041
+#define DPBP_CMDID_DESTROY 0x9841
+#define DPBP_CMDID_GET_API_VERSION 0xa041
+
+#define DPBP_CMDID_ENABLE 0x0021
+#define DPBP_CMDID_DISABLE 0x0031
+#define DPBP_CMDID_GET_ATTR 0x0041
+#define DPBP_CMDID_RESET 0x0051
+#define DPBP_CMDID_IS_ENABLED 0x0061
+
+#define DPBP_CMDID_GET_FREE_BUFFERS_NUM 0x1b21
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPBP_CMD_OPEN(cmd, dpbp_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpbp_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPBP_RSP_IS_ENABLED(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPBP_RSP_GET_ATTRIBUTES(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, (attr)->bpid); \
+ MC_RSP_OP(cmd, 0, 32, 32, int, (attr)->id);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPBP_RSP_GET_API_VERSION(cmd, major, minor) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPBP_RSP_GET_NUM_FREE_BUFS(cmd, num_free_bufs) \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, num_free_bufs)
+
+#endif /* _FSL_DPBP_CMD_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpio.h b/drivers/bus/fslmc/mc/fsl_dpio.h
new file mode 100644
index 00000000..6d86f07d
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpio.h
@@ -0,0 +1,282 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPIO_H
+#define __FSL_DPIO_H
+
+/* Data Path I/O Portal API
+ * Contains initialization APIs and runtime control APIs for DPIO
+ */
+
+struct fsl_mc_io;
+
+/**
+ * dpio_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpio_id: DPIO unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpio_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and any MC portals
+ * assigned to the parent container; this token must be used in
+ * all subsequent commands for this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpio_id,
+ uint16_t *token);
+
+/**
+ * dpio_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * enum dpio_channel_mode - DPIO notification channel mode
+ * @DPIO_NO_CHANNEL: No support for notification channel
+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
+ * dedicated channel in the DPIO; user should point the queue's
+ * destination in the relevant interface to this DPIO
+ */
+enum dpio_channel_mode {
+ DPIO_NO_CHANNEL = 0,
+ DPIO_LOCAL_CHANNEL = 1,
+};
+
+/**
+ * struct dpio_cfg - Structure representing DPIO configuration
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ */
+struct dpio_cfg {
+ enum dpio_channel_mode channel_mode;
+ uint8_t num_priorities;
+};
+
+/**
+ * dpio_create() - Create the DPIO object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: returned object id
+ *
+ * Create the DPIO object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpio_cfg *cfg,
+ uint32_t *obj_id);
+
+/**
+ * dpio_destroy() - Destroy the DPIO object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+/**
+ * dpio_enable() - Enable the DPIO, allow I/O portal operations.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpio_is_enabled() - Check if the DPIO is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+/**
+ * dpio_reset() - Reset the DPIO, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpio_set_stashing_destination() - Set the stashing destination.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @sdest: stashing destination value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t sdest);
+
+/**
+ * dpio_get_stashing_destination() - Get the stashing destination..
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @sdest: Returns the stashing destination value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t *sdest);
+
+/**
+ * struct dpio_attr - Structure representing DPIO attributes
+ * @id: DPIO object ID
+ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area
+ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
+ * @qbman_portal_id: Software portal ID
+ * @channel_mode: Notification channel mode
+ * @num_priorities: Number of priorities for the notification channel (1-8);
+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
+ * @qbman_version: QBMAN version
+ */
+struct dpio_attr {
+ int id;
+ uint64_t qbman_portal_ce_offset;
+ uint64_t qbman_portal_ci_offset;
+ uint16_t qbman_portal_id;
+ enum dpio_channel_mode channel_mode;
+ uint8_t num_priorities;
+ uint32_t qbman_version;
+ uint32_t clk;
+};
+
+/**
+ * dpio_get_attributes() - Retrieve DPIO attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPIO object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise
+ */
+int dpio_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpio_attr *attr);
+
+/**
+ * dpio_get_api_version() - Get Data Path I/O API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path i/o API
+ * @minor_ver: Minor version of data path i/o API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpio_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+#endif /* __FSL_DPIO_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpio_cmd.h b/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
new file mode 100644
index 00000000..b1147de2
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpio_cmd.h
@@ -0,0 +1,122 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPIO_CMD_H
+#define _FSL_DPIO_CMD_H
+
+/* DPIO Version */
+#define DPIO_VER_MAJOR 4
+#define DPIO_VER_MINOR 2
+
+/* Command IDs */
+#define DPIO_CMDID_CLOSE 0x8001
+#define DPIO_CMDID_OPEN 0x8031
+#define DPIO_CMDID_CREATE 0x9031
+#define DPIO_CMDID_DESTROY 0x9831
+#define DPIO_CMDID_GET_API_VERSION 0xa031
+
+#define DPIO_CMDID_ENABLE 0x0021
+#define DPIO_CMDID_DISABLE 0x0031
+#define DPIO_CMDID_GET_ATTR 0x0041
+#define DPIO_CMDID_RESET 0x0051
+#define DPIO_CMDID_IS_ENABLED 0x0061
+
+#define DPIO_CMDID_SET_STASHING_DEST 0x1201
+#define DPIO_CMDID_GET_STASHING_DEST 0x1211
+#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL 0x1221
+#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL 0x1231
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPIO_CMD_OPEN(cmd, dpio_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, dpio_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPIO_CMD_CREATE(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 16, 2, enum dpio_channel_mode, \
+ cfg->channel_mode);\
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->num_priorities);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPIO_RSP_IS_ENABLED(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPIO_RSP_GET_ATTRIBUTES(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, (attr)->id);\
+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, (attr)->qbman_portal_id);\
+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, (attr)->num_priorities);\
+ MC_RSP_OP(cmd, 0, 56, 4, enum dpio_channel_mode,\
+ (attr)->channel_mode);\
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, (attr)->qbman_portal_ce_offset);\
+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, (attr)->qbman_portal_ci_offset);\
+ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, (attr)->qbman_version);\
+ MC_RSP_OP(cmd, 4, 0, 32, uint32_t, (attr)->clk);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPIO_CMD_SET_STASHING_DEST(cmd, sdest) \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, sdest)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPIO_RSP_GET_STASHING_DEST(cmd, sdest) \
+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, sdest)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, channel_index) \
+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, channel_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPIO_RSP_GET_API_VERSION(cmd, major, minor) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
+} while (0)
+
+#endif /* _FSL_DPIO_CMD_H */
diff --git a/drivers/bus/fslmc/mc/fsl_mc_cmd.h b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
new file mode 100644
index 00000000..d2252228
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
@@ -0,0 +1,239 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_MC_CMD_H
+#define __FSL_MC_CMD_H
+
+#define MC_CMD_NUM_OF_PARAMS 7
+
+#define MAKE_UMASK64(_width) \
+ ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \
+ (uint64_t)-1))
+
+static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val)
+{
+ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset);
+}
+
+static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width)
+{
+ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width));
+}
+
+struct mc_command {
+ uint64_t header;
+ uint64_t params[MC_CMD_NUM_OF_PARAMS];
+};
+
+/**
+ * enum mc_cmd_status - indicates MC status at command response
+ * @MC_CMD_STATUS_OK: Completed successfully
+ * @MC_CMD_STATUS_READY: Ready to be processed
+ * @MC_CMD_STATUS_AUTH_ERR: Authentication error
+ * @MC_CMD_STATUS_NO_PRIVILEGE: No privilege
+ * @MC_CMD_STATUS_DMA_ERR: DMA or I/O error
+ * @MC_CMD_STATUS_CONFIG_ERR: Configuration error
+ * @MC_CMD_STATUS_TIMEOUT: Operation timed out
+ * @MC_CMD_STATUS_NO_RESOURCE: No resources
+ * @MC_CMD_STATUS_NO_MEMORY: No memory available
+ * @MC_CMD_STATUS_BUSY: Device is busy
+ * @MC_CMD_STATUS_UNSUPPORTED_OP: Unsupported operation
+ * @MC_CMD_STATUS_INVALID_STATE: Invalid state
+ */
+enum mc_cmd_status {
+ MC_CMD_STATUS_OK = 0x0,
+ MC_CMD_STATUS_READY = 0x1,
+ MC_CMD_STATUS_AUTH_ERR = 0x3,
+ MC_CMD_STATUS_NO_PRIVILEGE = 0x4,
+ MC_CMD_STATUS_DMA_ERR = 0x5,
+ MC_CMD_STATUS_CONFIG_ERR = 0x6,
+ MC_CMD_STATUS_TIMEOUT = 0x7,
+ MC_CMD_STATUS_NO_RESOURCE = 0x8,
+ MC_CMD_STATUS_NO_MEMORY = 0x9,
+ MC_CMD_STATUS_BUSY = 0xA,
+ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB,
+ MC_CMD_STATUS_INVALID_STATE = 0xC
+};
+
+/* MC command flags */
+
+/**
+ * High priority flag
+ */
+#define MC_CMD_FLAG_PRI 0x00008000
+/**
+ * Command completion flag
+ */
+#define MC_CMD_FLAG_INTR_DIS 0x01000000
+
+/**
+ * Command ID field offset
+ */
+#define MC_CMD_HDR_CMDID_O 48
+/**
+ * Command ID field size
+ */
+#define MC_CMD_HDR_CMDID_S 16
+/**
+ * Token field offset
+ */
+#define MC_CMD_HDR_TOKEN_O 32
+/**
+ * Token field size
+ */
+#define MC_CMD_HDR_TOKEN_S 16
+/**
+ * Status field offset
+ */
+#define MC_CMD_HDR_STATUS_O 16
+/**
+ * Status field size
+ */
+#define MC_CMD_HDR_STATUS_S 8
+/**
+ * Flags field offset
+ */
+#define MC_CMD_HDR_FLAGS_O 0
+/**
+ * Flags field size
+ */
+#define MC_CMD_HDR_FLAGS_S 32
+/**
+ * Command flags mask
+ */
+#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00
+
+#define MC_CMD_HDR_READ_STATUS(_hdr) \
+ ((enum mc_cmd_status)mc_dec((_hdr), \
+ MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S))
+
+#define MC_CMD_HDR_READ_TOKEN(_hdr) \
+ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
+
+#define MC_PREP_OP(_ext, _param, _offset, _width, _type, _arg) \
+ ((_ext)[_param] |= cpu_to_le64(mc_enc((_offset), (_width), _arg)))
+
+#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \
+ (_arg = (_type)mc_dec(cpu_to_le64(_ext[_param]), (_offset), (_width)))
+
+#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
+ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
+
+#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
+ (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
+
+/* cmd, param, offset, width, type, arg_name */
+#define CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, object_id) \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, object_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, object_id)
+
+static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ uint64_t hdr;
+
+ hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id);
+ hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S,
+ (cmd_flags & MC_CMD_HDR_FLAGS_MASK));
+ hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token);
+ hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S,
+ MC_CMD_STATUS_READY);
+
+ return hdr;
+}
+
+/**
+ * mc_write_command - writes a command to a Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @cmd: pointer to a filled command
+ */
+static inline void mc_write_command(struct mc_command __iomem *portal,
+ struct mc_command *cmd)
+{
+ int i;
+ uint32_t word;
+ char *header = (char *)&portal->header;
+
+ /* copy command parameters into the portal */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ iowrite64(cmd->params[i], &portal->params[i]);
+
+ /* submit the command by writing the header */
+ word = (uint32_t)mc_dec(cmd->header, 32, 32);
+ iowrite32(word, (((uint32_t *)header) + 1));
+
+ word = (uint32_t)mc_dec(cmd->header, 0, 32);
+ iowrite32(word, (uint32_t *)header);
+}
+
+/**
+ * mc_read_response - reads the response for the last MC command from a
+ * Management Complex (MC) portal
+ *
+ * @portal: pointer to an MC portal
+ * @resp: pointer to command response buffer
+ *
+ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise.
+ */
+static inline enum mc_cmd_status mc_read_response(
+ struct mc_command __iomem *portal,
+ struct mc_command *resp)
+{
+ int i;
+ enum mc_cmd_status status;
+
+ /* Copy command response header from MC portal: */
+ resp->header = ioread64(&portal->header);
+ status = MC_CMD_HDR_READ_STATUS(resp->header);
+ if (status != MC_CMD_STATUS_OK)
+ return status;
+
+ /* Copy command response data from MC portal: */
+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
+ resp->params[i] = ioread64(&portal->params[i]);
+
+ return status;
+}
+
+#endif /* __FSL_MC_CMD_H */
diff --git a/drivers/bus/fslmc/mc/fsl_mc_sys.h b/drivers/bus/fslmc/mc/fsl_mc_sys.h
new file mode 100644
index 00000000..ebada60e
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_mc_sys.h
@@ -0,0 +1,105 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_MC_SYS_H
+#define _FSL_MC_SYS_H
+
+#ifdef __linux_driver__
+
+#include <linux/errno.h>
+#include <asm/io.h>
+#include <linux/slab.h>
+
+struct fsl_mc_io {
+ void *regs;
+};
+
+#ifndef ENOTSUP
+#define ENOTSUP 95
+#endif
+
+#define ioread64(_p) readq(_p)
+#define iowrite64(_v, _p) writeq(_v, _p)
+
+#else /* __linux_driver__ */
+
+#include <stdio.h>
+#include <libio.h>
+#include <stdint.h>
+#include <errno.h>
+#include <sys/uio.h>
+#include <linux/byteorder/little_endian.h>
+
+#define cpu_to_le64(x) __cpu_to_le64(x)
+#ifndef dmb
+#define dmb() {__asm__ __volatile__("" : : : "memory"); }
+#endif
+#define __iormb() dmb()
+#define __iowmb() dmb()
+#define __arch_getq(a) (*(volatile unsigned long *)(a))
+#define __arch_putq(v, a) (*(volatile unsigned long *)(a) = (v))
+#define __arch_putq32(v, a) (*(volatile unsigned int *)(a) = (v))
+#define readq(c) \
+ ({ uint64_t __v = __arch_getq(c); __iormb(); __v; })
+#define writeq(v, c) \
+ ({ uint64_t __v = v; __iowmb(); __arch_putq(__v, c); __v; })
+#define writeq32(v, c) \
+ ({ uint32_t __v = v; __iowmb(); __arch_putq32(__v, c); __v; })
+#define ioread64(_p) readq(_p)
+#define iowrite64(_v, _p) writeq(_v, _p)
+#define iowrite32(_v, _p) writeq32(_v, _p)
+#define __iomem
+
+struct fsl_mc_io {
+ void *regs;
+};
+
+#ifndef ENOTSUP
+#define ENOTSUP 95
+#endif
+
+/*GPP is supposed to use MC commands with low priority*/
+#define CMD_PRI_LOW 0 /*!< Low Priority command indication */
+
+struct mc_command;
+
+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
+
+#endif /* __linux_driver__ */
+
+#endif /* _FSL_MC_SYS_H */
diff --git a/drivers/bus/fslmc/mc/mc_sys.c b/drivers/bus/fslmc/mc/mc_sys.c
new file mode 100644
index 00000000..45731658
--- /dev/null
+++ b/drivers/bus/fslmc/mc/mc_sys.c
@@ -0,0 +1,114 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+
+#include <rte_spinlock.h>
+
+/** User space framework uses MC Portal in shared mode. Following change
+ * introduces lock in MC FLIB
+ */
+
+/**
+ * A static spinlock initializer.
+ */
+static rte_spinlock_t mc_portal_lock = RTE_SPINLOCK_INITIALIZER;
+
+static int mc_status_to_error(enum mc_cmd_status status)
+{
+ switch (status) {
+ case MC_CMD_STATUS_OK:
+ return 0;
+ case MC_CMD_STATUS_AUTH_ERR:
+ return -EACCES; /* Token error */
+ case MC_CMD_STATUS_NO_PRIVILEGE:
+ return -EPERM; /* Permission denied */
+ case MC_CMD_STATUS_DMA_ERR:
+ return -EIO; /* Input/Output error */
+ case MC_CMD_STATUS_CONFIG_ERR:
+ return -EINVAL; /* Device not configured */
+ case MC_CMD_STATUS_TIMEOUT:
+ return -ETIMEDOUT; /* Operation timed out */
+ case MC_CMD_STATUS_NO_RESOURCE:
+ return -ENAVAIL; /* Resource temporarily unavailable */
+ case MC_CMD_STATUS_NO_MEMORY:
+ return -ENOMEM; /* Cannot allocate memory */
+ case MC_CMD_STATUS_BUSY:
+ return -EBUSY; /* Device busy */
+ case MC_CMD_STATUS_UNSUPPORTED_OP:
+ return -ENOTSUP; /* Operation not supported by device */
+ case MC_CMD_STATUS_INVALID_STATE:
+ return -ENODEV; /* Invalid device state */
+ default:
+ break;
+ }
+
+ /* Not expected to reach here */
+ return -EINVAL;
+}
+
+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
+{
+ enum mc_cmd_status status;
+
+ if (!mc_io || !mc_io->regs)
+ return -EACCES;
+
+ /* --- Call lock function here in case portal is shared --- */
+ rte_spinlock_lock(&mc_portal_lock);
+
+ mc_write_command(mc_io->regs, cmd);
+
+ /* Spin until status changes */
+ do {
+ status = MC_CMD_HDR_READ_STATUS(ioread64(mc_io->regs));
+
+ /* --- Call wait function here to prevent blocking ---
+ * Change the loop condition accordingly to exit on timeout.
+ */
+ } while (status == MC_CMD_STATUS_READY);
+
+ /* Read the response back into the command buffer */
+ mc_read_response(mc_io->regs, cmd);
+
+ /* --- Call unlock function here in case portal is shared --- */
+ rte_spinlock_unlock(&mc_portal_lock);
+
+ return mc_status_to_error(status);
+}
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
new file mode 100644
index 00000000..2fb285c1
--- /dev/null
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -0,0 +1,139 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_logs.h>
+#include <fslmc_vfio.h>
+#include <mc/fsl_dpbp.h>
+#include "portal/dpaa2_hw_pvt.h"
+#include "portal/dpaa2_hw_dpio.h"
+
+TAILQ_HEAD(dpbp_device_list, dpaa2_dpbp_dev);
+static struct dpbp_device_list *dpbp_dev_list; /*!< DPBP device list */
+
+int
+dpaa2_create_dpbp_device(
+ int dpbp_id)
+{
+ struct dpaa2_dpbp_dev *dpbp_node;
+ int ret;
+
+ if (!dpbp_dev_list) {
+ dpbp_dev_list = malloc(sizeof(struct dpbp_device_list));
+ if (!dpbp_dev_list) {
+ PMD_INIT_LOG(ERR, "Memory alloc failed in DPBP list\n");
+ return -1;
+ }
+ /* Initialize the DPBP List */
+ TAILQ_INIT(dpbp_dev_list);
+ }
+
+ /* Allocate DPAA2 dpbp handle */
+ dpbp_node = (struct dpaa2_dpbp_dev *)
+ malloc(sizeof(struct dpaa2_dpbp_dev));
+ if (!dpbp_node) {
+ PMD_INIT_LOG(ERR, "Memory allocation failed for DPBP Device");
+ return -1;
+ }
+
+ /* Open the dpbp object */
+ dpbp_node->dpbp.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpbp_open(&dpbp_node->dpbp,
+ CMD_PRI_LOW, dpbp_id, &dpbp_node->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d",
+ ret);
+ free(dpbp_node);
+ return -1;
+ }
+
+ /* Clean the device first */
+ ret = dpbp_reset(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure cleaning dpbp device with"
+ " error code %d\n", ret);
+ dpbp_close(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
+ free(dpbp_node);
+ return -1;
+ }
+
+ dpbp_node->dpbp_id = dpbp_id;
+ rte_atomic16_init(&dpbp_node->in_use);
+
+ TAILQ_INSERT_HEAD(dpbp_dev_list, dpbp_node, next);
+
+ PMD_INIT_LOG(DEBUG, "Buffer pool resource initialized %d", dpbp_id);
+
+ return 0;
+}
+
+struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void)
+{
+ struct dpaa2_dpbp_dev *dpbp_dev = NULL;
+
+ /* Get DPBP dev handle from list using index */
+ TAILQ_FOREACH(dpbp_dev, dpbp_dev_list, next) {
+ if (dpbp_dev && rte_atomic16_test_and_set(&dpbp_dev->in_use))
+ break;
+ }
+
+ return dpbp_dev;
+}
+
+void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp)
+{
+ struct dpaa2_dpbp_dev *dpbp_dev = NULL;
+
+ /* Match DPBP handle and mark it free */
+ TAILQ_FOREACH(dpbp_dev, dpbp_dev_list, next) {
+ if (dpbp_dev == dpbp) {
+ rte_atomic16_dec(&dpbp_dev->in_use);
+ return;
+ }
+ }
+}
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
new file mode 100644
index 00000000..a1a58b9c
--- /dev/null
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -0,0 +1,445 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_logs.h>
+#include <fslmc_vfio.h>
+#include "dpaa2_hw_pvt.h"
+#include "dpaa2_hw_dpio.h"
+
+#define NUM_HOST_CPUS RTE_MAX_LCORE
+
+struct dpaa2_io_portal_t dpaa2_io_portal[RTE_MAX_LCORE];
+RTE_DEFINE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
+
+TAILQ_HEAD(dpio_device_list, dpaa2_dpio_dev);
+static struct dpio_device_list *dpio_dev_list; /*!< DPIO device list */
+static uint32_t io_space_count;
+
+/*Stashing Macros default for LS208x*/
+static int dpaa2_core_cluster_base = 0x04;
+static int dpaa2_cluster_sz = 2;
+
+/* For LS208X platform There are four clusters with following mapping:
+ * Cluster 1 (ID = x04) : CPU0, CPU1;
+ * Cluster 2 (ID = x05) : CPU2, CPU3;
+ * Cluster 3 (ID = x06) : CPU4, CPU5;
+ * Cluster 4 (ID = x07) : CPU6, CPU7;
+ */
+/* For LS108X platform There are two clusters with following mapping:
+ * Cluster 1 (ID = x02) : CPU0, CPU1, CPU2, CPU3;
+ * Cluster 2 (ID = x03) : CPU4, CPU5, CPU6, CPU7;
+ */
+
+/* Set the STASH Destination depending on Current CPU ID.
+ * e.g. Valid values of SDEST are 4,5,6,7. Where,
+ * CPU 0-1 will have SDEST 4
+ * CPU 2-3 will have SDEST 5.....and so on.
+ */
+static int
+dpaa2_core_cluster_sdest(int cpu_id)
+{
+ int x = cpu_id / dpaa2_cluster_sz;
+
+ if (x > 3)
+ x = 3;
+
+ return dpaa2_core_cluster_base + x;
+}
+
+static int
+configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
+{
+ struct qbman_swp_desc p_des;
+ struct dpio_attr attr;
+
+ dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
+ if (!dpio_dev->dpio) {
+ PMD_INIT_LOG(ERR, "Memory allocation failure\n");
+ return -1;
+ }
+
+ PMD_DRV_LOG(DEBUG, "\t Allocated DPIO Portal[%p]", dpio_dev->dpio);
+ dpio_dev->dpio->regs = dpio_dev->mc_portal;
+ if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
+ &dpio_dev->token)) {
+ PMD_INIT_LOG(ERR, "Failed to allocate IO space\n");
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
+ PMD_INIT_LOG(ERR, "Failed to reset dpio\n");
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
+ PMD_INIT_LOG(ERR, "Failed to Enable dpio\n");
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
+ dpio_dev->token, &attr)) {
+ PMD_INIT_LOG(ERR, "DPIO Get attribute failed\n");
+ dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ PMD_INIT_LOG(DEBUG, "Qbman Portal ID %d", attr.qbman_portal_id);
+ PMD_INIT_LOG(DEBUG, "Portal CE adr 0x%lX", attr.qbman_portal_ce_offset);
+ PMD_INIT_LOG(DEBUG, "Portal CI adr 0x%lX", attr.qbman_portal_ci_offset);
+
+ /* Configure & setup SW portal */
+ p_des.block = NULL;
+ p_des.idx = attr.qbman_portal_id;
+ p_des.cena_bar = (void *)(dpio_dev->qbman_portal_ce_paddr);
+ p_des.cinh_bar = (void *)(dpio_dev->qbman_portal_ci_paddr);
+ p_des.irq = -1;
+ p_des.qman_version = attr.qbman_version;
+
+ dpio_dev->sw_portal = qbman_swp_init(&p_des);
+ if (dpio_dev->sw_portal == NULL) {
+ PMD_DRV_LOG(ERR, " QBMan SW Portal Init failed\n");
+ dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
+ free(dpio_dev->dpio);
+ return -1;
+ }
+
+ PMD_INIT_LOG(DEBUG, "QBMan SW Portal 0x%p\n", dpio_dev->sw_portal);
+
+ return 0;
+}
+
+static int
+dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev)
+{
+ int sdest;
+ int cpu_id, ret;
+
+ /* Set the Stashing Destination */
+ cpu_id = rte_lcore_id();
+ if (cpu_id < 0) {
+ cpu_id = rte_get_master_lcore();
+ if (cpu_id < 0) {
+ RTE_LOG(ERR, PMD, "\tGetting CPU Index failed\n");
+ return -1;
+ }
+ }
+ /* Set the STASH Destination depending on Current CPU ID.
+ * Valid values of SDEST are 4,5,6,7. Where,
+ * CPU 0-1 will have SDEST 4
+ * CPU 2-3 will have SDEST 5.....and so on.
+ */
+
+ sdest = dpaa2_core_cluster_sdest(cpu_id);
+ PMD_DRV_LOG(DEBUG, "Portal= %d CPU= %u SDEST= %d",
+ dpio_dev->index, cpu_id, sdest);
+
+ ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW,
+ dpio_dev->token, sdest);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "%d ERROR in SDEST\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static inline struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(void)
+{
+ struct dpaa2_dpio_dev *dpio_dev = NULL;
+ int ret;
+
+ /* Get DPIO dev handle from list using index */
+ TAILQ_FOREACH(dpio_dev, dpio_dev_list, next) {
+ if (dpio_dev && rte_atomic16_test_and_set(&dpio_dev->ref_count))
+ break;
+ }
+ if (!dpio_dev)
+ return NULL;
+
+ PMD_DRV_LOG(DEBUG, "New Portal=0x%x (%d) affined thread - %lu",
+ dpio_dev, dpio_dev->index, syscall(SYS_gettid));
+
+ ret = dpaa2_configure_stashing(dpio_dev);
+ if (ret)
+ PMD_DRV_LOG(ERR, "dpaa2_configure_stashing failed");
+
+ return dpio_dev;
+}
+
+int
+dpaa2_affine_qbman_swp(void)
+{
+ unsigned int lcore_id = rte_lcore_id();
+ uint64_t tid = syscall(SYS_gettid);
+
+ if (lcore_id == LCORE_ID_ANY)
+ lcore_id = rte_get_master_lcore();
+ /* if the core id is not supported */
+ else if (lcore_id >= RTE_MAX_LCORE)
+ return -1;
+
+ if (dpaa2_io_portal[lcore_id].dpio_dev) {
+ PMD_DRV_LOG(INFO, "DPAA Portal=0x%x (%d) is being shared"
+ " between thread %lu and current %lu",
+ dpaa2_io_portal[lcore_id].dpio_dev,
+ dpaa2_io_portal[lcore_id].dpio_dev->index,
+ dpaa2_io_portal[lcore_id].net_tid,
+ tid);
+ RTE_PER_LCORE(_dpaa2_io).dpio_dev
+ = dpaa2_io_portal[lcore_id].dpio_dev;
+ rte_atomic16_inc(&dpaa2_io_portal
+ [lcore_id].dpio_dev->ref_count);
+ dpaa2_io_portal[lcore_id].net_tid = tid;
+
+ PMD_DRV_LOG(DEBUG, "Old Portal=0x%x (%d) affined thread - %lu",
+ dpaa2_io_portal[lcore_id].dpio_dev,
+ dpaa2_io_portal[lcore_id].dpio_dev->index,
+ tid);
+ return 0;
+ }
+
+ /* Populate the dpaa2_io_portal structure */
+ dpaa2_io_portal[lcore_id].dpio_dev = dpaa2_get_qbman_swp();
+
+ if (dpaa2_io_portal[lcore_id].dpio_dev) {
+ RTE_PER_LCORE(_dpaa2_io).dpio_dev
+ = dpaa2_io_portal[lcore_id].dpio_dev;
+ dpaa2_io_portal[lcore_id].net_tid = tid;
+
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int
+dpaa2_affine_qbman_swp_sec(void)
+{
+ unsigned int lcore_id = rte_lcore_id();
+ uint64_t tid = syscall(SYS_gettid);
+
+ if (lcore_id == LCORE_ID_ANY)
+ lcore_id = rte_get_master_lcore();
+ /* if the core id is not supported */
+ else if (lcore_id >= RTE_MAX_LCORE)
+ return -1;
+
+ if (dpaa2_io_portal[lcore_id].sec_dpio_dev) {
+ PMD_DRV_LOG(INFO, "DPAA Portal=0x%x (%d) is being shared"
+ " between thread %lu and current %lu",
+ dpaa2_io_portal[lcore_id].sec_dpio_dev,
+ dpaa2_io_portal[lcore_id].sec_dpio_dev->index,
+ dpaa2_io_portal[lcore_id].sec_tid,
+ tid);
+ RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
+ = dpaa2_io_portal[lcore_id].sec_dpio_dev;
+ rte_atomic16_inc(&dpaa2_io_portal
+ [lcore_id].sec_dpio_dev->ref_count);
+ dpaa2_io_portal[lcore_id].sec_tid = tid;
+
+ PMD_DRV_LOG(DEBUG, "Old Portal=0x%x (%d) affined thread - %lu",
+ dpaa2_io_portal[lcore_id].sec_dpio_dev,
+ dpaa2_io_portal[lcore_id].sec_dpio_dev->index,
+ tid);
+ return 0;
+ }
+
+ /* Populate the dpaa2_io_portal structure */
+ dpaa2_io_portal[lcore_id].sec_dpio_dev = dpaa2_get_qbman_swp();
+
+ if (dpaa2_io_portal[lcore_id].sec_dpio_dev) {
+ RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
+ = dpaa2_io_portal[lcore_id].sec_dpio_dev;
+ dpaa2_io_portal[lcore_id].sec_tid = tid;
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+int
+dpaa2_create_dpio_device(struct fslmc_vfio_device *vdev,
+ struct vfio_device_info *obj_info,
+ int object_id)
+{
+ struct dpaa2_dpio_dev *dpio_dev;
+ struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
+
+ if (obj_info->num_regions < NUM_DPIO_REGIONS) {
+ PMD_INIT_LOG(ERR, "ERROR, Not sufficient number "
+ "of DPIO regions.\n");
+ return -1;
+ }
+
+ if (!dpio_dev_list) {
+ dpio_dev_list = malloc(sizeof(struct dpio_device_list));
+ if (!dpio_dev_list) {
+ PMD_INIT_LOG(ERR, "Memory alloc failed in DPIO list\n");
+ return -1;
+ }
+
+ /* Initialize the DPIO List */
+ TAILQ_INIT(dpio_dev_list);
+ }
+
+ dpio_dev = malloc(sizeof(struct dpaa2_dpio_dev));
+ if (!dpio_dev) {
+ PMD_INIT_LOG(ERR, "Memory allocation failed for DPIO Device\n");
+ return -1;
+ }
+
+ PMD_DRV_LOG(INFO, "\t Aloocated DPIO [%p]", dpio_dev);
+ dpio_dev->dpio = NULL;
+ dpio_dev->hw_id = object_id;
+ dpio_dev->vfio_fd = vdev->fd;
+ rte_atomic16_init(&dpio_dev->ref_count);
+ /* Using single portal for all devices */
+ dpio_dev->mc_portal = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+
+ reg_info.index = 0;
+ if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
+ free(dpio_dev);
+ return -1;
+ }
+
+ PMD_DRV_LOG(DEBUG, "\t Region Offset = %llx", reg_info.offset);
+ PMD_DRV_LOG(DEBUG, "\t Region Size = %llx", reg_info.size);
+ dpio_dev->ce_size = reg_info.size;
+ dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ dpio_dev->vfio_fd, reg_info.offset);
+
+ /* Create Mapping for QBMan Cache Enabled area. This is a fix for
+ * SMMU fault for DQRR statshing transaction.
+ */
+ if (vfio_dmamap_mem_region(dpio_dev->qbman_portal_ce_paddr,
+ reg_info.offset, reg_info.size)) {
+ PMD_INIT_LOG(ERR, "DMAMAP for Portal CE area failed.\n");
+ free(dpio_dev);
+ return -1;
+ }
+
+ reg_info.index = 1;
+ if (ioctl(dpio_dev->vfio_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
+ PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
+ free(dpio_dev);
+ return -1;
+ }
+
+ PMD_DRV_LOG(DEBUG, "\t Region Offset = %llx", reg_info.offset);
+ PMD_DRV_LOG(DEBUG, "\t Region Size = %llx", reg_info.size);
+ dpio_dev->ci_size = reg_info.size;
+ dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size,
+ PROT_WRITE | PROT_READ, MAP_SHARED,
+ dpio_dev->vfio_fd, reg_info.offset);
+
+ if (configure_dpio_qbman_swp(dpio_dev)) {
+ PMD_INIT_LOG(ERR,
+ "Fail to configure the dpio qbman portal for %d\n",
+ dpio_dev->hw_id);
+ free(dpio_dev);
+ return -1;
+ }
+
+ io_space_count++;
+ dpio_dev->index = io_space_count;
+ TAILQ_INSERT_HEAD(dpio_dev_list, dpio_dev, next);
+
+ return 0;
+}
+
+void
+dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage)
+{
+ int i = 0;
+
+ for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
+ if (q_storage->dq_storage[i])
+ rte_free(q_storage->dq_storage[i]);
+ }
+}
+
+int
+dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage)
+{
+ int i = 0;
+
+ for (i = 0; i < NUM_DQS_PER_QUEUE; i++) {
+ q_storage->dq_storage[i] = rte_malloc(NULL,
+ DPAA2_DQRR_RING_SIZE * sizeof(struct qbman_result),
+ RTE_CACHE_LINE_SIZE);
+ if (!q_storage->dq_storage[i])
+ goto fail;
+ }
+ return 0;
+fail:
+ i -= 1;
+ while (i >= 0)
+ rte_free(q_storage->dq_storage[i]);
+
+ return -1;
+}
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
new file mode 100644
index 00000000..f2e11680
--- /dev/null
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -0,0 +1,70 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_HW_DPIO_H_
+#define _DPAA2_HW_DPIO_H_
+
+#include <mc/fsl_dpio.h>
+#include <mc/fsl_mc_sys.h>
+
+struct dpaa2_io_portal_t {
+ struct dpaa2_dpio_dev *dpio_dev;
+ struct dpaa2_dpio_dev *sec_dpio_dev;
+ uint64_t net_tid;
+ uint64_t sec_tid;
+};
+
+/*! Global per thread DPIO portal */
+RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
+
+#define DPAA2_PER_LCORE_DPIO RTE_PER_LCORE(_dpaa2_io).dpio_dev
+#define DPAA2_PER_LCORE_PORTAL DPAA2_PER_LCORE_DPIO->sw_portal
+
+#define DPAA2_PER_LCORE_SEC_DPIO RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
+#define DPAA2_PER_LCORE_SEC_PORTAL DPAA2_PER_LCORE_SEC_DPIO->sw_portal
+
+/* Affine a DPIO portal to current processing thread */
+int dpaa2_affine_qbman_swp(void);
+
+/* Affine additional DPIO portal to current crypto processing thread */
+int dpaa2_affine_qbman_swp_sec(void);
+
+/* allocate memory for FQ - dq storage */
+int
+dpaa2_alloc_dq_storage(struct queue_storage_info_t *q_storage);
+
+/* free memory for FQ- dq storage */
+void
+dpaa2_free_dq_storage(struct queue_storage_info_t *q_storage);
+
+#endif /* _DPAA2_HW_DPIO_H_ */
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
new file mode 100644
index 00000000..c0223734
--- /dev/null
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -0,0 +1,270 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_HW_PVT_H_
+#define _DPAA2_HW_PVT_H_
+
+#include <mc/fsl_mc_sys.h>
+#include <fsl_qbman_portal.h>
+
+#ifndef false
+#define false 0
+#endif
+#ifndef true
+#define true 1
+#endif
+#define lower_32_bits(x) ((uint32_t)(x))
+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
+
+#ifndef ETH_VLAN_HLEN
+#define ETH_VLAN_HLEN 4 /** < Vlan Header Length */
+#endif
+
+#define MAX_TX_RING_SLOTS 8
+ /** <Maximum number of slots available in TX ring*/
+
+#define DPAA2_DQRR_RING_SIZE 16
+ /** <Maximum number of slots available in RX ring*/
+
+#define MC_PORTAL_INDEX 0
+#define NUM_DPIO_REGIONS 2
+#define NUM_DQS_PER_QUEUE 2
+
+/* Maximum release/acquire from QBMAN */
+#define DPAA2_MBUF_MAX_ACQ_REL 7
+
+#define MAX_BPID 256
+#define DPAA2_MBUF_HW_ANNOTATION 64
+#define DPAA2_FD_PTA_SIZE 64
+
+#if (DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
+#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
+#endif
+
+/* we will re-use the HEADROOM for annotation in RX */
+#define DPAA2_HW_BUF_RESERVE 0
+#define DPAA2_PACKET_LAYOUT_ALIGN 64 /*changing from 256 */
+
+struct dpaa2_dpio_dev {
+ TAILQ_ENTRY(dpaa2_dpio_dev) next;
+ /**< Pointer to Next device instance */
+ uint16_t index; /**< Index of a instance in the list */
+ rte_atomic16_t ref_count;
+ /**< How many thread contexts are sharing this.*/
+ struct fsl_mc_io *dpio; /** handle to DPIO portal object */
+ uint16_t token;
+ struct qbman_swp *sw_portal; /** SW portal object */
+ const struct qbman_result *dqrr[4];
+ /**< DQRR Entry for this SW portal */
+ void *mc_portal; /**< MC Portal for configuring this device */
+ uintptr_t qbman_portal_ce_paddr;
+ /**< Physical address of Cache Enabled Area */
+ uintptr_t ce_size; /**< Size of the CE region */
+ uintptr_t qbman_portal_ci_paddr;
+ /**< Physical address of Cache Inhibit Area */
+ uintptr_t ci_size; /**< Size of the CI region */
+ int32_t vfio_fd; /**< File descriptor received via VFIO */
+ int32_t hw_id; /**< An unique ID of this DPIO device instance */
+};
+
+struct dpaa2_dpbp_dev {
+ TAILQ_ENTRY(dpaa2_dpbp_dev) next;
+ /**< Pointer to Next device instance */
+ struct fsl_mc_io dpbp; /** handle to DPBP portal object */
+ uint16_t token;
+ rte_atomic16_t in_use;
+ uint32_t dpbp_id; /*HW ID for DPBP object */
+};
+
+struct queue_storage_info_t {
+ struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
+};
+
+struct dpaa2_queue {
+ struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
+ void *dev;
+ int32_t eventfd; /*!< Event Fd of this queue */
+ uint32_t fqid; /*!< Unique ID of this queue */
+ uint8_t tc_index; /*!< traffic class identifier */
+ uint16_t flow_id; /*!< To be used by DPAA2 frmework */
+ uint64_t rx_pkts;
+ uint64_t tx_pkts;
+ uint64_t err_pkts;
+ struct queue_storage_info_t *q_storage;
+};
+
+/*! Global MCP list */
+extern void *(*rte_mcp_ptr_list);
+
+/* Refer to Table 7-3 in SEC BG */
+struct qbman_fle {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t length;
+ /* FMT must be 00, MSB is final bit */
+ uint32_t fin_bpid_offset;
+ uint32_t frc;
+ uint32_t reserved[3]; /* Not used currently */
+};
+
+/*Macros to define operations on FD*/
+#define DPAA2_SET_FD_ADDR(fd, addr) do { \
+ fd->simple.addr_lo = lower_32_bits((uint64_t)(addr)); \
+ fd->simple.addr_hi = upper_32_bits((uint64_t)(addr)); \
+} while (0)
+#define DPAA2_SET_FD_LEN(fd, length) (fd)->simple.len = length
+#define DPAA2_SET_FD_BPID(fd, bpid) ((fd)->simple.bpid_offset |= bpid)
+#define DPAA2_SET_FD_IVP(fd) ((fd->simple.bpid_offset |= 0x00004000))
+#define DPAA2_SET_FD_OFFSET(fd, offset) \
+ ((fd->simple.bpid_offset |= (uint32_t)(offset) << 16))
+#define DPAA2_SET_FD_INTERNAL_JD(fd, len) fd->simple.frc = (0x80000000 | (len))
+#define DPAA2_SET_FD_FRC(fd, frc) fd->simple.frc = frc
+#define DPAA2_RESET_FD_CTRL(fd) (fd)->simple.ctrl = 0
+
+#define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= (asal << 16))
+#define DPAA2_SET_FD_FLC(fd, addr) do { \
+ fd->simple.flc_lo = lower_32_bits((uint64_t)(addr)); \
+ fd->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \
+} while (0)
+#define DPAA2_SET_FLE_INTERNAL_JD(fle, len) (fle->frc = (0x80000000 | (len)))
+#define DPAA2_GET_FLE_ADDR(fle) \
+ (uint64_t)((((uint64_t)(fle->addr_hi)) << 32) + fle->addr_lo)
+#define DPAA2_SET_FLE_ADDR(fle, addr) do { \
+ fle->addr_lo = lower_32_bits((uint64_t)addr); \
+ fle->addr_hi = upper_32_bits((uint64_t)addr); \
+} while (0)
+#define DPAA2_SET_FLE_OFFSET(fle, offset) \
+ ((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
+#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (uint64_t)bpid)
+#define DPAA2_GET_FLE_BPID(fle, bpid) (fle->fin_bpid_offset & 0x000000ff)
+#define DPAA2_SET_FLE_FIN(fle) (fle->fin_bpid_offset |= (uint64_t)1 << 31)
+#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000))
+#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
+ (fd->simple.bpid_offset |= (uint32_t)1 << 28)
+#define DPAA2_GET_FD_ADDR(fd) \
+((uint64_t)((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))
+
+#define DPAA2_GET_FD_LEN(fd) ((fd)->simple.len)
+#define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF))
+#define DPAA2_GET_FD_IVP(fd) ((fd->simple.bpid_offset & 0x00004000) >> 14)
+#define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16)
+#define DPAA2_SET_FLE_SG_EXT(fle) (fle->fin_bpid_offset |= (uint64_t)1 << 29)
+#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
+ ((fle->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)
+
+#define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \
+ ((struct rte_mbuf *)((uint64_t)(buf) - (meta_data_size)))
+
+#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)
+
+/* Only Enqueue Error responses will be
+ * pushed on FQID_ERR of Enqueue FQ
+ */
+#define DPAA2_EQ_RESP_ERR_FQ 0
+/* All Enqueue responses will be pushed on address
+ * set with qbman_eq_desc_set_response
+ */
+#define DPAA2_EQ_RESP_ALWAYS 1
+
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused));
+/* todo - this is costly, need to write a fast coversion routine */
+static void *dpaa2_mem_ptov(phys_addr_t paddr)
+{
+ const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+ int i;
+
+ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
+ if (paddr >= memseg[i].phys_addr &&
+ (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
+ return (void *)(memseg[i].addr_64
+ + (paddr - memseg[i].phys_addr));
+ }
+ return NULL;
+}
+
+static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
+static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
+{
+ const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+ int i;
+
+ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
+ if (vaddr >= memseg[i].addr_64 &&
+ vaddr < memseg[i].addr_64 + memseg[i].len)
+ return memseg[i].phys_addr
+ + (vaddr - memseg[i].addr_64);
+ }
+ return (phys_addr_t)(NULL);
+}
+
+/**
+ * When we are using Physical addresses as IO Virtual Addresses,
+ * Need to call conversion routines dpaa2_mem_vtop & dpaa2_mem_ptov
+ * whereever required.
+ * These routines are called with help of below MACRO's
+ */
+
+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_physaddr)
+#define DPAA2_OP_VADDR_TO_IOVA(op) (op->phys_addr)
+
+/**
+ * macro to convert Virtual address to IOVA
+ */
+#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr))
+
+/**
+ * macro to convert IOVA to Virtual address
+ */
+#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova))
+
+/**
+ * macro to convert modify the memory containing IOVA to Virtual address
+ */
+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
+ {_mem = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); }
+
+#else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+
+#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
+#define DPAA2_OP_VADDR_TO_IOVA(op) (op)
+#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr)
+#define DPAA2_IOVA_TO_VADDR(_iova) (_iova)
+#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)
+
+#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
+
+struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
+void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
+
+#endif
diff --git a/drivers/bus/fslmc/qbman/include/compat.h b/drivers/bus/fslmc/qbman/include/compat.h
new file mode 100644
index 00000000..41effe37
--- /dev/null
+++ b/drivers/bus/fslmc/qbman/include/compat.h
@@ -0,0 +1,410 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2008-2016 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HEADER_COMPAT_H
+#define HEADER_COMPAT_H
+
+#include <sched.h>
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdint.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <errno.h>
+#include <string.h>
+#include <pthread.h>
+#include <net/ethernet.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <ctype.h>
+#include <malloc.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <limits.h>
+#include <assert.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include <error.h>
+#include <rte_atomic.h>
+
+/* The following definitions are primarily to allow the single-source driver
+ * interfaces to be included by arbitrary program code. Ie. for interfaces that
+ * are also available in kernel-space, these definitions provide compatibility
+ * with certain attributes and types used in those interfaces.
+ */
+
+/* Required compiler attributes */
+#define __user
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+#define __stringify_1(x) #x
+#define __stringify(x) __stringify_1(x)
+
+#ifdef ARRAY_SIZE
+#undef ARRAY_SIZE
+#endif
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/* Required types */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef uint64_t dma_addr_t;
+typedef cpu_set_t cpumask_t;
+typedef u32 compat_uptr_t;
+
+static inline void __user *compat_ptr(compat_uptr_t uptr)
+{
+ return (void __user *)(unsigned long)uptr;
+}
+
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+ return (u32)(unsigned long)uptr;
+}
+
+/* I/O operations */
+static inline u32 in_be32(volatile void *__p)
+{
+ volatile u32 *p = __p;
+ return *p;
+}
+
+static inline void out_be32(volatile void *__p, u32 val)
+{
+ volatile u32 *p = __p;
+ *p = val;
+}
+
+/* Debugging */
+#define prflush(fmt, args...) \
+ do { \
+ printf(fmt, ##args); \
+ fflush(stdout); \
+ } while (0)
+#define pr_crit(fmt, args...) prflush("CRIT:" fmt, ##args)
+#define pr_err(fmt, args...) prflush("ERR:" fmt, ##args)
+#define pr_warn(fmt, args...) prflush("WARN:" fmt, ##args)
+#define pr_info(fmt, args...) prflush(fmt, ##args)
+
+#ifdef pr_debug
+#undef pr_debug
+#endif
+#define pr_debug(fmt, args...) {}
+#define might_sleep_if(c) {}
+#define msleep(x) {}
+#define WARN_ON(c, str) \
+do { \
+ static int warned_##__LINE__; \
+ if ((c) && !warned_##__LINE__) { \
+ pr_warn("%s\n", str); \
+ pr_warn("(%s:%d)\n", __FILE__, __LINE__); \
+ warned_##__LINE__ = 1; \
+ } \
+} while (0)
+#ifdef CONFIG_BUGON
+#define QBMAN_BUG_ON(c) WARN_ON(c, "BUG")
+#else
+#define QBMAN_BUG_ON(c) {}
+#endif
+
+#define ALIGN(x, a) (((x) + ((typeof(x))(a) - 1)) & ~((typeof(x))(a) - 1))
+
+/****************/
+/* Linked-lists */
+/****************/
+
+struct list_head {
+ struct list_head *prev;
+ struct list_head *next;
+};
+
+#define LIST_HEAD(n) \
+struct list_head n = { \
+ .prev = &n, \
+ .next = &n \
+}
+
+#define INIT_LIST_HEAD(p) \
+do { \
+ struct list_head *__p298 = (p); \
+ __p298->next = __p298; \
+ __p298->prev = __p298->next; \
+} while (0)
+#define list_entry(node, type, member) \
+ (type *)((void *)node - offsetof(type, member))
+#define list_empty(p) \
+({ \
+ const struct list_head *__p298 = (p); \
+ ((__p298->next == __p298) && (__p298->prev == __p298)); \
+})
+#define list_add(p, l) \
+do { \
+ struct list_head *__p298 = (p); \
+ struct list_head *__l298 = (l); \
+ __p298->next = __l298->next; \
+ __p298->prev = __l298; \
+ __l298->next->prev = __p298; \
+ __l298->next = __p298; \
+} while (0)
+#define list_add_tail(p, l) \
+do { \
+ struct list_head *__p298 = (p); \
+ struct list_head *__l298 = (l); \
+ __p298->prev = __l298->prev; \
+ __p298->next = __l298; \
+ __l298->prev->next = __p298; \
+ __l298->prev = __p298; \
+} while (0)
+#define list_for_each(i, l) \
+ for (i = (l)->next; i != (l); i = i->next)
+#define list_for_each_safe(i, j, l) \
+ for (i = (l)->next, j = i->next; i != (l); \
+ i = j, j = i->next)
+#define list_for_each_entry(i, l, name) \
+ for (i = list_entry((l)->next, typeof(*i), name); &i->name != (l); \
+ i = list_entry(i->name.next, typeof(*i), name))
+#define list_for_each_entry_safe(i, j, l, name) \
+ for (i = list_entry((l)->next, typeof(*i), name), \
+ j = list_entry(i->name.next, typeof(*j), name); \
+ &i->name != (l); \
+ i = j, j = list_entry(j->name.next, typeof(*j), name))
+#define list_del(i) \
+do { \
+ (i)->next->prev = (i)->prev; \
+ (i)->prev->next = (i)->next; \
+} while (0)
+
+/* Other miscellaneous interfaces our APIs depend on; */
+
+#define lower_32_bits(x) ((u32)(x))
+#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
+
+/* Compiler/type stuff */
+typedef unsigned int gfp_t;
+typedef uint32_t phandle;
+
+#define __iomem
+#define EINTR 4
+#define ENODEV 19
+#define GFP_KERNEL 0
+#define __raw_readb(p) (*(const volatile unsigned char *)(p))
+#define __raw_readl(p) (*(const volatile unsigned int *)(p))
+#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }
+
+/* memcpy() stuff - when you know alignments in advance */
+#ifdef CONFIG_TRY_BETTER_MEMCPY
+static inline void copy_words(void *dest, const void *src, size_t sz)
+{
+ u32 *__dest = dest;
+ const u32 *__src = src;
+ size_t __sz = sz >> 2;
+
+ QBMAN_BUG_ON((unsigned long)dest & 0x3);
+ QBMAN_BUG_ON((unsigned long)src & 0x3);
+ QBMAN_BUG_ON(sz & 0x3);
+ while (__sz--)
+ *(__dest++) = *(__src++);
+}
+
+static inline void copy_shorts(void *dest, const void *src, size_t sz)
+{
+ u16 *__dest = dest;
+ const u16 *__src = src;
+ size_t __sz = sz >> 1;
+
+ QBMAN_BUG_ON((unsigned long)dest & 0x1);
+ QBMAN_BUG_ON((unsigned long)src & 0x1);
+ QBMAN_BUG_ON(sz & 0x1);
+ while (__sz--)
+ *(__dest++) = *(__src++);
+}
+
+static inline void copy_bytes(void *dest, const void *src, size_t sz)
+{
+ u8 *__dest = dest;
+ const u8 *__src = src;
+
+ while (sz--)
+ *(__dest++) = *(__src++);
+}
+#else
+#define copy_words memcpy
+#define copy_shorts memcpy
+#define copy_bytes memcpy
+#endif
+
+/* Completion stuff */
+#define DECLARE_COMPLETION(n) int n = 0
+#define complete(n) { *n = 1; }
+#define wait_for_completion(n) \
+do { \
+ while (!*n) { \
+ bman_poll(); \
+ qman_poll(); \
+ } \
+ *n = 0; \
+} while (0)
+
+/* Allocator stuff */
+#define kmalloc(sz, t) malloc(sz)
+#define vmalloc(sz) malloc(sz)
+#define kfree(p) { if (p) free(p); }
+static inline void *kzalloc(size_t sz, gfp_t __foo __rte_unused)
+{
+ void *ptr = malloc(sz);
+
+ if (ptr)
+ memset(ptr, 0, sz);
+ return ptr;
+}
+
+static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
+{
+ void *p;
+
+ if (posix_memalign(&p, 4096, 4096))
+ return 0;
+ memset(p, 0, 4096);
+ return (unsigned long)p;
+}
+
+static inline void free_page(unsigned long p)
+{
+ free((void *)p);
+}
+
+/* Bitfield stuff. */
+#define BITS_PER_ULONG (sizeof(unsigned long) << 3)
+#define SHIFT_PER_ULONG (((1 << 5) == BITS_PER_ULONG) ? 5 : 6)
+#define BITS_MASK(idx) ((unsigned long)1 << ((idx) & (BITS_PER_ULONG - 1)))
+#define BITS_IDX(idx) ((idx) >> SHIFT_PER_ULONG)
+static inline unsigned long test_bits(unsigned long mask,
+ volatile unsigned long *p)
+{
+ return *p & mask;
+}
+
+static inline int test_bit(int idx, volatile unsigned long *bits)
+{
+ return test_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
+}
+
+static inline void set_bits(unsigned long mask, volatile unsigned long *p)
+{
+ *p |= mask;
+}
+
+static inline void set_bit(int idx, volatile unsigned long *bits)
+{
+ set_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
+}
+
+static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
+{
+ *p &= ~mask;
+}
+
+static inline void clear_bit(int idx, volatile unsigned long *bits)
+{
+ clear_bits(BITS_MASK(idx), bits + BITS_IDX(idx));
+}
+
+static inline unsigned long test_and_set_bits(unsigned long mask,
+ volatile unsigned long *p)
+{
+ unsigned long ret = test_bits(mask, p);
+
+ set_bits(mask, p);
+ return ret;
+}
+
+static inline int test_and_set_bit(int idx, volatile unsigned long *bits)
+{
+ int ret = test_bit(idx, bits);
+
+ set_bit(idx, bits);
+ return ret;
+}
+
+static inline int test_and_clear_bit(int idx, volatile unsigned long *bits)
+{
+ int ret = test_bit(idx, bits);
+
+ clear_bit(idx, bits);
+ return ret;
+}
+
+static inline int find_next_zero_bit(unsigned long *bits, int limit, int idx)
+{
+ while ((++idx < limit) && test_bit(idx, bits))
+ ;
+ return idx;
+}
+
+static inline int find_first_zero_bit(unsigned long *bits, int limit)
+{
+ int idx = 0;
+
+ while (test_bit(idx, bits) && (++idx < limit))
+ ;
+ return idx;
+}
+
+static inline u64 div64_u64(u64 n, u64 d)
+{
+ return n / d;
+}
+
+#define atomic_t rte_atomic32_t
+#define atomic_read(v) rte_atomic32_read(v)
+#define atomic_set(v, i) rte_atomic32_set(v, i)
+
+#define atomic_inc(v) rte_atomic32_add(v, 1)
+#define atomic_dec(v) rte_atomic32_sub(v, 1)
+
+#define atomic_inc_and_test(v) rte_atomic32_inc_and_test(v)
+#define atomic_dec_and_test(v) rte_atomic32_dec_and_test(v)
+
+#define atomic_inc_return(v) rte_atomic32_add_return(v, 1)
+#define atomic_dec_return(v) rte_atomic32_sub_return(v, 1)
+#define atomic_sub_and_test(i, v) (rte_atomic32_sub_return(v, i) == 0)
+
+#endif /* HEADER_COMPAT_H */
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
new file mode 100644
index 00000000..ee4b772c
--- /dev/null
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -0,0 +1,160 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_QBMAN_BASE_H
+#define _FSL_QBMAN_BASE_H
+
+typedef uint64_t dma_addr_t;
+
+/**
+ * DOC: QBMan basic structures
+ *
+ * The QBMan block descriptor, software portal descriptor and Frame descriptor
+ * are defined here.
+ *
+ */
+
+#define QMAN_REV_4000 0x04000000
+#define QMAN_REV_4100 0x04010000
+#define QMAN_REV_4101 0x04010001
+
+/**
+ * struct qbman_block_desc - qbman block descriptor structure
+ * @ccsr_reg_bar: CCSR register map.
+ * @irq_rerr: Recoverable error interrupt line.
+ * @irq_nrerr: Non-recoverable error interrupt line
+ *
+ * Descriptor for a QBMan instance on the SoC. On partitions/targets that do not
+ * control this QBMan instance, these values may simply be place-holders. The
+ * idea is simply that we be able to distinguish between them, eg. so that SWP
+ * descriptors can identify which QBMan instance they belong to.
+ */
+struct qbman_block_desc {
+ void *ccsr_reg_bar;
+ int irq_rerr;
+ int irq_nrerr;
+};
+
+enum qbman_eqcr_mode {
+ qman_eqcr_vb_ring = 2, /* Valid bit, with eqcr in ring mode */
+ qman_eqcr_vb_array, /* Valid bit, with eqcr in array mode */
+};
+
+/**
+ * struct qbman_swp_desc - qbman software portal descriptor structure
+ * @block: The QBMan instance.
+ * @cena_bar: Cache-enabled portal register map.
+ * @cinh_bar: Cache-inhibited portal register map.
+ * @irq: -1 if unused (or unassigned)
+ * @idx: SWPs within a QBMan are indexed. -1 if opaque to the user.
+ * @qman_version: the qman version.
+ * @eqcr_mode: Select the eqcr mode, currently only valid bit ring mode and
+ * valid bit array mode are supported.
+ *
+ * Descriptor for a QBMan software portal, expressed in terms that make sense to
+ * the user context. Ie. on MC, this information is likely to be true-physical,
+ * and instantiated statically at compile-time. On GPP, this information is
+ * likely to be obtained via "discovery" over a partition's "MC bus"
+ * (ie. in response to a MC portal command), and would take into account any
+ * virtualisation of the GPP user's address space and/or interrupt numbering.
+ */
+struct qbman_swp_desc {
+ const struct qbman_block_desc *block;
+ uint8_t *cena_bar;
+ uint8_t *cinh_bar;
+ int irq;
+ int idx;
+ uint32_t qman_version;
+ enum qbman_eqcr_mode eqcr_mode;
+};
+
+/* Driver object for managing a QBMan portal */
+struct qbman_swp;
+
+/**
+ * struct qbman_fd - basci structure for qbman frame descriptor
+ * @words: for easier/faster copying the whole FD structure.
+ * @addr_lo: the lower 32 bits of the address in FD.
+ * @addr_hi: the upper 32 bits of the address in FD.
+ * @len: the length field in FD.
+ * @bpid_offset: represent the bpid and offset fields in FD. offset in
+ * the MS 16 bits, BPID in the LS 16 bits.
+ * @frc: frame context
+ * @ctrl: the 32bit control bits including dd, sc,... va, err.
+ * @flc_lo: the lower 32bit of flow context.
+ * @flc_hi: the upper 32bits of flow context.
+ *
+ * Place-holder for FDs, we represent it via the simplest form that we need for
+ * now. Different overlays may be needed to support different options, etc. (It
+ * is impractical to define One True Struct, because the resulting encoding
+ * routines (lots of read-modify-writes) would be worst-case performance whether
+ * or not circumstances required them.)
+ *
+ * Note, as with all data-structures exchanged between software and hardware (be
+ * they located in the portal register map or DMA'd to and from main-memory),
+ * the driver ensures that the caller of the driver API sees the data-structures
+ * in host-endianness. "struct qbman_fd" is no exception. The 32-bit words
+ * contained within this structure are represented in host-endianness, even if
+ * hardware always treats them as little-endian. As such, if any of these fields
+ * are interpreted in a binary (rather than numerical) fashion by hardware
+ * blocks (eg. accelerators), then the user should be careful. We illustrate
+ * with an example;
+ *
+ * Suppose the desired behaviour of an accelerator is controlled by the "frc"
+ * field of the FDs that are sent to it. Suppose also that the behaviour desired
+ * by the user corresponds to an "frc" value which is expressed as the literal
+ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit
+ * value in which 0xfe is the first byte and 0xba is the last byte, and as
+ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If
+ * the software is little-endian also, this can simply be achieved by setting
+ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set
+ * frc=0xfeedabba! The best away of avoiding trouble with this sort of thing is
+ * to treat the 32-bit words as numerical values, in which the offset of a field
+ * from the beginning of the first byte (as required or generated by hardware)
+ * is numerically encoded by a left-shift (ie. by raising the field to a
+ * corresponding power of 2). Ie. in the current example, software could set
+ * "frc" in the following way, and it would work correctly on both little-endian
+ * and big-endian operation;
+ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24);
+ */
+struct qbman_fd {
+ union {
+ uint32_t words[8];
+ struct qbman_fd_simple {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t len;
+ uint32_t bpid_offset;
+ uint32_t frc;
+ uint32_t ctrl;
+ uint32_t flc_lo;
+ uint32_t flc_hi;
+ } simple;
+ };
+};
+
+#endif /* !_FSL_QBMAN_BASE_H */
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
new file mode 100644
index 00000000..77317723
--- /dev/null
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h
@@ -0,0 +1,1093 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_QBMAN_PORTAL_H
+#define _FSL_QBMAN_PORTAL_H
+
+#include <fsl_qbman_base.h>
+
+/**
+ * DOC - QBMan portal APIs to implement the following functions:
+ * - Initialize and destroy Software portal object.
+ * - Read and write Software portal interrupt registers.
+ * - Enqueue, including setting the enqueue descriptor, and issuing enqueue
+ * command etc.
+ * - Dequeue, including setting the dequeue descriptor, issuing dequeue command,
+ * parsing the dequeue response in DQRR and memeory, parsing the state change
+ * notifications etc.
+ * - Release, including setting the release descriptor, and issuing the buffer
+ * release command.
+ * - Acquire, acquire the buffer from the given buffer pool.
+ * - FQ management.
+ * - Channel management, enable/disable CDAN with or without context.
+ */
+
+/**
+ * qbman_swp_init() - Create a functional object representing the given
+ * QBMan portal descriptor.
+ * @d: the given qbman swp descriptor
+ *
+ * Return qbman_swp portal object for success, NULL if the object cannot
+ * be created.
+ */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
+
+/**
+ * qbman_swp_finish() - Create and destroy a functional object representing
+ * the given QBMan portal descriptor.
+ * @p: the qbman_swp object to be destroyed.
+ *
+ */
+void qbman_swp_finish(struct qbman_swp *p);
+
+/**
+ * qbman_swp_get_desc() - Get the descriptor of the given portal object.
+ * @p: the given portal object.
+ *
+ * Return the descriptor for this portal.
+ */
+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
+
+ /**************/
+ /* Interrupts */
+ /**************/
+
+/* EQCR ring interrupt */
+#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001)
+/* Enqueue command dispatched interrupt */
+#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002)
+/* DQRR non-empty interrupt */
+#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004)
+/* RCR ring interrupt */
+#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008)
+/* Release command dispatched interrupt */
+#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010)
+/* Volatile dequeue command interrupt */
+#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)
+
+/**
+ * qbman_swp_interrupt_get_vanish() - Get the data in software portal
+ * interrupt status disable register.
+ * @p: the given software portal object.
+ *
+ * Return the settings in SWP_ISDR register.
+ */
+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);
+
+/**
+ * qbman_swp_interrupt_set_vanish() - Set the data in software portal
+ * interrupt status disable register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_IDSR register.
+ */
+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_interrupt_read_status() - Get the data in software portal
+ * interrupt status register.
+ * @p: the given software portal object.
+ *
+ * Return the settings in SWP_ISR register.
+ */
+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
+
+/**
+ * qbman_swp_interrupt_clear_status() - Set the data in software portal
+ * interrupt status register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_ISR register.
+ */
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_interrupt_get_trigger() - Get the data in software portal
+ * interrupt enable register.
+ * @p: the given software portal object.
+ *
+ * Return the settings in SWP_IER register.
+ */
+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
+
+/**
+ * qbman_swp_interrupt_set_trigger() - Set the data in software portal
+ * interrupt enable register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_IER register.
+ */
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);
+
+/**
+ * qbman_swp_interrupt_get_inhibit() - Get the data in software portal
+ * interrupt inhibit register.
+ * @p: the given software portal object.
+ *
+ * Return the settings in SWP_IIR register.
+ */
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
+
+/**
+ * qbman_swp_interrupt_set_inhibit() - Set the data in software portal
+ * interrupt inhibit register.
+ * @p: the given software portal object.
+ * @mask: The value to set in SWP_IIR register.
+ */
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
+
+ /************/
+ /* Dequeues */
+ /************/
+
+/**
+ * struct qbman_result - structure for qbman dequeue response and/or
+ * notification.
+ * @dont_manipulate_directly: the 16 32bit data to represent the whole
+ * possible qbman dequeue result.
+ */
+struct qbman_result {
+ uint32_t dont_manipulate_directly[16];
+};
+
+/* TODO:
+ *A DQRI interrupt can be generated when there are dequeue results on the
+ * portal's DQRR (this mechanism does not deal with "pull" dequeues to
+ * user-supplied 'storage' addresses). There are two parameters to this
+ * interrupt source, one is a threshold and the other is a timeout. The
+ * interrupt will fire if either the fill-level of the ring exceeds 'thresh', or
+ * if the ring has been non-empty for been longer than 'timeout' nanoseconds.
+ * For timeout, an approximation to the desired nanosecond-granularity value is
+ * made, so there are get and set APIs to allow the user to see what actual
+ * timeout is set (compared to the timeout that was requested).
+ */
+int qbman_swp_dequeue_thresh(struct qbman_swp *s, unsigned int thresh);
+int qbman_swp_dequeue_set_timeout(struct qbman_swp *s, unsigned int timeout);
+int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout);
+
+/* ------------------- */
+/* Push-mode dequeuing */
+/* ------------------- */
+
+/* The user of a portal can enable and disable push-mode dequeuing of up to 16
+ * channels independently. It does not specify this toggling by channel IDs, but
+ * rather by specifying the index (from 0 to 15) that has been mapped to the
+ * desired channel.
+ */
+
+/**
+ * qbman_swp_push_get() - Get the push dequeue setup.
+ * @s: the software portal object.
+ * @channel_idx: the channel index to query.
+ * @enabled: returned boolean to show whether the push dequeue is enabled for
+ * the given channel.
+ */
+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);
+
+/**
+ * qbman_swp_push_set() - Enable or disable push dequeue.
+ * @s: the software portal object.
+ * @channel_idx: the channel index..
+ * @enable: enable or disable push dequeue.
+ *
+ * The user of a portal can enable and disable push-mode dequeuing of up to 16
+ * channels independently. It does not specify this toggling by channel IDs, but
+ * rather by specifying the index (from 0 to 15) that has been mapped to the
+ * desired channel.
+ */
+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
+
+/* ------------------- */
+/* Pull-mode dequeuing */
+/* ------------------- */
+
+/**
+ * struct qbman_pull_desc - the structure for pull dequeue descriptor
+ * @dont_manipulate_directly: the 6 32bit data to represent the whole
+ * possible settings for pull dequeue descriptor.
+ */
+struct qbman_pull_desc {
+ uint32_t dont_manipulate_directly[6];
+};
+
+enum qbman_pull_type_e {
+ /* dequeue with priority precedence, respect intra-class scheduling */
+ qbman_pull_type_prio = 1,
+ /* dequeue with active FQ precedence, respect ICS */
+ qbman_pull_type_active,
+ /* dequeue with active FQ precedence, no ICS */
+ qbman_pull_type_active_noics
+};
+
+/**
+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the pull dequeue descriptor to be cleared.
+ */
+void qbman_pull_desc_clear(struct qbman_pull_desc *d);
+
+/**
+ * qbman_pull_desc_set_storage()- Set the pull dequeue storage
+ * @d: the pull dequeue descriptor to be set.
+ * @storage: the pointer of the memory to store the dequeue result.
+ * @storage_phys: the physical address of the storage memory.
+ * @stash: to indicate whether write allocate is enabled.
+ *
+ * If not called, or if called with 'storage' as NULL, the result pull dequeues
+ * will produce results to DQRR. If 'storage' is non-NULL, then results are
+ * produced to the given memory location (using the physical/DMA address which
+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not
+ * those writes to main-memory express a cache-warming attribute.
+ */
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct qbman_result *storage,
+ dma_addr_t storage_phys,
+ int stash);
+/**
+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
+ * @d: the pull dequeue descriptor to be set.
+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive.
+ */
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
+ uint8_t numframes);
+/**
+ * qbman_pull_desc_set_token() - Set dequeue token for pull command
+ * @d: the dequeue descriptor
+ * @token: the token to be set
+ *
+ * token is the value that shows up in the dequeue response that can be used to
+ * detect when the results have been published. The easiest technique is to zero
+ * result "storage" before issuing a dequeue, and use any non-zero 'token' value
+ */
+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);
+
+/* Exactly one of the following descriptor "actions" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - pull dequeue from the given frame queue (FQ)
+ * - pull dequeue from any FQ in the given work queue (WQ)
+ * - pull dequeue from any FQ in any WQ in the given channel
+ */
+/**
+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues.
+ * @fqid: the frame queue index of the given FQ.
+ */
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);
+
+/**
+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.
+ * @wqid: composed of channel id and wqid within the channel.
+ * @dct: the dequeue command type.
+ */
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
+ enum qbman_pull_type_e dct);
+
+/* qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
+ * dequeues.
+ * @chid: the channel id to be dequeued.
+ * @dct: the dequeue command type.
+ */
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
+ enum qbman_pull_type_e dct);
+
+/**
+ * qbman_swp_pull() - Issue the pull dequeue command
+ * @s: the software portal object.
+ * @d: the software portal descriptor which has been configured with
+ * the set of qbman_pull_desc_set_*() calls.
+ *
+ * Return 0 for success, and -EBUSY if the software portal is not ready
+ * to do pull dequeue.
+ */
+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);
+
+/* -------------------------------- */
+/* Polling DQRR for dequeue results */
+/* -------------------------------- */
+
+/**
+ * qbman_swp_dqrr_next() - Get an valid DQRR entry.
+ * @s: the software portal object.
+ *
+ * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *p);
+
+/**
+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
+ * qbman_swp_dqrr_next().
+ * @s: the software portal object.
+ * @dq: the DQRR entry to be consumed.
+ */
+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);
+
+/**
+ * qbman_get_dqrr_idx() - Get dqrr index from the given dqrr
+ * @dqrr: the given dqrr object.
+ *
+ * Return dqrr index.
+ */
+uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr);
+
+/**
+ * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the
+ * given portal
+ * @s: the given portal.
+ * @idx: the dqrr index.
+ *
+ * Return dqrr entry object.
+ */
+struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
+
+/* ------------------------------------------------- */
+/* Polling user-provided storage for dequeue results */
+/* ------------------------------------------------- */
+
+/**
+ * qbman_result_has_new_result() - Check and get the dequeue response from the
+ * dq storage memory set in pull dequeue command
+ * @s: the software portal object.
+ * @dq: the dequeue result read from the memory.
+ *
+ * Only used for user-provided storage of dequeue results, not DQRR. For
+ * efficiency purposes, the driver will perform any required endianness
+ * conversion to ensure that the user's dequeue result storage is in host-endian
+ * format (whether or not that is the same as the little-endian format that
+ * hardware DMA'd to the user's storage). As such, once the user has called
+ * qbman_result_has_new_result() and been returned a valid dequeue result,
+ * they should not call it again on the same memory location (except of course
+ * if another dequeue command has been executed to produce a new result to that
+ * location).
+ *
+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
+ * dequeue result.
+ */
+int qbman_result_has_new_result(struct qbman_swp *s,
+ const struct qbman_result *dq);
+
+/* -------------------------------------------------------- */
+/* Parsing dequeue entries (DQRR and user-provided storage) */
+/* -------------------------------------------------------- */
+
+/**
+ * qbman_result_is_DQ() - check the dequeue result is a dequeue response or not
+ * @dq: the dequeue result to be checked.
+ *
+ * DQRR entries may contain non-dequeue results, ie. notifications
+ */
+int qbman_result_is_DQ(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_SCN() - Check the dequeue result is notification or not
+ * @dq: the dequeue result to be checked.
+ *
+ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change
+ * notifications" of one type or another. Some APIs apply to all of them, of the
+ * form qbman_result_SCN_***().
+ */
+static inline int qbman_result_is_SCN(const struct qbman_result *dq)
+{
+ return !qbman_result_is_DQ(dq);
+}
+
+/* Recognise different notification types, only required if the user allows for
+ * these to occur, and cares about them when they do.
+ */
+
+/**
+ * qbman_result_is_FQDAN() - Check for FQ Data Availability
+ * @dq: the qbman_result object.
+ *
+ * Return 1 if this is FQDAN.
+ */
+int qbman_result_is_FQDAN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_CDAN() - Check for Channel Data Availability
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is CDAN.
+ */
+int qbman_result_is_CDAN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_CSCN() - Check for Congestion State Change
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is CSCN.
+ */
+int qbman_result_is_CSCN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_BPSCN() - Check for Buffer Pool State Change.
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is BPSCN.
+ */
+int qbman_result_is_BPSCN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_CGCU() - Check for Congestion Group Count Update.
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is CGCU.
+ */
+int qbman_result_is_CGCU(const struct qbman_result *dq);
+
+/* Frame queue state change notifications; (FQDAN in theory counts too as it
+ * leaves a FQ parked, but it is primarily a data availability notification)
+ */
+
+/**
+ * qbman_result_is_FQRN() - Check for FQ Retirement Notification.
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is FQRN.
+ */
+int qbman_result_is_FQRN(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_FQRNI() - Check for FQ Retirement Immediate
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is FQRNI.
+ */
+int qbman_result_is_FQRNI(const struct qbman_result *dq);
+
+/**
+ * qbman_result_is_FQPN() - Check for FQ Park Notification
+ * @dq: the qbman_result object to check.
+ *
+ * Return 1 if this is FQPN.
+ */
+int qbman_result_is_FQPN(const struct qbman_result *dq);
+
+/* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)
+ */
+/* FQ empty */
+#define QBMAN_DQ_STAT_FQEMPTY 0x80
+/* FQ held active */
+#define QBMAN_DQ_STAT_HELDACTIVE 0x40
+/* FQ force eligible */
+#define QBMAN_DQ_STAT_FORCEELIGIBLE 0x20
+/* Valid frame */
+#define QBMAN_DQ_STAT_VALIDFRAME 0x10
+/* FQ ODP enable */
+#define QBMAN_DQ_STAT_ODPVALID 0x04
+/* Volatile dequeue */
+#define QBMAN_DQ_STAT_VOLATILE 0x02
+/* volatile dequeue command is expired */
+#define QBMAN_DQ_STAT_EXPIRED 0x01
+
+/**
+ * qbman_result_DQ_flags() - Get the STAT field of dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the state field.
+ */
+uint32_t qbman_result_DQ_flags(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_is_pull() - Check whether the dq response is from a pull
+ * command.
+ * @dq: the dequeue result.
+ *
+ * Return 1 for volatile(pull) dequeue, 0 for static dequeue.
+ */
+static inline int qbman_result_DQ_is_pull(const struct qbman_result *dq)
+{
+ return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VOLATILE);
+}
+
+/**
+ * qbman_result_DQ_is_pull_complete() - Check whether the pull command is
+ * completed.
+ * @dq: the dequeue result.
+ *
+ * Return boolean.
+ */
+static inline int qbman_result_DQ_is_pull_complete(
+ const struct qbman_result *dq)
+{
+ return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_EXPIRED);
+}
+
+/**
+ * qbman_result_DQ_seqnum() - Get the seqnum field in dequeue response
+ * seqnum is valid only if VALIDFRAME flag is TRUE
+ * @dq: the dequeue result.
+ *
+ * Return seqnum.
+ */
+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_odpid() - Get the seqnum field in dequeue response
+ * odpid is valid only if ODPVAILD flag is TRUE.
+ * @dq: the dequeue result.
+ *
+ * Return odpid.
+ */
+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_fqid() - Get the fqid in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return fqid.
+ */
+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_byte_count() - Get the byte count in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the byte count remaining in the FQ.
+ */
+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_frame_count - Get the frame count in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the frame count remaining in the FQ.
+ */
+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the frame queue context.
+ */
+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);
+
+/**
+ * qbman_result_DQ_fd() - Get the frame descriptor in dequeue response
+ * @dq: the dequeue result.
+ *
+ * Return the frame descriptor.
+ */
+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);
+
+/* State-change notifications (FQDAN/CDAN/CSCN/...). */
+
+/**
+ * qbman_result_SCN_state() - Get the state field in State-change notification
+ * @scn: the state change notification.
+ *
+ * Return the state in the notifiation.
+ */
+uint8_t qbman_result_SCN_state(const struct qbman_result *scn);
+
+/**
+ * qbman_result_SCN_rid() - Get the resource id from the notification
+ * @scn: the state change notification.
+ *
+ * Return the resource id.
+ */
+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn);
+
+/**
+ * qbman_result_SCN_ctx() - get the context from the notification
+ * @scn: the state change notification.
+ *
+ * Return the context.
+ */
+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);
+
+/**
+ * qbman_result_SCN_state_in_mem() - Get the state in notification written
+ * in memory
+ * @scn: the state change notification.
+ *
+ * Return the state.
+ */
+uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn);
+
+/**
+ * qbman_result_SCN_rid_in_mem() - Get the resource id in notification written
+ * in memory.
+ * @scn: the state change notification.
+ *
+ * Return the resource id.
+ */
+uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn);
+
+/* Type-specific "resource IDs". Mainly for illustration purposes, though it
+ * also gives the appropriate type widths.
+ */
+/* Get the FQID from the FQDAN */
+#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)
+/* Get the FQID from the FQRN */
+#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)
+/* Get the FQID from the FQRNI */
+#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)
+/* Get the FQID from the FQPN */
+#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)
+/* Get the channel ID from the CDAN */
+#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
+/* Get the CGID from the CSCN */
+#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
+
+/**
+ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
+ * @scn: the state change notification.
+ *
+ * Return the buffer pool id.
+ */
+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn);
+
+/**
+ * qbman_result_bpscn_has_free_bufs() - Check whether there are free
+ * buffers in the pool from BPSCN.
+ * @scn: the state change notification.
+ *
+ * Return the number of free buffers.
+ */
+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn);
+
+/**
+ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
+ * buffer pool is depleted.
+ * @scn: the state change notification.
+ *
+ * Return the status of buffer pool depletion.
+ */
+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn);
+
+/**
+ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
+ * pool is surplus or not.
+ * @scn: the state change notification.
+ *
+ * Return the status of buffer pool surplus.
+ */
+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn);
+
+/**
+ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
+ * @scn: the state change notification.
+ *
+ * Return the BPSCN context.
+ */
+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);
+
+/* Parsing CGCU */
+/**
+ * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid
+ * @scn: the state change notification.
+ *
+ * Return the CGCU resource id.
+ */
+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn);
+
+/**
+ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
+ * @scn: the state change notification.
+ *
+ * Return instantaneous count in the CGCU notification.
+ */
+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
+
+ /************/
+ /* Enqueues */
+ /************/
+
+/**
+ * struct qbman_eq_desc - structure of enqueue descriptor
+ * @dont_manipulate_directly: the 8 32bit data to represent the whole
+ * possible qbman enqueue setting in enqueue descriptor.
+ */
+struct qbman_eq_desc {
+ uint32_t dont_manipulate_directly[8];
+};
+
+/**
+ * struct qbman_eq_response - structure of enqueue response
+ * @dont_manipulate_directly: the 16 32bit data to represent the whole
+ * enqueue response.
+ */
+struct qbman_eq_response {
+ uint32_t dont_manipulate_directly[16];
+};
+
+/**
+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the given enqueue descriptor.
+ */
+void qbman_eq_desc_clear(struct qbman_eq_desc *d);
+
+/* Exactly one of the following descriptor "actions" should be set. (Calling
+ * any one of these will replace the effect of any prior call to one of these.)
+ * - enqueue without order-restoration
+ * - enqueue with order-restoration
+ * - fill a hole in the order-restoration sequence, without any enqueue
+ * - advance NESN (Next Expected Sequence Number), without any enqueue
+ * 'respond_success' indicates whether an enqueue response should be DMA'd
+ * after success (otherwise a response is DMA'd only after failure).
+ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
+ * be enqueued.
+ */
+
+/**
+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
+ * @d: the enqueue descriptor.
+ * @response_success: 1 = enqueue with response always; 0 = enqueue with
+ * rejections returned on a FQ.
+ */
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
+/**
+ * qbman_eq_desc_set_orp() - Set order-resotration in the enqueue descriptor
+ * @d: the enqueue descriptor.
+ * @response_success: 1 = enqueue with response always; 0 = enqueue with
+ * rejections returned on a FQ.
+ * @opr_id: the order point record id.
+ * @seqnum: the order restoration sequence number.
+ * @incomplete: indiates whether this is the last fragments using the same
+ * sequeue number.
+ */
+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
+ uint32_t opr_id, uint32_t seqnum, int incomplete);
+
+/**
+ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
+ * without any enqueue
+ * @d: the enqueue descriptor.
+ * @opr_id: the order point record id.
+ * @seqnum: the order restoration sequence number.
+ */
+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
+ uint32_t seqnum);
+
+/**
+ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number)
+ * without any enqueue
+ * @d: the enqueue descriptor.
+ * @opr_id: the order point record id.
+ * @seqnum: the order restoration sequence number.
+ */
+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
+ uint32_t seqnum);
+/**
+ * qbman_eq_desc_set_response() - Set the enqueue response info.
+ * @d: the enqueue descriptor
+ * @storage_phys: the physical address of the enqueue response in memory.
+ * @stash: indicate that the write allocation enabled or not.
+ *
+ * In the case where an enqueue response is DMA'd, this determines where that
+ * response should go. (The physical/DMA address is given for hardware's
+ * benefit, but software should interpret it as a "struct qbman_eq_response"
+ * data structure.) 'stash' controls whether or not the write to main-memory
+ * expresses a cache-warming attribute.
+ */
+void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
+ dma_addr_t storage_phys,
+ int stash);
+
+/**
+ * qbman_eq_desc_set_token() - Set token for the enqueue command
+ * @d: the enqueue descriptor
+ * @token: the token to be set.
+ *
+ * token is the value that shows up in an enqueue response that can be used to
+ * detect when the results have been published. The easiest technique is to zero
+ * result "storage" before issuing an enqueue, and use any non-zero 'token'
+ * value.
+ */
+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
+
+/**
+ * Exactly one of the following descriptor "targets" should be set. (Calling any
+ * one of these will replace the effect of any prior call to one of these.)
+ * - enqueue to a frame queue
+ * - enqueue to a queuing destination
+ * Note, that none of these will have any affect if the "action" type has been
+ * set to "orp_hole" or "orp_nesn".
+ */
+/**
+ * qbman_eq_desc_set_fq() - Set Frame Queue id for the enqueue command
+ * @d: the enqueue descriptor
+ * @fqid: the id of the frame queue to be enqueued.
+ */
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
+
+/**
+ * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command.
+ * @d: the enqueue descriptor
+ * @qdid: the id of the queuing destination to be enqueued.
+ * @qd_bin: the queuing destination bin
+ * @qd_prio: the queuing destination priority.
+ */
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
+ uint32_t qd_bin, uint32_t qd_prio);
+
+/**
+ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
+ * @d: the enqueue descriptor
+ * @enable: boolean to enable/disable EQDI
+ *
+ * Determines whether or not the portal's EQDI interrupt source should be
+ * asserted after the enqueue command is completed.
+ */
+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
+
+/**
+ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
+ * @d: the enqueue descriptor.
+ * @enable: enabled/disable DCA mode.
+ * @dqrr_idx: DCAP_CI, the DCAP consumer index.
+ * @park: determine the whether park the FQ or not
+ *
+ * Determines whether or not a portal DQRR entry should be consumed once the
+ * enqueue command is completed. (And if so, and the DQRR entry corresponds to a
+ * held-active (order-preserving) FQ, whether the FQ should be parked instead of
+ * being rescheduled.)
+ */
+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
+ uint32_t dqrr_idx, int park);
+
+/**
+ * qbman_swp_enqueue() - Issue an enqueue command.
+ * @s: the software portal used for enqueue.
+ * @d: the enqueue descriptor.
+ * @fd: the frame descriptor to be enqueued.
+ *
+ * Please note that 'fd' should only be NULL if the "action" of the
+ * descriptor is "orp_hole" or "orp_nesn".
+ *
+ * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
+ */
+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd);
+
+/* TODO:
+ * qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
+ * @s: the software portal.
+ * @thresh: the threshold to trigger the EQRI interrupt.
+ *
+ * An EQRI interrupt can be generated when the fill-level of EQCR falls below
+ * the 'thresh' value set here. Setting thresh==0 (the default) disables.
+ */
+int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
+
+ /*******************/
+ /* Buffer releases */
+ /*******************/
+/**
+ * struct qbman_release_desc - The structure for buffer release descriptor
+ * @dont_manipulate_directly: the 32bit data to represent the whole
+ * possible settings of qbman release descriptor.
+ */
+struct qbman_release_desc {
+ uint32_t dont_manipulate_directly[1];
+};
+
+/**
+ * qbman_release_desc_clear() - Clear the contents of a descriptor to
+ * default/starting state.
+ * @d: the qbman release descriptor.
+ */
+void qbman_release_desc_clear(struct qbman_release_desc *d);
+
+/**
+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
+ * @d: the qbman release descriptor.
+ */
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid);
+
+/**
+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
+ * interrupt source should be asserted after the release command is completed.
+ * @d: the qbman release descriptor.
+ */
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
+
+/**
+ * qbman_swp_release() - Issue a buffer release command.
+ * @s: the software portal object.
+ * @d: the release descriptor.
+ * @buffers: a pointer pointing to the buffer address to be released.
+ * @num_buffers: number of buffers to be released, must be less than 8.
+ *
+ * Return 0 for success, -EBUSY if the release command ring is not ready.
+ */
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+ const uint64_t *buffers, unsigned int num_buffers);
+
+/* TODO:
+ * qbman_swp_release_thresh() - Set threshold for RCRI interrupt
+ * @s: the software portal.
+ * @thresh: the threshold.
+ * An RCRI interrupt can be generated when the fill-level of RCR falls below
+ * the 'thresh' value set here. Setting thresh==0 (the default) disables.
+ */
+int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);
+
+ /*******************/
+ /* Buffer acquires */
+ /*******************/
+/**
+ * qbman_swp_acquire() - Issue a buffer acquire command.
+ * @s: the software portal object.
+ * @bpid: the buffer pool index.
+ * @buffers: a pointer pointing to the acquired buffer address|es.
+ * @num_buffers: number of buffers to be acquired, must be less than 8.
+ *
+ * Return 0 for success, or negative error code if the acquire command
+ * fails.
+ */
+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
+ unsigned int num_buffers);
+
+ /*****************/
+ /* FQ management */
+ /*****************/
+/**
+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
+ * @s: the software portal object.
+ * @fqid: the index of frame queue to be scheduled.
+ *
+ * There are a couple of different ways that a FQ can end up parked state,
+ * This schedules it.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);
+
+/**
+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
+ * @s: the software portal object.
+ * @fqid: the index of frame queue to be forced.
+ *
+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
+ * and thus be available for selection by any channel-dequeuing behaviour (push
+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still
+ * empty at the time this happens, the resulting dq_entry will have no FD.
+ * (qbman_result_DQ_fd() will return NULL.)
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
+
+/**
+ * These functions change the FQ flow-control stuff between XON/XOFF. (The
+ * default is XON.) This setting doesn't affect enqueues to the FQ, just
+ * dequeues. XOFF FQs will remain in the tenatively-scheduled state, even when
+ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
+ * changed to XOFF after it had already become truly-scheduled to a channel, and
+ * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
+ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
+ * return NULL.)
+ */
+/**
+ * qbman_swp_fq_xon() - XON the frame queue.
+ * @s: the software portal object.
+ * @fqid: the index of frame queue.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
+/**
+ * qbman_swp_fq_xoff() - XOFF the frame queue.
+ * @s: the software portal object.
+ * @fqid: the index of frame queue.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
+
+ /**********************/
+ /* Channel management */
+ /**********************/
+
+/**
+ * If the user has been allocated a channel object that is going to generate
+ * CDANs to another channel, then these functions will be necessary.
+ * CDAN-enabled channels only generate a single CDAN notification, after which
+ * it they need to be reenabled before they'll generate another. (The idea is
+ * that pull dequeuing will occur in reaction to the CDAN, followed by a
+ * reenable step.) Each function generates a distinct command to hardware, so a
+ * combination function is provided if the user wishes to modify the "context"
+ * (which shows up in each CDAN message) each time they reenable, as a single
+ * command to hardware.
+ */
+
+/**
+ * qbman_swp_CDAN_set_context() - Set CDAN context
+ * @s: the software portal object.
+ * @channelid: the channel index.
+ * @ctx: the context to be set in CDAN.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
+ uint64_t ctx);
+
+/**
+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
+ * @s: the software portal object.
+ * @channelid: the index of the channel to generate CDAN.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);
+
+/**
+ * qbman_swp_CDAN_disable() - disable CDAN for the channel.
+ * @s: the software portal object.
+ * @channelid: the index of the channel to generate CDAN.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
+
+/**
+ * qbman_swp_CDAN_set_context_enable() - Set CDAN contest and enable CDAN
+ * @s: the software portal object.
+ * @channelid: the index of the channel to generate CDAN.
+ * @ctx: the context set in CDAN.
+ *
+ * Return 0 for success, or negative error code for failure.
+ */
+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
+ uint64_t ctx);
+int qbman_swp_fill_ring(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint8_t burst_index);
+int qbman_swp_flush_ring(struct qbman_swp *s);
+void qbman_sync(void);
+int qbman_swp_send_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int frames_to_send);
+
+int qbman_check_command_complete(struct qbman_swp *s,
+ const struct qbman_result *dq);
+
+int qbman_get_version(void);
+#endif /* !_FSL_QBMAN_PORTAL_H */
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
new file mode 100644
index 00000000..5d407cc0
--- /dev/null
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -0,0 +1,1496 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qbman_portal.h"
+
+/* QBMan portal management command codes */
+#define QBMAN_MC_ACQUIRE 0x30
+#define QBMAN_WQCHAN_CONFIGURE 0x46
+
+/* CINH register offsets */
+#define QBMAN_CINH_SWP_EQCR_PI 0x800
+#define QBMAN_CINH_SWP_EQCR_CI 0x840
+#define QBMAN_CINH_SWP_EQAR 0x8c0
+#define QBMAN_CINH_SWP_DQPI 0xa00
+#define QBMAN_CINH_SWP_DCAP 0xac0
+#define QBMAN_CINH_SWP_SDQCR 0xb00
+#define QBMAN_CINH_SWP_RAR 0xcc0
+#define QBMAN_CINH_SWP_ISR 0xe00
+#define QBMAN_CINH_SWP_IER 0xe40
+#define QBMAN_CINH_SWP_ISDR 0xe80
+#define QBMAN_CINH_SWP_IIR 0xec0
+
+/* CENA register offsets */
+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6))
+#define QBMAN_CENA_SWP_CR 0x600
+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1))
+#define QBMAN_CENA_SWP_VDQCR 0x780
+#define QBMAN_CENA_SWP_EQCR_CI 0x840
+
+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6)
+
+/* QBMan FQ management command codes */
+#define QBMAN_FQ_SCHEDULE 0x48
+#define QBMAN_FQ_FORCE 0x49
+#define QBMAN_FQ_XON 0x4d
+#define QBMAN_FQ_XOFF 0x4e
+
+/*******************************/
+/* Pre-defined attribute codes */
+/*******************************/
+
+struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7);
+struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8);
+
+/*************************/
+/* SDQCR attribute codes */
+/*************************/
+
+/* we put these here because at least some of them are required by
+ * qbman_swp_init()
+ */
+struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2);
+struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1);
+struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8);
+static struct qb_attr_code code_eq_dca_idx;
+#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1)
+enum qbman_sdqcr_dct {
+ qbman_sdqcr_dct_null = 0,
+ qbman_sdqcr_dct_prio_ics,
+ qbman_sdqcr_dct_active_ics,
+ qbman_sdqcr_dct_active
+};
+
+enum qbman_sdqcr_fc {
+ qbman_sdqcr_fc_one = 0,
+ qbman_sdqcr_fc_up_to_3 = 1
+};
+
+struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16);
+
+/* We need to keep track of which SWP triggered a pull command
+ * so keep an array of portal IDs and use the token field to
+ * be able to find the proper portal
+ */
+#define MAX_QBMAN_PORTALS 35
+static struct qbman_swp *portal_idx_map[MAX_QBMAN_PORTALS];
+
+uint32_t qman_version;
+
+/*********************************/
+/* Portal constructor/destructor */
+/*********************************/
+
+/* Software portals should always be in the power-on state when we initialise,
+ * due to the CCSR-based portal reset functionality that MC has.
+ *
+ * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR
+ * valid-bits, so we need to support a workaround where we don't trust
+ * valid-bits when detecting new entries until any stale ring entries have been
+ * overwritten at least once. The idea is that we read PI for the first few
+ * entries, then switch to valid-bit after that. The trick is to clear the
+ * bug-work-around boolean once the PI wraps around the ring for the first time.
+ *
+ * Note: this still carries a slight additional cost once the decrementer hits
+ * zero.
+ */
+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
+{
+ int ret;
+ uint32_t eqcr_pi;
+ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
+
+ if (!p)
+ return NULL;
+ p->desc = *d;
+#ifdef QBMAN_CHECKING
+ p->mc.check = swp_mc_can_start;
+#endif
+ p->mc.valid_bit = QB_VALID_BIT;
+ p->sdq = 0;
+ qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics);
+ qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3);
+ qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb);
+ atomic_set(&p->vdq.busy, 1);
+ p->vdq.valid_bit = QB_VALID_BIT;
+ p->dqrr.next_idx = 0;
+ p->dqrr.valid_bit = QB_VALID_BIT;
+ qman_version = p->desc.qman_version;
+ if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
+ p->dqrr.dqrr_size = 4;
+ p->dqrr.reset_bug = 1;
+ /* Set size of DQRR to 4, encoded in 2 bits */
+ code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 2);
+ } else {
+ p->dqrr.dqrr_size = 8;
+ p->dqrr.reset_bug = 0;
+ /* Set size of DQRR to 8, encoded in 3 bits */
+ code_eq_dca_idx = (struct qb_attr_code)QB_CODE(0, 8, 3);
+ }
+
+ ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size);
+ if (ret) {
+ kfree(p);
+ pr_err("qbman_swp_sys_init() failed %d\n", ret);
+ return NULL;
+ }
+ /* SDQCR needs to be initialized to 0 when no channels are
+ * being dequeued from or else the QMan HW will indicate an
+ * error. The values that were calculated above will be
+ * applied when dequeues from a specific channel are enabled
+ */
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0);
+ eqcr_pi = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_PI);
+ p->eqcr.pi = eqcr_pi & 0xF;
+ p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
+ p->eqcr.ci = qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_EQCR_CI) & 0xF;
+ p->eqcr.available = QBMAN_EQCR_SIZE - qm_cyc_diff(QBMAN_EQCR_SIZE,
+ p->eqcr.ci, p->eqcr.pi);
+
+ portal_idx_map[p->desc.idx] = p;
+ return p;
+}
+
+void qbman_swp_finish(struct qbman_swp *p)
+{
+#ifdef QBMAN_CHECKING
+ QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
+#endif
+ qbman_swp_sys_finish(&p->sys);
+ portal_idx_map[p->desc.idx] = NULL;
+ kfree(p);
+}
+
+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p)
+{
+ return &p->desc;
+}
+
+/**************/
+/* Interrupts */
+/**************/
+
+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR);
+}
+
+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask);
+}
+
+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR);
+}
+
+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask);
+}
+
+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER);
+}
+
+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask);
+}
+
+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
+{
+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR);
+}
+
+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
+{
+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
+}
+
+/***********************/
+/* Management commands */
+/***********************/
+
+/*
+ * Internal code common to all types of management commands.
+ */
+
+void *qbman_swp_mc_start(struct qbman_swp *p)
+{
+ void *ret;
+#ifdef QBMAN_CHECKING
+ QBMAN_BUG_ON(p->mc.check != swp_mc_can_start);
+#endif
+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR);
+#ifdef QBMAN_CHECKING
+ if (!ret)
+ p->mc.check = swp_mc_can_submit;
+#endif
+ return ret;
+}
+
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb)
+{
+ uint32_t *v = cmd;
+#ifdef QBMAN_CHECKING
+ QBMAN_BUG_ON(!(p->mc.check != swp_mc_can_submit));
+#endif
+ /* TBD: "|=" is going to hurt performance. Need to move as many fields
+ * out of word zero, and for those that remain, the "OR" needs to occur
+ * at the caller side. This debug check helps to catch cases where the
+ * caller wants to OR but has forgotten to do so.
+ */
+ QBMAN_BUG_ON((*v & cmd_verb) != *v);
+ *v = cmd_verb | p->mc.valid_bit;
+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd);
+#ifdef QBMAN_CHECKING
+ p->mc.check = swp_mc_can_poll;
+#endif
+}
+
+void *qbman_swp_mc_result(struct qbman_swp *p)
+{
+ uint32_t *ret, verb;
+#ifdef QBMAN_CHECKING
+ QBMAN_BUG_ON(p->mc.check != swp_mc_can_poll);
+#endif
+ qbman_cena_invalidate_prefetch(&p->sys,
+ QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
+ /* Remove the valid-bit - command completed iff the rest is non-zero */
+ verb = ret[0] & ~QB_VALID_BIT;
+ if (!verb)
+ return NULL;
+#ifdef QBMAN_CHECKING
+ p->mc.check = swp_mc_can_start;
+#endif
+ p->mc.valid_bit ^= QB_VALID_BIT;
+ return ret;
+}
+
+/***********/
+/* Enqueue */
+/***********/
+
+/* These should be const, eventually */
+static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2);
+static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1);
+static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1);
+static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1);
+/* Can't set code_eq_dca_idx width. Need qman version. Read at runtime */
+static struct qb_attr_code code_eq_orp_en = QB_CODE(0, 2, 1);
+static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1);
+static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1);
+static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14);
+static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16);
+static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24);
+/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */
+static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1);
+static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16);
+static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4);
+static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1);
+static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8);
+static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32);
+
+enum qbman_eq_cmd_e {
+ /* No enqueue, primarily for plugging ORP gaps for dropped frames */
+ qbman_eq_cmd_empty,
+ /* DMA an enqueue response once complete */
+ qbman_eq_cmd_respond,
+ /* DMA an enqueue response only if the enqueue fails */
+ qbman_eq_cmd_respond_reject
+};
+
+void qbman_eq_desc_clear(struct qbman_eq_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_eq_orp_en, cl, 0);
+ qb_attr_code_encode(&code_eq_cmd, cl,
+ respond_success ? qbman_eq_cmd_respond :
+ qbman_eq_cmd_respond_reject);
+}
+
+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
+ uint32_t opr_id, uint32_t seqnum, int incomplete)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_eq_orp_en, cl, 1);
+ qb_attr_code_encode(&code_eq_cmd, cl,
+ respond_success ? qbman_eq_cmd_respond :
+ qbman_eq_cmd_respond_reject);
+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
+ qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete);
+}
+
+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id,
+ uint32_t seqnum)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_eq_orp_en, cl, 1);
+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0);
+}
+
+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id,
+ uint32_t seqnum)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_eq_orp_en, cl, 1);
+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty);
+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id);
+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum);
+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0);
+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1);
+}
+
+void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
+ dma_addr_t storage_phys,
+ int stash)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys);
+ qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash);
+}
+
+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token);
+}
+
+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_eq_qd_en, cl, 0);
+ qb_attr_code_encode(&code_eq_tgt_id, cl, fqid);
+}
+
+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
+ uint32_t qd_bin, uint32_t qd_prio)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_eq_qd_en, cl, 1);
+ qb_attr_code_encode(&code_eq_tgt_id, cl, qdid);
+ qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin);
+ qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio);
+}
+
+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_eq_eqdi, cl, !!enable);
+}
+
+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
+ uint32_t dqrr_idx, int park)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_eq_dca_en, cl, !!enable);
+ if (enable) {
+ qb_attr_code_encode(&code_eq_dca_pk, cl, !!park);
+ qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx);
+ }
+}
+
+#define EQAR_IDX(eqar) ((eqar) & 0x7)
+#define EQAR_VB(eqar) ((eqar) & 0x80)
+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
+static int qbman_swp_enqueue_array_mode(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR);
+
+ pr_debug("EQAR=%08x\n", eqar);
+ if (!EQAR_SUCCESS(eqar))
+ return -EBUSY;
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ word_copy(&p[1], &cl[1], 7);
+ word_copy(&p[8], fd, sizeof(*fd) >> 2);
+ /* Set the verb byte, have to substitute in the valid-bit */
+ lwsync();
+ p[0] = cl[0] | EQAR_VB(eqar);
+ qbman_cena_write_complete_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
+ return 0;
+}
+
+static int qbman_swp_enqueue_ring_mode(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci;
+ uint8_t diff;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
+ eqcr_ci, s->eqcr.ci);
+ s->eqcr.available += diff;
+ if (!diff)
+ return -EBUSY;
+ }
+
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
+ word_copy(&p[1], &cl[1], 7);
+ word_copy(&p[8], fd, sizeof(*fd) >> 2);
+ lwsync();
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ qbman_cena_write_complete_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & 7));
+ s->eqcr.pi++;
+ s->eqcr.pi &= 0xF;
+ s->eqcr.available--;
+ if (!(s->eqcr.pi & 7))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ return 0;
+}
+
+int qbman_swp_fill_ring(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ __attribute__((unused)) uint8_t burst_index)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci;
+ uint8_t diff;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
+ eqcr_ci, s->eqcr.ci);
+ s->eqcr.available += diff;
+ if (!diff)
+ return -EBUSY;
+ }
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR((s->eqcr.pi/* +burst_index */) & 7));
+ /* word_copy(&p[1], &cl[1], 7); */
+ memcpy(&p[1], &cl[1], 7 * 4);
+ /* word_copy(&p[8], fd, sizeof(*fd) >> 2); */
+ memcpy(&p[8], fd, sizeof(struct qbman_fd));
+
+ /* lwsync(); */
+ p[0] = cl[0] | s->eqcr.pi_vb;
+
+ s->eqcr.pi++;
+ s->eqcr.pi &= 0xF;
+ s->eqcr.available--;
+ if (!(s->eqcr.pi & 7))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+
+ return 0;
+}
+
+int qbman_swp_flush_ring(struct qbman_swp *s)
+{
+ void *ptr = s->sys.addr_cena;
+
+ dcbf((uint64_t)ptr);
+ dcbf((uint64_t)ptr + 0x40);
+ dcbf((uint64_t)ptr + 0x80);
+ dcbf((uint64_t)ptr + 0xc0);
+ dcbf((uint64_t)ptr + 0x100);
+ dcbf((uint64_t)ptr + 0x140);
+ dcbf((uint64_t)ptr + 0x180);
+ dcbf((uint64_t)ptr + 0x1c0);
+
+ return 0;
+}
+
+void qbman_sync(void)
+{
+ lwsync();
+}
+
+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ if (s->sys.eqcr_mode == qman_eqcr_vb_array)
+ return qbman_swp_enqueue_array_mode(s, d, fd);
+ else /* Use ring mode by default */
+ return qbman_swp_enqueue_ring_mode(s, d, fd);
+}
+
+/*************************/
+/* Static (push) dequeue */
+/*************************/
+
+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled)
+{
+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
+
+ QBMAN_BUG_ON(channel_idx > 15);
+ *enabled = (int)qb_attr_code_decode(&code, &s->sdq);
+}
+
+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable)
+{
+ uint16_t dqsrc;
+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx);
+
+ QBMAN_BUG_ON(channel_idx > 15);
+ qb_attr_code_encode(&code, &s->sdq, !!enable);
+ /* Read make the complete src map. If no channels are enabled
+ * the SDQCR must be 0 or else QMan will assert errors
+ */
+ dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq);
+ if (dqsrc != 0)
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq);
+ else
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0);
+}
+
+/***************************/
+/* Volatile (pull) dequeue */
+/***************************/
+
+/* These should be const, eventually */
+static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2);
+static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2);
+static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1);
+static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1);
+static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4);
+static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8);
+static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24);
+static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32);
+
+enum qb_pull_dt_e {
+ qb_pull_dt_channel,
+ qb_pull_dt_workqueue,
+ qb_pull_dt_framequeue
+};
+
+void qbman_pull_desc_clear(struct qbman_pull_desc *d)
+{
+ memset(d, 0, sizeof(*d));
+}
+
+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
+ struct qbman_result *storage,
+ dma_addr_t storage_phys,
+ int stash)
+{
+ uint32_t *cl = qb_cl(d);
+ /* Squiggle the pointer 'storage' into the extra 2 words of the
+ * descriptor (which aren't copied to the hw command)
+ */
+ *(void **)&cl[4] = storage;
+ if (!storage) {
+ qb_attr_code_encode(&code_pull_rls, cl, 0);
+ return;
+ }
+ qb_attr_code_encode(&code_pull_rls, cl, 1);
+ qb_attr_code_encode(&code_pull_stash, cl, !!stash);
+ qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys);
+}
+
+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes)
+{
+ uint32_t *cl = qb_cl(d);
+
+ QBMAN_BUG_ON(!numframes || (numframes > 16));
+ qb_attr_code_encode(&code_pull_numframes, cl,
+ (uint32_t)(numframes - 1));
+}
+
+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_pull_token, cl, token);
+}
+
+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_pull_dct, cl, 1);
+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue);
+ qb_attr_code_encode(&code_pull_dqsource, cl, fqid);
+}
+
+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
+ enum qbman_pull_type_e dct)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_pull_dct, cl, dct);
+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue);
+ qb_attr_code_encode(&code_pull_dqsource, cl, wqid);
+}
+
+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
+ enum qbman_pull_type_e dct)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_pull_dct, cl, dct);
+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel);
+ qb_attr_code_encode(&code_pull_dqsource, cl, chid);
+}
+
+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
+{
+ uint32_t *p;
+ uint32_t *cl = qb_cl(d);
+
+ if (!atomic_dec_and_test(&s->vdq.busy)) {
+ atomic_inc(&s->vdq.busy);
+ return -EBUSY;
+ }
+ s->vdq.storage = *(void **)&cl[4];
+ /* We use portal index +1 as token so that 0 still indicates
+ * that the result isn't valid yet.
+ */
+ qb_attr_code_encode(&code_pull_token, cl, s->desc.idx + 1);
+ p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
+ word_copy(&p[1], &cl[1], 3);
+ /* Set the verb byte, have to substitute in the valid-bit */
+ lwsync();
+ p[0] = cl[0] | s->vdq.valid_bit;
+ s->vdq.valid_bit ^= QB_VALID_BIT;
+ qbman_cena_write_complete_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
+ return 0;
+}
+
+/****************/
+/* Polling DQRR */
+/****************/
+
+static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8);
+static struct qb_attr_code code_dqrr_response = QB_CODE(0, 0, 7);
+static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8);
+static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14);
+static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16);
+/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */
+static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24);
+static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32);
+static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24);
+static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32);
+
+#define QBMAN_RESULT_DQ 0x60
+#define QBMAN_RESULT_FQRN 0x21
+#define QBMAN_RESULT_FQRNI 0x22
+#define QBMAN_RESULT_FQPN 0x24
+#define QBMAN_RESULT_FQDAN 0x25
+#define QBMAN_RESULT_CDAN 0x26
+#define QBMAN_RESULT_CSCN_MEM 0x27
+#define QBMAN_RESULT_CGCU 0x28
+#define QBMAN_RESULT_BPSCN 0x29
+#define QBMAN_RESULT_CSCN_WQ 0x2a
+
+static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4);
+
+/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
+ * only once, so repeated calls can return a sequence of DQRR entries, without
+ * requiring they be consumed immediately or in any particular order.
+ */
+const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *s)
+{
+ uint32_t verb;
+ uint32_t response_verb;
+ uint32_t flags;
+ const struct qbman_result *dq;
+ const uint32_t *p;
+
+ /* Before using valid-bit to detect if something is there, we have to
+ * handle the case of the DQRR reset bug...
+ */
+ if (unlikely(s->dqrr.reset_bug)) {
+ /* We pick up new entries by cache-inhibited producer index,
+ * which means that a non-coherent mapping would require us to
+ * invalidate and read *only* once that PI has indicated that
+ * there's an entry here. The first trip around the DQRR ring
+ * will be much less efficient than all subsequent trips around
+ * it...
+ */
+ uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI);
+ uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi);
+ /* there are new entries iff pi != next_idx */
+ if (pi == s->dqrr.next_idx)
+ return NULL;
+ /* if next_idx is/was the last ring index, and 'pi' is
+ * different, we can disable the workaround as all the ring
+ * entries have now been DMA'd to so valid-bit checking is
+ * repaired. Note: this logic needs to be based on next_idx
+ * (which increments one at a time), rather than on pi (which
+ * can burst and wrap-around between our snapshots of it).
+ */
+ QBMAN_BUG_ON((s->dqrr.dqrr_size - 1) < 0);
+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1u)) {
+ pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n",
+ s->dqrr.next_idx, pi);
+ s->dqrr.reset_bug = 0;
+ }
+ qbman_cena_invalidate_prefetch(&s->sys,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ }
+ dq = qbman_cena_read_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
+ p = qb_cl(dq);
+ verb = qb_attr_code_decode(&code_dqrr_verb, p);
+ /* If the valid-bit isn't of the expected polarity, nothing there. Note,
+ * in the DQRR reset bug workaround, we shouldn't need to skip these
+ * check, because we've already determined that a new entry is available
+ * and we've invalidated the cacheline before reading it, so the
+ * valid-bit behaviour is repaired and should tell us what we already
+ * knew from reading PI.
+ */
+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit)
+ return NULL;
+
+ /* There's something there. Move "next_idx" attention to the next ring
+ * entry (and prefetch it) before returning what we found.
+ */
+ s->dqrr.next_idx++;
+ if (s->dqrr.next_idx == s->dqrr.dqrr_size) {
+ s->dqrr.next_idx = 0;
+ s->dqrr.valid_bit ^= QB_VALID_BIT;
+ }
+ /* If this is the final response to a volatile dequeue command
+ * indicate that the vdq is no longer busy.
+ */
+ flags = qbman_result_DQ_flags(dq);
+ response_verb = qb_attr_code_decode(&code_dqrr_response, &verb);
+ if ((response_verb == QBMAN_RESULT_DQ) &&
+ (flags & QBMAN_DQ_STAT_VOLATILE) &&
+ (flags & QBMAN_DQ_STAT_EXPIRED))
+ atomic_inc(&s->vdq.busy);
+
+ return dq;
+}
+
+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
+void qbman_swp_dqrr_consume(struct qbman_swp *s,
+ const struct qbman_result *dq)
+{
+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
+}
+
+/*********************************/
+/* Polling user-provided storage */
+/*********************************/
+
+int qbman_result_has_new_result(__attribute__((unused)) struct qbman_swp *s,
+ const struct qbman_result *dq)
+{
+ /* To avoid converting the little-endian DQ entry to host-endian prior
+ * to us knowing whether there is a valid entry or not (and run the
+ * risk of corrupting the incoming hardware LE write), we detect in
+ * hardware endianness rather than host. This means we need a different
+ * "code" depending on whether we are BE or LE in software, which is
+ * where DQRR_TOK_OFFSET comes in...
+ */
+ static struct qb_attr_code code_dqrr_tok_detect =
+ QB_CODE(0, DQRR_TOK_OFFSET, 8);
+ /* The user trying to poll for a result treats "dq" as const. It is
+ * however the same address that was provided to us non-const in the
+ * first place, for directing hardware DMA to. So we can cast away the
+ * const because it is mutable from our perspective.
+ */
+ uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
+ uint32_t token;
+
+ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
+ if (token == 0)
+ return 0;
+ /* Entry is valid - overwrite token back to 0 so
+ * a) If this memory is reused tokesn will be 0
+ * b) If someone calls "has_new_result()" again on this entry it
+ * will not appear to be new
+ */
+ qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0);
+
+ /* Only now do we convert from hardware to host endianness. Also, as we
+ * are returning success, the user has promised not to call us again, so
+ * there's no risk of us converting the endianness twice...
+ */
+ make_le32_n(p, 16);
+ return 1;
+}
+
+int qbman_check_command_complete(struct qbman_swp *s,
+ const struct qbman_result *dq)
+{
+ /* To avoid converting the little-endian DQ entry to host-endian prior
+ * to us knowing whether there is a valid entry or not (and run the
+ * risk of corrupting the incoming hardware LE write), we detect in
+ * hardware endianness rather than host. This means we need a different
+ * "code" depending on whether we are BE or LE in software, which is
+ * where DQRR_TOK_OFFSET comes in...
+ */
+ static struct qb_attr_code code_dqrr_tok_detect =
+ QB_CODE(0, DQRR_TOK_OFFSET, 8);
+ /* The user trying to poll for a result treats "dq" as const. It is
+ * however the same address that was provided to us non-const in the
+ * first place, for directing hardware DMA to. So we can cast away the
+ * const because it is mutable from our perspective.
+ */
+ uint32_t *p = (uint32_t *)(unsigned long)qb_cl(dq);
+ uint32_t token;
+
+ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]);
+ if (token == 0)
+ return 0;
+ /* TODO: Remove qbman_swp from parameters and make it a local
+ * once we've tested the reserve portal map change
+ */
+ s = portal_idx_map[token - 1];
+ /* When token is set it indicates that VDQ command has been fetched
+ * by qbman and is working on it. It is safe for software to issue
+ * another VDQ command, so incrementing the busy variable.
+ */
+ if (s->vdq.storage == dq) {
+ s->vdq.storage = NULL;
+ atomic_inc(&s->vdq.busy);
+ }
+ return 1;
+}
+
+/********************************/
+/* Categorising qbman results */
+/********************************/
+
+static struct qb_attr_code code_result_in_mem =
+ QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7);
+
+static inline int __qbman_result_is_x(const struct qbman_result *dq,
+ uint32_t x)
+{
+ const uint32_t *p = qb_cl(dq);
+ uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p);
+
+ return (response_verb == x);
+}
+
+static inline int __qbman_result_is_x_in_mem(const struct qbman_result *dq,
+ uint32_t x)
+{
+ const uint32_t *p = qb_cl(dq);
+ uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p);
+
+ return (response_verb == x);
+}
+
+int qbman_result_is_DQ(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_DQ);
+}
+
+int qbman_result_is_FQDAN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN);
+}
+
+int qbman_result_is_CDAN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN);
+}
+
+int qbman_result_is_CSCN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) ||
+ __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ);
+}
+
+int qbman_result_is_BPSCN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN);
+}
+
+int qbman_result_is_CGCU(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU);
+}
+
+int qbman_result_is_FQRN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN);
+}
+
+int qbman_result_is_FQRNI(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI);
+}
+
+int qbman_result_is_FQPN(const struct qbman_result *dq)
+{
+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN);
+}
+
+/*********************************/
+/* Parsing frame dequeue results */
+/*********************************/
+
+/* These APIs assume qbman_result_is_DQ() is TRUE */
+
+uint32_t qbman_result_DQ_flags(const struct qbman_result *dq)
+{
+ const uint32_t *p = qb_cl(dq);
+
+ return qb_attr_code_decode(&code_dqrr_stat, p);
+}
+
+uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq)
+{
+ const uint32_t *p = qb_cl(dq);
+
+ return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p);
+}
+
+uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq)
+{
+ const uint32_t *p = qb_cl(dq);
+
+ return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p);
+}
+
+uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq)
+{
+ const uint32_t *p = qb_cl(dq);
+
+ return qb_attr_code_decode(&code_dqrr_fqid, p);
+}
+
+uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq)
+{
+ const uint32_t *p = qb_cl(dq);
+
+ return qb_attr_code_decode(&code_dqrr_byte_count, p);
+}
+
+uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq)
+{
+ const uint32_t *p = qb_cl(dq);
+
+ return qb_attr_code_decode(&code_dqrr_frame_count, p);
+}
+
+uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq)
+{
+ const uint64_t *p = (const uint64_t *)qb_cl(dq);
+
+ return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p);
+}
+
+const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq)
+{
+ const uint32_t *p = qb_cl(dq);
+
+ return (const struct qbman_fd *)&p[8];
+}
+
+/**************************************/
+/* Parsing state-change notifications */
+/**************************************/
+
+static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8);
+static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24);
+static struct qb_attr_code code_scn_state_in_mem =
+ QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8);
+static struct qb_attr_code code_scn_rid_in_mem =
+ QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24);
+static struct qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32);
+
+uint8_t qbman_result_SCN_state(const struct qbman_result *scn)
+{
+ const uint32_t *p = qb_cl(scn);
+
+ return (uint8_t)qb_attr_code_decode(&code_scn_state, p);
+}
+
+uint32_t qbman_result_SCN_rid(const struct qbman_result *scn)
+{
+ const uint32_t *p = qb_cl(scn);
+
+ return qb_attr_code_decode(&code_scn_rid, p);
+}
+
+uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn)
+{
+ const uint64_t *p = (const uint64_t *)qb_cl(scn);
+
+ return qb_attr_code_decode_64(&code_scn_ctx_lo, p);
+}
+
+uint8_t qbman_result_SCN_state_in_mem(const struct qbman_result *scn)
+{
+ const uint32_t *p = qb_cl(scn);
+
+ return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p);
+}
+
+uint32_t qbman_result_SCN_rid_in_mem(const struct qbman_result *scn)
+{
+ const uint32_t *p = qb_cl(scn);
+ uint32_t result_rid;
+
+ result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p);
+ return make_le24(result_rid);
+}
+
+/*****************/
+/* Parsing BPSCN */
+/*****************/
+uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn)
+{
+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF;
+}
+
+int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn)
+{
+ return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1);
+}
+
+int qbman_result_bpscn_is_depleted(const struct qbman_result *scn)
+{
+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2);
+}
+
+int qbman_result_bpscn_is_surplus(const struct qbman_result *scn)
+{
+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4);
+}
+
+uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn)
+{
+ uint64_t ctx;
+ uint32_t ctx_hi, ctx_lo;
+
+ ctx = qbman_result_SCN_ctx(scn);
+ ctx_hi = upper32(ctx);
+ ctx_lo = lower32(ctx);
+ return ((uint64_t)make_le32(ctx_hi) << 32 |
+ (uint64_t)make_le32(ctx_lo));
+}
+
+/*****************/
+/* Parsing CGCU */
+/*****************/
+uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn)
+{
+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF;
+}
+
+uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn)
+{
+ uint64_t ctx;
+ uint32_t ctx_hi, ctx_lo;
+
+ ctx = qbman_result_SCN_ctx(scn);
+ ctx_hi = upper32(ctx);
+ ctx_lo = lower32(ctx);
+ return ((uint64_t)(make_le32(ctx_hi) & 0xFF) << 32) |
+ (uint64_t)make_le32(ctx_lo);
+}
+
+/******************/
+/* Buffer release */
+/******************/
+
+/* These should be const, eventually */
+/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */
+static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1);
+static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1);
+static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16);
+
+void qbman_release_desc_clear(struct qbman_release_desc *d)
+{
+ uint32_t *cl;
+
+ memset(d, 0, sizeof(*d));
+ cl = qb_cl(d);
+ qb_attr_code_encode(&code_release_set_me, cl, 1);
+}
+
+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_release_bpid, cl, bpid);
+}
+
+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
+{
+ uint32_t *cl = qb_cl(d);
+
+ qb_attr_code_encode(&code_release_rcdi, cl, !!enable);
+}
+
+#define RAR_IDX(rar) ((rar) & 0x7)
+#define RAR_VB(rar) ((rar) & 0x80)
+#define RAR_SUCCESS(rar) ((rar) & 0x100)
+
+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
+ const uint64_t *buffers, unsigned int num_buffers)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR);
+
+ pr_debug("RAR=%08x\n", rar);
+ if (!RAR_SUCCESS(rar))
+ return -EBUSY;
+ QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
+ /* Start the release command */
+ p = qbman_cena_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+ /* Copy the caller's buffer pointers to the command */
+ u64_to_le32_copy(&p[2], buffers, num_buffers);
+ /* Set the verb byte, have to substitute in the valid-bit and the number
+ * of buffers.
+ */
+ lwsync();
+ p[0] = cl[0] | RAR_VB(rar) | num_buffers;
+ qbman_cena_write_complete_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
+ return 0;
+}
+
+/*******************/
+/* Buffer acquires */
+/*******************/
+
+/* These should be const, eventually */
+static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16);
+static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3);
+static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3);
+
+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers,
+ unsigned int num_buffers)
+{
+ uint32_t *p;
+ uint32_t rslt, num;
+
+ QBMAN_BUG_ON(!num_buffers || (num_buffers > 7));
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ qb_attr_code_encode(&code_acquire_bpid, p, bpid);
+ qb_attr_code_encode(&code_acquire_num, p, num_buffers);
+
+ /* Complete the management command */
+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE);
+
+ /* Decode the outcome */
+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
+ num = qb_attr_code_decode(&code_acquire_r_num, p);
+ QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) !=
+ QBMAN_MC_ACQUIRE);
+
+ /* Determine success or failure */
+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n",
+ bpid, rslt);
+ return -EIO;
+ }
+ QBMAN_BUG_ON(num > num_buffers);
+ /* Copy the acquired buffers to the caller's array */
+ u64_from_le32_copy(buffers, &p[2], num);
+ return (int)num;
+}
+
+/*****************/
+/* FQ management */
+/*****************/
+
+static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32);
+
+static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid,
+ uint8_t alt_fq_verb)
+{
+ uint32_t *p;
+ uint32_t rslt;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ qb_attr_code_encode(&code_fqalt_fqid, p, fqid);
+ /* Complete the management command */
+ p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb);
+
+ /* Decode the outcome */
+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
+ QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p) != alt_fq_verb);
+
+ /* Determine success or failure */
+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n",
+ fqid, alt_fq_verb, rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
+}
+
+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
+}
+
+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
+}
+
+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid)
+{
+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
+}
+
+/**********************/
+/* Channel management */
+/**********************/
+
+static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12);
+static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8);
+static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1);
+static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32);
+
+/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it
+ * would be irresponsible to expose it.
+ */
+#define CODE_CDAN_WE_EN 0x1
+#define CODE_CDAN_WE_CTX 0x4
+
+static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid,
+ uint8_t we_mask, uint8_t cdan_en,
+ uint64_t ctx)
+{
+ uint32_t *p;
+ uint32_t rslt;
+
+ /* Start the management command */
+ p = qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ /* Encode the caller-provided attributes */
+ qb_attr_code_encode(&code_cdan_cid, p, channelid);
+ qb_attr_code_encode(&code_cdan_we, p, we_mask);
+ qb_attr_code_encode(&code_cdan_en, p, cdan_en);
+ qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx);
+ /* Complete the management command */
+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE);
+
+ /* Decode the outcome */
+ rslt = qb_attr_code_decode(&code_generic_rslt, p);
+ QBMAN_BUG_ON(qb_attr_code_decode(&code_generic_verb, p)
+ != QBMAN_WQCHAN_CONFIGURE);
+
+ /* Determine success or failure */
+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) {
+ pr_err("CDAN cQID %d failed: code = 0x%02x\n",
+ channelid, rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
+ uint64_t ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_CTX,
+ 0, ctx);
+}
+
+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 1, 0);
+}
+
+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN,
+ 0, 0);
+}
+
+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
+ uint64_t ctx)
+{
+ return qbman_swp_CDAN_set(s, channelid,
+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
+ 1, ctx);
+}
+
+uint8_t qbman_get_dqrr_idx(struct qbman_result *dqrr)
+{
+ return QBMAN_IDX_FROM_DQRR(dqrr);
+}
+
+struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx)
+{
+ struct qbman_result *dq;
+
+ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(idx));
+ return dq;
+}
+
+int qbman_swp_send_multiple(struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int frames_to_send)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci;
+ uint8_t diff;
+ int sent = 0;
+ int i;
+ int initial_pi = s->eqcr.pi;
+ uint64_t start_pointer;
+
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cena_read_reg(&s->sys,
+ QBMAN_CENA_SWP_EQCR_CI) & 0xF;
+ diff = qm_cyc_diff(QBMAN_EQCR_SIZE,
+ eqcr_ci, s->eqcr.ci);
+ if (!diff)
+ goto done;
+ s->eqcr.available += diff;
+ }
+
+ /* we are trying to send frames_to_send,
+ * if we have enough space in the ring
+ */
+ while (s->eqcr.available && frames_to_send--) {
+ p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
+ QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
+ /* Write command (except of first byte) and FD */
+ memcpy(&p[1], &cl[1], 7 * 4);
+ memcpy(&p[8], &fd[sent], sizeof(struct qbman_fd));
+
+ initial_pi++;
+ initial_pi &= 0xF;
+ s->eqcr.available--;
+ sent++;
+ }
+
+done:
+ initial_pi = s->eqcr.pi;
+ lwsync();
+
+ /* in order for flushes to complete faster:
+ * we use a following trick: we record all lines in 32 bit word
+ */
+
+ initial_pi = s->eqcr.pi;
+ for (i = 0; i < sent; i++) {
+ p = qbman_cena_write_start_wo_shadow_fast(&s->sys,
+ QBMAN_CENA_SWP_EQCR((initial_pi) & 7));
+
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ initial_pi++;
+ initial_pi &= 0xF;
+
+ if (!(initial_pi & 7))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ initial_pi = s->eqcr.pi;
+
+ /* We need to flush all the lines but without
+ * load/store operations between them.
+ * We assign start_pointer before we start loop so that
+ * in loop we do not read it from memory
+ */
+ start_pointer = (uint64_t)s->sys.addr_cena;
+ for (i = 0; i < sent; i++) {
+ p = (uint32_t *)(start_pointer
+ + QBMAN_CENA_SWP_EQCR(initial_pi & 7));
+ dcbf((uint64_t)p);
+ initial_pi++;
+ initial_pi &= 0xF;
+ }
+
+ /* Update producer index for the next call */
+ s->eqcr.pi = initial_pi;
+
+ return sent;
+}
+
+int qbman_get_version(void)
+{
+ return qman_version;
+}
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.h b/drivers/bus/fslmc/qbman/qbman_portal.h
new file mode 100644
index 00000000..7aa1d4f6
--- /dev/null
+++ b/drivers/bus/fslmc/qbman/qbman_portal.h
@@ -0,0 +1,277 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qbman_private.h"
+#include <fsl_qbman_portal.h>
+
+/* All QBMan command and result structures use this "valid bit" encoding */
+#define QB_VALID_BIT ((uint32_t)0x80)
+
+/* Management command result codes */
+#define QBMAN_MC_RSLT_OK 0xf0
+
+/* QBMan DQRR size is set at runtime in qbman_portal.c */
+
+#define QBMAN_EQCR_SIZE 8
+
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return (2 * ringsize) + last - first;
+}
+
+/* --------------------- */
+/* portal data structure */
+/* --------------------- */
+
+struct qbman_swp {
+ struct qbman_swp_desc desc;
+ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it
+ * needs in here.
+ */
+ struct qbman_swp_sys sys;
+ /* Management commands */
+ struct {
+#ifdef QBMAN_CHECKING
+ enum swp_mc_check {
+ swp_mc_can_start, /* call __qbman_swp_mc_start() */
+ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */
+ swp_mc_can_poll, /* call __qbman_swp_mc_result() */
+ } check;
+#endif
+ uint32_t valid_bit; /* 0x00 or 0x80 */
+ } mc;
+ /* Push dequeues */
+ uint32_t sdq;
+ /* Volatile dequeues */
+ struct {
+ /* VDQCR supports a "1 deep pipeline", meaning that if you know
+ * the last-submitted command is already executing in the
+ * hardware (as evidenced by at least 1 valid dequeue result),
+ * you can write another dequeue command to the register, the
+ * hardware will start executing it as soon as the
+ * already-executing command terminates. (This minimises latency
+ * and stalls.) With that in mind, this "busy" variable refers
+ * to whether or not a command can be submitted, not whether or
+ * not a previously-submitted command is still executing. In
+ * other words, once proof is seen that the previously-submitted
+ * command is executing, "vdq" is no longer "busy".
+ */
+ atomic_t busy;
+ uint32_t valid_bit; /* 0x00 or 0x80 */
+ /* We need to determine when vdq is no longer busy. This depends
+ * on whether the "busy" (last-submitted) dequeue command is
+ * targeting DQRR or main-memory, and detected is based on the
+ * presence of the dequeue command's "token" showing up in
+ * dequeue entries in DQRR or main-memory (respectively).
+ */
+ struct qbman_result *storage; /* NULL if DQRR */
+ } vdq;
+ /* DQRR */
+ struct {
+ uint32_t next_idx;
+ uint32_t valid_bit;
+ uint8_t dqrr_size;
+ int reset_bug;
+ } dqrr;
+ struct {
+ uint32_t pi;
+ uint32_t pi_vb;
+ uint32_t ci;
+ int available;
+ } eqcr;
+};
+
+/* -------------------------- */
+/* portal management commands */
+/* -------------------------- */
+
+/* Different management commands all use this common base layer of code to issue
+ * commands and poll for results. The first function returns a pointer to where
+ * the caller should fill in their MC command (though they should ignore the
+ * verb byte), the second function commits merges in the caller-supplied command
+ * verb (which should not include the valid-bit) and submits the command to
+ * hardware, and the third function checks for a completed response (returns
+ * non-NULL if only if the response is complete).
+ */
+void *qbman_swp_mc_start(struct qbman_swp *p);
+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb);
+void *qbman_swp_mc_result(struct qbman_swp *p);
+
+/* Wraps up submit + poll-for-result */
+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
+ uint32_t cmd_verb)
+{
+ int loopvar;
+
+ qbman_swp_mc_submit(swp, cmd, cmd_verb);
+ DBG_POLL_START(loopvar);
+ do {
+ DBG_POLL_CHECK(loopvar);
+ cmd = qbman_swp_mc_result(swp);
+ } while (!cmd);
+ return cmd;
+}
+
+/* ------------ */
+/* qb_attr_code */
+/* ------------ */
+
+/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which
+ * is either serving as a configuration command or a query result. The
+ * representation is inherently little-endian, as the indexing of the words is
+ * itself little-endian in nature and DPAA2 QBMan is little endian for anything
+ * that crosses a word boundary too (64-bit fields are the obvious examples).
+ */
+struct qb_attr_code {
+ unsigned int word; /* which uint32_t[] array member encodes the field */
+ unsigned int lsoffset; /* encoding offset from ls-bit */
+ unsigned int width; /* encoding width. (bool must be 1.) */
+};
+
+/* Some pre-defined codes */
+extern struct qb_attr_code code_generic_verb;
+extern struct qb_attr_code code_generic_rslt;
+
+/* Macros to define codes */
+#define QB_CODE(a, b, c) { a, b, c}
+#define QB_CODE_NULL \
+ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1)
+
+/* Rotate a code "ms", meaning that it moves from less-significant bytes to
+ * more-significant, from less-significant words to more-significant, etc. The
+ * "ls" version does the inverse, from more-significant towards
+ * less-significant.
+ */
+static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code,
+ unsigned int bits)
+{
+ code->lsoffset += bits;
+ while (code->lsoffset > 31) {
+ code->word++;
+ code->lsoffset -= 32;
+ }
+}
+
+static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code,
+ unsigned int bits)
+{
+ /* Don't be fooled, this trick should work because the types are
+ * unsigned. So the case that interests the while loop (the rotate has
+ * gone too far and the word count needs to compensate for it), is
+ * manifested when lsoffset is negative. But that equates to a really
+ * large unsigned value, starting with lots of "F"s. As such, we can
+ * continue adding 32 back to it until it wraps back round above zero,
+ * to a value of 31 or less...
+ */
+ code->lsoffset -= bits;
+ while (code->lsoffset > 31) {
+ code->word--;
+ code->lsoffset += 32;
+ }
+}
+
+/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */
+#define qb_attr_code_for_ms(code, bits, expr) \
+ for (; expr; qb_attr_code_rotate_ms(code, bits))
+#define qb_attr_code_for_ls(code, bits, expr) \
+ for (; expr; qb_attr_code_rotate_ls(code, bits))
+
+/* decode a field from a cacheline */
+static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code,
+ const uint32_t *cacheline)
+{
+ return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]);
+}
+
+static inline uint64_t qb_attr_code_decode_64(const struct qb_attr_code *code,
+ const uint64_t *cacheline)
+{
+ return cacheline[code->word / 2];
+}
+
+/* encode a field to a cacheline */
+static inline void qb_attr_code_encode(const struct qb_attr_code *code,
+ uint32_t *cacheline, uint32_t val)
+{
+ cacheline[code->word] =
+ r32_uint32_t(code->lsoffset, code->width, cacheline[code->word])
+ | e32_uint32_t(code->lsoffset, code->width, val);
+}
+
+static inline void qb_attr_code_encode_64(const struct qb_attr_code *code,
+ uint64_t *cacheline, uint64_t val)
+{
+ cacheline[code->word / 2] = val;
+}
+
+/* Small-width signed values (two's-complement) will decode into medium-width
+ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to
+ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value
+ * 249. Likewise -120 would decode as 136.) This function allows the caller to
+ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit
+ * encoding, will become 0xfffffff9 if you cast the return value to uint32_t).
+ */
+static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code,
+ uint32_t val)
+{
+ QBMAN_BUG_ON(val >= (1u << code->width));
+ /* code->width should never exceed the width of val. If it does then a
+ * different function with larger val size must be used to translate
+ * from unsigned to signed
+ */
+ QBMAN_BUG_ON(code->width > sizeof(val) * CHAR_BIT);
+ /* If the high bit was set, it was encoding a negative */
+ if (val >= 1u << (code->width - 1))
+ return (int32_t)0 - (int32_t)(((uint32_t)1 << code->width) -
+ val);
+ /* Otherwise, it was encoding a positive */
+ return (int32_t)val;
+}
+
+/* ---------------------- */
+/* Descriptors/cachelines */
+/* ---------------------- */
+
+/* To avoid needless dynamic allocation, the driver API often gives the caller
+ * a "descriptor" type that the caller can instantiate however they like.
+ * Ultimately though, it is just a cacheline of binary storage (or something
+ * smaller when it is known that the descriptor doesn't need all 64 bytes) for
+ * holding pre-formatted pieces of hardware commands. The performance-critical
+ * code can then copy these descriptors directly into hardware command
+ * registers more efficiently than trying to construct/format commands
+ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in
+ * order for the compiler to know its size, but the internal details are not
+ * exposed. The following macro is used within the driver for converting *any*
+ * descriptor pointer to a usable array pointer. The use of a macro (instead of
+ * an inline) is necessary to work with different descriptor types and to work
+ * correctly with const and non-const inputs (and similarly-qualified outputs).
+ */
+#define qb_cl(d) (&(d)->dont_manipulate_directly[0])
diff --git a/drivers/bus/fslmc/qbman/qbman_private.h b/drivers/bus/fslmc/qbman/qbman_private.h
new file mode 100644
index 00000000..292ec6a9
--- /dev/null
+++ b/drivers/bus/fslmc/qbman/qbman_private.h
@@ -0,0 +1,174 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Perform extra checking */
+#define QBMAN_CHECKING
+
+/* To maximise the amount of logic that is common between the Linux driver and
+ * other targets (such as the embedded MC firmware), we pivot here between the
+ * inclusion of two platform-specific headers.
+ *
+ * The first, qbman_sys_decl.h, includes any and all required system headers as
+ * well as providing any definitions for the purposes of compatibility. The
+ * second, qbman_sys.h, is where platform-specific routines go.
+ *
+ * The point of the split is that the platform-independent code (including this
+ * header) may depend on platform-specific declarations, yet other
+ * platform-specific routines may depend on platform-independent definitions.
+ */
+
+#include "qbman_sys_decl.h"
+
+/* When things go wrong, it is a convenient trick to insert a few FOO()
+ * statements in the code to trace progress. TODO: remove this once we are
+ * hacking the code less actively.
+ */
+#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__)
+
+/* Any time there is a register interface which we poll on, this provides a
+ * "break after x iterations" scheme for it. It's handy for debugging, eg.
+ * where you don't want millions of lines of log output from a polling loop
+ * that won't, because such things tend to drown out the earlier log output
+ * that might explain what caused the problem. (NB: put ";" after each macro!)
+ * TODO: we should probably remove this once we're done sanitising the
+ * simulator...
+ */
+#define DBG_POLL_START(loopvar) (loopvar = 10)
+#define DBG_POLL_CHECK(loopvar) \
+do { \
+ if (!(loopvar--)) \
+ QBMAN_BUG_ON(NULL == "DBG_POLL_CHECK"); \
+} while (0)
+
+/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets
+ * and widths, these macro-generated encode/decode/isolate/remove inlines can
+ * be used.
+ *
+ * Eg. to "d"ecode a 14-bit field out of a register (into a "uint16_t" type),
+ * where the field is located 3 bits "up" from the least-significant bit of the
+ * register (ie. the field location within the 32-bit register corresponds to a
+ * mask of 0x0001fff8), you would do;
+ * uint16_t field = d32_uint16_t(3, 14, reg_value);
+ *
+ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE,
+ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!"
+ * operator) into a register at bit location 0x00080000 (19 bits "in" from the
+ * LS bit), do;
+ * reg_value |= e32_int(19, 1, !!field);
+ *
+ * If you wish to read-modify-write a register, such that you leave the 14-bit
+ * field as-is but have all other fields set to zero, then "i"solate the 14-bit
+ * value using;
+ * reg_value = i32_uint16_t(3, 14, reg_value);
+ *
+ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to
+ * zero) but leaving all other fields as-is;
+ * reg_val = r32_int(19, 1, reg_value);
+ *
+ */
+#ifdef __LP64__
+#define MAKE_MASK32(width) ((uint32_t)(( 1ULL << width) - 1))
+#else
+#define MAKE_MASK32(width) (width == 32 ? 0xffffffff : \
+ (uint32_t)((1 << width) - 1))
+#endif
+#define DECLARE_CODEC32(t) \
+static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \
+{ \
+ QBMAN_BUG_ON(width > (sizeof(t) * 8)); \
+ return ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \
+} \
+static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \
+{ \
+ QBMAN_BUG_ON(width > (sizeof(t) * 8)); \
+ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \
+} \
+static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \
+ uint32_t val) \
+{ \
+ QBMAN_BUG_ON(width > (sizeof(t) * 8)); \
+ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \
+} \
+static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \
+ uint32_t val) \
+{ \
+ QBMAN_BUG_ON(width > (sizeof(t) * 8)); \
+ return ~(MAKE_MASK32(width) << lsoffset) & val; \
+}
+DECLARE_CODEC32(uint32_t)
+DECLARE_CODEC32(uint16_t)
+DECLARE_CODEC32(uint8_t)
+DECLARE_CODEC32(int)
+
+ /*********************/
+ /* Debugging assists */
+ /*********************/
+
+static inline void __hexdump(unsigned long start, unsigned long end,
+ unsigned long p, size_t sz, const unsigned char *c)
+{
+ while (start < end) {
+ unsigned int pos = 0;
+ char buf[64];
+ int nl = 0;
+
+ pos += sprintf(buf + pos, "%08lx: ", start);
+ do {
+ if ((start < p) || (start >= (p + sz)))
+ pos += sprintf(buf + pos, "..");
+ else
+ pos += sprintf(buf + pos, "%02x", *(c++));
+ if (!(++start & 15)) {
+ buf[pos++] = '\n';
+ nl = 1;
+ } else {
+ nl = 0;
+ if (!(start & 1))
+ buf[pos++] = ' ';
+ if (!(start & 3))
+ buf[pos++] = ' ';
+ }
+ } while (start & 15);
+ if (!nl)
+ buf[pos++] = '\n';
+ buf[pos] = '\0';
+ pr_info("%s", buf);
+ }
+}
+
+static inline void hexdump(const void *ptr, size_t sz)
+{
+ unsigned long p = (unsigned long)ptr;
+ unsigned long start = p & ~(unsigned long)15;
+ unsigned long end = (p + sz + 15) & ~(unsigned long)15;
+ const unsigned char *c = ptr;
+
+ __hexdump(start, end, p, sz, c);
+}
+
+#include "qbman_sys.h"
diff --git a/drivers/bus/fslmc/qbman/qbman_sys.h b/drivers/bus/fslmc/qbman/qbman_sys.h
new file mode 100644
index 00000000..5dbcaa57
--- /dev/null
+++ b/drivers/bus/fslmc/qbman/qbman_sys.h
@@ -0,0 +1,385 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the
+ * driver. They are only included via qbman_private.h, which is itself a
+ * platform-independent file and is included by all the other driver source.
+ *
+ * qbman_sys_decl.h is included prior to all other declarations and logic, and
+ * it exists to provide compatibility with any linux interfaces our
+ * single-source driver code is dependent on (eg. kmalloc). Ie. this file
+ * provides linux compatibility.
+ *
+ * This qbman_sys.h header, on the other hand, is included *after* any common
+ * and platform-neutral declarations and logic in qbman_private.h, and exists to
+ * implement any platform-specific logic of the qbman driver itself. Ie. it is
+ * *not* to provide linux compatibility.
+ */
+
+/* Trace the 3 different classes of read/write access to QBMan. #undef as
+ * required.
+ */
+#undef QBMAN_CCSR_TRACE
+#undef QBMAN_CINH_TRACE
+#undef QBMAN_CENA_TRACE
+
+static inline void word_copy(void *d, const void *s, unsigned int cnt)
+{
+ uint32_t *dd = d;
+ const uint32_t *ss = s;
+
+ while (cnt--)
+ *(dd++) = *(ss++);
+}
+
+/* Currently, the CENA support code expects each 32-bit word to be written in
+ * host order, and these are converted to hardware (little-endian) order on
+ * command submission. However, 64-bit quantities are must be written (and read)
+ * as two 32-bit words with the least-significant word first, irrespective of
+ * host endianness.
+ */
+static inline void u64_to_le32_copy(void *d, const uint64_t *s,
+ unsigned int cnt)
+{
+ uint32_t *dd = d;
+ const uint32_t *ss = (const uint32_t *)s;
+
+ while (cnt--) {
+ /* TBD: the toolchain was choking on the use of 64-bit types up
+ * until recently so this works entirely with 32-bit variables.
+ * When 64-bit types become usable again, investigate better
+ * ways of doing this.
+ */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ *(dd++) = ss[1];
+ *(dd++) = ss[0];
+ ss += 2;
+#else
+ *(dd++) = *(ss++);
+ *(dd++) = *(ss++);
+#endif
+ }
+}
+
+static inline void u64_from_le32_copy(uint64_t *d, const void *s,
+ unsigned int cnt)
+{
+ const uint32_t *ss = s;
+ uint32_t *dd = (uint32_t *)d;
+
+ while (cnt--) {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ dd[1] = *(ss++);
+ dd[0] = *(ss++);
+ dd += 2;
+#else
+ *(dd++) = *(ss++);
+ *(dd++) = *(ss++);
+#endif
+ }
+}
+
+/* Convert a host-native 32bit value into little endian */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+static inline uint32_t make_le32(uint32_t val)
+{
+ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) |
+ ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24);
+}
+
+static inline uint32_t make_le24(uint32_t val)
+{
+ return (((val & 0xff) << 16) | (val & 0xff00) |
+ ((val & 0xff0000) >> 16));
+}
+
+static inline void make_le32_n(uint32_t *val, unsigned int num)
+{
+ while (num--) {
+ *val = make_le32(*val);
+ val++;
+ }
+}
+
+#else
+#define make_le32(val) (val)
+#define make_le24(val) (val)
+#define make_le32_n(val, len) do {} while (0)
+#endif
+
+ /******************/
+ /* Portal access */
+ /******************/
+struct qbman_swp_sys {
+ /* On GPP, the sys support for qbman_swp is here. The CENA region isi
+ * not an mmap() of the real portal registers, but an allocated
+ * place-holder, because the actual writes/reads to/from the portal are
+ * marshalled from these allocated areas using QBMan's "MC access
+ * registers". CINH accesses are atomic so there's no need for a
+ * place-holder.
+ */
+ uint8_t *cena;
+ uint8_t __iomem *addr_cena;
+ uint8_t __iomem *addr_cinh;
+ uint32_t idx;
+ enum qbman_eqcr_mode eqcr_mode;
+};
+
+/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal
+ * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH)
+ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index
+ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal)
+ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE)
+ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete)
+ */
+
+static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset,
+ uint32_t val)
+{
+ __raw_writel(val, s->addr_cinh + offset);
+#ifdef QBMAN_CINH_TRACE
+ pr_info("qbman_cinh_write(%p:%d:0x%03x) 0x%08x\n",
+ s->addr_cinh, s->idx, offset, val);
+#endif
+}
+
+static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset)
+{
+ uint32_t reg = __raw_readl(s->addr_cinh + offset);
+#ifdef QBMAN_CINH_TRACE
+ pr_info("qbman_cinh_read(%p:%d:0x%03x) 0x%08x\n",
+ s->addr_cinh, s->idx, offset, reg);
+#endif
+ return reg;
+}
+
+static inline void *qbman_cena_write_start(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ void *shadow = s->cena + offset;
+
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_start(%p:%d:0x%03x) %p\n",
+ s->addr_cena, s->idx, offset, shadow);
+#endif
+ QBMAN_BUG_ON(offset & 63);
+ dcbz(shadow);
+ return shadow;
+}
+
+static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n",
+ s->addr_cena, s->idx, offset);
+#endif
+ QBMAN_BUG_ON(offset & 63);
+ return (s->addr_cena + offset);
+}
+
+static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
+ uint32_t offset, void *cmd)
+{
+ const uint32_t *shadow = cmd;
+ int loop;
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_complete(%p:%d:0x%03x) %p\n",
+ s->addr_cena, s->idx, offset, shadow);
+ hexdump(cmd, 64);
+#endif
+ for (loop = 15; loop >= 1; loop--)
+ __raw_writel(shadow[loop], s->addr_cena +
+ offset + loop * 4);
+ lwsync();
+ __raw_writel(shadow[0], s->addr_cena + offset);
+ dcbf(s->addr_cena + offset);
+}
+
+static inline void qbman_cena_write_complete_wo_shadow(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_complete(%p:%d:0x%03x)\n",
+ s->addr_cena, s->idx, offset);
+ hexdump(cmd, 64);
+#endif
+ dcbf(s->addr_cena + offset);
+}
+
+static inline uint32_t qbman_cena_read_reg(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ return __raw_readl(s->addr_cena + offset);
+}
+
+static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)
+{
+ uint32_t *shadow = (uint32_t *)(s->cena + offset);
+ unsigned int loop;
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n",
+ s->addr_cena, s->idx, offset, shadow);
+#endif
+
+ for (loop = 0; loop < 16; loop++)
+ shadow[loop] = __raw_readl(s->addr_cena + offset
+ + loop * 4);
+#ifdef QBMAN_CENA_TRACE
+ hexdump(shadow, 64);
+#endif
+ return shadow;
+}
+
+static inline void *qbman_cena_read_wo_shadow(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_read(%p:%d:0x%03x) %p\n",
+ s->addr_cena, s->idx, offset, shadow);
+#endif
+
+#ifdef QBMAN_CENA_TRACE
+ hexdump(shadow, 64);
+#endif
+ return s->addr_cena + offset;
+}
+
+static inline void qbman_cena_invalidate(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ dccivac(s->addr_cena + offset);
+}
+
+static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ dccivac(s->addr_cena + offset);
+ prefetch_for_load(s->addr_cena + offset);
+}
+
+static inline void qbman_cena_prefetch(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+ prefetch_for_load(s->addr_cena + offset);
+}
+
+ /******************/
+ /* Portal support */
+ /******************/
+
+/* The SWP_CFG portal register is special, in that it is used by the
+ * platform-specific code rather than the platform-independent code in
+ * qbman_portal.c. So use of it is declared locally here.
+ */
+#define QBMAN_CINH_SWP_CFG 0xd00
+
+/* For MC portal use, we always configure with
+ * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4)
+ * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x2)
+ * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3)
+ * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2)
+ * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x2)
+ * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- TRUE)
+ * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE)
+ * SE is (SWP_CFG,3,1) - memory stashing enable (<- TRUE)
+ * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE)
+ * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- TRUE)
+ * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- TRUE)
+ */
+static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn,
+ uint8_t est, uint8_t rpm, uint8_t dcm,
+ uint8_t epm, int sd, int sp, int se,
+ int dp, int de, int ep)
+{
+ uint32_t reg;
+
+ reg = e32_uint8_t(20, (uint32_t)(3 + (max_fill >> 3)), max_fill) |
+ e32_uint8_t(16, 3, est) |
+ e32_uint8_t(12, 2, rpm) | e32_uint8_t(10, 2, dcm) |
+ e32_uint8_t(8, 2, epm) | e32_int(5, 1, sd) |
+ e32_int(4, 1, sp) | e32_int(3, 1, se) | e32_int(2, 1, dp) |
+ e32_int(1, 1, de) | e32_int(0, 1, ep) | e32_uint8_t(14, 1, wn);
+ return reg;
+}
+
+static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
+ const struct qbman_swp_desc *d,
+ uint8_t dqrr_size)
+{
+ uint32_t reg;
+
+ s->addr_cena = d->cena_bar;
+ s->addr_cinh = d->cinh_bar;
+ s->idx = (uint32_t)d->idx;
+ s->cena = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!s->cena) {
+ pr_err("Could not allocate page for cena shadow\n");
+ return -1;
+ }
+ s->eqcr_mode = d->eqcr_mode;
+ QBMAN_BUG_ON(d->idx < 0);
+#ifdef QBMAN_CHECKING
+ /* We should never be asked to initialise for a portal that isn't in
+ * the power-on state. (Ie. don't forget to reset portals when they are
+ * decommissioned!)
+ */
+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
+ QBMAN_BUG_ON(reg);
+#endif
+ if (s->eqcr_mode == qman_eqcr_vb_array)
+ reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 1, 1, 1, 1,
+ 1, 1);
+ else
+ reg = qbman_set_swp_cfg(dqrr_size, 0, 2, 3, 2, 2, 1, 1, 1, 1,
+ 1, 1);
+ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
+ if (!reg) {
+ pr_err("The portal %d is not enabled!\n", s->idx);
+ kfree(s->cena);
+ return -1;
+ }
+ return 0;
+}
+
+static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s)
+{
+ free_page((unsigned long)s->cena);
+}
+
+static inline void *
+qbman_cena_write_start_wo_shadow_fast(struct qbman_swp_sys *s,
+ uint32_t offset)
+{
+#ifdef QBMAN_CENA_TRACE
+ pr_info("qbman_cena_write_start(%p:%d:0x%03x)\n",
+ s->addr_cena, s->idx, offset);
+#endif
+ QBMAN_BUG_ON(offset & 63);
+ return (s->addr_cena + offset);
+}
diff --git a/drivers/bus/fslmc/qbman/qbman_sys_decl.h b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
new file mode 100644
index 00000000..e52f5ed2
--- /dev/null
+++ b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
@@ -0,0 +1,73 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <compat.h>
+#include <fsl_qbman_base.h>
+
+/* Sanity check */
+#if (__BYTE_ORDER__ != __ORDER_BIG_ENDIAN__) && \
+ (__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__)
+#error "Unknown endianness!"
+#endif
+
+/* The platform-independent code shouldn't need endianness, except for
+ * weird/fast-path cases like qbman_result_has_token(), which needs to
+ * perform a passive and endianness-specific test on a read-only data structure
+ * very quickly. It's an exception, and this symbol is used for that case.
+ */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define DQRR_TOK_OFFSET 0
+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 24
+#define SCN_STATE_OFFSET_IN_MEM 8
+#define SCN_RID_OFFSET_IN_MEM 8
+#else
+#define DQRR_TOK_OFFSET 24
+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 0
+#define SCN_STATE_OFFSET_IN_MEM 16
+#define SCN_RID_OFFSET_IN_MEM 0
+#endif
+
+/* Similarly-named functions */
+#define upper32(a) upper_32_bits(a)
+#define lower32(a) lower_32_bits(a)
+
+ /****************/
+ /* arch assists */
+ /****************/
+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
+#define lwsync() { asm volatile("dmb st" : : : "memory"); }
+#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
+#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }
+static inline void prefetch_for_load(void *p)
+{
+ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p));
+}
+
+static inline void prefetch_for_store(void *p)
+{
+ asm volatile("prfm pstl1keep, [%0, #64]" : : "r" (p));
+}
diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map
new file mode 100644
index 00000000..2db0fcef
--- /dev/null
+++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -0,0 +1,51 @@
+DPDK_17.05 {
+ global:
+
+ dpaa2_affine_qbman_swp;
+ dpaa2_affine_qbman_swp_sec;
+ dpaa2_alloc_dpbp_dev;
+ dpaa2_alloc_dq_storage;
+ dpaa2_free_dpbp_dev;
+ dpaa2_free_dq_storage;
+ dpbp_disable;
+ dpbp_enable;
+ dpbp_get_attributes;
+ dpbp_get_num_free_bufs;
+ dpbp_open;
+ dpbp_reset;
+ dpio_close;
+ dpio_disable;
+ dpio_enable;
+ dpio_get_attributes;
+ dpio_open;
+ dpio_reset;
+ dpio_set_stashing_destination;
+ mc_send_command;
+ per_lcore__dpaa2_io;
+ qbman_check_command_complete;
+ qbman_eq_desc_clear;
+ qbman_eq_desc_set_fq;
+ qbman_eq_desc_set_no_orp;
+ qbman_eq_desc_set_qd;
+ qbman_eq_desc_set_response;
+ qbman_get_version;
+ qbman_pull_desc_clear;
+ qbman_pull_desc_set_fq;
+ qbman_pull_desc_set_numframes;
+ qbman_pull_desc_set_storage;
+ qbman_release_desc_clear;
+ qbman_release_desc_set_bpid;
+ qbman_result_DQ_fd;
+ qbman_result_DQ_flags;
+ qbman_result_has_new_result;
+ qbman_swp_acquire;
+ qbman_swp_pull;
+ qbman_swp_release;
+ qbman_swp_send_multiple;
+ rte_fslmc_driver_register;
+ rte_fslmc_driver_unregister;
+ rte_fslmc_vfio_dmamap;
+ rte_mcp_ptr_list;
+
+ local: *;
+};
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
new file mode 100644
index 00000000..040ab958
--- /dev/null
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -0,0 +1,148 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_FSLMC_H_
+#define _RTE_FSLMC_H_
+
+/**
+ * @file
+ *
+ * RTE FSLMC Bus Interface
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_dev.h>
+#include <rte_bus.h>
+
+/** Name of FSLMC Bus */
+#define FSLMC_BUS_NAME "FSLMC"
+
+struct rte_dpaa2_driver;
+
+/* DPAA2 Device and Driver lists for FSLMC bus */
+TAILQ_HEAD(rte_fslmc_device_list, rte_dpaa2_device);
+TAILQ_HEAD(rte_fslmc_driver_list, rte_dpaa2_driver);
+
+extern struct rte_fslmc_bus rte_fslmc_bus;
+
+/**
+ * A structure describing a DPAA2 device.
+ */
+struct rte_dpaa2_device {
+ TAILQ_ENTRY(rte_dpaa2_device) next; /**< Next probed DPAA2 device. */
+ struct rte_device device; /**< Inherit core device */
+ union {
+ struct rte_eth_dev *eth_dev; /**< ethernet device */
+ struct rte_cryptodev *cryptodev; /**< Crypto Device */
+ };
+ uint16_t dev_type; /**< Device Type */
+ uint16_t object_id; /**< DPAA2 Object ID */
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_dpaa2_driver *driver; /**< Associated driver */
+};
+
+typedef int (*rte_dpaa2_probe_t)(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev);
+typedef int (*rte_dpaa2_remove_t)(struct rte_dpaa2_device *dpaa2_dev);
+
+/**
+ * A structure describing a DPAA2 driver.
+ */
+struct rte_dpaa2_driver {
+ TAILQ_ENTRY(rte_dpaa2_driver) next; /**< Next in list. */
+ struct rte_driver driver; /**< Inherit core driver. */
+ struct rte_fslmc_bus *fslmc_bus; /**< FSLMC bus reference */
+ uint32_t drv_flags; /**< Flags for controlling device.*/
+ uint16_t drv_type; /**< Driver Type */
+ rte_dpaa2_probe_t probe;
+ rte_dpaa2_remove_t remove;
+};
+
+/*
+ * FSLMC bus
+ */
+struct rte_fslmc_bus {
+ struct rte_bus bus; /**< Generic Bus object */
+ struct rte_fslmc_device_list device_list;
+ /**< FSLMC DPAA2 Device list */
+ struct rte_fslmc_driver_list driver_list;
+ /**< FSLMC DPAA2 Driver list */
+ int device_count;
+ /**< Optional: Count of devices on bus */
+};
+
+/**
+ * Register a DPAA2 driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa2_driver structure describing the driver
+ * to be registered.
+ */
+void rte_fslmc_driver_register(struct rte_dpaa2_driver *driver);
+
+/**
+ * Unregister a DPAA2 driver.
+ *
+ * @param driver
+ * A pointer to a rte_dpaa2_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver);
+
+/** Helper for DPAA2 device registration from driver (eth, crypto) instance */
+#define RTE_PMD_REGISTER_DPAA2(nm, dpaa2_drv) \
+RTE_INIT(dpaa2initfn_ ##nm); \
+static void dpaa2initfn_ ##nm(void) \
+{\
+ (dpaa2_drv).driver.name = RTE_STR(nm);\
+ rte_fslmc_driver_register(&dpaa2_drv); \
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_FSLMC_H_ */
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 745c6146..7a719b9d 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -31,13 +31,29 @@
include $(RTE_SDK)/mk/rte.vars.mk
+core-libs := librte_eal librte_mbuf librte_mempool librte_ring librte_cryptodev
+
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
+DEPDIRS-aesni_gcm = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
+DEPDIRS-aesni_mb = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
+DEPDIRS-armv8 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
+DEPDIRS-openssl = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
+DEPDIRS-qat = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
+DEPDIRS-scheduler = $(core-libs) librte_kvargs librte_reorder
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
+DEPDIRS-snow3g = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
+DEPDIRS-kasumi = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
+DEPDIRS-zuc = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
+DEPDIRS-null = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
+DEPDIRS-dpaa2_sec = $(core-libs)
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index 5898cae1..59a7c6a9 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -31,9 +31,6 @@
include $(RTE_SDK)/mk/rte.vars.mk
ifneq ($(MAKECMDGOALS),clean)
-ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
-$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
-endif
endif
# library name
@@ -50,10 +47,7 @@ LIBABIVER := 1
EXPORT_MAP := rte_pmd_aesni_gcm_version.map
# external library dependencies
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
-LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
-LDLIBS += -lcrypto
+LDLIBS += -lisal_crypto
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
@@ -62,11 +56,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd_ops.c
# export include files
SYMLINK-y-include +=
-# library dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_ring
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_cryptodev
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index c399068c..e9de6546 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -37,91 +37,26 @@
#define LINUX
#endif
-#include <gcm_defines.h>
-#include <aux_funcs.h>
+#include <isa-l_crypto/aes_gcm.h>
-/** Supported vector modes */
-enum aesni_gcm_vector_mode {
- RTE_AESNI_GCM_NOT_SUPPORTED = 0,
- RTE_AESNI_GCM_SSE,
- RTE_AESNI_GCM_AVX,
- RTE_AESNI_GCM_AVX2
-};
-
-typedef void (*aes_keyexp_128_enc_t)(void *key, void *enc_exp_keys);
+typedef void (*aesni_gcm_init_t)(struct gcm_data *my_ctx_data,
+ uint8_t *iv,
+ uint8_t const *aad,
+ uint64_t aad_len);
-typedef void (*aesni_gcm_t)(gcm_data *my_ctx_data, u8 *out, const u8 *in,
- u64 plaintext_len, u8 *iv, const u8 *aad, u64 aad_len,
- u8 *auth_tag, u64 auth_tag_len);
+typedef void (*aesni_gcm_update_t)(struct gcm_data *my_ctx_data,
+ uint8_t *out,
+ const uint8_t *in,
+ uint64_t plaintext_len);
-typedef void (*aesni_gcm_precomp_t)(gcm_data *my_ctx_data, u8 *hash_subkey);
+typedef void (*aesni_gcm_finalize_t)(struct gcm_data *my_ctx_data,
+ uint8_t *auth_tag,
+ uint64_t auth_tag_len);
-/** GCM library function pointer table */
struct aesni_gcm_ops {
- struct {
- struct {
- aes_keyexp_128_enc_t aes128_enc;
- /**< AES128 enc key expansion */
- } keyexp;
- /**< Key expansion functions */
- } aux; /**< Auxiliary functions */
-
- struct {
- aesni_gcm_t enc; /**< GCM encode function pointer */
- aesni_gcm_t dec; /**< GCM decode function pointer */
- aesni_gcm_precomp_t precomp; /**< GCM pre-compute */
- } gcm; /**< GCM functions */
+ aesni_gcm_init_t init;
+ aesni_gcm_update_t update;
+ aesni_gcm_finalize_t finalize;
};
-
-static const struct aesni_gcm_ops gcm_ops[] = {
- [RTE_AESNI_GCM_NOT_SUPPORTED] = {
- .aux = {
- .keyexp = {
- NULL
- }
- },
- .gcm = {
- NULL
- }
- },
- [RTE_AESNI_GCM_SSE] = {
- .aux = {
- .keyexp = {
- aes_keyexp_128_enc_sse
- }
- },
- .gcm = {
- aesni_gcm_enc_sse,
- aesni_gcm_dec_sse,
- aesni_gcm_precomp_sse
- }
- },
- [RTE_AESNI_GCM_AVX] = {
- .aux = {
- .keyexp = {
- aes_keyexp_128_enc_avx,
- }
- },
- .gcm = {
- aesni_gcm_enc_avx_gen2,
- aesni_gcm_dec_avx_gen2,
- aesni_gcm_precomp_avx_gen2
- }
- },
- [RTE_AESNI_GCM_AVX2] = {
- .aux = {
- .keyexp = {
- aes_keyexp_128_enc_avx2,
- }
- },
- .gcm = {
- aesni_gcm_enc_avx_gen4,
- aesni_gcm_dec_avx_gen4,
- aesni_gcm_precomp_avx_gen4
- }
- }
-};
-
-
#endif /* _AESNI_GCM_OPS_H_ */
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index af3d60f0..101ef98b 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -30,8 +30,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <openssl/aes.h>
-
#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
@@ -44,133 +42,96 @@
#include "aesni_gcm_pmd_private.h"
-/**
- * Global static parameter used to create a unique name for each AES-NI multi
- * buffer crypto device.
- */
-static unsigned unique_name_id;
-
-static inline int
-create_unique_device_name(char *name, size_t size)
-{
- int ret;
-
- if (name == NULL)
- return -EINVAL;
-
- ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD),
- unique_name_id++);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static int
-aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
- uint8_t *aeskey, unsigned aeskey_length)
-{
- uint8_t key[aeskey_length] __rte_aligned(16);
- AES_KEY enc_key;
-
- if (hsubkey_length % 16 != 0 && aeskey_length % 16 != 0)
- return -EFAULT;
-
- memcpy(key, aeskey, aeskey_length);
-
- if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
- return -EFAULT;
-
- AES_encrypt(hsubkey, hsubkey, &enc_key);
-
- return 0;
-}
-
-/** Get xform chain order */
-static int
-aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
-{
- /*
- * GCM only supports authenticated encryption or authenticated
- * decryption, all other options are invalid, so we must have exactly
- * 2 xform structs chained together
- */
- if (xform->next == NULL || xform->next->next != NULL)
- return -1;
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
- return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
- }
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
- return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
- }
+/** GCM encode functions pointer table */
+static const struct aesni_gcm_ops aesni_gcm_enc[] = {
+ [AESNI_GCM_KEY_128] = {
+ aesni_gcm128_init,
+ aesni_gcm128_enc_update,
+ aesni_gcm128_enc_finalize
+ },
+ [AESNI_GCM_KEY_256] = {
+ aesni_gcm256_init,
+ aesni_gcm256_enc_update,
+ aesni_gcm256_enc_finalize
+ }
+};
- return -1;
-}
+/** GCM decode functions pointer table */
+static const struct aesni_gcm_ops aesni_gcm_dec[] = {
+ [AESNI_GCM_KEY_128] = {
+ aesni_gcm128_init,
+ aesni_gcm128_dec_update,
+ aesni_gcm128_dec_finalize
+ },
+ [AESNI_GCM_KEY_256] = {
+ aesni_gcm256_init,
+ aesni_gcm256_dec_update,
+ aesni_gcm256_dec_finalize
+ }
+};
/** Parse crypto xform chain and set private session parameters */
int
-aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
- struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
const struct rte_crypto_sym_xform *xform)
{
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
-
- uint8_t hsubkey[16] __rte_aligned(16) = { 0 };
+ const struct rte_crypto_sym_xform *auth_xform;
+ const struct rte_crypto_sym_xform *cipher_xform;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- switch (aesni_gcm_get_mode(xform)) {
- case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
- sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ if (xform->next == NULL || xform->next->next != NULL) {
+ GCM_LOG_ERR("Two and only two chained xform required");
+ return -EINVAL;
+ }
- cipher_xform = xform;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
auth_xform = xform->next;
- break;
- case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
- sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
-
+ cipher_xform = xform;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
auth_xform = xform;
cipher_xform = xform->next;
- break;
- default:
- GCM_LOG_ERR("Unsupported operation chain order parameter");
+ } else {
+ GCM_LOG_ERR("Cipher and auth xform required");
return -EINVAL;
}
- /* We only support AES GCM */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM &&
- auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
+ if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
+ (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
+ auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
+ GCM_LOG_ERR("We only support AES GCM and AES GMAC");
return -EINVAL;
+ }
- /* Select cipher direction */
- if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
- cipher_xform->cipher.op !=
- RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
- GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
- "(DECRYPT) specified are an invalid selection");
- return -EINVAL;
- } else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
- cipher_xform->cipher.op !=
- RTE_CRYPTO_CIPHER_OP_DECRYPT) {
- GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
- "(ENCRYPT) specified are an invalid selection");
+ /* Select Crypto operation */
+ if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+ else {
+ GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
+ " Decrypt/Verify are valid only");
return -EINVAL;
}
- /* Expand GCM AES128 key */
- (*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
- sess->gdata.expanded_keys);
+ /* Check key length and calculate GCM pre-compute. */
+ switch (cipher_xform->cipher.key.length) {
+ case 16:
+ aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
+ sess->key = AESNI_GCM_KEY_128;
- /* Calculate hash sub key here */
- aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
- cipher_xform->cipher.key.data,
- cipher_xform->cipher.key.length);
+ break;
+ case 32:
+ aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
+ sess->key = AESNI_GCM_KEY_256;
- /* Calculate GCM pre-compute */
- (*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);
+ break;
+ default:
+ GCM_LOG_ERR("Unsupported cipher key length");
+ return -EINVAL;
+ }
return 0;
}
@@ -194,10 +155,10 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
return sess;
sess = (struct aesni_gcm_session *)
- ((struct rte_cryptodev_session *)_sess)->_private;
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
- if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
- sess, op->xform) != 0)) {
+ if (unlikely(aesni_gcm_set_session_parameters(sess,
+ op->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
@@ -217,19 +178,45 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
*
*/
static int
-process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
+process_gcm_crypto_op(struct rte_crypto_sym_op *op,
struct aesni_gcm_session *session)
{
uint8_t *src, *dst;
- struct rte_mbuf *m = op->m_src;
+ struct rte_mbuf *m_src = op->m_src;
+ uint32_t offset = op->cipher.data.offset;
+ uint32_t part_len, total_len, data_len;
+
+ RTE_ASSERT(m_src != NULL);
+
+ while (offset >= m_src->data_len) {
+ offset -= m_src->data_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+ }
+
+ data_len = m_src->data_len - offset;
+ part_len = (data_len < op->cipher.data.length) ? data_len :
+ op->cipher.data.length;
+
+ /* Destination buffer is required when segmented source buffer */
+ RTE_ASSERT((part_len == op->cipher.data.length) ||
+ ((part_len != op->cipher.data.length) &&
+ (op->m_dst != NULL)));
+ /* Segmented destination buffer is not supported */
+ RTE_ASSERT((op->m_dst == NULL) ||
+ ((op->m_dst != NULL) &&
+ rte_pktmbuf_is_contiguous(op->m_dst)));
+
- src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
dst = op->m_dst ?
rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
op->cipher.data.offset) :
- rte_pktmbuf_mtod_offset(m, uint8_t *,
+ rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
op->cipher.data.offset);
+ src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+
/* sanity checks */
if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
op->cipher.iv.length != 0) {
@@ -246,48 +233,81 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
*iv_padd = rte_bswap32(1);
}
- if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
- op->auth.aad.length != 0) {
- GCM_LOG_ERR("iv");
- return -1;
- }
-
if (op->auth.digest.length != 16 &&
op->auth.digest.length != 12 &&
- op->auth.digest.length != 8 &&
- op->auth.digest.length != 0) {
- GCM_LOG_ERR("iv");
+ op->auth.digest.length != 8) {
+ GCM_LOG_ERR("digest");
return -1;
}
if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
- (*qp->ops->gcm.enc)(&session->gdata, dst, src,
- (uint64_t)op->cipher.data.length,
+ aesni_gcm_enc[session->key].init(&session->gdata,
op->cipher.iv.data,
op->auth.aad.data,
- (uint64_t)op->auth.aad.length,
+ (uint64_t)op->auth.aad.length);
+
+ aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
+ (uint64_t)part_len);
+ total_len = op->cipher.data.length - part_len;
+
+ while (total_len) {
+ dst += part_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ aesni_gcm_enc[session->key].update(&session->gdata,
+ dst, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
+ aesni_gcm_enc[session->key].finalize(&session->gdata,
op->auth.digest.data,
(uint64_t)op->auth.digest.length);
- } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
- uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
+ } else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
+ uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(op->m_dst ?
+ op->m_dst : op->m_src,
op->auth.digest.length);
if (!auth_tag) {
- GCM_LOG_ERR("iv");
+ GCM_LOG_ERR("auth_tag");
return -1;
}
- (*qp->ops->gcm.dec)(&session->gdata, dst, src,
- (uint64_t)op->cipher.data.length,
+ aesni_gcm_dec[session->key].init(&session->gdata,
op->cipher.iv.data,
op->auth.aad.data,
- (uint64_t)op->auth.aad.length,
+ (uint64_t)op->auth.aad.length);
+
+ aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
+ (uint64_t)part_len);
+ total_len = op->cipher.data.length - part_len;
+
+ while (total_len) {
+ dst += part_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ aesni_gcm_dec[session->key].update(&session->gdata,
+ dst, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
+ aesni_gcm_dec[session->key].finalize(&session->gdata,
auth_tag,
(uint64_t)op->auth.digest.length);
- } else {
- GCM_LOG_ERR("iv");
- return -1;
}
return 0;
@@ -355,67 +375,73 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
-
- rte_ring_enqueue(qp->processed_pkts, (void *)op);
}
static uint16_t
-aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct aesni_gcm_session *sess;
struct aesni_gcm_qp *qp = queue_pair;
- int i, retval = 0;
+ int retval = 0;
+ unsigned int i, nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
- for (i = 0; i < nb_ops; i++) {
+ for (i = 0; i < nb_dequeued; i++) {
sess = aesni_gcm_get_session(qp, ops[i]->sym);
if (unlikely(sess == NULL)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->qp_stats.enqueue_err_count++;
+ qp->qp_stats.dequeue_err_count++;
break;
}
- retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
+ retval = process_gcm_crypto_op(ops[i]->sym, sess);
if (retval < 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->qp_stats.enqueue_err_count++;
+ qp->qp_stats.dequeue_err_count++;
break;
}
handle_completed_gcm_crypto_op(qp, ops[i]);
-
- qp->qp_stats.enqueued_count++;
}
+
+ qp->qp_stats.dequeued_count += i;
+
return i;
}
static uint16_t
-aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct aesni_gcm_qp *qp = queue_pair;
- unsigned nb_dequeued;
+ unsigned int nb_enqueued;
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)ops, nb_ops);
- qp->qp_stats.dequeued_count += nb_dequeued;
+ nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.enqueued_count += nb_enqueued;
- return nb_dequeued;
+ return nb_enqueued;
}
-static int aesni_gcm_remove(const char *name);
+static int aesni_gcm_remove(struct rte_vdev_device *vdev);
static int
aesni_gcm_create(const char *name,
+ struct rte_vdev_device *vdev,
struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
- char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct aesni_gcm_private *internals;
- enum aesni_gcm_vector_mode vector_mode;
+
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
/* Check CPU for support for AES instruction set */
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
@@ -423,27 +449,7 @@ aesni_gcm_create(const char *name,
return -EFAULT;
}
- /* Check CPU for supported vector instruction set */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
- vector_mode = RTE_AESNI_GCM_AVX2;
- else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
- vector_mode = RTE_AESNI_GCM_AVX;
- else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
- vector_mode = RTE_AESNI_GCM_SSE;
- else {
- GCM_LOG_ERR("Vector instructions are not supported by CPU");
- return -EFAULT;
- }
-
- /* create a unique device name */
- if (create_unique_device_name(crypto_dev_name,
- RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
- GCM_LOG_ERR("failed to create unique cryptodev name");
- return -EINVAL;
- }
-
-
- dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
sizeof(struct aesni_gcm_private), init_params->socket_id);
if (dev == NULL) {
GCM_LOG_ERR("failed to create cryptodev vdev");
@@ -459,63 +465,60 @@ aesni_gcm_create(const char *name,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_CPU_AESNI;
+ RTE_CRYPTODEV_FF_CPU_AESNI |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
- switch (vector_mode) {
- case RTE_AESNI_GCM_SSE:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
- break;
- case RTE_AESNI_GCM_AVX:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
- break;
- case RTE_AESNI_GCM_AVX2:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
- break;
- default:
- break;
- }
-
- /* Set vector instructions mode supported */
internals = dev->data->dev_private;
- internals->vector_mode = vector_mode;
-
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
init_error:
- GCM_LOG_ERR("driver %s: create failed", name);
+ GCM_LOG_ERR("driver %s: create failed", init_params->name);
- aesni_gcm_remove(crypto_dev_name);
+ aesni_gcm_remove(vdev);
return -EFAULT;
}
static int
-aesni_gcm_probe(const char *name, const char *input_args)
+aesni_gcm_probe(struct rte_vdev_device *vdev)
{
struct rte_crypto_vdev_init_params init_params = {
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
- rte_socket_id()
+ rte_socket_id(),
+ {0}
};
+ const char *name;
+ const char *input_args;
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
init_params.max_nb_queue_pairs);
RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
init_params.max_nb_sessions);
- return aesni_gcm_create(name, &init_params);
+ return aesni_gcm_create(name, vdev, &init_params);
}
static int
-aesni_gcm_remove(const char *name)
+aesni_gcm_remove(struct rte_vdev_device *vdev)
{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index c51f82a8..1fc047be 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -39,17 +39,17 @@
#include "aesni_gcm_pmd_private.h"
static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
- { /* AES GCM (AUTH) */
+ { /* AES GMAC (AUTH) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
- .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
.block_size = 16,
.key_size = {
.min = 16,
- .max = 16,
- .increment = 0
+ .max = 32,
+ .increment = 16
},
.digest_size = {
.min = 8,
@@ -57,9 +57,34 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
.increment = 4
},
.aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 16
+ },
+ .digest_size = {
.min = 8,
- .max = 12,
+ .max = 16,
.increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
}
}, }
}, }
@@ -73,8 +98,8 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
.block_size = 16,
.key_size = {
.min = 16,
- .max = 16,
- .increment = 0
+ .max = 32,
+ .increment = 16
},
.iv_size = {
.min = 12,
@@ -89,7 +114,8 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
/** Configure device */
static int
-aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev)
+aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
{
return 0;
}
@@ -199,7 +225,7 @@ aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
- if (r->prod.size >= ring_size) {
+ if (rte_ring_get_size(r) >= ring_size) {
GCM_LOG_INFO("Reusing existing ring %s for processed"
" packets", qp->name);
return r;
@@ -221,7 +247,6 @@ aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
int socket_id)
{
struct aesni_gcm_qp *qp = NULL;
- struct aesni_gcm_private *internals = dev->data->dev_private;
/* Free memory prior to re-allocation if needed. */
if (dev->data->queue_pairs[qp_id] != NULL)
@@ -239,8 +264,6 @@ aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
goto qp_setup_cleanup;
- qp->ops = &gcm_ops[internals->vector_mode];
-
qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
qp_conf->nb_descriptors, socket_id);
if (qp->processed_pkts == NULL)
@@ -291,18 +314,15 @@ aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
/** Configure a aesni gcm session from a crypto xform chain */
static void *
-aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
+aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform, void *sess)
{
- struct aesni_gcm_private *internals = dev->data->dev_private;
-
if (unlikely(sess == NULL)) {
GCM_LOG_ERR("invalid session struct");
return NULL;
}
- if (aesni_gcm_set_session_parameters(&gcm_ops[internals->vector_mode],
- sess, xform) != 0) {
+ if (aesni_gcm_set_session_parameters(sess, xform) != 0) {
GCM_LOG_ERR("failed configure session parameters");
return NULL;
}
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 9878d6e4..0496b447 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -58,8 +58,6 @@
/** private data structure for each virtual AESNI GCM device */
struct aesni_gcm_private {
- enum aesni_gcm_vector_mode vector_mode;
- /**< Vector mode */
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
unsigned max_nb_sessions;
@@ -71,8 +69,6 @@ struct aesni_gcm_qp {
/**< Queue Pair Identifier */
char name[RTE_CRYPTODEV_NAME_LEN];
/**< Unique Queue Pair Name */
- const struct aesni_gcm_ops *ops;
- /**< Architecture dependent function pointer table of the gcm APIs */
struct rte_ring *processed_pkts;
/**< Ring for placing process packets */
struct rte_mempool *sess_mp;
@@ -87,10 +83,17 @@ enum aesni_gcm_operation {
AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
};
+enum aesni_gcm_key {
+ AESNI_GCM_KEY_128,
+ AESNI_GCM_KEY_256
+};
+
/** AESNI GCM private session structure */
struct aesni_gcm_session {
enum aesni_gcm_operation op;
/**< GCM operation type */
+ enum aesni_gcm_key key;
+ /**< GCM key type */
struct gcm_data gdata __rte_cache_aligned;
/**< GCM parameters */
};
@@ -98,7 +101,6 @@ struct aesni_gcm_session {
/**
* Setup GCM session parameters
- * @param ops gcm ops function pointer table
* @param sess aesni gcm session structure
* @param xform crypto transform chain
*
@@ -107,8 +109,7 @@ struct aesni_gcm_session {
* - On failure returns error code < 0
*/
extern int
-aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
- struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
const struct rte_crypto_sym_xform *xform);
diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index d3994cc6..611d4123 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -58,11 +58,4 @@ LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
-# library dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_ring
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_cryptodev
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_mb/aesni_mb_ops.h b/drivers/crypto/aesni_mb/aesni_mb_ops.h
index 0c119bf1..59c3ee1e 100644
--- a/drivers/crypto/aesni_mb/aesni_mb_ops.h
+++ b/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -44,29 +44,30 @@ enum aesni_mb_vector_mode {
RTE_AESNI_MB_NOT_SUPPORTED = 0,
RTE_AESNI_MB_SSE,
RTE_AESNI_MB_AVX,
- RTE_AESNI_MB_AVX2
+ RTE_AESNI_MB_AVX2,
+ RTE_AESNI_MB_AVX512
};
-typedef void (*md5_one_block_t)(void *data, void *digest);
+typedef void (*md5_one_block_t)(const void *data, void *digest);
-typedef void (*sha1_one_block_t)(void *data, void *digest);
-typedef void (*sha224_one_block_t)(void *data, void *digest);
-typedef void (*sha256_one_block_t)(void *data, void *digest);
-typedef void (*sha384_one_block_t)(void *data, void *digest);
-typedef void (*sha512_one_block_t)(void *data, void *digest);
+typedef void (*sha1_one_block_t)(const void *data, void *digest);
+typedef void (*sha224_one_block_t)(const void *data, void *digest);
+typedef void (*sha256_one_block_t)(const void *data, void *digest);
+typedef void (*sha384_one_block_t)(const void *data, void *digest);
+typedef void (*sha512_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_128_t)
- (void *key, void *enc_exp_keys, void *dec_exp_keys);
+ (const void *key, void *enc_exp_keys, void *dec_exp_keys);
typedef void (*aes_keyexp_192_t)
- (void *key, void *enc_exp_keys, void *dec_exp_keys);
+ (const void *key, void *enc_exp_keys, void *dec_exp_keys);
typedef void (*aes_keyexp_256_t)
- (void *key, void *enc_exp_keys, void *dec_exp_keys);
+ (const void *key, void *enc_exp_keys, void *dec_exp_keys);
typedef void (*aes_xcbc_expand_key_t)
- (void *key, void *exp_k1, void *k2, void *k3);
+ (const void *key, void *exp_k1, void *k2, void *k3);
/** Multi-buffer library function pointer table */
-struct aesni_mb_ops {
+struct aesni_mb_op_fns {
struct {
init_mb_mgr_t init_mgr;
/**< Initialise scheduler */
@@ -115,7 +116,7 @@ struct aesni_mb_ops {
};
-static const struct aesni_mb_ops job_ops[] = {
+static const struct aesni_mb_op_fns job_ops[] = {
[RTE_AESNI_MB_NOT_SUPPORTED] = {
.job = {
NULL
@@ -203,6 +204,31 @@ static const struct aesni_mb_ops job_ops[] = {
aes_xcbc_expand_key_avx2
}
}
+ },
+ [RTE_AESNI_MB_AVX512] = {
+ .job = {
+ init_mb_mgr_avx512,
+ get_next_job_avx512,
+ submit_job_avx512,
+ get_completed_job_avx512,
+ flush_job_avx512
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_avx512,
+ sha1_one_block_avx512,
+ sha224_one_block_avx512,
+ sha256_one_block_avx512,
+ sha384_one_block_avx512,
+ sha512_one_block_avx512
+ },
+ .keyexp = {
+ aes_keyexp_128_avx512,
+ aes_keyexp_192_avx512,
+ aes_keyexp_256_avx512,
+ aes_xcbc_expand_key_avx512
+ }
+ }
}
};
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 7443b47b..45b25c9d 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -40,29 +40,8 @@
#include "rte_aesni_mb_pmd_private.h"
-/**
- * Global static parameter used to create a unique name for each AES-NI multi
- * buffer crypto device.
- */
-static unsigned unique_name_id;
-
-static inline int
-create_unique_device_name(char *name, size_t size)
-{
- int ret;
-
- if (name == NULL)
- return -EINVAL;
-
- ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD),
- unique_name_id++);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-typedef void (*hash_one_block_t)(void *data, void *digest);
-typedef void (*aes_keyexp_t)(void *key, void *enc_exp_keys, void *dec_exp_keys);
+typedef void (*hash_one_block_t)(const void *data, void *digest);
+typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
/**
* Calculate the authentication pre-computes
@@ -107,41 +86,50 @@ calculate_auth_precomputes(hash_one_block_t one_block_hash,
}
/** Get xform chain order */
-static int
+static enum aesni_mb_operation
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
- /*
- * Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
- * operations, all other options are invalid, so we must have exactly
- * 2 xform structs chained together
- */
- if (xform->next == NULL || xform->next->next != NULL)
- return -1;
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
- return HASH_CIPHER;
+ if (xform == NULL)
+ return AESNI_MB_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return AESNI_MB_OP_CIPHER_ONLY;
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return AESNI_MB_OP_CIPHER_HASH;
+ }
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
- return CIPHER_HASH;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return AESNI_MB_OP_HASH_ONLY;
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return AESNI_MB_OP_HASH_CIPHER;
+ }
- return -1;
+ return AESNI_MB_OP_NOT_SUPPORTED;
}
/** Set session authentication parameters */
static int
-aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
+ if (xform == NULL) {
+ sess->auth.algo = NULL_HASH;
+ return 0;
+ }
+
if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
MB_LOG_ERR("Crypto xform struct not of type auth");
return -1;
}
+ /* Select auth generate/verify */
+ sess->auth.operation = xform->auth.op;
+
/* Set Authentication Parameters */
if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
sess->auth.algo = AES_XCBC;
@@ -193,12 +181,17 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
/** Set session cipher parameters */
static int
-aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
aes_keyexp_t aes_keyexp_fn;
+ if (xform == NULL) {
+ sess->cipher.mode = NULL_CIPHER;
+ return 0;
+ }
+
if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
return -1;
@@ -225,6 +218,9 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
case RTE_CRYPTO_CIPHER_AES_CTR:
sess->cipher.mode = CNTR;
break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ sess->cipher.mode = DOCSIS_SEC_BPI;
+ break;
default:
MB_LOG_ERR("Unsupported cipher mode parameter");
return -1;
@@ -259,7 +255,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
/** Parse crypto xform chain and set private session parameters */
int
-aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
@@ -268,16 +264,36 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
- case HASH_CIPHER:
+ case AESNI_MB_OP_HASH_CIPHER:
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = xform->next;
break;
- case CIPHER_HASH:
+ case AESNI_MB_OP_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
auth_xform = xform->next;
cipher_xform = xform;
break;
+ case AESNI_MB_OP_HASH_ONLY:
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = xform;
+ cipher_xform = NULL;
+ break;
+ case AESNI_MB_OP_CIPHER_ONLY:
+ /*
+ * Multi buffer library operates only at two modes,
+ * CIPHER_HASH and HASH_CIPHER. When doing ciphering only,
+ * chain order depends on cipher operation: encryption is always
+ * the first operation and decryption the last one.
+ */
+ if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->chain_order = CIPHER_HASH;
+ else
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = NULL;
+ cipher_xform = xform;
+ break;
+ case AESNI_MB_OP_NOT_SUPPORTED:
default:
MB_LOG_ERR("Unsupported operation chain order parameter");
return -1;
@@ -296,16 +312,43 @@ aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
return 0;
}
+/**
+ * burst enqueue, place crypto operations on ingress queue for processing.
+ *
+ * @param __qp Queue Pair to process
+ * @param ops Crypto operations for processing
+ * @param nb_ops Number of crypto operations for processing
+ *
+ * @return
+ * - Number of crypto operations enqueued
+ */
+static uint16_t
+aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct aesni_mb_qp *qp = __qp;
+
+ unsigned int nb_enqueued;
+
+ nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+ (void **)ops, nb_ops, NULL);
+
+ qp->stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
/** Get multi buffer session */
-static struct aesni_mb_session *
+static inline struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
struct aesni_mb_session *sess = NULL;
if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
if (unlikely(op->sym->session->dev_type !=
- RTE_CRYPTODEV_AESNI_MB_PMD))
+ RTE_CRYPTODEV_AESNI_MB_PMD)) {
return NULL;
+ }
sess = (struct aesni_mb_session *)op->sym->session->_private;
} else {
@@ -317,7 +360,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
sess = (struct aesni_mb_session *)
((struct rte_cryptodev_sym_session *)_sess)->_private;
- if (unlikely(aesni_mb_set_session_parameters(qp->ops,
+ if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
@@ -340,18 +383,20 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
* - Completed JOB_AES_HMAC structure pointer on success
* - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
*/
-static JOB_AES_HMAC *
-process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
- struct aesni_mb_session *session)
+static inline int
+set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
+ struct rte_crypto_op *op)
{
- JOB_AES_HMAC *job;
-
struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+ struct aesni_mb_session *session;
uint16_t m_offset = 0;
- job = (*qp->ops->job.get_next)(&qp->mb_mgr);
- if (unlikely(job == NULL))
- return job;
+ session = get_session(qp, op);
+ if (session == NULL) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -1;
+ }
+ op->status = RTE_CRYPTO_OP_STATUS_ENQUEUED;
/* Set crypto operation */
job->chain_order = session->chain_order;
@@ -386,7 +431,8 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
if (odata == NULL) {
MB_LOG_ERR("failed to allocate space in destination "
"mbuf for source data");
- return NULL;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -1;
}
memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
@@ -397,14 +443,16 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
}
/* Set digest output location */
- if (job->cipher_direction == DECRYPT) {
+ if (job->hash_alg != NULL_HASH &&
+ session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
get_digest_byte_length(job->hash_alg));
if (job->auth_tag_output == NULL) {
MB_LOG_ERR("failed to allocate space in output mbuf "
"for temp digest");
- return NULL;
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -1;
}
memset(job->auth_tag_output, 0,
@@ -439,7 +487,22 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
job->user_data = op;
job->user_data2 = m_dst;
- return job;
+ return 0;
+}
+
+static inline void
+verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
+ struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;
+
+ RTE_ASSERT(m_dst == NULL);
+
+ /* Verify digest if required */
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
+ job->auth_tag_output_len_in_bytes) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* trim area used for digest from mbuf */
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/**
@@ -452,32 +515,32 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
* verification of supplied digest in the case of a HASH_CIPHER operation
* - Returns NULL on invalid job
*/
-static struct rte_crypto_op *
+static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
- struct rte_crypto_op *op =
- (struct rte_crypto_op *)job->user_data;
- struct rte_mbuf *m_dst =
- (struct rte_mbuf *)job->user_data2;
-
- if (op == NULL || m_dst == NULL)
- return NULL;
-
- /* set status as successful by default */
- op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
-
- /* check if job has been processed */
- if (unlikely(job->status != STS_COMPLETED)) {
- op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return op;
- } else if (job->chain_order == HASH_CIPHER) {
- /* Verify digest if required */
- if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
- job->auth_tag_output_len_in_bytes) != 0)
- op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
+ struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+
+ struct aesni_mb_session *sess;
+
+ RTE_ASSERT(op == NULL);
+
+ if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ENQUEUED)) {
+ switch (job->status) {
+ case STS_COMPLETED:
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (job->hash_alg != NULL_HASH) {
+ sess = (struct aesni_mb_session *)
+ op->sym->session->_private;
+
+ if (sess->auth.operation ==
+ RTE_CRYPTO_AUTH_OP_VERIFY)
+ verify_digest(job, op);
+ }
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
}
/* Free session if a session-less crypto op */
@@ -500,121 +563,131 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
* - Number of processed jobs
*/
static unsigned
-handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
+handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
- while (job) {
- processed_jobs++;
+ while (job != NULL && processed_jobs < nb_ops) {
op = post_process_mb_job(qp, job);
- if (op)
- rte_ring_enqueue(qp->processed_ops, (void *)op);
- else
+
+ if (op) {
+ ops[processed_jobs++] = op;
+ qp->stats.dequeued_count++;
+ } else {
qp->stats.dequeue_err_count++;
- job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
+ break;
+ }
+
+ job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
}
return processed_jobs;
}
+static inline uint16_t
+flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ int processed_ops = 0;
+
+ /* Flush the remaining jobs */
+ JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);
+
+ if (job)
+ processed_ops += handle_completed_jobs(qp, job,
+ &ops[processed_ops], nb_ops - processed_ops);
+
+ return processed_ops;
+}
+
+static inline JOB_AES_HMAC *
+set_job_null_op(JOB_AES_HMAC *job)
+{
+ job->chain_order = HASH_CIPHER;
+ job->cipher_mode = NULL_CIPHER;
+ job->hash_alg = NULL_HASH;
+ job->cipher_direction = DECRYPT;
+
+ return job;
+}
+
static uint16_t
-aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- struct aesni_mb_session *sess;
struct aesni_mb_qp *qp = queue_pair;
- JOB_AES_HMAC *job = NULL;
+ struct rte_crypto_op *op;
+ JOB_AES_HMAC *job;
- int i, processed_jobs = 0;
+ int retval, processed_jobs = 0;
- for (i = 0; i < nb_ops; i++) {
-#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
- if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
- MB_LOG_ERR("PMD only supports symmetric crypto "
- "operation requests, op (%p) is not a "
- "symmetric operation.", op);
- qp->stats.enqueue_err_count++;
- goto flush_jobs;
- }
-#endif
- sess = get_session(qp, ops[i]);
- if (unlikely(sess == NULL)) {
- qp->stats.enqueue_err_count++;
- goto flush_jobs;
- }
+ do {
+ /* Get next operation to process from ingress queue */
+ retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+ if (retval < 0)
+ break;
- job = process_crypto_op(qp, ops[i], sess);
+ /* Get next free mb job struct from mb manager */
+ job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL)) {
- qp->stats.enqueue_err_count++;
- goto flush_jobs;
+ /* if no free mb job structs we need to flush mb_mgr */
+ processed_jobs += flush_mb_mgr(qp,
+ &ops[processed_jobs],
+ (nb_ops - processed_jobs) - 1);
+
+ job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ }
+
+ retval = set_mb_job_params(job, qp, op);
+ if (unlikely(retval != 0)) {
+ qp->stats.dequeue_err_count++;
+ set_job_null_op(job);
}
- /* Submit Job */
- job = (*qp->ops->job.submit)(&qp->mb_mgr);
+ /* Submit job to multi-buffer for processing */
+ job = (*qp->op_fns->job.submit)(&qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
* before submitting subsequent jobs
*/
if (job)
- processed_jobs += handle_completed_jobs(qp, job);
- }
-
- if (processed_jobs == 0)
- goto flush_jobs;
- else
- qp->stats.enqueued_count += processed_jobs;
- return i;
-
-flush_jobs:
- /*
- * If we haven't processed any jobs in submit loop, then flush jobs
- * queue to stop the output stalling
- */
- job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
- if (job)
- qp->stats.enqueued_count += handle_completed_jobs(qp, job);
+ processed_jobs += handle_completed_jobs(qp, job,
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
- return i;
-}
+ } while (processed_jobs < nb_ops);
-static uint16_t
-aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct aesni_mb_qp *qp = queue_pair;
+ if (processed_jobs < 1)
+ processed_jobs += flush_mb_mgr(qp,
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
- unsigned nb_dequeued;
-
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)ops, nb_ops);
- qp->stats.dequeued_count += nb_dequeued;
-
- return nb_dequeued;
+ return processed_jobs;
}
-
-static int cryptodev_aesni_mb_remove(const char *name);
+static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
static int
cryptodev_aesni_mb_create(const char *name,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
- char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct aesni_mb_private *internals;
enum aesni_mb_vector_mode vector_mode;
- /* Check CPU for support for AES instruction set */
- if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
- MB_LOG_ERR("AES instructions not supported by CPU");
- return -EFAULT;
- }
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
/* Check CPU for supported vector instruction set */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ vector_mode = RTE_AESNI_MB_AVX512;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
vector_mode = RTE_AESNI_MB_AVX2;
else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
vector_mode = RTE_AESNI_MB_AVX;
@@ -625,15 +698,7 @@ cryptodev_aesni_mb_create(const char *name,
return -EFAULT;
}
- /* create a unique device name */
- if (create_unique_device_name(crypto_dev_name,
- RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
- MB_LOG_ERR("failed to create unique cryptodev name");
- return -EINVAL;
- }
-
-
- dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
sizeof(struct aesni_mb_private), init_params->socket_id);
if (dev == NULL) {
MB_LOG_ERR("failed to create cryptodev vdev");
@@ -661,6 +726,9 @@ cryptodev_aesni_mb_create(const char *name,
case RTE_AESNI_MB_AVX2:
dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
break;
+ case RTE_AESNI_MB_AVX512:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+ break;
default:
break;
}
@@ -674,38 +742,50 @@ cryptodev_aesni_mb_create(const char *name,
return 0;
init_error:
- MB_LOG_ERR("driver %s: cryptodev_aesni_create failed", name);
+ MB_LOG_ERR("driver %s: cryptodev_aesni_create failed",
+ init_params->name);
- cryptodev_aesni_mb_remove(crypto_dev_name);
+ cryptodev_aesni_mb_remove(vdev);
return -EFAULT;
}
-
static int
-cryptodev_aesni_mb_probe(const char *name,
- const char *input_args)
+cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
{
struct rte_crypto_vdev_init_params init_params = {
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
- rte_socket_id()
+ rte_socket_id(),
+ ""
};
+ const char *name;
+ const char *input_args;
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
init_params.max_nb_queue_pairs);
RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
init_params.max_nb_sessions);
- return cryptodev_aesni_mb_create(name, &init_params);
+ return cryptodev_aesni_mb_create(name, vdev, &init_params);
}
static int
-cryptodev_aesni_mb_remove(const char *name)
+cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 3d49e2ae..d1bc28e0 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -227,13 +227,35 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
}, }
}, }
},
+ { /* AES DOCSIS BPI */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
/** Configure device */
static int
-aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev)
+aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
{
return 0;
}
@@ -342,24 +364,32 @@ aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
/** Create a ring to place processed operations on */
static struct rte_ring *
aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
- unsigned ring_size, int socket_id)
+ const char *str, unsigned int ring_size, int socket_id)
{
struct rte_ring *r;
+ char ring_name[RTE_CRYPTODEV_NAME_LEN];
+
+ unsigned int n = snprintf(ring_name, sizeof(ring_name),
+ "%s_%s",
+ qp->name, str);
+
+ if (n > sizeof(ring_name))
+ return NULL;
- r = rte_ring_lookup(qp->name);
+ r = rte_ring_lookup(ring_name);
if (r) {
- if (r->prod.size >= ring_size) {
+ if (rte_ring_get_size(r) >= ring_size) {
MB_LOG_INFO("Reusing existing ring %s for processed ops",
- qp->name);
+ ring_name);
return r;
}
MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
- qp->name);
+ ring_name);
return NULL;
}
- return rte_ring_create(qp->name, ring_size, socket_id,
+ return rte_ring_create(ring_name, ring_size, socket_id,
RING_F_SP_ENQ | RING_F_SC_DEQ);
}
@@ -388,11 +418,12 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
goto qp_setup_cleanup;
- qp->ops = &job_ops[internals->vector_mode];
- qp->processed_ops = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
- qp_conf->nb_descriptors, socket_id);
- if (qp->processed_ops == NULL)
+ qp->op_fns = &job_ops[internals->vector_mode];
+
+ qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
+ "ingress", qp_conf->nb_descriptors, socket_id);
+ if (qp->ingress_queue == NULL)
goto qp_setup_cleanup;
qp->sess_mp = dev->data->session_pool;
@@ -400,8 +431,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
memset(&qp->stats, 0, sizeof(qp->stats));
/* Initialise multi-buffer manager */
- (*qp->ops->job.init_mgr)(&qp->mb_mgr);
-
+ (*qp->op_fns->job.init_mgr)(&qp->mb_mgr);
return 0;
qp_setup_cleanup:
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 17f367f4..0d82699c 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -88,6 +88,7 @@ static const unsigned auth_truncated_digest_byte_lengths[] = {
[SHA_384] = 24,
[SHA_512] = 32,
[AES_XCBC] = 12,
+ [NULL_HASH] = 0
};
/**
@@ -111,6 +112,7 @@ static const unsigned auth_digest_byte_lengths[] = {
[SHA_384] = 48,
[SHA_512] = 64,
[AES_XCBC] = 16,
+ [NULL_HASH] = 0
};
/**
@@ -125,6 +127,13 @@ get_digest_byte_length(JOB_HASH_ALG algo)
return auth_digest_byte_lengths[algo];
}
+enum aesni_mb_operation {
+ AESNI_MB_OP_HASH_CIPHER,
+ AESNI_MB_OP_CIPHER_HASH,
+ AESNI_MB_OP_HASH_ONLY,
+ AESNI_MB_OP_CIPHER_ONLY,
+ AESNI_MB_OP_NOT_SUPPORTED
+};
/** private data structure for each virtual AESNI device */
struct aesni_mb_private {
@@ -142,12 +151,12 @@ struct aesni_mb_qp {
/**< Queue Pair Identifier */
char name[RTE_CRYPTODEV_NAME_LEN];
/**< Unique Queue Pair Name */
- const struct aesni_mb_ops *ops;
+ const struct aesni_mb_op_fns *op_fns;
/**< Vector mode dependent pointer table of the multi-buffer APIs */
MB_MGR mb_mgr;
/**< Multi-buffer instance */
- struct rte_ring *processed_ops;
- /**< Ring for placing process operations */
+ struct rte_ring *ingress_queue;
+ /**< Ring for placing operations ready for processing */
struct rte_mempool *sess_mp;
/**< Session Mempool */
struct rte_cryptodev_stats stats;
@@ -185,6 +194,8 @@ struct aesni_mb_session {
/** Authentication Parameters */
struct {
JOB_HASH_ALG algo; /**< Authentication Algorithm */
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
union {
struct {
uint8_t inner[128] __rte_aligned(16);
@@ -216,7 +227,7 @@ struct aesni_mb_session {
*
*/
extern int
-aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
+aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform);
diff --git a/drivers/crypto/armv8/Makefile b/drivers/crypto/armv8/Makefile
new file mode 100644
index 00000000..1474951c
--- /dev/null
+++ b/drivers/crypto/armv8/Makefile
@@ -0,0 +1,65 @@
+#
+# BSD LICENSE
+#
+# Copyright (C) Cavium networks Ltd. 2017.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(ARMV8_CRYPTO_LIB_PATH),)
+$(error "Please define ARMV8_CRYPTO_LIB_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_armv8.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_armv8_pmd_version.map
+
+# external library dependencies
+CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)
+CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)/asm/include
+LDLIBS += -L$(ARMV8_CRYPTO_LIB_PATH) -larmv8_crypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
new file mode 100644
index 00000000..3d603a5a
--- /dev/null
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -0,0 +1,906 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "armv8_crypto_defs.h"
+
+#include "rte_armv8_pmd_private.h"
+
+static int cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev);
+
+/**
+ * Pointers to the supported combined mode crypto functions are stored
+ * in the static tables. Each combined (chained) cryptographic operation
+ * can be described by a set of numbers:
+ * - order: order of operations (cipher, auth) or (auth, cipher)
+ * - direction: encryption or decryption
+ * - calg: cipher algorithm such as AES_CBC, AES_CTR, etc.
+ * - aalg: authentication algorithm such as SHA1, SHA256, etc.
+ * - keyl: cipher key length, for example 128, 192, 256 bits
+ *
+ * In order to quickly acquire each function pointer based on those numbers,
+ * a hierarchy of arrays is maintained. The final level, 3D array is indexed
+ * by the combined mode function parameters only (cipher algorithm,
+ * authentication algorithm and key length).
+ *
+ * This gives 3 memory accesses to obtain a function pointer instead of
+ * traversing the array manually and comparing function parameters on each loop.
+ *
+ * +--+CRYPTO_FUNC
+ * +--+ENC|
+ * +--+CA|
+ * | +--+DEC
+ * ORDER|
+ * | +--+ENC
+ * +--+AC|
+ * +--+DEC
+ *
+ */
+
+/**
+ * 3D array type for ARM Combined Mode crypto functions pointers.
+ * CRYPTO_CIPHER_MAX: max cipher ID number
+ * CRYPTO_AUTH_MAX: max auth ID number
+ * CRYPTO_CIPHER_KEYLEN_MAX: max key length ID number
+ */
+typedef const crypto_func_t
+crypto_func_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_AUTH_MAX][CRYPTO_CIPHER_KEYLEN_MAX];
+
+/* Evaluate to key length definition */
+#define KEYL(keyl) (ARMV8_CRYPTO_CIPHER_KEYLEN_ ## keyl)
+
+/* Local aliases for supported ciphers */
+#define CIPH_AES_CBC RTE_CRYPTO_CIPHER_AES_CBC
+/* Local aliases for supported hashes */
+#define AUTH_SHA1_HMAC RTE_CRYPTO_AUTH_SHA1_HMAC
+#define AUTH_SHA256_HMAC RTE_CRYPTO_AUTH_SHA256_HMAC
+
+/**
+ * Arrays containing pointers to particular cryptographic,
+ * combined mode functions.
+ * crypto_op_ca_encrypt: cipher (encrypt), authenticate
+ * crypto_op_ca_decrypt: cipher (decrypt), authenticate
+ * crypto_op_ac_encrypt: authenticate, cipher (encrypt)
+ * crypto_op_ac_decrypt: authenticate, cipher (decrypt)
+ */
+static const crypto_func_tbl_t
+crypto_op_ca_encrypt = {
+ /* [cipher alg][auth alg][key length] = crypto_function, */
+ [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = aes128cbc_sha1_hmac,
+ [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = aes128cbc_sha256_hmac,
+};
+
+static const crypto_func_tbl_t
+crypto_op_ca_decrypt = {
+ NULL
+};
+
+static const crypto_func_tbl_t
+crypto_op_ac_encrypt = {
+ NULL
+};
+
+static const crypto_func_tbl_t
+crypto_op_ac_decrypt = {
+ /* [cipher alg][auth alg][key length] = crypto_function, */
+ [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = sha1_hmac_aes128cbc_dec,
+ [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = sha256_hmac_aes128cbc_dec,
+};
+
+/**
+ * Arrays containing pointers to particular cryptographic function sets,
+ * covering given cipher operation directions (encrypt, decrypt)
+ * for each order of cipher and authentication pairs.
+ */
+static const crypto_func_tbl_t *
+crypto_cipher_auth[] = {
+ &crypto_op_ca_encrypt,
+ &crypto_op_ca_decrypt,
+ NULL
+};
+
+static const crypto_func_tbl_t *
+crypto_auth_cipher[] = {
+ &crypto_op_ac_encrypt,
+ &crypto_op_ac_decrypt,
+ NULL
+};
+
+/**
+ * Top level array containing pointers to particular cryptographic
+ * function sets, covering given order of chained operations.
+ * crypto_cipher_auth: cipher first, authenticate after
+ * crypto_auth_cipher: authenticate first, cipher after
+ */
+static const crypto_func_tbl_t **
+crypto_chain_order[] = {
+ crypto_cipher_auth,
+ crypto_auth_cipher,
+ NULL
+};
+
+/**
+ * Extract particular combined mode crypto function from the 3D array.
+ */
+#define CRYPTO_GET_ALGO(order, cop, calg, aalg, keyl) \
+({ \
+ crypto_func_tbl_t *func_tbl = \
+ (crypto_chain_order[(order)])[(cop)]; \
+ \
+ ((*func_tbl)[(calg)][(aalg)][KEYL(keyl)]); \
+})
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * 2D array type for ARM key schedule functions pointers.
+ * CRYPTO_CIPHER_MAX: max cipher ID number
+ * CRYPTO_CIPHER_KEYLEN_MAX: max key length ID number
+ */
+typedef const crypto_key_sched_t
+crypto_key_sched_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_CIPHER_KEYLEN_MAX];
+
+static const crypto_key_sched_tbl_t
+crypto_key_sched_encrypt = {
+ /* [cipher alg][key length] = key_expand_func, */
+ [CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_enc,
+};
+
+static const crypto_key_sched_tbl_t
+crypto_key_sched_decrypt = {
+ /* [cipher alg][key length] = key_expand_func, */
+ [CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_dec,
+};
+
+/**
+ * Top level array containing pointers to particular key generation
+ * function sets, covering given operation direction.
+ * crypto_key_sched_encrypt: keys for encryption
+ * crypto_key_sched_decrypt: keys for decryption
+ */
+static const crypto_key_sched_tbl_t *
+crypto_key_sched_dir[] = {
+ &crypto_key_sched_encrypt,
+ &crypto_key_sched_decrypt,
+ NULL
+};
+
+/**
+ * Extract particular combined mode crypto function from the 3D array.
+ */
+#define CRYPTO_GET_KEY_SCHED(cop, calg, keyl) \
+({ \
+ crypto_key_sched_tbl_t *ks_tbl = crypto_key_sched_dir[(cop)]; \
+ \
+ ((*ks_tbl)[(calg)][KEYL(keyl)]); \
+})
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ *------------------------------------------------------------------------------
+ * Session Prepare
+ *------------------------------------------------------------------------------
+ */
+
+/** Get xform chain order */
+static enum armv8_crypto_chain_order
+armv8_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+
+ /*
+ * This driver currently covers only chained operations.
+ * Ignore only cipher or only authentication operations
+ * or chains longer than 2 xform structures.
+ */
+ if (xform->next == NULL || xform->next->next != NULL)
+ return ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ARMV8_CRYPTO_CHAIN_AUTH_CIPHER;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ARMV8_CRYPTO_CHAIN_CIPHER_AUTH;
+ }
+
+ return ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED;
+}
+
+static inline void
+auth_hmac_pad_prepare(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ size_t i;
+
+ /* Generate i_key_pad and o_key_pad */
+ memset(sess->auth.hmac.i_key_pad, 0, sizeof(sess->auth.hmac.i_key_pad));
+ rte_memcpy(sess->auth.hmac.i_key_pad, sess->auth.hmac.key,
+ xform->auth.key.length);
+ memset(sess->auth.hmac.o_key_pad, 0, sizeof(sess->auth.hmac.o_key_pad));
+ rte_memcpy(sess->auth.hmac.o_key_pad, sess->auth.hmac.key,
+ xform->auth.key.length);
+ /*
+ * XOR key with IPAD/OPAD values to obtain i_key_pad
+ * and o_key_pad.
+ * Byte-by-byte operation may seem to be the less efficient
+ * here but in fact it's the opposite.
+ * The result ASM code is likely operate on NEON registers
+ * (load auth key to Qx, load IPAD/OPAD to multiple
+ * elements of Qy, eor 128 bits at once).
+ */
+ for (i = 0; i < SHA_BLOCK_MAX; i++) {
+ sess->auth.hmac.i_key_pad[i] ^= HMAC_IPAD_VALUE;
+ sess->auth.hmac.o_key_pad[i] ^= HMAC_OPAD_VALUE;
+ }
+}
+
+static inline int
+auth_set_prerequisites(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ uint8_t partial[64] = { 0 };
+ int error;
+
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ /*
+ * Generate authentication key, i_key_pad and o_key_pad.
+ */
+ /* Zero memory under key */
+ memset(sess->auth.hmac.key, 0, SHA1_AUTH_KEY_LENGTH);
+
+ if (xform->auth.key.length > SHA1_AUTH_KEY_LENGTH) {
+ /*
+ * In case the key is longer than 160 bits
+ * the algorithm will use SHA1(key) instead.
+ */
+ error = sha1_block(NULL, xform->auth.key.data,
+ sess->auth.hmac.key, xform->auth.key.length);
+ if (error != 0)
+ return -1;
+ } else {
+ /*
+ * Now copy the given authentication key to the session
+ * key assuming that the session key is zeroed there is
+ * no need for additional zero padding if the key is
+ * shorter than SHA1_AUTH_KEY_LENGTH.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
+ }
+
+ /* Prepare HMAC padding: key|pattern */
+ auth_hmac_pad_prepare(sess, xform);
+ /*
+ * Calculate partial hash values for i_key_pad and o_key_pad.
+ * Will be used as initialization state for final HMAC.
+ */
+ error = sha1_block_partial(NULL, sess->auth.hmac.i_key_pad,
+ partial, SHA1_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.i_key_pad, partial, SHA1_BLOCK_SIZE);
+
+ error = sha1_block_partial(NULL, sess->auth.hmac.o_key_pad,
+ partial, SHA1_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.o_key_pad, partial, SHA1_BLOCK_SIZE);
+
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ /*
+ * Generate authentication key, i_key_pad and o_key_pad.
+ */
+ /* Zero memory under key */
+ memset(sess->auth.hmac.key, 0, SHA256_AUTH_KEY_LENGTH);
+
+ if (xform->auth.key.length > SHA256_AUTH_KEY_LENGTH) {
+ /*
+ * In case the key is longer than 256 bits
+ * the algorithm will use SHA256(key) instead.
+ */
+ error = sha256_block(NULL, xform->auth.key.data,
+ sess->auth.hmac.key, xform->auth.key.length);
+ if (error != 0)
+ return -1;
+ } else {
+ /*
+ * Now copy the given authentication key to the session
+ * key assuming that the session key is zeroed there is
+ * no need for additional zero padding if the key is
+ * shorter than SHA256_AUTH_KEY_LENGTH.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
+ }
+
+ /* Prepare HMAC padding: key|pattern */
+ auth_hmac_pad_prepare(sess, xform);
+ /*
+ * Calculate partial hash values for i_key_pad and o_key_pad.
+ * Will be used as initialization state for final HMAC.
+ */
+ error = sha256_block_partial(NULL, sess->auth.hmac.i_key_pad,
+ partial, SHA256_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.i_key_pad, partial, SHA256_BLOCK_SIZE);
+
+ error = sha256_block_partial(NULL, sess->auth.hmac.o_key_pad,
+ partial, SHA256_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.o_key_pad, partial, SHA256_BLOCK_SIZE);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline int
+cipher_set_prerequisites(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ crypto_key_sched_t cipher_key_sched;
+
+ cipher_key_sched = sess->cipher.key_sched;
+ if (likely(cipher_key_sched != NULL)) {
+ /* Set up cipher session key */
+ cipher_key_sched(sess->cipher.key.data, xform->cipher.key.data);
+ }
+
+ return 0;
+}
+
+static int
+armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *cipher_xform,
+ const struct rte_crypto_sym_xform *auth_xform)
+{
+ enum armv8_crypto_chain_order order;
+ enum armv8_crypto_cipher_operation cop;
+ enum rte_crypto_cipher_algorithm calg;
+ enum rte_crypto_auth_algorithm aalg;
+
+ /* Validate and prepare scratch order of combined operations */
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+ order = sess->chain_order;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* Select cipher direction */
+ sess->cipher.direction = cipher_xform->cipher.op;
+ /* Select cipher key */
+ sess->cipher.key.length = cipher_xform->cipher.key.length;
+ /* Set cipher direction */
+ cop = sess->cipher.direction;
+ /* Set cipher algorithm */
+ calg = cipher_xform->cipher.algo;
+
+ /* Select cipher algo */
+ switch (calg) {
+ /* Cover supported cipher algorithms */
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.algo = calg;
+ /* IV len is always 16 bytes (block size) for AES CBC */
+ sess->cipher.iv_len = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* Select auth generate/verify */
+ sess->auth.operation = auth_xform->auth.op;
+
+ /* Select auth algo */
+ switch (auth_xform->auth.algo) {
+ /* Cover supported hash algorithms */
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC: /* Fall through */
+ aalg = auth_xform->auth.algo;
+ sess->auth.mode = ARMV8_CRYPTO_AUTH_AS_HMAC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Verify supported key lengths and extract proper algorithm */
+ switch (cipher_xform->cipher.key.length << 3) {
+ case 128:
+ sess->crypto_func =
+ CRYPTO_GET_ALGO(order, cop, calg, aalg, 128);
+ sess->cipher.key_sched =
+ CRYPTO_GET_KEY_SCHED(cop, calg, 128);
+ break;
+ case 192:
+ case 256:
+ /* These key lengths are not supported yet */
+ default: /* Fall through */
+ sess->crypto_func = NULL;
+ sess->cipher.key_sched = NULL;
+ return -EINVAL;
+ }
+
+ if (unlikely(sess->crypto_func == NULL)) {
+ /*
+ * If we got here that means that there must be a bug
+ * in the algorithms selection above. Nevertheless keep
+ * it here to catch bug immediately and avoid NULL pointer
+ * dereference in OPs processing.
+ */
+ ARMV8_CRYPTO_LOG_ERR(
+ "No appropriate crypto function for given parameters");
+ return -EINVAL;
+ }
+
+ /* Set up cipher session prerequisites */
+ if (cipher_set_prerequisites(sess, cipher_xform) != 0)
+ return -EINVAL;
+
+ /* Set up authentication session prerequisites */
+ if (auth_set_prerequisites(sess, auth_xform) != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+armv8_crypto_set_session_parameters(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ bool is_chained_op;
+ int ret;
+
+ /* Filter out spurious/broken requests */
+ if (xform == NULL)
+ return -EINVAL;
+
+ sess->chain_order = armv8_crypto_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ is_chained_op = true;
+ break;
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ is_chained_op = true;
+ break;
+ default:
+ is_chained_op = false;
+ return -EINVAL;
+ }
+
+ if (is_chained_op) {
+ ret = armv8_crypto_set_session_chained_parameters(sess,
+ cipher_xform, auth_xform);
+ if (unlikely(ret != 0)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "Invalid/unsupported chained (cipher/auth) parameters");
+ return -EINVAL;
+ }
+ } else {
+ ARMV8_CRYPTO_LOG_ERR("Invalid/unsupported operation");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Provide session for operation */
+static inline struct armv8_crypto_session *
+get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
+{
+ struct armv8_crypto_session *sess = NULL;
+
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ /* get existing session */
+ if (likely(op->sym->session != NULL &&
+ op->sym->session->dev_type ==
+ RTE_CRYPTODEV_ARMV8_PMD)) {
+ sess = (struct armv8_crypto_session *)
+ op->sym->session->_private;
+ }
+ } else {
+ /* provide internal session */
+ void *_sess = NULL;
+
+ if (!rte_mempool_get(qp->sess_mp, (void **)&_sess)) {
+ sess = (struct armv8_crypto_session *)
+ ((struct rte_cryptodev_sym_session *)_sess)
+ ->_private;
+
+ if (unlikely(armv8_crypto_set_session_parameters(
+ sess, op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ sess = NULL;
+ } else
+ op->sym->session = _sess;
+ }
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Process Operations
+ *------------------------------------------------------------------------------
+ */
+
+/*----------------------------------------------------------------------------*/
+
+/** Process cipher operation */
+static inline void
+process_armv8_chained_op
+ (struct rte_crypto_op *op, struct armv8_crypto_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ crypto_func_t crypto_func;
+ crypto_arg_t arg;
+ struct rte_mbuf *m_asrc, *m_adst;
+ uint8_t *csrc, *cdst;
+ uint8_t *adst, *asrc;
+ uint64_t clen, alen;
+ int error;
+
+ clen = op->sym->cipher.data.length;
+ alen = op->sym->auth.data.length;
+
+ csrc = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->cipher.data.offset);
+ cdst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ m_asrc = m_adst = mbuf_dst;
+ break;
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+ m_asrc = mbuf_src;
+ m_adst = mbuf_dst;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+ asrc = rte_pktmbuf_mtod_offset(m_asrc, uint8_t *,
+ op->sym->auth.data.offset);
+
+ switch (sess->auth.mode) {
+ case ARMV8_CRYPTO_AUTH_AS_AUTH:
+ /* Nothing to do here, just verify correct option */
+ break;
+ case ARMV8_CRYPTO_AUTH_AS_HMAC:
+ arg.digest.hmac.key = sess->auth.hmac.key;
+ arg.digest.hmac.i_key_pad = sess->auth.hmac.i_key_pad;
+ arg.digest.hmac.o_key_pad = sess->auth.hmac.o_key_pad;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ adst = op->sym->auth.digest.data;
+ if (adst == NULL) {
+ adst = rte_pktmbuf_mtod_offset(m_adst,
+ uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ }
+ } else {
+ adst = (uint8_t *)rte_pktmbuf_append(m_asrc,
+ op->sym->auth.digest.length);
+ }
+
+ if (unlikely(op->sym->cipher.iv.length != sess->cipher.iv_len)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+
+ arg.cipher.iv = op->sym->cipher.iv.data;
+ arg.cipher.key = sess->cipher.key.data;
+ /* Acquire combined mode function */
+ crypto_func = sess->crypto_func;
+ ARMV8_CRYPTO_ASSERT(crypto_func != NULL);
+ error = crypto_func(csrc, cdst, clen, asrc, adst, alen, &arg);
+ if (error != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ if (memcmp(adst, op->sym->auth.digest.data,
+ op->sym->auth.digest.length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(m_asrc,
+ op->sym->auth.digest.length);
+ }
+}
+
+/** Process crypto operation for mbuf */
+static inline int
+process_op(const struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
+ struct armv8_crypto_session *sess)
+{
+ struct rte_mbuf *msrc, *mdst;
+
+ msrc = op->sym->m_src;
+ mdst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER: /* Fall through */
+ process_armv8_chained_op(op, sess, msrc, mdst);
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+
+ /* Free session if a session-less crypto op */
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ memset(sess, 0, sizeof(struct armv8_crypto_session));
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ERROR))
+ return -1;
+
+ return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * PMD Framework
+ *------------------------------------------------------------------------------
+ */
+
+/** Enqueue burst */
+static uint16_t
+armv8_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct armv8_crypto_session *sess;
+ struct armv8_crypto_qp *qp = queue_pair;
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i,
+ NULL);
+ qp->stats.enqueued_count += retval;
+
+ return retval;
+
+enqueue_err:
+ retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i,
+ NULL);
+ if (ops[i] != NULL)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+ qp->stats.enqueue_err_count++;
+ return retval;
+}
+
+/** Dequeue burst */
+static uint16_t
+armv8_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct armv8_crypto_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)ops, nb_ops, NULL);
+ qp->stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/** Create ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct armv8_crypto_private *internals;
+
+ /* Check CPU for support for AES instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ /* Check CPU for support for SHA instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA1) ||
+ !rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA2)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "SHA1/SHA2 instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ /* Check CPU for support for Advance SIMD instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "Advanced SIMD instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ sizeof(struct armv8_crypto_private),
+ init_params->socket_id);
+ if (dev == NULL) {
+ ARMV8_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_ARMV8_PMD;
+ dev->dev_ops = rte_armv8_crypto_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = armv8_crypto_pmd_dequeue_burst;
+ dev->enqueue_burst = armv8_crypto_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_NEON |
+ RTE_CRYPTODEV_FF_CPU_ARM_CE;
+
+ /* Set vector instructions mode supported */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+
+init_error:
+ ARMV8_CRYPTO_LOG_ERR(
+ "driver %s: cryptodev_armv8_crypto_create failed",
+ init_params->name);
+
+ cryptodev_armv8_crypto_uninit(vdev);
+ return -EFAULT;
+}
+
+/** Initialise ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id(),
+ {0}
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ if (init_params.name[0] != '\0') {
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
+ }
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_armv8_crypto_create(name, vdev, &init_params);
+}
+
+/** Uninitialise ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD,
+ "Closing ARMv8 crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_vdev_driver armv8_crypto_drv = {
+ .probe = cryptodev_armv8_crypto_init,
+ .remove = cryptodev_armv8_crypto_uninit
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ARMV8_PMD, cryptodev_armv8_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
new file mode 100644
index 00000000..4d9ccbfb
--- /dev/null
+++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -0,0 +1,370 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "armv8_crypto_defs.h"
+
+#include "rte_armv8_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities
+ armv8_crypto_pmd_capabilities[] = {
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 16,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 16,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+armv8_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+armv8_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+armv8_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+armv8_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+armv8_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+armv8_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+
+/** Get device info */
+static void
+armv8_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct armv8_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = armv8_crypto_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/** Release queue pair */
+static int
+armv8_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+
+ return 0;
+}
+
+/** set a unique name for the queue pair based on it's name, dev_id and qp_id */
+static int
+armv8_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct armv8_crypto_qp *qp)
+{
+ unsigned int n;
+
+ n = snprintf(qp->name, sizeof(qp->name), "armv8_crypto_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n > sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+armv8_crypto_pmd_qp_create_processed_ops_ring(struct armv8_crypto_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ARMV8_CRYPTO_LOG_INFO(
+ "Reusing existing ring %s for processed ops",
+ qp->name);
+ return r;
+ }
+
+ ARMV8_CRYPTO_LOG_ERR(
+ "Unable to reuse existing ring %s for processed ops",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+
+/** Setup a queue pair */
+static int
+armv8_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct armv8_crypto_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ armv8_crypto_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("ARMv8 PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (armv8_crypto_pmd_qp_set_unique_name(dev, qp) != 0)
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = armv8_crypto_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+armv8_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+armv8_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure */
+static unsigned
+armv8_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct armv8_crypto_session);
+}
+
+/** Configure the session from a crypto xform chain */
+static void *
+armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ if (unlikely(sess == NULL)) {
+ ARMV8_CRYPTO_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (armv8_crypto_set_session_parameters(
+ sess, xform) != 0) {
+ ARMV8_CRYPTO_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+armv8_crypto_pmd_session_clear(struct rte_cryptodev *dev __rte_unused,
+ void *sess)
+{
+
+ /* Zero out the whole structure */
+ if (sess)
+ memset(sess, 0, sizeof(struct armv8_crypto_session));
+}
+
+struct rte_cryptodev_ops armv8_crypto_pmd_ops = {
+ .dev_configure = armv8_crypto_pmd_config,
+ .dev_start = armv8_crypto_pmd_start,
+ .dev_stop = armv8_crypto_pmd_stop,
+ .dev_close = armv8_crypto_pmd_close,
+
+ .stats_get = armv8_crypto_pmd_stats_get,
+ .stats_reset = armv8_crypto_pmd_stats_reset,
+
+ .dev_infos_get = armv8_crypto_pmd_info_get,
+
+ .queue_pair_setup = armv8_crypto_pmd_qp_setup,
+ .queue_pair_release = armv8_crypto_pmd_qp_release,
+ .queue_pair_start = armv8_crypto_pmd_qp_start,
+ .queue_pair_stop = armv8_crypto_pmd_qp_stop,
+ .queue_pair_count = armv8_crypto_pmd_qp_count,
+
+ .session_get_size = armv8_crypto_pmd_session_get_size,
+ .session_configure = armv8_crypto_pmd_session_configure,
+ .session_clear = armv8_crypto_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops = &armv8_crypto_pmd_ops;
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_private.h b/drivers/crypto/armv8/rte_armv8_pmd_private.h
new file mode 100644
index 00000000..b75107f2
--- /dev/null
+++ b/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -0,0 +1,211 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ARMV8_PMD_PRIVATE_H_
+#define _RTE_ARMV8_PMD_PRIVATE_H_
+
+#define ARMV8_CRYPTO_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_ARMV8_CRYPTO_DEBUG
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+ __func__, __LINE__, ## args)
+
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+ __func__, __LINE__, ## args)
+
+#define ARMV8_CRYPTO_ASSERT(con) \
+do { \
+ if (!(con)) { \
+ rte_panic("%s(): " \
+ con "condition failed, line %u", __func__); \
+ } \
+} while (0)
+
+#else
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...)
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...)
+#define ARMV8_CRYPTO_ASSERT(con)
+#endif
+
+#define NBBY 8 /* Number of bits in a byte */
+#define BYTE_LENGTH(x) ((x) / NBBY) /* Number of bytes in x (round down) */
+
+/** ARMv8 operation order mode enumerator */
+enum armv8_crypto_chain_order {
+ ARMV8_CRYPTO_CHAIN_CIPHER_AUTH,
+ ARMV8_CRYPTO_CHAIN_AUTH_CIPHER,
+ ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CHAIN_LIST_END = ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED
+};
+
+/** ARMv8 cipher operation enumerator */
+enum armv8_crypto_cipher_operation {
+ ARMV8_CRYPTO_CIPHER_OP_ENCRYPT = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ ARMV8_CRYPTO_CIPHER_OP_DECRYPT = RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CIPHER_OP_LIST_END = ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED
+};
+
+enum armv8_crypto_cipher_keylen {
+ ARMV8_CRYPTO_CIPHER_KEYLEN_128,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_192,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_256,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END =
+ ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED
+};
+
+/** ARMv8 auth mode enumerator */
+enum armv8_crypto_auth_mode {
+ ARMV8_CRYPTO_AUTH_AS_AUTH,
+ ARMV8_CRYPTO_AUTH_AS_HMAC,
+ ARMV8_CRYPTO_AUTH_AS_CIPHER,
+ ARMV8_CRYPTO_AUTH_NOT_SUPPORTED,
+ ARMV8_CRYPTO_AUTH_LIST_END = ARMV8_CRYPTO_AUTH_NOT_SUPPORTED
+};
+
+#define CRYPTO_ORDER_MAX ARMV8_CRYPTO_CHAIN_LIST_END
+#define CRYPTO_CIPHER_OP_MAX ARMV8_CRYPTO_CIPHER_OP_LIST_END
+#define CRYPTO_CIPHER_KEYLEN_MAX ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END
+#define CRYPTO_CIPHER_MAX RTE_CRYPTO_CIPHER_LIST_END
+#define CRYPTO_AUTH_MAX RTE_CRYPTO_AUTH_LIST_END
+
+#define HMAC_IPAD_VALUE (0x36)
+#define HMAC_OPAD_VALUE (0x5C)
+
+#define SHA256_AUTH_KEY_LENGTH (BYTE_LENGTH(256))
+#define SHA256_BLOCK_SIZE (BYTE_LENGTH(512))
+
+#define SHA1_AUTH_KEY_LENGTH (BYTE_LENGTH(160))
+#define SHA1_BLOCK_SIZE (BYTE_LENGTH(512))
+
+#define SHA_AUTH_KEY_MAX SHA256_AUTH_KEY_LENGTH
+#define SHA_BLOCK_MAX SHA256_BLOCK_SIZE
+
+typedef int (*crypto_func_t)(uint8_t *, uint8_t *, uint64_t,
+ uint8_t *, uint8_t *, uint64_t,
+ crypto_arg_t *);
+
+typedef void (*crypto_key_sched_t)(uint8_t *, const uint8_t *);
+
+/** private data structure for each ARMv8 crypto device */
+struct armv8_crypto_private {
+ unsigned int max_nb_qpairs;
+ /**< Max number of queue pairs */
+ unsigned int max_nb_sessions;
+ /**< Max number of sessions */
+};
+
+/** ARMv8 crypto queue pair */
+struct armv8_crypto_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing process packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+} __rte_cache_aligned;
+
+/** ARMv8 crypto private session structure */
+struct armv8_crypto_session {
+ enum armv8_crypto_chain_order chain_order;
+ /**< chain order mode */
+ crypto_func_t crypto_func;
+ /**< cryptographic function to use for this session */
+
+ /** Cipher Parameters */
+ struct {
+ enum rte_crypto_cipher_operation direction;
+ /**< cipher operation direction */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< cipher algorithm */
+ int iv_len;
+ /**< IV length */
+
+ struct {
+ uint8_t data[256];
+ /**< key data */
+ size_t length;
+ /**< key length in bytes */
+ } key;
+
+ crypto_key_sched_t key_sched;
+ /**< Key schedule function */
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ enum armv8_crypto_auth_mode mode;
+ /**< auth operation mode */
+
+ union {
+ struct {
+ /* Add data if needed */
+ } auth;
+
+ struct {
+ uint8_t i_key_pad[SHA_BLOCK_MAX]
+ __rte_cache_aligned;
+ /**< inner pad (max supported block length) */
+ uint8_t o_key_pad[SHA_BLOCK_MAX]
+ __rte_cache_aligned;
+ /**< outer pad (max supported block length) */
+ uint8_t key[SHA_AUTH_KEY_MAX];
+ /**< HMAC key (max supported length)*/
+ } hmac;
+ };
+ } auth;
+
+} __rte_cache_aligned;
+
+/** Set and validate ARMv8 crypto session parameters */
+extern int armv8_crypto_set_session_parameters(
+ struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops;
+
+#endif /* _RTE_ARMV8_PMD_PRIVATE_H_ */
diff --git a/drivers/net/mpipe/rte_pmd_mpipe_version.map b/drivers/crypto/armv8/rte_armv8_pmd_version.map
index ad607bbe..1f84b68a 100644
--- a/drivers/net/mpipe/rte_pmd_mpipe_version.map
+++ b/drivers/crypto/armv8/rte_armv8_pmd_version.map
@@ -1,3 +1,3 @@
-DPDK_2.2 {
+DPDK_17.02 {
local: *;
};
diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile
new file mode 100644
index 00000000..11c7c785
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/Makefile
@@ -0,0 +1,78 @@
+# BSD LICENSE
+#
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright (c) 2016 NXP. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Freescale Semiconductor, Inc nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_sec.a
+
+# build flags
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT),y)
+CFLAGS += -O0 -g
+CFLAGS += "-Wno-error"
+else
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+endif
+CFLAGS += -D _GNU_SOURCE
+
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2/
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa2_sec_version.map
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec_dpseci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += mc/dpseci.c
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += lib/librte_cryptodev
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += drivers/bus/fslmc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += drivers/mempool/dpaa2
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_mempool_dpaa2
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
new file mode 100644
index 00000000..4e01fe81
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -0,0 +1,1656 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_common.h>
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_dpio.h>
+#include <dpaa2_hw_mempool.h>
+#include <fsl_dpseci.h>
+#include <fsl_mc_sys.h>
+
+#include "dpaa2_sec_priv.h"
+#include "dpaa2_sec_logs.h"
+
+/* RTA header files */
+#include <hw/desc/ipsec.h>
+#include <hw/desc/algo.h>
+
+/* Minimum job descriptor consists of a oneword job descriptor HEADER and
+ * a pointer to the shared descriptor
+ */
+#define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
+#define FSL_VENDOR_ID 0x1957
+#define FSL_DEVICE_ID 0x410
+#define FSL_SUBSYSTEM_SEC 1
+#define FSL_MC_DPSECI_DEVID 3
+
+#define NO_PREFETCH 0
+#define TDES_CBC_IV_LEN 8
+#define AES_CBC_IV_LEN 16
+enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
+
+static inline int
+build_authenc_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sym_op->auth.data.length -
+ sym_op->cipher.data.length;
+ int icv_len = sym_op->auth.digest.length;
+ uint8_t *old_icv;
+ uint32_t mem_len = (7 * sizeof(struct qbman_fle)) + icv_len;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* we are using the first FLE entry to store Mbuf.
+ * Currently we donot know which FLE has the mbuf stored.
+ * So while retreiving we can go back 1 FLE from the FD -ADDR
+ * to get the MBUF Addr from the previous FLE.
+ * We can have a better approach to use the inline Mbuf
+ */
+ fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
+ if (!fle) {
+ RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ return -1;
+ }
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ fle = fle + 1;
+ sge = fle + 2;
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge + 2, bpid);
+ DPAA2_SET_FLE_BPID(sge + 3, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ DPAA2_SET_FLE_IVP((sge + 2));
+ DPAA2_SET_FLE_IVP((sge + 3));
+ }
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sym_op->auth.digest.length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sym_op->cipher.iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->cipher.data.length + icv_len) :
+ sym_op->cipher.data.length;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+ sym_op->m_src->data_off);
+ sge->length = sym_op->cipher.data.length;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ sge->length = sym_op->auth.digest.length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
+ sym_op->cipher.iv.length));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ fle++;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(fle);
+ DPAA2_SET_FLE_FIN(fle);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->auth.data.length + sym_op->cipher.iv.length) :
+ (sym_op->auth.data.length + sym_op->cipher.iv.length +
+ sym_op->auth.digest.length);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
+ sge->length = sym_op->cipher.iv.length;
+ sge++;
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+ sym_op->m_src->data_off);
+ sge->length = sym_op->auth.data.length;
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->auth.digest.data,
+ sym_op->auth.digest.length);
+ memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = sym_op->auth.digest.length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
+ sym_op->auth.digest.length +
+ sym_op->cipher.iv.length));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+ return 0;
+}
+
+static inline int
+build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *fle, *sge;
+ uint32_t mem_len = (sess->dir == DIR_ENC) ?
+ (3 * sizeof(struct qbman_fle)) :
+ (5 * sizeof(struct qbman_fle) +
+ sym_op->auth.digest.length);
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *old_digest;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
+ if (!fle) {
+ RTE_LOG(ERR, PMD, "Memory alloc failed for FLE\n");
+ return -1;
+ }
+ /* TODO we are using the first FLE entry to store Mbuf.
+ * Currently we donot know which FLE has the mbuf stored.
+ * So while retreiving we can go back 1 FLE from the FD -ADDR
+ * to get the MBUF Addr from the previous FLE.
+ * We can have a better approach to use the inline Mbuf
+ */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ fle = fle + 1;
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ }
+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ fle->length = sym_op->auth.digest.length;
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ fle++;
+
+ if (sess->dir == DIR_ENC) {
+ DPAA2_SET_FLE_ADDR(fle,
+ DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
+ sym_op->m_src->data_off);
+ DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
+ fle->length = sym_op->auth.data.length;
+ } else {
+ sge = fle + 2;
+ DPAA2_SET_FLE_SG_EXT(fle);
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ } else {
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ }
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+ sym_op->m_src->data_off);
+
+ DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
+ sym_op->auth.digest.length);
+ sge->length = sym_op->auth.data.length;
+ sge++;
+ old_digest = (uint8_t *)(sge + 1);
+ rte_memcpy(old_digest, sym_op->auth.digest.data,
+ sym_op->auth.digest.length);
+ memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+ sge->length = sym_op->auth.digest.length;
+ fle->length = sym_op->auth.data.length +
+ sym_op->auth.digest.length;
+ DPAA2_SET_FLE_FIN(sge);
+ }
+ DPAA2_SET_FLE_FIN(fle);
+
+ return 0;
+}
+
+static int
+build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *fle, *sge;
+ uint32_t mem_len = (5 * sizeof(struct qbman_fle));
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* todo - we can use some mempool to avoid malloc here */
+ fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
+ if (!fle) {
+ RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ return -1;
+ }
+ /* TODO we are using the first FLE entry to store Mbuf.
+ * Currently we donot know which FLE has the mbuf stored.
+ * So while retreiving we can go back 1 FLE from the FD -ADDR
+ * to get the MBUF Addr from the previous FLE.
+ * We can have a better approach to use the inline Mbuf
+ */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ fle = fle + 1;
+ sge = fle + 2;
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ }
+
+ flc = &priv->flc_desc[0].flc;
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
+ sym_op->cipher.iv.length);
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sym_op->cipher.iv.length,
+ sym_op->m_src->data_off);
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
+ sym_op->m_src->data_off);
+
+ fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+
+ PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
+ flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
+
+ fle++;
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
+ sge->length = sym_op->cipher.iv.length;
+
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+ sym_op->m_src->data_off);
+
+ sge->length = sym_op->cipher.data.length;
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(fle);
+
+ PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+ (void *)DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+
+ return 0;
+}
+
+static inline int
+build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ int ret = -1;
+
+ PMD_INIT_FUNC_TRACE();
+
+ switch (sess->ctxt_type) {
+ case DPAA2_SEC_CIPHER:
+ ret = build_cipher_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AUTH:
+ ret = build_auth_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_CIPHER_HASH:
+ ret = build_authenc_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_HASH_CIPHER:
+ default:
+ RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+ }
+ return ret;
+}
+
+static uint16_t
+dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ /* Function to transmit the frames to given device and VQ*/
+ uint32_t loop;
+ int32_t ret;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send;
+ struct qbman_eq_desc eqdesc;
+ struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+ struct qbman_swp *swp;
+ uint16_t num_tx = 0;
+ /*todo - need to support multiple buffer pools */
+ uint16_t bpid;
+ struct rte_mempool *mb_pool;
+ dpaa2_sec_session *sess;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ if (ops[0]->sym->sess_type != RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
+ return 0;
+ }
+ /*Prepare enqueue descriptor*/
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+ qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
+
+ if (!DPAA2_PER_LCORE_SEC_DPIO) {
+ ret = dpaa2_affine_qbman_swp_sec();
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_SEC_PORTAL;
+
+ while (nb_ops) {
+ frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+ /*Clear the unused FD fields before sending*/
+ memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+ sess = (dpaa2_sec_session *)
+ (*ops)->sym->session->_private;
+ mb_pool = (*ops)->sym->m_src->pool;
+ bpid = mempool_to_bpid(mb_pool);
+ ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "error: Improper packet"
+ " contents for crypto operation\n");
+ goto skip_tx;
+ }
+ ops++;
+ }
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qbman_swp_send_multiple(swp, &eqdesc,
+ &fd_arr[loop],
+ frames_to_send - loop);
+ }
+
+ num_tx += frames_to_send;
+ nb_ops -= frames_to_send;
+ }
+skip_tx:
+ dpaa2_qp->tx_vq.tx_pkts += num_tx;
+ dpaa2_qp->tx_vq.err_pkts += nb_ops;
+ return num_tx;
+}
+
+static inline struct rte_crypto_op *
+sec_fd_to_mbuf(const struct qbman_fd *fd)
+{
+ struct qbman_fle *fle;
+ struct rte_crypto_op *op;
+
+ fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+ PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
+ fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+
+ /* we are using the first FLE entry to store Mbuf.
+ * Currently we donot know which FLE has the mbuf stored.
+ * So while retreiving we can go back 1 FLE from the FD -ADDR
+ * to get the MBUF Addr from the previous FLE.
+ * We can have a better approach to use the inline Mbuf
+ */
+
+ if (unlikely(DPAA2_GET_FD_IVP(fd))) {
+ /* TODO complete it. */
+ RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?");
+ return NULL;
+ }
+ op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FLE_ADDR((fle - 1)));
+
+ /* Prefeth op */
+ rte_prefetch0(op->sym->m_src);
+
+ PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
+ (void *)op->sym->m_src, op->sym->m_src->buf_addr);
+
+ PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+ (void *)DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+
+ /* free the fle memory */
+ rte_free(fle - 1);
+
+ return op;
+}
+
+static uint16_t
+dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ /* Function is responsible to receive frames for a given device and VQ*/
+ struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+ struct qbman_result *dq_storage;
+ uint32_t fqid = dpaa2_qp->rx_vq.fqid;
+ int ret, num_rx = 0;
+ uint8_t is_last = 0, status;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd;
+ struct qbman_pull_desc pulldesc;
+
+ if (!DPAA2_PER_LCORE_SEC_DPIO) {
+ ret = dpaa2_affine_qbman_swp_sec();
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_SEC_PORTAL;
+ dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc,
+ (nb_ops > DPAA2_DQRR_RING_SIZE) ?
+ DPAA2_DQRR_RING_SIZE : nb_ops);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
+ 1);
+
+ /*Issue a volatile dequeue command. */
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ RTE_LOG(WARNING, PMD, "SEC VDQ command is not issued."
+ "QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ };
+
+ /* Receive the packets till Last Dequeue entry is found with
+ * respect to the above issues PULL command.
+ */
+ while (!is_last) {
+ /* Check if the previous issued command is completed.
+ * Also seems like the SWP is shared between the Ethernet Driver
+ * and the SEC driver.
+ */
+ while (!qbman_check_command_complete(swp, dq_storage))
+ ;
+
+ /* Loop until the dq_storage is updated with
+ * new token by QBMAN
+ */
+ while (!qbman_result_has_new_result(swp, dq_storage))
+ ;
+ /* Check whether Last Pull command is Expired and
+ * setting Condition for Loop termination
+ */
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ is_last = 1;
+ /* Check for valid frame. */
+ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+ if (unlikely(
+ (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+ PMD_RX_LOG(DEBUG, "No frame is delivered");
+ continue;
+ }
+ }
+
+ fd = qbman_result_DQ_fd(dq_storage);
+ ops[num_rx] = sec_fd_to_mbuf(fd);
+
+ if (unlikely(fd->simple.frc)) {
+ /* TODO Parse SEC errors */
+ RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
+ fd->simple.frc);
+ ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ } else {
+ ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+
+ num_rx++;
+ dq_storage++;
+ } /* End of Packet Rx loop */
+
+ dpaa2_qp->rx_vq.rx_pkts += num_rx;
+
+ PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
+ /*Return the total number of packets received to DPAA2 app*/
+ return num_rx;
+}
+
+/** Release queue pair */
+static int
+dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct dpaa2_sec_qp *qp =
+ (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (qp->rx_vq.q_storage) {
+ dpaa2_free_dq_storage(qp->rx_vq.q_storage);
+ rte_free(qp->rx_vq.q_storage);
+ }
+ rte_free(qp);
+
+ dev->data->queue_pairs[queue_pair_id] = NULL;
+
+ return 0;
+}
+
+/** Setup a queue pair */
+static int
+dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+ __rte_unused int socket_id)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct dpaa2_sec_qp *qp;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_rx_queue_cfg cfg;
+ int32_t retcode;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ PMD_DRV_LOG(INFO, "QP already setup");
+ return 0;
+ }
+
+ PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
+ dev, qp_id, qp_conf);
+
+ memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
+
+ qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
+ RTE_CACHE_LINE_SIZE);
+ if (!qp) {
+ RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
+ return -1;
+ }
+
+ qp->rx_vq.dev = dev;
+ qp->tx_vq.dev = dev;
+ qp->rx_vq.q_storage = rte_malloc("sec dq storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!qp->rx_vq.q_storage) {
+ RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
+ return -1;
+ }
+ memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
+
+ if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
+ RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
+ return -1;
+ }
+
+ dev->data->queue_pairs[qp_id] = qp;
+
+ cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
+ cfg.user_ctx = (uint64_t)(&qp->rx_vq);
+ retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
+ qp_id, &cfg);
+ return retcode;
+}
+
+/** Start queue pair */
+static int
+dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+/** Stop queue pair */
+static int
+dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni gcm session structure */
+static unsigned int
+dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return sizeof(dpaa2_sec_session);
+}
+
+static void
+dpaa2_sec_session_initialize(struct rte_mempool *mp __rte_unused,
+ void *sess __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
+ struct alginfo cipherdata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For SEC CIPHER only one descriptor is required. */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ return -1;
+ }
+
+ flc = &priv->flc_desc[0].flc;
+
+ session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for cipher key");
+ rte_free(priv);
+ return -1;
+ }
+ session->cipher_key.length = xform->cipher.key.length;
+
+ memcpy(session->cipher_key.data, xform->cipher.key.data,
+ xform->cipher.key.length);
+ cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ ctxt->iv.length = AES_CBC_IV_LEN;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ ctxt->iv.length = TDES_CBC_IV_LEN;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_GCM:
+ case RTE_CRYPTO_CIPHER_AES_CCM:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ case RTE_CRYPTO_CIPHER_NULL:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+ xform->cipher.algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ xform->cipher.algo);
+ goto error_out;
+ }
+ session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
+ &cipherdata, NULL, ctxt->iv.length,
+ session->dir);
+ if (bufsize < 0) {
+ RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
+ goto error_out;
+ }
+ flc->dhr = 0;
+ flc->bpv0 = 0x1;
+ flc->mode_bits = 0x8000;
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_auth_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
+ struct alginfo authdata;
+ unsigned int bufsize;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For SEC AUTH three descriptors are required for various stages */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + 3 *
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ return -1;
+ }
+
+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+
+ session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for auth key");
+ rte_free(priv);
+ return -1;
+ }
+ session->auth_key.length = xform->auth.key.length;
+
+ memcpy(session->auth_key.data, xform->auth.key.data,
+ xform->auth.key.length);
+ authdata.key = (uint64_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA1;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_MD5;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA384;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA512;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA224;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_AES_GCM:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_CCM:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+ xform->auth.algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+ xform->auth.algo);
+ goto error_out;
+ }
+ session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ DIR_ENC : DIR_DEC;
+
+ bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
+ 1, 0, &authdata, !session->dir,
+ ctxt->trunc_len);
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+
+ return 0;
+
+error_out:
+ rte_free(session->auth_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_aead_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct alginfo authdata, cipherdata;
+ unsigned int bufsize;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ struct rte_crypto_auth_xform *auth_xform;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (session->ext_params.aead_ctxt.auth_cipher_text) {
+ cipher_xform = &xform->cipher;
+ auth_xform = &xform->next->auth;
+ session->ctxt_type =
+ (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
+ } else {
+ cipher_xform = &xform->next->cipher;
+ auth_xform = &xform->auth;
+ session->ctxt_type =
+ (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
+ }
+ /* For SEC AEAD only one descriptor is required */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ return -1;
+ }
+
+ flc = &priv->flc_desc[0].flc;
+
+ session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for cipher key");
+ rte_free(priv);
+ return -1;
+ }
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ ctxt->trunc_len = auth_xform->digest_length;
+ authdata.key = (uint64_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA1;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_MD5;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA224;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA384;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA512;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_AES_GCM:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_CCM:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+ auth_xform->algo);
+ goto error_out;
+ }
+ cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ ctxt->iv.length = AES_CBC_IV_LEN;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ ctxt->iv.length = TDES_CBC_IV_LEN;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_GCM:
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_CCM:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ goto error_out;
+ }
+ session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ priv->flc_desc[0].desc[0] = cipherdata.keylen;
+ priv->flc_desc[0].desc[1] = authdata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)priv->flc_desc[0].desc,
+ &priv->flc_desc[0].desc[2], 2);
+
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
+ goto error_out;
+ }
+ if (priv->flc_desc[0].desc[2] & 1) {
+ cipherdata.key_type = RTA_DATA_IMM;
+ } else {
+ cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+ cipherdata.key_type = RTA_DATA_PTR;
+ }
+ if (priv->flc_desc[0].desc[2] & (1 << 1)) {
+ authdata.key_type = RTA_DATA_IMM;
+ } else {
+ authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
+ authdata.key_type = RTA_DATA_PTR;
+ }
+ priv->flc_desc[0].desc[0] = 0;
+ priv->flc_desc[0].desc[1] = 0;
+ priv->flc_desc[0].desc[2] = 0;
+
+ if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
+ bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
+ 0, &cipherdata, &authdata,
+ ctxt->iv.length,
+ ctxt->auth_only_len,
+ ctxt->trunc_len,
+ session->dir);
+ } else {
+ RTE_LOG(ERR, PMD, "Hash before cipher not supported");
+ goto error_out;
+ }
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+
+ return 0;
+
+error_out:
+ rte_free(session->cipher_key.data);
+ rte_free(session->auth_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static void *
+dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ dpaa2_sec_session *session = sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(sess == NULL)) {
+ RTE_LOG(ERR, PMD, "invalid session struct");
+ return NULL;
+ }
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ session->ctxt_type = DPAA2_SEC_CIPHER;
+ dpaa2_sec_cipher_init(dev, xform, session);
+
+ /* Authentication Only */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ session->ctxt_type = DPAA2_SEC_AUTH;
+ dpaa2_sec_auth_init(dev, xform, session);
+
+ /* Cipher then Authenticate */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ session->ext_params.aead_ctxt.auth_cipher_text = true;
+ dpaa2_sec_aead_init(dev, xform, session);
+
+ /* Authenticate then Cipher */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ session->ext_params.aead_ctxt.auth_cipher_text = false;
+ dpaa2_sec_aead_init(dev, xform, session);
+ } else {
+ RTE_LOG(ERR, PMD, "Invalid crypto type");
+ return NULL;
+ }
+
+ return session;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+dpaa2_sec_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ dpaa2_sec_session *s = (dpaa2_sec_session *)sess;
+
+ if (s) {
+ rte_free(s->ctxt);
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(sess, 0, sizeof(dpaa2_sec_session));
+ }
+}
+
+static int
+dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return -ENOTSUP;
+}
+
+static int
+dpaa2_sec_dev_start(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_attr attr;
+ struct dpaa2_queue *dpaa2_q;
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ dev->data->queue_pairs;
+ struct dpseci_rx_queue_attr rx_attr;
+ struct dpseci_tx_queue_attr tx_attr;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(&attr, 0, sizeof(struct dpseci_attr));
+
+ ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
+ priv->hw_id);
+ goto get_attr_failure;
+ }
+ ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
+ goto get_attr_failure;
+ }
+ for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
+ dpaa2_q = &qp[i]->rx_vq;
+ dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
+ &rx_attr);
+ dpaa2_q->fqid = rx_attr.fqid;
+ PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
+ }
+ for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
+ dpaa2_q = &qp[i]->tx_vq;
+ dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
+ &tx_attr);
+ dpaa2_q->fqid = tx_attr.fqid;
+ PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
+ }
+
+ return 0;
+get_attr_failure:
+ dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
+ return -1;
+}
+
+static void
+dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
+ priv->hw_id);
+ return;
+ }
+
+ ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "SEC Device cannot be reset:Error = %0x\n",
+ ret);
+ return;
+ }
+}
+
+static int
+dpaa2_sec_dev_close(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Function is reverse of dpaa2_sec_dev_init.
+ * It does the following:
+ * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
+ * 2. Close the DPSECI device
+ * 3. Free the allocated resources.
+ */
+
+ /*Close the device at underlying layer*/
+ ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
+ " error code %d\n", ret);
+ return -1;
+ }
+
+ /*Free the allocated memory for ethernet private data and dpseci*/
+ priv->hw = NULL;
+ free(dpseci);
+
+ return 0;
+}
+
+static void
+dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = dpaa2_sec_capabilities;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+ }
+}
+
+static
+void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_sec_counters counters = {0};
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ dev->data->queue_pairs;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+ if (stats == NULL) {
+ PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
+ return;
+ }
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
+ stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
+ stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
+ stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
+ }
+
+ ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
+ &counters);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
+ } else {
+ PMD_DRV_LOG(INFO, "dpseci hw stats:"
+ "\n\tNumber of Requests Dequeued = %lu"
+ "\n\tNumber of Outbound Encrypt Requests = %lu"
+ "\n\tNumber of Inbound Decrypt Requests = %lu"
+ "\n\tNumber of Outbound Bytes Encrypted = %lu"
+ "\n\tNumber of Outbound Bytes Protected = %lu"
+ "\n\tNumber of Inbound Bytes Decrypted = %lu"
+ "\n\tNumber of Inbound Bytes Validated = %lu",
+ counters.dequeued_requests,
+ counters.ob_enc_requests,
+ counters.ib_dec_requests,
+ counters.ob_enc_bytes,
+ counters.ob_prot_bytes,
+ counters.ib_dec_bytes,
+ counters.ib_valid_bytes);
+ }
+}
+
+static
+void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
+{
+ int i;
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ (dev->data->queue_pairs);
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ continue;
+ }
+ qp[i]->tx_vq.rx_pkts = 0;
+ qp[i]->tx_vq.tx_pkts = 0;
+ qp[i]->tx_vq.err_pkts = 0;
+ qp[i]->rx_vq.rx_pkts = 0;
+ qp[i]->rx_vq.tx_pkts = 0;
+ qp[i]->rx_vq.err_pkts = 0;
+ }
+}
+
+static struct rte_cryptodev_ops crypto_ops = {
+ .dev_configure = dpaa2_sec_dev_configure,
+ .dev_start = dpaa2_sec_dev_start,
+ .dev_stop = dpaa2_sec_dev_stop,
+ .dev_close = dpaa2_sec_dev_close,
+ .dev_infos_get = dpaa2_sec_dev_infos_get,
+ .stats_get = dpaa2_sec_stats_get,
+ .stats_reset = dpaa2_sec_stats_reset,
+ .queue_pair_setup = dpaa2_sec_queue_pair_setup,
+ .queue_pair_release = dpaa2_sec_queue_pair_release,
+ .queue_pair_start = dpaa2_sec_queue_pair_start,
+ .queue_pair_stop = dpaa2_sec_queue_pair_stop,
+ .queue_pair_count = dpaa2_sec_queue_pair_count,
+ .session_get_size = dpaa2_sec_session_get_size,
+ .session_initialize = dpaa2_sec_session_initialize,
+ .session_configure = dpaa2_sec_session_configure,
+ .session_clear = dpaa2_sec_session_clear,
+};
+
+static int
+dpaa2_sec_uninit(const struct rte_cryptodev_driver *crypto_drv __rte_unused,
+ struct rte_cryptodev *dev)
+{
+ PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
+ dev->data->name, rte_socket_id());
+
+ return 0;
+}
+
+static int
+dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
+{
+ struct dpaa2_sec_dev_private *internals;
+ struct rte_device *dev = cryptodev->device;
+ struct rte_dpaa2_device *dpaa2_dev;
+ struct fsl_mc_io *dpseci;
+ uint16_t token;
+ struct dpseci_attr attr;
+ int retcode, hw_id;
+
+ PMD_INIT_FUNC_TRACE();
+ dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
+ if (dpaa2_dev == NULL) {
+ PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
+ return -1;
+ }
+ hw_id = dpaa2_dev->object_id;
+
+ cryptodev->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+ cryptodev->dev_ops = &crypto_ops;
+
+ cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
+ cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ internals = cryptodev->data->dev_private;
+ internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+ return 0;
+ }
+ /*Open the rte device via MC and save the handle for further use*/
+ dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
+ sizeof(struct fsl_mc_io), 0);
+ if (!dpseci) {
+ PMD_INIT_LOG(ERR,
+ "Error in allocating the memory for dpsec object");
+ return -1;
+ }
+ dpseci->regs = rte_mcp_ptr_list[0];
+
+ retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
+ if (retcode != 0) {
+ PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
+ retcode);
+ goto init_error;
+ }
+ retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
+ if (retcode != 0) {
+ PMD_INIT_LOG(ERR,
+ "Cannot get dpsec device attributed: Error = %x",
+ retcode);
+ goto init_error;
+ }
+ sprintf(cryptodev->data->name, "dpsec-%u", hw_id);
+
+ internals->max_nb_queue_pairs = attr.num_tx_queues;
+ cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
+ internals->hw = dpseci;
+ internals->token = token;
+
+ PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
+ return 0;
+
+init_error:
+ PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
+
+ /* dpaa2_sec_uninit(crypto_dev_name); */
+ return -EFAULT;
+}
+
+static int
+cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ int retval;
+
+ sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);
+
+ cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
+ if (cryptodev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ cryptodev->data->dev_private = rte_zmalloc_socket(
+ "cryptodev private structure",
+ sizeof(struct dpaa2_sec_dev_private),
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (cryptodev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ dpaa2_dev->cryptodev = cryptodev;
+ cryptodev->device = &dpaa2_dev->device;
+ cryptodev->driver = (struct rte_cryptodev_driver *)dpaa2_drv;
+
+ /* init user callbacks */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ /* Invoke PMD device initialization function */
+ retval = dpaa2_sec_dev_init(cryptodev);
+ if (retval == 0)
+ return 0;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ cryptodev->attached = RTE_CRYPTODEV_DETACHED;
+
+ return -ENXIO;
+}
+
+static int
+cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ int ret;
+
+ cryptodev = dpaa2_dev->cryptodev;
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ ret = dpaa2_sec_uninit(NULL, cryptodev);
+ if (ret)
+ return ret;
+
+ /* free crypto device */
+ rte_cryptodev_pmd_release_device(cryptodev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ cryptodev->device = NULL;
+ cryptodev->driver = NULL;
+ cryptodev->data = NULL;
+
+ return 0;
+}
+
+static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
+ .drv_type = DPAA2_MC_DPSECI_DEVID,
+ .driver = {
+ .name = "DPAA2 SEC PMD"
+ },
+ .probe = cryptodev_dpaa2_sec_probe,
+ .remove = cryptodev_dpaa2_sec_remove,
+};
+
+RTE_PMD_REGISTER_DPAA2(dpaa2_sec_pmd, rte_dpaa2_sec_driver);
diff --git a/lib/librte_eal/common/include/arch/tile/rte_spinlock.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
index e91f99ee..03d4c703 100644
--- a/lib/librte_eal/common/include/arch/tile/rte_spinlock.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
@@ -1,7 +1,8 @@
-/*
+/*-
* BSD LICENSE
*
- * Copyright (C) EZchip Semiconductor Ltd. 2015.
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of EZchip Semiconductor nor the names of its
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -28,65 +29,42 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+ */
-#ifndef _RTE_SPINLOCK_TILE_H_
-#define _RTE_SPINLOCK_TILE_H_
+#ifndef _DPAA2_SEC_LOGS_H_
+#define _DPAA2_SEC_LOGS_H_
-#ifndef RTE_FORCE_INTRINSICS
-# error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
-#endif
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-#ifdef __cplusplus
-extern "C" {
+#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
#endif
-#include <rte_common.h>
-#include "generic/rte_spinlock.h"
-
-static inline int rte_tm_supported(void)
-{
- return 0;
-}
-
-static inline void
-rte_spinlock_lock_tm(rte_spinlock_t *sl)
-{
- rte_spinlock_lock(sl); /* fall-back */
-}
-
-static inline int
-rte_spinlock_trylock_tm(rte_spinlock_t *sl)
-{
- return rte_spinlock_trylock(sl);
-}
-
-static inline void
-rte_spinlock_unlock_tm(rte_spinlock_t *sl)
-{
- rte_spinlock_unlock(sl);
-}
-
-static inline void
-rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
-{
- rte_spinlock_recursive_lock(slr); /* fall-back */
-}
-
-static inline void
-rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
-{
- rte_spinlock_recursive_unlock(slr);
-}
+#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
-static inline int
-rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
-{
- return rte_spinlock_recursive_trylock(slr);
-}
+#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
-#ifdef __cplusplus
-}
+#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
#endif
-#endif /* _RTE_SPINLOCK_TILE_H_ */
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _DPAA2_SEC_LOGS_H_ */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
new file mode 100644
index 00000000..f5c61694
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -0,0 +1,368 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+#define _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+
+#define MAX_QUEUES 64
+#define MAX_DESC_SIZE 64
+/** private data structure for each DPAA2_SEC device */
+struct dpaa2_sec_dev_private {
+ void *mc_portal; /**< MC Portal for configuring this device */
+ void *hw; /**< Hardware handle for this device.Used by NADK framework */
+ int32_t hw_id; /**< An unique ID of this device instance */
+ int32_t vfio_fd; /**< File descriptor received via VFIO */
+ uint16_t token; /**< Token required by DPxxx objects */
+ unsigned int max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned int max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+struct dpaa2_sec_qp {
+ struct dpaa2_queue rx_vq;
+ struct dpaa2_queue tx_vq;
+};
+
+enum shr_desc_type {
+ DESC_UPDATE,
+ DESC_FINAL,
+ DESC_INITFINAL,
+};
+
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+/* SEC Flow Context Descriptor */
+struct sec_flow_context {
+ /* word 0 */
+ uint16_t word0_sdid; /* 11-0 SDID */
+ uint16_t word0_res; /* 31-12 reserved */
+
+ /* word 1 */
+ uint8_t word1_sdl; /* 5-0 SDL */
+ /* 7-6 reserved */
+
+ uint8_t word1_bits_15_8; /* 11-8 CRID */
+ /* 14-12 reserved */
+ /* 15 CRJD */
+
+ uint8_t word1_bits23_16; /* 16 EWS */
+ /* 17 DAC */
+ /* 18,19,20 ? */
+ /* 23-21 reserved */
+
+ uint8_t word1_bits31_24; /* 24 RSC */
+ /* 25 RBMT */
+ /* 31-26 reserved */
+
+ /* word 2 RFLC[31-0] */
+ uint32_t word2_rflc_31_0;
+
+ /* word 3 RFLC[63-32] */
+ uint32_t word3_rflc_63_32;
+
+ /* word 4 */
+ uint16_t word4_iicid; /* 15-0 IICID */
+ uint16_t word4_oicid; /* 31-16 OICID */
+
+ /* word 5 */
+ uint32_t word5_ofqid:24; /* 23-0 OFQID */
+ uint32_t word5_31_24:8;
+ /* 24 OSC */
+ /* 25 OBMT */
+ /* 29-26 reserved */
+ /* 31-30 ICR */
+
+ /* word 6 */
+ uint32_t word6_oflc_31_0;
+
+ /* word 7 */
+ uint32_t word7_oflc_63_32;
+
+ /* Word 8-15 storage profiles */
+ uint16_t dl; /**< DataLength(correction) */
+ uint16_t reserved; /**< reserved */
+ uint16_t dhr; /**< DataHeadRoom(correction) */
+ uint16_t mode_bits; /**< mode bits */
+ uint16_t bpv0; /**< buffer pool0 valid */
+ uint16_t bpid0; /**< Bypass Memory Translation */
+ uint16_t bpv1; /**< buffer pool1 valid */
+ uint16_t bpid1; /**< Bypass Memory Translation */
+ uint64_t word_12_15[2]; /**< word 12-15 are reserved */
+};
+
+struct sec_flc_desc {
+ struct sec_flow_context flc;
+ uint32_t desc[MAX_DESC_SIZE];
+};
+
+struct ctxt_priv {
+ struct sec_flc_desc flc_desc[0];
+};
+
+enum dpaa2_sec_op_type {
+ DPAA2_SEC_NONE, /*!< No Cipher operations*/
+ DPAA2_SEC_CIPHER,/*!< CIPHER operations */
+ DPAA2_SEC_AUTH, /*!< Authentication Operations */
+ DPAA2_SEC_CIPHER_HASH, /*!< Authenticated Encryption with
+ * associated data
+ */
+ DPAA2_SEC_HASH_CIPHER, /*!< Encryption with Authenticated
+ * associated data
+ */
+ DPAA2_SEC_IPSEC, /*!< IPSEC protocol operations*/
+ DPAA2_SEC_PDCP, /*!< PDCP protocol operations*/
+ DPAA2_SEC_PKC, /*!< Public Key Cryptographic Operations */
+ DPAA2_SEC_MAX
+};
+
+struct dpaa2_sec_cipher_ctxt {
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } iv; /**< Initialisation vector parameters */
+ uint8_t *init_counter; /*!< Set initial counter for CTR mode */
+};
+
+struct dpaa2_sec_auth_ctxt {
+ uint8_t trunc_len; /*!< Length for output ICV, should
+ * be 0 if no truncation required
+ */
+};
+
+struct dpaa2_sec_aead_ctxt {
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } iv; /**< Initialisation vector parameters */
+ uint16_t auth_only_len; /*!< Length of data for Auth only */
+ uint8_t auth_cipher_text; /**< Authenticate/cipher ordering */
+ uint8_t trunc_len; /*!< Length for output ICV, should
+ * be 0 if no truncation required
+ */
+};
+
+typedef struct dpaa2_sec_session_entry {
+ void *ctxt;
+ uint8_t ctxt_type;
+ uint8_t dir; /*!< Operation Direction */
+ enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+ enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } cipher_key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } auth_key;
+ uint8_t status;
+ union {
+ struct dpaa2_sec_cipher_ctxt cipher_ctxt;
+ struct dpaa2_sec_auth_ctxt auth_ctxt;
+ struct dpaa2_sec_aead_ctxt aead_ctxt;
+ } ext_params;
+} dpaa2_sec_session;
+
+static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+#endif /* _RTE_DPAA2_SEC_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/dpaa2_sec/hw/compat.h b/drivers/crypto/dpaa2_sec/hw/compat.h
new file mode 100644
index 00000000..11fdaa8e
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/compat.h
@@ -0,0 +1,157 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_COMPAT_H__
+#define __RTA_COMPAT_H__
+
+#include <stdint.h>
+#include <errno.h>
+
+#ifdef __GLIBC__
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <rte_byteorder.h>
+
+#ifndef __BYTE_ORDER__
+#error "Undefined endianness"
+#endif
+
+#else
+#error Environment not supported!
+#endif
+
+#ifndef __always_inline
+#define __always_inline (inline __attribute__((always_inline)))
+#endif
+
+#ifndef __always_unused
+#define __always_unused __attribute__((unused))
+#endif
+
+#ifndef __maybe_unused
+#define __maybe_unused __attribute__((unused))
+#endif
+
+#if defined(__GLIBC__) && !defined(pr_debug)
+#if !defined(SUPPRESS_PRINTS) && defined(RTA_DEBUG)
+#define pr_debug(fmt, ...) \
+ RTE_LOG(DEBUG, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_debug(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_debug */
+
+#if defined(__GLIBC__) && !defined(pr_err)
+#if !defined(SUPPRESS_PRINTS)
+#define pr_err(fmt, ...) \
+ RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_err(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_err */
+
+#if defined(__GLIBC__) && !defined(pr_warn)
+#if !defined(SUPPRESS_PRINTS)
+#define pr_warn(fmt, ...) \
+ RTE_LOG(WARNING, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_warn(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_warn */
+
+/**
+ * ARRAY_SIZE - returns the number of elements in an array
+ * @x: array
+ */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef ALIGN
+#define ALIGN(x, a) (((x) + ((__typeof__(x))(a) - 1)) & \
+ ~((__typeof__(x))(a) - 1))
+#endif
+
+#ifndef BIT
+#define BIT(nr) (1UL << (nr))
+#endif
+
+#ifndef upper_32_bits
+/**
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
+ */
+#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
+#endif
+
+#ifndef lower_32_bits
+/**
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
+ */
+#define lower_32_bits(n) ((uint32_t)(n))
+#endif
+
+/* Use Linux naming convention */
+#ifdef __GLIBC__
+ #define swab16(x) rte_bswap16(x)
+ #define swab32(x) rte_bswap32(x)
+ #define swab64(x) rte_bswap64(x)
+ /* Define cpu_to_be32 macro if not defined in the build environment */
+ #if !defined(cpu_to_be32)
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define cpu_to_be32(x) (x)
+ #else
+ #define cpu_to_be32(x) swab32(x)
+ #endif
+ #endif
+ /* Define cpu_to_le32 macro if not defined in the build environment */
+ #if !defined(cpu_to_le32)
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define cpu_to_le32(x) swab32(x)
+ #else
+ #define cpu_to_le32(x) (x)
+ #endif
+ #endif
+#endif
+
+#endif /* __RTA_COMPAT_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/desc.h b/drivers/crypto/dpaa2_sec/hw/desc.h
new file mode 100644
index 00000000..5185be22
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/desc.h
@@ -0,0 +1,2601 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * SEC descriptor composition header.
+ * Definitions to support SEC descriptor instruction generation
+ */
+
+#ifndef __RTA_DESC_H__
+#define __RTA_DESC_H__
+
+/* hw/compat.h is not delivered in kernel */
+#ifndef __KERNEL__
+#include "hw/compat.h"
+#endif
+
+/* Max size of any SEC descriptor in 32-bit words, inclusive of header */
+#define MAX_CAAM_DESCSIZE 64
+
+#define CAAM_CMD_SZ sizeof(uint32_t)
+#define CAAM_PTR_SZ sizeof(dma_addr_t)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+
+/* Block size of any entity covered/uncovered with a KEK/TKEK */
+#define KEK_BLOCKSIZE 16
+
+/*
+ * Supported descriptor command types as they show up
+ * inside a descriptor command word.
+ */
+#define CMD_SHIFT 27
+#define CMD_MASK (0x1f << CMD_SHIFT)
+
+#define CMD_KEY (0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
+#define CMD_LOAD (0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
+#define CMD_MOVEDW (0x06 << CMD_SHIFT)
+#define CMD_MOVEB (0x07 << CMD_SHIFT)
+#define CMD_STORE (0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
+#define CMD_MOVE (0x0f << CMD_SHIFT)
+#define CMD_OPERATION ((uint32_t)(0x10 << CMD_SHIFT))
+#define CMD_SIGNATURE ((uint32_t)(0x12 << CMD_SHIFT))
+#define CMD_JUMP ((uint32_t)(0x14 << CMD_SHIFT))
+#define CMD_MATH ((uint32_t)(0x15 << CMD_SHIFT))
+#define CMD_DESC_HDR ((uint32_t)(0x16 << CMD_SHIFT))
+#define CMD_SHARED_DESC_HDR ((uint32_t)(0x17 << CMD_SHIFT))
+#define CMD_MATHI ((uint32_t)(0x1d << CMD_SHIFT))
+#define CMD_SEQ_IN_PTR ((uint32_t)(0x1e << CMD_SHIFT))
+#define CMD_SEQ_OUT_PTR ((uint32_t)(0x1f << CMD_SHIFT))
+
+/* General-purpose class selector for all commands */
+#define CLASS_SHIFT 25
+#define CLASS_MASK (0x03 << CLASS_SHIFT)
+
+#define CLASS_NONE (0x00 << CLASS_SHIFT)
+#define CLASS_1 (0x01 << CLASS_SHIFT)
+#define CLASS_2 (0x02 << CLASS_SHIFT)
+#define CLASS_BOTH (0x03 << CLASS_SHIFT)
+
+/* ICV Check bits for Algo Operation command */
+#define ICV_CHECK_DISABLE 0
+#define ICV_CHECK_ENABLE 1
+
+/* Encap Mode check bits for Algo Operation command */
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+/*
+ * Descriptor header command constructs
+ * Covers shared, job, and trusted descriptor headers
+ */
+
+/*
+ * Extended Job Descriptor Header
+ */
+#define HDR_EXT BIT(24)
+
+/*
+ * Read input frame as soon as possible (SHR HDR)
+ */
+#define HDR_RIF BIT(25)
+
+/*
+ * Require SEQ LIODN to be the Same (JOB HDR)
+ */
+#define HDR_RSLS BIT(25)
+
+/*
+ * Do Not Run - marks a descriptor not executable if there was
+ * a preceding error somewhere
+ */
+#define HDR_DNR BIT(24)
+
+/*
+ * ONE - should always be set. Combination of ONE (always
+ * set) and ZRO (always clear) forms an endianness sanity check
+ */
+#define HDR_ONE BIT(23)
+#define HDR_ZRO BIT(15)
+
+/* Start Index or SharedDesc Length */
+#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
+
+/* If shared descriptor header, 6-bit length */
+#define HDR_DESCLEN_SHR_MASK 0x3f
+
+/* If non-shared header, 7-bit length */
+#define HDR_DESCLEN_MASK 0x7f
+
+/* This is a TrustedDesc (if not SharedDesc) */
+#define HDR_TRUSTED BIT(14)
+
+/* Make into TrustedDesc (if not SharedDesc) */
+#define HDR_MAKE_TRUSTED BIT(13)
+
+/* Clear Input FiFO (if SharedDesc) */
+#define HDR_CLEAR_IFIFO BIT(13)
+
+/* Save context if self-shared (if SharedDesc) */
+#define HDR_SAVECTX BIT(12)
+
+/* Next item points to SharedDesc */
+#define HDR_SHARED BIT(12)
+
+/*
+ * Reverse Execution Order - execute JobDesc first, then
+ * execute SharedDesc (normally SharedDesc goes first).
+ */
+#define HDR_REVERSE BIT(11)
+
+/* Propagate DNR property to SharedDesc */
+#define HDR_PROP_DNR BIT(11)
+
+/* DECO Select Valid */
+#define HDR_EXT_DSEL_VALID BIT(7)
+
+/* Fake trusted descriptor */
+#define HDR_EXT_FTD BIT(8)
+
+/* JobDesc/SharedDesc share property */
+#define HDR_SD_SHARE_SHIFT 8
+#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_JD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
+
+#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
+
+/* JobDesc/SharedDesc descriptor length */
+#define HDR_JD_LENGTH_MASK 0x7f
+#define HDR_SD_LENGTH_MASK 0x3f
+
+/*
+ * KEY/SEQ_KEY Command Constructs
+ */
+
+/* Key Destination Class: 01 = Class 1, 02 - Class 2 */
+#define KEY_DEST_CLASS_SHIFT 25
+#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
+#define KEY_DEST_CLASS1 (1 << KEY_DEST_CLASS_SHIFT)
+#define KEY_DEST_CLASS2 (2 << KEY_DEST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define KEY_SGF BIT(24)
+#define KEY_VLF BIT(24)
+
+/* Immediate - Key follows command in the descriptor */
+#define KEY_IMM BIT(23)
+
+/*
+ * Already in Input Data FIFO - the Input Data Sequence is not read, since it is
+ * already in the Input Data FIFO.
+ */
+#define KEY_AIDF BIT(23)
+
+/*
+ * Encrypted - Key is encrypted either with the KEK, or
+ * with the TDKEK if this descriptor is trusted
+ */
+#define KEY_ENC BIT(22)
+
+/*
+ * No Write Back - Do not allow key to be FIFO STOREd
+ */
+#define KEY_NWB BIT(21)
+
+/*
+ * Enhanced Encryption of Key
+ */
+#define KEY_EKT BIT(20)
+
+/*
+ * Encrypted with Trusted Key
+ */
+#define KEY_TK BIT(15)
+
+/*
+ * Plaintext Store
+ */
+#define KEY_PTS BIT(14)
+
+/*
+ * KDEST - Key Destination: 0 - class key register,
+ * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split key
+ */
+#define KEY_DEST_SHIFT 16
+#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
+
+#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
+
+/* Length in bytes */
+#define KEY_LENGTH_MASK 0x000003ff
+
+/*
+ * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
+ */
+
+/*
+ * Load/Store Destination: 0 = class independent CCB,
+ * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
+ */
+#define LDST_CLASS_SHIFT 25
+#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define LDST_SGF BIT(24)
+#define LDST_VLF BIT(24)
+
+/* Immediate - Key follows this command in descriptor */
+#define LDST_IMM_MASK 1
+#define LDST_IMM_SHIFT 23
+#define LDST_IMM BIT(23)
+
+/* SRC/DST - Destination for LOAD, Source for STORE */
+#define LDST_SRCDST_SHIFT 16
+#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_INFIFO (0x7c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_OUTFIFO (0x7e << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_WORD_MODE_REG (0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_JQCTRL (0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_KEYSZ_REG (0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_JQDAR (0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_STAT (0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_DCHKSM (0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PID (0x04 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_GTR (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS_CTX (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STR (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB (0x41 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SZL (0x70 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SZM (0x71 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_L (0x72 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_M (0x73 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_SZL (0x74 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_SZM (0x75 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IFNSR (0x76 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_OFNSR (0x77 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_ALTSOURCE (0x78 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination */
+#define LDST_OFFSET_SHIFT 8
+#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
+
+/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
+/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
+#define LDOFF_CHG_SHARE_SHIFT 0
+#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO BIT(2)
+#define LDOFF_DISABLE_AUTO_NFIFO BIT(3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
+#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT 6
+#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes */
+#define LDST_LEN_SHIFT 0
+#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
+
+/* Special Length definitions when dst=deco-ctrl */
+#define LDLEN_ENABLE_OSL_COUNT BIT(7)
+#define LDLEN_RST_CHA_OFIFO_PTR BIT(6)
+#define LDLEN_RST_OFIFO BIT(5)
+#define LDLEN_SET_OFIFO_OFF_VALID BIT(4)
+#define LDLEN_SET_OFIFO_OFF_RSVD BIT(3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
+#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+
+/* CCB Clear Written Register bits */
+#define CLRW_CLR_C1MODE BIT(0)
+#define CLRW_CLR_C1DATAS BIT(2)
+#define CLRW_CLR_C1ICV BIT(3)
+#define CLRW_CLR_C1CTX BIT(5)
+#define CLRW_CLR_C1KEY BIT(6)
+#define CLRW_CLR_PK_A BIT(12)
+#define CLRW_CLR_PK_B BIT(13)
+#define CLRW_CLR_PK_N BIT(14)
+#define CLRW_CLR_PK_E BIT(15)
+#define CLRW_CLR_C2MODE BIT(16)
+#define CLRW_CLR_C2KEYS BIT(17)
+#define CLRW_CLR_C2DATAS BIT(18)
+#define CLRW_CLR_C2CTX BIT(21)
+#define CLRW_CLR_C2KEY BIT(22)
+#define CLRW_RESET_CLS2_DONE BIT(26) /* era 4 */
+#define CLRW_RESET_CLS1_DONE BIT(27) /* era 4 */
+#define CLRW_RESET_CLS2_CHA BIT(28) /* era 4 */
+#define CLRW_RESET_CLS1_CHA BIT(29) /* era 4 */
+#define CLRW_RESET_OFIFO BIT(30) /* era 3 */
+#define CLRW_RESET_IFIFO_DFIFO BIT(31) /* era 3 */
+
+/* CHA Control Register bits */
+#define CCTRL_RESET_CHA_ALL BIT(0)
+#define CCTRL_RESET_CHA_AESA BIT(1)
+#define CCTRL_RESET_CHA_DESA BIT(2)
+#define CCTRL_RESET_CHA_AFHA BIT(3)
+#define CCTRL_RESET_CHA_KFHA BIT(4)
+#define CCTRL_RESET_CHA_SF8A BIT(5)
+#define CCTRL_RESET_CHA_PKHA BIT(6)
+#define CCTRL_RESET_CHA_MDHA BIT(7)
+#define CCTRL_RESET_CHA_CRCA BIT(8)
+#define CCTRL_RESET_CHA_RNG BIT(9)
+#define CCTRL_RESET_CHA_SF9A BIT(10)
+#define CCTRL_RESET_CHA_ZUCE BIT(11)
+#define CCTRL_RESET_CHA_ZUCA BIT(12)
+#define CCTRL_UNLOAD_PK_A0 BIT(16)
+#define CCTRL_UNLOAD_PK_A1 BIT(17)
+#define CCTRL_UNLOAD_PK_A2 BIT(18)
+#define CCTRL_UNLOAD_PK_A3 BIT(19)
+#define CCTRL_UNLOAD_PK_B0 BIT(20)
+#define CCTRL_UNLOAD_PK_B1 BIT(21)
+#define CCTRL_UNLOAD_PK_B2 BIT(22)
+#define CCTRL_UNLOAD_PK_B3 BIT(23)
+#define CCTRL_UNLOAD_PK_N BIT(24)
+#define CCTRL_UNLOAD_PK_A BIT(26)
+#define CCTRL_UNLOAD_PK_B BIT(27)
+#define CCTRL_UNLOAD_SBOX BIT(28)
+
+/* IRQ Control Register (CxCIRQ) bits */
+#define CIRQ_ADI BIT(1)
+#define CIRQ_DDI BIT(2)
+#define CIRQ_RCDI BIT(3)
+#define CIRQ_KDI BIT(4)
+#define CIRQ_S8DI BIT(5)
+#define CIRQ_PDI BIT(6)
+#define CIRQ_MDI BIT(7)
+#define CIRQ_CDI BIT(8)
+#define CIRQ_RNDI BIT(9)
+#define CIRQ_S9DI BIT(10)
+#define CIRQ_ZEDI BIT(11) /* valid for Era 5 or higher */
+#define CIRQ_ZADI BIT(12) /* valid for Era 5 or higher */
+#define CIRQ_AEI BIT(17)
+#define CIRQ_DEI BIT(18)
+#define CIRQ_RCEI BIT(19)
+#define CIRQ_KEI BIT(20)
+#define CIRQ_S8EI BIT(21)
+#define CIRQ_PEI BIT(22)
+#define CIRQ_MEI BIT(23)
+#define CIRQ_CEI BIT(24)
+#define CIRQ_RNEI BIT(25)
+#define CIRQ_S9EI BIT(26)
+#define CIRQ_ZEEI BIT(27) /* valid for Era 5 or higher */
+#define CIRQ_ZAEI BIT(28) /* valid for Era 5 or higher */
+
+/*
+ * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
+ * Command Constructs
+ */
+
+/*
+ * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
+ * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
+ * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
+ */
+#define FIFOLD_CLASS_SHIFT 25
+#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT 25
+#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_BOTH (0x03 << FIFOST_CLASS_SHIFT)
+
+/*
+ * Scatter-Gather Table/Variable Length Field
+ * If set for FIFO_LOAD, refers to a SG table. Within
+ * SEQ_FIFO_LOAD, is variable input sequence
+ */
+#define FIFOLDST_SGF_SHIFT 24
+#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF BIT(24)
+#define FIFOLDST_VLF BIT(24)
+
+/*
+ * Immediate - Data follows command in descriptor
+ * AIDF - Already in Input Data FIFO
+ */
+#define FIFOLD_IMM_SHIFT 23
+#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_AIDF_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM BIT(23)
+#define FIFOLD_AIDF BIT(23)
+
+#define FIFOST_IMM_SHIFT 23
+#define FIFOST_IMM_MASK (1 << FIFOST_IMM_SHIFT)
+#define FIFOST_IMM BIT(23)
+
+/* Continue - Not the last FIFO store to come */
+#define FIFOST_CONT_SHIFT 23
+#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
+#define FIFOST_CONT BIT(23)
+
+/*
+ * Extended Length - use 32-bit extended length that
+ * follows the pointer field. Illegal with IMM set
+ */
+#define FIFOLDST_EXT_SHIFT 22
+#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT BIT(22)
+
+/* Input data type.*/
+#define FIFOLD_TYPE_SHIFT 16
+#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
+
+/* PK types */
+#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
+
+/* Other types. Need to OR in last/flush bits as desired */
+#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
+
+/* Last/Flush bits for use with "other" types above */
+#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOINFOFIFO (0x0f << FIFOLD_TYPE_SHIFT)
+
+#define FIFOLDST_LEN_MASK 0xffff
+#define FIFOLDST_EXT_LEN_MASK 0xffffffff
+
+/* Output data types */
+#define FIFOST_TYPE_SHIFT 16
+#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA2 (0x31 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
+
+/*
+ * OPERATION Command Constructs
+ */
+
+/* Operation type selectors - OP TYPE */
+#define OP_TYPE_SHIFT 24
+#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
+
+#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
+
+/* ProtocolID selectors - PROTID */
+#define OP_PCLID_SHIFT 16
+#define OP_PCLID_MASK (0xff << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
+#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12_PRF (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
+#define OP_PCLID_DIFFIEHELLMAN (0x17 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSAENCRYPT (0x18 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSADECRYPT (0x19 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
+#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10 (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_IPSEC_NEW (0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_DCRC (0x31 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_RLC_PDU (0x32 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_RLC_SDU (0x33 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_USER (0x42 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_CTRL (0x43 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_CTRL_MIXED (0x44 << OP_PCLID_SHIFT)
+
+/*
+ * ProtocolInfo selectors
+ */
+#define OP_PCLINFO_MASK 0xffff
+
+/* for OP_PCLID_IPSEC */
+#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
+
+#define OP_PCL_IPSEC_DES_IV64 0x0100
+#define OP_PCL_IPSEC_DES 0x0200
+#define OP_PCL_IPSEC_3DES 0x0300
+#define OP_PCL_IPSEC_NULL 0x0B00
+#define OP_PCL_IPSEC_AES_CBC 0x0c00
+#define OP_PCL_IPSEC_AES_CTR 0x0d00
+#define OP_PCL_IPSEC_AES_XTS 0x1600
+#define OP_PCL_IPSEC_AES_CCM8 0x0e00
+#define OP_PCL_IPSEC_AES_CCM12 0x0f00
+#define OP_PCL_IPSEC_AES_CCM16 0x1000
+#define OP_PCL_IPSEC_AES_GCM8 0x1200
+#define OP_PCL_IPSEC_AES_GCM12 0x1300
+#define OP_PCL_IPSEC_AES_GCM16 0x1400
+#define OP_PCL_IPSEC_AES_NULL_WITH_GMAC 0x1500
+
+#define OP_PCL_IPSEC_HMAC_NULL 0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
+#define OP_PCL_IPSEC_AES_CMAC_96 0x0008
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
+
+/* For SRTP - OP_PCLID_SRTP */
+#define OP_PCL_SRTP_CIPHER_MASK 0xff00
+#define OP_PCL_SRTP_AUTH_MASK 0x00ff
+
+#define OP_PCL_SRTP_AES_CTR 0x0d00
+
+#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
+
+/* For SSL 3.0 - OP_PCLID_SSL30 */
+#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
+#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
+#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
+
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_1 0x009C
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_1 0x009D
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_2 0x009E
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_2 0x009F
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_3 0x00A0
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_3 0x00A1
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_4 0x00A2
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_4 0x00A3
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_5 0x00A4
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_5 0x00A5
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_6 0x00A6
+
+#define OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384 0x00A7
+#define OP_PCL_TLS_PSK_AES_128_GCM_SHA256 0x00A8
+#define OP_PCL_TLS_PSK_AES_256_GCM_SHA384 0x00A9
+#define OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256 0x00AA
+#define OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384 0x00AB
+#define OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256 0x00AC
+#define OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384 0x00AD
+#define OP_PCL_TLS_PSK_AES_128_CBC_SHA256 0x00AE
+#define OP_PCL_TLS_PSK_AES_256_CBC_SHA384 0x00AF
+#define OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256 0x00B2
+#define OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384 0x00B3
+#define OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256 0x00B6
+#define OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384 0x00B7
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
+
+#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
+#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
+#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
+#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
+#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
+#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
+#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
+#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_SSL30_RC4_128_MD5 0x0024
+#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
+#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_SSL30_RC4_40_MD5 0x002b
+#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
+#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_SSL30_RC4_128_SHA 0x0020
+#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
+#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
+#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
+#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
+#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
+#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
+#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
+#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
+#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_SSL30_RC4_40_SHA 0x0028
+
+/* For TLS 1.0 - OP_PCLID_TLS10 */
+#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
+
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256 0xC023
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384 0xC024
+#define OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256 0xC025
+#define OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384 0xC026
+#define OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256 0xC027
+#define OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384 0xC028
+#define OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256 0xC029
+#define OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384 0xC02A
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256 0xC02B
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384 0xC02C
+#define OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256 0xC02D
+#define OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384 0xC02E
+#define OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256 0xC02F
+#define OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384 0xC030
+#define OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256 0xC031
+#define OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384 0xC032
+#define OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA 0xC033
+#define OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA 0xC034
+#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA 0xC035
+#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA 0xC036
+#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256 0xC037
+#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384 0xC038
+
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS10_RC4_128_MD5 0x0024
+#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS10_RC4_40_MD5 0x002b
+#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS10_RC4_128_SHA 0x0020
+#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS10_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
+
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA160 0xff90
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA384 0xff93
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA224 0xff94
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA512 0xff95
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA256 0xff96
+#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE 0xfffe
+#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF 0xffff
+
+/* For TLS 1.1 - OP_PCLID_TLS11 */
+#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS11_RC4_128_MD5 0x0024
+#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS11_RC4_40_MD5 0x002b
+#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS11_RC4_128_SHA 0x0020
+#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS11_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
+
+
+/* For TLS 1.2 - OP_PCLID_TLS12 */
+#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS12_RC4_128_MD5 0x0024
+#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS12_RC4_40_MD5 0x002b
+#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS12_RC4_128_SHA 0x0020
+#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS12_RC4_40_SHA 0x0028
+
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
+
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
+
+/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
+
+/* For DTLS - OP_PCLID_DTLS */
+
+#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
+#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
+#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
+
+#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
+#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
+
+
+#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
+#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
+#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
+#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
+#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
+#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
+#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
+
+/* 802.16 WiMAX protinfos */
+#define OP_PCL_WIMAX_OFDM 0x0201
+#define OP_PCL_WIMAX_OFDMA 0x0231
+
+/* 802.11 WiFi protinfos */
+#define OP_PCL_WIFI 0xac04
+
+/* MacSec protinfos */
+#define OP_PCL_MACSEC 0x0001
+
+/* 3G DCRC protinfos */
+#define OP_PCL_3G_DCRC_CRC7 0x0710
+#define OP_PCL_3G_DCRC_CRC11 0x0B10
+
+/* 3G RLC protinfos */
+#define OP_PCL_3G_RLC_NULL 0x0000
+#define OP_PCL_3G_RLC_KASUMI 0x0001
+#define OP_PCL_3G_RLC_SNOW 0x0002
+
+/* LTE protinfos */
+#define OP_PCL_LTE_NULL 0x0000
+#define OP_PCL_LTE_SNOW 0x0001
+#define OP_PCL_LTE_AES 0x0002
+#define OP_PCL_LTE_ZUC 0x0003
+
+/* LTE mixed protinfos */
+#define OP_PCL_LTE_MIXED_AUTH_SHIFT 0
+#define OP_PCL_LTE_MIXED_AUTH_MASK (3 << OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_SHIFT 8
+#define OP_PCL_LTE_MIXED_ENC_MASK (3 < OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_NULL (OP_PCL_LTE_NULL << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_SNOW (OP_PCL_LTE_SNOW << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_AES (OP_PCL_LTE_AES << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_ZUC (OP_PCL_LTE_ZUC << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_NULL (OP_PCL_LTE_NULL << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_SNOW (OP_PCL_LTE_SNOW << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_AES (OP_PCL_LTE_AES << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_ZUC (OP_PCL_LTE_ZUC << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_DSA_MSG BIT(10)
+#define OP_PCL_PKPROT_HASH_SHIFT 7
+#define OP_PCL_PKPROT_HASH_MASK (7 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_MD5 (0 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA1 (1 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA224 (2 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA256 (3 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA384 (4 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA512 (5 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_EKT_Z BIT(6)
+#define OP_PCL_PKPROT_DECRYPT_Z BIT(5)
+#define OP_PCL_PKPROT_EKT_PRI BIT(4)
+#define OP_PCL_PKPROT_TEST BIT(3)
+#define OP_PCL_PKPROT_DECRYPT_PRI BIT(2)
+#define OP_PCL_PKPROT_ECC BIT(1)
+#define OP_PCL_PKPROT_F2M BIT(0)
+
+/* Blob protinfos */
+#define OP_PCL_BLOB_TKEK_SHIFT 9
+#define OP_PCL_BLOB_TKEK BIT(9)
+#define OP_PCL_BLOB_EKT_SHIFT 8
+#define OP_PCL_BLOB_EKT BIT(8)
+#define OP_PCL_BLOB_REG_SHIFT 4
+#define OP_PCL_BLOB_REG_MASK (0xF << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_MEMORY (0x0 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_KEY1 (0x1 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_KEY2 (0x3 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_AFHA_SBOX (0x5 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_SPLIT (0x7 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_PKE (0x9 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_SEC_MEM_SHIFT 3
+#define OP_PCL_BLOB_SEC_MEM BIT(3)
+#define OP_PCL_BLOB_BLACK BIT(2)
+#define OP_PCL_BLOB_FORMAT_SHIFT 0
+#define OP_PCL_BLOB_FORMAT_MASK 0x3
+#define OP_PCL_BLOB_FORMAT_NORMAL 0
+#define OP_PCL_BLOB_FORMAT_MASTER_VER 2
+#define OP_PCL_BLOB_FORMAT_TEST 3
+
+/* IKE / IKEv2 protinfos */
+#define OP_PCL_IKE_HMAC_MD5 0x0100
+#define OP_PCL_IKE_HMAC_SHA1 0x0200
+#define OP_PCL_IKE_HMAC_AES128_CBC 0x0400
+#define OP_PCL_IKE_HMAC_SHA256 0x0500
+#define OP_PCL_IKE_HMAC_SHA384 0x0600
+#define OP_PCL_IKE_HMAC_SHA512 0x0700
+#define OP_PCL_IKE_HMAC_AES128_CMAC 0x0800
+
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_TEST BIT(3)
+#define OP_PCL_PKPROT_DECRYPT BIT(2)
+#define OP_PCL_PKPROT_ECC BIT(1)
+#define OP_PCL_PKPROT_F2M BIT(0)
+
+/* RSA Protinfo */
+#define OP_PCL_RSAPROT_OP_MASK 3
+#define OP_PCL_RSAPROT_OP_ENC_F_IN 0
+#define OP_PCL_RSAPROT_OP_ENC_F_OUT 1
+#define OP_PCL_RSAPROT_OP_DEC_ND 0
+#define OP_PCL_RSAPROT_OP_DEC_PQD 1
+#define OP_PCL_RSAPROT_OP_DEC_PQDPDQC 2
+#define OP_PCL_RSAPROT_FFF_SHIFT 4
+#define OP_PCL_RSAPROT_FFF_MASK (7 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_RED (0 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_ENC (1 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_TK_ENC (5 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_EKT (3 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_TK_EKT (7 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_PPP_SHIFT 8
+#define OP_PCL_RSAPROT_PPP_MASK (7 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_RED (0 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_ENC (1 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_TK_ENC (5 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_EKT (3 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_TK_EKT (7 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_FMT_PKCSV15 BIT(12)
+
+/* Derived Key Protocol (DKP) Protinfo */
+#define OP_PCL_DKP_SRC_SHIFT 14
+#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_DST_SHIFT 12
+#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_KEY_SHIFT 0
+#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
+
+/* For non-protocol/alg-only op commands */
+#define OP_ALG_TYPE_SHIFT 24
+#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS1 (0x2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2 (0x4 << OP_ALG_TYPE_SHIFT)
+
+#define OP_ALG_ALGSEL_SHIFT 16
+#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_AES (0x10 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA256 (0x43 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA384 (0x44 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA512 (0x45 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_RNG (0x50 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F8 (0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ZUCE (0xB0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ZUCA (0xC0 << OP_ALG_ALGSEL_SHIFT)
+
+#define OP_ALG_AAI_SHIFT 4
+#define OP_ALG_AAI_MASK (0x3ff << OP_ALG_AAI_SHIFT)
+
+/* block cipher AAI set */
+#define OP_ALG_AESA_MODE_MASK (0xF0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD128 (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD8 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD16 (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD24 (0x03 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD32 (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD40 (0x05 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD48 (0x06 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD56 (0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD64 (0x08 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD72 (0x09 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD80 (0x0a << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD88 (0x0b << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD96 (0x0c << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD104 (0x0d << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD112 (0x0e << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD120 (0x0f << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_ECB (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CFB (0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_OFB (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XTS (0x50 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CMAC (0x60 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XCBC_MAC (0x70 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CCM (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GCM (0x90 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_XCBCMAC (0xa0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_XCBCMAC (0xb0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_CMAC (0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_CMAC_LTE (0xd0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_CMAC (0xe0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CHECKODD (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DK (0x100 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_C2K (0x200 << OP_ALG_AAI_SHIFT)
+
+/* randomizer AAI set */
+#define OP_ALG_RNG_MODE_MASK (0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_SHIFT OP_ALG_AAI_SHIFT
+#define OP_ALG_AAI_RNG4_SH_MASK (0x03 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
+
+/* hmac/smac AAI set */
+#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_SMAC (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC_PRECOMP (0x04 << OP_ALG_AAI_SHIFT)
+
+/* CRC AAI set*/
+#define OP_ALG_CRC_POLY_MASK (0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_802 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_3385 (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CUST_POLY (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DIS (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOS (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOC (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_IVZ (0x80 << OP_ALG_AAI_SHIFT)
+
+/* Kasumi/SNOW/ZUC AAI set */
+#define OP_ALG_AAI_F8 (0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_F9 (0xc8 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
+
+#define OP_ALG_AS_SHIFT 2
+#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INIT (1 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_FINALIZE (2 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INITFINAL (3 << OP_ALG_AS_SHIFT)
+
+#define OP_ALG_ICV_SHIFT 1
+#define OP_ALG_ICV_MASK (1 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_OFF 0
+#define OP_ALG_ICV_ON BIT(1)
+
+#define OP_ALG_DIR_SHIFT 0
+#define OP_ALG_DIR_MASK 1
+#define OP_ALG_DECRYPT 0
+#define OP_ALG_ENCRYPT BIT(0)
+
+/* PKHA algorithm type set */
+#define OP_ALG_PK 0x00800000
+#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
+
+/* PKHA mode clear memory functions */
+#define OP_ALG_PKMODE_A_RAM BIT(19)
+#define OP_ALG_PKMODE_B_RAM BIT(18)
+#define OP_ALG_PKMODE_E_RAM BIT(17)
+#define OP_ALG_PKMODE_N_RAM BIT(16)
+#define OP_ALG_PKMODE_CLEARMEM BIT(0)
+
+/* PKHA mode clear memory functions */
+#define OP_ALG_PKMODE_CLEARMEM_ALL (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_ABE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_ABN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AB (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AEN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_A (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BEN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_B (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_EN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_E (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_N (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_N_RAM)
+
+/* PKHA mode modular-arithmetic functions */
+#define OP_ALG_PKMODE_MOD_IN_MONTY BIT(19)
+#define OP_ALG_PKMODE_MOD_OUT_MONTY BIT(18)
+#define OP_ALG_PKMODE_MOD_F2M BIT(17)
+#define OP_ALG_PKMODE_MOD_R2_IN BIT(16)
+#define OP_ALG_PKMODE_PRJECTV BIT(11)
+#define OP_ALG_PKMODE_TIME_EQ BIT(10)
+
+#define OP_ALG_PKMODE_OUT_B 0x000
+#define OP_ALG_PKMODE_OUT_A 0x100
+
+/*
+ * PKHA mode modular-arithmetic integer functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_MOD_ADD 0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
+#define OP_ALG_PKMODE_MOD_MULT 0x005
+#define OP_ALG_PKMODE_MOD_MULT_IM (0x005 | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_MOD_MULT_IM_OM (0x005 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY)
+#define OP_ALG_PKMODE_MOD_EXPO 0x006
+#define OP_ALG_PKMODE_MOD_EXPO_TEQ (0x006 | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_MOD_EXPO_IM (0x006 | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_MOD_EXPO_IM_TEQ (0x006 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_MOD_REDUCT 0x007
+#define OP_ALG_PKMODE_MOD_INV 0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
+#define OP_ALG_PKMODE_MOD_GCD 0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
+#define OP_ALG_PKMODE_MOD_SML_EXP 0x016
+
+/*
+ * PKHA mode modular-arithmetic F2m functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_F2M_ADD (0x002 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_MUL (0x005 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_MUL_IM (0x005 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_F2M_MUL_IM_OM (0x005 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY)
+#define OP_ALG_PKMODE_F2M_EXP (0x006 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_EXP_TEQ (0x006 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_F2M_AMODN (0x007 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_INV (0x008 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_R2 (0x00c | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_GCD (0x00e | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_SML_EXP (0x016 | OP_ALG_PKMODE_MOD_F2M)
+
+/*
+ * PKHA mode ECC Integer arithmetic functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_ECC_MOD_ADD 0x009
+#define OP_ALG_PKMODE_ECC_MOD_ADD_IM_OM_PROJ \
+ (0x009 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_DBL 0x00a
+#define OP_ALG_PKMODE_ECC_MOD_DBL_IM_OM_PROJ \
+ (0x00a | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_MUL 0x00b
+#define OP_ALG_PKMODE_ECC_MOD_MUL_TEQ (0x00b | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2 (0x00b | OP_ALG_PKMODE_MOD_R2_IN)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV \
+ | OP_ALG_PKMODE_TIME_EQ)
+
+/*
+ * PKHA mode ECC F2m arithmetic functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_ECC_F2M_ADD (0x009 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_ADD_IM_OM_PROJ \
+ (0x009 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_DBL (0x00a | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_DBL_IM_OM_PROJ \
+ (0x00a | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_MUL (0x00b | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2 \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV \
+ | OP_ALG_PKMODE_TIME_EQ)
+
+/* PKHA mode copy-memory functions */
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
+#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT 10
+#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
+#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
+#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+/* PKHA mode copy-memory functions - amount based on N SIZE */
+#define OP_ALG_PKMODE_COPY_NSZ 0x10
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A_B (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_NSZ_A_N (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_NSZ_B_A (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_NSZ_B_N (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_NSZ_N_A (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_N_B (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_N_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_E)
+
+/* PKHA mode copy-memory functions - amount based on SRC SIZE */
+#define OP_ALG_PKMODE_COPY_SSZ 0x11
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A_B (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_SSZ_A_N (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_SSZ_B_A (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_SSZ_B_N (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_SSZ_N_A (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_N_B (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_N_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_E)
+
+/*
+ * SEQ_IN_PTR Command Constructs
+ */
+
+/* Release Buffers */
+#define SQIN_RBS BIT(26)
+
+/* Sequence pointer is really a descriptor */
+#define SQIN_INL BIT(25)
+
+/* Sequence pointer is a scatter-gather table */
+#define SQIN_SGF BIT(24)
+
+/* Appends to a previous pointer */
+#define SQIN_PRE BIT(23)
+
+/* Use extended length following pointer */
+#define SQIN_EXT BIT(22)
+
+/* Restore sequence with pointer/length */
+#define SQIN_RTO BIT(21)
+
+/* Replace job descriptor */
+#define SQIN_RJD BIT(20)
+
+/* Sequence Out Pointer - start a new input sequence using output sequence */
+#define SQIN_SOP BIT(19)
+
+#define SQIN_LEN_SHIFT 0
+#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
+
+/*
+ * SEQ_OUT_PTR Command Constructs
+ */
+
+/* Sequence pointer is a scatter-gather table */
+#define SQOUT_SGF BIT(24)
+
+/* Appends to a previous pointer */
+#define SQOUT_PRE BIT(23)
+
+/* Restore sequence with pointer/length */
+#define SQOUT_RTO BIT(21)
+
+/*
+ * Ignore length field, add current output frame length back to SOL register.
+ * Reset tracking length of bytes written to output frame.
+ * Must be used together with SQOUT_RTO.
+ */
+#define SQOUT_RST BIT(20)
+
+/* Allow "write safe" transactions for this Output Sequence */
+#define SQOUT_EWS BIT(19)
+
+/* Use extended length following pointer */
+#define SQOUT_EXT BIT(22)
+
+#define SQOUT_LEN_SHIFT 0
+#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
+
+/*
+ * SIGNATURE Command Constructs
+ */
+
+/* TYPE field is all that's relevant */
+#define SIGN_TYPE_SHIFT 16
+#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
+
+#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
+
+/*
+ * MOVE Command Constructs
+ */
+
+#define MOVE_AUX_SHIFT 25
+#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT 24
+#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP BIT(24)
+
+#define MOVE_SRC_SHIFT 20
+#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_NO_NFIFO (0x0a << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT 16
+#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_INFIFO (0x0a << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
+#define MOVE_DEST_ALTSOURCE (0x0f << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT 8
+#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT 0
+#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT 0
+#define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
+#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
+
+#define MOVELEN_SIZE_SHIFT 6
+#define MOVELEN_SIZE_MASK (0x3 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_WORD (0x01 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_BYTE (0x02 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_DWORD (0x03 << MOVELEN_SIZE_SHIFT)
+
+/*
+ * MATH Command Constructs
+ */
+
+#define MATH_IFB_SHIFT 26
+#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
+#define MATH_IFB BIT(26)
+
+#define MATH_NFU_SHIFT 25
+#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
+#define MATH_NFU BIT(25)
+
+/* STL for MATH, SSEL for MATHI */
+#define MATH_STL_SHIFT 24
+#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
+#define MATH_STL BIT(24)
+
+#define MATH_SSEL_SHIFT 24
+#define MATH_SSEL_MASK (1 << MATH_SSEL_SHIFT)
+#define MATH_SSEL BIT(24)
+
+#define MATH_SWP_SHIFT 0
+#define MATH_SWP_MASK (1 << MATH_SWP_SHIFT)
+#define MATH_SWP BIT(0)
+
+/* Function selectors */
+#define MATH_FUN_SHIFT 20
+#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT) /* ZBYT is for MATH */
+#define MATH_FUN_FBYT (0x0a << MATH_FUN_SHIFT) /* FBYT is for MATHI */
+#define MATH_FUN_BSWAP (0x0b << MATH_FUN_SHIFT)
+
+/* Source 0 selectors */
+#define MATH_SRC0_SHIFT 16
+#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_DPOVRD (0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ONE (0x0f << MATH_SRC0_SHIFT)
+
+/* Source 1 selectors */
+#define MATH_SRC1_SHIFT 12
+#define MATHI_SRC1_SHIFT 16
+#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_VARSEQINLEN (0x08 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_VARSEQOUTLEN (0x09 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
+#define MATH_SRC1_JOBSOURCE (0x0d << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
+
+/* Destination selectors */
+#define MATH_DEST_SHIFT 8
+#define MATHI_DEST_SHIFT 12
+#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
+
+/* MATHI Immediate value */
+#define MATHI_IMM_SHIFT 4
+#define MATHI_IMM_MASK (0xff << MATHI_IMM_SHIFT)
+
+/* Length selectors */
+#define MATH_LEN_SHIFT 0
+#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE 0x01
+#define MATH_LEN_2BYTE 0x02
+#define MATH_LEN_4BYTE 0x04
+#define MATH_LEN_8BYTE 0x08
+
+/*
+ * JUMP Command Constructs
+ */
+
+#define JUMP_CLASS_SHIFT 25
+#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_NONE 0
+#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT)
+
+#define JUMP_JSL_SHIFT 24
+#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL BIT(24)
+
+#define JUMP_TYPE_SHIFT 20
+#define JUMP_TYPE_MASK (0x0f << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL_INC (0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_GOSUB (0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL_DEC (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL (0x04 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_RETURN (0x06 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT (0x08 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER (0x0c << JUMP_TYPE_SHIFT)
+
+#define JUMP_TEST_SHIFT 16
+#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
+
+/* Condition codes. JSL bit is factored in */
+#define JUMP_COND_SHIFT 8
+#define JUMP_COND_MASK ((0xff << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_PK_0 BIT(15)
+#define JUMP_COND_PK_GCD_1 BIT(14)
+#define JUMP_COND_PK_PRIME BIT(13)
+#define JUMP_COND_MATH_N BIT(11)
+#define JUMP_COND_MATH_Z BIT(10)
+#define JUMP_COND_MATH_C BIT(9)
+#define JUMP_COND_MATH_NV BIT(8)
+
+#define JUMP_COND_JQP (BIT(15) | JUMP_JSL)
+#define JUMP_COND_SHRD (BIT(14) | JUMP_JSL)
+#define JUMP_COND_SELF (BIT(13) | JUMP_JSL)
+#define JUMP_COND_CALM (BIT(12) | JUMP_JSL)
+#define JUMP_COND_NIP (BIT(11) | JUMP_JSL)
+#define JUMP_COND_NIFP (BIT(10) | JUMP_JSL)
+#define JUMP_COND_NOP (BIT(9) | JUMP_JSL)
+#define JUMP_COND_NCP (BIT(8) | JUMP_JSL)
+
+/* Source / destination selectors */
+#define JUMP_SRC_DST_SHIFT 12
+#define JUMP_SRC_DST_MASK (0x0f << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH0 (0x00 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH1 (0x01 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH2 (0x02 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH3 (0x03 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_DPOVRD (0x07 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_SEQINLEN (0x08 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_SEQOUTLEN (0x09 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_VARSEQINLEN (0x0a << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_VARSEQOUTLEN (0x0b << JUMP_SRC_DST_SHIFT)
+
+#define JUMP_OFFSET_SHIFT 0
+#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
+
+/*
+ * NFIFO ENTRY
+ * Data Constructs
+ *
+ */
+#define NFIFOENTRY_DEST_SHIFT 30
+#define NFIFOENTRY_DEST_MASK ((uint32_t)(3 << NFIFOENTRY_DEST_SHIFT))
+#define NFIFOENTRY_DEST_DECO (0 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS1 (1 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS2 ((uint32_t)(2 << NFIFOENTRY_DEST_SHIFT))
+#define NFIFOENTRY_DEST_BOTH ((uint32_t)(3 << NFIFOENTRY_DEST_SHIFT))
+
+#define NFIFOENTRY_LC2_SHIFT 29
+#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2 BIT(29)
+
+#define NFIFOENTRY_LC1_SHIFT 28
+#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1 BIT(28)
+
+#define NFIFOENTRY_FC2_SHIFT 27
+#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2 BIT(27)
+
+#define NFIFOENTRY_FC1_SHIFT 26
+#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1 BIT(26)
+
+#define NFIFOENTRY_STYPE_SHIFT 24
+#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_DFIFO (0 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_OFIFO (1 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_PAD (2 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_SNOOP (3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_ALTSOURCE ((0 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+#define NFIFOENTRY_STYPE_OFIFO_SYNC ((1 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+#define NFIFOENTRY_STYPE_SNOOP_ALT ((3 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+
+#define NFIFOENTRY_DTYPE_SHIFT 20
+#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_BND_SHIFT 19
+#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND BIT(19)
+
+#define NFIFOENTRY_PTYPE_SHIFT 16
+#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_OC_SHIFT 15
+#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC BIT(15)
+
+#define NFIFOENTRY_PR_SHIFT 15
+#define NFIFOENTRY_PR_MASK (1 << NFIFOENTRY_PR_SHIFT)
+#define NFIFOENTRY_PR BIT(15)
+
+#define NFIFOENTRY_AST_SHIFT 14
+#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_AST_SHIFT)
+#define NFIFOENTRY_AST BIT(14)
+
+#define NFIFOENTRY_BM_SHIFT 11
+#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM BIT(11)
+
+#define NFIFOENTRY_PS_SHIFT 10
+#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS BIT(10)
+
+#define NFIFOENTRY_DLEN_SHIFT 0
+#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT)
+
+#define NFIFOENTRY_PLEN_SHIFT 0
+#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
+
+/* Append Load Immediate Command */
+#define FD_CMD_APPEND_LOAD_IMMEDIATE BIT(31)
+
+/* Set SEQ LIODN equal to the Non-SEQ LIODN for the job */
+#define FD_CMD_SET_SEQ_LIODN_EQUAL_NONSEQ_LIODN BIT(30)
+
+/* Frame Descriptor Command for Replacement Job Descriptor */
+#define FD_CMD_REPLACE_JOB_DESC BIT(29)
+
+#endif /* __RTA_DESC_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
new file mode 100644
index 00000000..c71ada07
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -0,0 +1,465 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DESC_ALGO_H__
+#define __DESC_ALGO_H__
+
+#include "hw/rta.h"
+#include "common.h"
+
+/**
+ * DOC: Algorithms - Shared Descriptor Constructors
+ *
+ * Shared descriptors for algorithms (i.e. not for protocols).
+ */
+
+/**
+ * cnstr_shdsc_snow_f8 - SNOW/f8 (UEA2) as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * @dir: Cipher direction (DIR_ENC/DIR_DEC)
+ * @count: UEA2 count value (32 bits)
+ * @bearer: UEA2 bearer ID (5 bits)
+ * @direction: UEA2 direction (1 bit)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_snow_f8(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t dir,
+ uint32_t count, uint8_t bearer, uint8_t direction)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t ct = count;
+ uint8_t br = bearer;
+ uint8_t dr = direction;
+ uint32_t context[2] = {ct, (br << 27) | (dr << 26)};
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab32(context[0]);
+ context[1] = swab32(context[1]);
+ }
+
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8, OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT1, 0, 8, IMMED | COPY);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_snow_f9 - SNOW/f9 (UIA2) as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * @dir: cipher direction (DIR_ENC/DIR_DEC)
+ * @count: UEA2 count value (32 bits)
+ * @fresh: UEA2 fresh value ID (32 bits)
+ * @direction: UEA2 direction (1 bit)
+ * @datalen: size of data
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_snow_f9(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t dir, uint32_t count,
+ uint32_t fresh, uint8_t direction, uint32_t datalen)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint64_t ct = count;
+ uint64_t fr = fresh;
+ uint64_t dr = direction;
+ uint64_t context[2];
+
+ context[0] = (ct << 32) | (dr << 26);
+ context[1] = fr << 32;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab64(context[0]);
+ context[1] = swab64(context[1]);
+ }
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT2, 0, 16, IMMED | COPY);
+ SEQFIFOLOAD(p, BIT_DATA, datalen, CLASS2 | LAST2);
+ /* Save lower half of MAC out into a 32-bit sequence */
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_blkcipher - block cipher transformation
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * @iv: IV data; if NULL, "ivlen" bytes from the input frame will be read as IV
+ * @ivlen: IV length
+ * @dir: DIR_ENC/DIR_DEC
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t *iv,
+ uint32_t ivlen, uint8_t dir)
+{
+ struct program prg;
+ struct program *p = &prg;
+ const bool is_aes_dec = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES);
+ LABEL(keyjmp);
+ LABEL(skipdk);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pskipdk);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (is_aes_dec) {
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+
+ pskipdk = JUMP(p, skipdk, LOCAL_JUMP, ALL_TRUE, 0);
+ }
+ SET_LABEL(p, keyjmp);
+
+ if (is_aes_dec) {
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
+ OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE, dir);
+ SET_LABEL(p, skipdk);
+ } else {
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ }
+
+ if (iv)
+ /* IV load, convert size */
+ LOAD(p, (uintptr_t)iv, CONTEXT1, 0, ivlen, IMMED | COPY);
+ else
+ /* IV is present first before the actual message */
+ SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+
+ /* Insert sequence load/store with VLF */
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ if (is_aes_dec)
+ PATCH_JUMP(p, pskipdk, skipdk);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_hmac - HMAC shared
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions;
+ * message digest algorithm: OP_ALG_ALGSEL_MD5/ SHA1-512.
+ * @do_icv: 0 if ICV checking is not desired, any other value if ICV checking
+ * is needed for all the packets processed by this shared descriptor
+ * @trunc_len: Length of the truncated ICV to be written in the output buffer, 0
+ * if no truncation is needed
+ *
+ * Note: There's no support for keys longer than the block size of the
+ * underlying hash function, according to the selected algorithm.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_hmac(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t do_icv,
+ uint8_t trunc_len)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint8_t storelen, opicv, dir;
+ LABEL(keyjmp);
+ LABEL(jmpprecomp);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pjmpprecomp);
+
+ /* Compute fixed-size store based on alg selection */
+ switch (authdata->algtype) {
+ case OP_ALG_ALGSEL_MD5:
+ storelen = 16;
+ break;
+ case OP_ALG_ALGSEL_SHA1:
+ storelen = 20;
+ break;
+ case OP_ALG_ALGSEL_SHA224:
+ storelen = 28;
+ break;
+ case OP_ALG_ALGSEL_SHA256:
+ storelen = 32;
+ break;
+ case OP_ALG_ALGSEL_SHA384:
+ storelen = 48;
+ break;
+ case OP_ALG_ALGSEL_SHA512:
+ storelen = 64;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ trunc_len = trunc_len && (trunc_len < storelen) ? trunc_len : storelen;
+
+ opicv = do_icv ? ICV_CHECK_ENABLE : ICV_CHECK_DISABLE;
+ dir = do_icv ? DIR_DEC : DIR_ENC;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Do operation */
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC,
+ OP_ALG_AS_INITFINAL, opicv, dir);
+
+ pjmpprecomp = JUMP(p, jmpprecomp, LOCAL_JUMP, ALL_TRUE, 0);
+ SET_LABEL(p, keyjmp);
+
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL, opicv, dir);
+
+ SET_LABEL(p, jmpprecomp);
+
+ /* compute sequences */
+ if (opicv == ICV_CHECK_ENABLE)
+ MATHB(p, SEQINSZ, SUB, trunc_len, VSEQINSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+
+ /* Do load (variable length) */
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+
+ if (opicv == ICV_CHECK_ENABLE)
+ SEQFIFOLOAD(p, ICV2, trunc_len, LAST2);
+ else
+ SEQSTORE(p, CONTEXT2, 0, trunc_len, 0);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pjmpprecomp, jmpprecomp);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_kasumi_f8 - KASUMI F8 (Confidentiality) as a shared descriptor
+ * (ETSI "Document 1: f8 and f9 specification")
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * @dir: cipher direction (DIR_ENC/DIR_DEC)
+ * @count: count value (32 bits)
+ * @bearer: bearer ID (5 bits)
+ * @direction: direction (1 bit)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_kasumi_f8(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t dir,
+ uint32_t count, uint8_t bearer, uint8_t direction)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint64_t ct = count;
+ uint64_t br = bearer;
+ uint64_t dr = direction;
+ uint32_t context[2] = { ct, (br << 27) | (dr << 26) };
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab32(context[0]);
+ context[1] = swab32(context[1]);
+ }
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_KASUMI, OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT1, 0, 8, IMMED | COPY);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_kasumi_f9 - KASUMI F9 (Integrity) as a shared descriptor
+ * (ETSI "Document 1: f8 and f9 specification")
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * @dir: cipher direction (DIR_ENC/DIR_DEC)
+ * @count: count value (32 bits)
+ * @fresh: fresh value ID (32 bits)
+ * @direction: direction (1 bit)
+ * @datalen: size of data
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_kasumi_f9(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t dir,
+ uint32_t count, uint32_t fresh, uint8_t direction,
+ uint32_t datalen)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint16_t ctx_offset = 16;
+ uint32_t context[6] = {count, direction << 26, fresh, 0, 0, 0};
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab32(context[0]);
+ context[1] = swab32(context[1]);
+ context[2] = swab32(context[2]);
+ }
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_KASUMI, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT1, 0, 24, IMMED | COPY);
+ SEQFIFOLOAD(p, BIT_DATA, datalen, CLASS1 | LAST1);
+ /* Save output MAC of DWORD 2 into a 32-bit sequence */
+ SEQSTORE(p, CONTEXT1, ctx_offset, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_crc(uint32_t *descbuf, bool swap)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_CRC,
+ OP_ALG_AAI_802 | OP_ALG_AAI_DOC,
+ OP_ALG_AS_FINALIZE, 0, DIR_ENC);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+#endif /* __DESC_ALGO_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/common.h b/drivers/crypto/dpaa2_sec/hw/desc/common.h
new file mode 100644
index 00000000..6b254908
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/desc/common.h
@@ -0,0 +1,131 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DESC_COMMON_H__
+#define __DESC_COMMON_H__
+
+#include "hw/rta.h"
+
+/**
+ * DOC: Shared Descriptor Constructors - shared structures
+ *
+ * Data structures shared between algorithm, protocol implementations.
+ */
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ * functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @key: address where algorithm key resides; virtual address if key_type is
+ * RTA_DATA_IMM, physical (bus) address if key_type is RTA_DATA_PTR or
+ * RTA_DATA_IMM_DMA.
+ * @key_enc_flags: key encryption flags; see encrypt_flags parameter of KEY
+ * command for valid values.
+ * @key_type: enum rta_data_type
+ * @algmode: algorithm mode selector; for valid values, see documentation of the
+ * functions where it is used.
+ */
+struct alginfo {
+ uint32_t algtype;
+ uint32_t keylen;
+ uint64_t key;
+ uint32_t key_enc_flags;
+ enum rta_data_type key_type;
+ uint16_t algmode;
+};
+
+#define INLINE_KEY(alginfo) inline_flags(alginfo->key_type)
+
+/**
+ * rta_inline_query() - Provide indications on which data items can be inlined
+ * and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ * excluding the data items to be inlined (or corresponding
+ * pointer if an item is not inlined). Each cnstr_* function that
+ * generates descriptors should have a define mentioning
+ * corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ * together with the shared descriptor.
+ * @data_len: Array of lengths of the data items trying to be inlined
+ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
+ * otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
+ * check @inl_mask for details.
+ */
+static inline int
+rta_inline_query(unsigned int sd_base_len,
+ unsigned int jd_len,
+ unsigned int *data_len,
+ uint32_t *inl_mask,
+ unsigned int count)
+{
+ int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+ unsigned int i;
+
+ *inl_mask = 0;
+ for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+ if (rem_bytes - (int)(data_len[i] +
+ (count - i - 1) * CAAM_PTR_SZ) >= 0) {
+ rem_bytes -= data_len[i];
+ *inl_mask |= (1 << i);
+ } else {
+ rem_bytes -= CAAM_PTR_SZ;
+ }
+ }
+
+ return (rem_bytes >= 0) ? 0 : -1;
+}
+
+/**
+ * struct protcmd - Container for Protocol Operation Command fields
+ * @optype: command type
+ * @protid: protocol Identifier
+ * @protinfo: protocol Information
+ */
+struct protcmd {
+ uint32_t optype;
+ uint32_t protid;
+ uint16_t protinfo;
+};
+
+#endif /* __DESC_COMMON_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
new file mode 100644
index 00000000..c63d0dac
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
@@ -0,0 +1,1547 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DESC_IPSEC_H__
+#define __DESC_IPSEC_H__
+
+#include "hw/rta.h"
+#include "common.h"
+
+/**
+ * DOC: IPsec Shared Descriptor Constructors
+ *
+ * Shared descriptors for IPsec protocol.
+ */
+
+/* General IPSec ESP encap / decap PDB options */
+
+/**
+ * PDBOPTS_ESP_ESN - Extended sequence included
+ */
+#define PDBOPTS_ESP_ESN 0x10
+
+/**
+ * PDBOPTS_ESP_IPVSN - Process IPv6 header
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_IPVSN 0x02
+
+/**
+ * PDBOPTS_ESP_TUNNEL - Tunnel mode next-header byte
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_TUNNEL 0x01
+
+/* IPSec ESP Encap PDB options */
+
+/**
+ * PDBOPTS_ESP_UPDATE_CSUM - Update ip header checksum
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_UPDATE_CSUM 0x80
+
+/**
+ * PDBOPTS_ESP_DIFFSERV - Copy TOS/TC from inner iphdr
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_DIFFSERV 0x40
+
+/**
+ * PDBOPTS_ESP_IVSRC - IV comes from internal random gen
+ */
+#define PDBOPTS_ESP_IVSRC 0x20
+
+/**
+ * PDBOPTS_ESP_IPHDRSRC - IP header comes from PDB
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_IPHDRSRC 0x08
+
+/**
+ * PDBOPTS_ESP_INCIPHDR - Prepend IP header to output frame
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_INCIPHDR 0x04
+
+/**
+ * PDBOPTS_ESP_OIHI_MASK - Mask for Outer IP Header Included
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_MASK 0x0c
+
+/**
+ * PDBOPTS_ESP_OIHI_PDB_INL - Prepend IP header to output frame from PDB (where
+ * it is inlined).
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_PDB_INL 0x0c
+
+/**
+ * PDBOPTS_ESP_OIHI_PDB_REF - Prepend IP header to output frame from PDB
+ * (referenced by pointer).
+ *
+ * Vlid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_PDB_REF 0x08
+
+/**
+ * PDBOPTS_ESP_OIHI_IF - Prepend IP header to output frame from input frame
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_IF 0x04
+
+/**
+ * PDBOPTS_ESP_NAT - Enable RFC 3948 UDP-encapsulated-ESP
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_NAT 0x02
+
+/**
+ * PDBOPTS_ESP_NUC - Enable NAT UDP Checksum
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_NUC 0x01
+
+/* IPSec ESP Decap PDB options */
+
+/**
+ * PDBOPTS_ESP_ARS_MASK - antireplay window mask
+ */
+#define PDBOPTS_ESP_ARS_MASK 0xc0
+
+/**
+ * PDBOPTS_ESP_ARSNONE - No antireplay window
+ */
+#define PDBOPTS_ESP_ARSNONE 0x00
+
+/**
+ * PDBOPTS_ESP_ARS64 - 64-entry antireplay window
+ */
+#define PDBOPTS_ESP_ARS64 0xc0
+
+/**
+ * PDBOPTS_ESP_ARS128 - 128-entry antireplay window
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_ARS128 0x80
+
+/**
+ * PDBOPTS_ESP_ARS32 - 32-entry antireplay window
+ */
+#define PDBOPTS_ESP_ARS32 0x40
+
+/**
+ * PDBOPTS_ESP_VERIFY_CSUM - Validate ip header checksum
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_VERIFY_CSUM 0x20
+
+/**
+ * PDBOPTS_ESP_TECN - Implement RRFC6040 ECN tunneling from outer header to
+ * inner header.
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_TECN 0x20
+
+/**
+ * PDBOPTS_ESP_OUTFMT - Output only decapsulation
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_OUTFMT 0x08
+
+/**
+ * PDBOPTS_ESP_AOFL - Adjust out frame len
+ *
+ * Valid only for IPsec legacy mode and for SEC >= 5.3.
+ */
+#define PDBOPTS_ESP_AOFL 0x04
+
+/**
+ * PDBOPTS_ESP_ETU - EtherType Update
+ *
+ * Add corresponding ethertype (0x0800 for IPv4, 0x86dd for IPv6) in the output
+ * frame.
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_ETU 0x01
+
+#define PDBHMO_ESP_DECAP_SHIFT 28
+#define PDBHMO_ESP_ENCAP_SHIFT 28
+#define PDBNH_ESP_ENCAP_SHIFT 16
+#define PDBNH_ESP_ENCAP_MASK (0xff << PDBNH_ESP_ENCAP_SHIFT)
+#define PDBHDRLEN_ESP_DECAP_SHIFT 16
+#define PDBHDRLEN_MASK (0x0fff << PDBHDRLEN_ESP_DECAP_SHIFT)
+#define PDB_NH_OFFSET_SHIFT 8
+#define PDB_NH_OFFSET_MASK (0xff << PDB_NH_OFFSET_SHIFT)
+
+/**
+ * PDBHMO_ESP_DECAP_DTTL - IPsec ESP decrement TTL (IPv4) / Hop limit (IPv6)
+ * HMO option.
+ */
+#define PDBHMO_ESP_DECAP_DTTL (0x02 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_ENCAP_DTTL - IPsec ESP increment TTL (IPv4) / Hop limit (IPv6)
+ * HMO option.
+ */
+#define PDBHMO_ESP_ENCAP_DTTL (0x02 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DIFFSERV - (Decap) DiffServ Copy - Copy the IPv4 TOS or IPv6
+ * Traffic Class byte from the outer IP header to the
+ * inner IP header.
+ */
+#define PDBHMO_ESP_DIFFSERV (0x01 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_SNR - (Encap) - Sequence Number Rollover control
+ *
+ * Configures behaviour in case of SN / ESN rollover:
+ * error if SNR = 1, rollover allowed if SNR = 0.
+ * Valid only for IPsec new mode.
+ */
+#define PDBHMO_ESP_SNR (0x01 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DFBIT - (Encap) Copy DF bit - if an IPv4 tunnel mode outer IP
+ * header is coming from the PDB, copy the DF bit from the
+ * inner IP header to the outer IP header.
+ */
+#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DFV - (Decap) - DF bit value
+ *
+ * If ODF = 1, DF bit in output frame is replaced by DFV.
+ * Valid only from SEC Era 5 onwards.
+ */
+#define PDBHMO_ESP_DFV (0x04 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_ODF - (Decap) Override DF bit in IPv4 header of decapsulated
+ * output frame.
+ *
+ * If ODF = 1, DF is replaced with the value of DFV bit.
+ * Valid only from SEC Era 5 onwards.
+ */
+#define PDBHMO_ESP_ODF (0x08 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * struct ipsec_encap_cbc - PDB part for IPsec CBC encapsulation
+ * @iv: 16-byte array initialization vector
+ */
+struct ipsec_encap_cbc {
+ uint8_t iv[16];
+};
+
+
+/**
+ * struct ipsec_encap_ctr - PDB part for IPsec CTR encapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ctr {
+ uint8_t ctr_nonce[4];
+ uint32_t ctr_initial;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_ccm - PDB part for IPsec CCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ccm {
+ uint8_t salt[4];
+ uint32_t ccm_opt;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_gcm - PDB part for IPsec GCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @rsvd: reserved, do not use
+ * @iv: initialization vector
+ */
+struct ipsec_encap_gcm {
+ uint8_t salt[4];
+ uint32_t rsvd;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_pdb - PDB for IPsec encapsulation
+ * @options: MSB-LSB description (both for legacy and new modes)
+ * hmo (header manipulation options) - 4b
+ * reserved - 4b
+ * next header (legacy) / reserved (new) - 8b
+ * next header offset (legacy) / AOIPHO (actual outer IP header offset) - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @spi: IPsec SPI (Security Parameters Index)
+ * @ip_hdr_len: optional IP Header length (in bytes)
+ * reserved - 16b
+ * Opt. IP Hdr Len - 16b
+ * @ip_hdr: optional IP Header content (only for IPsec legacy mode)
+ */
+struct ipsec_encap_pdb {
+ uint32_t options;
+ uint32_t seq_num_ext_hi;
+ uint32_t seq_num;
+ union {
+ struct ipsec_encap_cbc cbc;
+ struct ipsec_encap_ctr ctr;
+ struct ipsec_encap_ccm ccm;
+ struct ipsec_encap_gcm gcm;
+ };
+ uint32_t spi;
+ uint32_t ip_hdr_len;
+ uint8_t ip_hdr[0];
+};
+
+static inline unsigned int
+__rta_copy_ipsec_encap_pdb(struct program *program,
+ struct ipsec_encap_pdb *pdb,
+ uint32_t algtype)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out32(program, pdb->options);
+ __rta_out32(program, pdb->seq_num_ext_hi);
+ __rta_out32(program, pdb->seq_num);
+
+ switch (algtype & OP_PCL_IPSEC_CIPHER_MASK) {
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_NULL:
+ rta_copy_data(program, pdb->cbc.iv, sizeof(pdb->cbc.iv));
+ break;
+
+ case OP_PCL_IPSEC_AES_CTR:
+ rta_copy_data(program, pdb->ctr.ctr_nonce,
+ sizeof(pdb->ctr.ctr_nonce));
+ __rta_out32(program, pdb->ctr.ctr_initial);
+ __rta_out64(program, true, pdb->ctr.iv);
+ break;
+
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ rta_copy_data(program, pdb->ccm.salt, sizeof(pdb->ccm.salt));
+ __rta_out32(program, pdb->ccm.ccm_opt);
+ __rta_out64(program, true, pdb->ccm.iv);
+ break;
+
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ rta_copy_data(program, pdb->gcm.salt, sizeof(pdb->gcm.salt));
+ __rta_out32(program, pdb->gcm.rsvd);
+ __rta_out64(program, true, pdb->gcm.iv);
+ break;
+ }
+
+ __rta_out32(program, pdb->spi);
+ __rta_out32(program, pdb->ip_hdr_len);
+
+ return start_pc;
+}
+
+/**
+ * struct ipsec_decap_cbc - PDB part for IPsec CBC decapsulation
+ * @rsvd: reserved, do not use
+ */
+struct ipsec_decap_cbc {
+ uint32_t rsvd[2];
+};
+
+/**
+ * struct ipsec_decap_ctr - PDB part for IPsec CTR decapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ */
+struct ipsec_decap_ctr {
+ uint8_t ctr_nonce[4];
+ uint32_t ctr_initial;
+};
+
+/**
+ * struct ipsec_decap_ccm - PDB part for IPsec CCM decapsulation
+ * @salt: 3-byte salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ */
+struct ipsec_decap_ccm {
+ uint8_t salt[4];
+ uint32_t ccm_opt;
+};
+
+/**
+ * struct ipsec_decap_gcm - PDB part for IPsec GCN decapsulation
+ * @salt: 4-byte salt
+ * @rsvd: reserved, do not use
+ */
+struct ipsec_decap_gcm {
+ uint8_t salt[4];
+ uint32_t rsvd;
+};
+
+/**
+ * struct ipsec_decap_pdb - PDB for IPsec decapsulation
+ * @options: MSB-LSB description (both for legacy and new modes)
+ * hmo (header manipulation options) - 4b
+ * IP header length - 12b
+ * next header offset (legacy) / AOIPHO (actual outer IP header offset) - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @anti_replay: Anti-replay window; size depends on ARS (option flags);
+ * format must be Big Endian, irrespective of platform
+ */
+struct ipsec_decap_pdb {
+ uint32_t options;
+ union {
+ struct ipsec_decap_cbc cbc;
+ struct ipsec_decap_ctr ctr;
+ struct ipsec_decap_ccm ccm;
+ struct ipsec_decap_gcm gcm;
+ };
+ uint32_t seq_num_ext_hi;
+ uint32_t seq_num;
+ uint32_t anti_replay[4];
+};
+
+static inline unsigned int
+__rta_copy_ipsec_decap_pdb(struct program *program,
+ struct ipsec_decap_pdb *pdb,
+ uint32_t algtype)
+{
+ unsigned int start_pc = program->current_pc;
+ unsigned int i, ars;
+
+ __rta_out32(program, pdb->options);
+
+ switch (algtype & OP_PCL_IPSEC_CIPHER_MASK) {
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_NULL:
+ __rta_out32(program, pdb->cbc.rsvd[0]);
+ __rta_out32(program, pdb->cbc.rsvd[1]);
+ break;
+
+ case OP_PCL_IPSEC_AES_CTR:
+ rta_copy_data(program, pdb->ctr.ctr_nonce,
+ sizeof(pdb->ctr.ctr_nonce));
+ __rta_out32(program, pdb->ctr.ctr_initial);
+ break;
+
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ rta_copy_data(program, pdb->ccm.salt, sizeof(pdb->ccm.salt));
+ __rta_out32(program, pdb->ccm.ccm_opt);
+ break;
+
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ rta_copy_data(program, pdb->gcm.salt, sizeof(pdb->gcm.salt));
+ __rta_out32(program, pdb->gcm.rsvd);
+ break;
+ }
+
+ __rta_out32(program, pdb->seq_num_ext_hi);
+ __rta_out32(program, pdb->seq_num);
+
+ switch (pdb->options & PDBOPTS_ESP_ARS_MASK) {
+ case PDBOPTS_ESP_ARS128:
+ ars = 4;
+ break;
+ case PDBOPTS_ESP_ARS64:
+ ars = 2;
+ break;
+ case PDBOPTS_ESP_ARS32:
+ ars = 1;
+ break;
+ case PDBOPTS_ESP_ARSNONE:
+ default:
+ ars = 0;
+ break;
+ }
+
+ for (i = 0; i < ars; i++)
+ __rta_out_be32(program, pdb->anti_replay[i]);
+
+ return start_pc;
+}
+
+/**
+ * enum ipsec_icv_size - Type selectors for icv size in IPsec protocol
+ * @IPSEC_ICV_MD5_SIZE: full-length MD5 ICV
+ * @IPSEC_ICV_MD5_TRUNC_SIZE: truncated MD5 ICV
+ */
+enum ipsec_icv_size {
+ IPSEC_ICV_MD5_SIZE = 16,
+ IPSEC_ICV_MD5_TRUNC_SIZE = 12
+};
+
+/*
+ * IPSec ESP Datapath Protocol Override Register (DPOVRD)
+ */
+
+#define IPSEC_DECO_DPOVRD_USE 0x80
+
+struct ipsec_deco_dpovrd {
+ uint8_t ovrd_ecn;
+ uint8_t ip_hdr_len;
+ uint8_t nh_offset;
+ union {
+ uint8_t next_header; /* next header if encap */
+ uint8_t rsvd; /* reserved if decap */
+ };
+};
+
+struct ipsec_new_encap_deco_dpovrd {
+#define IPSEC_NEW_ENCAP_DECO_DPOVRD_USE 0x8000
+ uint16_t ovrd_ip_hdr_len; /* OVRD + outer IP header material
+ * length
+ */
+#define IPSEC_NEW_ENCAP_OIMIF 0x80
+ uint8_t oimif_aoipho; /* OIMIF + actual outer IP header
+ * offset
+ */
+ uint8_t rsvd;
+};
+
+struct ipsec_new_decap_deco_dpovrd {
+ uint8_t ovrd;
+ uint8_t aoipho_hi; /* upper nibble of actual outer IP
+ * header
+ */
+ uint16_t aoipho_lo_ip_hdr_len; /* lower nibble of actual outer IP
+ * header + outer IP header material
+ */
+};
+
+static inline void
+__gen_auth_key(struct program *program, struct alginfo *authdata)
+{
+ uint32_t dkp_protid;
+
+ switch (authdata->algtype & OP_PCL_IPSEC_AUTH_MASK) {
+ case OP_PCL_IPSEC_HMAC_MD5_96:
+ case OP_PCL_IPSEC_HMAC_MD5_128:
+ dkp_protid = OP_PCLID_DKP_MD5;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA1_96:
+ case OP_PCL_IPSEC_HMAC_SHA1_160:
+ dkp_protid = OP_PCLID_DKP_SHA1;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_256_128:
+ dkp_protid = OP_PCLID_DKP_SHA256;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_384_192:
+ dkp_protid = OP_PCLID_DKP_SHA384;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_512_256:
+ dkp_protid = OP_PCLID_DKP_SHA512;
+ break;
+ default:
+ KEY(program, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ return;
+ }
+
+ if (authdata->key_type == RTA_DATA_PTR)
+ DKP_PROTOCOL(program, dkp_protid, OP_PCL_DKP_SRC_PTR,
+ OP_PCL_DKP_DST_PTR, (uint16_t)authdata->keylen,
+ authdata->key, authdata->key_type);
+ else
+ DKP_PROTOCOL(program, dkp_protid, OP_PCL_DKP_SRC_IMM,
+ OP_PCL_DKP_DST_IMM, (uint16_t)authdata->keylen,
+ authdata->key, authdata->key_type);
+}
+
+/**
+ * cnstr_shdsc_ipsec_encap - IPSec ESP encapsulation protocol-level shared
+ * descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for a details of the encapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions
+ * If an authentication key is required by the protocol:
+ * -For SEC Eras 1-5, an MDHA split key must be provided;
+ * Note that the size of the split key itself must be specified.
+ * -For SEC Eras 6+, a "normal" key must be provided; DKP (Derived
+ * Key Protocol) will be used to compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
+ struct ipsec_encap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+ COPY_DATA(p, pdb->ip_hdr, pdb->ip_hdr_len);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, BOTH|SHRD);
+ if (authdata->keylen) {
+ if (rta_sec_era < RTA_SEC_ERA_6)
+ KEY(p, MDHA_SPLIT_KEY, authdata->key_enc_flags,
+ authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ else
+ __gen_auth_key(p, authdata);
+ }
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_ipsec_decap - IPSec ESP decapsulation protocol-level shared
+ * descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the decapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions
+ * If an authentication key is required by the protocol:
+ * -For SEC Eras 1-5, an MDHA split key must be provided;
+ * Note that the size of the split key itself must be specified.
+ * -For SEC Eras 6+, a "normal" key must be provided; DKP (Derived
+ * Key Protocol) will be used to compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_decap(uint32_t *descbuf, bool ps, bool swap,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, BOTH|SHRD);
+ if (authdata->keylen) {
+ if (rta_sec_era < RTA_SEC_ERA_6)
+ KEY(p, MDHA_SPLIT_KEY, authdata->key_enc_flags,
+ authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ else
+ __gen_auth_key(p, authdata);
+ }
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_ipsec_encap_des_aes_xcbc - IPSec DES-CBC/3DES-CBC and
+ * AES-XCBC-MAC-96 ESP encapsulation shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for a details of the encapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_PCL_IPSEC_DES, OP_PCL_IPSEC_3DES.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm value: OP_PCL_IPSEC_AES_XCBC_MAC_96.
+ *
+ * Supported only for platforms with 32-bit address pointers and SEC ERA 4 or
+ * higher. The tunnel/transport mode of the IPsec ESP is supported only if the
+ * Outer/Transport IP Header is present in the encapsulation output packet.
+ * The descriptor performs DES-CBC/3DES-CBC & HMAC-MD5-96 and then rereads
+ * the input packet to do the AES-XCBC-MAC-96 calculation and to overwrite
+ * the MD5 ICV.
+ * The descriptor uses all the benefits of the built-in protocol by computing
+ * the IPsec ESP with a hardware supported algorithms combination
+ * (DES-CBC/3DES-CBC & HMAC-MD5-96). The HMAC-MD5 authentication algorithm
+ * was chosen in order to speed up the computational time for this intermediate
+ * step.
+ * Warning: The user must allocate at least 32 bytes for the authentication key
+ * (in order to use it also with HMAC-MD5-96),even when using a shorter key
+ * for the AES-XCBC-MAC-96.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_encap_des_aes_xcbc(uint32_t *descbuf,
+ struct ipsec_encap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(hdr);
+ LABEL(shd_ptr);
+ LABEL(keyjump);
+ LABEL(outptr);
+ LABEL(swapped_seqin_fields);
+ LABEL(swapped_seqin_ptr);
+ REFERENCE(phdr);
+ REFERENCE(pkeyjump);
+ REFERENCE(move_outlen);
+ REFERENCE(move_seqout_ptr);
+ REFERENCE(swapped_seqin_ptr_jump);
+ REFERENCE(write_swapped_seqin_ptr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+ COPY_DATA(p, pdb->ip_hdr, pdb->ip_hdr_len);
+ SET_LABEL(p, hdr);
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF);
+ /*
+ * Hard-coded KEY arguments. The descriptor uses all the benefits of
+ * the built-in protocol by computing the IPsec ESP with a hardware
+ * supported algorithms combination (DES-CBC/3DES-CBC & HMAC-MD5-96).
+ * The HMAC-MD5 authentication algorithm was chosen with
+ * the keys options from below in order to speed up the computational
+ * time for this intermediate step.
+ * Warning: The user must allocate at least 32 bytes for
+ * the authentication key (in order to use it also with HMAC-MD5-96),
+ * even when using a shorter key for the AES-XCBC-MAC-96.
+ */
+ KEY(p, MDHA_SPLIT_KEY, 0, authdata->key, 32, INLINE_KEY(authdata));
+ SET_LABEL(p, keyjump);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ IMMED);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL, OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | OP_PCL_IPSEC_HMAC_MD5_96));
+ /* Swap SEQINPTR to SEQOUTPTR. */
+ move_seqout_ptr = MOVE(p, DESCBUF, 0, MATH1, 0, 16, WAITCOMP | IMMED);
+ MATHB(p, MATH1, AND, ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR), MATH1,
+ 8, IFB | IMMED2);
+/*
+ * TODO: RTA currently doesn't support creating a LOAD command
+ * with another command as IMM.
+ * To be changed when proper support is added in RTA.
+ */
+ LOAD(p, 0xa00000e5, MATH3, 4, 4, IMMED);
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ write_swapped_seqin_ptr = MOVE(p, MATH1, 0, DESCBUF, 0, 20, WAITCOMP |
+ IMMED);
+ swapped_seqin_ptr_jump = JUMP(p, swapped_seqin_ptr, LOCAL_JUMP,
+ ALL_TRUE, 0);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ 0);
+ SEQOUTPTR(p, 0, 65535, RTO);
+ move_outlen = MOVE(p, DESCBUF, 0, MATH0, 4, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH0, SUB,
+ (uint64_t)(pdb->ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE),
+ VSEQINSZ, 4, IMMED2);
+ MATHB(p, MATH0, SUB, IPSEC_ICV_MD5_TRUNC_SIZE, VSEQOUTSZ, 4, IMMED2);
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_XCBC_MAC,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+ SEQFIFOLOAD(p, SKIP, pdb->ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | FLUSH1 | LAST1);
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+ SEQSTORE(p, CONTEXT1, 0, IPSEC_ICV_MD5_TRUNC_SIZE, 0);
+/*
+ * TODO: RTA currently doesn't support adding labels in or after Job Descriptor.
+ * To be changed when proper support is added in RTA.
+ */
+ /* Label the Shared Descriptor Pointer */
+ SET_LABEL(p, shd_ptr);
+ shd_ptr += 1;
+ /* Label the Output Pointer */
+ SET_LABEL(p, outptr);
+ outptr += 3;
+ /* Label the first word after JD */
+ SET_LABEL(p, swapped_seqin_fields);
+ swapped_seqin_fields += 8;
+ /* Label the second word after JD */
+ SET_LABEL(p, swapped_seqin_ptr);
+ swapped_seqin_ptr += 9;
+
+ PATCH_HDR(p, phdr, hdr);
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ PATCH_JUMP(p, swapped_seqin_ptr_jump, swapped_seqin_ptr);
+ PATCH_MOVE(p, move_outlen, outptr);
+ PATCH_MOVE(p, move_seqout_ptr, shd_ptr);
+ PATCH_MOVE(p, write_swapped_seqin_ptr, swapped_seqin_fields);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_ipsec_decap_des_aes_xcbc - IPSec DES-CBC/3DES-CBC and
+ * AES-XCBC-MAC-96 ESP decapsulation shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for a details of the encapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_PCL_IPSEC_DES, OP_PCL_IPSEC_3DES.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm value: OP_PCL_IPSEC_AES_XCBC_MAC_96.
+ *
+ * Supported only for platforms with 32-bit address pointers and SEC ERA 4 or
+ * higher. The tunnel/transport mode of the IPsec ESP is supported only if the
+ * Outer/Transport IP Header is present in the decapsulation input packet.
+ * The descriptor computes the AES-XCBC-MAC-96 to check if the received ICV
+ * is correct, rereads the input packet to compute the MD5 ICV, overwrites
+ * the XCBC ICV, and then sends the modified input packet to the
+ * DES-CBC/3DES-CBC & HMAC-MD5-96 IPsec.
+ * The descriptor uses all the benefits of the built-in protocol by computing
+ * the IPsec ESP with a hardware supported algorithms combination
+ * (DES-CBC/3DES-CBC & HMAC-MD5-96). The HMAC-MD5 authentication algorithm
+ * was chosen in order to speed up the computational time for this intermediate
+ * step.
+ * Warning: The user must allocate at least 32 bytes for the authentication key
+ * (in order to use it also with HMAC-MD5-96),even when using a shorter key
+ * for the AES-XCBC-MAC-96.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t ip_hdr_len = (pdb->options & PDBHDRLEN_MASK) >>
+ PDBHDRLEN_ESP_DECAP_SHIFT;
+
+ LABEL(hdr);
+ LABEL(jump_cmd);
+ LABEL(keyjump);
+ LABEL(outlen);
+ LABEL(seqin_ptr);
+ LABEL(seqout_ptr);
+ LABEL(swapped_seqout_fields);
+ LABEL(swapped_seqout_ptr);
+ REFERENCE(seqout_ptr_jump);
+ REFERENCE(phdr);
+ REFERENCE(pkeyjump);
+ REFERENCE(move_jump);
+ REFERENCE(move_jump_back);
+ REFERENCE(move_seqin_ptr);
+ REFERENCE(swapped_seqout_ptr_jump);
+ REFERENCE(write_swapped_seqout_ptr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF);
+ /*
+ * Hard-coded KEY arguments. The descriptor uses all the benefits of
+ * the built-in protocol by computing the IPsec ESP with a hardware
+ * supported algorithms combination (DES-CBC/3DES-CBC & HMAC-MD5-96).
+ * The HMAC-MD5 authentication algorithm was chosen with
+ * the keys options from bellow in order to speed up the computational
+ * time for this intermediate step.
+ * Warning: The user must allocate at least 32 bytes for
+ * the authentication key (in order to use it also with HMAC-MD5-96),
+ * even when using a shorter key for the AES-XCBC-MAC-96.
+ */
+ KEY(p, MDHA_SPLIT_KEY, 0, authdata->key, 32, INLINE_KEY(authdata));
+ SET_LABEL(p, keyjump);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ 0);
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ MATHB(p, SEQINSZ, SUB,
+ (uint64_t)(ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE), MATH0, 4,
+ IMMED2);
+ MATHB(p, MATH0, SUB, ZERO, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_MD5, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_XCBC_MAC,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_ENABLE, DIR_DEC);
+ SEQFIFOLOAD(p, SKIP, ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | FLUSH1);
+ SEQFIFOLOAD(p, ICV1, IPSEC_ICV_MD5_TRUNC_SIZE, FLUSH1 | LAST1);
+ /* Swap SEQOUTPTR to SEQINPTR. */
+ move_seqin_ptr = MOVE(p, DESCBUF, 0, MATH1, 0, 16, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR, MATH1, 8,
+ IFB | IMMED2);
+/*
+ * TODO: RTA currently doesn't support creating a LOAD command
+ * with another command as IMM.
+ * To be changed when proper support is added in RTA.
+ */
+ LOAD(p, 0xA00000e1, MATH3, 4, 4, IMMED);
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ write_swapped_seqout_ptr = MOVE(p, MATH1, 0, DESCBUF, 0, 20, WAITCOMP |
+ IMMED);
+ swapped_seqout_ptr_jump = JUMP(p, swapped_seqout_ptr, LOCAL_JUMP,
+ ALL_TRUE, 0);
+/*
+ * TODO: To be changed when proper support is added in RTA (can't load
+ * a command that is also written by RTA).
+ * Change when proper RTA support is added.
+ */
+ SET_LABEL(p, jump_cmd);
+ WORD(p, 0xA00000f3);
+ SEQINPTR(p, 0, 65535, RTO);
+ MATHB(p, MATH0, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH0, ADD, ip_hdr_len, VSEQOUTSZ, 4, IMMED2);
+ move_jump = MOVE(p, DESCBUF, 0, OFIFO, 0, 8, WAITCOMP | IMMED);
+ move_jump_back = MOVE(p, OFIFO, 0, DESCBUF, 0, 8, IMMED);
+ SEQFIFOLOAD(p, SKIP, ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+ SEQSTORE(p, CONTEXT2, 0, IPSEC_ICV_MD5_TRUNC_SIZE, 0);
+ seqout_ptr_jump = JUMP(p, seqout_ptr, LOCAL_JUMP, ALL_TRUE, CALM);
+
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_CLR_C2MODE |
+ CLRW_CLR_C2DATAS | CLRW_CLR_C2CTX | CLRW_RESET_CLS1_CHA, CLRW, 0,
+ 4, 0);
+ SEQINPTR(p, 0, 65535, RTO);
+ MATHB(p, MATH0, ADD,
+ (uint64_t)(ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE), SEQINSZ, 4,
+ IMMED2);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | OP_PCL_IPSEC_HMAC_MD5_96));
+/*
+ * TODO: RTA currently doesn't support adding labels in or after Job Descriptor.
+ * To be changed when proper support is added in RTA.
+ */
+ /* Label the SEQ OUT PTR */
+ SET_LABEL(p, seqout_ptr);
+ seqout_ptr += 2;
+ /* Label the Output Length */
+ SET_LABEL(p, outlen);
+ outlen += 4;
+ /* Label the SEQ IN PTR */
+ SET_LABEL(p, seqin_ptr);
+ seqin_ptr += 5;
+ /* Label the first word after JD */
+ SET_LABEL(p, swapped_seqout_fields);
+ swapped_seqout_fields += 8;
+ /* Label the second word after JD */
+ SET_LABEL(p, swapped_seqout_ptr);
+ swapped_seqout_ptr += 9;
+
+ PATCH_HDR(p, phdr, hdr);
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ PATCH_JUMP(p, seqout_ptr_jump, seqout_ptr);
+ PATCH_JUMP(p, swapped_seqout_ptr_jump, swapped_seqout_ptr);
+ PATCH_MOVE(p, move_jump, jump_cmd);
+ PATCH_MOVE(p, move_jump_back, seqin_ptr);
+ PATCH_MOVE(p, move_seqin_ptr, outlen);
+ PATCH_MOVE(p, write_swapped_seqout_ptr, swapped_seqout_fields);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * IPSEC_NEW_ENC_BASE_DESC_LEN - IPsec new mode encap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether Outer IP Header and/or keys can be inlined or
+ * not. To be used as first parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_ENC_BASE_DESC_LEN (5 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_encap_pdb))
+
+/**
+ * IPSEC_NEW_NULL_ENC_BASE_DESC_LEN - IPsec new mode encap shared descriptor
+ * length for the case of
+ * NULL encryption / authentication
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether Outer IP Header and/or key can be inlined or
+ * not. To be used as first parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_NULL_ENC_BASE_DESC_LEN (4 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_encap_pdb))
+
+/**
+ * cnstr_shdsc_ipsec_new_encap - IPSec new mode ESP encapsulation
+ * protocol-level shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the encapsulation PDB.
+ * @opt_ip_hdr: pointer to Optional IP Header
+ * -if OIHI = PDBOPTS_ESP_OIHI_PDB_INL, opt_ip_hdr points to the buffer to
+ * be inlined in the PDB. Number of bytes (buffer size) copied is provided
+ * in pdb->ip_hdr_len.
+ * -if OIHI = PDBOPTS_ESP_OIHI_PDB_REF, opt_ip_hdr points to the address of
+ * the Optional IP Header. The address will be inlined in the PDB verbatim.
+ * -for other values of OIHI options field, opt_ip_hdr is not used.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions.
+ * If an authentication key is required by the protocol, a "normal"
+ * key must be provided; DKP (Derived Key Protocol) will be used to
+ * compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
+ bool swap,
+ struct ipsec_encap_pdb *pdb,
+ uint8_t *opt_ip_hdr,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ if (rta_sec_era < RTA_SEC_ERA_8) {
+ pr_err("IPsec new mode encap: available only for Era %d or above\n",
+ USER_SEC_ERA(RTA_SEC_ERA_8));
+ return -ENOTSUP;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+
+ switch (pdb->options & PDBOPTS_ESP_OIHI_MASK) {
+ case PDBOPTS_ESP_OIHI_PDB_INL:
+ COPY_DATA(p, opt_ip_hdr, pdb->ip_hdr_len);
+ break;
+ case PDBOPTS_ESP_OIHI_PDB_REF:
+ if (ps)
+ COPY_DATA(p, opt_ip_hdr, 8);
+ else
+ COPY_DATA(p, opt_ip_hdr, 4);
+ break;
+ default:
+ break;
+ }
+ SET_LABEL(p, hdr);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ if (authdata->keylen)
+ __gen_auth_key(p, authdata);
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_IPSEC_NEW,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * IPSEC_NEW_DEC_BASE_DESC_LEN - IPsec new mode decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether keys can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_DEC_BASE_DESC_LEN (5 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_decap_pdb))
+
+/**
+ * IPSEC_NEW_NULL_DEC_BASE_DESC_LEN - IPsec new mode decap shared descriptor
+ * length for the case of
+ * NULL decryption / authentication
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_NULL_DEC_BASE_DESC_LEN (4 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_decap_pdb))
+
+/**
+ * cnstr_shdsc_ipsec_new_decap - IPSec new mode ESP decapsulation protocol-level
+ * shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the decapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values 0 one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions.
+ * If an authentication key is required by the protocol, a "normal"
+ * key must be provided; DKP (Derived Key Protocol) will be used to
+ * compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
+ bool swap,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ if (rta_sec_era < RTA_SEC_ERA_8) {
+ pr_err("IPsec new mode decap: available only for Era %d or above\n",
+ USER_SEC_ERA(RTA_SEC_ERA_8));
+ return -ENOTSUP;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ if (authdata->keylen)
+ __gen_auth_key(p, authdata);
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_IPSEC_NEW,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * IPSEC_AUTH_VAR_BASE_DESC_LEN - IPsec encap/decap shared descriptor length
+ * for the case of variable-length authentication
+ * only data.
+ * Note: Only for SoCs with SEC_ERA >= 3.
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether keys can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_VAR_BASE_DESC_LEN (27 * CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN - IPsec AES decap shared descriptor
+ * length for variable-length authentication only
+ * data.
+ * Note: Only for SoCs with SEC_ERA >= 3.
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN \
+ (IPSEC_AUTH_VAR_BASE_DESC_LEN + CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_BASE_DESC_LEN - IPsec encap/decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_BASE_DESC_LEN (19 * CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_AES_DEC_BASE_DESC_LEN - IPsec AES decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_AES_DEC_BASE_DESC_LEN (IPSEC_AUTH_BASE_DESC_LEN + \
+ CAAM_CMD_SZ)
+
+/**
+ * cnstr_shdsc_authenc - authenc-like descriptor
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @cipherdata: ointer to block cipher transform definitions.
+ * Valid algorithm values one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ * @authdata: pointer to authentication transform definitions.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_* {MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512}
+ * Note: The key for authentication is supposed to be given as plain text.
+ * Note: There's no support for keys longer than the block size of the
+ * underlying hash function, according to the selected algorithm.
+ *
+ * @ivlen: length of the IV to be read from the input frame, before any data
+ * to be processed
+ * @auth_only_len: length of the data to be authenticated-only (commonly IP
+ * header, IV, Sequence number and SPI)
+ * Note: Extended Sequence Number processing is NOT supported
+ *
+ * @trunc_len: the length of the ICV to be written to the output frame. If 0,
+ * then the corresponding length of the digest, according to the
+ * selected algorithm shall be used.
+ * @dir: Protocol direction, encapsulation or decapsulation (DIR_ENC/DIR_DEC)
+ *
+ * Note: Here's how the input frame needs to be formatted so that the processing
+ * will be done correctly:
+ * For encapsulation:
+ * Input:
+ * +----+----------------+---------------------------------------------+
+ * | IV | Auth-only data | Padded data to be authenticated & Encrypted |
+ * +----+----------------+---------------------------------------------+
+ * Output:
+ * +--------------------------------------+
+ * | Authenticated & Encrypted data | ICV |
+ * +--------------------------------+-----+
+
+ * For decapsulation:
+ * Input:
+ * +----+----------------+--------------------------------+-----+
+ * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
+ * +----+----------------+--------------------------------+-----+
+ * Output:
+ * +----+--------------------------+
+ * | Decrypted & authenticated data |
+ * +----+--------------------------+
+ *
+ * Note: This descriptor can use per-packet commands, encoded as below in the
+ * DPOVRD register:
+ * 32 24 16 0
+ * +------+---------------------+
+ * | 0x80 | 0x00| auth_only_len |
+ * +------+---------------------+
+ *
+ * This mechanism is available only for SoCs having SEC ERA >= 3. In other
+ * words, this will not work for P4080TO2
+ *
+ * Note: The descriptor does not add any kind of padding to the input data,
+ * so the upper layer needs to ensure that the data is padded properly,
+ * according to the selected cipher. Failure to do so will result in
+ * the descriptor failing with a data-size error.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ uint16_t ivlen, uint16_t auth_only_len,
+ uint8_t trunc_len, uint8_t dir)
+{
+ struct program prg;
+ struct program *p = &prg;
+ const bool is_aes_dec = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES);
+
+ LABEL(skip_patch_len);
+ LABEL(keyjmp);
+ LABEL(skipkeys);
+ LABEL(aonly_len_offset);
+ REFERENCE(pskip_patch_len);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pskipkeys);
+ REFERENCE(read_len);
+ REFERENCE(write_len);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ /*
+ * Since we currently assume that key length is equal to hash digest
+ * size, it's ok to truncate keylen value.
+ */
+ trunc_len = trunc_len && (trunc_len < authdata->keylen) ?
+ trunc_len : (uint8_t)authdata->keylen;
+
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ /*
+ * M0 will contain the value provided by the user when creating
+ * the shared descriptor. If the user provided an override in
+ * DPOVRD, then M0 will contain that value
+ */
+ MATHB(p, MATH0, ADD, auth_only_len, MATH0, 4, IMMED2);
+
+ if (rta_sec_era >= RTA_SEC_ERA_3) {
+ /*
+ * Check if the user wants to override the auth-only len
+ */
+ MATHB(p, DPOVRD, ADD, 0x80000000, MATH2, 4, IMMED2);
+
+ /*
+ * No need to patch the length of the auth-only data read if
+ * the user did not override it
+ */
+ pskip_patch_len = JUMP(p, skip_patch_len, LOCAL_JUMP, ALL_TRUE,
+ MATH_N);
+
+ /* Get auth-only len in M0 */
+ MATHB(p, MATH2, AND, 0xFFFF, MATH0, 4, IMMED2);
+
+ /*
+ * Since M0 is used in calculations, don't mangle it, copy
+ * its content to M1 and use this for patching.
+ */
+ MATHB(p, MATH0, ADD, MATH1, MATH1, 4, 0);
+
+ read_len = MOVE(p, DESCBUF, 0, MATH1, 0, 6, WAITCOMP | IMMED);
+ write_len = MOVE(p, MATH1, 0, DESCBUF, 0, 8, WAITCOMP | IMMED);
+
+ SET_LABEL(p, skip_patch_len);
+ }
+ /*
+ * MATH0 contains the value in DPOVRD w/o the MSB, or the initial
+ * value, as provided by the user at descriptor creation time
+ */
+ if (dir == DIR_ENC)
+ MATHB(p, MATH0, ADD, ivlen, MATH0, 4, IMMED2);
+ else
+ MATHB(p, MATH0, ADD, ivlen + trunc_len, MATH0, 4, IMMED2);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ /* Do operation */
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC,
+ OP_ALG_AS_INITFINAL,
+ dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ dir);
+
+ if (is_aes_dec)
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ pskipkeys = JUMP(p, skipkeys, LOCAL_JUMP, ALL_TRUE, 0);
+
+ SET_LABEL(p, keyjmp);
+
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL,
+ dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ dir);
+
+ if (is_aes_dec) {
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
+ OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE, dir);
+ SET_LABEL(p, skipkeys);
+ } else {
+ SET_LABEL(p, skipkeys);
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ }
+
+ /*
+ * Prepare the length of the data to be both encrypted/decrypted
+ * and authenticated/checked
+ */
+ MATHB(p, SEQINSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ MATHB(p, VSEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* Prepare for writing the output frame */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ SET_LABEL(p, aonly_len_offset);
+
+ /* Read IV */
+ SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+
+ /*
+ * Read data needed only for authentication. This is overwritten above
+ * if the user requested it.
+ */
+ SEQFIFOLOAD(p, MSG2, auth_only_len, 0);
+
+ if (dir == DIR_ENC) {
+ /*
+ * Read input plaintext, encrypt and authenticate & write to
+ * output
+ */
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+
+ /* Finally, write the ICV */
+ SEQSTORE(p, CONTEXT2, 0, trunc_len, 0);
+ } else {
+ /*
+ * Read input ciphertext, decrypt and authenticate & write to
+ * output
+ */
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+
+ /* Read the ICV to check */
+ SEQFIFOLOAD(p, ICV2, trunc_len, LAST2);
+ }
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pskipkeys, skipkeys);
+ PATCH_JUMP(p, pskipkeys, skipkeys);
+
+ if (rta_sec_era >= RTA_SEC_ERA_3) {
+ PATCH_JUMP(p, pskip_patch_len, skip_patch_len);
+ PATCH_MOVE(p, read_len, aonly_len_offset);
+ PATCH_MOVE(p, write_len, aonly_len_offset);
+ }
+
+ return PROGRAM_FINALIZE(p);
+}
+
+#endif /* __DESC_IPSEC_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta.h b/drivers/crypto/dpaa2_sec/hw/rta.h
new file mode 100644
index 00000000..7cf4c8a9
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta.h
@@ -0,0 +1,954 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_RTA_H__
+#define __RTA_RTA_H__
+
+#include "rta/sec_run_time_asm.h"
+#include "rta/fifo_load_store_cmd.h"
+#include "rta/header_cmd.h"
+#include "rta/jump_cmd.h"
+#include "rta/key_cmd.h"
+#include "rta/load_cmd.h"
+#include "rta/math_cmd.h"
+#include "rta/move_cmd.h"
+#include "rta/nfifo_cmd.h"
+#include "rta/operation_cmd.h"
+#include "rta/protocol_cmd.h"
+#include "rta/seq_in_out_ptr_cmd.h"
+#include "rta/signature_cmd.h"
+#include "rta/store_cmd.h"
+
+/**
+ * DOC: About
+ *
+ * RTA (Runtime Assembler) Library is an easy and flexible runtime method for
+ * writing SEC descriptors. It implements a thin abstraction layer above
+ * SEC commands set; the resulting code is compact and similar to a
+ * descriptor sequence.
+ *
+ * RTA library improves comprehension of the SEC code, adds flexibility for
+ * writing complex descriptors and keeps the code lightweight. Should be used
+ * by whom needs to encode descriptors at runtime, with comprehensible flow
+ * control in descriptor.
+ */
+
+/**
+ * DOC: Usage
+ *
+ * RTA is used in kernel space by the SEC / CAAM (Cryptographic Acceleration and
+ * Assurance Module) kernel module (drivers/crypto/caam) and SEC / CAAM QI
+ * kernel module (Freescale QorIQ SDK).
+ *
+ * RTA is used in user space by USDPAA - User Space DataPath Acceleration
+ * Architecture (Freescale QorIQ SDK).
+ */
+
+/**
+ * DOC: Descriptor Buffer Management Routines
+ *
+ * Contains details of RTA descriptor buffer management and SEC Era
+ * management routines.
+ */
+
+/**
+ * PROGRAM_CNTXT_INIT - must be called before any descriptor run-time assembly
+ * call type field carry info i.e. whether descriptor is
+ * shared or job descriptor.
+ * @program: pointer to struct program
+ * @buffer: input buffer where the descriptor will be placed (uint32_t *)
+ * @offset: offset in input buffer from where the data will be written
+ * (unsigned int)
+ */
+#define PROGRAM_CNTXT_INIT(program, buffer, offset) \
+ rta_program_cntxt_init(program, buffer, offset)
+
+/**
+ * PROGRAM_FINALIZE - must be called to mark completion of RTA call.
+ * @program: pointer to struct program
+ *
+ * Return: total size of the descriptor in words or negative number on error.
+ */
+#define PROGRAM_FINALIZE(program) rta_program_finalize(program)
+
+/**
+ * PROGRAM_SET_36BIT_ADDR - must be called to set pointer size to 36 bits
+ * @program: pointer to struct program
+ *
+ * Return: current size of the descriptor in words (unsigned int).
+ */
+#define PROGRAM_SET_36BIT_ADDR(program) rta_program_set_36bit_addr(program)
+
+/**
+ * PROGRAM_SET_BSWAP - must be called to enable byte swapping
+ * @program: pointer to struct program
+ *
+ * Byte swapping on a 4-byte boundary will be performed at the end - when
+ * calling PROGRAM_FINALIZE().
+ *
+ * Return: current size of the descriptor in words (unsigned int).
+ */
+#define PROGRAM_SET_BSWAP(program) rta_program_set_bswap(program)
+
+/**
+ * WORD - must be called to insert in descriptor buffer a 32bit value
+ * @program: pointer to struct program
+ * @val: input value to be written in descriptor buffer (uint32_t)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define WORD(program, val) rta_word(program, val)
+
+/**
+ * DWORD - must be called to insert in descriptor buffer a 64bit value
+ * @program: pointer to struct program
+ * @val: input value to be written in descriptor buffer (uint64_t)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define DWORD(program, val) rta_dword(program, val)
+
+/**
+ * COPY_DATA - must be called to insert in descriptor buffer data larger than
+ * 64bits.
+ * @program: pointer to struct program
+ * @data: input data to be written in descriptor buffer (uint8_t *)
+ * @len: length of input data (unsigned int)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define COPY_DATA(program, data, len) rta_copy_data(program, (data), (len))
+
+/**
+ * DESC_LEN - determines job / shared descriptor buffer length (in words)
+ * @buffer: descriptor buffer (uint32_t *)
+ *
+ * Return: descriptor buffer length in words (unsigned int).
+ */
+#define DESC_LEN(buffer) rta_desc_len(buffer)
+
+/**
+ * DESC_BYTES - determines job / shared descriptor buffer length (in bytes)
+ * @buffer: descriptor buffer (uint32_t *)
+ *
+ * Return: descriptor buffer length in bytes (unsigned int).
+ */
+#define DESC_BYTES(buffer) rta_desc_bytes(buffer)
+
+/*
+ * SEC HW block revision.
+ *
+ * This *must not be confused with SEC version*:
+ * - SEC HW block revision format is "v"
+ * - SEC revision format is "x.y"
+ */
+extern enum rta_sec_era rta_sec_era;
+
+/**
+ * rta_set_sec_era - Set SEC Era HW block revision for which the RTA library
+ * will generate the descriptors.
+ * @era: SEC Era (enum rta_sec_era)
+ *
+ * Return: 0 if the ERA was set successfully, -1 otherwise (int)
+ *
+ * Warning 1: Must be called *only once*, *before* using any other RTA API
+ * routine.
+ *
+ * Warning 2: *Not thread safe*.
+ */
+static inline int
+rta_set_sec_era(enum rta_sec_era era)
+{
+ if (era > MAX_SEC_ERA) {
+ rta_sec_era = DEFAULT_SEC_ERA;
+ pr_err("Unsupported SEC ERA. Defaulting to ERA %d\n",
+ DEFAULT_SEC_ERA + 1);
+ return -1;
+ }
+
+ rta_sec_era = era;
+ return 0;
+}
+
+/**
+ * rta_get_sec_era - Get SEC Era HW block revision for which the RTA library
+ * will generate the descriptors.
+ *
+ * Return: SEC Era (unsigned int).
+ */
+static inline unsigned int
+rta_get_sec_era(void)
+{
+ return rta_sec_era;
+}
+
+/**
+ * DOC: SEC Commands Routines
+ *
+ * Contains details of RTA wrapper routines over SEC engine commands.
+ */
+
+/**
+ * SHR_HDR - Configures Shared Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the shared
+ * descriptor should start (@c unsigned int).
+ * @flags: operational flags: RIF, DNR, CIF, SC, PD
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SHR_HDR(program, share, start_idx, flags) \
+ rta_shr_header(program, share, start_idx, flags)
+
+/**
+ * JOB_HDR - Configures JOB Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the job
+ * descriptor should start (unsigned int). In case SHR bit is present
+ * in flags, this will be the shared descriptor length.
+ * @share_desc: pointer to shared descriptor, in case SHR bit is set (uint64_t)
+ * @flags: operational flags: RSMS, DNR, TD, MTD, REO, SHR
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JOB_HDR(program, share, start_idx, share_desc, flags) \
+ rta_job_header(program, share, start_idx, share_desc, flags, 0)
+
+/**
+ * JOB_HDR_EXT - Configures JOB Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the job
+ * descriptor should start (unsigned int). In case SHR bit is present
+ * in flags, this will be the shared descriptor length.
+ * @share_desc: pointer to shared descriptor, in case SHR bit is set (uint64_t)
+ * @flags: operational flags: RSMS, DNR, TD, MTD, REO, SHR
+ * @ext_flags: extended header flags: DSV (DECO Select Valid), DECO Id (limited
+ * by DSEL_MASK).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JOB_HDR_EXT(program, share, start_idx, share_desc, flags, ext_flags) \
+ rta_job_header(program, share, start_idx, share_desc, flags | EXT, \
+ ext_flags)
+
+/**
+ * MOVE - Configures MOVE and MOVE_LEN commands
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVE(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVE, src, src_offset, dst, dst_offset, length, opt)
+
+/**
+ * MOVEB - Configures MOVEB command
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Identical with MOVE command if byte swapping not enabled; else - when src/dst
+ * is descriptor buffer or MATH registers, data type is byte array when MOVE
+ * data type is 4-byte array and vice versa.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVEB(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVEB, src, src_offset, dst, dst_offset, length, \
+ opt)
+
+/**
+ * MOVEDW - Configures MOVEDW command
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Identical with MOVE command, with the following differences: data type is
+ * 8-byte array; word swapping is performed when SEC is programmed in little
+ * endian mode.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVEDW(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVEDW, src, src_offset, dst, dst_offset, length, \
+ opt)
+
+/**
+ * FIFOLOAD - Configures FIFOLOAD command to load message data, PKHA data, IV,
+ * ICV, AAD and bit length message data into Input Data FIFO.
+ * @program: pointer to struct program
+ * @data: input data type to store: PKHA registers, IFIFO, MSG1, MSG2,
+ * MSGOUTSNOOP, MSGINSNOOP, IV1, IV2, AAD1, ICV1, ICV2, BIT_DATA, SKIP.
+ * @src: pointer or actual data in case of immediate load; IMMED, COPY and DCOPY
+ * flags indicate action taken (inline imm data, inline ptr, inline from
+ * ptr).
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF, IMMED, EXT, CLASS1, CLASS2, BOTH, FLUSH1,
+ * LAST1, LAST2, COPY, DCOPY.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define FIFOLOAD(program, data, src, length, flags) \
+ rta_fifo_load(program, data, src, length, flags)
+
+/**
+ * SEQFIFOLOAD - Configures SEQ FIFOLOAD command to load message data, PKHA
+ * data, IV, ICV, AAD and bit length message data into Input Data
+ * FIFO.
+ * @program: pointer to struct program
+ * @data: input data type to store: PKHA registers, IFIFO, MSG1, MSG2,
+ * MSGOUTSNOOP, MSGINSNOOP, IV1, IV2, AAD1, ICV1, ICV2, BIT_DATA, SKIP.
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: VLF, CLASS1, CLASS2, BOTH, FLUSH1, LAST1, LAST2,
+ * AIDF.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQFIFOLOAD(program, data, length, flags) \
+ rta_fifo_load(program, data, NONE, length, flags|SEQ)
+
+/**
+ * FIFOSTORE - Configures FIFOSTORE command, to move data from Output Data FIFO
+ * to external memory via DMA.
+ * @program: pointer to struct program
+ * @data: output data type to store: PKHA registers, IFIFO, OFIFO, RNG,
+ * RNGOFIFO, AFHA_SBOX, MDHA_SPLIT_KEY, MSG, KEY1, KEY2, SKIP.
+ * @encrypt_flags: store data encryption mode: EKT, TK
+ * @dst: pointer to store location (uint64_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF, CONT, EXT, CLASS1, CLASS2, BOTH
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define FIFOSTORE(program, data, encrypt_flags, dst, length, flags) \
+ rta_fifo_store(program, data, encrypt_flags, dst, length, flags)
+
+/**
+ * SEQFIFOSTORE - Configures SEQ FIFOSTORE command, to move data from Output
+ * Data FIFO to external memory via DMA.
+ * @program: pointer to struct program
+ * @data: output data type to store: PKHA registers, IFIFO, OFIFO, RNG,
+ * RNGOFIFO, AFHA_SBOX, MDHA_SPLIT_KEY, MSG, KEY1, KEY2, METADATA, SKIP.
+ * @encrypt_flags: store data encryption mode: EKT, TK
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: VLF, CONT, EXT, CLASS1, CLASS2, BOTH
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQFIFOSTORE(program, data, encrypt_flags, length, flags) \
+ rta_fifo_store(program, data, encrypt_flags, 0, length, flags|SEQ)
+
+/**
+ * KEY - Configures KEY and SEQ KEY commands
+ * @program: pointer to struct program
+ * @key_dst: key store location: KEY1, KEY2, PKE, AFHA_SBOX, MDHA_SPLIT_KEY
+ * @encrypt_flags: key encryption mode: ENC, EKT, TK, NWB, PTS
+ * @src: pointer or actual data in case of immediate load (uint64_t); IMMED,
+ * COPY and DCOPY flags indicate action taken (inline imm data,
+ * inline ptr, inline from ptr).
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: for KEY: SGF, IMMED, COPY, DCOPY; for SEQKEY: SEQ,
+ * VLF, AIDF.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define KEY(program, key_dst, encrypt_flags, src, length, flags) \
+ rta_key(program, key_dst, encrypt_flags, src, length, flags)
+
+/**
+ * SEQINPTR - Configures SEQ IN PTR command
+ * @program: pointer to struct program
+ * @src: starting address for Input Sequence (uint64_t)
+ * @length: number of bytes in (or to be added to) Input Sequence (uint32_t)
+ * @flags: operational flags: RBS, INL, SGF, PRE, EXT, RTO, RJD, SOP (when PRE,
+ * RTO or SOP are set, @src parameter must be 0).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQINPTR(program, src, length, flags) \
+ rta_seq_in_ptr(program, src, length, flags)
+
+/**
+ * SEQOUTPTR - Configures SEQ OUT PTR command
+ * @program: pointer to struct program
+ * @dst: starting address for Output Sequence (uint64_t)
+ * @length: number of bytes in (or to be added to) Output Sequence (uint32_t)
+ * @flags: operational flags: SGF, PRE, EXT, RTO, RST, EWS (when PRE or RTO are
+ * set, @dst parameter must be 0).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQOUTPTR(program, dst, length, flags) \
+ rta_seq_out_ptr(program, dst, length, flags)
+
+/**
+ * ALG_OPERATION - Configures ALGORITHM OPERATION command
+ * @program: pointer to struct program
+ * @cipher_alg: algorithm to be used
+ * @aai: Additional Algorithm Information; contains mode information that is
+ * associated with the algorithm (check desc.h for specific values).
+ * @algo_state: algorithm state; defines the state of the algorithm that is
+ * being executed (check desc.h file for specific values).
+ * @icv_check: ICV checking; selects whether the algorithm should check
+ * calculated ICV with known ICV: ICV_CHECK_ENABLE,
+ * ICV_CHECK_DISABLE.
+ * @enc: selects between encryption and decryption: DIR_ENC, DIR_DEC
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define ALG_OPERATION(program, cipher_alg, aai, algo_state, icv_check, enc) \
+ rta_operation(program, cipher_alg, aai, algo_state, icv_check, enc)
+
+/**
+ * PROTOCOL - Configures PROTOCOL OPERATION command
+ * @program: pointer to struct program
+ * @optype: operation type: OP_TYPE_UNI_PROTOCOL / OP_TYPE_DECAP_PROTOCOL /
+ * OP_TYPE_ENCAP_PROTOCOL.
+ * @protid: protocol identifier value (check desc.h file for specific values)
+ * @protoinfo: protocol dependent value (check desc.h file for specific values)
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define PROTOCOL(program, optype, protid, protoinfo) \
+ rta_proto_operation(program, optype, protid, protoinfo)
+
+/**
+ * DKP_PROTOCOL - Configures DKP (Derived Key Protocol) PROTOCOL command
+ * @program: pointer to struct program
+ * @protid: protocol identifier value - one of the following:
+ * OP_PCLID_DKP_{MD5 | SHA1 | SHA224 | SHA256 | SHA384 | SHA512}
+ * @key_src: How the initial ("negotiated") key is provided to the DKP protocol.
+ * Valid values - one of OP_PCL_DKP_SRC_{IMM, SEQ, PTR, SGF}. Not all
+ * (key_src,key_dst) combinations are allowed.
+ * @key_dst: How the derived ("split") key is returned by the DKP protocol.
+ * Valid values - one of OP_PCL_DKP_DST_{IMM, SEQ, PTR, SGF}. Not all
+ * (key_src,key_dst) combinations are allowed.
+ * @keylen: length of the initial key, in bytes (uint16_t)
+ * @key: address where algorithm key resides; virtual address if key_type is
+ * RTA_DATA_IMM, physical (bus) address if key_type is RTA_DATA_PTR or
+ * RTA_DATA_IMM_DMA.
+ * @key_type: enum rta_data_type
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define DKP_PROTOCOL(program, protid, key_src, key_dst, keylen, key, key_type) \
+ rta_dkp_proto(program, protid, key_src, key_dst, keylen, key, key_type)
+
+/**
+ * PKHA_OPERATION - Configures PKHA OPERATION command
+ * @program: pointer to struct program
+ * @op_pkha: PKHA operation; indicates the modular arithmetic function to
+ * execute (check desc.h file for specific values).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define PKHA_OPERATION(program, op_pkha) rta_pkha_operation(program, op_pkha)
+
+/**
+ * JUMP - Configures JUMP command
+ * @program: pointer to struct program
+ * @addr: local offset for local jumps or address pointer for non-local jumps;
+ * IMM or PTR macros must be used to indicate type.
+ * @jump_type: type of action taken by jump (enum rta_jump_type)
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: operational flags - DONE1, DONE2, BOTH; various
+ * sharing and wait conditions (JSL = 1) - NIFP, NIP, NOP, NCP, CALM,
+ * SELF, SHARED, JQP; Math and PKHA status conditions (JSL = 0) - Z, N,
+ * NV, C, PK0, PK1, PKP.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP(program, addr, jump_type, test_type, cond) \
+ rta_jump(program, addr, jump_type, test_type, cond, NONE)
+
+/**
+ * JUMP_INC - Configures JUMP_INC command
+ * @program: pointer to struct program
+ * @addr: local offset; IMM or PTR macros must be used to indicate type
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: Math status conditions (JSL = 0): Z, N, NV, C
+ * @src_dst: register to increment / decrement: MATH0-MATH3, DPOVRD, SEQINSZ,
+ * SEQOUTSZ, VSEQINSZ, VSEQOUTSZ.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP_INC(program, addr, test_type, cond, src_dst) \
+ rta_jump(program, addr, LOCAL_JUMP_INC, test_type, cond, src_dst)
+
+/**
+ * JUMP_DEC - Configures JUMP_DEC command
+ * @program: pointer to struct program
+ * @addr: local offset; IMM or PTR macros must be used to indicate type
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: Math status conditions (JSL = 0): Z, N, NV, C
+ * @src_dst: register to increment / decrement: MATH0-MATH3, DPOVRD, SEQINSZ,
+ * SEQOUTSZ, VSEQINSZ, VSEQOUTSZ.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP_DEC(program, addr, test_type, cond, src_dst) \
+ rta_jump(program, addr, LOCAL_JUMP_DEC, test_type, cond, src_dst)
+
+/**
+ * LOAD - Configures LOAD command to load data registers from descriptor or from
+ * a memory location.
+ * @program: pointer to struct program
+ * @addr: immediate value or pointer to the data to be loaded; IMMED, COPY and
+ * DCOPY flags indicate action taken (inline imm data, inline ptr, inline
+ * from ptr).
+ * @dst: destination register (uint64_t)
+ * @offset: start point to write data in destination register (uint32_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: VLF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define LOAD(program, addr, dst, offset, length, flags) \
+ rta_load(program, addr, dst, offset, length, flags)
+
+/**
+ * SEQLOAD - Configures SEQ LOAD command to load data registers from descriptor
+ * or from a memory location.
+ * @program: pointer to struct program
+ * @dst: destination register (uint64_t)
+ * @offset: start point to write data in destination register (uint32_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQLOAD(program, dst, offset, length, flags) \
+ rta_load(program, NONE, dst, offset, length, flags|SEQ)
+
+/**
+ * STORE - Configures STORE command to read data from registers and write them
+ * to a memory location.
+ * @program: pointer to struct program
+ * @src: immediate value or source register for data to be stored: KEY1SZ,
+ * KEY2SZ, DJQDA, MODE1, MODE2, DJQCTRL, DATA1SZ, DATA2SZ, DSTAT, ICV1SZ,
+ * ICV2SZ, DPID, CCTRL, ICTRL, CLRW, CSTAT, MATH0-MATH3, PKHA registers,
+ * CONTEXT1, CONTEXT2, DESCBUF, JOBDESCBUF, SHAREDESCBUF. In case of
+ * immediate value, IMMED, COPY and DCOPY flags indicate action taken
+ * (inline imm data, inline ptr, inline from ptr).
+ * @offset: start point for reading from source register (uint16_t)
+ * @dst: pointer to store location (uint64_t)
+ * @length: number of bytes to store (uint32_t)
+ * @flags: operational flags: VLF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define STORE(program, src, offset, dst, length, flags) \
+ rta_store(program, src, offset, dst, length, flags)
+
+/**
+ * SEQSTORE - Configures SEQ STORE command to read data from registers and write
+ * them to a memory location.
+ * @program: pointer to struct program
+ * @src: immediate value or source register for data to be stored: KEY1SZ,
+ * KEY2SZ, DJQDA, MODE1, MODE2, DJQCTRL, DATA1SZ, DATA2SZ, DSTAT, ICV1SZ,
+ * ICV2SZ, DPID, CCTRL, ICTRL, CLRW, CSTAT, MATH0-MATH3, PKHA registers,
+ * CONTEXT1, CONTEXT2, DESCBUF, JOBDESCBUF, SHAREDESCBUF. In case of
+ * immediate value, IMMED, COPY and DCOPY flags indicate action taken
+ * (inline imm data, inline ptr, inline from ptr).
+ * @offset: start point for reading from source register (uint16_t)
+ * @length: number of bytes to store (uint32_t)
+ * @flags: operational flags: SGF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQSTORE(program, src, offset, length, flags) \
+ rta_store(program, src, offset, NONE, length, flags|SEQ)
+
+/**
+ * MATHB - Configures MATHB command to perform binary operations
+ * @program: pointer to struct program
+ * @operand1: first operand: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE, NONE, Immediate value. IMMED must be used to
+ * indicate immediate value.
+ * @operator: function to be performed: ADD, ADDC, SUB, SUBB, OR, AND, XOR,
+ * LSHIFT, RSHIFT, SHLD.
+ * @operand2: second operand: MATH0-MATH3, DPOVRD, VSEQINSZ, VSEQOUTSZ, ABD,
+ * OFIFO, JOBSRC, ZERO, ONE, Immediate value. IMMED2 must be used to
+ * indicate immediate value.
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int).
+ * @opt: operational flags: IFB, NFU, STL, SWP, IMMED, IMMED2
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHB(program, operand1, operator, operand2, result, length, opt) \
+ rta_math(program, operand1, MATH_FUN_##operator, operand2, result, \
+ length, opt)
+
+/**
+ * MATHI - Configures MATHI command to perform binary operations
+ * @program: pointer to struct program
+ * @operand: if !SSEL: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE.
+ * if SSEL: MATH0-MATH3, DPOVRD, VSEQINSZ, VSEQOUTSZ, ABD, OFIFO,
+ * JOBSRC, ZERO, ONE.
+ * @operator: function to be performed: ADD, ADDC, SUB, SUBB, OR, AND, XOR,
+ * LSHIFT, RSHIFT, FBYT (for !SSEL only).
+ * @imm: Immediate value (uint8_t). IMMED must be used to indicate immediate
+ * value.
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int). @imm is left-extended with zeros if needed.
+ * @opt: operational flags: NFU, SSEL, SWP, IMMED
+ *
+ * If !SSEL, @operand <@operator> @imm -> @result
+ * If SSEL, @imm <@operator> @operand -> @result
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHI(program, operand, operator, imm, result, length, opt) \
+ rta_mathi(program, operand, MATH_FUN_##operator, imm, result, length, \
+ opt)
+
+/**
+ * MATHU - Configures MATHU command to perform unary operations
+ * @program: pointer to struct program
+ * @operand1: operand: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE, NONE, Immediate value. IMMED must be used to
+ * indicate immediate value.
+ * @operator: function to be performed: ZBYT, BSWAP
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int).
+ * @opt: operational flags: NFU, STL, SWP, IMMED
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHU(program, operand1, operator, result, length, opt) \
+ rta_math(program, operand1, MATH_FUN_##operator, NONE, result, length, \
+ opt)
+
+/**
+ * SIGNATURE - Configures SIGNATURE command
+ * @program: pointer to struct program
+ * @sign_type: signature type: SIGN_TYPE_FINAL, SIGN_TYPE_FINAL_RESTORE,
+ * SIGN_TYPE_FINAL_NONZERO, SIGN_TYPE_IMM_2, SIGN_TYPE_IMM_3,
+ * SIGN_TYPE_IMM_4.
+ *
+ * After SIGNATURE command, DWORD or WORD must be used to insert signature in
+ * descriptor buffer.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SIGNATURE(program, sign_type) rta_signature(program, sign_type)
+
+/**
+ * NFIFOADD - Configures NFIFO command, a shortcut of RTA Load command to write
+ * to iNfo FIFO.
+ * @program: pointer to struct program
+ * @src: source for the input data in Alignment Block:IFIFO, OFIFO, PAD,
+ * MSGOUTSNOOP, ALTSOURCE, OFIFO_SYNC, MSGOUTSNOOP_ALT.
+ * @data: type of data that is going through the Input Data FIFO: MSG, MSG1,
+ * MSG2, IV1, IV2, ICV1, ICV2, SAD1, AAD1, AAD2, AFHA_SBOX, SKIP,
+ * PKHA registers, AB1, AB2, ABD.
+ * @length: length of the data copied in FIFO registers (uint32_t)
+ * @flags: select options between:
+ * -operational flags: LAST1, LAST2, FLUSH1, FLUSH2, OC, BP
+ * -when PAD is selected as source: BM, PR, PS
+ * -padding type: <em>PAD_ZERO, PAD_NONZERO, PAD_INCREMENT, PAD_RANDOM,
+ * PAD_ZERO_N1, PAD_NONZERO_0, PAD_N1, PAD_NONZERO_N
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define NFIFOADD(program, src, data, length, flags) \
+ rta_nfifo_load(program, src, data, length, flags)
+
+/**
+ * DOC: Self Referential Code Management Routines
+ *
+ * Contains details of RTA self referential code routines.
+ */
+
+/**
+ * REFERENCE - initialize a variable used for storing an index inside a
+ * descriptor buffer.
+ * @ref: reference to a descriptor buffer's index where an update is required
+ * with a value that will be known latter in the program flow.
+ */
+#define REFERENCE(ref) int ref = -1
+
+/**
+ * LABEL - initialize a variable used for storing an index inside a descriptor
+ * buffer.
+ * @label: label stores the value with what should be updated the REFERENCE line
+ * in the descriptor buffer.
+ */
+#define LABEL(label) unsigned int label = 0
+
+/**
+ * SET_LABEL - set a LABEL value
+ * @program: pointer to struct program
+ * @label: value that will be inserted in a line previously written in the
+ * descriptor buffer.
+ */
+#define SET_LABEL(program, label) (label = rta_set_label(program))
+
+/**
+ * PATCH_JUMP - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For JUMP command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_JUMP(program, line, new_ref) rta_patch_jmp(program, line, new_ref)
+
+/**
+ * PATCH_MOVE - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For MOVE command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_MOVE(program, line, new_ref) \
+ rta_patch_move(program, line, new_ref)
+
+/**
+ * PATCH_LOAD - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For LOAD command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_LOAD(program, line, new_ref) \
+ rta_patch_load(program, line, new_ref)
+
+/**
+ * PATCH_STORE - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For STORE command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_STORE(program, line, new_ref) \
+ rta_patch_store(program, line, new_ref)
+
+/**
+ * PATCH_HDR - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For HEADER command, the value represents the start index field.
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_HDR(program, line, new_ref) \
+ rta_patch_header(program, line, new_ref)
+
+/**
+ * PATCH_RAW - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @mask: mask to be used for applying the new value (unsigned int). The mask
+ * selects which bits from the provided @new_val are taken into
+ * consideration when overwriting the existing value.
+ * @new_val: updated value that will be masked using the provided mask value
+ * and inserted in descriptor buffer at the specified line.
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_RAW(program, line, mask, new_val) \
+ rta_patch_raw(program, line, mask, new_val)
+
+#endif /* __RTA_RTA_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
new file mode 100644
index 00000000..79a48da2
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
@@ -0,0 +1,346 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_FIFO_LOAD_STORE_CMD_H__
+#define __RTA_FIFO_LOAD_STORE_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t fifo_load_table[][2] = {
+/*1*/ { PKA0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A0 },
+ { PKA1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A1 },
+ { PKA2, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A2 },
+ { PKA3, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A3 },
+ { PKB0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B0 },
+ { PKB1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B1 },
+ { PKB2, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B2 },
+ { PKB3, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B3 },
+ { PKA, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A },
+ { PKB, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B },
+ { PKN, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_N },
+ { SKIP, FIFOLD_CLASS_SKIP },
+ { MSG1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG },
+ { MSG2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG },
+ { MSGOUTSNOOP, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG1OUT2 },
+ { MSGINSNOOP, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG },
+ { IV1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV },
+ { IV2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_IV },
+ { AAD1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_AAD },
+ { ICV1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_ICV },
+ { ICV2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV },
+ { BIT_DATA, FIFOLD_TYPE_BITDATA },
+/*23*/ { IFIFO, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_NOINFOFIFO }
+};
+
+/*
+ * Allowed FIFO_LOAD input data types for each SEC Era.
+ * Values represent the number of entries from fifo_load_table[] that are
+ * supported.
+ */
+static const unsigned int fifo_load_table_sz[] = {22, 22, 23, 23,
+ 23, 23, 23, 23};
+
+static inline int
+rta_fifo_load(struct program *program, uint32_t src,
+ uint64_t loc, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint32_t ext_length = 0, val = 0;
+ int ret = -EINVAL;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ /* write command type field */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_FIFO_LOAD;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_FIFO_LOAD;
+ }
+
+ /* Parameters checking */
+ if (is_seq_cmd) {
+ if ((flags & IMMED) || (flags & SGF)) {
+ pr_err("SEQ FIFO LOAD: Invalid command\n");
+ goto err;
+ }
+ if ((rta_sec_era <= RTA_SEC_ERA_5) && (flags & AIDF)) {
+ pr_err("SEQ FIFO LOAD: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & VLF) && ((flags & EXT) || (length >> 16))) {
+ pr_err("SEQ FIFO LOAD: Invalid usage of VLF\n");
+ goto err;
+ }
+ } else {
+ if (src == SKIP) {
+ pr_err("FIFO LOAD: Invalid src\n");
+ goto err;
+ }
+ if ((flags & AIDF) || (flags & VLF)) {
+ pr_err("FIFO LOAD: Invalid command\n");
+ goto err;
+ }
+ if ((flags & IMMED) && (flags & SGF)) {
+ pr_err("FIFO LOAD: Invalid usage of SGF and IMM\n");
+ goto err;
+ }
+ if ((flags & IMMED) && ((flags & EXT) || (length >> 16))) {
+ pr_err("FIFO LOAD: Invalid usage of EXT and IMM\n");
+ goto err;
+ }
+ }
+
+ /* write input data type field */
+ ret = __rta_map_opcode(src, fifo_load_table,
+ fifo_load_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("FIFO LOAD: Source value is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= val;
+
+ if (flags & CLASS1)
+ opcode |= FIFOLD_CLASS_CLASS1;
+ if (flags & CLASS2)
+ opcode |= FIFOLD_CLASS_CLASS2;
+ if (flags & BOTH)
+ opcode |= FIFOLD_CLASS_BOTH;
+
+ /* write fields: SGF|VLF, IMM, [LC1, LC2, F1] */
+ if (flags & FLUSH1)
+ opcode |= FIFOLD_TYPE_FLUSH1;
+ if (flags & LAST1)
+ opcode |= FIFOLD_TYPE_LAST1;
+ if (flags & LAST2)
+ opcode |= FIFOLD_TYPE_LAST2;
+ if (!is_seq_cmd) {
+ if (flags & SGF)
+ opcode |= FIFOLDST_SGF;
+ if (flags & IMMED)
+ opcode |= FIFOLD_IMM;
+ } else {
+ if (flags & VLF)
+ opcode |= FIFOLDST_VLF;
+ if (flags & AIDF)
+ opcode |= FIFOLD_AIDF;
+ }
+
+ /*
+ * Verify if extended length is required. In case of BITDATA, calculate
+ * number of full bytes and additional valid bits.
+ */
+ if ((flags & EXT) || (length >> 16)) {
+ opcode |= FIFOLDST_EXT;
+ if (src == BIT_DATA) {
+ ext_length = (length / 8);
+ length = (length % 8);
+ } else {
+ ext_length = length;
+ length = 0;
+ }
+ }
+ opcode |= (uint16_t) length;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (flags & IMMED)
+ __rta_inline_data(program, loc, flags & __COPY_MASK, length);
+ else if (!is_seq_cmd)
+ __rta_out64(program, program->ps, loc);
+
+ /* write extended length field */
+ if (opcode & FIFOLDST_EXT)
+ __rta_out32(program, ext_length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static const uint32_t fifo_store_table[][2] = {
+/*1*/ { PKA0, FIFOST_TYPE_PKHA_A0 },
+ { PKA1, FIFOST_TYPE_PKHA_A1 },
+ { PKA2, FIFOST_TYPE_PKHA_A2 },
+ { PKA3, FIFOST_TYPE_PKHA_A3 },
+ { PKB0, FIFOST_TYPE_PKHA_B0 },
+ { PKB1, FIFOST_TYPE_PKHA_B1 },
+ { PKB2, FIFOST_TYPE_PKHA_B2 },
+ { PKB3, FIFOST_TYPE_PKHA_B3 },
+ { PKA, FIFOST_TYPE_PKHA_A },
+ { PKB, FIFOST_TYPE_PKHA_B },
+ { PKN, FIFOST_TYPE_PKHA_N },
+ { PKE, FIFOST_TYPE_PKHA_E_JKEK },
+ { RNG, FIFOST_TYPE_RNGSTORE },
+ { RNGOFIFO, FIFOST_TYPE_RNGFIFO },
+ { AFHA_SBOX, FIFOST_TYPE_AF_SBOX_JKEK },
+ { MDHA_SPLIT_KEY, FIFOST_CLASS_CLASS2KEY | FIFOST_TYPE_SPLIT_KEK },
+ { MSG, FIFOST_TYPE_MESSAGE_DATA },
+ { KEY1, FIFOST_CLASS_CLASS1KEY | FIFOST_TYPE_KEY_KEK },
+ { KEY2, FIFOST_CLASS_CLASS2KEY | FIFOST_TYPE_KEY_KEK },
+ { OFIFO, FIFOST_TYPE_OUTFIFO_KEK},
+ { SKIP, FIFOST_TYPE_SKIP },
+/*22*/ { METADATA, FIFOST_TYPE_METADATA},
+ { MSG_CKSUM, FIFOST_TYPE_MESSAGE_DATA2 }
+};
+
+/*
+ * Allowed FIFO_STORE output data types for each SEC Era.
+ * Values represent the number of entries from fifo_store_table[] that are
+ * supported.
+ */
+static const unsigned int fifo_store_table_sz[] = {21, 21, 21, 21,
+ 22, 22, 22, 23};
+
+static inline int
+rta_fifo_store(struct program *program, uint32_t src,
+ uint32_t encrypt_flags, uint64_t dst,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ /* write command type field */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_FIFO_STORE;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_FIFO_STORE;
+ }
+
+ /* Parameter checking */
+ if (is_seq_cmd) {
+ if ((flags & VLF) && ((length >> 16) || (flags & EXT))) {
+ pr_err("SEQ FIFO STORE: Invalid usage of VLF\n");
+ goto err;
+ }
+ if (dst) {
+ pr_err("SEQ FIFO STORE: Invalid command\n");
+ goto err;
+ }
+ if ((src == METADATA) && (flags & (CONT | EXT))) {
+ pr_err("SEQ FIFO STORE: Invalid flags\n");
+ goto err;
+ }
+ } else {
+ if (((src == RNGOFIFO) && ((dst) || (flags & EXT))) ||
+ (src == METADATA)) {
+ pr_err("FIFO STORE: Invalid destination\n");
+ goto err;
+ }
+ }
+ if ((rta_sec_era == RTA_SEC_ERA_7) && (src == AFHA_SBOX)) {
+ pr_err("FIFO STORE: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write output data type field */
+ ret = __rta_map_opcode(src, fifo_store_table,
+ fifo_store_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("FIFO STORE: Source type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= val;
+
+ if (encrypt_flags & TK)
+ opcode |= (0x1 << FIFOST_TYPE_SHIFT);
+ if (encrypt_flags & EKT) {
+ if (rta_sec_era == RTA_SEC_ERA_1) {
+ pr_err("FIFO STORE: AES-CCM source types not supported\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ opcode |= (0x10 << FIFOST_TYPE_SHIFT);
+ opcode &= (uint32_t)~(0x20 << FIFOST_TYPE_SHIFT);
+ }
+
+ /* write flags fields */
+ if (flags & CONT)
+ opcode |= FIFOST_CONT;
+ if ((flags & VLF) && (is_seq_cmd))
+ opcode |= FIFOLDST_VLF;
+ if ((flags & SGF) && (!is_seq_cmd))
+ opcode |= FIFOLDST_SGF;
+ if (flags & CLASS1)
+ opcode |= FIFOST_CLASS_CLASS1KEY;
+ if (flags & CLASS2)
+ opcode |= FIFOST_CLASS_CLASS2KEY;
+ if (flags & BOTH)
+ opcode |= FIFOST_CLASS_BOTH;
+
+ /* Verify if extended length is required */
+ if ((length >> 16) || (flags & EXT))
+ opcode |= FIFOLDST_EXT;
+ else
+ opcode |= (uint16_t) length;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer field */
+ if ((!is_seq_cmd) && (dst))
+ __rta_out64(program, program->ps, dst);
+
+ /* write extended length field */
+ if (opcode & FIFOLDST_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_FIFO_LOAD_STORE_CMD_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
new file mode 100644
index 00000000..c2a27a2d
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
@@ -0,0 +1,251 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_HEADER_CMD_H__
+#define __RTA_HEADER_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed job header flags for each SEC Era. */
+static const uint32_t job_header_flags[] = {
+ DNR | TD | MTD | SHR | REO,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | EXT
+};
+
+/* Allowed shared header flags for each SEC Era. */
+static const uint32_t shr_header_flags[] = {
+ DNR | SC | PD,
+ DNR | SC | PD | CIF,
+ DNR | SC | PD | CIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF
+};
+
+static inline int
+rta_shr_header(struct program *program,
+ enum rta_share_type share,
+ unsigned int start_idx,
+ uint32_t flags)
+{
+ uint32_t opcode = CMD_SHARED_DESC_HDR;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & ~shr_header_flags[rta_sec_era]) {
+ pr_err("SHR_DESC: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (share) {
+ case SHR_ALWAYS:
+ opcode |= HDR_SHARE_ALWAYS;
+ break;
+ case SHR_SERIAL:
+ opcode |= HDR_SHARE_SERIAL;
+ break;
+ case SHR_NEVER:
+ /*
+ * opcode |= HDR_SHARE_NEVER;
+ * HDR_SHARE_NEVER is 0
+ */
+ break;
+ case SHR_WAIT:
+ opcode |= HDR_SHARE_WAIT;
+ break;
+ default:
+ pr_err("SHR_DESC: SHARE VALUE is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= HDR_ONE;
+ opcode |= (start_idx << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK;
+
+ if (flags & DNR)
+ opcode |= HDR_DNR;
+ if (flags & CIF)
+ opcode |= HDR_CLEAR_IFIFO;
+ if (flags & SC)
+ opcode |= HDR_SAVECTX;
+ if (flags & PD)
+ opcode |= HDR_PROP_DNR;
+ if (flags & RIF)
+ opcode |= HDR_RIF;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (program->current_instruction == 1)
+ program->shrhdr = program->buffer;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+static inline int
+rta_job_header(struct program *program,
+ enum rta_share_type share,
+ unsigned int start_idx,
+ uint64_t shr_desc, uint32_t flags,
+ uint32_t ext_flags)
+{
+ uint32_t opcode = CMD_DESC_HDR;
+ uint32_t hdr_ext = 0;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & ~job_header_flags[rta_sec_era]) {
+ pr_err("JOB_DESC: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (share) {
+ case SHR_ALWAYS:
+ opcode |= HDR_SHARE_ALWAYS;
+ break;
+ case SHR_SERIAL:
+ opcode |= HDR_SHARE_SERIAL;
+ break;
+ case SHR_NEVER:
+ /*
+ * opcode |= HDR_SHARE_NEVER;
+ * HDR_SHARE_NEVER is 0
+ */
+ break;
+ case SHR_WAIT:
+ opcode |= HDR_SHARE_WAIT;
+ break;
+ case SHR_DEFER:
+ opcode |= HDR_SHARE_DEFER;
+ break;
+ default:
+ pr_err("JOB_DESC: SHARE VALUE is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((flags & TD) && (flags & REO)) {
+ pr_err("JOB_DESC: REO flag not supported for trusted descriptors. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((rta_sec_era < RTA_SEC_ERA_7) && (flags & MTD) && !(flags & TD)) {
+ pr_err("JOB_DESC: Trying to MTD a descriptor that is not a TD. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((flags & EXT) && !(flags & SHR) && (start_idx < 2)) {
+ pr_err("JOB_DESC: Start index must be >= 2 in case of no SHR and EXT. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= HDR_ONE;
+ opcode |= ((start_idx << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK);
+
+ if (flags & EXT) {
+ opcode |= HDR_EXT;
+
+ if (ext_flags & DSV) {
+ hdr_ext |= HDR_EXT_DSEL_VALID;
+ hdr_ext |= ext_flags & DSEL_MASK;
+ }
+
+ if (ext_flags & FTD) {
+ if (rta_sec_era <= RTA_SEC_ERA_5) {
+ pr_err("JOB_DESC: Fake trusted descriptor not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ hdr_ext |= HDR_EXT_FTD;
+ }
+ }
+ if (flags & RSMS)
+ opcode |= HDR_RSLS;
+ if (flags & DNR)
+ opcode |= HDR_DNR;
+ if (flags & TD)
+ opcode |= HDR_TRUSTED;
+ if (flags & MTD)
+ opcode |= HDR_MAKE_TRUSTED;
+ if (flags & REO)
+ opcode |= HDR_REVERSE;
+ if (flags & SHR)
+ opcode |= HDR_SHARED;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (program->current_instruction == 1) {
+ program->jobhdr = program->buffer;
+
+ if (opcode & HDR_SHARED)
+ __rta_out64(program, program->ps, shr_desc);
+ }
+
+ if (flags & EXT)
+ __rta_out32(program, hdr_ext);
+
+ /* Note: descriptor length is set in program_finalize routine */
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+#endif /* __RTA_HEADER_CMD_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
new file mode 100644
index 00000000..2c85beec
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
@@ -0,0 +1,207 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_JUMP_CMD_H__
+#define __RTA_JUMP_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t jump_test_cond[][2] = {
+ { NIFP, JUMP_COND_NIFP },
+ { NIP, JUMP_COND_NIP },
+ { NOP, JUMP_COND_NOP },
+ { NCP, JUMP_COND_NCP },
+ { CALM, JUMP_COND_CALM },
+ { SELF, JUMP_COND_SELF },
+ { SHRD, JUMP_COND_SHRD },
+ { JQP, JUMP_COND_JQP },
+ { MATH_Z, JUMP_COND_MATH_Z },
+ { MATH_N, JUMP_COND_MATH_N },
+ { MATH_NV, JUMP_COND_MATH_NV },
+ { MATH_C, JUMP_COND_MATH_C },
+ { PK_0, JUMP_COND_PK_0 },
+ { PK_GCD_1, JUMP_COND_PK_GCD_1 },
+ { PK_PRIME, JUMP_COND_PK_PRIME },
+ { CLASS1, JUMP_CLASS_CLASS1 },
+ { CLASS2, JUMP_CLASS_CLASS2 },
+ { BOTH, JUMP_CLASS_BOTH }
+};
+
+static const uint32_t jump_test_math_cond[][2] = {
+ { MATH_Z, JUMP_COND_MATH_Z },
+ { MATH_N, JUMP_COND_MATH_N },
+ { MATH_NV, JUMP_COND_MATH_NV },
+ { MATH_C, JUMP_COND_MATH_C }
+};
+
+static const uint32_t jump_src_dst[][2] = {
+ { MATH0, JUMP_SRC_DST_MATH0 },
+ { MATH1, JUMP_SRC_DST_MATH1 },
+ { MATH2, JUMP_SRC_DST_MATH2 },
+ { MATH3, JUMP_SRC_DST_MATH3 },
+ { DPOVRD, JUMP_SRC_DST_DPOVRD },
+ { SEQINSZ, JUMP_SRC_DST_SEQINLEN },
+ { SEQOUTSZ, JUMP_SRC_DST_SEQOUTLEN },
+ { VSEQINSZ, JUMP_SRC_DST_VARSEQINLEN },
+ { VSEQOUTSZ, JUMP_SRC_DST_VARSEQOUTLEN }
+};
+
+static inline int
+rta_jump(struct program *program, uint64_t address,
+ enum rta_jump_type jump_type,
+ enum rta_jump_cond test_type,
+ uint32_t test_condition, uint32_t src_dst)
+{
+ uint32_t opcode = CMD_JUMP;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ if (((jump_type == GOSUB) || (jump_type == RETURN)) &&
+ (rta_sec_era < RTA_SEC_ERA_4)) {
+ pr_err("JUMP: Jump type not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ if (((jump_type == LOCAL_JUMP_INC) || (jump_type == LOCAL_JUMP_DEC)) &&
+ (rta_sec_era <= RTA_SEC_ERA_5)) {
+ pr_err("JUMP_INCDEC: Jump type not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (jump_type) {
+ case (LOCAL_JUMP):
+ /*
+ * opcode |= JUMP_TYPE_LOCAL;
+ * JUMP_TYPE_LOCAL is 0
+ */
+ break;
+ case (HALT):
+ opcode |= JUMP_TYPE_HALT;
+ break;
+ case (HALT_STATUS):
+ opcode |= JUMP_TYPE_HALT_USER;
+ break;
+ case (FAR_JUMP):
+ opcode |= JUMP_TYPE_NONLOCAL;
+ break;
+ case (GOSUB):
+ opcode |= JUMP_TYPE_GOSUB;
+ break;
+ case (RETURN):
+ opcode |= JUMP_TYPE_RETURN;
+ break;
+ case (LOCAL_JUMP_INC):
+ opcode |= JUMP_TYPE_LOCAL_INC;
+ break;
+ case (LOCAL_JUMP_DEC):
+ opcode |= JUMP_TYPE_LOCAL_DEC;
+ break;
+ default:
+ pr_err("JUMP: Invalid jump type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ switch (test_type) {
+ case (ALL_TRUE):
+ /*
+ * opcode |= JUMP_TEST_ALL;
+ * JUMP_TEST_ALL is 0
+ */
+ break;
+ case (ALL_FALSE):
+ opcode |= JUMP_TEST_INVALL;
+ break;
+ case (ANY_TRUE):
+ opcode |= JUMP_TEST_ANY;
+ break;
+ case (ANY_FALSE):
+ opcode |= JUMP_TEST_INVANY;
+ break;
+ default:
+ pr_err("JUMP: test type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ /* write test condition field */
+ if ((jump_type != LOCAL_JUMP_INC) && (jump_type != LOCAL_JUMP_DEC)) {
+ __rta_map_flags(test_condition, jump_test_cond,
+ ARRAY_SIZE(jump_test_cond), &opcode);
+ } else {
+ uint32_t val = 0;
+
+ ret = __rta_map_opcode(src_dst, jump_src_dst,
+ ARRAY_SIZE(jump_src_dst), &val);
+ if (ret < 0) {
+ pr_err("JUMP_INCDEC: SRC_DST not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ __rta_map_flags(test_condition, jump_test_math_cond,
+ ARRAY_SIZE(jump_test_math_cond), &opcode);
+ }
+
+ /* write local offset field for local jumps and user-defined halt */
+ if ((jump_type == LOCAL_JUMP) || (jump_type == LOCAL_JUMP_INC) ||
+ (jump_type == LOCAL_JUMP_DEC) || (jump_type == GOSUB) ||
+ (jump_type == HALT_STATUS))
+ opcode |= (uint32_t)(address & JUMP_OFFSET_MASK);
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (jump_type == FAR_JUMP)
+ __rta_out64(program, program->ps, address);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_JUMP_CMD_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
new file mode 100644
index 00000000..9bef1155
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
@@ -0,0 +1,222 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_KEY_CMD_H__
+#define __RTA_KEY_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed encryption flags for each SEC Era */
+static const uint32_t key_enc_flags[] = {
+ ENC,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK | PTS,
+ ENC | NWB | EKT | TK | PTS
+};
+
+static inline int
+rta_key(struct program *program, uint32_t key_dst,
+ uint32_t encrypt_flags, uint64_t src, uint32_t length,
+ uint32_t flags)
+{
+ uint32_t opcode = 0;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ if (encrypt_flags & ~key_enc_flags[rta_sec_era]) {
+ pr_err("KEY: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write cmd type */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_KEY;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_KEY;
+ }
+
+ /* check parameters */
+ if (is_seq_cmd) {
+ if ((flags & IMMED) || (flags & SGF)) {
+ pr_err("SEQKEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if ((rta_sec_era <= RTA_SEC_ERA_5) &&
+ ((flags & VLF) || (flags & AIDF))) {
+ pr_err("SEQKEY: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ } else {
+ if ((flags & AIDF) || (flags & VLF)) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if ((flags & SGF) && (flags & IMMED)) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+ if ((encrypt_flags & PTS) &&
+ ((encrypt_flags & ENC) || (encrypt_flags & NWB) ||
+ (key_dst == PKE))) {
+ pr_err("KEY: Invalid flag / destination. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (key_dst == AFHA_SBOX) {
+ if (rta_sec_era == RTA_SEC_ERA_7) {
+ pr_err("KEY: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ if (flags & IMMED) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /*
+ * Sbox data loaded into the ARC-4 processor must be exactly
+ * 258 bytes long, or else a data sequence error is generated.
+ */
+ if (length != 258) {
+ pr_err("KEY: Invalid length. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+ /* write key destination and class fields */
+ switch (key_dst) {
+ case (KEY1):
+ opcode |= KEY_DEST_CLASS1;
+ break;
+ case (KEY2):
+ opcode |= KEY_DEST_CLASS2;
+ break;
+ case (PKE):
+ opcode |= KEY_DEST_CLASS1 | KEY_DEST_PKHA_E;
+ break;
+ case (AFHA_SBOX):
+ opcode |= KEY_DEST_CLASS1 | KEY_DEST_AFHA_SBOX;
+ break;
+ case (MDHA_SPLIT_KEY):
+ opcode |= KEY_DEST_CLASS2 | KEY_DEST_MDHA_SPLIT;
+ break;
+ default:
+ pr_err("KEY: Invalid destination. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ /* write key length */
+ length &= KEY_LENGTH_MASK;
+ opcode |= length;
+
+ /* write key command specific flags */
+ if (encrypt_flags & ENC) {
+ /* Encrypted (black) keys must be padded to 8 bytes (CCM) or
+ * 16 bytes (ECB) depending on EKT bit. AES-CCM encrypted keys
+ * (EKT = 1) have 6-byte nonce and 6-byte MAC after padding.
+ */
+ opcode |= KEY_ENC;
+ if (encrypt_flags & EKT) {
+ opcode |= KEY_EKT;
+ length = ALIGN(length, 8);
+ length += 12;
+ } else {
+ length = ALIGN(length, 16);
+ }
+ if (encrypt_flags & TK)
+ opcode |= KEY_TK;
+ }
+ if (encrypt_flags & NWB)
+ opcode |= KEY_NWB;
+ if (encrypt_flags & PTS)
+ opcode |= KEY_PTS;
+
+ /* write general command flags */
+ if (!is_seq_cmd) {
+ if (flags & IMMED)
+ opcode |= KEY_IMM;
+ if (flags & SGF)
+ opcode |= KEY_SGF;
+ } else {
+ if (flags & AIDF)
+ opcode |= KEY_AIDF;
+ if (flags & VLF)
+ opcode |= KEY_VLF;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+ else
+ __rta_out64(program, program->ps, src);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+#endif /* __RTA_KEY_CMD_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
new file mode 100644
index 00000000..1db55b33
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
@@ -0,0 +1,335 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_LOAD_CMD_H__
+#define __RTA_LOAD_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed length and offset masks for each SEC Era in case DST = DCTRL */
+static const uint32_t load_len_mask_allowed[] = {
+ 0x000000ee,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe
+};
+
+static const uint32_t load_off_mask_allowed[] = {
+ 0x0000000f,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff
+};
+
+#define IMM_MUST 0
+#define IMM_CAN 1
+#define IMM_NO 2
+#define IMM_DSNM 3 /* it doesn't matter the src type */
+
+enum e_lenoff {
+ LENOF_03,
+ LENOF_4,
+ LENOF_48,
+ LENOF_448,
+ LENOF_18,
+ LENOF_32,
+ LENOF_24,
+ LENOF_16,
+ LENOF_8,
+ LENOF_128,
+ LENOF_256,
+ DSNM /* it doesn't matter the length/offset values */
+};
+
+struct load_map {
+ uint32_t dst;
+ uint32_t dst_opcode;
+ enum e_lenoff len_off;
+ uint8_t imm_src;
+
+};
+
+static const struct load_map load_dst[] = {
+/*1*/ { KEY1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_KEYSZ_REG,
+ LENOF_4, IMM_MUST },
+ { KEY2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_KEYSZ_REG,
+ LENOF_4, IMM_MUST },
+ { DATA1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DATASZ_REG,
+ LENOF_448, IMM_MUST },
+ { DATA2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_DATASZ_REG,
+ LENOF_448, IMM_MUST },
+ { ICV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ICVSZ_REG,
+ LENOF_4, IMM_MUST },
+ { ICV2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_ICVSZ_REG,
+ LENOF_4, IMM_MUST },
+ { CCTRL, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_CHACTRL,
+ LENOF_4, IMM_MUST },
+ { DCTRL, LDST_CLASS_DECO | LDST_IMM | LDST_SRCDST_WORD_DECOCTRL,
+ DSNM, IMM_DSNM },
+ { ICTRL, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_IRQCTRL,
+ LENOF_4, IMM_MUST },
+ { DPOVRD, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_PCLOVRD,
+ LENOF_4, IMM_MUST },
+ { CLRW, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_CLRW,
+ LENOF_4, IMM_MUST },
+ { AAD1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DECO_AAD_SZ,
+ LENOF_4, IMM_MUST },
+ { IV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_CLASS1_IV_SZ,
+ LENOF_4, IMM_MUST },
+ { ALTDS1, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ALTDS_CLASS1,
+ LENOF_448, IMM_MUST },
+ { PKASZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_A_SZ,
+ LENOF_4, IMM_MUST, },
+ { PKBSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_B_SZ,
+ LENOF_4, IMM_MUST },
+ { PKNSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_N_SZ,
+ LENOF_4, IMM_MUST },
+ { PKESZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_E_SZ,
+ LENOF_4, IMM_MUST },
+ { NFIFO, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO,
+ LENOF_48, IMM_MUST },
+ { IFIFO, LDST_SRCDST_BYTE_INFIFO, LENOF_18, IMM_MUST },
+ { OFIFO, LDST_SRCDST_BYTE_OUTFIFO, LENOF_18, IMM_MUST },
+ { MATH0, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH0,
+ LENOF_32, IMM_CAN },
+ { MATH1, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH1,
+ LENOF_24, IMM_CAN },
+ { MATH2, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH2,
+ LENOF_16, IMM_CAN },
+ { MATH3, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3,
+ LENOF_8, IMM_CAN },
+ { CONTEXT1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT,
+ LENOF_128, IMM_CAN },
+ { CONTEXT2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT,
+ LENOF_128, IMM_CAN },
+ { KEY1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_KEY,
+ LENOF_32, IMM_CAN },
+ { KEY2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY,
+ LENOF_32, IMM_CAN },
+ { DESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF,
+ LENOF_256, IMM_NO },
+ { DPID, LDST_CLASS_DECO | LDST_SRCDST_WORD_PID,
+ LENOF_448, IMM_MUST },
+/*32*/ { IDFNS, LDST_SRCDST_WORD_IFNSR, LENOF_18, IMM_MUST },
+ { ODFNS, LDST_SRCDST_WORD_OFNSR, LENOF_18, IMM_MUST },
+ { ALTSOURCE, LDST_SRCDST_BYTE_ALTSOURCE, LENOF_18, IMM_MUST },
+/*35*/ { NFIFO_SZL, LDST_SRCDST_WORD_INFO_FIFO_SZL, LENOF_48, IMM_MUST },
+ { NFIFO_SZM, LDST_SRCDST_WORD_INFO_FIFO_SZM, LENOF_03, IMM_MUST },
+ { NFIFO_L, LDST_SRCDST_WORD_INFO_FIFO_L, LENOF_48, IMM_MUST },
+ { NFIFO_M, LDST_SRCDST_WORD_INFO_FIFO_M, LENOF_03, IMM_MUST },
+ { SZL, LDST_SRCDST_WORD_SZL, LENOF_48, IMM_MUST },
+/*40*/ { SZM, LDST_SRCDST_WORD_SZM, LENOF_03, IMM_MUST }
+};
+
+/*
+ * Allowed LOAD destinations for each SEC Era.
+ * Values represent the number of entries from load_dst[] that are supported.
+ */
+static const unsigned int load_dst_sz[] = { 31, 34, 34, 40, 40, 40, 40, 40 };
+
+static inline int
+load_check_len_offset(int pos, uint32_t length, uint32_t offset)
+{
+ if ((load_dst[pos].dst == DCTRL) &&
+ ((length & ~load_len_mask_allowed[rta_sec_era]) ||
+ (offset & ~load_off_mask_allowed[rta_sec_era])))
+ goto err;
+
+ switch (load_dst[pos].len_off) {
+ case (LENOF_03):
+ if ((length > 3) || (offset))
+ goto err;
+ break;
+ case (LENOF_4):
+ if ((length != 4) || (offset != 0))
+ goto err;
+ break;
+ case (LENOF_48):
+ if (!(((length == 4) && (offset == 0)) ||
+ ((length == 8) && (offset == 0))))
+ goto err;
+ break;
+ case (LENOF_448):
+ if (!(((length == 4) && (offset == 0)) ||
+ ((length == 4) && (offset == 4)) ||
+ ((length == 8) && (offset == 0))))
+ goto err;
+ break;
+ case (LENOF_18):
+ if ((length < 1) || (length > 8) || (offset != 0))
+ goto err;
+ break;
+ case (LENOF_32):
+ if ((length > 32) || (offset > 32) || ((offset + length) > 32))
+ goto err;
+ break;
+ case (LENOF_24):
+ if ((length > 24) || (offset > 24) || ((offset + length) > 24))
+ goto err;
+ break;
+ case (LENOF_16):
+ if ((length > 16) || (offset > 16) || ((offset + length) > 16))
+ goto err;
+ break;
+ case (LENOF_8):
+ if ((length > 8) || (offset > 8) || ((offset + length) > 8))
+ goto err;
+ break;
+ case (LENOF_128):
+ if ((length > 128) || (offset > 128) ||
+ ((offset + length) > 128))
+ goto err;
+ break;
+ case (LENOF_256):
+ if ((length < 1) || (length > 256) || ((length + offset) > 256))
+ goto err;
+ break;
+ case (DSNM):
+ break;
+ default:
+ goto err;
+ }
+
+ return 0;
+err:
+ return -EINVAL;
+}
+
+static inline int
+rta_load(struct program *program, uint64_t src, uint64_t dst,
+ uint32_t offset, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ int pos = -1, ret = -EINVAL;
+ unsigned int start_pc = program->current_pc, i;
+
+ if (flags & SEQ)
+ opcode = CMD_SEQ_LOAD;
+ else
+ opcode = CMD_LOAD;
+
+ if ((length & 0xffffff00) || (offset & 0xffffff00)) {
+ pr_err("LOAD: Bad length/offset passed. Should be 8 bits\n");
+ goto err;
+ }
+
+ if (flags & SGF)
+ opcode |= LDST_SGF;
+ if (flags & VLF)
+ opcode |= LDST_VLF;
+
+ /* check load destination, length and offset and source type */
+ for (i = 0; i < load_dst_sz[rta_sec_era]; i++)
+ if (dst == load_dst[i].dst) {
+ pos = (int)i;
+ break;
+ }
+ if (-1 == pos) {
+ pr_err("LOAD: Invalid dst. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if (flags & IMMED) {
+ if (load_dst[pos].imm_src == IMM_NO) {
+ pr_err("LOAD: Invalid source type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= LDST_IMM;
+ } else if (load_dst[pos].imm_src == IMM_MUST) {
+ pr_err("LOAD IMM: Invalid source type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ ret = load_check_len_offset(pos, length, offset);
+ if (ret < 0) {
+ pr_err("LOAD: Invalid length/offset. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= load_dst[pos].dst_opcode;
+
+ /* DESC BUFFER: length / offset values are specified in 4-byte words */
+ if (dst == DESCBUF) {
+ opcode |= (length >> 2);
+ opcode |= ((offset >> 2) << LDST_OFFSET_SHIFT);
+ } else {
+ opcode |= length;
+ opcode |= (offset << LDST_OFFSET_SHIFT);
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* DECO CONTROL: skip writing pointer of imm data */
+ if (dst == DCTRL)
+ return (int)start_pc;
+
+ /*
+ * For data copy, 3 possible ways to specify how to copy data:
+ * - IMMED & !COPY: copy data directly from src( max 8 bytes)
+ * - IMMED & COPY: copy data imm from the location specified by user
+ * - !IMMED and is not SEQ cmd: copy the address
+ */
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+ else if (!(flags & SEQ))
+ __rta_out64(program, program->ps, src);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_LOAD_CMD_H__*/
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
new file mode 100644
index 00000000..a9b9091d
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
@@ -0,0 +1,402 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_MATH_CMD_H__
+#define __RTA_MATH_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t math_op1[][2] = {
+/*1*/ { MATH0, MATH_SRC0_REG0 },
+ { MATH1, MATH_SRC0_REG1 },
+ { MATH2, MATH_SRC0_REG2 },
+ { MATH3, MATH_SRC0_REG3 },
+ { SEQINSZ, MATH_SRC0_SEQINLEN },
+ { SEQOUTSZ, MATH_SRC0_SEQOUTLEN },
+ { VSEQINSZ, MATH_SRC0_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_SRC0_VARSEQOUTLEN },
+ { ZERO, MATH_SRC0_ZERO },
+/*10*/ { NONE, 0 }, /* dummy value */
+ { DPOVRD, MATH_SRC0_DPOVRD },
+ { ONE, MATH_SRC0_ONE }
+};
+
+/*
+ * Allowed MATH op1 sources for each SEC Era.
+ * Values represent the number of entries from math_op1[] that are supported.
+ */
+static const unsigned int math_op1_sz[] = {10, 10, 12, 12, 12, 12, 12, 12};
+
+static const uint32_t math_op2[][2] = {
+/*1*/ { MATH0, MATH_SRC1_REG0 },
+ { MATH1, MATH_SRC1_REG1 },
+ { MATH2, MATH_SRC1_REG2 },
+ { MATH3, MATH_SRC1_REG3 },
+ { ABD, MATH_SRC1_INFIFO },
+ { OFIFO, MATH_SRC1_OUTFIFO },
+ { ONE, MATH_SRC1_ONE },
+/*8*/ { NONE, 0 }, /* dummy value */
+ { JOBSRC, MATH_SRC1_JOBSOURCE },
+ { DPOVRD, MATH_SRC1_DPOVRD },
+ { VSEQINSZ, MATH_SRC1_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_SRC1_VARSEQOUTLEN },
+/*13*/ { ZERO, MATH_SRC1_ZERO }
+};
+
+/*
+ * Allowed MATH op2 sources for each SEC Era.
+ * Values represent the number of entries from math_op2[] that are supported.
+ */
+static const unsigned int math_op2_sz[] = {8, 9, 13, 13, 13, 13, 13, 13};
+
+static const uint32_t math_result[][2] = {
+/*1*/ { MATH0, MATH_DEST_REG0 },
+ { MATH1, MATH_DEST_REG1 },
+ { MATH2, MATH_DEST_REG2 },
+ { MATH3, MATH_DEST_REG3 },
+ { SEQINSZ, MATH_DEST_SEQINLEN },
+ { SEQOUTSZ, MATH_DEST_SEQOUTLEN },
+ { VSEQINSZ, MATH_DEST_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_DEST_VARSEQOUTLEN },
+/*9*/ { NONE, MATH_DEST_NONE },
+ { DPOVRD, MATH_DEST_DPOVRD }
+};
+
+/*
+ * Allowed MATH result destinations for each SEC Era.
+ * Values represent the number of entries from math_result[] that are
+ * supported.
+ */
+static const unsigned int math_result_sz[] = {9, 9, 10, 10, 10, 10, 10, 10};
+
+static inline int
+rta_math(struct program *program, uint64_t operand1,
+ uint32_t op, uint64_t operand2, uint32_t result,
+ int length, uint32_t options)
+{
+ uint32_t opcode = CMD_MATH;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (((op == MATH_FUN_BSWAP) && (rta_sec_era < RTA_SEC_ERA_4)) ||
+ ((op == MATH_FUN_ZBYT) && (rta_sec_era < RTA_SEC_ERA_2))) {
+ pr_err("MATH: operation not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if (options & SWP) {
+ if (rta_sec_era < RTA_SEC_ERA_7) {
+ pr_err("MATH: operation not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if ((options & IFB) ||
+ (!(options & IMMED) && !(options & IMMED2)) ||
+ ((options & IMMED) && (options & IMMED2))) {
+ pr_err("MATH: SWP - invalid configuration. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+ /*
+ * SHLD operation is different from others and we
+ * assume that we can have _NONE as first operand
+ * or _SEQINSZ as second operand
+ */
+ if ((op != MATH_FUN_SHLD) && ((operand1 == NONE) ||
+ (operand2 == SEQINSZ))) {
+ pr_err("MATH: Invalid operand. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ /*
+ * We first check if it is unary operation. In that
+ * case second operand must be _NONE
+ */
+ if (((op == MATH_FUN_ZBYT) || (op == MATH_FUN_BSWAP)) &&
+ (operand2 != NONE)) {
+ pr_err("MATH: Invalid operand2. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ /* Write first operand field */
+ if (options & IMMED) {
+ opcode |= MATH_SRC0_IMM;
+ } else {
+ ret = __rta_map_opcode((uint32_t)operand1, math_op1,
+ math_op1_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATH: operand1 not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* Write second operand field */
+ if (options & IMMED2) {
+ opcode |= MATH_SRC1_IMM;
+ } else {
+ ret = __rta_map_opcode((uint32_t)operand2, math_op2,
+ math_op2_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATH: operand2 not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* Write result field */
+ ret = __rta_map_opcode(result, math_result, math_result_sz[rta_sec_era],
+ &val);
+ if (ret < 0) {
+ pr_err("MATH: result not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /*
+ * as we encode operations with their "real" values, we do not
+ * to translate but we do need to validate the value
+ */
+ switch (op) {
+ /*Binary operators */
+ case (MATH_FUN_ADD):
+ case (MATH_FUN_ADDC):
+ case (MATH_FUN_SUB):
+ case (MATH_FUN_SUBB):
+ case (MATH_FUN_OR):
+ case (MATH_FUN_AND):
+ case (MATH_FUN_XOR):
+ case (MATH_FUN_LSHIFT):
+ case (MATH_FUN_RSHIFT):
+ case (MATH_FUN_SHLD):
+ /* Unary operators */
+ case (MATH_FUN_ZBYT):
+ case (MATH_FUN_BSWAP):
+ opcode |= op;
+ break;
+ default:
+ pr_err("MATH: operator is not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ opcode |= (options & ~(IMMED | IMMED2));
+
+ /* Verify length */
+ switch (length) {
+ case (1):
+ opcode |= MATH_LEN_1BYTE;
+ break;
+ case (2):
+ opcode |= MATH_LEN_2BYTE;
+ break;
+ case (4):
+ opcode |= MATH_LEN_4BYTE;
+ break;
+ case (8):
+ opcode |= MATH_LEN_8BYTE;
+ break;
+ default:
+ pr_err("MATH: length is not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* Write immediate value */
+ if ((options & IMMED) && !(options & IMMED2)) {
+ __rta_out64(program, (length > 4) && !(options & IFB),
+ operand1);
+ } else if ((options & IMMED2) && !(options & IMMED)) {
+ __rta_out64(program, (length > 4) && !(options & IFB),
+ operand2);
+ } else if ((options & IMMED) && (options & IMMED2)) {
+ __rta_out32(program, lower_32_bits(operand1));
+ __rta_out32(program, lower_32_bits(operand2));
+ }
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static inline int
+rta_mathi(struct program *program, uint64_t operand,
+ uint32_t op, uint8_t imm, uint32_t result,
+ int length, uint32_t options)
+{
+ uint32_t opcode = CMD_MATHI;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (rta_sec_era < RTA_SEC_ERA_6) {
+ pr_err("MATHI: Command not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if (((op == MATH_FUN_FBYT) && (options & SSEL))) {
+ pr_err("MATHI: Illegal combination - FBYT and SSEL. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if ((options & SWP) && (rta_sec_era < RTA_SEC_ERA_7)) {
+ pr_err("MATHI: SWP not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /* Write first operand field */
+ if (!(options & SSEL))
+ ret = __rta_map_opcode((uint32_t)operand, math_op1,
+ math_op1_sz[rta_sec_era], &val);
+ else
+ ret = __rta_map_opcode((uint32_t)operand, math_op2,
+ math_op2_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATHI: operand not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (!(options & SSEL))
+ opcode |= val;
+ else
+ opcode |= (val << (MATHI_SRC1_SHIFT - MATH_SRC1_SHIFT));
+
+ /* Write second operand field */
+ opcode |= (imm << MATHI_IMM_SHIFT);
+
+ /* Write result field */
+ ret = __rta_map_opcode(result, math_result, math_result_sz[rta_sec_era],
+ &val);
+ if (ret < 0) {
+ pr_err("MATHI: result not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= (val << (MATHI_DEST_SHIFT - MATH_DEST_SHIFT));
+
+ /*
+ * as we encode operations with their "real" values, we do not have to
+ * translate but we do need to validate the value
+ */
+ switch (op) {
+ case (MATH_FUN_ADD):
+ case (MATH_FUN_ADDC):
+ case (MATH_FUN_SUB):
+ case (MATH_FUN_SUBB):
+ case (MATH_FUN_OR):
+ case (MATH_FUN_AND):
+ case (MATH_FUN_XOR):
+ case (MATH_FUN_LSHIFT):
+ case (MATH_FUN_RSHIFT):
+ case (MATH_FUN_FBYT):
+ opcode |= op;
+ break;
+ default:
+ pr_err("MATHI: operator not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ opcode |= options;
+
+ /* Verify length */
+ switch (length) {
+ case (1):
+ opcode |= MATH_LEN_1BYTE;
+ break;
+ case (2):
+ opcode |= MATH_LEN_2BYTE;
+ break;
+ case (4):
+ opcode |= MATH_LEN_4BYTE;
+ break;
+ case (8):
+ opcode |= MATH_LEN_8BYTE;
+ break;
+ default:
+ pr_err("MATHI: length %d not supported. SEC PC: %d; Instr: %d\n",
+ length, program->current_pc,
+ program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_MATH_CMD_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
new file mode 100644
index 00000000..10d2e193
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
@@ -0,0 +1,445 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_MOVE_CMD_H__
+#define __RTA_MOVE_CMD_H__
+
+#define MOVE_SET_AUX_SRC 0x01
+#define MOVE_SET_AUX_DST 0x02
+#define MOVE_SET_AUX_LS 0x03
+#define MOVE_SET_LEN_16b 0x04
+
+#define MOVE_SET_AUX_MATH 0x10
+#define MOVE_SET_AUX_MATH_SRC (MOVE_SET_AUX_SRC | MOVE_SET_AUX_MATH)
+#define MOVE_SET_AUX_MATH_DST (MOVE_SET_AUX_DST | MOVE_SET_AUX_MATH)
+
+#define MASK_16b 0xFF
+
+/* MOVE command type */
+#define __MOVE 1
+#define __MOVEB 2
+#define __MOVEDW 3
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t move_src_table[][2] = {
+/*1*/ { CONTEXT1, MOVE_SRC_CLASS1CTX },
+ { CONTEXT2, MOVE_SRC_CLASS2CTX },
+ { OFIFO, MOVE_SRC_OUTFIFO },
+ { DESCBUF, MOVE_SRC_DESCBUF },
+ { MATH0, MOVE_SRC_MATH0 },
+ { MATH1, MOVE_SRC_MATH1 },
+ { MATH2, MOVE_SRC_MATH2 },
+ { MATH3, MOVE_SRC_MATH3 },
+/*9*/ { IFIFOABD, MOVE_SRC_INFIFO },
+ { IFIFOAB1, MOVE_SRC_INFIFO_CL | MOVE_AUX_LS },
+ { IFIFOAB2, MOVE_SRC_INFIFO_CL },
+/*12*/ { ABD, MOVE_SRC_INFIFO_NO_NFIFO },
+ { AB1, MOVE_SRC_INFIFO_NO_NFIFO | MOVE_AUX_LS },
+ { AB2, MOVE_SRC_INFIFO_NO_NFIFO | MOVE_AUX_MS }
+};
+
+/* Allowed MOVE / MOVE_LEN sources for each SEC Era.
+ * Values represent the number of entries from move_src_table[] that are
+ * supported.
+ */
+static const unsigned int move_src_table_sz[] = {9, 11, 14, 14, 14, 14, 14, 14};
+
+static const uint32_t move_dst_table[][2] = {
+/*1*/ { CONTEXT1, MOVE_DEST_CLASS1CTX },
+ { CONTEXT2, MOVE_DEST_CLASS2CTX },
+ { OFIFO, MOVE_DEST_OUTFIFO },
+ { DESCBUF, MOVE_DEST_DESCBUF },
+ { MATH0, MOVE_DEST_MATH0 },
+ { MATH1, MOVE_DEST_MATH1 },
+ { MATH2, MOVE_DEST_MATH2 },
+ { MATH3, MOVE_DEST_MATH3 },
+ { IFIFOAB1, MOVE_DEST_CLASS1INFIFO },
+ { IFIFOAB2, MOVE_DEST_CLASS2INFIFO },
+ { PKA, MOVE_DEST_PK_A },
+ { KEY1, MOVE_DEST_CLASS1KEY },
+ { KEY2, MOVE_DEST_CLASS2KEY },
+/*14*/ { IFIFO, MOVE_DEST_INFIFO },
+/*15*/ { ALTSOURCE, MOVE_DEST_ALTSOURCE}
+};
+
+/* Allowed MOVE / MOVE_LEN destinations for each SEC Era.
+ * Values represent the number of entries from move_dst_table[] that are
+ * supported.
+ */
+static const
+unsigned int move_dst_table_sz[] = {13, 14, 14, 15, 15, 15, 15, 15};
+
+static inline int
+set_move_offset(struct program *program __maybe_unused,
+ uint64_t src, uint16_t src_offset,
+ uint64_t dst, uint16_t dst_offset,
+ uint16_t *offset, uint16_t *opt);
+
+static inline int
+math_offset(uint16_t offset);
+
+static inline int
+rta_move(struct program *program, int cmd_type, uint64_t src,
+ uint16_t src_offset, uint64_t dst,
+ uint16_t dst_offset, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint16_t offset = 0, opt = 0;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ bool is_move_len_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ if ((rta_sec_era < RTA_SEC_ERA_7) && (cmd_type != __MOVE)) {
+ pr_err("MOVE: MOVEB / MOVEDW not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /* write command type */
+ if (cmd_type == __MOVEB) {
+ opcode = CMD_MOVEB;
+ } else if (cmd_type == __MOVEDW) {
+ opcode = CMD_MOVEDW;
+ } else if (!(flags & IMMED)) {
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ pr_err("MOVE: MOVE_LEN not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if ((length != MATH0) && (length != MATH1) &&
+ (length != MATH2) && (length != MATH3)) {
+ pr_err("MOVE: MOVE_LEN length must be MATH[0-3]. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ opcode = CMD_MOVE_LEN;
+ is_move_len_cmd = true;
+ } else {
+ opcode = CMD_MOVE;
+ }
+
+ /* write offset first, to check for invalid combinations or incorrect
+ * offset values sooner; decide which offset should be here
+ * (src or dst)
+ */
+ ret = set_move_offset(program, src, src_offset, dst, dst_offset,
+ &offset, &opt);
+ if (ret < 0)
+ goto err;
+
+ opcode |= (offset << MOVE_OFFSET_SHIFT) & MOVE_OFFSET_MASK;
+
+ /* set AUX field if required */
+ if (opt == MOVE_SET_AUX_SRC) {
+ opcode |= ((src_offset / 16) << MOVE_AUX_SHIFT) & MOVE_AUX_MASK;
+ } else if (opt == MOVE_SET_AUX_DST) {
+ opcode |= ((dst_offset / 16) << MOVE_AUX_SHIFT) & MOVE_AUX_MASK;
+ } else if (opt == MOVE_SET_AUX_LS) {
+ opcode |= MOVE_AUX_LS;
+ } else if (opt & MOVE_SET_AUX_MATH) {
+ if (opt & MOVE_SET_AUX_SRC)
+ offset = src_offset;
+ else
+ offset = dst_offset;
+
+ if (rta_sec_era < RTA_SEC_ERA_6) {
+ if (offset)
+ pr_debug("MOVE: Offset not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era),
+ program->current_pc,
+ program->current_instruction);
+ /* nothing to do for offset = 0 */
+ } else {
+ ret = math_offset(offset);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid offset in MATH register. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ opcode |= (uint32_t)ret;
+ }
+ }
+
+ /* write source field */
+ ret = __rta_map_opcode((uint32_t)src, move_src_table,
+ move_src_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid SRC. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write destination field */
+ ret = __rta_map_opcode((uint32_t)dst, move_dst_table,
+ move_dst_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write flags */
+ if (flags & (FLUSH1 | FLUSH2))
+ opcode |= MOVE_AUX_MS;
+ if (flags & (LAST2 | LAST1))
+ opcode |= MOVE_AUX_LS;
+ if (flags & WAITCOMP)
+ opcode |= MOVE_WAITCOMP;
+
+ if (!is_move_len_cmd) {
+ /* write length */
+ if (opt == MOVE_SET_LEN_16b)
+ opcode |= (length & (MOVE_OFFSET_MASK | MOVE_LEN_MASK));
+ else
+ opcode |= (length & MOVE_LEN_MASK);
+ } else {
+ /* write mrsel */
+ switch (length) {
+ case (MATH0):
+ /*
+ * opcode |= MOVELEN_MRSEL_MATH0;
+ * MOVELEN_MRSEL_MATH0 is 0
+ */
+ break;
+ case (MATH1):
+ opcode |= MOVELEN_MRSEL_MATH1;
+ break;
+ case (MATH2):
+ opcode |= MOVELEN_MRSEL_MATH2;
+ break;
+ case (MATH3):
+ opcode |= MOVELEN_MRSEL_MATH3;
+ break;
+ }
+
+ /* write size */
+ if (rta_sec_era >= RTA_SEC_ERA_7) {
+ if (flags & SIZE_WORD)
+ opcode |= MOVELEN_SIZE_WORD;
+ else if (flags & SIZE_BYTE)
+ opcode |= MOVELEN_SIZE_BYTE;
+ else if (flags & SIZE_DWORD)
+ opcode |= MOVELEN_SIZE_DWORD;
+ }
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static inline int
+set_move_offset(struct program *program __maybe_unused,
+ uint64_t src, uint16_t src_offset,
+ uint64_t dst, uint16_t dst_offset,
+ uint16_t *offset, uint16_t *opt)
+{
+ switch (src) {
+ case (CONTEXT1):
+ case (CONTEXT2):
+ if (dst == DESCBUF) {
+ *opt = MOVE_SET_AUX_SRC;
+ *offset = dst_offset;
+ } else if ((dst == KEY1) || (dst == KEY2)) {
+ if ((src_offset) && (dst_offset)) {
+ pr_err("MOVE: Bad offset. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if (dst_offset) {
+ *opt = MOVE_SET_AUX_LS;
+ *offset = dst_offset;
+ } else {
+ *offset = src_offset;
+ }
+ } else {
+ if ((dst == MATH0) || (dst == MATH1) ||
+ (dst == MATH2) || (dst == MATH3)) {
+ *opt = MOVE_SET_AUX_MATH_DST;
+ } else if (((dst == OFIFO) || (dst == ALTSOURCE)) &&
+ (src_offset % 4)) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ *offset = src_offset;
+ }
+ break;
+
+ case (OFIFO):
+ if (dst == OFIFO) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if (((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA)) &&
+ (src_offset || dst_offset)) {
+ pr_err("MOVE: Offset should be zero. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = dst_offset;
+ break;
+
+ case (DESCBUF):
+ if ((dst == CONTEXT1) || (dst == CONTEXT2)) {
+ *opt = MOVE_SET_AUX_DST;
+ } else if ((dst == MATH0) || (dst == MATH1) ||
+ (dst == MATH2) || (dst == MATH3)) {
+ *opt = MOVE_SET_AUX_MATH_DST;
+ } else if (dst == DESCBUF) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ } else if (((dst == OFIFO) || (dst == ALTSOURCE)) &&
+ (src_offset % 4)) {
+ pr_err("MOVE: Invalid offset alignment. SEC PC: %d; Instr %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ *offset = src_offset;
+ break;
+
+ case (MATH0):
+ case (MATH1):
+ case (MATH2):
+ case (MATH3):
+ if ((dst == OFIFO) || (dst == ALTSOURCE)) {
+ if (src_offset % 4) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = src_offset;
+ } else if ((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA)) {
+ *offset = src_offset;
+ } else {
+ *offset = dst_offset;
+
+ /*
+ * This condition is basically the negation of:
+ * dst in { CONTEXT[1-2], MATH[0-3] }
+ */
+ if ((dst != KEY1) && (dst != KEY2))
+ *opt = MOVE_SET_AUX_MATH_SRC;
+ }
+ break;
+
+ case (IFIFOABD):
+ case (IFIFOAB1):
+ case (IFIFOAB2):
+ case (ABD):
+ case (AB1):
+ case (AB2):
+ if ((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA) || (dst == ALTSOURCE)) {
+ pr_err("MOVE: Bad DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ } else {
+ if (dst == OFIFO) {
+ *opt = MOVE_SET_LEN_16b;
+ } else {
+ if (dst_offset % 4) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = dst_offset;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+ err:
+ return -EINVAL;
+}
+
+static inline int
+math_offset(uint16_t offset)
+{
+ switch (offset) {
+ case 0:
+ return 0;
+ case 4:
+ return MOVE_AUX_LS;
+ case 6:
+ return MOVE_AUX_MS;
+ case 7:
+ return MOVE_AUX_LS | MOVE_AUX_MS;
+ }
+
+ return -EINVAL;
+}
+
+#endif /* __RTA_MOVE_CMD_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
new file mode 100644
index 00000000..30be0825
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
@@ -0,0 +1,196 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_NFIFO_CMD_H__
+#define __RTA_NFIFO_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t nfifo_src[][2] = {
+/*1*/ { IFIFO, NFIFOENTRY_STYPE_DFIFO },
+ { OFIFO, NFIFOENTRY_STYPE_OFIFO },
+ { PAD, NFIFOENTRY_STYPE_PAD },
+/*4*/ { MSGOUTSNOOP, NFIFOENTRY_STYPE_SNOOP | NFIFOENTRY_DEST_BOTH },
+/*5*/ { ALTSOURCE, NFIFOENTRY_STYPE_ALTSOURCE },
+ { OFIFO_SYNC, NFIFOENTRY_STYPE_OFIFO_SYNC },
+/*7*/ { MSGOUTSNOOP_ALT, NFIFOENTRY_STYPE_SNOOP_ALT | NFIFOENTRY_DEST_BOTH }
+};
+
+/*
+ * Allowed NFIFO LOAD sources for each SEC Era.
+ * Values represent the number of entries from nfifo_src[] that are supported.
+ */
+static const unsigned int nfifo_src_sz[] = {4, 5, 5, 5, 5, 5, 5, 7};
+
+static const uint32_t nfifo_data[][2] = {
+ { MSG, NFIFOENTRY_DTYPE_MSG },
+ { MSG1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_MSG },
+ { MSG2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_MSG },
+ { IV1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_IV },
+ { IV2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_IV },
+ { ICV1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_ICV },
+ { ICV2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_ICV },
+ { SAD1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_SAD },
+ { AAD1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_AAD },
+ { AAD2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_AAD },
+ { AFHA_SBOX, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_SBOX },
+ { SKIP, NFIFOENTRY_DTYPE_SKIP },
+ { PKE, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_E },
+ { PKN, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_N },
+ { PKA, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A },
+ { PKA0, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A0 },
+ { PKA1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A1 },
+ { PKA2, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A2 },
+ { PKA3, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A3 },
+ { PKB, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B },
+ { PKB0, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B0 },
+ { PKB1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B1 },
+ { PKB2, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B2 },
+ { PKB3, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B3 },
+ { AB1, NFIFOENTRY_DEST_CLASS1 },
+ { AB2, NFIFOENTRY_DEST_CLASS2 },
+ { ABD, NFIFOENTRY_DEST_DECO }
+};
+
+static const uint32_t nfifo_flags[][2] = {
+/*1*/ { LAST1, NFIFOENTRY_LC1 },
+ { LAST2, NFIFOENTRY_LC2 },
+ { FLUSH1, NFIFOENTRY_FC1 },
+ { BP, NFIFOENTRY_BND },
+ { PAD_ZERO, NFIFOENTRY_PTYPE_ZEROS },
+ { PAD_NONZERO, NFIFOENTRY_PTYPE_RND_NOZEROS },
+ { PAD_INCREMENT, NFIFOENTRY_PTYPE_INCREMENT },
+ { PAD_RANDOM, NFIFOENTRY_PTYPE_RND },
+ { PAD_ZERO_N1, NFIFOENTRY_PTYPE_ZEROS_NZ },
+ { PAD_NONZERO_0, NFIFOENTRY_PTYPE_RND_NZ_LZ },
+ { PAD_N1, NFIFOENTRY_PTYPE_N },
+/*12*/ { PAD_NONZERO_N, NFIFOENTRY_PTYPE_RND_NZ_N },
+ { FLUSH2, NFIFOENTRY_FC2 },
+ { OC, NFIFOENTRY_OC }
+};
+
+/*
+ * Allowed NFIFO LOAD flags for each SEC Era.
+ * Values represent the number of entries from nfifo_flags[] that are supported.
+ */
+static const unsigned int nfifo_flags_sz[] = {12, 14, 14, 14, 14, 14, 14, 14};
+
+static const uint32_t nfifo_pad_flags[][2] = {
+ { BM, NFIFOENTRY_BM },
+ { PS, NFIFOENTRY_PS },
+ { PR, NFIFOENTRY_PR }
+};
+
+/*
+ * Allowed NFIFO LOAD pad flags for each SEC Era.
+ * Values represent the number of entries from nfifo_pad_flags[] that are
+ * supported.
+ */
+static const unsigned int nfifo_pad_flags_sz[] = {2, 2, 2, 2, 3, 3, 3, 3};
+
+static inline int
+rta_nfifo_load(struct program *program, uint32_t src,
+ uint32_t data, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0, val;
+ int ret = -EINVAL;
+ uint32_t load_cmd = CMD_LOAD | LDST_IMM | LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO;
+ unsigned int start_pc = program->current_pc;
+
+ if ((data == AFHA_SBOX) && (rta_sec_era == RTA_SEC_ERA_7)) {
+ pr_err("NFIFO: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write source field */
+ ret = __rta_map_opcode(src, nfifo_src, nfifo_src_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("NFIFO: Invalid SRC. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write type field */
+ ret = __rta_map_opcode(data, nfifo_data, ARRAY_SIZE(nfifo_data), &val);
+ if (ret < 0) {
+ pr_err("NFIFO: Invalid data. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write DL field */
+ if (!(flags & EXT)) {
+ opcode |= length & NFIFOENTRY_DLEN_MASK;
+ load_cmd |= 4;
+ } else {
+ load_cmd |= 8;
+ }
+
+ /* write flags */
+ __rta_map_flags(flags, nfifo_flags, nfifo_flags_sz[rta_sec_era],
+ &opcode);
+
+ /* in case of padding, check the destination */
+ if (src == PAD)
+ __rta_map_flags(flags, nfifo_pad_flags,
+ nfifo_pad_flags_sz[rta_sec_era], &opcode);
+
+ /* write LOAD command first */
+ __rta_out32(program, load_cmd);
+ __rta_out32(program, opcode);
+
+ if (flags & EXT)
+ __rta_out32(program, length & NFIFOENTRY_DLEN_MASK);
+
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_NFIFO_CMD_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
new file mode 100644
index 00000000..5e88fb46
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
@@ -0,0 +1,599 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_OPERATION_CMD_H__
+#define __RTA_OPERATION_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static inline int
+__rta_alg_aai_aes(uint16_t aai)
+{
+ uint16_t aes_mode = aai & OP_ALG_AESA_MODE_MASK;
+
+ if (aai & OP_ALG_AAI_C2K) {
+ if (rta_sec_era < RTA_SEC_ERA_5)
+ return -1;
+ if ((aes_mode != OP_ALG_AAI_CCM) &&
+ (aes_mode != OP_ALG_AAI_GCM))
+ return -EINVAL;
+ }
+
+ switch (aes_mode) {
+ case OP_ALG_AAI_CBC_CMAC:
+ case OP_ALG_AAI_CTR_CMAC_LTE:
+ case OP_ALG_AAI_CTR_CMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_CTR:
+ case OP_ALG_AAI_CBC:
+ case OP_ALG_AAI_ECB:
+ case OP_ALG_AAI_OFB:
+ case OP_ALG_AAI_CFB:
+ case OP_ALG_AAI_XTS:
+ case OP_ALG_AAI_CMAC:
+ case OP_ALG_AAI_XCBC_MAC:
+ case OP_ALG_AAI_CCM:
+ case OP_ALG_AAI_GCM:
+ case OP_ALG_AAI_CBC_XCBCMAC:
+ case OP_ALG_AAI_CTR_XCBCMAC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_des(uint16_t aai)
+{
+ uint16_t aai_code = (uint16_t)(aai & ~OP_ALG_AAI_CHECKODD);
+
+ switch (aai_code) {
+ case OP_ALG_AAI_CBC:
+ case OP_ALG_AAI_ECB:
+ case OP_ALG_AAI_CFB:
+ case OP_ALG_AAI_OFB:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_md5(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_HMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_SMAC:
+ case OP_ALG_AAI_HASH:
+ case OP_ALG_AAI_HMAC_PRECOMP:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_sha(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_HMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_HASH:
+ case OP_ALG_AAI_HMAC_PRECOMP:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_rng(uint16_t aai)
+{
+ uint16_t rng_mode = aai & OP_ALG_RNG_MODE_MASK;
+ uint16_t rng_sh = aai & OP_ALG_AAI_RNG4_SH_MASK;
+
+ switch (rng_mode) {
+ case OP_ALG_AAI_RNG:
+ case OP_ALG_AAI_RNG_NZB:
+ case OP_ALG_AAI_RNG_OBP:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* State Handle bits are valid only for SEC Era >= 5 */
+ if ((rta_sec_era < RTA_SEC_ERA_5) && rng_sh)
+ return -EINVAL;
+
+ /* PS, AI, SK bits are also valid only for SEC Era >= 5 */
+ if ((rta_sec_era < RTA_SEC_ERA_5) && (aai &
+ (OP_ALG_AAI_RNG4_PS | OP_ALG_AAI_RNG4_AI | OP_ALG_AAI_RNG4_SK)))
+ return -EINVAL;
+
+ switch (rng_sh) {
+ case OP_ALG_AAI_RNG4_SH_0:
+ case OP_ALG_AAI_RNG4_SH_1:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_crc(uint16_t aai)
+{
+ uint16_t aai_code = aai & OP_ALG_CRC_POLY_MASK;
+
+ switch (aai_code) {
+ case OP_ALG_AAI_802:
+ case OP_ALG_AAI_3385:
+ case OP_ALG_AAI_CUST_POLY:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_kasumi(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_GSM:
+ case OP_ALG_AAI_EDGE:
+ case OP_ALG_AAI_F8:
+ case OP_ALG_AAI_F9:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_snow_f9(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F9)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_snow_f8(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F8)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_zuce(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F8)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_zuca(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F9)
+ return 0;
+
+ return -EINVAL;
+}
+
+struct alg_aai_map {
+ uint32_t chipher_algo;
+ int (*aai_func)(uint16_t);
+ uint32_t class;
+};
+
+static const struct alg_aai_map alg_table[] = {
+/*1*/ { OP_ALG_ALGSEL_AES, __rta_alg_aai_aes, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_DES, __rta_alg_aai_des, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_3DES, __rta_alg_aai_des, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_MD5, __rta_alg_aai_md5, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA1, __rta_alg_aai_md5, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA224, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA256, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA384, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA512, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_RNG, __rta_alg_aai_rng, OP_TYPE_CLASS1_ALG },
+/*11*/ { OP_ALG_ALGSEL_CRC, __rta_alg_aai_crc, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_ARC4, NULL, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_SNOW_F8, __rta_alg_aai_snow_f8, OP_TYPE_CLASS1_ALG },
+/*14*/ { OP_ALG_ALGSEL_KASUMI, __rta_alg_aai_kasumi, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_SNOW_F9, __rta_alg_aai_snow_f9, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_ZUCE, __rta_alg_aai_zuce, OP_TYPE_CLASS1_ALG },
+/*17*/ { OP_ALG_ALGSEL_ZUCA, __rta_alg_aai_zuca, OP_TYPE_CLASS2_ALG }
+};
+
+/*
+ * Allowed OPERATION algorithms for each SEC Era.
+ * Values represent the number of entries from alg_table[] that are supported.
+ */
+static const unsigned int alg_table_sz[] = {14, 15, 15, 15, 17, 17, 11, 17};
+
+static inline int
+rta_operation(struct program *program, uint32_t cipher_algo,
+ uint16_t aai, uint8_t algo_state,
+ int icv_checking, int enc)
+{
+ uint32_t opcode = CMD_OPERATION;
+ unsigned int i, found = 0;
+ unsigned int start_pc = program->current_pc;
+ int ret;
+
+ for (i = 0; i < alg_table_sz[rta_sec_era]; i++) {
+ if (alg_table[i].chipher_algo == cipher_algo) {
+ opcode |= cipher_algo | alg_table[i].class;
+ /* nothing else to verify */
+ if (alg_table[i].aai_func == NULL) {
+ found = 1;
+ break;
+ }
+
+ aai &= OP_ALG_AAI_MASK;
+
+ ret = (*alg_table[i].aai_func)(aai);
+ if (ret < 0) {
+ pr_err("OPERATION: Bad AAI Type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= aai;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ pr_err("OPERATION: Invalid Command. SEC Program Line: %d\n",
+ program->current_pc);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (algo_state) {
+ case OP_ALG_AS_UPDATE:
+ case OP_ALG_AS_INIT:
+ case OP_ALG_AS_FINALIZE:
+ case OP_ALG_AS_INITFINAL:
+ opcode |= algo_state;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (icv_checking) {
+ case ICV_CHECK_DISABLE:
+ /*
+ * opcode |= OP_ALG_ICV_OFF;
+ * OP_ALG_ICV_OFF is 0
+ */
+ break;
+ case ICV_CHECK_ENABLE:
+ opcode |= OP_ALG_ICV_ON;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (enc) {
+ case DIR_DEC:
+ /*
+ * opcode |= OP_ALG_DECRYPT;
+ * OP_ALG_DECRYPT is 0
+ */
+ break;
+ case DIR_ENC:
+ opcode |= OP_ALG_ENCRYPT;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ return ret;
+}
+
+/*
+ * OPERATION PKHA routines
+ */
+static inline int
+__rta_pkha_clearmem(uint32_t pkha_op)
+{
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_CLEARMEM_ALL):
+ case (OP_ALG_PKMODE_CLEARMEM_ABE):
+ case (OP_ALG_PKMODE_CLEARMEM_ABN):
+ case (OP_ALG_PKMODE_CLEARMEM_AB):
+ case (OP_ALG_PKMODE_CLEARMEM_AEN):
+ case (OP_ALG_PKMODE_CLEARMEM_AE):
+ case (OP_ALG_PKMODE_CLEARMEM_AN):
+ case (OP_ALG_PKMODE_CLEARMEM_A):
+ case (OP_ALG_PKMODE_CLEARMEM_BEN):
+ case (OP_ALG_PKMODE_CLEARMEM_BE):
+ case (OP_ALG_PKMODE_CLEARMEM_BN):
+ case (OP_ALG_PKMODE_CLEARMEM_B):
+ case (OP_ALG_PKMODE_CLEARMEM_EN):
+ case (OP_ALG_PKMODE_CLEARMEM_N):
+ case (OP_ALG_PKMODE_CLEARMEM_E):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_pkha_mod_arithmetic(uint32_t pkha_op)
+{
+ pkha_op &= (uint32_t)~OP_ALG_PKMODE_OUT_A;
+
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_MOD_ADD):
+ case (OP_ALG_PKMODE_MOD_SUB_AB):
+ case (OP_ALG_PKMODE_MOD_SUB_BA):
+ case (OP_ALG_PKMODE_MOD_MULT):
+ case (OP_ALG_PKMODE_MOD_MULT_IM):
+ case (OP_ALG_PKMODE_MOD_MULT_IM_OM):
+ case (OP_ALG_PKMODE_MOD_EXPO):
+ case (OP_ALG_PKMODE_MOD_EXPO_TEQ):
+ case (OP_ALG_PKMODE_MOD_EXPO_IM):
+ case (OP_ALG_PKMODE_MOD_EXPO_IM_TEQ):
+ case (OP_ALG_PKMODE_MOD_REDUCT):
+ case (OP_ALG_PKMODE_MOD_INV):
+ case (OP_ALG_PKMODE_MOD_MONT_CNST):
+ case (OP_ALG_PKMODE_MOD_CRT_CNST):
+ case (OP_ALG_PKMODE_MOD_GCD):
+ case (OP_ALG_PKMODE_MOD_PRIMALITY):
+ case (OP_ALG_PKMODE_MOD_SML_EXP):
+ case (OP_ALG_PKMODE_F2M_ADD):
+ case (OP_ALG_PKMODE_F2M_MUL):
+ case (OP_ALG_PKMODE_F2M_MUL_IM):
+ case (OP_ALG_PKMODE_F2M_MUL_IM_OM):
+ case (OP_ALG_PKMODE_F2M_EXP):
+ case (OP_ALG_PKMODE_F2M_EXP_TEQ):
+ case (OP_ALG_PKMODE_F2M_AMODN):
+ case (OP_ALG_PKMODE_F2M_INV):
+ case (OP_ALG_PKMODE_F2M_R2):
+ case (OP_ALG_PKMODE_F2M_GCD):
+ case (OP_ALG_PKMODE_F2M_SML_EXP):
+ case (OP_ALG_PKMODE_ECC_F2M_ADD):
+ case (OP_ALG_PKMODE_ECC_F2M_ADD_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_DBL):
+ case (OP_ALG_PKMODE_ECC_F2M_DBL_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_TEQ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_TEQ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ_TEQ):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_pkha_copymem(uint32_t pkha_op)
+{
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A_E):
+ case (OP_ALG_PKMODE_COPY_NSZ_A_N):
+ case (OP_ALG_PKMODE_COPY_NSZ_B_E):
+ case (OP_ALG_PKMODE_COPY_NSZ_B_N):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_A):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_B):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_A_N):
+ case (OP_ALG_PKMODE_COPY_SSZ_B_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_B_N):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_A):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_B):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_E):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+rta_pkha_operation(struct program *program, uint32_t op_pkha)
+{
+ uint32_t opcode = CMD_OPERATION | OP_TYPE_PK | OP_ALG_PK;
+ uint32_t pkha_func;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ pkha_func = op_pkha & OP_ALG_PK_FUN_MASK;
+
+ switch (pkha_func) {
+ case (OP_ALG_PKMODE_CLEARMEM):
+ ret = __rta_pkha_clearmem(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ case (OP_ALG_PKMODE_MOD_ADD):
+ case (OP_ALG_PKMODE_MOD_SUB_AB):
+ case (OP_ALG_PKMODE_MOD_SUB_BA):
+ case (OP_ALG_PKMODE_MOD_MULT):
+ case (OP_ALG_PKMODE_MOD_EXPO):
+ case (OP_ALG_PKMODE_MOD_REDUCT):
+ case (OP_ALG_PKMODE_MOD_INV):
+ case (OP_ALG_PKMODE_MOD_MONT_CNST):
+ case (OP_ALG_PKMODE_MOD_CRT_CNST):
+ case (OP_ALG_PKMODE_MOD_GCD):
+ case (OP_ALG_PKMODE_MOD_PRIMALITY):
+ case (OP_ALG_PKMODE_MOD_SML_EXP):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL):
+ ret = __rta_pkha_mod_arithmetic(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ case (OP_ALG_PKMODE_COPY_NSZ):
+ case (OP_ALG_PKMODE_COPY_SSZ):
+ ret = __rta_pkha_copymem(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ goto err;
+ }
+
+ opcode |= op_pkha;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_OPERATION_CMD_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
new file mode 100644
index 00000000..2e7b2f2d
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
@@ -0,0 +1,732 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_PROTOCOL_CMD_H__
+#define __RTA_PROTOCOL_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static inline int
+__rta_ssl_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_SSL30_RC4_40_MD5_2:
+ case OP_PCL_SSL30_RC4_128_MD5_2:
+ case OP_PCL_SSL30_RC4_128_SHA_5:
+ case OP_PCL_SSL30_RC4_40_MD5_3:
+ case OP_PCL_SSL30_RC4_128_MD5_3:
+ case OP_PCL_SSL30_RC4_128_SHA:
+ case OP_PCL_SSL30_RC4_128_MD5:
+ case OP_PCL_SSL30_RC4_40_SHA:
+ case OP_PCL_SSL30_RC4_40_MD5:
+ case OP_PCL_SSL30_RC4_128_SHA_2:
+ case OP_PCL_SSL30_RC4_128_SHA_3:
+ case OP_PCL_SSL30_RC4_128_SHA_4:
+ case OP_PCL_SSL30_RC4_128_SHA_6:
+ case OP_PCL_SSL30_RC4_128_SHA_7:
+ case OP_PCL_SSL30_RC4_128_SHA_8:
+ case OP_PCL_SSL30_RC4_128_SHA_9:
+ case OP_PCL_SSL30_RC4_128_SHA_10:
+ case OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA:
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+ /* fall through if not Era 7 */
+ case OP_PCL_SSL30_DES40_CBC_SHA:
+ case OP_PCL_SSL30_DES_CBC_SHA_2:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_5:
+ case OP_PCL_SSL30_DES40_CBC_SHA_2:
+ case OP_PCL_SSL30_DES_CBC_SHA_3:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_6:
+ case OP_PCL_SSL30_DES40_CBC_SHA_3:
+ case OP_PCL_SSL30_DES_CBC_SHA_4:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_7:
+ case OP_PCL_SSL30_DES40_CBC_SHA_4:
+ case OP_PCL_SSL30_DES_CBC_SHA_5:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_8:
+ case OP_PCL_SSL30_DES40_CBC_SHA_5:
+ case OP_PCL_SSL30_DES_CBC_SHA_6:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_9:
+ case OP_PCL_SSL30_DES40_CBC_SHA_6:
+ case OP_PCL_SSL30_DES_CBC_SHA_7:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_10:
+ case OP_PCL_SSL30_DES_CBC_SHA:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA:
+ case OP_PCL_SSL30_DES_CBC_MD5:
+ case OP_PCL_SSL30_3DES_EDE_CBC_MD5:
+ case OP_PCL_SSL30_DES40_CBC_SHA_7:
+ case OP_PCL_SSL30_DES40_CBC_MD5:
+ case OP_PCL_SSL30_AES_128_CBC_SHA:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_2:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_3:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_4:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_5:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_6:
+ case OP_PCL_SSL30_AES_256_CBC_SHA:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_2:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_3:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_4:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_5:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_6:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_2:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_3:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_4:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_5:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_2:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_3:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_4:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_5:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_6:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_6:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_2:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_7:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_7:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_3:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_8:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_8:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_4:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_9:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_9:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_1:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_1:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_2:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_2:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_3:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_3:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_4:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_4:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_5:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_5:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_6:
+ case OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_PSK_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_11:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_10:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_10:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_12:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_11:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_11:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_12:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_13:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_12:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_14:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_13:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_13:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_15:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_14:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_14:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_16:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_17:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_18:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_15:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_16:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_17:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_15:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_16:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_17:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_TLS12_3DES_EDE_CBC_MD5:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA160:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA224:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA256:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA384:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA512:
+ case OP_PCL_TLS12_AES_128_CBC_SHA160:
+ case OP_PCL_TLS12_AES_128_CBC_SHA224:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256:
+ case OP_PCL_TLS12_AES_128_CBC_SHA384:
+ case OP_PCL_TLS12_AES_128_CBC_SHA512:
+ case OP_PCL_TLS12_AES_192_CBC_SHA160:
+ case OP_PCL_TLS12_AES_192_CBC_SHA224:
+ case OP_PCL_TLS12_AES_192_CBC_SHA256:
+ case OP_PCL_TLS12_AES_192_CBC_SHA512:
+ case OP_PCL_TLS12_AES_256_CBC_SHA160:
+ case OP_PCL_TLS12_AES_256_CBC_SHA224:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256:
+ case OP_PCL_TLS12_AES_256_CBC_SHA384:
+ case OP_PCL_TLS12_AES_256_CBC_SHA512:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA160:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA384:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA224:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA512:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA256:
+ case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE:
+ case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_ike_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_IKE_HMAC_MD5:
+ case OP_PCL_IKE_HMAC_SHA1:
+ case OP_PCL_IKE_HMAC_AES128_CBC:
+ case OP_PCL_IKE_HMAC_SHA256:
+ case OP_PCL_IKE_HMAC_SHA384:
+ case OP_PCL_IKE_HMAC_SHA512:
+ case OP_PCL_IKE_HMAC_AES128_CMAC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_ipsec_proto(uint16_t protoinfo)
+{
+ uint16_t proto_cls1 = protoinfo & OP_PCL_IPSEC_CIPHER_MASK;
+ uint16_t proto_cls2 = protoinfo & OP_PCL_IPSEC_AUTH_MASK;
+
+ switch (proto_cls1) {
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ /* CCM, GCM, GMAC require PROTINFO[7:0] = 0 */
+ if (proto_cls2 == OP_PCL_IPSEC_HMAC_NULL)
+ return 0;
+ return -EINVAL;
+ case OP_PCL_IPSEC_NULL:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_AES_CTR:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (proto_cls2) {
+ case OP_PCL_IPSEC_HMAC_NULL:
+ case OP_PCL_IPSEC_HMAC_MD5_96:
+ case OP_PCL_IPSEC_HMAC_SHA1_96:
+ case OP_PCL_IPSEC_AES_XCBC_MAC_96:
+ case OP_PCL_IPSEC_HMAC_MD5_128:
+ case OP_PCL_IPSEC_HMAC_SHA1_160:
+ case OP_PCL_IPSEC_AES_CMAC_96:
+ case OP_PCL_IPSEC_HMAC_SHA2_256_128:
+ case OP_PCL_IPSEC_HMAC_SHA2_384_192:
+ case OP_PCL_IPSEC_HMAC_SHA2_512_256:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_srtp_proto(uint16_t protoinfo)
+{
+ uint16_t proto_cls1 = protoinfo & OP_PCL_SRTP_CIPHER_MASK;
+ uint16_t proto_cls2 = protoinfo & OP_PCL_SRTP_AUTH_MASK;
+
+ switch (proto_cls1) {
+ case OP_PCL_SRTP_AES_CTR:
+ switch (proto_cls2) {
+ case OP_PCL_SRTP_HMAC_SHA1_160:
+ return 0;
+ }
+ /* no break */
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_macsec_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_MACSEC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_wifi_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_WIFI:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_wimax_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_WIMAX_OFDM:
+ case OP_PCL_WIMAX_OFDMA:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Allowed blob proto flags for each SEC Era */
+static const uint32_t proto_blob_flags[] = {
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM
+};
+
+static inline int
+__rta_blob_proto(uint16_t protoinfo)
+{
+ if (protoinfo & ~proto_blob_flags[rta_sec_era])
+ return -EINVAL;
+
+ switch (protoinfo & OP_PCL_BLOB_FORMAT_MASK) {
+ case OP_PCL_BLOB_FORMAT_NORMAL:
+ case OP_PCL_BLOB_FORMAT_MASTER_VER:
+ case OP_PCL_BLOB_FORMAT_TEST:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_BLOB_REG_MASK) {
+ case OP_PCL_BLOB_AFHA_SBOX:
+ if (rta_sec_era < RTA_SEC_ERA_3)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_BLOB_REG_MEMORY:
+ case OP_PCL_BLOB_REG_KEY1:
+ case OP_PCL_BLOB_REG_KEY2:
+ case OP_PCL_BLOB_REG_SPLIT:
+ case OP_PCL_BLOB_REG_PKE:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_dlc_proto(uint16_t protoinfo)
+{
+ if ((rta_sec_era < RTA_SEC_ERA_2) &&
+ (protoinfo & (OP_PCL_PKPROT_DSA_MSG | OP_PCL_PKPROT_HASH_MASK |
+ OP_PCL_PKPROT_EKT_Z | OP_PCL_PKPROT_DECRYPT_Z |
+ OP_PCL_PKPROT_DECRYPT_PRI)))
+ return -EINVAL;
+
+ switch (protoinfo & OP_PCL_PKPROT_HASH_MASK) {
+ case OP_PCL_PKPROT_HASH_MD5:
+ case OP_PCL_PKPROT_HASH_SHA1:
+ case OP_PCL_PKPROT_HASH_SHA224:
+ case OP_PCL_PKPROT_HASH_SHA256:
+ case OP_PCL_PKPROT_HASH_SHA384:
+ case OP_PCL_PKPROT_HASH_SHA512:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+__rta_rsa_enc_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_RSAPROT_OP_MASK) {
+ case OP_PCL_RSAPROT_OP_ENC_F_IN:
+ if ((protoinfo & OP_PCL_RSAPROT_FFF_MASK) !=
+ OP_PCL_RSAPROT_FFF_RED)
+ return -EINVAL;
+ break;
+ case OP_PCL_RSAPROT_OP_ENC_F_OUT:
+ switch (protoinfo & OP_PCL_RSAPROT_FFF_MASK) {
+ case OP_PCL_RSAPROT_FFF_RED:
+ case OP_PCL_RSAPROT_FFF_ENC:
+ case OP_PCL_RSAPROT_FFF_EKT:
+ case OP_PCL_RSAPROT_FFF_TK_ENC:
+ case OP_PCL_RSAPROT_FFF_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+__rta_rsa_dec_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_RSAPROT_OP_MASK) {
+ case OP_PCL_RSAPROT_OP_DEC_ND:
+ case OP_PCL_RSAPROT_OP_DEC_PQD:
+ case OP_PCL_RSAPROT_OP_DEC_PQDPDQC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_RSAPROT_PPP_MASK) {
+ case OP_PCL_RSAPROT_PPP_RED:
+ case OP_PCL_RSAPROT_PPP_ENC:
+ case OP_PCL_RSAPROT_PPP_EKT:
+ case OP_PCL_RSAPROT_PPP_TK_ENC:
+ case OP_PCL_RSAPROT_PPP_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (protoinfo & OP_PCL_RSAPROT_FMT_PKCSV15)
+ switch (protoinfo & OP_PCL_RSAPROT_FFF_MASK) {
+ case OP_PCL_RSAPROT_FFF_RED:
+ case OP_PCL_RSAPROT_FFF_ENC:
+ case OP_PCL_RSAPROT_FFF_EKT:
+ case OP_PCL_RSAPROT_FFF_TK_ENC:
+ case OP_PCL_RSAPROT_FFF_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * DKP Protocol - Restrictions on key (SRC,DST) combinations
+ * For e.g. key_in_out[0][0] = 1 means (SRC=IMM,DST=IMM) combination is allowed
+ */
+static const uint8_t key_in_out[4][4] = { {1, 0, 0, 0},
+ {1, 1, 1, 1},
+ {1, 0, 1, 0},
+ {1, 0, 0, 1} };
+
+static inline int
+__rta_dkp_proto(uint16_t protoinfo)
+{
+ int key_src = (protoinfo & OP_PCL_DKP_SRC_MASK) >> OP_PCL_DKP_SRC_SHIFT;
+ int key_dst = (protoinfo & OP_PCL_DKP_DST_MASK) >> OP_PCL_DKP_DST_SHIFT;
+
+ if (!key_in_out[key_src][key_dst]) {
+ pr_err("PROTO_DESC: Invalid DKP key (SRC,DST)\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static inline int
+__rta_3g_dcrc_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_3G_DCRC_CRC7:
+ case OP_PCL_3G_DCRC_CRC11:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_3g_rlc_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_3G_RLC_NULL:
+ case OP_PCL_3G_RLC_KASUMI:
+ case OP_PCL_3G_RLC_SNOW:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_lte_pdcp_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_LTE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5)
+ break;
+ case OP_PCL_LTE_NULL:
+ case OP_PCL_LTE_SNOW:
+ case OP_PCL_LTE_AES:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_lte_pdcp_mixed_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_LTE_MIXED_AUTH_MASK) {
+ case OP_PCL_LTE_MIXED_AUTH_NULL:
+ case OP_PCL_LTE_MIXED_AUTH_SNOW:
+ case OP_PCL_LTE_MIXED_AUTH_AES:
+ case OP_PCL_LTE_MIXED_AUTH_ZUC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_LTE_MIXED_ENC_MASK) {
+ case OP_PCL_LTE_MIXED_ENC_NULL:
+ case OP_PCL_LTE_MIXED_ENC_SNOW:
+ case OP_PCL_LTE_MIXED_ENC_AES:
+ case OP_PCL_LTE_MIXED_ENC_ZUC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+struct proto_map {
+ uint32_t optype;
+ uint32_t protid;
+ int (*protoinfo_func)(uint16_t);
+};
+
+static const struct proto_map proto_table[] = {
+/*1*/ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_SSL30_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS10_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS11_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS12_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DTLS10_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV1_PRF, __rta_ike_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV2_PRF, __rta_ike_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_PUBLICKEYPAIR, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DSASIGN, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DSAVERIFY, __rta_dlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC, __rta_ipsec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_SRTP, __rta_srtp_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_SSL30, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS10, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS11, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS12, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DTLS10, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_MACSEC, __rta_macsec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIFI, __rta_wifi_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIMAX, __rta_wimax_proto},
+/*21*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_BLOB, __rta_blob_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DIFFIEHELLMAN, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_RSAENCRYPT, __rta_rsa_enc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_RSADECRYPT, __rta_rsa_dec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_DCRC, __rta_3g_dcrc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_RLC_PDU, __rta_3g_rlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_RLC_SDU, __rta_3g_rlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_USER, __rta_lte_pdcp_proto},
+/*29*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_CTRL, __rta_lte_pdcp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_MD5, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA1, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA224, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA256, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA384, __rta_dkp_proto},
+/*35*/ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA512, __rta_dkp_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_PUBLICKEYPAIR, __rta_dlc_proto},
+/*37*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DSASIGN, __rta_dlc_proto},
+/*38*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ __rta_lte_pdcp_mixed_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC_NEW, __rta_ipsec_proto},
+};
+
+/*
+ * Allowed OPERATION protocols for each SEC Era.
+ * Values represent the number of entries from proto_table[] that are supported.
+ */
+static const unsigned int proto_table_sz[] = {21, 29, 29, 29, 29, 35, 37, 39};
+
+static inline int
+rta_proto_operation(struct program *program, uint32_t optype,
+ uint32_t protid, uint16_t protoinfo)
+{
+ uint32_t opcode = CMD_OPERATION;
+ unsigned int i, found = 0;
+ uint32_t optype_tmp = optype;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ for (i = 0; i < proto_table_sz[rta_sec_era]; i++) {
+ /* clear last bit in optype to match also decap proto */
+ optype_tmp &= (uint32_t)~(1 << OP_TYPE_SHIFT);
+ if (optype_tmp == proto_table[i].optype) {
+ if (proto_table[i].protid == protid) {
+ /* nothing else to verify */
+ if (proto_table[i].protoinfo_func == NULL) {
+ found = 1;
+ break;
+ }
+ /* check protoinfo */
+ ret = (*proto_table[i].protoinfo_func)
+ (protoinfo);
+ if (ret < 0) {
+ pr_err("PROTO_DESC: Bad PROTO Type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ found = 1;
+ break;
+ }
+ }
+ }
+ if (!found) {
+ pr_err("PROTO_DESC: Operation Type Mismatch. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ __rta_out32(program, opcode | optype | protid | protoinfo);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static inline int
+rta_dkp_proto(struct program *program, uint32_t protid,
+ uint16_t key_src, uint16_t key_dst,
+ uint16_t keylen, uint64_t key,
+ enum rta_data_type key_type)
+{
+ unsigned int start_pc = program->current_pc;
+ unsigned int in_words = 0, out_words = 0;
+ int ret;
+
+ key_src &= OP_PCL_DKP_SRC_MASK;
+ key_dst &= OP_PCL_DKP_DST_MASK;
+ keylen &= OP_PCL_DKP_KEY_MASK;
+
+ ret = rta_proto_operation(program, OP_TYPE_UNI_PROTOCOL, protid,
+ key_src | key_dst | keylen);
+ if (ret < 0)
+ return ret;
+
+ if ((key_src == OP_PCL_DKP_SRC_PTR) ||
+ (key_src == OP_PCL_DKP_SRC_SGF)) {
+ __rta_out64(program, program->ps, key);
+ in_words = program->ps ? 2 : 1;
+ } else if (key_src == OP_PCL_DKP_SRC_IMM) {
+ __rta_inline_data(program, key, inline_flags(key_type), keylen);
+ in_words = (unsigned int)((keylen + 3) / 4);
+ }
+
+ if ((key_dst == OP_PCL_DKP_DST_PTR) ||
+ (key_dst == OP_PCL_DKP_DST_SGF)) {
+ out_words = in_words;
+ } else if (key_dst == OP_PCL_DKP_DST_IMM) {
+ out_words = split_key_len(protid) / 4;
+ }
+
+ if (out_words < in_words) {
+ pr_err("PROTO_DESC: DKP doesn't currently support a smaller descriptor\n");
+ program->first_error_pc = start_pc;
+ return -EINVAL;
+ }
+
+ /* If needed, reserve space in resulting descriptor for derived key */
+ program->current_pc += (out_words - in_words);
+
+ return (int)start_pc;
+}
+
+#endif /* __RTA_PROTOCOL_CMD_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
new file mode 100644
index 00000000..c12edb0c
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
@@ -0,0 +1,823 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_SEC_RUN_TIME_ASM_H__
+#define __RTA_SEC_RUN_TIME_ASM_H__
+
+#include "hw/desc.h"
+
+/* hw/compat.h is not delivered in kernel */
+#ifndef __KERNEL__
+#include "hw/compat.h"
+#endif
+
+/**
+ * enum rta_sec_era - SEC HW block revisions supported by the RTA library
+ * @RTA_SEC_ERA_1: SEC Era 1
+ * @RTA_SEC_ERA_2: SEC Era 2
+ * @RTA_SEC_ERA_3: SEC Era 3
+ * @RTA_SEC_ERA_4: SEC Era 4
+ * @RTA_SEC_ERA_5: SEC Era 5
+ * @RTA_SEC_ERA_6: SEC Era 6
+ * @RTA_SEC_ERA_7: SEC Era 7
+ * @RTA_SEC_ERA_8: SEC Era 8
+ * @MAX_SEC_ERA: maximum SEC HW block revision supported by RTA library
+ */
+enum rta_sec_era {
+ RTA_SEC_ERA_1,
+ RTA_SEC_ERA_2,
+ RTA_SEC_ERA_3,
+ RTA_SEC_ERA_4,
+ RTA_SEC_ERA_5,
+ RTA_SEC_ERA_6,
+ RTA_SEC_ERA_7,
+ RTA_SEC_ERA_8,
+ MAX_SEC_ERA = RTA_SEC_ERA_8
+};
+
+/**
+ * DEFAULT_SEC_ERA - the default value for the SEC era in case the user provides
+ * an unsupported value.
+ */
+#define DEFAULT_SEC_ERA MAX_SEC_ERA
+
+/**
+ * USER_SEC_ERA - translates the SEC Era from internal to user representation.
+ * @sec_era: SEC Era in internal (library) representation
+ */
+#define USER_SEC_ERA(sec_era) (sec_era + 1)
+
+/**
+ * INTL_SEC_ERA - translates the SEC Era from user representation to internal.
+ * @sec_era: SEC Era in user representation
+ */
+#define INTL_SEC_ERA(sec_era) (sec_era - 1)
+
+/**
+ * enum rta_jump_type - Types of action taken by JUMP command
+ * @LOCAL_JUMP: conditional jump to an offset within the descriptor buffer
+ * @FAR_JUMP: conditional jump to a location outside the descriptor buffer,
+ * indicated by the POINTER field after the JUMP command.
+ * @HALT: conditional halt - stop the execution of the current descriptor and
+ * writes PKHA / Math condition bits as status / error code.
+ * @HALT_STATUS: conditional halt with user-specified status - stop the
+ * execution of the current descriptor and writes the value of
+ * "LOCAL OFFSET" JUMP field as status / error code.
+ * @GOSUB: conditional subroutine call - similar to @LOCAL_JUMP, but also saves
+ * return address in the Return Address register; subroutine calls
+ * cannot be nested.
+ * @RETURN: conditional subroutine return - similar to @LOCAL_JUMP, but the
+ * offset is taken from the Return Address register.
+ * @LOCAL_JUMP_INC: similar to @LOCAL_JUMP, but increment the register specified
+ * in "SRC_DST" JUMP field before evaluating the jump
+ * condition.
+ * @LOCAL_JUMP_DEC: similar to @LOCAL_JUMP, but decrement the register specified
+ * in "SRC_DST" JUMP field before evaluating the jump
+ * condition.
+ */
+enum rta_jump_type {
+ LOCAL_JUMP,
+ FAR_JUMP,
+ HALT,
+ HALT_STATUS,
+ GOSUB,
+ RETURN,
+ LOCAL_JUMP_INC,
+ LOCAL_JUMP_DEC
+};
+
+/**
+ * enum rta_jump_cond - How test conditions are evaluated by JUMP command
+ * @ALL_TRUE: perform action if ALL selected conditions are true
+ * @ALL_FALSE: perform action if ALL selected conditions are false
+ * @ANY_TRUE: perform action if ANY of the selected conditions is true
+ * @ANY_FALSE: perform action if ANY of the selected conditions is false
+ */
+enum rta_jump_cond {
+ ALL_TRUE,
+ ALL_FALSE,
+ ANY_TRUE,
+ ANY_FALSE
+};
+
+/**
+ * enum rta_share_type - Types of sharing for JOB_HDR and SHR_HDR commands
+ * @SHR_NEVER: nothing is shared; descriptors can execute in parallel (i.e. no
+ * dependencies are allowed between them).
+ * @SHR_WAIT: shared descriptor and keys are shared once the descriptor sets
+ * "OK to share" in DECO Control Register (DCTRL).
+ * @SHR_SERIAL: shared descriptor and keys are shared once the descriptor has
+ * completed.
+ * @SHR_ALWAYS: shared descriptor is shared anytime after the descriptor is
+ * loaded.
+ * @SHR_DEFER: valid only for JOB_HDR; sharing type is the one specified
+ * in the shared descriptor associated with the job descriptor.
+ */
+enum rta_share_type {
+ SHR_NEVER,
+ SHR_WAIT,
+ SHR_SERIAL,
+ SHR_ALWAYS,
+ SHR_DEFER
+};
+
+/**
+ * enum rta_data_type - Indicates how is the data provided and how to include it
+ * in the descriptor.
+ * @RTA_DATA_PTR: Data is in memory and accessed by reference; data address is a
+ * physical (bus) address.
+ * @RTA_DATA_IMM: Data is inlined in descriptor and accessed as immediate data;
+ * data address is a virtual address.
+ * @RTA_DATA_IMM_DMA: (AIOP only) Data is inlined in descriptor and accessed as
+ * immediate data; data address is a physical (bus) address
+ * in external memory and CDMA is programmed to transfer the
+ * data into descriptor buffer being built in Workspace Area.
+ */
+enum rta_data_type {
+ RTA_DATA_PTR = 1,
+ RTA_DATA_IMM,
+ RTA_DATA_IMM_DMA
+};
+
+/* Registers definitions */
+enum rta_regs {
+ /* CCB Registers */
+ CONTEXT1 = 1,
+ CONTEXT2,
+ KEY1,
+ KEY2,
+ KEY1SZ,
+ KEY2SZ,
+ ICV1SZ,
+ ICV2SZ,
+ DATA1SZ,
+ DATA2SZ,
+ ALTDS1,
+ IV1SZ,
+ AAD1SZ,
+ MODE1,
+ MODE2,
+ CCTRL,
+ DCTRL,
+ ICTRL,
+ CLRW,
+ CSTAT,
+ IFIFO,
+ NFIFO,
+ OFIFO,
+ PKASZ,
+ PKBSZ,
+ PKNSZ,
+ PKESZ,
+ /* DECO Registers */
+ MATH0,
+ MATH1,
+ MATH2,
+ MATH3,
+ DESCBUF,
+ JOBDESCBUF,
+ SHAREDESCBUF,
+ DPOVRD,
+ DJQDA,
+ DSTAT,
+ DPID,
+ DJQCTRL,
+ ALTSOURCE,
+ SEQINSZ,
+ SEQOUTSZ,
+ VSEQINSZ,
+ VSEQOUTSZ,
+ /* PKHA Registers */
+ PKA,
+ PKN,
+ PKA0,
+ PKA1,
+ PKA2,
+ PKA3,
+ PKB,
+ PKB0,
+ PKB1,
+ PKB2,
+ PKB3,
+ PKE,
+ /* Pseudo registers */
+ AB1,
+ AB2,
+ ABD,
+ IFIFOABD,
+ IFIFOAB1,
+ IFIFOAB2,
+ AFHA_SBOX,
+ MDHA_SPLIT_KEY,
+ JOBSRC,
+ ZERO,
+ ONE,
+ AAD1,
+ IV1,
+ IV2,
+ MSG1,
+ MSG2,
+ MSG,
+ MSG_CKSUM,
+ MSGOUTSNOOP,
+ MSGINSNOOP,
+ ICV1,
+ ICV2,
+ SKIP,
+ NONE,
+ RNGOFIFO,
+ RNG,
+ IDFNS,
+ ODFNS,
+ NFIFOSZ,
+ SZ,
+ PAD,
+ SAD1,
+ AAD2,
+ BIT_DATA,
+ NFIFO_SZL,
+ NFIFO_SZM,
+ NFIFO_L,
+ NFIFO_M,
+ SZL,
+ SZM,
+ JOBDESCBUF_EFF,
+ SHAREDESCBUF_EFF,
+ METADATA,
+ GTR,
+ STR,
+ OFIFO_SYNC,
+ MSGOUTSNOOP_ALT
+};
+
+/* Command flags */
+#define FLUSH1 BIT(0)
+#define LAST1 BIT(1)
+#define LAST2 BIT(2)
+#define IMMED BIT(3)
+#define SGF BIT(4)
+#define VLF BIT(5)
+#define EXT BIT(6)
+#define CONT BIT(7)
+#define SEQ BIT(8)
+#define AIDF BIT(9)
+#define FLUSH2 BIT(10)
+#define CLASS1 BIT(11)
+#define CLASS2 BIT(12)
+#define BOTH BIT(13)
+
+/**
+ * DCOPY - (AIOP only) command param is pointer to external memory
+ *
+ * CDMA must be used to transfer the key via DMA into Workspace Area.
+ * Valid only in combination with IMMED flag.
+ */
+#define DCOPY BIT(30)
+
+#define COPY BIT(31) /* command param is pointer (not immediate)
+ * valid only in combination when IMMED
+ */
+
+#define __COPY_MASK (COPY | DCOPY)
+
+/* SEQ IN/OUT PTR Command specific flags */
+#define RBS BIT(16)
+#define INL BIT(17)
+#define PRE BIT(18)
+#define RTO BIT(19)
+#define RJD BIT(20)
+#define SOP BIT(21)
+#define RST BIT(22)
+#define EWS BIT(23)
+
+#define ENC BIT(14) /* Encrypted Key */
+#define EKT BIT(15) /* AES CCM Encryption (default is
+ * AES ECB Encryption)
+ */
+#define TK BIT(16) /* Trusted Descriptor Key (default is
+ * Job Descriptor Key)
+ */
+#define NWB BIT(17) /* No Write Back Key */
+#define PTS BIT(18) /* Plaintext Store */
+
+/* HEADER Command specific flags */
+#define RIF BIT(16)
+#define DNR BIT(17)
+#define CIF BIT(18)
+#define PD BIT(19)
+#define RSMS BIT(20)
+#define TD BIT(21)
+#define MTD BIT(22)
+#define REO BIT(23)
+#define SHR BIT(24)
+#define SC BIT(25)
+/* Extended HEADER specific flags */
+#define DSV BIT(7)
+#define DSEL_MASK 0x00000007 /* DECO Select */
+#define FTD BIT(8)
+
+/* JUMP Command specific flags */
+#define NIFP BIT(20)
+#define NIP BIT(21)
+#define NOP BIT(22)
+#define NCP BIT(23)
+#define CALM BIT(24)
+
+#define MATH_Z BIT(25)
+#define MATH_N BIT(26)
+#define MATH_NV BIT(27)
+#define MATH_C BIT(28)
+#define PK_0 BIT(29)
+#define PK_GCD_1 BIT(30)
+#define PK_PRIME BIT(31)
+#define SELF BIT(0)
+#define SHRD BIT(1)
+#define JQP BIT(2)
+
+/* NFIFOADD specific flags */
+#define PAD_ZERO BIT(16)
+#define PAD_NONZERO BIT(17)
+#define PAD_INCREMENT BIT(18)
+#define PAD_RANDOM BIT(19)
+#define PAD_ZERO_N1 BIT(20)
+#define PAD_NONZERO_0 BIT(21)
+#define PAD_N1 BIT(23)
+#define PAD_NONZERO_N BIT(24)
+#define OC BIT(25)
+#define BM BIT(26)
+#define PR BIT(27)
+#define PS BIT(28)
+#define BP BIT(29)
+
+/* MOVE Command specific flags */
+#define WAITCOMP BIT(16)
+#define SIZE_WORD BIT(17)
+#define SIZE_BYTE BIT(18)
+#define SIZE_DWORD BIT(19)
+
+/* MATH command specific flags */
+#define IFB MATH_IFB
+#define NFU MATH_NFU
+#define STL MATH_STL
+#define SSEL MATH_SSEL
+#define SWP MATH_SWP
+#define IMMED2 BIT(31)
+
+/**
+ * struct program - descriptor buffer management structure
+ * @current_pc: current offset in descriptor
+ * @current_instruction: current instruction in descriptor
+ * @first_error_pc: offset of the first error in descriptor
+ * @start_pc: start offset in descriptor buffer
+ * @buffer: buffer carrying descriptor
+ * @shrhdr: shared descriptor header
+ * @jobhdr: job descriptor header
+ * @ps: pointer fields size; if ps is true, pointers will be 36bits in
+ * length; if ps is false, pointers will be 32bits in length
+ * @bswap: if true, perform byte swap on a 4-byte boundary
+ */
+struct program {
+ unsigned int current_pc;
+ unsigned int current_instruction;
+ unsigned int first_error_pc;
+ unsigned int start_pc;
+ uint32_t *buffer;
+ uint32_t *shrhdr;
+ uint32_t *jobhdr;
+ bool ps;
+ bool bswap;
+};
+
+static inline void
+rta_program_cntxt_init(struct program *program,
+ uint32_t *buffer, unsigned int offset)
+{
+ program->current_pc = 0;
+ program->current_instruction = 0;
+ program->first_error_pc = 0;
+ program->start_pc = offset;
+ program->buffer = buffer;
+ program->shrhdr = NULL;
+ program->jobhdr = NULL;
+ program->ps = false;
+ program->bswap = false;
+}
+
+static inline int
+rta_program_finalize(struct program *program)
+{
+ /* Descriptor is usually not allowed to go beyond 64 words size */
+ if (program->current_pc > MAX_CAAM_DESCSIZE)
+ pr_warn("Descriptor Size exceeded max limit of 64 words\n");
+
+ /* Descriptor is erroneous */
+ if (program->first_error_pc) {
+ pr_err("Descriptor creation error\n");
+ return -EINVAL;
+ }
+
+ /* Update descriptor length in shared and job descriptor headers */
+ if (program->shrhdr != NULL)
+ *program->shrhdr |= program->bswap ?
+ swab32(program->current_pc) :
+ program->current_pc;
+ else if (program->jobhdr != NULL)
+ *program->jobhdr |= program->bswap ?
+ swab32(program->current_pc) :
+ program->current_pc;
+
+ return (int)program->current_pc;
+}
+
+static inline unsigned int
+rta_program_set_36bit_addr(struct program *program)
+{
+ program->ps = true;
+ return program->current_pc;
+}
+
+static inline unsigned int
+rta_program_set_bswap(struct program *program)
+{
+ program->bswap = true;
+ return program->current_pc;
+}
+
+static inline void
+__rta_out32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = program->bswap ?
+ swab32(val) : val;
+ program->current_pc++;
+}
+
+static inline void
+__rta_out_be32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = cpu_to_be32(val);
+ program->current_pc++;
+}
+
+static inline void
+__rta_out_le32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = cpu_to_le32(val);
+ program->current_pc++;
+}
+
+static inline void
+__rta_out64(struct program *program, bool is_ext, uint64_t val)
+{
+ if (is_ext) {
+ /*
+ * Since we are guaranteed only a 4-byte alignment in the
+ * descriptor buffer, we have to do 2 x 32-bit (word) writes.
+ * For the order of the 2 words to be correct, we need to
+ * take into account the endianness of the CPU.
+ */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ __rta_out32(program, program->bswap ? lower_32_bits(val) :
+ upper_32_bits(val));
+
+ __rta_out32(program, program->bswap ? upper_32_bits(val) :
+ lower_32_bits(val));
+#else
+ __rta_out32(program, program->bswap ? upper_32_bits(val) :
+ lower_32_bits(val));
+
+ __rta_out32(program, program->bswap ? lower_32_bits(val) :
+ upper_32_bits(val));
+#endif
+ } else {
+ __rta_out32(program, lower_32_bits(val));
+ }
+}
+
+static inline unsigned int
+rta_word(struct program *program, uint32_t val)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out32(program, val);
+
+ return start_pc;
+}
+
+static inline unsigned int
+rta_dword(struct program *program, uint64_t val)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out64(program, true, val);
+
+ return start_pc;
+}
+
+static inline uint32_t
+inline_flags(enum rta_data_type data_type)
+{
+ switch (data_type) {
+ case RTA_DATA_PTR:
+ return 0;
+ case RTA_DATA_IMM:
+ return IMMED | COPY;
+ case RTA_DATA_IMM_DMA:
+ return IMMED | DCOPY;
+ default:
+ /* warn and default to RTA_DATA_PTR */
+ pr_warn("RTA: defaulting to RTA_DATA_PTR parameter type\n");
+ return 0;
+ }
+}
+
+static inline unsigned int
+rta_copy_data(struct program *program, uint8_t *data, unsigned int length)
+{
+ unsigned int i;
+ unsigned int start_pc = program->current_pc;
+ uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];
+
+ for (i = 0; i < length; i++)
+ *tmp++ = data[i];
+ program->current_pc += (length + 3) / 4;
+
+ return start_pc;
+}
+
+#if defined(__EWL__) && defined(AIOP)
+static inline void
+__rta_dma_data(void *ws_dst, uint64_t ext_address, uint16_t size)
+{ cdma_read(ws_dst, ext_address, size); }
+#else
+static inline void
+__rta_dma_data(void *ws_dst __maybe_unused,
+ uint64_t ext_address __maybe_unused,
+ uint16_t size __maybe_unused)
+{ pr_warn("RTA: DCOPY not supported, DMA will be skipped\n"); }
+#endif /* defined(__EWL__) && defined(AIOP) */
+
+static inline void
+__rta_inline_data(struct program *program, uint64_t data,
+ uint32_t copy_data, uint32_t length)
+{
+ if (!copy_data) {
+ __rta_out64(program, length > 4, data);
+ } else if (copy_data & COPY) {
+ uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];
+ uint32_t i;
+
+ for (i = 0; i < length; i++)
+ *tmp++ = ((uint8_t *)(uintptr_t)data)[i];
+ program->current_pc += ((length + 3) / 4);
+ } else if (copy_data & DCOPY) {
+ __rta_dma_data(&program->buffer[program->current_pc], data,
+ (uint16_t)length);
+ program->current_pc += ((length + 3) / 4);
+ }
+}
+
+static inline unsigned int
+rta_desc_len(uint32_t *buffer)
+{
+ if ((*buffer & CMD_MASK) == CMD_DESC_HDR)
+ return *buffer & HDR_DESCLEN_MASK;
+ else
+ return *buffer & HDR_DESCLEN_SHR_MASK;
+}
+
+static inline unsigned int
+rta_desc_bytes(uint32_t *buffer)
+{
+ return (unsigned int)(rta_desc_len(buffer) * CAAM_CMD_SZ);
+}
+
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* or
+ * OP_PCLID_DKP_* - MD5, SHA1, SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline uint32_t
+split_key_len(uint32_t hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const uint8_t mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ uint32_t idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (uint32_t)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline uint32_t
+split_key_pad_len(uint32_t hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
+static inline unsigned int
+rta_set_label(struct program *program)
+{
+ return program->current_pc + program->start_pc;
+}
+
+static inline int
+rta_patch_move(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~MOVE_OFFSET_MASK;
+ opcode |= (new_ref << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_jmp(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~JUMP_OFFSET_MASK;
+ opcode |= (new_ref - (line + program->start_pc)) & JUMP_OFFSET_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_header(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~HDR_START_IDX_MASK;
+ opcode |= (new_ref << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_load(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = (bswap ? swab32(program->buffer[line]) :
+ program->buffer[line]) & (uint32_t)~LDST_OFFSET_MASK;
+
+ if (opcode & (LDST_SRCDST_WORD_DESCBUF | LDST_CLASS_DECO))
+ opcode |= (new_ref << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
+ else
+ opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
+ LDST_OFFSET_MASK;
+
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_store(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~LDST_OFFSET_MASK;
+
+ switch (opcode & LDST_SRCDST_MASK) {
+ case LDST_SRCDST_WORD_DESCBUF:
+ case LDST_SRCDST_WORD_DESCBUF_JOB:
+ case LDST_SRCDST_WORD_DESCBUF_SHARED:
+ case LDST_SRCDST_WORD_DESCBUF_JOB_WE:
+ case LDST_SRCDST_WORD_DESCBUF_SHARED_WE:
+ opcode |= ((new_ref) << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
+ break;
+ default:
+ opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
+ LDST_OFFSET_MASK;
+ }
+
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_raw(struct program *program, int line, unsigned int mask,
+ unsigned int new_val)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~mask;
+ opcode |= new_val & mask;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+__rta_map_opcode(uint32_t name, const uint32_t (*map_table)[2],
+ unsigned int num_of_entries, uint32_t *val)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_of_entries; i++)
+ if (map_table[i][0] == name) {
+ *val = map_table[i][1];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline void
+__rta_map_flags(uint32_t flags, const uint32_t (*flags_table)[2],
+ unsigned int num_of_entries, uint32_t *opcode)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_of_entries; i++) {
+ if (flags_table[i][0] & flags)
+ *opcode |= flags_table[i][1];
+ }
+}
+
+#endif /* __RTA_SEC_RUN_TIME_ASM_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
new file mode 100644
index 00000000..8d421a5d
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
@@ -0,0 +1,208 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_SEQ_IN_OUT_PTR_CMD_H__
+#define __RTA_SEQ_IN_OUT_PTR_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed SEQ IN PTR flags for each SEC Era. */
+static const uint32_t seq_in_ptr_flags[] = {
+ RBS | INL | SGF | PRE | EXT | RTO,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP
+};
+
+/* Allowed SEQ OUT PTR flags for each SEC Era. */
+static const uint32_t seq_out_ptr_flags[] = {
+ SGF | PRE | EXT,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS
+};
+
+static inline int
+rta_seq_in_ptr(struct program *program, uint64_t src,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = CMD_SEQ_IN_PTR;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ /* Parameters checking */
+ if ((flags & RTO) && (flags & PRE)) {
+ pr_err("SEQ IN PTR: Invalid usage of RTO and PRE flags\n");
+ goto err;
+ }
+ if (flags & ~seq_in_ptr_flags[rta_sec_era]) {
+ pr_err("SEQ IN PTR: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & INL) && (flags & RJD)) {
+ pr_err("SEQ IN PTR: Invalid usage of INL and RJD flags\n");
+ goto err;
+ }
+ if ((src) && (flags & (SOP | RTO | PRE))) {
+ pr_err("SEQ IN PTR: Invalid usage of RTO or PRE flag\n");
+ goto err;
+ }
+ if ((flags & SOP) && (flags & (RBS | PRE | RTO | EXT))) {
+ pr_err("SEQ IN PTR: Invalid usage of SOP and (RBS or PRE or RTO or EXT) flags\n");
+ goto err;
+ }
+
+ /* write flag fields */
+ if (flags & RBS)
+ opcode |= SQIN_RBS;
+ if (flags & INL)
+ opcode |= SQIN_INL;
+ if (flags & SGF)
+ opcode |= SQIN_SGF;
+ if (flags & PRE)
+ opcode |= SQIN_PRE;
+ if (flags & RTO)
+ opcode |= SQIN_RTO;
+ if (flags & RJD)
+ opcode |= SQIN_RJD;
+ if (flags & SOP)
+ opcode |= SQIN_SOP;
+ if ((length >> 16) || (flags & EXT)) {
+ if (flags & SOP) {
+ pr_err("SEQ IN PTR: Invalid usage of SOP and EXT flags\n");
+ goto err;
+ }
+
+ opcode |= SQIN_EXT;
+ } else {
+ opcode |= length & SQIN_LEN_MASK;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (!(opcode & (SQIN_PRE | SQIN_RTO | SQIN_SOP)))
+ __rta_out64(program, program->ps, src);
+
+ /* write extended length field */
+ if (opcode & SQIN_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static inline int
+rta_seq_out_ptr(struct program *program, uint64_t dst,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = CMD_SEQ_OUT_PTR;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ /* Parameters checking */
+ if (flags & ~seq_out_ptr_flags[rta_sec_era]) {
+ pr_err("SEQ OUT PTR: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & RTO) && (flags & PRE)) {
+ pr_err("SEQ OUT PTR: Invalid usage of RTO and PRE flags\n");
+ goto err;
+ }
+ if ((dst) && (flags & (RTO | PRE))) {
+ pr_err("SEQ OUT PTR: Invalid usage of RTO or PRE flag\n");
+ goto err;
+ }
+ if ((flags & RST) && !(flags & RTO)) {
+ pr_err("SEQ OUT PTR: RST flag must be used with RTO flag\n");
+ goto err;
+ }
+
+ /* write flag fields */
+ if (flags & SGF)
+ opcode |= SQOUT_SGF;
+ if (flags & PRE)
+ opcode |= SQOUT_PRE;
+ if (flags & RTO)
+ opcode |= SQOUT_RTO;
+ if (flags & RST)
+ opcode |= SQOUT_RST;
+ if (flags & EWS)
+ opcode |= SQOUT_EWS;
+ if ((length >> 16) || (flags & EXT))
+ opcode |= SQOUT_EXT;
+ else
+ opcode |= length & SQOUT_LEN_MASK;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (!(opcode & (SQOUT_PRE | SQOUT_RTO)))
+ __rta_out64(program, program->ps, dst);
+
+ /* write extended length field */
+ if (opcode & SQOUT_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_SEQ_IN_OUT_PTR_CMD_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
new file mode 100644
index 00000000..ac4f3ff8
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
@@ -0,0 +1,75 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_SIGNATURE_CMD_H__
+#define __RTA_SIGNATURE_CMD_H__
+
+static inline int
+rta_signature(struct program *program, uint32_t sign_type)
+{
+ uint32_t opcode = CMD_SIGNATURE;
+ unsigned int start_pc = program->current_pc;
+
+ switch (sign_type) {
+ case (SIGN_TYPE_FINAL):
+ case (SIGN_TYPE_FINAL_RESTORE):
+ case (SIGN_TYPE_FINAL_NONZERO):
+ case (SIGN_TYPE_IMM_2):
+ case (SIGN_TYPE_IMM_3):
+ case (SIGN_TYPE_IMM_4):
+ opcode |= sign_type;
+ break;
+ default:
+ pr_err("SIGNATURE Command: Invalid type selection\n");
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+#endif /* __RTA_SIGNATURE_CMD_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
new file mode 100644
index 00000000..8fd01801
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
@@ -0,0 +1,185 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_STORE_CMD_H__
+#define __RTA_STORE_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t store_src_table[][2] = {
+/*1*/ { KEY1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_KEYSZ_REG },
+ { KEY2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_KEYSZ_REG },
+ { DJQDA, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_JQDAR },
+ { MODE1, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_MODE_REG },
+ { MODE2, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_MODE_REG },
+ { DJQCTRL, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_JQCTRL },
+ { DATA1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DATASZ_REG },
+ { DATA2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_DATASZ_REG },
+ { DSTAT, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_STAT },
+ { ICV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ICVSZ_REG },
+ { ICV2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_ICVSZ_REG },
+ { DPID, LDST_CLASS_DECO | LDST_SRCDST_WORD_PID },
+ { CCTRL, LDST_SRCDST_WORD_CHACTRL },
+ { ICTRL, LDST_SRCDST_WORD_IRQCTRL },
+ { CLRW, LDST_SRCDST_WORD_CLRW },
+ { MATH0, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH0 },
+ { CSTAT, LDST_SRCDST_WORD_STAT },
+ { MATH1, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH1 },
+ { MATH2, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH2 },
+ { AAD1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DECO_AAD_SZ },
+ { MATH3, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3 },
+ { IV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_CLASS1_IV_SZ },
+ { PKASZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_A_SZ },
+ { PKBSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_B_SZ },
+ { PKESZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_E_SZ },
+ { PKNSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_N_SZ },
+ { CONTEXT1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT },
+ { CONTEXT2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT },
+ { DESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF },
+/*30*/ { JOBDESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF_JOB },
+ { SHAREDESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF_SHARED },
+/*32*/ { JOBDESCBUF_EFF, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DESCBUF_JOB_WE },
+ { SHAREDESCBUF_EFF, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DESCBUF_SHARED_WE },
+/*34*/ { GTR, LDST_CLASS_DECO | LDST_SRCDST_WORD_GTR },
+ { STR, LDST_CLASS_DECO | LDST_SRCDST_WORD_STR }
+};
+
+/*
+ * Allowed STORE sources for each SEC ERA.
+ * Values represent the number of entries from source_src_table[] that are
+ * supported.
+ */
+static const unsigned int store_src_table_sz[] = {29, 31, 33, 33,
+ 33, 33, 35, 35};
+
+static inline int
+rta_store(struct program *program, uint64_t src,
+ uint16_t offset, uint64_t dst, uint32_t length,
+ uint32_t flags)
+{
+ uint32_t opcode = 0, val;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & SEQ)
+ opcode = CMD_SEQ_STORE;
+ else
+ opcode = CMD_STORE;
+
+ /* parameters check */
+ if ((flags & IMMED) && (flags & SGF)) {
+ pr_err("STORE: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ if ((flags & IMMED) && (offset != 0)) {
+ pr_err("STORE: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if ((flags & SEQ) && ((src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) ||
+ (src == SHAREDESCBUF_EFF))) {
+ pr_err("STORE: Invalid SRC type. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (flags & IMMED)
+ opcode |= LDST_IMM;
+
+ if ((flags & SGF) || (flags & VLF))
+ opcode |= LDST_VLF;
+
+ /*
+ * source for data to be stored can be specified as:
+ * - register location; set in src field[9-15];
+ * - if IMMED flag is set, data is set in value field [0-31];
+ * user can give this value as actual value or pointer to data
+ */
+ if (!(flags & IMMED)) {
+ ret = __rta_map_opcode((uint32_t)src, store_src_table,
+ store_src_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("STORE: Invalid source. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* DESC BUFFER: length / offset values are specified in 4-byte words */
+ if ((src == DESCBUF) || (src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) || (src == SHAREDESCBUF_EFF)) {
+ opcode |= (length >> 2);
+ opcode |= (uint32_t)((offset >> 2) << LDST_OFFSET_SHIFT);
+ } else {
+ opcode |= length;
+ opcode |= (uint32_t)(offset << LDST_OFFSET_SHIFT);
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if ((src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) || (src == SHAREDESCBUF_EFF))
+ return (int)start_pc;
+
+ /* for STORE, a pointer to where the data will be stored if needed */
+ if (!(flags & SEQ))
+ __rta_out64(program, program->ps, dst);
+
+ /* for IMMED data, place the data here */
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_STORE_CMD_H__ */
diff --git a/drivers/crypto/dpaa2_sec/mc/dpseci.c b/drivers/crypto/dpaa2_sec/mc/dpseci.c
new file mode 100644
index 00000000..6b3411f0
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -0,0 +1,558 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpseci.h>
+#include <fsl_dpseci_cmd.h>
+
+int
+dpseci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpseci_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ DPSECI_CMD_OPEN(cmd, dpseci_id);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return 0;
+}
+
+int
+dpseci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpseci_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ DPSECI_CMD_CREATE(cmd, cfg);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+
+ return 0;
+}
+
+int
+dpseci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ /* set object id to destroy */
+ CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_IS_ENABLED(cmd, *en);
+
+ return 0;
+}
+
+int
+dpseci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_get_irq(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ int *type,
+ struct dpseci_irq_cfg *irq_cfg)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ,
+ cmd_flags,
+ token);
+ DPSECI_CMD_GET_IRQ(cmd, irq_index);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_IRQ(cmd, *type, irq_cfg);
+
+ return 0;
+}
+
+int
+dpseci_set_irq(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ struct dpseci_irq_cfg *irq_cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ,
+ cmd_flags,
+ token);
+ DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_get_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_IRQ_ENABLE(cmd, *en);
+
+ return 0;
+}
+
+int
+dpseci_set_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t en)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_get_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *mask)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
+ cmd_flags,
+ token);
+ DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_IRQ_MASK(cmd, *mask);
+
+ return 0;
+}
+
+int
+dpseci_set_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t mask)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_get_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *status)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_IRQ_STATUS(cmd, *status);
+
+ return 0;
+}
+
+int
+dpseci_clear_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t status)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_ATTR(cmd, attr);
+
+ return 0;
+}
+
+int
+dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ const struct dpseci_rx_queue_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_rx_queue_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ DPSECI_CMD_GET_RX_QUEUE(cmd, queue);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_RX_QUEUE(cmd, attr);
+
+ return 0;
+}
+
+int
+dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_tx_queue_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ DPSECI_CMD_GET_TX_QUEUE(cmd, queue);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_TX_QUEUE(cmd, attr);
+
+ return 0;
+}
+
+int
+dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_SEC_ATTR(cmd, attr);
+
+ return 0;
+}
+
+int
+dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_counters *counters)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters);
+
+ return 0;
+}
+
+int
+dpseci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ DPSECI_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+
+ return 0;
+}
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
new file mode 100644
index 00000000..0fb47f73
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -0,0 +1,746 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_DPSECI_H
+#define __FSL_DPSECI_H
+
+/* Data Path SEC Interface API
+ * Contains initialization APIs and runtime control APIs for DPSECI
+ */
+
+struct fsl_mc_io;
+
+/**
+ * General DPSECI macros
+ */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPSECI object
+ */
+#define DPSECI_PRIO_NUM 8
+
+/**
+ * All queues considered; see dpseci_set_rx_queue()
+ */
+#define DPSECI_ALL_QUEUES (uint8_t)(-1)
+
+/**
+ * dpseci_open() - Open a control session for the specified object
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpseci_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param dpseci_id DPSECI unique ID
+ * @param token Returned token; use in subsequent API calls
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpseci_id,
+ uint16_t *token);
+
+/**
+ * dpseci_close() - Close the control session of the object
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpseci_cfg - Structure representing DPSECI configuration
+ */
+struct dpseci_cfg {
+ uint8_t num_tx_queues; /* num of queues towards the SEC */
+ uint8_t num_rx_queues; /* num of queues back from the SEC */
+ uint8_t priorities[DPSECI_PRIO_NUM];
+ /**< Priorities for the SEC hardware processing;
+ * each place in the array is the priority of the tx queue
+ * towards the SEC,
+ * valid priorities are configured with values 1-8;
+ */
+};
+
+/**
+ * dpseci_create() - Create the DPSECI object
+ * Create the DPSECI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param dprc_token Parent container token; '0' for default container
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param cfg Configuration structure
+ * @param obj_id returned object id
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpseci_cfg *cfg,
+ uint32_t *obj_id);
+
+/**
+ * dpseci_destroy() - Destroy the DPSECI object and release all its resources.
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param dprc_token Parent container token; '0' for default container
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param object_id The object id; it must be a valid id within the
+ * container that created this object;
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+/**
+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param en Returns '1' if object is enabled; '0' otherwise
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+/**
+ * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpseci_irq_cfg - IRQ configuration
+ */
+struct dpseci_irq_cfg {
+ uint64_t addr;
+ /* Address that must be written to signal a message-based interrupt */
+ uint32_t val;
+ /* Value to write into irq_addr address */
+ int irq_num;
+ /* A user defined number associated with this IRQ */
+};
+
+/**
+ * dpseci_set_irq() - Set IRQ information for the DPSECI to trigger an interrupt
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index Identifies the interrupt index to configure
+ * @param irq_cfg IRQ configuration
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_set_irq(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ struct dpseci_irq_cfg *irq_cfg);
+
+/**
+ * dpseci_get_irq() - Get IRQ information from the DPSECI
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param type Interrupt type: 0 represents message interrupt
+ * type (both irq_addr and irq_val are valid)
+ * @param irq_cfg IRQ attributes
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_irq(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ int *type,
+ struct dpseci_irq_cfg *irq_cfg);
+
+/**
+ * dpseci_set_irq_enable() - Set overall interrupt state.
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable control's the
+ * overall interrupt state. if the interrupt is disabled no causes will cause
+ * an interrupt
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param en Interrupt state - enable = 1, disable = 0
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_set_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t en);
+
+/**
+ * dpseci_get_irq_enable() - Get overall interrupt state
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param en Returned Interrupt state - enable = 1,
+ * disable = 0
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t *en);
+
+/**
+ * dpseci_set_irq_mask() - Set interrupt mask.
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param mask event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_set_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t mask);
+
+/**
+ * dpseci_get_irq_mask() - Get interrupt mask.
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param mask Returned event mask to trigger interrupt
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *mask);
+
+/**
+ * dpseci_get_irq_status() - Get the current status of any pending interrupts
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param status Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *status);
+
+/**
+ * dpseci_clear_irq_status() - Clear a pending interrupt's status
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param status bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_clear_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t status);
+
+/**
+ * struct dpseci_attr - Structure representing DPSECI attributes
+ * @param id: DPSECI object ID
+ * @param num_tx_queues: number of queues towards the SEC
+ * @param num_rx_queues: number of queues back from the SEC
+ */
+struct dpseci_attr {
+ int id; /* DPSECI object ID */
+ uint8_t num_tx_queues; /* number of queues towards the SEC */
+ uint8_t num_rx_queues; /* number of queues back from the SEC */
+};
+
+/**
+ * dpseci_get_attributes() - Retrieve DPSECI attributes.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param attr Returned object's attributes
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_attr *attr);
+
+/**
+ * enum dpseci_dest - DPSECI destination types
+ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to
+ * dequeue from the queue based on polling or other user-defined
+ * method
+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON
+ * object; user is expected to dequeue from the DPCON channel
+ */
+enum dpseci_dest {
+ DPSECI_DEST_NONE = 0,
+ DPSECI_DEST_DPIO = 1,
+ DPSECI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
+ */
+struct dpseci_dest_cfg {
+ enum dpseci_dest dest_type; /* Destination type */
+ int dest_id;
+ /* Either DPIO ID or DPCON ID, depending on the destination type */
+ uint8_t priority;
+ /* Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPSECI_DEST_NONE' option
+ */
+};
+
+/**
+ * DPSECI queue modification options
+ */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPSECI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Select to modify the queue's order preservation
+ */
+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
+
+/**
+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
+ */
+struct dpseci_rx_queue_cfg {
+ uint32_t options;
+ /* Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
+ */
+ int order_preservation_en;
+ /* order preservation configuration for the rx queue
+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in
+ * 'options'
+ */
+ uint64_t user_ctx;
+ /* User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ */
+ struct dpseci_dest_cfg dest_cfg;
+ /* Queue destination parameters;
+ * valid only if 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+};
+
+/**
+ * dpseci_set_rx_queue() - Set Rx queue configuration
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param queue Select the queue relative to number of
+ * priorities configured at DPSECI creation; use
+ * DPSECI_ALL_QUEUES to configure all Rx queues
+ * identically.
+ * @param cfg Rx queue configuration
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ const struct dpseci_rx_queue_cfg *cfg);
+
+/**
+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
+ */
+struct dpseci_rx_queue_attr {
+ uint64_t user_ctx;
+ /* User context value provided in the frame descriptor of
+ * each dequeued frame
+ */
+ int order_preservation_en;
+ /* Status of the order preservation configuration on the queue */
+ struct dpseci_dest_cfg dest_cfg;
+ /* Queue destination configuration */
+ uint32_t fqid;
+ /* Virtual FQID value to be used for dequeue operations */
+};
+
+/**
+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param queue Select the queue relative to number of
+ * priorities configured at DPSECI creation
+ * @param attr Returned Rx queue attributes
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_rx_queue_attr *attr);
+
+/**
+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
+ */
+struct dpseci_tx_queue_attr {
+ uint32_t fqid;
+ /* Virtual FQID to be used for sending frames to SEC hardware */
+ uint8_t priority;
+ /* SEC hardware processing priority for the queue */
+};
+
+/**
+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param queue Select the queue relative to number of
+ * priorities configured at DPSECI creation
+ * @param attr Returned Tx queue attributes
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_tx_queue_attr *attr);
+
+/**
+ * struct dpseci_sec_attr - Structure representing attributes of the SEC
+ * hardware accelerator
+ */
+
+struct dpseci_sec_attr {
+ uint16_t ip_id; /* ID for SEC */
+ uint8_t major_rev; /* Major revision number for SEC */
+ uint8_t minor_rev; /* Minor revision number for SEC */
+ uint8_t era; /* SEC Era */
+ uint8_t deco_num;
+ /* The number of copies of the DECO that are implemented in
+ * this version of SEC
+ */
+ uint8_t zuc_auth_acc_num;
+ /* The number of copies of ZUCA that are implemented in this
+ * version of SEC
+ */
+ uint8_t zuc_enc_acc_num;
+ /* The number of copies of ZUCE that are implemented in this
+ * version of SEC
+ */
+ uint8_t snow_f8_acc_num;
+ /* The number of copies of the SNOW-f8 module that are
+ * implemented in this version of SEC
+ */
+ uint8_t snow_f9_acc_num;
+ /* The number of copies of the SNOW-f9 module that are
+ * implemented in this version of SEC
+ */
+ uint8_t crc_acc_num;
+ /* The number of copies of the CRC module that are implemented
+ * in this version of SEC
+ */
+ uint8_t pk_acc_num;
+ /* The number of copies of the Public Key module that are
+ * implemented in this version of SEC
+ */
+ uint8_t kasumi_acc_num;
+ /* The number of copies of the Kasumi module that are
+ * implemented in this version of SEC
+ */
+ uint8_t rng_acc_num;
+ /* The number of copies of the Random Number Generator that are
+ * implemented in this version of SEC
+ */
+ uint8_t md_acc_num;
+ /* The number of copies of the MDHA (Hashing module) that are
+ * implemented in this version of SEC
+ */
+ uint8_t arc4_acc_num;
+ /* The number of copies of the ARC4 module that are implemented
+ * in this version of SEC
+ */
+ uint8_t des_acc_num;
+ /* The number of copies of the DES module that are implemented
+ * in this version of SEC
+ */
+ uint8_t aes_acc_num;
+ /* The number of copies of the AES module that are implemented
+ * in this version of SEC
+ */
+};
+
+/**
+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param attr Returned SEC attributes
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_attr *attr);
+
+/**
+ * struct dpseci_sec_counters - Structure representing global SEC counters and
+ * not per dpseci counters
+ */
+struct dpseci_sec_counters {
+ uint64_t dequeued_requests; /* Number of Requests Dequeued */
+ uint64_t ob_enc_requests; /* Number of Outbound Encrypt Requests */
+ uint64_t ib_dec_requests; /* Number of Inbound Decrypt Requests */
+ uint64_t ob_enc_bytes; /* Number of Outbound Bytes Encrypted */
+ uint64_t ob_prot_bytes; /* Number of Outbound Bytes Protected */
+ uint64_t ib_dec_bytes; /* Number of Inbound Bytes Decrypted */
+ uint64_t ib_valid_bytes; /* Number of Inbound Bytes Validated */
+};
+
+/**
+ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param counters Returned SEC counters
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_counters *counters);
+
+/**
+ * dpseci_get_api_version() - Get Data Path SEC Interface API version
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param major_ver Major version of data path sec API
+ * @param minor_ver Minor version of data path sec API
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+#endif /* __FSL_DPSECI_H */
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
new file mode 100644
index 00000000..1fb18cc3
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
@@ -0,0 +1,256 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FSL_DPSECI_CMD_H
+#define _FSL_DPSECI_CMD_H
+
+/* DPSECI Version */
+#define DPSECI_VER_MAJOR 5
+#define DPSECI_VER_MINOR 0
+
+/* Command IDs */
+#define DPSECI_CMDID_CLOSE ((0x800 << 4) | (0x1))
+#define DPSECI_CMDID_OPEN ((0x809 << 4) | (0x1))
+#define DPSECI_CMDID_CREATE ((0x909 << 4) | (0x1))
+#define DPSECI_CMDID_DESTROY ((0x989 << 4) | (0x1))
+#define DPSECI_CMDID_GET_API_VERSION ((0xa09 << 4) | (0x1))
+
+#define DPSECI_CMDID_ENABLE ((0x002 << 4) | (0x1))
+#define DPSECI_CMDID_DISABLE ((0x003 << 4) | (0x1))
+#define DPSECI_CMDID_GET_ATTR ((0x004 << 4) | (0x1))
+#define DPSECI_CMDID_RESET ((0x005 << 4) | (0x1))
+#define DPSECI_CMDID_IS_ENABLED ((0x006 << 4) | (0x1))
+
+#define DPSECI_CMDID_SET_IRQ ((0x010 << 4) | (0x1))
+#define DPSECI_CMDID_GET_IRQ ((0x011 << 4) | (0x1))
+#define DPSECI_CMDID_SET_IRQ_ENABLE ((0x012 << 4) | (0x1))
+#define DPSECI_CMDID_GET_IRQ_ENABLE ((0x013 << 4) | (0x1))
+#define DPSECI_CMDID_SET_IRQ_MASK ((0x014 << 4) | (0x1))
+#define DPSECI_CMDID_GET_IRQ_MASK ((0x015 << 4) | (0x1))
+#define DPSECI_CMDID_GET_IRQ_STATUS ((0x016 << 4) | (0x1))
+#define DPSECI_CMDID_CLEAR_IRQ_STATUS ((0x017 << 4) | (0x1))
+
+#define DPSECI_CMDID_SET_RX_QUEUE ((0x194 << 4) | (0x1))
+#define DPSECI_CMDID_GET_RX_QUEUE ((0x196 << 4) | (0x1))
+#define DPSECI_CMDID_GET_TX_QUEUE ((0x197 << 4) | (0x1))
+#define DPSECI_CMDID_GET_SEC_ATTR ((0x198 << 4) | (0x1))
+#define DPSECI_CMDID_GET_SEC_COUNTERS ((0x199 << 4) | (0x1))
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_OPEN(cmd, dpseci_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpseci_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_CREATE(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->priorities[0]);\
+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[1]);\
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[2]);\
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->priorities[3]);\
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priorities[4]);\
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->priorities[5]);\
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->priorities[6]);\
+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->priorities[7]);\
+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->num_tx_queues);\
+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->num_rx_queues);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_IS_ENABLED(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_GET_IRQ(cmd, irq_index) \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_IRQ(cmd, type, irq_cfg) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index) \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_IRQ_MASK(cmd, mask) \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_IRQ_STATUS(cmd, status) \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_ATTR(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_tx_queues); \
+ MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->num_rx_queues); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue); \
+ MC_CMD_OP(cmd, 0, 48, 4, enum dpseci_dest, cfg->dest_cfg.dest_type); \
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
+ MC_CMD_OP(cmd, 2, 32, 1, int, cfg->order_preservation_en);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_GET_RX_QUEUE(cmd, queue) \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_RX_QUEUE(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
+ MC_RSP_OP(cmd, 0, 48, 4, enum dpseci_dest, attr->dest_cfg.dest_type);\
+ MC_RSP_OP(cmd, 1, 0, 8, uint64_t, attr->user_ctx);\
+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
+ MC_RSP_OP(cmd, 2, 32, 1, int, attr->order_preservation_en);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_GET_TX_QUEUE(cmd, queue) \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_TX_QUEUE(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid);\
+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->priority);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_SEC_ATTR(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->ip_id);\
+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->major_rev);\
+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->minor_rev);\
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->era);\
+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->deco_num);\
+ MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->zuc_auth_acc_num);\
+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, attr->zuc_enc_acc_num);\
+ MC_RSP_OP(cmd, 1, 32, 8, uint8_t, attr->snow_f8_acc_num);\
+ MC_RSP_OP(cmd, 1, 40, 8, uint8_t, attr->snow_f9_acc_num);\
+ MC_RSP_OP(cmd, 1, 48, 8, uint8_t, attr->crc_acc_num);\
+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->pk_acc_num);\
+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->kasumi_acc_num);\
+ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->rng_acc_num);\
+ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->md_acc_num);\
+ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->arc4_acc_num);\
+ MC_RSP_OP(cmd, 2, 48, 8, uint8_t, attr->des_acc_num);\
+ MC_RSP_OP(cmd, 2, 56, 8, uint8_t, attr->aes_acc_num);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, counters->dequeued_requests);\
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counters->ob_enc_requests);\
+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, counters->ib_dec_requests);\
+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, counters->ob_enc_bytes);\
+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, counters->ob_prot_bytes);\
+ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, counters->ib_dec_bytes);\
+ MC_RSP_OP(cmd, 6, 0, 64, uint64_t, counters->ib_valid_bytes);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_API_VERSION(cmd, major, minor) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
+} while (0)
+
+#endif /* _FSL_DPSECI_CMD_H */
diff --git a/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map b/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/drivers/crypto/kasumi/Makefile b/drivers/crypto/kasumi/Makefile
index 9fb0be85..b47cda0c 100644
--- a/drivers/crypto/kasumi/Makefile
+++ b/drivers/crypto/kasumi/Makefile
@@ -59,11 +59,4 @@ LDLIBS += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi
SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd_ops.c
-# library dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_ring
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_cryptodev
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index c22128d4..9da9e897 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -47,27 +47,6 @@
#define KASUMI_MAX_BURST 4
#define BYTE_LEN 8
-/**
- * Global static parameter used to create a unique name for each KASUMI
- * crypto device.
- */
-static unsigned unique_name_id;
-
-static inline int
-create_unique_device_name(char *name, size_t size)
-{
- int ret;
-
- if (name == NULL)
- return -EINVAL;
-
- ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_KASUMI_PMD),
- unique_name_id++);
- if (ret < 0)
- return ret;
- return 0;
-}
-
/** Get xform chain order. */
static enum kasumi_operation
kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
@@ -380,7 +359,7 @@ process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
}
enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)ops, processed_ops);
+ (void **)ops, processed_ops, NULL);
qp->qp_stats.enqueued_count += enqueued_ops;
*accumulated_enqueued_ops += enqueued_ops;
@@ -431,7 +410,7 @@ process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
}
enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op,
- processed_op);
+ processed_op, NULL);
qp->qp_stats.enqueued_count += enqueued_op;
*accumulated_enqueued_ops += enqueued_op;
@@ -455,6 +434,19 @@ kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
curr_c_op = ops[i];
+#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
+ if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src) ||
+ (curr_c_op->sym->m_dst != NULL &&
+ !rte_pktmbuf_is_contiguous(
+ curr_c_op->sym->m_dst))) {
+ KASUMI_LOG_ERR("PMD supports only contiguous mbufs, "
+ "op (%p) provides noncontiguous mbuf as "
+ "source/destination buffer.\n", curr_c_op);
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+#endif
+
/* Set status as enqueued (not processed yet) by default. */
curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
@@ -550,23 +542,27 @@ kasumi_pmd_dequeue_burst(void *queue_pair,
unsigned nb_dequeued;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)c_ops, nb_ops);
+ (void **)c_ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}
-static int cryptodev_kasumi_remove(const char *name);
+static int cryptodev_kasumi_remove(struct rte_vdev_device *vdev);
static int
cryptodev_kasumi_create(const char *name,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
- char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct kasumi_private *internals;
uint64_t cpu_flags = 0;
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
+
/* Check CPU for supported vector instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
cpu_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
@@ -577,14 +573,7 @@ cryptodev_kasumi_create(const char *name,
return -EFAULT;
}
- /* Create a unique device name. */
- if (create_unique_device_name(crypto_dev_name,
- RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
- KASUMI_LOG_ERR("failed to create unique cryptodev name");
- return -EINVAL;
- }
-
- dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
sizeof(struct kasumi_private), init_params->socket_id);
if (dev == NULL) {
KASUMI_LOG_ERR("failed to create cryptodev vdev");
@@ -609,37 +598,51 @@ cryptodev_kasumi_create(const char *name,
return 0;
init_error:
- KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed", name);
+ KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed",
+ init_params->name);
- cryptodev_kasumi_remove(crypto_dev_name);
+ cryptodev_kasumi_remove(vdev);
return -EFAULT;
}
static int
-cryptodev_kasumi_probe(const char *name,
- const char *input_args)
+cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
{
struct rte_crypto_vdev_init_params init_params = {
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
- rte_socket_id()
+ rte_socket_id(),
+ {0}
};
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
init_params.max_nb_queue_pairs);
RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
init_params.max_nb_sessions);
- return cryptodev_kasumi_create(name, &init_params);
+ return cryptodev_kasumi_create(name, vdev, &init_params);
}
static int
-cryptodev_kasumi_remove(const char *name)
+cryptodev_kasumi_remove(struct rte_vdev_device *vdev)
{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
index b9285a43..62ebdbd2 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
@@ -89,7 +89,8 @@ static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = {
/** Configure device */
static int
-kasumi_pmd_config(__rte_unused struct rte_cryptodev *dev)
+kasumi_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
{
return 0;
}
@@ -201,7 +202,7 @@ kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
- if (r->prod.size == ring_size) {
+ if (rte_ring_get_size(r) == ring_size) {
KASUMI_LOG_INFO("Reusing existing ring %s"
" for processed packets",
qp->name);
diff --git a/drivers/crypto/null/Makefile b/drivers/crypto/null/Makefile
index c143929f..bc2724b3 100644
--- a/drivers/crypto/null/Makefile
+++ b/drivers/crypto/null/Makefile
@@ -51,11 +51,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null_crypto_pmd_ops.c
# export include files
SYMLINK-y-include +=
-# library dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_ring
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_cryptodev
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index c69606b3..023450a3 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -38,27 +38,6 @@
#include "null_crypto_pmd_private.h"
-/**
- * Global static parameter used to create a unique name for each crypto device.
- */
-static unsigned unique_name_id;
-
-static inline int
-create_unique_device_name(char *name, size_t size)
-{
- int ret;
-
- if (name == NULL)
- return -EINVAL;
-
- ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_NULL_PMD),
- unique_name_id++);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-
/** verify and set session parameters */
int
null_crypto_set_session_parameters(
@@ -176,7 +155,7 @@ null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
unsigned nb_dequeued;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;
return nb_dequeued;
@@ -190,17 +169,13 @@ cryptodev_null_create(const char *name,
struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
- char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct null_crypto_private *internals;
- /* create a unique device name */
- if (create_unique_device_name(crypto_dev_name,
- RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
- NULL_CRYPTO_LOG_ERR("failed to create unique cryptodev name");
- return -EINVAL;
- }
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
- dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
sizeof(struct null_crypto_private),
init_params->socket_id);
if (dev == NULL) {
@@ -216,7 +191,8 @@ cryptodev_null_create(const char *name,
dev->enqueue_burst = null_crypto_pmd_enqueue_burst;
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
- RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
internals = dev->data->dev_private;
@@ -226,27 +202,34 @@ cryptodev_null_create(const char *name,
return 0;
init_error:
- NULL_CRYPTO_LOG_ERR("driver %s: cryptodev_null_create failed", name);
- cryptodev_null_remove(crypto_dev_name);
+ NULL_CRYPTO_LOG_ERR("driver %s: cryptodev_null_create failed",
+ init_params->name);
+ cryptodev_null_remove(init_params->name);
return -EFAULT;
}
/** Initialise null crypto device */
static int
-cryptodev_null_probe(const char *name,
- const char *input_args)
+cryptodev_null_probe(struct rte_vdev_device *dev)
{
struct rte_crypto_vdev_init_params init_params = {
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
- rte_socket_id()
+ rte_socket_id(),
+ {0}
};
+ const char *name;
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n",
+ name, init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
init_params.max_nb_queue_pairs);
RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
@@ -268,9 +251,15 @@ cryptodev_null_remove(const char *name)
return 0;
}
+static int
+cryptodev_null_remove_dev(struct rte_vdev_device *dev)
+{
+ return cryptodev_null_remove(rte_vdev_device_name(dev));
+}
+
static struct rte_vdev_driver cryptodev_null_pmd_drv = {
.probe = cryptodev_null_probe,
- .remove = cryptodev_null_remove
+ .remove = cryptodev_null_remove_dev,
};
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd_drv);
diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c
index 26ff6319..12c946c9 100644
--- a/drivers/crypto/null/null_crypto_pmd_ops.c
+++ b/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -85,7 +85,8 @@ static const struct rte_cryptodev_capabilities null_crypto_pmd_capabilities[] =
/** Configure device */
static int
-null_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev)
+null_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
{
return 0;
}
@@ -193,7 +194,7 @@ null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
- if (r->prod.size >= ring_size) {
+ if (rte_ring_get_size(r) >= ring_size) {
NULL_CRYPTO_LOG_INFO(
"Reusing existing ring %s for processed packets",
qp->name);
diff --git a/drivers/crypto/openssl/Makefile b/drivers/crypto/openssl/Makefile
index 8c4250c8..e5fdfb59 100644
--- a/drivers/crypto/openssl/Makefile
+++ b/drivers/crypto/openssl/Makefile
@@ -50,11 +50,4 @@ LDLIBS += -lcrypto
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd_ops.c
-# library dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_ring
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_cryptodev
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 832ea1d0..f0c5ca3c 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -42,31 +42,11 @@
#include "rte_openssl_pmd_private.h"
-static int cryptodev_openssl_remove(const char *name);
+#define DES_BLOCK_SIZE 8
-/*----------------------------------------------------------------------------*/
-
-/**
- * Global static parameter used to create a unique name for each
- * OPENSSL crypto device.
- */
-static unsigned int unique_name_id;
-
-static inline int
-create_unique_device_name(char *name, size_t size)
-{
- int ret;
-
- if (name == NULL)
- return -EINVAL;
+static int cryptodev_openssl_remove(struct rte_vdev_device *vdev);
- ret = snprintf(name, size, "%s_%u",
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD),
- unique_name_id++);
- if (ret < 0)
- return ret;
- return 0;
-}
+/*----------------------------------------------------------------------------*/
/**
* Increment counter by 1
@@ -311,7 +291,21 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess,
sess->cipher.key.data) != 0)
return -EINVAL;
break;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ sess->cipher.algo = xform->cipher.algo;
+ sess->chain_order = OPENSSL_CHAIN_CIPHER_BPI;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+ sess->cipher.evp_algo = EVP_des_cbc();
+ sess->cipher.bpi_ctx = EVP_CIPHER_CTX_new();
+ /* IV will be ECB encrypted whether direction is encrypt or decrypt */
+ if (EVP_EncryptInit_ex(sess->cipher.bpi_ctx, EVP_des_ecb(),
+ NULL, xform->cipher.key.data, 0) != 1)
+ return -EINVAL;
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+ break;
default:
sess->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
return -EINVAL;
@@ -429,6 +423,9 @@ openssl_reset_session(struct openssl_session *sess)
{
EVP_CIPHER_CTX_free(sess->cipher.ctx);
+ if (sess->chain_order == OPENSSL_CHAIN_CIPHER_BPI)
+ EVP_CIPHER_CTX_free(sess->cipher.bpi_ctx);
+
switch (sess->auth.mode) {
case OPENSSL_AUTH_AS_AUTH:
EVP_MD_CTX_destroy(sess->auth.auth.ctx);
@@ -484,24 +481,112 @@ get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
* Process Operations
*------------------------------------------------------------------------------
*/
+static inline int
+process_openssl_encryption_update(struct rte_mbuf *mbuf_src, int offset,
+ uint8_t **dst, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ struct rte_mbuf *m;
+ int dstlen;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ return -1;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, srclen) <= 0)
+ return -1;
+ *dst += l;
+ return 0;
+ }
+
+ if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+
+ *dst += dstlen;
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+ *dst += dstlen;
+ n -= l;
+ }
+
+ return 0;
+}
+
+static inline int
+process_openssl_decryption_update(struct rte_mbuf *mbuf_src, int offset,
+ uint8_t **dst, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ struct rte_mbuf *m;
+ int dstlen;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ return -1;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, srclen) <= 0)
+ return -1;
+ *dst += l;
+ return 0;
+ }
+
+ if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+
+ *dst += dstlen;
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+ *dst += dstlen;
+ n -= l;
+ }
+
+ return 0;
+}
/** Process standard openssl cipher encryption */
static int
-process_openssl_cipher_encrypt(uint8_t *src, uint8_t *dst,
- uint8_t *iv, uint8_t *key, int srclen,
+process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ int offset, uint8_t *iv, uint8_t *key, int srclen,
EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
{
- int dstlen, totlen;
+ int totlen;
if (EVP_EncryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
goto process_cipher_encrypt_err;
EVP_CIPHER_CTX_set_padding(ctx, 0);
- if (EVP_EncryptUpdate(ctx, dst, &dstlen, src, srclen) <= 0)
+ if (process_openssl_encryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
goto process_cipher_encrypt_err;
- if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+ if (EVP_EncryptFinal_ex(ctx, dst, &totlen) <= 0)
goto process_cipher_encrypt_err;
return 0;
@@ -511,26 +596,48 @@ process_cipher_encrypt_err:
return -EINVAL;
}
+/** Process standard openssl cipher encryption */
+static int
+process_openssl_cipher_bpi_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int srclen,
+ EVP_CIPHER_CTX *ctx)
+{
+ uint8_t i;
+ uint8_t encrypted_iv[DES_BLOCK_SIZE];
+ int encrypted_ivlen;
+
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen,
+ iv, DES_BLOCK_SIZE) <= 0)
+ goto process_cipher_encrypt_err;
+
+ for (i = 0; i < srclen; i++)
+ *(dst + i) = *(src + i) ^ (encrypted_iv[i]);
+
+ return 0;
+
+process_cipher_encrypt_err:
+ OPENSSL_LOG_ERR("Process openssl cipher bpi encrypt failed");
+ return -EINVAL;
+}
/** Process standard openssl cipher decryption */
static int
-process_openssl_cipher_decrypt(uint8_t *src, uint8_t *dst,
- uint8_t *iv, uint8_t *key, int srclen,
+process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ int offset, uint8_t *iv, uint8_t *key, int srclen,
EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
{
- int dstlen, totlen;
+ int totlen;
if (EVP_DecryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
goto process_cipher_decrypt_err;
- if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
- goto process_cipher_decrypt_err;
+ EVP_CIPHER_CTX_set_padding(ctx, 0);
- if (EVP_DecryptUpdate(ctx, dst, &dstlen, src, srclen) <= 0)
+ if (process_openssl_decryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
goto process_cipher_decrypt_err;
- if (EVP_DecryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+ if (EVP_DecryptFinal_ex(ctx, dst, &totlen) <= 0)
goto process_cipher_decrypt_err;
-
return 0;
process_cipher_decrypt_err:
@@ -540,11 +647,25 @@ process_cipher_decrypt_err:
/** Process cipher des 3 ctr encryption, decryption algorithm */
static int
-process_openssl_cipher_des3ctr(uint8_t *src, uint8_t *dst,
- uint8_t *iv, uint8_t *key, int srclen, EVP_CIPHER_CTX *ctx)
+process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ int offset, uint8_t *iv, uint8_t *key, int srclen,
+ EVP_CIPHER_CTX *ctx)
{
uint8_t ebuf[8], ctr[8];
int unused, n;
+ struct rte_mbuf *m;
+ uint8_t *src;
+ int l;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ goto process_cipher_des3ctr_err;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+ l = rte_pktmbuf_data_len(m) - offset;
/* We use 3DES encryption also for decryption.
* IV is not important for 3DES ecb
@@ -553,9 +674,8 @@ process_openssl_cipher_des3ctr(uint8_t *src, uint8_t *dst,
goto process_cipher_des3ctr_err;
memcpy(ctr, iv, 8);
- n = 0;
- while (n < srclen) {
+ for (n = 0; n < srclen; n++) {
if (n % 8 == 0) {
if (EVP_EncryptUpdate(ctx,
(unsigned char *)&ebuf, &unused,
@@ -563,8 +683,16 @@ process_openssl_cipher_des3ctr(uint8_t *src, uint8_t *dst,
goto process_cipher_des3ctr_err;
ctr_inc(ctr);
}
- dst[n] = src[n] ^ ebuf[n % 8];
- n++;
+ dst[n] = *(src++) ^ ebuf[n % 8];
+
+ l--;
+ if (!l) {
+ m = m->next;
+ if (m) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m);
+ }
+ }
}
return 0;
@@ -576,9 +704,9 @@ process_cipher_des3ctr_err:
/** Process auth/encription aes-gcm algorithm */
static int
-process_openssl_auth_encryption_gcm(uint8_t *src, int srclen,
- uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
- uint8_t *key, uint8_t *dst, uint8_t *tag,
+process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
+ uint8_t *key, uint8_t *dst, uint8_t *tag,
EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
{
int len = 0, unused = 0;
@@ -593,20 +721,20 @@ process_openssl_auth_encryption_gcm(uint8_t *src, int srclen,
if (EVP_EncryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
goto process_auth_encryption_gcm_err;
- if (aadlen > 0) {
+ if (aadlen > 0)
if (EVP_EncryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
goto process_auth_encryption_gcm_err;
- /* Workaround open ssl bug in version less then 1.0.1f */
- if (EVP_EncryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
- goto process_auth_encryption_gcm_err;
- }
-
if (srclen > 0)
- if (EVP_EncryptUpdate(ctx, dst, &len, src, srclen) <= 0)
+ if (process_openssl_encryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
goto process_auth_encryption_gcm_err;
- if (EVP_EncryptFinal_ex(ctx, dst + len, &len) <= 0)
+ /* Workaround open ssl bug in version less then 1.0.1f */
+ if (EVP_EncryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0)
goto process_auth_encryption_gcm_err;
if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, 16, tag) <= 0)
@@ -620,10 +748,10 @@ process_auth_encryption_gcm_err:
}
static int
-process_openssl_auth_decryption_gcm(uint8_t *src, int srclen,
- uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
- uint8_t *key, uint8_t *dst, uint8_t *tag,
- EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
+ uint8_t *key, uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx,
+ const EVP_CIPHER *algo)
{
int len = 0, unused = 0;
uint8_t empty[] = {};
@@ -640,20 +768,20 @@ process_openssl_auth_decryption_gcm(uint8_t *src, int srclen,
if (EVP_DecryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
goto process_auth_decryption_gcm_err;
- if (aadlen > 0) {
+ if (aadlen > 0)
if (EVP_DecryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
goto process_auth_decryption_gcm_err;
- /* Workaround open ssl bug in version less then 1.0.1f */
- if (EVP_DecryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
- goto process_auth_decryption_gcm_err;
- }
-
if (srclen > 0)
- if (EVP_DecryptUpdate(ctx, dst, &len, src, srclen) <= 0)
+ if (process_openssl_decryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
goto process_auth_decryption_gcm_err;
- if (EVP_DecryptFinal_ex(ctx, dst + len, &len) <= 0)
+ /* Workaround open ssl bug in version less then 1.0.1f */
+ if (EVP_DecryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_DecryptFinal_ex(ctx, dst, &len) <= 0)
goto process_auth_decryption_gcm_final_err;
return 0;
@@ -668,21 +796,50 @@ process_auth_decryption_gcm_final_err:
/** Process standard openssl auth algorithms */
static int
-process_openssl_auth(uint8_t *src, uint8_t *dst,
+process_openssl_auth(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
__rte_unused uint8_t *iv, __rte_unused EVP_PKEY * pkey,
int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
{
size_t dstlen;
+ struct rte_mbuf *m;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ goto process_auth_err;
if (EVP_DigestInit_ex(ctx, algo, NULL) <= 0)
goto process_auth_err;
- if (EVP_DigestUpdate(ctx, (char *)src, srclen) <= 0)
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_DigestUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+ goto process_auth_final;
+ }
+
+ if (EVP_DigestUpdate(ctx, (char *)src, l) <= 0)
goto process_auth_err;
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_DigestUpdate(ctx, (char *)src, l) <= 0)
+ goto process_auth_err;
+ n -= l;
+ }
+
+process_auth_final:
if (EVP_DigestFinal_ex(ctx, dst, (unsigned int *)&dstlen) <= 0)
goto process_auth_err;
-
return 0;
process_auth_err:
@@ -692,18 +849,48 @@ process_auth_err:
/** Process standard openssl auth algorithms with hmac */
static int
-process_openssl_auth_hmac(uint8_t *src, uint8_t *dst,
+process_openssl_auth_hmac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
__rte_unused uint8_t *iv, EVP_PKEY *pkey,
- int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
+ int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
{
size_t dstlen;
+ struct rte_mbuf *m;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ goto process_auth_err;
if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
goto process_auth_err;
- if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+ goto process_auth_final;
+ }
+
+ if (EVP_DigestSignUpdate(ctx, (char *)src, l) <= 0)
goto process_auth_err;
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_DigestSignUpdate(ctx, (char *)src, l) <= 0)
+ goto process_auth_err;
+ n -= l;
+ }
+
+process_auth_final:
if (EVP_DigestSignFinal(ctx, dst, &dstlen) <= 0)
goto process_auth_err;
@@ -723,9 +910,18 @@ process_openssl_combined_op
struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
{
/* cipher */
- uint8_t *src = NULL, *dst = NULL, *iv, *tag, *aad;
+ uint8_t *dst = NULL, *iv, *tag, *aad;
int srclen, ivlen, aadlen, status = -1;
+ /*
+ * Segmented destination buffer is not supported for
+ * encryption/decryption
+ */
+ if (!rte_pktmbuf_is_contiguous(mbuf_dst)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
iv = op->sym->cipher.iv.data;
ivlen = op->sym->cipher.iv.length;
aad = op->sym->auth.aad.data;
@@ -741,22 +937,22 @@ process_openssl_combined_op
srclen = 0;
else {
srclen = op->sym->cipher.data.length;
- src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
- op->sym->cipher.data.offset);
dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
op->sym->cipher.data.offset);
}
if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
status = process_openssl_auth_encryption_gcm(
- src, srclen, aad, aadlen, iv, ivlen,
- sess->cipher.key.data, dst, tag,
- sess->cipher.ctx, sess->cipher.evp_algo);
+ mbuf_src, op->sym->cipher.data.offset, srclen,
+ aad, aadlen, iv, ivlen, sess->cipher.key.data,
+ dst, tag, sess->cipher.ctx,
+ sess->cipher.evp_algo);
else
status = process_openssl_auth_decryption_gcm(
- src, srclen, aad, aadlen, iv, ivlen,
- sess->cipher.key.data, dst, tag,
- sess->cipher.ctx, sess->cipher.evp_algo);
+ mbuf_src, op->sym->cipher.data.offset, srclen,
+ aad, aadlen, iv, ivlen, sess->cipher.key.data,
+ dst, tag, sess->cipher.ctx,
+ sess->cipher.evp_algo);
if (status != 0) {
if (status == (-EFAULT) &&
@@ -774,12 +970,19 @@ process_openssl_cipher_op
(struct rte_crypto_op *op, struct openssl_session *sess,
struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
{
- uint8_t *src, *dst, *iv;
+ uint8_t *dst, *iv;
int srclen, status;
+ /*
+ * Segmented destination buffer is not supported for
+ * encryption/decryption
+ */
+ if (!rte_pktmbuf_is_contiguous(mbuf_dst)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
srclen = op->sym->cipher.data.length;
- src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
- op->sym->cipher.data.offset);
dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
op->sym->cipher.data.offset);
@@ -787,17 +990,20 @@ process_openssl_cipher_op
if (sess->cipher.mode == OPENSSL_CIPHER_LIB)
if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
- status = process_openssl_cipher_encrypt(src, dst, iv,
+ status = process_openssl_cipher_encrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
sess->cipher.key.data, srclen,
sess->cipher.ctx,
sess->cipher.evp_algo);
else
- status = process_openssl_cipher_decrypt(src, dst, iv,
+ status = process_openssl_cipher_decrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
sess->cipher.key.data, srclen,
sess->cipher.ctx,
sess->cipher.evp_algo);
else
- status = process_openssl_cipher_des3ctr(src, dst, iv,
+ status = process_openssl_cipher_des3ctr(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
sess->cipher.key.data, srclen,
sess->cipher.ctx);
@@ -805,18 +1011,108 @@ process_openssl_cipher_op
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
+/** Process cipher operation */
+static void
+process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
+ struct openssl_session *sess, struct rte_mbuf *mbuf_src,
+ struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *src, *dst, *iv;
+ uint8_t block_size, last_block_len;
+ int srclen, status = 0;
+
+ srclen = op->sym->cipher.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->cipher.data.offset);
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+
+ iv = op->sym->cipher.iv.data;
+
+ block_size = DES_BLOCK_SIZE;
+
+ last_block_len = srclen % block_size;
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ /* Encrypt only with ECB mode XOR IV */
+ if (srclen < block_size) {
+ status = process_openssl_cipher_bpi_encrypt(src, dst,
+ iv, srclen,
+ sess->cipher.bpi_ctx);
+ } else {
+ srclen -= last_block_len;
+ /* Encrypt with the block aligned stream with CBC mode */
+ status = process_openssl_cipher_encrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx, sess->cipher.evp_algo);
+ if (last_block_len) {
+ /* Point at last block */
+ dst += srclen;
+ /*
+ * IV is the last encrypted block from
+ * the previous operation
+ */
+ iv = dst - block_size;
+ src += srclen;
+ srclen = last_block_len;
+ /* Encrypt the last frame with ECB mode */
+ status |= process_openssl_cipher_bpi_encrypt(src,
+ dst, iv,
+ srclen, sess->cipher.bpi_ctx);
+ }
+ }
+ } else {
+ /* Decrypt only with ECB mode (encrypt, as it is same operation) */
+ if (srclen < block_size) {
+ status = process_openssl_cipher_bpi_encrypt(src, dst,
+ iv,
+ srclen,
+ sess->cipher.bpi_ctx);
+ } else {
+ if (last_block_len) {
+ /* Point at last block */
+ dst += srclen - last_block_len;
+ src += srclen - last_block_len;
+ /*
+ * IV is the last full block
+ */
+ iv = src - block_size;
+ /*
+ * Decrypt the last frame with ECB mode
+ * (encrypt, as it is the same operation)
+ */
+ status = process_openssl_cipher_bpi_encrypt(src,
+ dst, iv,
+ last_block_len, sess->cipher.bpi_ctx);
+ /* Prepare parameters for CBC mode op */
+ iv = op->sym->cipher.iv.data;
+ dst += last_block_len - srclen;
+ srclen -= last_block_len;
+ }
+
+ /* Decrypt with CBC mode */
+ status |= process_openssl_cipher_decrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx,
+ sess->cipher.evp_algo);
+ }
+ }
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
/** Process auth operation */
static void
process_openssl_auth_op
(struct rte_crypto_op *op, struct openssl_session *sess,
struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
{
- uint8_t *src, *dst;
+ uint8_t *dst;
int srclen, status;
srclen = op->sym->auth.data.length;
- src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
- op->sym->auth.data.offset);
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
@@ -831,13 +1127,14 @@ process_openssl_auth_op
switch (sess->auth.mode) {
case OPENSSL_AUTH_AS_AUTH:
- status = process_openssl_auth(src, dst,
- NULL, NULL, srclen,
+ status = process_openssl_auth(mbuf_src, dst,
+ op->sym->auth.data.offset, NULL, NULL, srclen,
sess->auth.auth.ctx, sess->auth.auth.evp_algo);
break;
case OPENSSL_AUTH_AS_HMAC:
- status = process_openssl_auth_hmac(src, dst,
- NULL, sess->auth.hmac.pkey, srclen,
+ status = process_openssl_auth_hmac(mbuf_src, dst,
+ op->sym->auth.data.offset, NULL,
+ sess->auth.hmac.pkey, srclen,
sess->auth.hmac.ctx, sess->auth.hmac.evp_algo);
break;
default:
@@ -851,8 +1148,7 @@ process_openssl_auth_op
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
/* Trim area used for digest from mbuf. */
- rte_pktmbuf_trim(mbuf_src,
- op->sym->auth.digest.length);
+ rte_pktmbuf_trim(mbuf_src, op->sym->auth.digest.length);
}
if (status != 0)
@@ -890,6 +1186,9 @@ process_op(const struct openssl_qp *qp, struct rte_crypto_op *op,
case OPENSSL_CHAIN_COMBINED:
process_openssl_combined_op(op, sess, msrc, mdst);
break;
+ case OPENSSL_CHAIN_CIPHER_BPI:
+ process_openssl_docsis_bpi_op(op, sess, msrc, mdst);
+ break;
default:
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
break;
@@ -903,7 +1202,6 @@ process_op(const struct openssl_qp *qp, struct rte_crypto_op *op,
op->sym->session = NULL;
}
-
if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -958,7 +1256,7 @@ openssl_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
unsigned int nb_dequeued = 0;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)ops, nb_ops);
+ (void **)ops, nb_ops, NULL);
qp->stats.dequeued_count += nb_dequeued;
return nb_dequeued;
@@ -967,20 +1265,17 @@ openssl_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
/** Create OPENSSL crypto device */
static int
cryptodev_openssl_create(const char *name,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
- char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct openssl_private *internals;
- /* create a unique device name */
- if (create_unique_device_name(crypto_dev_name,
- RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
- OPENSSL_LOG_ERR("failed to create unique cryptodev name");
- return -EINVAL;
- }
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
- dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
sizeof(struct openssl_private),
init_params->socket_id);
if (dev == NULL) {
@@ -997,7 +1292,8 @@ cryptodev_openssl_create(const char *name,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_CPU_AESNI;
+ RTE_CRYPTODEV_FF_CPU_AESNI |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
/* Set vector instructions mode supported */
internals = dev->data->dev_private;
@@ -1008,39 +1304,53 @@ cryptodev_openssl_create(const char *name,
return 0;
init_error:
- OPENSSL_LOG_ERR("driver %s: cryptodev_openssl_create failed", name);
+ OPENSSL_LOG_ERR("driver %s: cryptodev_openssl_create failed",
+ init_params->name);
- cryptodev_openssl_remove(crypto_dev_name);
+ cryptodev_openssl_remove(vdev);
return -EFAULT;
}
/** Initialise OPENSSL crypto device */
static int
-cryptodev_openssl_probe(const char *name,
- const char *input_args)
+cryptodev_openssl_probe(struct rte_vdev_device *vdev)
{
struct rte_crypto_vdev_init_params init_params = {
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
- rte_socket_id()
+ rte_socket_id(),
+ {0}
};
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
init_params.max_nb_queue_pairs);
RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
init_params.max_nb_sessions);
- return cryptodev_openssl_create(name, &init_params);
+ return cryptodev_openssl_create(name, vdev, &init_params);
}
/** Uninitialise OPENSSL crypto device */
static int
-cryptodev_openssl_remove(const char *name)
+cryptodev_openssl_remove(struct rte_vdev_device *vdev)
{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index 875550c7..22a68730 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -350,9 +350,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 0
},
.aad_size = {
- .min = 8,
- .max = 12,
- .increment = 4
+ .min = 0,
+ .max = 65535,
+ .increment = 1
}
}, }
}, }
@@ -366,8 +366,8 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.block_size = 16,
.key_size = {
.min = 16,
- .max = 16,
- .increment = 0
+ .max = 32,
+ .increment = 8
},
.iv_size = {
.min = 12,
@@ -442,6 +442,26 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
}, }
}, }
},
+ { /* DES DOCSIS BPI */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
@@ -449,7 +469,8 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
/** Configure device */
static int
-openssl_pmd_config(__rte_unused struct rte_cryptodev *dev)
+openssl_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
{
return 0;
}
@@ -559,7 +580,7 @@ openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
- if (r->prod.size >= ring_size) {
+ if (rte_ring_get_size(r) >= ring_size) {
OPENSSL_LOG_INFO(
"Reusing existing ring %s for processed ops",
qp->name);
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_private.h b/drivers/crypto/openssl/rte_openssl_pmd_private.h
index 65c5f979..4d820c51 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_private.h
+++ b/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -62,6 +62,7 @@
enum openssl_chain_order {
OPENSSL_CHAIN_ONLY_CIPHER,
OPENSSL_CHAIN_ONLY_AUTH,
+ OPENSSL_CHAIN_CIPHER_BPI,
OPENSSL_CHAIN_CIPHER_AUTH,
OPENSSL_CHAIN_AUTH_CIPHER,
OPENSSL_CHAIN_COMBINED,
@@ -127,6 +128,7 @@ struct openssl_session {
/**< pointer to EVP algorithm function */
EVP_CIPHER_CTX *ctx;
/**< pointer to EVP context structure */
+ EVP_CIPHER_CTX *bpi_ctx;
} cipher;
/** Authentication Parameters */
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
index 20a70d4a..7322ffe4 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/qat/Makefile
@@ -56,11 +56,4 @@ SYMLINK-y-include +=
# versioning export map
EXPORT_MAP := rte_pmd_qat_version.map
-# library dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_cryptodev
-
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
index 47f1c91a..d218f85d 100644
--- a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
@@ -47,14 +47,15 @@
#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
#define ADF_TRANSPORT_ACCESS_MACROS_H
+#include <rte_io.h>
+
/* CSR write macro */
-#define ADF_CSR_WR(csrAddr, csrOffset, val) \
- (void)((*((volatile uint32_t *)(((uint8_t *)csrAddr) + csrOffset)) \
- = (val)))
+#define ADF_CSR_WR(csrAddr, csrOffset, val) \
+ rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
/* CSR read macro */
-#define ADF_CSR_RD(csrAddr, csrOffset) \
- (*((volatile uint32_t *)(((uint8_t *)csrAddr) + csrOffset)))
+#define ADF_CSR_RD(csrAddr, csrOffset) \
+ rte_read32((((uint8_t *)csrAddr) + csrOffset))
#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index dcc0df59..5c63406b 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -47,6 +47,7 @@
#ifndef _ICP_QAT_ALGS_H_
#define _ICP_QAT_ALGS_H_
#include <rte_memory.h>
+#include <rte_crypto.h>
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
@@ -79,13 +80,33 @@ struct qat_alg_buf {
uint64_t addr;
} __rte_packed;
+enum qat_crypto_proto_flag {
+ QAT_CRYPTO_PROTO_FLAG_NONE = 0,
+ QAT_CRYPTO_PROTO_FLAG_CCM = 1,
+ QAT_CRYPTO_PROTO_FLAG_GCM = 2,
+ QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3,
+ QAT_CRYPTO_PROTO_FLAG_ZUC = 4
+};
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SGL_MAX_NUMBER 16
+
struct qat_alg_buf_list {
uint64_t resrvd;
uint32_t num_bufs;
uint32_t num_mapped_bufs;
- struct qat_alg_buf bufers[];
+ struct qat_alg_buf bufers[QAT_SGL_MAX_NUMBER];
} __rte_packed __rte_cache_aligned;
+struct qat_crypto_op_cookie {
+ struct qat_alg_buf_list qat_sgl_list_src;
+ struct qat_alg_buf_list qat_sgl_list_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+};
+
/* Common content descriptor */
struct qat_alg_cd {
struct icp_qat_hw_cipher_algo_blk cipher;
@@ -99,6 +120,7 @@ struct qat_session {
enum icp_qat_hw_cipher_mode qat_mode;
enum icp_qat_hw_auth_algo qat_hash_alg;
enum icp_qat_hw_auth_op auth_op;
+ void *bpi_ctx;
struct qat_alg_cd cd;
uint8_t *cd_cur_ptr;
phys_addr_t cd_paddr;
@@ -130,7 +152,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
unsigned int operation);
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
- uint16_t proto);
+ enum qat_crypto_proto_flag proto_flags);
void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cd,
int alg, const uint8_t *key,
@@ -141,7 +163,12 @@ void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cd,
unsigned int keylen);
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_aes_docsisbpi_key(int key_len,
+ enum icp_qat_hw_cipher_algo *alg);
int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg);
+int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
#endif
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 8900668d..154e1ddd 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -54,12 +54,31 @@
#include <rte_crypto_sym.h>
#include "../qat_logs.h"
-#include "qat_algs.h"
#include <openssl/sha.h> /* Needed to calculate pre-compute values */
#include <openssl/aes.h> /* Needed to calculate pre-compute values */
#include <openssl/md5.h> /* Needed to calculate pre-compute values */
+#include "qat_algs.h"
+
+/* returns block size in bytes per cipher algo */
+int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
+{
+ switch (qat_cipher_alg) {
+ case ICP_QAT_HW_CIPHER_ALGO_DES:
+ return ICP_QAT_HW_DES_BLK_SZ;
+ case ICP_QAT_HW_CIPHER_ALGO_3DES:
+ return ICP_QAT_HW_3DES_BLK_SZ;
+ case ICP_QAT_HW_CIPHER_ALGO_AES128:
+ case ICP_QAT_HW_CIPHER_ALGO_AES192:
+ case ICP_QAT_HW_CIPHER_ALGO_AES256:
+ return ICP_QAT_HW_AES_BLK_SZ;
+ default:
+ PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
/*
* Returns size in bytes per hash algo for state1 size field in cd_ctrl
@@ -90,6 +109,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
@@ -422,7 +444,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
}
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
- uint16_t proto)
+ enum qat_crypto_proto_flag proto_flags)
{
PMD_INIT_FUNC_TRACE();
header->hdr_flags =
@@ -435,10 +457,58 @@ void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
ICP_QAT_FW_LA_PARTIAL_NONE);
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
- ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
- proto);
+
+ switch (proto_flags) {
+ case QAT_CRYPTO_PROTO_FLAG_NONE:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_CCM:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CCM_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_GCM:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_SNOW_3G_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_ZUC:
+ ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_ZUC_3G_PROTO);
+ break;
+ }
+
ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_UPDATE_STATE);
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+}
+
+/*
+ * Snow3G and ZUC should never use this function
+ * and set its protocol flag in both cipher and auth part of content
+ * descriptor building function
+ */
+static enum qat_crypto_proto_flag
+qat_get_crypto_proto_flag(uint16_t flags)
+{
+ int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
+ enum qat_crypto_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
+
+ switch (proto) {
+ case ICP_QAT_FW_LA_GCM_PROTO:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+ break;
+ case ICP_QAT_FW_LA_CCM_PROTO:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+ break;
+ }
+
+ return qat_proto_flag;
}
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
@@ -453,8 +523,9 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
enum icp_qat_hw_cipher_convert key_convert;
+ enum qat_crypto_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
uint32_t total_key_size;
- uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/SNOW 3G */
uint16_t cipher_offset, cd_size;
uint32_t wordIndex = 0;
uint32_t *temp_key = NULL;
@@ -494,7 +565,9 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
*/
cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
- } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
+ || cdesc->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
@@ -506,7 +579,8 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
cipher_cd_ctrl->cipher_state_sz =
ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
- proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+
} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
@@ -515,21 +589,34 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
- proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
+ total_key_size = ICP_QAT_HW_DES_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ } else if (cdesc->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+ total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+ cipher_cd_ctrl->cipher_state_sz =
+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
} else {
total_key_size = cipherkeylen;
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
- proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
}
cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
header->service_cmd_id = cdesc->qat_cmd;
- qat_alg_init_common_hdr(header, proto);
+ qat_alg_init_common_hdr(header, qat_proto_flag);
cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
-
cipher->cipher_config.val =
ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
cdesc->qat_cipher_alg, key_convert,
@@ -590,12 +677,13 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
(struct icp_qat_fw_la_auth_req_params *)
((char *)&req_tmpl->serv_specif_rqpars +
sizeof(struct icp_qat_fw_la_cipher_req_params));
- uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/SNOW 3G */
uint16_t state1_size = 0, state2_size = 0;
uint16_t hash_offset, cd_size;
uint32_t *aad_len = NULL;
uint32_t wordIndex = 0;
uint32_t *pTempKey;
+ enum qat_crypto_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
PMD_INIT_FUNC_TRACE();
@@ -645,7 +733,8 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
cdesc->qat_hash_alg, digestsize);
if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
- || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
hash->auth_counter.counter = 0;
else
hash->auth_counter.counter = rte_bswap32(
@@ -708,7 +797,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
break;
case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
- proto = ICP_QAT_FW_LA_GCM_PROTO;
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
@@ -730,7 +819,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
*aad_len = rte_bswap32(add_auth_data_length);
break;
case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
- proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
state1_size = qat_hash_get_state1_size(
ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
@@ -751,6 +840,24 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
auth_param->hash_state_sz =
RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
break;
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ hash->auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
+ cdesc->qat_hash_alg, digestsize);
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
+ state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
+ + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
+
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ cdesc->cd_cur_ptr += state1_size + state2_size
+ + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+ auth_param->hash_state_sz =
+ RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+
+ break;
case ICP_QAT_HW_AUTH_ALGO_MD5:
if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
authkey, authkeylen, cdesc->cd_cur_ptr,
@@ -788,7 +895,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
}
/* Request template setup */
- qat_alg_init_common_hdr(header, proto);
+ qat_alg_init_common_hdr(header, qat_proto_flag);
header->service_cmd_id = cdesc->qat_cmd;
/* Auth CD config setup */
@@ -832,6 +939,19 @@ int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
+int qat_alg_validate_aes_docsisbpi_key(int key_len,
+ enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_AES_128_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
@@ -856,6 +976,18 @@ int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
+int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_DES_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
@@ -868,3 +1000,15 @@ int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
}
return 0;
}
+
+int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index a4119fcd..386aa453 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -59,6 +59,8 @@
#include <rte_string_fns.h>
#include <rte_spinlock.h>
#include <rte_hexdump.h>
+#include <rte_crypto_sym.h>
+#include <openssl/evp.h>
#include "qat_logs.h"
#include "qat_algs.h"
@@ -67,443 +69,151 @@
#define BYTE_LENGTH 8
-static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
- { /* SHA1 HMAC */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
- .block_size = 64,
- .key_size = {
- .min = 64,
- .max = 64,
- .increment = 0
- },
- .digest_size = {
- .min = 20,
- .max = 20,
- .increment = 0
- },
- .aad_size = { 0 }
- }, }
- }, }
- },
- { /* SHA224 HMAC */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
- .block_size = 64,
- .key_size = {
- .min = 64,
- .max = 64,
- .increment = 0
- },
- .digest_size = {
- .min = 28,
- .max = 28,
- .increment = 0
- },
- .aad_size = { 0 }
- }, }
- }, }
- },
- { /* SHA256 HMAC */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
- .block_size = 64,
- .key_size = {
- .min = 64,
- .max = 64,
- .increment = 0
- },
- .digest_size = {
- .min = 32,
- .max = 32,
- .increment = 0
- },
- .aad_size = { 0 }
- }, }
- }, }
- },
- { /* SHA384 HMAC */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
- .block_size = 64,
- .key_size = {
- .min = 128,
- .max = 128,
- .increment = 0
- },
- .digest_size = {
- .min = 48,
- .max = 48,
- .increment = 0
- },
- .aad_size = { 0 }
- }, }
- }, }
- },
- { /* SHA512 HMAC */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
- .block_size = 128,
- .key_size = {
- .min = 128,
- .max = 128,
- .increment = 0
- },
- .digest_size = {
- .min = 64,
- .max = 64,
- .increment = 0
- },
- .aad_size = { 0 }
- }, }
- }, }
- },
- { /* MD5 HMAC */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
- .block_size = 64,
- .key_size = {
- .min = 8,
- .max = 64,
- .increment = 8
- },
- .digest_size = {
- .min = 16,
- .max = 16,
- .increment = 0
- },
- .aad_size = { 0 }
- }, }
- }, }
- },
- { /* AES XCBC MAC */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
- .block_size = 16,
- .key_size = {
- .min = 16,
- .max = 16,
- .increment = 0
- },
- .digest_size = {
- .min = 16,
- .max = 16,
- .increment = 0
- },
- .aad_size = { 0 }
- }, }
- }, }
- },
- { /* AES GCM (AUTH) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_AES_GCM,
- .block_size = 16,
- .key_size = {
- .min = 16,
- .max = 32,
- .increment = 8
- },
- .digest_size = {
- .min = 8,
- .max = 16,
- .increment = 4
- },
- .aad_size = {
- .min = 8,
- .max = 12,
- .increment = 4
- }
- }, }
- }, }
- },
- { /* AES GMAC (AUTH) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_AES_GMAC,
- .block_size = 16,
- .key_size = {
- .min = 16,
- .max = 32,
- .increment = 8
- },
- .digest_size = {
- .min = 8,
- .max = 16,
- .increment = 4
- },
- .aad_size = {
- .min = 1,
- .max = 65535,
- .increment = 1
- }
- }, }
- }, }
- },
- { /* SNOW 3G (UIA2) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
- .block_size = 16,
- .key_size = {
- .min = 16,
- .max = 16,
- .increment = 0
- },
- .digest_size = {
- .min = 4,
- .max = 4,
- .increment = 0
- },
- .aad_size = {
- .min = 16,
- .max = 16,
- .increment = 0
- }
- }, }
- }, }
- },
- { /* AES GCM (CIPHER) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- {.cipher = {
- .algo = RTE_CRYPTO_CIPHER_AES_GCM,
- .block_size = 16,
- .key_size = {
- .min = 16,
- .max = 32,
- .increment = 8
- },
- .iv_size = {
- .min = 12,
- .max = 12,
- .increment = 0
- }
- }, }
- }, }
- },
- { /* AES CBC */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- {.cipher = {
- .algo = RTE_CRYPTO_CIPHER_AES_CBC,
- .block_size = 16,
- .key_size = {
- .min = 16,
- .max = 32,
- .increment = 8
- },
- .iv_size = {
- .min = 16,
- .max = 16,
- .increment = 0
- }
- }, }
- }, }
- },
- { /* SNOW 3G (UEA2) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- {.cipher = {
- .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
- .block_size = 16,
- .key_size = {
- .min = 16,
- .max = 16,
- .increment = 0
- },
- .iv_size = {
- .min = 16,
- .max = 16,
- .increment = 0
- }
- }, }
- }, }
- },
- { /* AES CTR */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- {.cipher = {
- .algo = RTE_CRYPTO_CIPHER_AES_CTR,
- .block_size = 16,
- .key_size = {
- .min = 16,
- .max = 32,
- .increment = 8
- },
- .iv_size = {
- .min = 16,
- .max = 16,
- .increment = 0
- }
- }, }
- }, }
- },
- { /* NULL (AUTH) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_NULL,
- .block_size = 1,
- .key_size = {
- .min = 0,
- .max = 0,
- .increment = 0
- },
- .digest_size = {
- .min = 0,
- .max = 0,
- .increment = 0
- },
- .aad_size = { 0 }
- }, },
- }, },
- },
- { /* NULL (CIPHER) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- {.cipher = {
- .algo = RTE_CRYPTO_CIPHER_NULL,
- .block_size = 1,
- .key_size = {
- .min = 0,
- .max = 0,
- .increment = 0
- },
- .iv_size = {
- .min = 0,
- .max = 0,
- .increment = 0
- }
- }, },
- }, }
- },
- { /* KASUMI (F8) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- {.cipher = {
- .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
- .block_size = 8,
- .key_size = {
- .min = 16,
- .max = 16,
- .increment = 0
- },
- .iv_size = {
- .min = 8,
- .max = 8,
- .increment = 0
- }
- }, }
- }, }
- },
- { /* KASUMI (F9) */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
- {.auth = {
- .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
- .block_size = 8,
- .key_size = {
- .min = 16,
- .max = 16,
- .increment = 0
- },
- .digest_size = {
- .min = 4,
- .max = 4,
- .increment = 0
- },
- .aad_size = {
- .min = 8,
- .max = 8,
- .increment = 0
- }
- }, }
- }, }
- },
- { /* 3DES CBC */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- {.cipher = {
- .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
- .block_size = 8,
- .key_size = {
- .min = 16,
- .max = 24,
- .increment = 8
- },
- .iv_size = {
- .min = 8,
- .max = 8,
- .increment = 0
- }
- }, }
- }, }
- },
- { /* 3DES CTR */
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
- {.sym = {
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
- {.cipher = {
- .algo = RTE_CRYPTO_CIPHER_3DES_CTR,
- .block_size = 8,
- .key_size = {
- .min = 16,
- .max = 24,
- .increment = 8
- },
- .iv_size = {
- .min = 8,
- .max = 8,
- .increment = 0
- }
- }, }
- }, }
- },
- RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
+static int
+qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
+ struct qat_pmd_private *internals) {
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ continue;
+
+ if (capability->sym.cipher.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+static int
+qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
+ struct qat_pmd_private *internals) {
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ continue;
+
+ if (capability->sym.auth.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+/** Encrypt a single partial block
+ * Depends on openssl libcrypto
+ * Uses ECB+XOR to do CFB encryption, same result, more performant
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[16];
+ int i;
+
+ /* ECB method: encrypt the IV, then XOR this with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_encrypt_err;
+
+ for (i = 0; i < srclen; i++)
+ *(dst+i) = *(src+i)^(encrypted_iv[i]);
+
+ return 0;
+
+cipher_encrypt_err:
+ PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+ return -EINVAL;
+}
+
+/** Decrypt a single partial block
+ * Depends on openssl libcrypto
+ * Uses ECB+XOR to do CFB encryption, same result, more performant
+ */
+static inline int
+bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[16];
+ int i;
+
+ /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_decrypt_err;
+
+ for (i = 0; i < srclen; i++)
+ *(dst+i) = *(src+i)^(encrypted_iv[i]);
+
+ return 0;
+
+cipher_decrypt_err:
+ PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
+ return -EINVAL;
+}
+
+/** Creates a context in either AES or DES in ECB mode
+ * Depends on openssl libcrypto
+ */
+static void *
+bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
+ enum rte_crypto_cipher_operation direction __rte_unused,
+ uint8_t *key)
+{
+ const EVP_CIPHER *algo = NULL;
+ EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
+
+ if (ctx == NULL)
+ goto ctx_init_err;
+
+ if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
+ algo = EVP_des_ecb();
+ else
+ algo = EVP_aes_128_ecb();
+
+ /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
+ if (EVP_EncryptInit_ex(ctx, algo, NULL, key, 0) != 1)
+ goto ctx_init_err;
+
+ return ctx;
+
+ctx_init_err:
+ if (ctx != NULL)
+ EVP_CIPHER_CTX_free(ctx);
+ return NULL;
+}
+
+/** Frees a context previously created
+ * Depends on openssl libcrypto
+ */
+static void
+bpi_cipher_ctx_free(void *bpi_ctx)
+{
+ if (bpi_ctx != NULL)
+ EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
+}
static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);
static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
+ struct qat_crypto_op_cookie *qat_op_cookie);
void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
void *session)
@@ -512,7 +222,11 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
phys_addr_t cd_paddr;
PMD_INIT_FUNC_TRACE();
- if (session) {
+ if (sess) {
+ if (sess->bpi_ctx) {
+ bpi_cipher_ctx_free(sess->bpi_ctx);
+ sess->bpi_ctx = NULL;
+ }
cd_paddr = sess->cd_paddr;
memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
sess->cd_paddr = cd_paddr;
@@ -576,10 +290,8 @@ void *
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *session_private)
{
- struct qat_pmd_private *internals = dev->data->dev_private;
-
struct qat_session *session = session_private;
-
+ struct qat_pmd_private *internals = dev->data->dev_private;
struct rte_crypto_cipher_xform *cipher_xform = NULL;
/* Get cipher xform from crypto xform chain */
@@ -637,6 +349,14 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
break;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ if (qat_alg_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
case RTE_CRYPTO_CIPHER_3DES_CTR:
if (qat_alg_validate_3des_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
@@ -645,13 +365,59 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
break;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ session->bpi_ctx = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data);
+ if (session->bpi_ctx == NULL) {
+ PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
+ goto error_out;
+ }
+ if (qat_alg_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ session->bpi_ctx = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data);
+ if (session->bpi_ctx == NULL) {
+ PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
+ goto error_out;
+ }
+ if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ if (!qat_is_cipher_alg_supported(
+ cipher_xform->algo, internals)) {
+ PMD_DRV_LOG(ERR, "%s not supported on this device",
+ rte_crypto_cipher_algorithm_strings
+ [cipher_xform->algo]);
+ goto error_out;
+ }
+ if (qat_alg_validate_zuc_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_AES_CCM:
case RTE_CRYPTO_CIPHER_AES_F8:
case RTE_CRYPTO_CIPHER_AES_XTS:
case RTE_CRYPTO_CIPHER_ARC4:
- case RTE_CRYPTO_CIPHER_ZUC_EEA3:
PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
cipher_xform->algo);
goto error_out;
@@ -674,7 +440,10 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
return session;
error_out:
- rte_mempool_put(internals->sess_mp, session);
+ if (session->bpi_ctx) {
+ bpi_cipher_ctx_free(session->bpi_ctx);
+ session->bpi_ctx = NULL;
+ }
return NULL;
}
@@ -683,12 +452,9 @@ void *
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *session_private)
{
- struct qat_pmd_private *internals = dev->data->dev_private;
-
struct qat_session *session = session_private;
int qat_cmd_id;
-
PMD_INIT_FUNC_TRACE();
/* Get requested QAT command id */
@@ -730,10 +496,10 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
session->qat_cmd);
goto error_out;
}
+
return session;
error_out:
- rte_mempool_put(internals->sess_mp, session);
return NULL;
}
@@ -743,10 +509,10 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
struct qat_session *session_private)
{
- struct qat_pmd_private *internals = dev->data->dev_private;
struct qat_session *session = session_private;
struct rte_crypto_auth_xform *auth_xform = NULL;
struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ struct qat_pmd_private *internals = dev->data->dev_private;
auth_xform = qat_get_auth_xform(xform);
switch (auth_xform->algo) {
@@ -786,6 +552,15 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_KASUMI_F9:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
+ PMD_DRV_LOG(ERR, "%s not supported on this device",
+ rte_crypto_auth_algorithm_strings
+ [auth_xform->algo]);
+ goto error_out;
+ }
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+ break;
case RTE_CRYPTO_AUTH_SHA1:
case RTE_CRYPTO_AUTH_SHA256:
case RTE_CRYPTO_AUTH_SHA512:
@@ -795,7 +570,6 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_CCM:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
- case RTE_CRYPTO_AUTH_ZUC_EIA3:
PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
auth_xform->algo);
goto error_out;
@@ -828,8 +602,6 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
return session;
error_out:
- if (internals->sess_mp != NULL)
- rte_mempool_put(internals->sess_mp, session);
return NULL;
}
@@ -839,6 +611,112 @@ unsigned qat_crypto_sym_get_session_private_size(
return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
}
+static inline uint32_t
+qat_bpicipher_preprocess(struct qat_session *ctx,
+ struct rte_crypto_op *op)
+{
+ uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+
+ if (last_block_len &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+
+ /* Decrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = last_block - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = sym_op->cipher.iv.data;
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
+ last_block_len);
+ if (sym_op->m_dst != NULL)
+ rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
+ last_block_len);
+#endif
+ bpi_cipher_decrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
+ last_block_len);
+ if (sym_op->m_dst != NULL)
+ rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
+ last_block_len);
+#endif
+ }
+
+ return sym_op->cipher.data.length - last_block_len;
+}
+
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_session *ctx,
+ struct rte_crypto_op *op)
+{
+ uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+
+ if (last_block_len > 0 &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+ /* Encrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset;
+
+ last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = dst - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = sym_op->cipher.iv.data;
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "BPI: src before post-process:", last_block,
+ last_block_len);
+ if (sym_op->m_dst != NULL)
+ rte_hexdump(stdout, "BPI: dst before post-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_encrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "BPI: src after post-process:", last_block,
+ last_block_len);
+ if (sym_op->m_dst != NULL)
+ rte_hexdump(stdout, "BPI: dst after post-process:", dst,
+ last_block_len);
+#endif
+ }
+ return sym_op->cipher.data.length - last_block_len;
+}
uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
@@ -873,9 +751,16 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
}
while (nb_ops_sent != nb_ops_possible) {
- ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
+ tmp_qp->op_cookies[tail / queue->msg_size]);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
+ /*
+ * This message cannot be enqueued,
+ * decrease number of ops that wasnt sent
+ */
+ rte_atomic16_sub(&tmp_qp->inflights16,
+ nb_ops_possible - nb_ops_sent);
if (nb_ops_sent == 0)
return 0;
goto kick_tail;
@@ -914,15 +799,21 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
- sizeof(struct icp_qat_fw_comn_resp));
+ sizeof(struct icp_qat_fw_comn_resp));
+
#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
resp_msg->comn_hdr.comn_status)) {
rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
+ struct qat_session *sess = (struct qat_session *)
+ (rx_op->sym->session->_private);
+ if (sess->bpi_ctx)
+ qat_bpicipher_postprocess(sess, rx_op);
rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
+
*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
queue->head = adf_modulo(queue->head +
queue->msg_size,
@@ -945,8 +836,57 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
}
static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
+qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
+ struct qat_alg_buf_list *list, uint32_t data_len)
{
+ int nr = 1;
+
+ uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
+ buff_start + rte_pktmbuf_data_len(buf);
+
+ list->bufers[0].addr = buff_start;
+ list->bufers[0].resrvd = 0;
+ list->bufers[0].len = buf_len;
+
+ if (data_len <= buf_len) {
+ list->num_bufs = nr;
+ list->bufers[0].len = data_len;
+ return 0;
+ }
+
+ buf = buf->next;
+ while (buf) {
+ if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
+ PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
+ " entry(%u)",
+ QAT_SGL_MAX_NUMBER);
+ return -EINVAL;
+ }
+
+ list->bufers[nr].len = rte_pktmbuf_data_len(buf);
+ list->bufers[nr].resrvd = 0;
+ list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
+
+ buf_len += list->bufers[nr].len;
+ buf = buf->next;
+
+ if (buf_len > data_len) {
+ list->bufers[nr].len -=
+ buf_len - data_len;
+ buf = NULL;
+ }
+ ++nr;
+ }
+ list->num_bufs = nr;
+
+ return 0;
+}
+
+static inline int
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
+ struct qat_crypto_op_cookie *qat_op_cookie)
+{
+ int ret = 0;
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
@@ -955,8 +895,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
uint32_t cipher_len = 0, cipher_ofs = 0;
uint32_t auth_len = 0, auth_ofs = 0;
uint32_t min_ofs = 0;
- uint32_t digest_appended = 1;
uint64_t src_buf_start = 0, dst_buf_start = 0;
+ uint8_t do_sgl = 0;
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
@@ -1001,37 +941,54 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
if (ctx->qat_cipher_alg ==
ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
- ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
if (unlikely(
(cipher_param->cipher_length % BYTE_LENGTH != 0)
|| (cipher_param->cipher_offset
% BYTE_LENGTH != 0))) {
PMD_DRV_LOG(ERR,
- "SNOW3G/KASUMI in QAT PMD only supports byte aligned values");
+ "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
return -EINVAL;
}
cipher_len = op->sym->cipher.data.length >> 3;
cipher_ofs = op->sym->cipher.data.offset >> 3;
+ } else if (ctx->bpi_ctx) {
+ /* DOCSIS - only send complete blocks to device
+ * Process any partial block using CFB mode.
+ * Even if 0 complete blocks, still send this to device
+ * to get into rx queue for post-process and dequeuing
+ */
+ cipher_len = qat_bpicipher_preprocess(ctx, op);
+ cipher_ofs = op->sym->cipher.data.offset;
} else {
cipher_len = op->sym->cipher.data.length;
cipher_ofs = op->sym->cipher.data.offset;
}
/* copy IV into request if it fits */
- if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
- } else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr =
- op->sym->cipher.iv.phys_addr;
+ /*
+ * If IV length is zero do not copy anything but still
+ * use request descriptor embedded IV
+ *
+ */
+ if (op->sym->cipher.iv.length) {
+ if (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ op->sym->cipher.iv.phys_addr;
+ }
}
min_ofs = cipher_ofs;
}
@@ -1039,11 +996,13 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
if (do_auth) {
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
|| (auth_param->auth_len % BYTE_LENGTH != 0))) {
PMD_DRV_LOG(ERR,
- "For SNOW3G/KASUMI, QAT PMD only supports byte aligned values");
+ "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
return -EINVAL;
}
@@ -1062,29 +1021,34 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
}
}
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ auth_ofs = op->sym->cipher.data.offset;
+ auth_len = op->sym->cipher.data.length;
} else {
auth_ofs = op->sym->auth.data.offset;
auth_len = op->sym->auth.data.length;
}
min_ofs = auth_ofs;
- if (op->sym->auth.digest.phys_addr) {
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- auth_param->auth_res_addr =
- op->sym->auth.digest.phys_addr;
- digest_appended = 0;
- }
+ auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
}
+ if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+ do_sgl = 1;
+
/* adjust for chain case */
if (do_cipher && do_auth)
min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+ if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
+ min_ofs = 0;
+
if (unlikely(op->sym->m_dst != NULL)) {
/* Out-of-place operation (OOP)
* Don't align DMA start. DMA the minimum data-set
@@ -1094,6 +1058,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
dst_buf_start =
rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+
} else {
/* In-place operation
* Start DMA at nearest aligned address below min_ofs
@@ -1139,16 +1104,43 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
(cipher_param->cipher_offset + cipher_param->cipher_length)
: (auth_param->auth_off + auth_param->auth_len);
- if (do_auth && digest_appended) {
- if (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE)
- qat_req->comn_mid.dst_length
- += op->sym->auth.digest.length;
- else
- qat_req->comn_mid.src_length
- += op->sym->auth.digest.length;
+ if (do_sgl) {
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
+ &qat_op_cookie->qat_sgl_list_src,
+ qat_req->comn_mid.src_length);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ return ret;
+ }
+
+ if (likely(op->sym->m_dst == NULL))
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr =
+ qat_op_cookie->qat_sgl_src_phys_addr;
+ else {
+ ret = qat_sgl_fill_array(op->sym->m_dst,
+ dst_buf_start,
+ &qat_op_cookie->qat_sgl_list_dst,
+ qat_req->comn_mid.dst_length);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "QAT PMD Cannot "
+ "fill sgl array");
+ return ret;
+ }
+
+ qat_req->comn_mid.src_data_addr =
+ qat_op_cookie->qat_sgl_src_phys_addr;
+ qat_req->comn_mid.dest_data_addr =
+ qat_op_cookie->qat_sgl_dst_phys_addr;
+ }
+ } else {
+ qat_req->comn_mid.src_data_addr = src_buf_start;
+ qat_req->comn_mid.dest_data_addr = dst_buf_start;
}
- qat_req->comn_mid.src_data_addr = src_buf_start;
- qat_req->comn_mid.dest_data_addr = dst_buf_start;
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -1180,7 +1172,6 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
}
}
-
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
@@ -1216,10 +1207,11 @@ void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
offsetof(struct rte_cryptodev_sym_session, _private);
}
-int qat_dev_config(__rte_unused struct rte_cryptodev *dev)
+int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
{
PMD_INIT_FUNC_TRACE();
- return -ENOTSUP;
+ return 0;
}
int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
@@ -1259,7 +1251,7 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
ADF_NUM_SYM_QPS_PER_BUNDLE *
ADF_NUM_BUNDLES_PER_DEV;
info->feature_flags = dev->feature_flags;
- info->capabilities = qat_pmd_capabilities;
+ info->capabilities = internals->qat_dev_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
}
@@ -1283,9 +1275,9 @@ void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
}
stats->enqueued_count += qp[i]->stats.enqueued_count;
- stats->dequeued_count += qp[i]->stats.enqueued_count;
+ stats->dequeued_count += qp[i]->stats.dequeued_count;
stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
- stats->dequeue_err_count += qp[i]->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
}
}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6b844881..b740d6b0 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -37,6 +37,8 @@
#include <rte_cryptodev_pmd.h>
#include <rte_memzone.h>
+#include "qat_crypto_capabilities.h"
+
/*
* This macro rounds up a number to a be a multiple of
* the alignment when the alignment is a power of 2
@@ -69,20 +71,22 @@ struct qat_qp {
struct qat_queue tx_q;
struct qat_queue rx_q;
struct rte_cryptodev_stats stats;
+ struct rte_mempool *op_cookie_pool;
+ void **op_cookies;
+ uint32_t nb_descriptors;
} __rte_cache_aligned;
/** private data structure for each QAT device */
struct qat_pmd_private {
- char sess_mp_name[RTE_MEMPOOL_NAMESIZE];
- struct rte_mempool *sess_mp;
-
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
unsigned max_nb_sessions;
/**< Max number of sessions supported by device */
+ const struct rte_cryptodev_capabilities *qat_dev_capabilities;
};
-int qat_dev_config(struct rte_cryptodev *dev);
+int qat_dev_config(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config);
int qat_dev_start(struct rte_cryptodev *dev);
void qat_dev_stop(struct rte_cryptodev *dev);
int qat_dev_close(struct rte_cryptodev *dev);
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
new file mode 100644
index 00000000..1294f247
--- /dev/null
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -0,0 +1,574 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _QAT_CRYPTO_CAPABILITIES_H_
+#define _QAT_CRYPTO_CAPABILITIES_H_
+
+#define QAT_BASE_CPM16_SYM_CAPABILITIES \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 128, \
+ .max = 128, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 128, \
+ .max = 128, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* MD5 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 8, \
+ .max = 64, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES XCBC MAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM (AUTH) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 8, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 240, \
+ .increment = 1 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GMAC (AUTH) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 8, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ .aad_size = { \
+ .min = 1, \
+ .max = 65535, \
+ .increment = 1 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* SNOW 3G (UIA2) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM (CIPHER) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 12, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES DOCSIS BPI */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* SNOW 3G (UEA2) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* NULL (AUTH) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_NULL, \
+ .block_size = 1, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, }, \
+ }, }, \
+ }, \
+ { /* NULL (CIPHER) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_NULL, \
+ .block_size = 1, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ } \
+ }, }, \
+ }, } \
+ }, \
+ { /* KASUMI (F8) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* KASUMI (F9) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* DES DOCSISBPI */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,\
+ .block_size = 8, \
+ .key_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#define QAT_EXTRA_CPM17_SYM_CAPABILITIES \
+ { /* ZUC (EEA3) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* ZUC (EIA3) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#endif /* _QAT_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 2e7188bd..a358ccd7 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -41,6 +41,7 @@
#include "qat_logs.h"
#include "qat_crypto.h"
+#include "qat_algs.h"
#include "adf_transport_access_macros.h"
#define ADF_MAX_SYM_DESC 4096
@@ -135,7 +136,10 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
int socket_id)
{
struct qat_qp *qp;
+ struct rte_pci_device *pci_dev;
int ret;
+ char op_cookie_pool_name[RTE_RING_NAMESIZE];
+ uint32_t i;
PMD_INIT_FUNC_TRACE();
@@ -153,7 +157,9 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
return -EINVAL;
}
- if (dev->pci_dev->mem_resource[0].addr == NULL) {
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
PMD_DRV_LOG(ERR, "Could not find VF config space "
"(UIO driver attached?).");
return -EINVAL;
@@ -166,7 +172,6 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
queue_pair_id);
return -EINVAL;
}
-
/* Allocate the queue pair data structure. */
qp = rte_zmalloc("qat PMD qp metadata",
sizeof(*qp), RTE_CACHE_LINE_SIZE);
@@ -174,7 +179,12 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
return -ENOMEM;
}
- qp->mmap_bar_addr = dev->pci_dev->mem_resource[0].addr;
+ qp->nb_descriptors = qp_conf->nb_descriptors;
+ qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
+ qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
+ RTE_CACHE_LINE_SIZE);
+
+ qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
rte_atomic16_init(&qp->inflights16);
if (qat_tx_queue_create(dev, &(qp->tx_q),
@@ -191,8 +201,47 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
qat_queue_delete(&(qp->tx_q));
goto create_err;
}
+
adf_configure_queues(qp);
adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
+ snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
+ dev->driver->pci_drv.driver.name, dev->data->dev_id,
+ queue_pair_id);
+
+ qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+ if (qp->op_cookie_pool == NULL)
+ qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
+ qp->nb_descriptors,
+ sizeof(struct qat_crypto_op_cookie), 64, 0,
+ NULL, NULL, NULL, NULL, socket_id,
+ 0);
+ if (!qp->op_cookie_pool) {
+ PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
+ " op mempool");
+ goto create_err;
+ }
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+ if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
+ PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
+ return -EFAULT;
+ }
+
+ struct qat_crypto_op_cookie *sql_cookie =
+ qp->op_cookies[i];
+
+ sql_cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2phy(qp->op_cookie_pool,
+ sql_cookie) +
+ offsetof(struct qat_crypto_op_cookie,
+ qat_sgl_list_src);
+
+ sql_cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2phy(qp->op_cookie_pool,
+ sql_cookie) +
+ offsetof(struct qat_crypto_op_cookie,
+ qat_sgl_list_dst);
+ }
dev->data->queue_pairs[queue_pair_id] = qp;
return 0;
@@ -205,6 +254,7 @@ int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
struct qat_qp *qp =
(struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
+ uint32_t i;
PMD_INIT_FUNC_TRACE();
if (qp == NULL) {
@@ -221,6 +271,14 @@ int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
}
adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
+
+ for (i = 0; i < qp->nb_descriptors; i++)
+ rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
+
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+
+ rte_free(qp->op_cookies);
rte_free(qp);
dev->data->queue_pairs[queue_pair_id] = NULL;
return 0;
@@ -289,6 +347,7 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
void *io_addr;
const struct rte_memzone *qp_mz;
uint32_t queue_size_bytes = nb_desc*desc_size;
+ struct rte_pci_device *pci_dev;
PMD_INIT_FUNC_TRACE();
if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
@@ -349,7 +408,9 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
queue->queue_size);
- io_addr = dev->pci_dev->mem_resource[0].addr;
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ io_addr = pci_dev->mem_resource[0].addr;
WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
queue->hw_queue_number, queue_base);
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 1e7ee61c..1bdd30da 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -39,6 +39,17 @@
#include "qat_crypto.h"
#include "qat_logs.h"
+static const struct rte_cryptodev_capabilities qat_cpm16_capabilities[] = {
+ QAT_BASE_CPM16_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities qat_cpm17_capabilities[] = {
+ QAT_BASE_CPM16_SYM_CAPABILITIES,
+ QAT_EXTRA_CPM17_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
static struct rte_cryptodev_ops crypto_qat_ops = {
/* Device related operations */
@@ -67,7 +78,7 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
* The set of PCI devices this driver supports
*/
-static struct rte_pci_id pci_id_qat_map[] = {
+static const struct rte_pci_id pci_id_qat_map[] = {
{
RTE_PCI_DEVICE(0x8086, 0x0443),
},
@@ -77,6 +88,9 @@ static struct rte_pci_id pci_id_qat_map[] = {
{
RTE_PCI_DEVICE(0x8086, 0x19e3),
},
+ {
+ RTE_PCI_DEVICE(0x8086, 0x6f55),
+ },
{.device_id = 0},
};
@@ -88,9 +102,9 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
PMD_INIT_FUNC_TRACE();
PMD_DRV_LOG(DEBUG, "Found crypto device at %02x:%02x.%x",
- cryptodev->pci_dev->addr.bus,
- cryptodev->pci_dev->addr.devid,
- cryptodev->pci_dev->addr.function);
+ RTE_DEV_TO_PCI(cryptodev->device)->addr.bus,
+ RTE_DEV_TO_PCI(cryptodev->device)->addr.devid,
+ RTE_DEV_TO_PCI(cryptodev->device)->addr.function);
cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
cryptodev->dev_ops = &crypto_qat_ops;
@@ -100,10 +114,25 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
- RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
internals = cryptodev->data->dev_private;
internals->max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS;
+ switch (RTE_DEV_TO_PCI(cryptodev->device)->id.device_id) {
+ case 0x0443:
+ internals->qat_dev_capabilities = qat_cpm16_capabilities;
+ break;
+ case 0x37c9:
+ case 0x19e3:
+ case 0x6f55:
+ internals->qat_dev_capabilities = qat_cpm17_capabilities;
+ break;
+ default:
+ PMD_DRV_LOG(ERR,
+ "Invalid dev_id, can't determine capabilities");
+ break;
+ }
/*
* For secondary processes, we don't initialise any further as primary
diff --git a/drivers/crypto/scheduler/Makefile b/drivers/crypto/scheduler/Makefile
new file mode 100644
index 00000000..c273e784
--- /dev/null
+++ b/drivers/crypto/scheduler/Makefile
@@ -0,0 +1,60 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_crypto_scheduler.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_crypto_scheduler_version.map
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_cryptodev_scheduler_operations.h
+SYMLINK-y-include += rte_cryptodev_scheduler.h
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += rte_cryptodev_scheduler.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_roundrobin.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pkt_size_distr.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_failover.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
new file mode 100644
index 00000000..319dcf0a
--- /dev/null
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -0,0 +1,593 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <rte_reorder.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler.h"
+#include "scheduler_pmd_private.h"
+
+/** update the scheduler pmd's capability with attaching device's
+ * capability.
+ * For each device to be attached, the scheduler's capability should be
+ * the common capability set of all slaves
+ **/
+static uint32_t
+sync_caps(struct rte_cryptodev_capabilities *caps,
+ uint32_t nb_caps,
+ const struct rte_cryptodev_capabilities *slave_caps)
+{
+ uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
+ uint32_t i;
+
+ while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
+ nb_slave_caps++;
+
+ if (nb_caps == 0) {
+ rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
+ return nb_slave_caps;
+ }
+
+ for (i = 0; i < sync_nb_caps; i++) {
+ struct rte_cryptodev_capabilities *cap = &caps[i];
+ uint32_t j;
+
+ for (j = 0; j < nb_slave_caps; j++) {
+ const struct rte_cryptodev_capabilities *s_cap =
+ &slave_caps[j];
+
+ if (s_cap->op != cap->op || s_cap->sym.xform_type !=
+ cap->sym.xform_type)
+ continue;
+
+ if (s_cap->sym.xform_type ==
+ RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (s_cap->sym.auth.algo !=
+ cap->sym.auth.algo)
+ continue;
+
+ cap->sym.auth.digest_size.min =
+ s_cap->sym.auth.digest_size.min <
+ cap->sym.auth.digest_size.min ?
+ s_cap->sym.auth.digest_size.min :
+ cap->sym.auth.digest_size.min;
+ cap->sym.auth.digest_size.max =
+ s_cap->sym.auth.digest_size.max <
+ cap->sym.auth.digest_size.max ?
+ s_cap->sym.auth.digest_size.max :
+ cap->sym.auth.digest_size.max;
+
+ }
+
+ if (s_cap->sym.xform_type ==
+ RTE_CRYPTO_SYM_XFORM_CIPHER)
+ if (s_cap->sym.cipher.algo !=
+ cap->sym.cipher.algo)
+ continue;
+
+ /* no common cap found */
+ break;
+ }
+
+ if (j < nb_slave_caps)
+ continue;
+
+ /* remove a uncommon cap from the array */
+ for (j = i; j < sync_nb_caps - 1; j++)
+ rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));
+
+ memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
+ sync_nb_caps--;
+ }
+
+ return sync_nb_caps;
+}
+
+static int
+update_scheduler_capability(struct scheduler_ctx *sched_ctx)
+{
+ struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
+ uint32_t nb_caps = 0, i;
+
+ if (sched_ctx->capabilities)
+ rte_free(sched_ctx->capabilities);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+
+ nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
+ if (nb_caps == 0)
+ return -1;
+ }
+
+ sched_ctx->capabilities = rte_zmalloc_socket(NULL,
+ sizeof(struct rte_cryptodev_capabilities) *
+ (nb_caps + 1), 0, SOCKET_ID_ANY);
+ if (!sched_ctx->capabilities)
+ return -ENOMEM;
+
+ rte_memcpy(sched_ctx->capabilities, tmp_caps,
+ sizeof(struct rte_cryptodev_capabilities) * nb_caps);
+
+ return 0;
+}
+
+static void
+update_scheduler_feature_flag(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ dev->feature_flags = 0;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+
+ dev->feature_flags |= dev_info.feature_flags;
+ }
+}
+
+static void
+update_max_nb_qp(struct scheduler_ctx *sched_ctx)
+{
+ uint32_t i;
+ uint32_t max_nb_qp;
+
+ if (!sched_ctx->nb_slaves)
+ return;
+
+ max_nb_qp = sched_ctx->nb_slaves ? UINT32_MAX : 0;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+ max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
+ dev_info.max_nb_queue_pairs : max_nb_qp;
+ }
+
+ sched_ctx->max_nb_queue_pairs = max_nb_qp;
+}
+
+/** Attach a device to the scheduler. */
+int
+rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ struct scheduler_slave *slave;
+ struct rte_cryptodev_info dev_info;
+ uint32_t i;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CS_LOG_ERR("Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+ if (sched_ctx->nb_slaves >=
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
+ CS_LOG_ERR("Too many slaves attached");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++)
+ if (sched_ctx->slaves[i].dev_id == slave_id) {
+ CS_LOG_ERR("Slave already added");
+ return -ENOTSUP;
+ }
+
+ slave = &sched_ctx->slaves[sched_ctx->nb_slaves];
+
+ rte_cryptodev_info_get(slave_id, &dev_info);
+
+ slave->dev_id = slave_id;
+ slave->dev_type = dev_info.dev_type;
+ sched_ctx->nb_slaves++;
+
+ if (update_scheduler_capability(sched_ctx) < 0) {
+ slave->dev_id = 0;
+ slave->dev_type = 0;
+ sched_ctx->nb_slaves--;
+
+ CS_LOG_ERR("capabilities update failed");
+ return -ENOTSUP;
+ }
+
+ update_scheduler_feature_flag(dev);
+
+ update_max_nb_qp(sched_ctx);
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ uint32_t i, slave_pos;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CS_LOG_ERR("Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
+ if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
+ break;
+ if (slave_pos == sched_ctx->nb_slaves) {
+ CS_LOG_ERR("Cannot find slave");
+ return -ENOTSUP;
+ }
+
+ if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
+ CS_LOG_ERR("Failed to detach slave");
+ return -ENOTSUP;
+ }
+
+ for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
+ memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
+ sizeof(struct scheduler_slave));
+ }
+ memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
+ sizeof(struct scheduler_slave));
+ sched_ctx->nb_slaves--;
+
+ if (update_scheduler_capability(sched_ctx) < 0) {
+ CS_LOG_ERR("capabilities update failed");
+ return -ENOTSUP;
+ }
+
+ update_scheduler_feature_flag(dev);
+
+ update_max_nb_qp(sched_ctx);
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
+ enum rte_cryptodev_scheduler_mode mode)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CS_LOG_ERR("Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ if (mode == sched_ctx->mode)
+ return 0;
+
+ switch (mode) {
+ case CDEV_SCHED_MODE_ROUNDROBIN:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ roundrobin_scheduler) < 0) {
+ CS_LOG_ERR("Failed to load scheduler");
+ return -1;
+ }
+ break;
+ case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ pkt_size_based_distr_scheduler) < 0) {
+ CS_LOG_ERR("Failed to load scheduler");
+ return -1;
+ }
+ break;
+ case CDEV_SCHED_MODE_FAILOVER:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ failover_scheduler) < 0) {
+ CS_LOG_ERR("Failed to load scheduler");
+ return -1;
+ }
+ break;
+ default:
+ CS_LOG_ERR("Not yet supported");
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+int
+rte_crpytodev_scheduler_mode_set(uint8_t scheduler_id,
+ enum rte_cryptodev_scheduler_mode mode)
+{
+ return rte_cryptodev_scheduler_mode_set(scheduler_id, mode);
+}
+
+enum rte_cryptodev_scheduler_mode
+rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ return sched_ctx->mode;
+}
+
+enum rte_cryptodev_scheduler_mode
+rte_crpytodev_scheduler_mode_get(uint8_t scheduler_id)
+{
+ return rte_cryptodev_scheduler_mode_get(scheduler_id);
+}
+
+int
+rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
+ uint32_t enable_reorder)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CS_LOG_ERR("Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ sched_ctx->reordering_enabled = enable_reorder;
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ return (int)sched_ctx->reordering_enabled;
+}
+
+int
+rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
+ struct rte_cryptodev_scheduler *scheduler) {
+
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CS_LOG_ERR("Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ strncpy(sched_ctx->name, scheduler->name,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+ strncpy(sched_ctx->description, scheduler->description,
+ RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
+
+ /* load scheduler instance operations functions */
+ sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
+ sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
+ sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
+ sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
+ sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
+ sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
+ sched_ctx->ops.option_set = scheduler->ops->option_set;
+ sched_ctx->ops.option_get = scheduler->ops->option_get;
+
+ if (sched_ctx->private_ctx)
+ rte_free(sched_ctx->private_ctx);
+
+ if (sched_ctx->ops.create_private_ctx) {
+ int ret = (*sched_ctx->ops.create_private_ctx)(dev);
+
+ if (ret < 0) {
+ CS_LOG_ERR("Unable to create scheduler private "
+ "context");
+ return ret;
+ }
+ }
+
+ sched_ctx->mode = scheduler->mode;
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ uint32_t nb_slaves = 0;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ nb_slaves = sched_ctx->nb_slaves;
+
+ if (slaves && nb_slaves) {
+ uint32_t i;
+
+ for (i = 0; i < nb_slaves; i++)
+ slaves[i] = sched_ctx->slaves[i].dev_id;
+ }
+
+ return (int)nb_slaves;
+}
+
+int
+rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
+ option_type >= CDEV_SCHED_OPTION_COUNT) {
+ CS_LOG_ERR("Invalid option parameter");
+ return -EINVAL;
+ }
+
+ if (!option) {
+ CS_LOG_ERR("Invalid option parameter");
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_started) {
+ CS_LOG_ERR("Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);
+
+ return (*sched_ctx->ops.option_set)(dev, option_type, option);
+}
+
+int
+rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (!option) {
+ CS_LOG_ERR("Invalid option parameter");
+ return -EINVAL;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);
+
+ return (*sched_ctx->ops.option_get)(dev, option_type, option);
+}
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
new file mode 100644
index 00000000..2ba6e470
--- /dev/null
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -0,0 +1,334 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SCHEDULER_H
+#define _RTE_CRYPTO_SCHEDULER_H
+
+/**
+ * @file rte_cryptodev_scheduler.h
+ *
+ * RTE Cryptodev Scheduler Device
+ *
+ * The RTE Cryptodev Scheduler Device allows the aggregation of multiple (slave)
+ * Cryptodevs into a single logical crypto device, and the scheduling the
+ * crypto operations to the slaves based on the mode of the specified mode of
+ * operation specified and supported. This implementation supports 3 modes of
+ * operation: round robin, packet-size based, and fail-over.
+ */
+
+#include <stdint.h>
+#include "rte_cryptodev_scheduler_operations.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Maximum number of bonded devices per device */
+#ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES (8)
+#endif
+
+/** Round-robin scheduling mode string */
+#define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin
+/** Packet-size based distribution scheduling mode string */
+#define SCHEDULER_MODE_NAME_PKT_SIZE_DISTR packet-size-distr
+/** Fail-over scheduling mode string */
+#define SCHEDULER_MODE_NAME_FAIL_OVER fail-over
+
+/**
+ * Crypto scheduler PMD operation modes
+ */
+enum rte_cryptodev_scheduler_mode {
+ CDEV_SCHED_MODE_NOT_SET = 0,
+ /** User defined mode */
+ CDEV_SCHED_MODE_USERDEFINED,
+ /** Round-robin mode */
+ CDEV_SCHED_MODE_ROUNDROBIN,
+ /** Packet-size based distribution mode */
+ CDEV_SCHED_MODE_PKT_SIZE_DISTR,
+ /** Fail-over mode */
+ CDEV_SCHED_MODE_FAILOVER,
+
+ CDEV_SCHED_MODE_COUNT /**< number of modes */
+};
+
+#define RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN (64)
+#define RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN (256)
+
+/**
+ * Crypto scheduler option types
+ */
+enum rte_cryptodev_schedule_option_type {
+ CDEV_SCHED_OPTION_NOT_SET = 0,
+ CDEV_SCHED_OPTION_THRESHOLD,
+
+ CDEV_SCHED_OPTION_COUNT
+};
+
+/**
+ * Threshold option structure
+ */
+struct rte_cryptodev_scheduler_threshold_option {
+ uint32_t threshold; /**< Threshold for packet-size mode */
+};
+
+struct rte_cryptodev_scheduler;
+
+/**
+ * Load a user defined scheduler
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param scheduler
+ * Pointer to the user defined scheduler
+ *
+ * @return
+ * - 0 if the scheduler is successfully loaded
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
+ struct rte_cryptodev_scheduler *scheduler);
+
+/**
+ * Attach a crypto device to the scheduler
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param slave_id
+ * Crypto device ID to be attached
+ *
+ * @return
+ * - 0 if the slave is attached.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ * - -ENOMEM if the scheduler's slave list is full.
+ */
+int
+rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id);
+
+/**
+ * Detach a crypto device from the scheduler
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param slave_id
+ * Crypto device ID to be detached
+ *
+ * @return
+ * - 0 if the slave is detached.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id);
+
+
+/**
+ * Set the scheduling mode
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param mode
+ * The scheduling mode
+ *
+ * @return
+ * - 0 if the mode is set.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
+ enum rte_cryptodev_scheduler_mode mode);
+
+/**
+ * Get the current scheduling mode
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ *
+ * @return mode
+ * - non-negative enumerate value: the scheduling mode
+ * - -ENOTSUP if the operation is not supported.
+ */
+enum rte_cryptodev_scheduler_mode
+rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id);
+
+/**
+ * @deprecated
+ * Set the scheduling mode
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param mode
+ * The scheduling mode
+ *
+ * @return
+ * 0 if attaching successful, negative integer if otherwise.
+ */
+__rte_deprecated
+int
+rte_crpytodev_scheduler_mode_set(uint8_t scheduler_id,
+ enum rte_cryptodev_scheduler_mode mode);
+
+/**
+ * @deprecated
+ * Get the current scheduling mode
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ *
+ * @return
+ * If successful, returns the scheduling mode, negative integer
+ * otherwise
+ */
+__rte_deprecated
+enum rte_cryptodev_scheduler_mode
+rte_crpytodev_scheduler_mode_get(uint8_t scheduler_id);
+
+/**
+ * Set the crypto ops reordering feature on/off
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param enable_reorder
+ * Set the crypto op reordering feature
+ * - 0: disable reordering
+ * - 1: enable reordering
+ *
+ * @return
+ * - 0 if the ordering is set.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
+ uint32_t enable_reorder);
+
+/**
+ * Get the current crypto ops reordering feature
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ *
+ * @return
+ * - 0 if reordering is disabled
+ * - 1 if reordering is enabled
+ * - -ENOTSUP if the operation is not supported.
+ */
+int
+rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id);
+
+/**
+ * Get the the attached slaves' count and/or ID
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param slaves
+ * If successful, the function will write back all slaves' device IDs to it.
+ * This parameter will either be an uint8_t array of
+ * RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES elements or NULL.
+ *
+ * @return
+ * - non-negative number: the number of slaves attached
+ * - -ENOTSUP if the operation is not supported.
+ */
+int
+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves);
+
+/**
+ * Set the mode specific option
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param option_type
+ * The option type enumerate
+ * @param option
+ * The specific mode's option structure
+ *
+ * @return
+ * - 0 if successful
+ * - negative integer if otherwise.
+ */
+int
+rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option);
+
+/**
+ * Set the mode specific option
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param option_type
+ * The option type enumerate
+ * @param option
+ * If successful, the function will write back the current
+ *
+ * @return
+ * - 0 if successful
+ * - negative integer if otherwise.
+ */
+int
+rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option);
+
+typedef uint16_t (*rte_cryptodev_scheduler_burst_enqueue_t)(void *qp_ctx,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+typedef uint16_t (*rte_cryptodev_scheduler_burst_dequeue_t)(void *qp_ctx,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+/** The data structure associated with each mode of scheduler. */
+struct rte_cryptodev_scheduler {
+ const char *name; /**< Scheduler name */
+ const char *description; /**< Scheduler description */
+ enum rte_cryptodev_scheduler_mode mode; /**< Scheduling mode */
+
+ /** Pointer to scheduler operation structure */
+ struct rte_cryptodev_scheduler_ops *ops;
+};
+
+/** Round-robin mode scheduler */
+extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
+/** Packet-size based distribution mode scheduler */
+extern struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler;
+/** Fail-over mode scheduler */
+extern struct rte_cryptodev_scheduler *failover_scheduler;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_CRYPTO_SCHEDULER_H */
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
new file mode 100644
index 00000000..719165c3
--- /dev/null
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
@@ -0,0 +1,85 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SCHEDULER_OPERATIONS_H
+#define _RTE_CRYPTO_SCHEDULER_OPERATIONS_H
+
+#include <rte_cryptodev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int (*rte_cryptodev_scheduler_slave_attach_t)(
+ struct rte_cryptodev *dev, uint8_t slave_id);
+typedef int (*rte_cryptodev_scheduler_slave_detach_t)(
+ struct rte_cryptodev *dev, uint8_t slave_id);
+
+typedef int (*rte_cryptodev_scheduler_start_t)(struct rte_cryptodev *dev);
+typedef int (*rte_cryptodev_scheduler_stop_t)(struct rte_cryptodev *dev);
+
+typedef int (*rte_cryptodev_scheduler_config_queue_pair)(
+ struct rte_cryptodev *dev, uint16_t qp_id);
+
+typedef int (*rte_cryptodev_scheduler_create_private_ctx)(
+ struct rte_cryptodev *dev);
+
+typedef int (*rte_cryptodev_scheduler_config_option_set)(
+ struct rte_cryptodev *dev,
+ uint32_t option_type,
+ void *option);
+
+typedef int (*rte_cryptodev_scheduler_config_option_get)(
+ struct rte_cryptodev *dev,
+ uint32_t option_type,
+ void *option);
+
+struct rte_cryptodev_scheduler_ops {
+ rte_cryptodev_scheduler_slave_attach_t slave_attach;
+ rte_cryptodev_scheduler_slave_attach_t slave_detach;
+
+ rte_cryptodev_scheduler_start_t scheduler_start;
+ rte_cryptodev_scheduler_stop_t scheduler_stop;
+
+ rte_cryptodev_scheduler_config_queue_pair config_queue_pair;
+
+ rte_cryptodev_scheduler_create_private_ctx create_private_ctx;
+
+ rte_cryptodev_scheduler_config_option_set option_set;
+ rte_cryptodev_scheduler_config_option_get option_get;
+};
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_CRYPTO_SCHEDULER_OPERATIONS_H */
diff --git a/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map b/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
new file mode 100644
index 00000000..0a8b4716
--- /dev/null
+++ b/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
@@ -0,0 +1,23 @@
+DPDK_17.02 {
+ global:
+
+ rte_cryptodev_scheduler_load_user_scheduler;
+ rte_cryptodev_scheduler_slave_attach;
+ rte_cryptodev_scheduler_slave_detach;
+ rte_crpytodev_scheduler_mode_set;
+ rte_crpytodev_scheduler_mode_get;
+ rte_cryptodev_scheduler_ordering_set;
+ rte_cryptodev_scheduler_ordering_get;
+
+};
+
+DPDK_17.05 {
+ global:
+
+ rte_cryptodev_scheduler_mode_get;
+ rte_cryptodev_scheduler_mode_set;
+ rte_cryptodev_scheduler_option_get;
+ rte_cryptodev_scheduler_option_set;
+ rte_cryptodev_scheduler_slaves_get;
+
+} DPDK_17.02;
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
new file mode 100644
index 00000000..2471a5f1
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -0,0 +1,287 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define PRIMARY_SLAVE_IDX 0
+#define SECONDARY_SLAVE_IDX 1
+#define NB_FAILOVER_SLAVES 2
+#define SLAVE_SWITCH_MASK (0x01)
+
+struct fo_scheduler_qp_ctx {
+ struct scheduler_slave primary_slave;
+ struct scheduler_slave secondary_slave;
+
+ uint8_t deq_idx;
+};
+
+static inline uint16_t __attribute__((always_inline))
+failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ uint16_t i, processed_ops;
+ struct rte_cryptodev_sym_session *sessions[nb_ops];
+ struct scheduler_session *sess0, *sess1, *sess2, *sess3;
+
+ for (i = 0; i < nb_ops && i < 4; i++)
+ rte_prefetch0(ops[i]->sym->session);
+
+ for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
+ rte_prefetch0(ops[i + 4]->sym->session);
+ rte_prefetch0(ops[i + 5]->sym->session);
+ rte_prefetch0(ops[i + 6]->sym->session);
+ rte_prefetch0(ops[i + 7]->sym->session);
+
+ sess0 = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+ sess1 = (struct scheduler_session *)
+ ops[i+1]->sym->session->_private;
+ sess2 = (struct scheduler_session *)
+ ops[i+2]->sym->session->_private;
+ sess3 = (struct scheduler_session *)
+ ops[i+3]->sym->session->_private;
+
+ sessions[i] = ops[i]->sym->session;
+ sessions[i + 1] = ops[i + 1]->sym->session;
+ sessions[i + 2] = ops[i + 2]->sym->session;
+ sessions[i + 3] = ops[i + 3]->sym->session;
+
+ ops[i]->sym->session = sess0->sessions[slave_idx];
+ ops[i + 1]->sym->session = sess1->sessions[slave_idx];
+ ops[i + 2]->sym->session = sess2->sessions[slave_idx];
+ ops[i + 3]->sym->session = sess3->sessions[slave_idx];
+ }
+
+ for (; i < nb_ops; i++) {
+ sess0 = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+ sessions[i] = ops[i]->sym->session;
+ ops[i]->sym->session = sess0->sessions[slave_idx];
+ }
+
+ processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops += processed_ops;
+
+ if (unlikely(processed_ops < nb_ops))
+ for (i = processed_ops; i < nb_ops; i++)
+ ops[i]->sym->session = sessions[i];
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct fo_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ uint16_t enqueued_ops;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
+ PRIMARY_SLAVE_IDX, ops, nb_ops);
+
+ if (enqueued_ops < nb_ops)
+ enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
+ SECONDARY_SLAVE_IDX, &ops[enqueued_ops],
+ nb_ops - enqueued_ops);
+
+ return enqueued_ops;
+}
+
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct fo_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_slave *slaves[NB_FAILOVER_SLAVES] = {
+ &qp_ctx->primary_slave, &qp_ctx->secondary_slave};
+ struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
+ uint16_t nb_deq_ops = 0, nb_deq_ops2 = 0;
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops -= nb_deq_ops;
+ }
+
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_SWITCH_MASK;
+
+ if (nb_deq_ops == nb_ops)
+ return nb_deq_ops;
+
+ slave = slaves[qp_ctx->deq_idx];
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops2 = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops);
+ slave->nb_inflight_cops -= nb_deq_ops2;
+ }
+
+ return nb_deq_ops + nb_deq_ops2;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+
+ schedule_dequeue(qp, ops, nb_ops);
+
+ return scheduler_order_drain(order_ring, ops, nb_ops);
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint16_t i;
+
+ if (sched_ctx->nb_slaves < 2) {
+ CS_LOG_ERR("Number of slaves shall no less than 2");
+ return -ENOMEM;
+ }
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = schedule_enqueue_ordering;
+ dev->dequeue_burst = schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = schedule_enqueue;
+ dev->dequeue_burst = schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct fo_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)
+ dev->data->queue_pairs[i])->private_qp_ctx;
+
+ rte_memcpy(&qp_ctx->primary_slave,
+ &sched_ctx->slaves[PRIMARY_SLAVE_IDX],
+ sizeof(struct scheduler_slave));
+ rte_memcpy(&qp_ctx->secondary_slave,
+ &sched_ctx->slaves[SECONDARY_SLAVE_IDX],
+ sizeof(struct scheduler_slave));
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct fo_scheduler_qp_ctx *fo_qp_ctx;
+
+ fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
+ rte_socket_id());
+ if (!fo_qp_ctx) {
+ CS_LOG_ERR("failed allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ qp_ctx->private_qp_ctx = (void *)fo_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ NULL, /* option_set */
+ NULL /*option_get */
+};
+
+struct rte_cryptodev_scheduler fo_scheduler = {
+ .name = "failover-scheduler",
+ .description = "scheduler which enqueues to the primary slave, "
+ "and only then enqueues to the secondary slave "
+ "upon failing on enqueuing to primary",
+ .mode = CDEV_SCHED_MODE_FAILOVER,
+ .ops = &scheduler_fo_ops
+};
+
+struct rte_cryptodev_scheduler *failover_scheduler = &fo_scheduler;
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
new file mode 100644
index 00000000..6b628dfa
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -0,0 +1,464 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define DEF_PKT_SIZE_THRESHOLD (0xffffff80)
+#define SLAVE_IDX_SWITCH_MASK (0x01)
+#define PRIMARY_SLAVE_IDX 0
+#define SECONDARY_SLAVE_IDX 1
+#define NB_PKT_SIZE_SLAVES 2
+
+/** pkt size based scheduler context */
+struct psd_scheduler_ctx {
+ uint32_t threshold;
+};
+
+/** pkt size based scheduler queue pair context */
+struct psd_scheduler_qp_ctx {
+ struct scheduler_slave primary_slave;
+ struct scheduler_slave secondary_slave;
+ uint32_t threshold;
+ uint8_t deq_idx;
+} __rte_cache_aligned;
+
+/** scheduling operation variables' wrapping */
+struct psd_schedule_op {
+ uint8_t slave_idx;
+ uint16_t pos;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
+ struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
+ struct scheduler_session *sess;
+ uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
+ psd_qp_ctx->primary_slave.nb_inflight_cops,
+ psd_qp_ctx->secondary_slave.nb_inflight_cops
+ };
+ struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
+ {PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
+ };
+ struct psd_schedule_op *p_enq_op;
+ uint16_t i, processed_ops_pri = 0, processed_ops_sec = 0;
+ uint32_t job_len;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ for (i = 0; i < nb_ops && i < 4; i++) {
+ rte_prefetch0(ops[i]->sym);
+ rte_prefetch0(ops[i]->sym->session);
+ }
+
+ for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
+ rte_prefetch0(ops[i + 4]->sym);
+ rte_prefetch0(ops[i + 4]->sym->session);
+ rte_prefetch0(ops[i + 5]->sym);
+ rte_prefetch0(ops[i + 5]->sym->session);
+ rte_prefetch0(ops[i + 6]->sym);
+ rte_prefetch0(ops[i + 6]->sym->session);
+ rte_prefetch0(ops[i + 7]->sym);
+ rte_prefetch0(ops[i + 7]->sym->session);
+
+ sess = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+ /* job_len is initialized as cipher data length, once
+ * it is 0, equals to auth data length
+ */
+ job_len = ops[i]->sym->cipher.data.length;
+ job_len += (ops[i]->sym->cipher.data.length == 0) *
+ ops[i]->sym->auth.data.length;
+ /* decide the target op based on the job length */
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ /* stop schedule cops before the queue is full, this shall
+ * prevent the failed enqueue
+ */
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+ ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
+ p_enq_op->pos++;
+
+ sess = (struct scheduler_session *)
+ ops[i+1]->sym->session->_private;
+ job_len = ops[i+1]->sym->cipher.data.length;
+ job_len += (ops[i+1]->sym->cipher.data.length == 0) *
+ ops[i+1]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
+ ops[i+1]->sym->session = sess->sessions[p_enq_op->slave_idx];
+ p_enq_op->pos++;
+
+ sess = (struct scheduler_session *)
+ ops[i+2]->sym->session->_private;
+ job_len = ops[i+2]->sym->cipher.data.length;
+ job_len += (ops[i+2]->sym->cipher.data.length == 0) *
+ ops[i+2]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
+ ops[i+2]->sym->session = sess->sessions[p_enq_op->slave_idx];
+ p_enq_op->pos++;
+
+ sess = (struct scheduler_session *)
+ ops[i+3]->sym->session->_private;
+
+ job_len = ops[i+3]->sym->cipher.data.length;
+ job_len += (ops[i+3]->sym->cipher.data.length == 0) *
+ ops[i+3]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
+ ops[i+3]->sym->session = sess->sessions[p_enq_op->slave_idx];
+ p_enq_op->pos++;
+ }
+
+ for (; i < nb_ops; i++) {
+ sess = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+
+ job_len = ops[i]->sym->cipher.data.length;
+ job_len += (ops[i]->sym->cipher.data.length == 0) *
+ ops[i]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+ ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
+ p_enq_op->pos++;
+ }
+
+ processed_ops_pri = rte_cryptodev_enqueue_burst(
+ psd_qp_ctx->primary_slave.dev_id,
+ psd_qp_ctx->primary_slave.qp_id,
+ sched_ops[PRIMARY_SLAVE_IDX],
+ enq_ops[PRIMARY_SLAVE_IDX].pos);
+ /* enqueue shall not fail as the slave queue is monitored */
+ RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);
+
+ psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
+
+ processed_ops_sec = rte_cryptodev_enqueue_burst(
+ psd_qp_ctx->secondary_slave.dev_id,
+ psd_qp_ctx->secondary_slave.qp_id,
+ sched_ops[SECONDARY_SLAVE_IDX],
+ enq_ops[SECONDARY_SLAVE_IDX].pos);
+ RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);
+
+ psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
+
+ return processed_ops_pri + processed_ops_sec;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct psd_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_slave *slaves[NB_PKT_SIZE_SLAVES] = {
+ &qp_ctx->primary_slave, &qp_ctx->secondary_slave};
+ struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
+ uint16_t nb_deq_ops_pri = 0, nb_deq_ops_sec = 0;
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops_pri = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops -= nb_deq_ops_pri;
+ }
+
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_IDX_SWITCH_MASK;
+
+ if (nb_deq_ops_pri == nb_ops)
+ return nb_deq_ops_pri;
+
+ slave = slaves[qp_ctx->deq_idx];
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops_sec = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, &ops[nb_deq_ops_pri],
+ nb_ops - nb_deq_ops_pri);
+ slave->nb_inflight_cops -= nb_deq_ops_sec;
+
+ if (!slave->nb_inflight_cops)
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) &
+ SLAVE_IDX_SWITCH_MASK;
+ }
+
+ return nb_deq_ops_pri + nb_deq_ops_sec;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+
+ schedule_dequeue(qp, ops, nb_ops);
+
+ return scheduler_order_drain(order_ring, ops, nb_ops);
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct psd_scheduler_ctx *psd_ctx = sched_ctx->private_ctx;
+ uint16_t i;
+
+ /* for packet size based scheduler, nb_slaves have to >= 2 */
+ if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
+ CS_LOG_ERR("not enough slaves to start");
+ return -1;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct psd_scheduler_qp_ctx *ps_qp_ctx =
+ qp_ctx->private_qp_ctx;
+
+ ps_qp_ctx->primary_slave.dev_id =
+ sched_ctx->slaves[PRIMARY_SLAVE_IDX].dev_id;
+ ps_qp_ctx->primary_slave.qp_id = i;
+ ps_qp_ctx->primary_slave.nb_inflight_cops = 0;
+
+ ps_qp_ctx->secondary_slave.dev_id =
+ sched_ctx->slaves[SECONDARY_SLAVE_IDX].dev_id;
+ ps_qp_ctx->secondary_slave.qp_id = i;
+ ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;
+
+ ps_qp_ctx->threshold = psd_ctx->threshold;
+ }
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(struct rte_cryptodev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct psd_scheduler_qp_ctx *ps_qp_ctx = qp_ctx->private_qp_ctx;
+
+ if (ps_qp_ctx->primary_slave.nb_inflight_cops +
+ ps_qp_ctx->secondary_slave.nb_inflight_cops) {
+ CS_LOG_ERR("Some crypto ops left in slave queue");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct psd_scheduler_qp_ctx *ps_qp_ctx;
+
+ ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0,
+ rte_socket_id());
+ if (!ps_qp_ctx) {
+ CS_LOG_ERR("failed allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ qp_ctx->private_qp_ctx = (void *)ps_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct psd_scheduler_ctx *psd_ctx;
+
+ if (sched_ctx->private_ctx)
+ rte_free(sched_ctx->private_ctx);
+
+ psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
+ rte_socket_id());
+ if (!psd_ctx) {
+ CS_LOG_ERR("failed allocate memory");
+ return -ENOMEM;
+ }
+
+ psd_ctx->threshold = DEF_PKT_SIZE_THRESHOLD;
+
+ sched_ctx->private_ctx = (void *)psd_ctx;
+
+ return 0;
+}
+static int
+scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type,
+ void *option)
+{
+ struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
+ dev->data->dev_private)->private_ctx;
+ uint32_t threshold;
+
+ if ((enum rte_cryptodev_schedule_option_type)option_type !=
+ CDEV_SCHED_OPTION_THRESHOLD) {
+ CS_LOG_ERR("Option not supported");
+ return -EINVAL;
+ }
+
+ threshold = ((struct rte_cryptodev_scheduler_threshold_option *)
+ option)->threshold;
+ if (!rte_is_power_of_2(threshold)) {
+ CS_LOG_ERR("Threshold is not power of 2");
+ return -EINVAL;
+ }
+
+ psd_ctx->threshold = ~(threshold - 1);
+
+ return 0;
+}
+
+static int
+scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
+ void *option)
+{
+ struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
+ dev->data->dev_private)->private_ctx;
+ struct rte_cryptodev_scheduler_threshold_option *threshold_option;
+
+ if ((enum rte_cryptodev_schedule_option_type)option_type !=
+ CDEV_SCHED_OPTION_THRESHOLD) {
+ CS_LOG_ERR("Option not supported");
+ return -EINVAL;
+ }
+
+ threshold_option = option;
+ threshold_option->threshold = (~psd_ctx->threshold) + 1;
+
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ scheduler_option_set,
+ scheduler_option_get
+};
+
+struct rte_cryptodev_scheduler psd_scheduler = {
+ .name = "packet-size-based-scheduler",
+ .description = "scheduler which will distribute crypto op "
+ "burst based on the packet size",
+ .mode = CDEV_SCHED_MODE_PKT_SIZE_DISTR,
+ .ops = &scheduler_ps_ops
+};
+
+struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler = &psd_scheduler;
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
new file mode 100644
index 00000000..0b63c20b
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -0,0 +1,456 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+#include <rte_reorder.h>
+
+#include "rte_cryptodev_scheduler.h"
+#include "scheduler_pmd_private.h"
+
+struct scheduler_init_params {
+ struct rte_crypto_vdev_init_params def_p;
+ uint32_t nb_slaves;
+ enum rte_cryptodev_scheduler_mode mode;
+ uint32_t enable_ordering;
+ char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
+ [RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
+};
+
+#define RTE_CRYPTODEV_VDEV_NAME ("name")
+#define RTE_CRYPTODEV_VDEV_SLAVE ("slave")
+#define RTE_CRYPTODEV_VDEV_MODE ("mode")
+#define RTE_CRYPTODEV_VDEV_ORDERING ("ordering")
+#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
+#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
+#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
+
+const char *scheduler_valid_params[] = {
+ RTE_CRYPTODEV_VDEV_NAME,
+ RTE_CRYPTODEV_VDEV_SLAVE,
+ RTE_CRYPTODEV_VDEV_MODE,
+ RTE_CRYPTODEV_VDEV_ORDERING,
+ RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+ RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
+ RTE_CRYPTODEV_VDEV_SOCKET_ID
+};
+
+struct scheduler_parse_map {
+ const char *name;
+ uint32_t val;
+};
+
+const struct scheduler_parse_map scheduler_mode_map[] = {
+ {RTE_STR(SCHEDULER_MODE_NAME_ROUND_ROBIN),
+ CDEV_SCHED_MODE_ROUNDROBIN},
+ {RTE_STR(SCHEDULER_MODE_NAME_PKT_SIZE_DISTR),
+ CDEV_SCHED_MODE_PKT_SIZE_DISTR},
+ {RTE_STR(SCHEDULER_MODE_NAME_FAIL_OVER),
+ CDEV_SCHED_MODE_FAILOVER}
+};
+
+const struct scheduler_parse_map scheduler_ordering_map[] = {
+ {"enable", 1},
+ {"disable", 0}
+};
+
+static int
+cryptodev_scheduler_create(const char *name,
+ struct scheduler_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct scheduler_ctx *sched_ctx;
+ uint32_t i;
+ int ret;
+
+ if (init_params->def_p.name[0] == '\0')
+ snprintf(init_params->def_p.name,
+ sizeof(init_params->def_p.name),
+ "%s", name);
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->def_p.name,
+ sizeof(struct scheduler_ctx),
+ init_params->def_p.socket_id);
+ if (dev == NULL) {
+ CS_LOG_ERR("driver %s: failed to create cryptodev vdev",
+ name);
+ return -EFAULT;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_SCHEDULER_PMD;
+ dev->dev_ops = rte_crypto_scheduler_pmd_ops;
+
+ sched_ctx = dev->data->dev_private;
+ sched_ctx->max_nb_queue_pairs =
+ init_params->def_p.max_nb_queue_pairs;
+
+ if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
+ init_params->mode < CDEV_SCHED_MODE_COUNT) {
+ ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
+ init_params->mode);
+ if (ret < 0) {
+ rte_cryptodev_pmd_release_device(dev);
+ return ret;
+ }
+
+ for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) {
+ if (scheduler_mode_map[i].val != sched_ctx->mode)
+ continue;
+
+ RTE_LOG(INFO, PMD, " Scheduling mode = %s\n",
+ scheduler_mode_map[i].name);
+ break;
+ }
+ }
+
+ sched_ctx->reordering_enabled = init_params->enable_ordering;
+
+ for (i = 0; i < RTE_DIM(scheduler_ordering_map); i++) {
+ if (scheduler_ordering_map[i].val !=
+ sched_ctx->reordering_enabled)
+ continue;
+
+ RTE_LOG(INFO, PMD, " Packet ordering = %s\n",
+ scheduler_ordering_map[i].name);
+
+ break;
+ }
+
+ for (i = 0; i < init_params->nb_slaves; i++) {
+ sched_ctx->init_slave_names[sched_ctx->nb_init_slaves] =
+ rte_zmalloc_socket(
+ NULL,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN, 0,
+ SOCKET_ID_ANY);
+
+ if (!sched_ctx->init_slave_names[
+ sched_ctx->nb_init_slaves]) {
+ CS_LOG_ERR("driver %s: Insufficient memory",
+ name);
+ return -ENOMEM;
+ }
+
+ strncpy(sched_ctx->init_slave_names[
+ sched_ctx->nb_init_slaves],
+ init_params->slave_names[i],
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
+
+ sched_ctx->nb_init_slaves++;
+ }
+
+ /*
+ * Initialize capabilities structure as an empty structure,
+ * in case device information is requested when no slaves are attached
+ */
+ sched_ctx->capabilities = rte_zmalloc_socket(NULL,
+ sizeof(struct rte_cryptodev_capabilities),
+ 0, SOCKET_ID_ANY);
+
+ if (!sched_ctx->capabilities) {
+ RTE_LOG(ERR, PMD, "Not enough memory for capability "
+ "information\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ struct rte_cryptodev *dev;
+ struct scheduler_ctx *sched_ctx;
+
+ if (vdev == NULL)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(vdev);
+ dev = rte_cryptodev_pmd_get_named_dev(name);
+ if (dev == NULL)
+ return -EINVAL;
+
+ sched_ctx = dev->data->dev_private;
+
+ if (sched_ctx->nb_slaves) {
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++)
+ rte_cryptodev_scheduler_slave_detach(dev->data->dev_id,
+ sched_ctx->slaves[i].dev_id);
+ }
+
+ RTE_LOG(INFO, PMD, "Closing Crypto Scheduler device %s on numa "
+ "socket %u\n", name, rte_socket_id());
+
+ return 0;
+}
+
+static uint8_t
+number_of_sockets(void)
+{
+ int sockets = 0;
+ int i;
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+
+ for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
+ if (sockets < ms[i].socket_id)
+ sockets = ms[i].socket_id;
+ }
+
+ /* Number of sockets = maximum socket_id + 1 */
+ return ++sockets;
+}
+
+/** Parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+ CS_LOG_ERR("Argument has to be positive.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Parse name */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_crypto_vdev_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CS_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+/** Parse slave */
+static int
+parse_slave_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+
+ if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES - 1) {
+ CS_LOG_ERR("Too many slaves.\n");
+ return -ENOMEM;
+ }
+
+ strncpy(param->slave_names[param->nb_slaves++], value,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
+
+ return 0;
+}
+
+static int
+parse_mode_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) {
+ if (strcmp(value, scheduler_mode_map[i].name) == 0) {
+ param->mode = (enum rte_cryptodev_scheduler_mode)
+ scheduler_mode_map[i].val;
+ break;
+ }
+ }
+
+ if (i == RTE_DIM(scheduler_mode_map)) {
+ CS_LOG_ERR("Unrecognized input.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+parse_ordering_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(scheduler_ordering_map); i++) {
+ if (strcmp(value, scheduler_ordering_map[i].name) == 0) {
+ param->enable_ordering =
+ scheduler_ordering_map[i].val;
+ break;
+ }
+ }
+
+ if (i == RTE_DIM(scheduler_ordering_map)) {
+ CS_LOG_ERR("Unrecognized input.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_parse_init_params(struct scheduler_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ scheduler_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+ &parse_integer_arg,
+ &params->def_p.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
+ &parse_integer_arg,
+ &params->def_p.max_nb_sessions);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
+ &parse_integer_arg,
+ &params->def_p.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
+ &parse_name_arg,
+ &params->def_p);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SLAVE,
+ &parse_slave_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_MODE,
+ &parse_mode_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_ORDERING,
+ &parse_ordering_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ if (params->def_p.socket_id >= number_of_sockets()) {
+ CDEV_LOG_ERR("Invalid socket id specified to create "
+ "the virtual crypto device on");
+ goto free_kvlist;
+ }
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
+{
+ struct scheduler_init_params init_params = {
+ .def_p = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id(),
+ ""
+ },
+ .nb_slaves = 0,
+ .mode = CDEV_SCHED_MODE_NOT_SET,
+ .enable_ordering = 0,
+ .slave_names = { {0} }
+ };
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ scheduler_parse_init_params(&init_params,
+ rte_vdev_device_args(vdev));
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n",
+ name,
+ init_params.def_p.socket_id);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.def_p.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.def_p.max_nb_sessions);
+ if (init_params.def_p.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.def_p.name);
+
+ return cryptodev_scheduler_create(name,
+ &init_params);
+}
+
+static struct rte_vdev_driver cryptodev_scheduler_pmd_drv = {
+ .probe = cryptodev_scheduler_probe,
+ .remove = cryptodev_scheduler_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,
+ cryptodev_scheduler_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int> "
+ "slave=<name>");
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
new file mode 100644
index 00000000..2b5858df
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -0,0 +1,571 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+
+#include <rte_config.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_reorder.h>
+
+#include "scheduler_pmd_private.h"
+
+/** attaching the slaves predefined by scheduler's EAL options */
+static int
+scheduler_attach_init_slave(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint8_t scheduler_id = dev->data->dev_id;
+ int i;
+
+ for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
+ const char *dev_name = sched_ctx->init_slave_names[i];
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_named_dev(dev_name);
+ int status;
+
+ if (!slave_dev) {
+ CS_LOG_ERR("Failed to locate slave dev %s",
+ dev_name);
+ return -EINVAL;
+ }
+
+ status = rte_cryptodev_scheduler_slave_attach(
+ scheduler_id, slave_dev->data->dev_id);
+
+ if (status < 0) {
+ CS_LOG_ERR("Failed to attach slave cryptodev %u",
+ slave_dev->data->dev_id);
+ return status;
+ }
+
+ CS_LOG_INFO("Scheduler %s attached slave %s\n",
+ dev->data->name,
+ sched_ctx->init_slave_names[i]);
+
+ rte_free(sched_ctx->init_slave_names[i]);
+
+ sched_ctx->nb_init_slaves -= 1;
+ }
+
+ return 0;
+}
+/** Configure device */
+static int
+scheduler_pmd_config(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+ /* although scheduler_attach_init_slave presents multiple times,
+ * there will be only 1 meaningful execution.
+ */
+ ret = scheduler_attach_init_slave(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+ ret = rte_cryptodev_configure(slave_dev_id, config);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int
+update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+
+ if (sched_ctx->reordering_enabled) {
+ char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ uint32_t buff_size = rte_align32pow2(
+ sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);
+
+ if (qp_ctx->order_ring) {
+ rte_ring_free(qp_ctx->order_ring);
+ qp_ctx->order_ring = NULL;
+ }
+
+ if (!buff_size)
+ return 0;
+
+ if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
+ dev->data->dev_id, qp_id) < 0) {
+ CS_LOG_ERR("failed to create unique reorder buffer "
+ "name");
+ return -ENOMEM;
+ }
+
+ qp_ctx->order_ring = rte_ring_create(order_ring_name,
+ buff_size, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (!qp_ctx->order_ring) {
+ CS_LOG_ERR("failed to create order ring");
+ return -ENOMEM;
+ }
+ } else {
+ if (qp_ctx->order_ring) {
+ rte_ring_free(qp_ctx->order_ring);
+ qp_ctx->order_ring = NULL;
+ }
+ }
+
+ return 0;
+}
+
+/** Start device */
+static int
+scheduler_pmd_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+ if (dev->data->dev_started)
+ return 0;
+
+ /* although scheduler_attach_init_slave presents multiple times,
+ * there will be only 1 meaningful execution.
+ */
+ ret = scheduler_attach_init_slave(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = update_order_ring(dev, i);
+ if (ret < 0) {
+ CS_LOG_ERR("Failed to update reorder buffer");
+ return ret;
+ }
+ }
+
+ if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
+ CS_LOG_ERR("Scheduler mode is not set");
+ return -1;
+ }
+
+ if (!sched_ctx->nb_slaves) {
+ CS_LOG_ERR("No slave in the scheduler");
+ return -1;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+ if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
+ CS_LOG_ERR("Failed to attach slave");
+ return -ENOTSUP;
+ }
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);
+
+ if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
+ CS_LOG_ERR("Scheduler start failed");
+ return -1;
+ }
+
+ /* start all slaves */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
+ if (ret < 0) {
+ CS_LOG_ERR("Failed to start slave dev %u",
+ slave_dev_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/** Stop device */
+static void
+scheduler_pmd_stop(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ if (!dev->data->dev_started)
+ return;
+
+ /* stop all slaves first */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ (*slave_dev->dev_ops->dev_stop)(slave_dev);
+ }
+
+ if (*sched_ctx->ops.scheduler_stop)
+ (*sched_ctx->ops.scheduler_stop)(dev);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+ if (*sched_ctx->ops.slave_detach)
+ (*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
+ }
+}
+
+/** Close device */
+static int
+scheduler_pmd_close(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+ /* the dev should be stopped before being closed */
+ if (dev->data->dev_started)
+ return -EBUSY;
+
+ /* close all slaves first */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+
+ if (qp_ctx->order_ring) {
+ rte_ring_free(qp_ctx->order_ring);
+ qp_ctx->order_ring = NULL;
+ }
+
+ if (qp_ctx->private_qp_ctx) {
+ rte_free(qp_ctx->private_qp_ctx);
+ qp_ctx->private_qp_ctx = NULL;
+ }
+ }
+
+ if (sched_ctx->private_ctx)
+ rte_free(sched_ctx->private_ctx);
+
+ if (sched_ctx->capabilities)
+ rte_free(sched_ctx->capabilities);
+
+ return 0;
+}
+
+/** Get device statistics */
+static void
+scheduler_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+ struct rte_cryptodev_stats slave_stats = {0};
+
+ (*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);
+
+ stats->enqueued_count += slave_stats.enqueued_count;
+ stats->dequeued_count += slave_stats.dequeued_count;
+
+ stats->enqueue_err_count += slave_stats.enqueue_err_count;
+ stats->dequeue_err_count += slave_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ (*slave_dev->dev_ops->stats_reset)(slave_dev);
+ }
+}
+
+/** Get device info */
+static void
+scheduler_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
+ UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
+ uint32_t i;
+
+ if (!dev_info)
+ return;
+
+ /* although scheduler_attach_init_slave presents multiple times,
+ * there will be only 1 meaningful execution.
+ */
+ scheduler_attach_init_slave(dev);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev_info slave_info;
+
+ rte_cryptodev_info_get(slave_dev_id, &slave_info);
+ max_nb_sessions = slave_info.sym.max_nb_sessions <
+ max_nb_sessions ?
+ slave_info.sym.max_nb_sessions :
+ max_nb_sessions;
+ }
+
+ dev_info->dev_type = dev->dev_type;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = sched_ctx->capabilities;
+ dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
+ dev_info->sym.max_nb_sessions = max_nb_sessions;
+}
+
+/** Release queue pair */
+static int
+scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+
+ if (!qp_ctx)
+ return 0;
+
+ if (qp_ctx->order_ring)
+ rte_ring_free(qp_ctx->order_ring);
+ if (qp_ctx->private_qp_ctx)
+ rte_free(qp_ctx->private_qp_ctx);
+
+ rte_free(qp_ctx);
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+/** Setup a queue pair */
+static int
+scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct scheduler_qp_ctx *qp_ctx;
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ uint32_t i;
+ int ret;
+
+ if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "CRYTO_SCHE PMD %u QP %u",
+ dev->data->dev_id, qp_id) < 0) {
+ CS_LOG_ERR("Failed to create unique queue pair name");
+ return -EFAULT;
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ scheduler_pmd_qp_release(dev, qp_id);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_id = sched_ctx->slaves[i].dev_id;
+
+ ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
+ qp_conf, socket_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Allocate the queue pair data structure. */
+ qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (qp_ctx == NULL)
+ return -ENOMEM;
+
+ /* The actual available object number = nb_descriptors - 1 */
+ qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;
+
+ dev->data->queue_pairs[qp_id] = qp_ctx;
+
+ /* although scheduler_attach_init_slave presents multiple times,
+ * there will be only 1 meaningful execution.
+ */
+ ret = scheduler_attach_init_slave(dev);
+ if (ret < 0) {
+ CS_LOG_ERR("Failed to attach slave");
+ scheduler_pmd_qp_release(dev, qp_id);
+ return ret;
+ }
+
+ if (*sched_ctx->ops.config_queue_pair) {
+ if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
+ CS_LOG_ERR("Unable to configure queue pair");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/** Start queue pair */
+static int
+scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+scheduler_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+static uint32_t
+scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct scheduler_session);
+}
+
+static int
+config_slave_sess(struct scheduler_ctx *sched_ctx,
+ struct rte_crypto_sym_xform *xform,
+ struct scheduler_session *sess,
+ uint32_t create)
+{
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct scheduler_slave *slave = &sched_ctx->slaves[i];
+
+ if (sess->sessions[i]) {
+ if (create)
+ continue;
+ /* !create */
+ sess->sessions[i] = rte_cryptodev_sym_session_free(
+ slave->dev_id, sess->sessions[i]);
+ } else {
+ if (!create)
+ continue;
+ /* create */
+ sess->sessions[i] =
+ rte_cryptodev_sym_session_create(
+ slave->dev_id, xform);
+ if (!sess->sessions[i]) {
+ config_slave_sess(sched_ctx, NULL, sess, 0);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+scheduler_pmd_session_clear(struct rte_cryptodev *dev,
+ void *sess)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+
+ config_slave_sess(sched_ctx, NULL, sess, 0);
+
+ memset(sess, 0, sizeof(struct scheduler_session));
+}
+
+static void *
+scheduler_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+
+ if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
+ CS_LOG_ERR("unabled to config sym session");
+ return NULL;
+ }
+
+ return sess;
+}
+
+struct rte_cryptodev_ops scheduler_pmd_ops = {
+ .dev_configure = scheduler_pmd_config,
+ .dev_start = scheduler_pmd_start,
+ .dev_stop = scheduler_pmd_stop,
+ .dev_close = scheduler_pmd_close,
+
+ .stats_get = scheduler_pmd_stats_get,
+ .stats_reset = scheduler_pmd_stats_reset,
+
+ .dev_infos_get = scheduler_pmd_info_get,
+
+ .queue_pair_setup = scheduler_pmd_qp_setup,
+ .queue_pair_release = scheduler_pmd_qp_release,
+ .queue_pair_start = scheduler_pmd_qp_start,
+ .queue_pair_stop = scheduler_pmd_qp_stop,
+ .queue_pair_count = scheduler_pmd_qp_count,
+
+ .session_get_size = scheduler_pmd_session_get_size,
+ .session_configure = scheduler_pmd_session_configure,
+ .session_clear = scheduler_pmd_session_clear,
+};
+
+struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
new file mode 100644
index 00000000..421dae37
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -0,0 +1,156 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCHEDULER_PMD_PRIVATE_H
+#define _SCHEDULER_PMD_PRIVATE_H
+
+#include "rte_cryptodev_scheduler.h"
+
+#define PER_SLAVE_BUFF_SIZE (256)
+
+#define CS_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CRYPTO_SCHEDULER_DEBUG
+#define CS_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
+ __func__, __LINE__, ## args)
+
+#define CS_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define CS_LOG_INFO(fmt, args...)
+#define CS_LOG_DBG(fmt, args...)
+#endif
+
+struct scheduler_slave {
+ uint8_t dev_id;
+ uint16_t qp_id;
+ uint32_t nb_inflight_cops;
+
+ enum rte_cryptodev_type dev_type;
+};
+
+struct scheduler_ctx {
+ void *private_ctx;
+ /**< private scheduler context pointer */
+
+ struct rte_cryptodev_capabilities *capabilities;
+ uint32_t nb_capabilities;
+
+ uint32_t max_nb_queue_pairs;
+
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ uint32_t nb_slaves;
+
+ enum rte_cryptodev_scheduler_mode mode;
+
+ struct rte_cryptodev_scheduler_ops ops;
+
+ uint8_t reordering_enabled;
+
+ char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
+ char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
+
+ char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ int nb_init_slaves;
+} __rte_cache_aligned;
+
+struct scheduler_qp_ctx {
+ void *private_qp_ctx;
+
+ uint32_t max_nb_objs;
+
+ struct rte_ring *order_ring;
+ uint32_t seqn;
+} __rte_cache_aligned;
+
+struct scheduler_session {
+ struct rte_cryptodev_sym_session *sessions[
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+};
+
+static inline uint16_t __attribute__((always_inline))
+get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
+{
+ uint32_t count = rte_ring_free_count(order_ring);
+
+ return count > nb_ops ? nb_ops : count;
+}
+
+static inline void __attribute__((always_inline))
+scheduler_order_insert(struct rte_ring *order_ring,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
+}
+
+#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do { \
+ struct rte_crypto_op **ring = (void *)&order_ring[1]; \
+ op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
+} while (0)
+
+static inline uint16_t __attribute__((always_inline))
+scheduler_order_drain(struct rte_ring *order_ring,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op *op;
+ uint32_t nb_objs = rte_ring_count(order_ring);
+ uint32_t nb_ops_to_deq = 0;
+ uint32_t nb_ops_deqd = 0;
+
+ if (nb_objs > nb_ops)
+ nb_objs = nb_ops;
+
+ while (nb_ops_to_deq < nb_objs) {
+ SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ break;
+ nb_ops_to_deq++;
+ }
+
+ if (nb_ops_to_deq)
+ nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
+ (void **)ops, nb_ops_to_deq, NULL);
+
+ return nb_ops_deqd;
+}
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
+
+#endif /* _SCHEDULER_PMD_PRIVATE_H */
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
new file mode 100644
index 00000000..01162764
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -0,0 +1,281 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+struct rr_scheduler_qp_ctx {
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ uint32_t nb_slaves;
+
+ uint32_t last_enq_slave_idx;
+ uint32_t last_deq_slave_idx;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rr_scheduler_qp_ctx *rr_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
+ struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
+ uint16_t i, processed_ops;
+ struct rte_cryptodev_sym_session *sessions[nb_ops];
+ struct scheduler_session *sess0, *sess1, *sess2, *sess3;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ for (i = 0; i < nb_ops && i < 4; i++)
+ rte_prefetch0(ops[i]->sym->session);
+
+ for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
+ sess0 = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+ sess1 = (struct scheduler_session *)
+ ops[i+1]->sym->session->_private;
+ sess2 = (struct scheduler_session *)
+ ops[i+2]->sym->session->_private;
+ sess3 = (struct scheduler_session *)
+ ops[i+3]->sym->session->_private;
+
+ sessions[i] = ops[i]->sym->session;
+ sessions[i + 1] = ops[i + 1]->sym->session;
+ sessions[i + 2] = ops[i + 2]->sym->session;
+ sessions[i + 3] = ops[i + 3]->sym->session;
+
+ ops[i]->sym->session = sess0->sessions[slave_idx];
+ ops[i + 1]->sym->session = sess1->sessions[slave_idx];
+ ops[i + 2]->sym->session = sess2->sessions[slave_idx];
+ ops[i + 3]->sym->session = sess3->sessions[slave_idx];
+
+ rte_prefetch0(ops[i + 4]->sym->session);
+ rte_prefetch0(ops[i + 5]->sym->session);
+ rte_prefetch0(ops[i + 6]->sym->session);
+ rte_prefetch0(ops[i + 7]->sym->session);
+ }
+
+ for (; i < nb_ops; i++) {
+ sess0 = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+ sessions[i] = ops[i]->sym->session;
+ ops[i]->sym->session = sess0->sessions[slave_idx];
+ }
+
+ processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+
+ slave->nb_inflight_cops += processed_ops;
+
+ rr_qp_ctx->last_enq_slave_idx += 1;
+ rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
+
+ /* recover session if enqueue is failed */
+ if (unlikely(processed_ops < nb_ops)) {
+ for (i = processed_ops; i < nb_ops; i++)
+ ops[i]->sym->session = sessions[i];
+ }
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rr_scheduler_qp_ctx *rr_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_slave *slave;
+ uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
+ uint16_t nb_deq_ops;
+
+ if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
+ do {
+ last_slave_idx += 1;
+
+ if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
+ last_slave_idx = 0;
+ /* looped back, means no inflight cops in the queue */
+ if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
+ return 0;
+ } while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
+ == 0);
+ }
+
+ slave = &rr_qp_ctx->slaves[last_slave_idx];
+
+ nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+
+ last_slave_idx += 1;
+ last_slave_idx %= rr_qp_ctx->nb_slaves;
+
+ rr_qp_ctx->last_deq_slave_idx = last_slave_idx;
+
+ slave->nb_inflight_cops -= nb_deq_ops;
+
+ return nb_deq_ops;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+
+ schedule_dequeue(qp, ops, nb_ops);
+
+ return scheduler_order_drain(order_ring, ops, nb_ops);
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint16_t i;
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct rr_scheduler_qp_ctx *rr_qp_ctx =
+ qp_ctx->private_qp_ctx;
+ uint32_t j;
+
+ memset(rr_qp_ctx->slaves, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
+ sizeof(struct scheduler_slave));
+ for (j = 0; j < sched_ctx->nb_slaves; j++) {
+ rr_qp_ctx->slaves[j].dev_id =
+ sched_ctx->slaves[j].dev_id;
+ rr_qp_ctx->slaves[j].qp_id = i;
+ }
+
+ rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+
+ rr_qp_ctx->last_enq_slave_idx = 0;
+ rr_qp_ctx->last_deq_slave_idx = 0;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct rr_scheduler_qp_ctx *rr_qp_ctx;
+
+ rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
+ rte_socket_id());
+ if (!rr_qp_ctx) {
+ CS_LOG_ERR("failed allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ NULL, /* option_set */
+ NULL /* option_get */
+};
+
+struct rte_cryptodev_scheduler scheduler = {
+ .name = "roundrobin-scheduler",
+ .description = "scheduler which will round robin burst across "
+ "slave crypto devices",
+ .mode = CDEV_SCHED_MODE_ROUNDROBIN,
+ .ops = &scheduler_rr_ops
+};
+
+struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;
diff --git a/drivers/crypto/snow3g/Makefile b/drivers/crypto/snow3g/Makefile
index bea6760b..ecee80df 100644
--- a/drivers/crypto/snow3g/Makefile
+++ b/drivers/crypto/snow3g/Makefile
@@ -59,11 +59,4 @@ LDLIBS += -L$(LIBSSO_SNOW3G_PATH)/build -lsso_snow3g
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd_ops.c
-# library dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_ring
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_cryptodev
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 0081fec5..960956ca 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -46,27 +46,6 @@
#define SNOW3G_MAX_BURST 8
#define BYTE_LEN 8
-/**
- * Global static parameter used to create a unique name for each SNOW 3G
- * crypto device.
- */
-static unsigned unique_name_id;
-
-static inline int
-create_unique_device_name(char *name, size_t size)
-{
- int ret;
-
- if (name == NULL)
- return -EINVAL;
-
- ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD),
- unique_name_id++);
- if (ret < 0)
- return ret;
- return 0;
-}
-
/** Get xform chain order. */
static enum snow3g_operation
snow3g_get_mode(const struct rte_crypto_sym_xform *xform)
@@ -330,6 +309,21 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
unsigned i;
unsigned enqueued_ops, processed_ops;
+#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
+ for (i = 0; i < num_ops; i++) {
+ if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+ (ops[i]->sym->m_dst != NULL &&
+ !rte_pktmbuf_is_contiguous(
+ ops[i]->sym->m_dst))) {
+ SNOW3G_LOG_ERR("PMD supports only contiguous mbufs, "
+ "op (%p) provides noncontiguous mbuf as "
+ "source/destination buffer.\n", ops[i]);
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return 0;
+ }
+ }
+#endif
+
switch (session->op) {
case SNOW3G_OP_ONLY_CIPHER:
processed_ops = process_snow3g_cipher_op(ops,
@@ -369,7 +363,7 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
}
enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)ops, processed_ops);
+ (void **)ops, processed_ops, NULL);
qp->qp_stats.enqueued_count += enqueued_ops;
*accumulated_enqueued_ops += enqueued_ops;
@@ -420,7 +414,7 @@ process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
}
enqueued_op = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)&op, processed_op);
+ (void **)&op, processed_op, NULL);
qp->qp_stats.enqueued_count += enqueued_op;
*accumulated_enqueued_ops += enqueued_op;
@@ -539,23 +533,27 @@ snow3g_pmd_dequeue_burst(void *queue_pair,
unsigned nb_dequeued;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)c_ops, nb_ops);
+ (void **)c_ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}
-static int cryptodev_snow3g_remove(const char *name);
+static int cryptodev_snow3g_remove(struct rte_vdev_device *vdev);
static int
cryptodev_snow3g_create(const char *name,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
- char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct snow3g_private *internals;
uint64_t cpu_flags = 0;
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
+
/* Check CPU for supported vector instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
@@ -564,15 +562,7 @@ cryptodev_snow3g_create(const char *name,
return -EFAULT;
}
-
- /* Create a unique device name. */
- if (create_unique_device_name(crypto_dev_name,
- RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
- SNOW3G_LOG_ERR("failed to create unique cryptodev name");
- return -EINVAL;
- }
-
- dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
sizeof(struct snow3g_private), init_params->socket_id);
if (dev == NULL) {
SNOW3G_LOG_ERR("failed to create cryptodev vdev");
@@ -597,37 +587,51 @@ cryptodev_snow3g_create(const char *name,
return 0;
init_error:
- SNOW3G_LOG_ERR("driver %s: cryptodev_snow3g_create failed", name);
+ SNOW3G_LOG_ERR("driver %s: cryptodev_snow3g_create failed",
+ init_params->name);
- cryptodev_snow3g_remove(crypto_dev_name);
+ cryptodev_snow3g_remove(vdev);
return -EFAULT;
}
static int
-cryptodev_snow3g_probe(const char *name,
- const char *input_args)
+cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
{
struct rte_crypto_vdev_init_params init_params = {
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
- rte_socket_id()
+ rte_socket_id(),
+ {0}
};
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
init_params.max_nb_queue_pairs);
RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
init_params.max_nb_sessions);
- return cryptodev_snow3g_create(name, &init_params);
+ return cryptodev_snow3g_create(name, vdev, &init_params);
}
static int
-cryptodev_snow3g_remove(const char *name)
+cryptodev_snow3g_remove(struct rte_vdev_device *vdev)
{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
index 4602dfd4..7ce96be9 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -89,7 +89,8 @@ static const struct rte_cryptodev_capabilities snow3g_pmd_capabilities[] = {
/** Configure device */
static int
-snow3g_pmd_config(__rte_unused struct rte_cryptodev *dev)
+snow3g_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
{
return 0;
}
@@ -198,7 +199,7 @@ snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
- if (r->prod.size >= ring_size) {
+ if (rte_ring_get_size(r) >= ring_size) {
SNOW3G_LOG_INFO("Reusing existing ring %s"
" for processed packets",
qp->name);
diff --git a/drivers/crypto/zuc/Makefile b/drivers/crypto/zuc/Makefile
index b15eb0f6..f543b407 100644
--- a/drivers/crypto/zuc/Makefile
+++ b/drivers/crypto/zuc/Makefile
@@ -59,11 +59,4 @@ LDLIBS += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd_ops.c
-# library dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_ring
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_cryptodev
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index 7057fcac..1020544b 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -45,27 +45,6 @@
#define ZUC_MAX_BURST 8
#define BYTE_LEN 8
-/**
- * Global static parameter used to create a unique name for each ZUC
- * crypto device.
- */
-static unsigned unique_name_id;
-
-static inline int
-create_unique_device_name(char *name, size_t size)
-{
- int ret;
-
- if (name == NULL)
- return -EINVAL;
-
- ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_ZUC_PMD),
- unique_name_id++);
- if (ret < 0)
- return ret;
- return 0;
-}
-
/** Get xform chain order. */
static enum zuc_operation
zuc_get_mode(const struct rte_crypto_sym_xform *xform)
@@ -213,6 +192,19 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
break;
}
+#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
+ if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+ (ops[i]->sym->m_dst != NULL &&
+ !rte_pktmbuf_is_contiguous(
+ ops[i]->sym->m_dst))) {
+ ZUC_LOG_ERR("PMD supports only contiguous mbufs, "
+ "op (%p) provides noncontiguous mbuf as "
+ "source/destination buffer.\n", ops[i]);
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+#endif
+
src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
dst[i] = ops[i]->sym->m_dst ?
@@ -347,7 +339,7 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
}
enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
- (void **)ops, processed_ops);
+ (void **)ops, processed_ops, NULL);
qp->qp_stats.enqueued_count += enqueued_ops;
*accumulated_enqueued_ops += enqueued_ops;
@@ -441,23 +433,27 @@ zuc_pmd_dequeue_burst(void *queue_pair,
unsigned nb_dequeued;
nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
- (void **)c_ops, nb_ops);
+ (void **)c_ops, nb_ops, NULL);
qp->qp_stats.dequeued_count += nb_dequeued;
return nb_dequeued;
}
-static int cryptodev_zuc_remove(const char *name);
+static int cryptodev_zuc_remove(struct rte_vdev_device *vdev);
static int
cryptodev_zuc_create(const char *name,
+ struct rte_vdev_device *vdev,
struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
- char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct zuc_private *internals;
uint64_t cpu_flags = 0;
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
+
/* Check CPU for supported vector instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
@@ -466,15 +462,7 @@ cryptodev_zuc_create(const char *name,
return -EFAULT;
}
-
- /* Create a unique device name. */
- if (create_unique_device_name(crypto_dev_name,
- RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
- ZUC_LOG_ERR("failed to create unique cryptodev name");
- return -EINVAL;
- }
-
- dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
sizeof(struct zuc_private), init_params->socket_id);
if (dev == NULL) {
ZUC_LOG_ERR("failed to create cryptodev vdev");
@@ -499,37 +487,51 @@ cryptodev_zuc_create(const char *name,
return 0;
init_error:
- ZUC_LOG_ERR("driver %s: cryptodev_zuc_create failed", name);
+ ZUC_LOG_ERR("driver %s: cryptodev_zuc_create failed",
+ init_params->name);
- cryptodev_zuc_remove(crypto_dev_name);
+ cryptodev_zuc_remove(vdev);
return -EFAULT;
}
static int
-cryptodev_zuc_probe(const char *name,
- const char *input_args)
+cryptodev_zuc_probe(struct rte_vdev_device *vdev)
{
struct rte_crypto_vdev_init_params init_params = {
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
- rte_socket_id()
+ rte_socket_id(),
+ {0}
};
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
init_params.max_nb_queue_pairs);
RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
init_params.max_nb_sessions);
- return cryptodev_zuc_create(name, &init_params);
+ return cryptodev_zuc_create(name, vdev, &init_params);
}
static int
-cryptodev_zuc_remove(const char *name)
+cryptodev_zuc_remove(struct rte_vdev_device *vdev)
{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
index 2c886d51..e793459c 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_ops.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
@@ -89,7 +89,8 @@ static const struct rte_cryptodev_capabilities zuc_pmd_capabilities[] = {
/** Configure device */
static int
-zuc_pmd_config(__rte_unused struct rte_cryptodev *dev)
+zuc_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
{
return 0;
}
@@ -198,7 +199,7 @@ zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
- if (r->prod.size >= ring_size) {
+ if (rte_ring_get_size(r) >= ring_size) {
ZUC_LOG_INFO("Reusing existing ring %s"
" for processed packets",
qp->name);
diff --git a/drivers/event/Makefile b/drivers/event/Makefile
new file mode 100644
index 00000000..1cf389e8
--- /dev/null
+++ b/drivers/event/Makefile
@@ -0,0 +1,43 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Cavium networks. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+core-libs := librte_eal librte_eventdev
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton
+DEPDIRS-skeleton = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw
+DEPDIRS-sw = $(core-libs) librte_kvargs librte_ring
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx
+DEPDIRS-octeontx = $(core-libs)
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
new file mode 100644
index 00000000..aca3d095
--- /dev/null
+++ b/drivers/event/octeontx/Makefile
@@ -0,0 +1,70 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Cavium Networks. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium Networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_octeontx_ssovf.a
+
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_octeontx_ssovf_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_worker.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_probe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_mbox.c
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_ssovf_worker.o += -fno-prefetch-loop-arrays
+
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_ssovf_worker.o += -Ofast
+else
+CFLAGS_ssovf_worker.o += -O3 -ffast-math
+endif
+
+else
+CFLAGS_ssovf_worker.o += -Ofast
+endif
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF)-include := rte_pmd_octeontx_ssovf.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h b/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
new file mode 100644
index 00000000..3da7cfdd
--- /dev/null
+++ b/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
@@ -0,0 +1,61 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTE_PMD_OCTEONTX_SSOVF_H__
+#define __RTE_PMD_OCTEONTX_SSOVF_H__
+
+#include <rte_common.h>
+
+struct octeontx_ssovf_info {
+ uint16_t domain; /* Domain id */
+ uint8_t total_ssovfs; /* Total sso groups available in domain */
+ uint8_t total_ssowvfs;/* Total sso hws available in domain */
+};
+
+enum octeontx_ssovf_type {
+ OCTEONTX_SSO_GROUP, /* SSO group vf */
+ OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */
+};
+
+struct octeontx_mbox_hdr {
+ uint16_t vfid; /* VF index or pf resource index local to the domain */
+ uint8_t coproc; /* Coprocessor id */
+ uint8_t msg; /* Message id */
+ uint8_t res_code; /* Functional layer response code */
+};
+
+int octeontx_ssovf_info(struct octeontx_ssovf_info *info);
+void *octeontx_ssovf_bar(enum octeontx_ssovf_type, uint8_t id, uint8_t bar);
+int octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr,
+ void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
+
+#endif /* __RTE_PMD_OCTEONTX_SSOVF_H__ */
diff --git a/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map b/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map
new file mode 100644
index 00000000..3810a03f
--- /dev/null
+++ b/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map
@@ -0,0 +1,9 @@
+DPDK_17.05 {
+ global:
+
+ octeontx_ssovf_info;
+ octeontx_ssovf_bar;
+ octeontx_ssovf_mbox_send;
+
+ local: *;
+};
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
new file mode 100644
index 00000000..c80a4437
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -0,0 +1,580 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_vdev.h>
+
+#include "ssovf_evdev.h"
+
+/* SSOPF Mailbox messages */
+
+struct ssovf_mbox_dev_info {
+ uint64_t min_deq_timeout_ns;
+ uint64_t max_deq_timeout_ns;
+ uint32_t max_num_events;
+};
+
+static int
+ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ uint16_t len = sizeof(struct ssovf_mbox_dev_info);
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_GET_DEV_INFO;
+ hdr.vfid = 0;
+
+ memset(info, 0, len);
+ return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len);
+}
+
+struct ssovf_mbox_getwork_wait {
+ uint64_t wait_ns;
+};
+
+static int
+ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ struct ssovf_mbox_getwork_wait tmo_set;
+ uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
+ int ret;
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_SET_GETWORK_WAIT;
+ hdr.vfid = 0;
+
+ tmo_set.wait_ns = timeout_ns;
+ ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0);
+ if (ret)
+ ssovf_log_err("Failed to set getwork timeout(%d)", ret);
+
+ return ret;
+}
+
+struct ssovf_mbox_grp_pri {
+ uint8_t wgt_left; /* Read only */
+ uint8_t weight;
+ uint8_t affinity;
+ uint8_t priority;
+};
+
+static int
+ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ struct ssovf_mbox_grp_pri grp;
+ uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
+ int ret;
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_GRP_SET_PRIORITY;
+ hdr.vfid = queue;
+
+ grp.weight = 0xff;
+ grp.affinity = 0xff;
+ grp.priority = prio / 32; /* Normalize to 0 to 7 */
+
+ ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0);
+ if (ret)
+ ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);
+
+ return ret;
+}
+
+struct ssovf_mbox_convert_ns_getworks_iter {
+ uint64_t wait_ns;
+ uint32_t getwork_iter;/* Get_work iterations for the given wait_ns */
+};
+
+static int
+ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
+ uint16_t len = sizeof(ns2iter);
+ int ret;
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
+ hdr.vfid = 0;
+
+ memset(&ns2iter, 0, len);
+ ns2iter.wait_ns = ns;
+ ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
+ if (ret < 0 || (ret != len)) {
+ ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
+ return -EIO;
+ }
+
+ *tmo_ticks = ns2iter.getwork_iter;
+ return 0;
+}
+
+static void
+ssovf_fastpath_fns_set(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ dev->schedule = NULL;
+ dev->enqueue = ssows_enq;
+ dev->enqueue_burst = ssows_enq_burst;
+ dev->dequeue = ssows_deq;
+ dev->dequeue_burst = ssows_deq_burst;
+
+ if (edev->is_timeout_deq) {
+ dev->dequeue = ssows_deq_timeout;
+ dev->dequeue_burst = ssows_deq_timeout_burst;
+ }
+}
+
+static void
+ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
+ dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
+ dev_info->max_event_queues = edev->max_event_queues;
+ dev_info->max_event_queue_flows = (1ULL << 20);
+ dev_info->max_event_queue_priority_levels = 8;
+ dev_info->max_event_priority_levels = 1;
+ dev_info->max_event_ports = edev->max_event_ports;
+ dev_info->max_event_port_dequeue_depth = 1;
+ dev_info->max_event_port_enqueue_depth = 1;
+ dev_info->max_num_events = edev->max_num_events;
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+ RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
+}
+
+static int
+ssovf_configure(const struct rte_eventdev *dev)
+{
+ struct rte_event_dev_config *conf = &dev->data->dev_conf;
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ uint64_t deq_tmo_ns;
+
+ ssovf_func_trace();
+ deq_tmo_ns = conf->dequeue_timeout_ns;
+
+ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+ edev->is_timeout_deq = 1;
+ deq_tmo_ns = edev->min_deq_timeout_ns;
+ }
+ edev->nb_event_queues = conf->nb_event_queues;
+ edev->nb_event_ports = conf->nb_event_ports;
+
+ return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
+}
+
+static void
+ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ queue_conf->nb_atomic_flows = (1ULL << 20);
+ queue_conf->nb_atomic_order_sequences = (1ULL << 20);
+ queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static void
+ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static int
+ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ RTE_SET_USED(dev);
+ ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);
+
+ return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
+}
+
+static void
+ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ RTE_SET_USED(port_id);
+ port_conf->new_event_threshold = edev->max_num_events;
+ port_conf->dequeue_depth = 1;
+ port_conf->enqueue_depth = 1;
+}
+
+static void
+ssovf_port_release(void *port)
+{
+ rte_free(port);
+}
+
+static int
+ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct ssows *ws;
+ uint32_t reg_off;
+ uint8_t q;
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ ssovf_func_trace("port=%d", port_id);
+ RTE_SET_USED(port_conf);
+
+ /* Free memory prior to re-allocation if needed */
+ if (dev->data->ports[port_id] != NULL) {
+ ssovf_port_release(dev->data->ports[port_id]);
+ dev->data->ports[port_id] = NULL;
+ }
+
+ /* Allocate event port memory */
+ ws = rte_zmalloc_socket("eventdev ssows",
+ sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
+ dev->data->socket_id);
+ if (ws == NULL) {
+ ssovf_log_err("Failed to alloc memory for port=%d", port_id);
+ return -ENOMEM;
+ }
+
+ ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
+ if (ws->base == NULL) {
+ rte_free(ws);
+ ssovf_log_err("Failed to get hws base addr port=%d", port_id);
+ return -EINVAL;
+ }
+
+ reg_off = SSOW_VHWS_OP_GET_WORK0;
+ reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
+ reg_off |= 1 << 16; /* Wait */
+ ws->getwork = ws->base + reg_off;
+ ws->port = port_id;
+
+ for (q = 0; q < edev->nb_event_queues; q++) {
+ ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
+ if (ws->grps[q] == NULL) {
+ rte_free(ws);
+ ssovf_log_err("Failed to get grp%d base addr", q);
+ return -EINVAL;
+ }
+ }
+
+ dev->data->ports[port_id] = ws;
+ ssovf_log_dbg("port=%d ws=%p", port_id, ws);
+ return 0;
+}
+
+static int
+ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
+ const uint8_t priorities[], uint16_t nb_links)
+{
+ uint16_t link;
+ uint64_t val;
+ struct ssows *ws = port;
+
+ ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
+ RTE_SET_USED(dev);
+ RTE_SET_USED(priorities);
+
+ for (link = 0; link < nb_links; link++) {
+ val = queues[link];
+ val |= (1ULL << 24); /* Set membership */
+ ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
+ }
+ return (int)nb_links;
+}
+
+static int
+ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
+ uint16_t nb_unlinks)
+{
+ uint16_t unlink;
+ uint64_t val;
+ struct ssows *ws = port;
+
+ ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_unlinks);
+ RTE_SET_USED(dev);
+
+ for (unlink = 0; unlink < nb_unlinks; unlink++) {
+ val = queues[unlink];
+ val &= ~(1ULL << 24); /* Clear membership */
+ ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
+ }
+ return (int)nb_unlinks;
+}
+
+static int
+ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
+{
+ RTE_SET_USED(dev);
+
+ return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
+}
+
+static void
+ssows_dump(struct ssows *ws, FILE *f)
+{
+ uint8_t *base = ws->base;
+ uint64_t val;
+
+ fprintf(f, "\t---------------port%d---------------\n", ws->port);
+ val = ssovf_read64(base + SSOW_VHWS_TAG);
+ fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
+ (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
+ (int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
+ (int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
+ (int)(val >> 63) & 0x1);
+
+ val = ssovf_read64(base + SSOW_VHWS_WQP);
+ fprintf(f, "\twqp=0x%"PRIx64"\n", val);
+
+ val = ssovf_read64(base + SSOW_VHWS_LINKS);
+ fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
+ (int)(val & 0x3ff), (int)(val >> 10) & 0x1,
+ (int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
+ (int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);
+
+ val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
+ fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
+ (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
+ (int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
+ (int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
+ (int)(val >> 63) & 0x1);
+
+ val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
+ fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
+}
+
+static void
+ssovf_dump(struct rte_eventdev *dev, FILE *f)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ uint8_t port;
+
+ /* Dump SSOWVF debug registers */
+ for (port = 0; port < edev->nb_event_ports; port++)
+ ssows_dump(dev->data->ports[port], f);
+}
+
+static int
+ssovf_start(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ struct ssows *ws;
+ uint8_t *base;
+ uint8_t i;
+
+ ssovf_func_trace();
+ for (i = 0; i < edev->nb_event_ports; i++) {
+ ws = dev->data->ports[i];
+ ssows_reset(ws);
+ ws->swtag_req = 0;
+ }
+
+ for (i = 0; i < edev->nb_event_queues; i++) {
+ /* Consume all the events through HWS0 */
+ ssows_flush_events(dev->data->ports[0], i);
+
+ base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base += SSO_VHGRP_QCTL;
+ ssovf_write64(1, base); /* Enable SSO group */
+ }
+
+ ssovf_fastpath_fns_set(dev);
+ return 0;
+}
+
+static void
+ssovf_stop(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ struct ssows *ws;
+ uint8_t *base;
+ uint8_t i;
+
+ ssovf_func_trace();
+ for (i = 0; i < edev->nb_event_ports; i++) {
+ ws = dev->data->ports[i];
+ ssows_reset(ws);
+ ws->swtag_req = 0;
+ }
+
+ for (i = 0; i < edev->nb_event_queues; i++) {
+ /* Consume all the events through HWS0 */
+ ssows_flush_events(dev->data->ports[0], i);
+
+ base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base += SSO_VHGRP_QCTL;
+ ssovf_write64(0, base); /* Disable SSO group */
+ }
+}
+
+static int
+ssovf_close(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint8_t i;
+
+ for (i = 0; i < edev->nb_event_queues; i++)
+ all_queues[i] = i;
+
+ for (i = 0; i < edev->nb_event_ports; i++)
+ ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
+ edev->nb_event_queues);
+ return 0;
+}
+
+/* Initialize and register event driver with DPDK Application */
+static const struct rte_eventdev_ops ssovf_ops = {
+ .dev_infos_get = ssovf_info_get,
+ .dev_configure = ssovf_configure,
+ .queue_def_conf = ssovf_queue_def_conf,
+ .queue_setup = ssovf_queue_setup,
+ .queue_release = ssovf_queue_release,
+ .port_def_conf = ssovf_port_def_conf,
+ .port_setup = ssovf_port_setup,
+ .port_release = ssovf_port_release,
+ .port_link = ssovf_port_link,
+ .port_unlink = ssovf_port_unlink,
+ .timeout_ticks = ssovf_timeout_ticks,
+ .dump = ssovf_dump,
+ .dev_start = ssovf_start,
+ .dev_stop = ssovf_stop,
+ .dev_close = ssovf_close
+};
+
+static int
+ssovf_vdev_probe(struct rte_vdev_device *vdev)
+{
+ struct octeontx_ssovf_info oinfo;
+ struct ssovf_mbox_dev_info info;
+ struct ssovf_evdev *edev;
+ struct rte_eventdev *eventdev;
+ static int ssovf_init_once;
+ const char *name;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+ /* More than one instance is not supported */
+ if (ssovf_init_once) {
+ ssovf_log_err("Request to create >1 %s instance", name);
+ return -EINVAL;
+ }
+
+ eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
+ rte_socket_id());
+ if (eventdev == NULL) {
+ ssovf_log_err("Failed to create eventdev vdev %s", name);
+ return -ENOMEM;
+ }
+ eventdev->dev_ops = &ssovf_ops;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ ssovf_fastpath_fns_set(eventdev);
+ return 0;
+ }
+
+ ret = octeontx_ssovf_info(&oinfo);
+ if (ret) {
+ ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
+ goto error;
+ }
+
+ edev = ssovf_pmd_priv(eventdev);
+ edev->max_event_ports = oinfo.total_ssowvfs;
+ edev->max_event_queues = oinfo.total_ssovfs;
+ edev->is_timeout_deq = 0;
+
+ ret = ssovf_mbox_dev_info(&info);
+ if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
+ ssovf_log_err("Failed to get mbox devinfo %d", ret);
+ goto error;
+ }
+
+ edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
+ edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
+ edev->max_num_events = info.max_num_events;
+ ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d",
+ info.min_deq_timeout_ns, info.max_deq_timeout_ns,
+ info.max_num_events);
+
+ if (!edev->max_event_ports || !edev->max_event_queues) {
+ ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
+ edev->max_event_queues, edev->max_event_ports);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
+ name, oinfo.domain, edev->max_event_queues,
+ edev->max_event_ports);
+
+ ssovf_init_once = 1;
+ return 0;
+
+error:
+ rte_event_pmd_vdev_uninit(name);
+ return ret;
+}
+
+static int
+ssovf_vdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ ssovf_log_info("Closing %s", name);
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_ssovf_pmd = {
+ .probe = ssovf_vdev_probe,
+ .remove = ssovf_vdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
new file mode 100644
index 00000000..6e0a3521
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -0,0 +1,203 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SSOVF_EVDEV_H__
+#define __SSOVF_EVDEV_H__
+
+#include <rte_config.h>
+#include <rte_eventdev_pmd.h>
+#include <rte_io.h>
+
+#include "rte_pmd_octeontx_ssovf.h"
+
+#define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
+
+#ifdef RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG
+#define ssovf_log_info(fmt, args...) \
+ RTE_LOG(INFO, EVENTDEV, "[%s] %s() " fmt "\n", \
+ RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
+#define ssovf_log_dbg(fmt, args...) \
+ RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() " fmt "\n", \
+ RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
+#else
+#define ssovf_log_info(fmt, args...)
+#define ssovf_log_dbg(fmt, args...)
+#endif
+
+#define ssovf_func_trace ssovf_log_dbg
+#define ssovf_log_err(fmt, args...) \
+ RTE_LOG(ERR, EVENTDEV, "[%s] %s() " fmt "\n", \
+ RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B
+#define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF 0xA04D
+
+#define SSO_MAX_VHGRP (64)
+#define SSO_MAX_VHWS (32)
+
+/* SSO VF register offsets */
+#define SSO_VHGRP_QCTL (0x010ULL)
+#define SSO_VHGRP_INT (0x100ULL)
+#define SSO_VHGRP_INT_W1S (0x108ULL)
+#define SSO_VHGRP_INT_ENA_W1S (0x110ULL)
+#define SSO_VHGRP_INT_ENA_W1C (0x118ULL)
+#define SSO_VHGRP_INT_THR (0x140ULL)
+#define SSO_VHGRP_INT_CNT (0x180ULL)
+#define SSO_VHGRP_XAQ_CNT (0x1B0ULL)
+#define SSO_VHGRP_AQ_CNT (0x1C0ULL)
+#define SSO_VHGRP_AQ_THR (0x1E0ULL)
+#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
+
+/* BAR2 */
+#define SSO_VHGRP_OP_ADD_WORK0 (0x00ULL)
+#define SSO_VHGRP_OP_ADD_WORK1 (0x08ULL)
+
+/* SSOW VF register offsets (BAR0) */
+#define SSOW_VHWS_GRPMSK_CHGX(x) (0x080ULL | ((x) << 3))
+#define SSOW_VHWS_TAG (0x300ULL)
+#define SSOW_VHWS_WQP (0x308ULL)
+#define SSOW_VHWS_LINKS (0x310ULL)
+#define SSOW_VHWS_PENDTAG (0x340ULL)
+#define SSOW_VHWS_PENDWQP (0x348ULL)
+#define SSOW_VHWS_SWTP (0x400ULL)
+#define SSOW_VHWS_OP_ALLOC_WE (0x410ULL)
+#define SSOW_VHWS_OP_UPD_WQP_GRP0 (0x440ULL)
+#define SSOW_VHWS_OP_UPD_WQP_GRP1 (0x448ULL)
+#define SSOW_VHWS_OP_SWTAG_UNTAG (0x490ULL)
+#define SSOW_VHWS_OP_SWTAG_CLR (0x820ULL)
+#define SSOW_VHWS_OP_DESCHED (0x860ULL)
+#define SSOW_VHWS_OP_DESCHED_NOSCH (0x870ULL)
+#define SSOW_VHWS_OP_SWTAG_DESCHED (0x8C0ULL)
+#define SSOW_VHWS_OP_SWTAG_NOSCHED (0x8D0ULL)
+#define SSOW_VHWS_OP_SWTP_SET (0xC20ULL)
+#define SSOW_VHWS_OP_SWTAG_NORM (0xC80ULL)
+#define SSOW_VHWS_OP_SWTAG_FULL0 (0xCA0UL)
+#define SSOW_VHWS_OP_SWTAG_FULL1 (0xCA8ULL)
+#define SSOW_VHWS_OP_CLR_NSCHED (0x10000ULL)
+#define SSOW_VHWS_OP_GET_WORK0 (0x80000ULL)
+#define SSOW_VHWS_OP_GET_WORK1 (0x80008ULL)
+
+#define SSOW_BAR4_LEN (64 * 1024)
+
+/* Mailbox message constants */
+#define SSO_COPROC 0x2
+
+#define SSO_GETDOMAINCFG 0x1
+#define SSO_IDENTIFY 0x2
+#define SSO_GET_DEV_INFO 0x3
+#define SSO_GET_GETWORK_WAIT 0x4
+#define SSO_SET_GETWORK_WAIT 0x5
+#define SSO_CONVERT_NS_GETWORK_ITER 0x6
+#define SSO_GRP_GET_PRIORITY 0x7
+#define SSO_GRP_SET_PRIORITY 0x8
+
+/*
+ * In Cavium OcteonTX SoC, all accesses to the device registers are
+ * implictly strongly ordered. So, The relaxed version of IO operation is
+ * safe to use with out any IO memory barriers.
+ */
+#define ssovf_read64 rte_read64_relaxed
+#define ssovf_write64 rte_write64_relaxed
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define ssovf_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define ssovf_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1]]" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+#else /* Un optimized functions for building on non arm64 arch */
+
+#define ssovf_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64(addr); \
+ val1 = rte_read64(((uint8_t *)addr) + 8); \
+} while (0)
+
+#define ssovf_store_pair(val0, val1, addr) \
+do { \
+ rte_write64(val0, addr); \
+ rte_write64(val1, (((uint8_t *)addr) + 8)); \
+} while (0)
+#endif
+
+
+struct ssovf_evdev {
+ uint8_t max_event_queues;
+ uint8_t max_event_ports;
+ uint8_t is_timeout_deq;
+ uint8_t nb_event_queues;
+ uint8_t nb_event_ports;
+ uint32_t min_deq_timeout_ns;
+ uint32_t max_deq_timeout_ns;
+ int32_t max_num_events;
+} __rte_cache_aligned;
+
+/* Event port aka HWS */
+struct ssows {
+ uint8_t cur_tt;
+ uint8_t cur_grp;
+ uint8_t swtag_req;
+ uint8_t *base;
+ uint8_t *getwork;
+ uint8_t *grps[SSO_MAX_VHGRP];
+ uint8_t port;
+} __rte_cache_aligned;
+
+static inline struct ssovf_evdev *
+ssovf_pmd_priv(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+uint16_t ssows_enq(void *port, const struct rte_event *ev);
+uint16_t ssows_enq_burst(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks);
+uint16_t ssows_deq_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks);
+void ssows_flush_events(struct ssows *ws, uint8_t queue_id);
+void ssows_reset(struct ssows *ws);
+
+#endif /* __SSOVF_EVDEV_H__ */
diff --git a/drivers/event/octeontx/ssovf_mbox.c b/drivers/event/octeontx/ssovf_mbox.c
new file mode 100644
index 00000000..7394a3a9
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_mbox.c
@@ -0,0 +1,232 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_spinlock.h>
+
+#include "ssovf_evdev.h"
+
+/* Mbox operation timeout in seconds */
+#define MBOX_WAIT_TIME_SEC 3
+#define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)
+
+/* Mbox channel state */
+enum {
+ MBOX_CHAN_STATE_REQ = 1,
+ MBOX_CHAN_STATE_RES = 0,
+};
+
+/* Response messages */
+enum {
+ MBOX_RET_SUCCESS,
+ MBOX_RET_INVALID,
+ MBOX_RET_INTERNAL_ERR,
+};
+
+struct mbox {
+ int init_once;
+ uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
+ uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
+ uint16_t tag_own; /* Last tag which was written to own channel */
+ rte_spinlock_t lock;
+};
+
+static struct mbox octeontx_mbox;
+
+/*
+ * Structure used for mbox synchronization
+ * This structure sits at the begin of Mbox RAM and used as main
+ * synchronization point for channel communication
+ */
+struct mbox_ram_hdr {
+ union {
+ uint64_t u64;
+ struct {
+ uint8_t chan_state : 1;
+ uint8_t coproc : 7;
+ uint8_t msg;
+ uint8_t vfid;
+ uint8_t res_code;
+ uint16_t tag;
+ uint16_t len;
+ };
+ };
+};
+
+static inline void
+mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
+ const void *txmsg, uint16_t txsize)
+{
+ struct mbox_ram_hdr old_hdr;
+ struct mbox_ram_hdr new_hdr = { {0} };
+ uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
+ uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
+
+ /*
+ * Initialize the channel with the tag left by last send.
+ * On success full mbox send complete, PF increments the tag by one.
+ * The sender can validate integrity of PF message with this scheme
+ */
+ old_hdr.u64 = rte_read64(ram_mbox_hdr);
+ m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */
+
+ /* Copy msg body */
+ if (txmsg)
+ memcpy(ram_mbox_msg, txmsg, txsize);
+
+ /* Prepare new hdr */
+ new_hdr.chan_state = MBOX_CHAN_STATE_REQ;
+ new_hdr.coproc = hdr->coproc;
+ new_hdr.msg = hdr->msg;
+ new_hdr.vfid = hdr->vfid;
+ new_hdr.tag = m->tag_own;
+ new_hdr.len = txsize;
+
+ /* Write the msg header */
+ rte_write64(new_hdr.u64, ram_mbox_hdr);
+ rte_io_wmb();
+ /* Notify PF about the new msg - write to MBOX reg generates PF IRQ */
+ rte_write64(0, m->reg);
+}
+
+static inline int
+mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
+ void *rxmsg, uint16_t rxsize)
+{
+ int res = 0, wait;
+ uint16_t len;
+ struct mbox_ram_hdr rx_hdr;
+ uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
+ uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
+
+ /* Wait for response */
+ wait = MBOX_WAIT_TIME_SEC * 1000 * 10;
+ while (wait > 0) {
+ rte_delay_us(100);
+ rx_hdr.u64 = rte_read64(ram_mbox_hdr);
+ if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES)
+ break;
+ --wait;
+ }
+
+ hdr->res_code = rx_hdr.res_code;
+ m->tag_own++;
+
+ /* Timeout */
+ if (wait <= 0) {
+ res = -ETIMEDOUT;
+ goto error;
+ }
+
+ /* Tag mismatch */
+ if (m->tag_own != rx_hdr.tag) {
+ res = -EINVAL;
+ goto error;
+ }
+
+ /* PF nacked the msg */
+ if (rx_hdr.res_code != MBOX_RET_SUCCESS) {
+ res = -EBADMSG;
+ goto error;
+ }
+
+ len = RTE_MIN(rx_hdr.len, rxsize);
+ if (rxmsg)
+ memcpy(rxmsg, ram_mbox_msg, len);
+
+ return len;
+
+error:
+ ssovf_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
+ m->tag_own, rx_hdr.tag, hdr->msg, hdr->coproc, res,
+ hdr->res_code);
+ return res;
+}
+
+static inline int
+mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
+ uint16_t txsize, void *rxmsg, uint16_t rxsize)
+{
+ int res = -EINVAL;
+
+ if (m->init_once == 0 || hdr == NULL ||
+ txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
+ ssovf_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
+ m->init_once, hdr, txsize, rxsize);
+ return res;
+ }
+
+ rte_spinlock_lock(&m->lock);
+
+ mbox_send_request(m, hdr, txmsg, txsize);
+ res = mbox_wait_response(m, hdr, rxmsg, rxsize);
+
+ rte_spinlock_unlock(&m->lock);
+ return res;
+}
+
+static inline int
+mbox_setup(struct mbox *m)
+{
+ if (unlikely(m->init_once == 0)) {
+ rte_spinlock_init(&m->lock);
+ m->ram_mbox_base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
+ m->reg = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
+ m->reg += SSO_VHGRP_PF_MBOX(1);
+
+ if (m->ram_mbox_base == NULL || m->reg == NULL) {
+ ssovf_log_err("Invalid ram_mbox_base=%p or reg=%p",
+ m->ram_mbox_base, m->reg);
+ return -EINVAL;
+ }
+ m->init_once = 1;
+ }
+ return 0;
+}
+
+int
+octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
+ uint16_t txlen, void *rxdata, uint16_t rxlen)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY || mbox_setup(m))
+ return -EINVAL;
+
+ return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
+}
diff --git a/drivers/event/octeontx/ssovf_probe.c b/drivers/event/octeontx/ssovf_probe.c
new file mode 100644
index 00000000..b644ebde
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_probe.c
@@ -0,0 +1,288 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+
+#include "ssovf_evdev.h"
+
+struct ssovf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+};
+
+struct ssowvf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+ void *bar4;
+};
+
+struct ssowvf_identify {
+ uint16_t domain;
+ uint16_t vfid;
+};
+
+struct ssodev {
+ uint8_t total_ssovfs;
+ uint8_t total_ssowvfs;
+ struct ssovf_res grp[SSO_MAX_VHGRP];
+ struct ssowvf_res hws[SSO_MAX_VHWS];
+};
+
+static struct ssodev sdev;
+
+/* Interface functions */
+int
+octeontx_ssovf_info(struct octeontx_ssovf_info *info)
+{
+ uint8_t i;
+ uint16_t domain;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY || info == NULL)
+ return -EINVAL;
+
+ if (sdev.total_ssovfs == 0 || sdev.total_ssowvfs == 0)
+ return -ENODEV;
+
+ domain = sdev.grp[0].domain;
+ for (i = 0; i < sdev.total_ssovfs; i++) {
+ /* Check vfid's are contiguous and belong to same domain */
+ if (sdev.grp[i].vfid != i ||
+ sdev.grp[i].bar0 == NULL ||
+ sdev.grp[i].domain != domain) {
+ ssovf_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
+ i, sdev.grp[i].vfid,
+ domain, sdev.grp[i].domain,
+ sdev.grp[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < sdev.total_ssowvfs; i++) {
+ /* Check vfid's are contiguous and belong to same domain */
+ if (sdev.hws[i].vfid != i ||
+ sdev.hws[i].bar0 == NULL ||
+ sdev.hws[i].domain != domain) {
+ ssovf_log_err("HWS error, vfid=%d/%d domain=%d/%d %p",
+ i, sdev.hws[i].vfid,
+ domain, sdev.hws[i].domain,
+ sdev.hws[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ info->domain = domain;
+ info->total_ssovfs = sdev.total_ssovfs;
+ info->total_ssowvfs = sdev.total_ssowvfs;
+ return 0;
+}
+
+void*
+octeontx_ssovf_bar(enum octeontx_ssovf_type type, uint8_t id, uint8_t bar)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+ type > OCTEONTX_SSO_HWS)
+ return NULL;
+
+ if (type == OCTEONTX_SSO_GROUP) {
+ if (id >= sdev.total_ssovfs)
+ return NULL;
+ } else {
+ if (id >= sdev.total_ssowvfs)
+ return NULL;
+ }
+
+ if (type == OCTEONTX_SSO_GROUP) {
+ switch (bar) {
+ case 0:
+ return sdev.grp[id].bar0;
+ case 2:
+ return sdev.grp[id].bar2;
+ default:
+ return NULL;
+ }
+ } else {
+ switch (bar) {
+ case 0:
+ return sdev.hws[id].bar0;
+ case 2:
+ return sdev.hws[id].bar2;
+ case 4:
+ return sdev.hws[id].bar4;
+ default:
+ return NULL;
+ }
+ }
+}
+
+/* SSOWVF pcie device aka event port probe */
+
+static int
+ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint16_t vfid;
+ struct ssowvf_res *res;
+ struct ssowvf_identify *id;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL ||
+ pci_dev->mem_resource[4].addr == NULL) {
+ ssovf_log_err("Empty bars %p %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr,
+ pci_dev->mem_resource[4].addr);
+ return -ENODEV;
+ }
+
+ if (pci_dev->mem_resource[4].len != SSOW_BAR4_LEN) {
+ ssovf_log_err("Bar4 len mismatch %d != %d",
+ SSOW_BAR4_LEN, (int)pci_dev->mem_resource[4].len);
+ return -EINVAL;
+ }
+
+ id = pci_dev->mem_resource[4].addr;
+ vfid = id->vfid;
+ if (vfid >= SSO_MAX_VHWS) {
+ ssovf_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS);
+ return -EINVAL;
+ }
+
+ res = &sdev.hws[vfid];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->bar4 = pci_dev->mem_resource[4].addr;
+ res->domain = id->domain;
+
+ sdev.total_ssowvfs++;
+ rte_wmb();
+ ssovf_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain,
+ res->vfid, sdev.total_ssowvfs);
+ return 0;
+}
+
+static const struct rte_pci_id pci_ssowvf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_SSOWS_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_ssowvf = {
+ .id_table = pci_ssowvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ssowvf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssowvf, pci_ssowvf);
+
+/* SSOVF pcie device aka event queue probe */
+
+static int
+ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint64_t val;
+ uint16_t vfid;
+ uint8_t *idreg;
+ struct ssovf_res *res;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL) {
+ ssovf_log_err("Empty bars %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr);
+ return -ENODEV;
+ }
+ idreg = pci_dev->mem_resource[0].addr;
+ idreg += SSO_VHGRP_AQ_THR;
+ val = rte_read64(idreg);
+
+ /* Write back the default value of aq_thr */
+ rte_write64((1ULL << 33) - 1, idreg);
+ vfid = (val >> 16) & 0xffff;
+ if (vfid >= SSO_MAX_VHGRP) {
+ ssovf_log_err("Invalid vfid (%d/%d)", vfid, SSO_MAX_VHGRP);
+ return -EINVAL;
+ }
+
+ res = &sdev.grp[vfid];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->domain = val & 0xffff;
+
+ sdev.total_ssovfs++;
+ rte_wmb();
+ ssovf_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain,
+ res->vfid, sdev.total_ssovfs);
+ return 0;
+}
+
+static const struct rte_pci_id pci_ssovf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_ssovf = {
+ .id_table = pci_ssovf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ssovf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssovf, pci_ssovf);
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
new file mode 100644
index 00000000..ad3fe684
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -0,0 +1,252 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ssovf_worker.h"
+
+static force_inline void
+ssows_new_event(struct ssows *ws, const struct rte_event *ev)
+{
+ const uint64_t event_ptr = ev->u64;
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t new_tt = ev->sched_type;
+ const uint8_t grp = ev->queue_id;
+
+ ssows_add_work(ws, event_ptr, tag, new_tt, grp);
+}
+
+static force_inline void
+ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
+{
+ const uint8_t cur_tt = ws->cur_tt;
+ const uint8_t new_tt = ev->sched_type;
+ const uint32_t tag = (uint32_t)ev->event;
+ /*
+ * cur_tt/new_tt SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
+ *
+ * SSO_SYNC_ORDERED norm norm untag
+ * SSO_SYNC_ATOMIC norm norm untag
+ * SSO_SYNC_UNTAGGED full full NOOP
+ */
+ if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
+ if (new_tt != SSO_SYNC_UNTAGGED) {
+ ssows_swtag_full(ws, ev->u64, tag,
+ new_tt, grp);
+ }
+ } else {
+ if (likely(new_tt != SSO_SYNC_UNTAGGED))
+ ssows_swtag_norm(ws, tag, new_tt);
+ else
+ ssows_swtag_untag(ws);
+ }
+ ws->swtag_req = 1;
+}
+
+#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)
+
+static force_inline void
+ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
+{
+ const uint64_t event_ptr = ev->u64;
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t cur_tt = ws->cur_tt;
+ const uint8_t new_tt = ev->sched_type;
+
+ if (cur_tt == SSO_SYNC_ORDERED) {
+ /* Create unique tag based on custom event type and new grp */
+ uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;
+
+ newtag |= grp << 20;
+ newtag |= tag;
+ ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
+ rte_smp_wmb();
+ ssows_swtag_wait(ws);
+ } else {
+ rte_smp_wmb();
+ }
+ ssows_add_work(ws, event_ptr, tag, new_tt, grp);
+}
+
+static force_inline void
+ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
+{
+ const uint8_t grp = ev->queue_id;
+
+ /* Group hasn't changed, Use SWTAG to forward the event */
+ if (ws->cur_grp == grp)
+ ssows_fwd_swtag(ws, ev, grp);
+ else
+ /*
+ * Group has been changed for group based work pipelining,
+ * Use deschedule/add_work operation to transfer the event to
+ * new group/core
+ */
+ ssows_fwd_group(ws, ev, grp);
+}
+
+static force_inline void
+ssows_release_event(struct ssows *ws)
+{
+ if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
+ ssows_swtag_untag(ws);
+}
+
+force_inline uint16_t __hot
+ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+ struct ssows *ws = port;
+
+ RTE_SET_USED(timeout_ticks);
+
+ ssows_swtag_wait(ws);
+ if (ws->swtag_req) {
+ ws->swtag_req = 0;
+ return 1;
+ } else {
+ return ssows_get_work(ws, ev);
+ }
+}
+
+force_inline uint16_t __hot
+ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+ struct ssows *ws = port;
+ uint64_t iter;
+ uint16_t ret = 1;
+
+ ssows_swtag_wait(ws);
+ if (ws->swtag_req) {
+ ws->swtag_req = 0;
+ } else {
+ ret = ssows_get_work(ws, ev);
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
+ ret = ssows_get_work(ws, ev);
+ }
+ return ret;
+}
+
+uint16_t __hot
+ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
+ uint64_t timeout_ticks)
+{
+ RTE_SET_USED(nb_events);
+
+ return ssows_deq(port, ev, timeout_ticks);
+}
+
+uint16_t __hot
+ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
+ uint64_t timeout_ticks)
+{
+ RTE_SET_USED(nb_events);
+
+ return ssows_deq_timeout(port, ev, timeout_ticks);
+}
+
+force_inline uint16_t __hot
+ssows_enq(void *port, const struct rte_event *ev)
+{
+ struct ssows *ws = port;
+ uint16_t ret = 1;
+
+ switch (ev->op) {
+ case RTE_EVENT_OP_NEW:
+ ssows_new_event(ws, ev);
+ break;
+ case RTE_EVENT_OP_FORWARD:
+ ssows_forward_event(ws, ev);
+ break;
+ case RTE_EVENT_OP_RELEASE:
+ ssows_release_event(ws);
+ break;
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+uint16_t __hot
+ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+{
+ RTE_SET_USED(nb_events);
+ return ssows_enq(port, ev);
+}
+
+void
+ssows_flush_events(struct ssows *ws, uint8_t queue_id)
+{
+ uint32_t reg_off;
+ uint64_t aq_cnt = 1;
+ uint64_t cq_ds_cnt = 1;
+ uint64_t enable, get_work0, get_work1;
+ uint8_t *base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
+
+ enable = ssovf_read64(base + SSO_VHGRP_QCTL);
+ if (!enable)
+ return;
+
+ reg_off = SSOW_VHWS_OP_GET_WORK0;
+ reg_off |= 1 << 17; /* Grouped */
+ reg_off |= 1 << 16; /* WAIT */
+ reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
+ while (aq_cnt || cq_ds_cnt) {
+ aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
+ cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
+ /* Extract cq and ds count */
+ cq_ds_cnt &= 0x1FFF1FFF0000;
+ ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);
+ }
+
+ RTE_SET_USED(get_work0);
+ RTE_SET_USED(get_work1);
+}
+
+void
+ssows_reset(struct ssows *ws)
+{
+ uint64_t tag;
+ uint64_t pend_tag;
+ uint8_t pend_tt;
+ uint8_t tt;
+
+ tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
+ pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);
+
+ if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
+ pend_tt = (pend_tag >> 32) & 0x3;
+ if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
+ ssows_desched(ws);
+ } else {
+ tt = (tag >> 32) & 0x3;
+ if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
+ ssows_swtag_untag(ws);
+ }
+}
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
new file mode 100644
index 00000000..300dfae8
--- /dev/null
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -0,0 +1,139 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <rte_common.h>
+
+#include "ssovf_evdev.h"
+
+enum {
+ SSO_SYNC_ORDERED,
+ SSO_SYNC_ATOMIC,
+ SSO_SYNC_UNTAGGED,
+ SSO_SYNC_EMPTY
+};
+
+#ifndef force_inline
+#define force_inline inline __attribute__((always_inline))
+#endif
+
+#ifndef __hot
+#define __hot __attribute__((hot))
+#endif
+
+/* SSO Operations */
+
+static force_inline uint16_t
+ssows_get_work(struct ssows *ws, struct rte_event *ev)
+{
+ uint64_t get_work0, get_work1;
+ uint64_t sched_type_queue;
+
+ ssovf_load_pair(get_work0, get_work1, ws->getwork);
+
+ sched_type_queue = (get_work0 >> 32) & 0xfff;
+ ws->cur_tt = sched_type_queue & 0x3;
+ ws->cur_grp = sched_type_queue >> 2;
+ sched_type_queue = sched_type_queue << 38;
+
+ ev->event = sched_type_queue | (get_work0 & 0xffffffff);
+ ev->u64 = get_work1;
+ return !!get_work1;
+}
+
+static force_inline void
+ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
+ const uint8_t new_tt, const uint8_t grp)
+{
+ uint64_t add_work0;
+
+ add_work0 = tag | ((uint64_t)(new_tt) << 32);
+ ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
+}
+
+static force_inline void
+ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
+ const uint8_t new_tt, const uint8_t grp)
+{
+ uint64_t swtag_full0;
+
+ swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
+ ((uint64_t)grp << 34);
+ ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
+ SSOW_VHWS_OP_SWTAG_FULL0));
+}
+
+static force_inline void
+ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
+{
+ uint64_t val;
+
+ val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
+ ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
+}
+
+static force_inline void
+ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
+{
+ uint64_t val;
+
+ val = tag | ((uint64_t)(new_tt & 0x3) << 32);
+ ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
+}
+
+static force_inline void
+ssows_swtag_untag(struct ssows *ws)
+{
+ ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
+ ws->cur_tt = SSO_SYNC_UNTAGGED;
+}
+
+static force_inline void
+ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
+{
+ ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
+ SSOW_VHWS_OP_UPD_WQP_GRP0));
+}
+
+static force_inline void
+ssows_desched(struct ssows *ws)
+{
+ ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
+}
+
+static force_inline void
+ssows_swtag_wait(struct ssows *ws)
+{
+ /* Wait for the SWTAG/SWTAG_FULL operation */
+ while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
+ ;
+}
diff --git a/drivers/event/skeleton/Makefile b/drivers/event/skeleton/Makefile
new file mode 100644
index 00000000..c2b2456b
--- /dev/null
+++ b/drivers/event/skeleton/Makefile
@@ -0,0 +1,51 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Cavium Networks. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium Networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_skeleton_event.a
+
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_skeleton_event_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton_eventdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/skeleton/rte_pmd_skeleton_event_version.map b/drivers/event/skeleton/rte_pmd_skeleton_event_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/drivers/event/skeleton/rte_pmd_skeleton_event_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
new file mode 100644
index 00000000..800bd76e
--- /dev/null
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -0,0 +1,497 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_pci.h>
+#include <rte_lcore.h>
+#include <rte_vdev.h>
+
+#include "skeleton_eventdev.h"
+
+#define EVENTDEV_NAME_SKELETON_PMD event_skeleton
+/**< Skeleton event device PMD name */
+
+static uint16_t
+skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
+{
+ struct skeleton_port *sp = port;
+
+ RTE_SET_USED(sp);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(port);
+
+ return 0;
+}
+
+static uint16_t
+skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct skeleton_port *sp = port;
+
+ RTE_SET_USED(sp);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(port);
+ RTE_SET_USED(nb_events);
+
+ return 0;
+}
+
+static uint16_t
+skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks)
+{
+ struct skeleton_port *sp = port;
+
+ RTE_SET_USED(sp);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(timeout_ticks);
+
+ return 0;
+}
+
+static uint16_t
+skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ struct skeleton_port *sp = port;
+
+ RTE_SET_USED(sp);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(nb_events);
+ RTE_SET_USED(timeout_ticks);
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_info_get(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+
+ dev_info->min_dequeue_timeout_ns = 1;
+ dev_info->max_dequeue_timeout_ns = 10000;
+ dev_info->dequeue_timeout_ns = 25;
+ dev_info->max_event_queues = 64;
+ dev_info->max_event_queue_flows = (1ULL << 20);
+ dev_info->max_event_queue_priority_levels = 8;
+ dev_info->max_event_priority_levels = 8;
+ dev_info->max_event_ports = 32;
+ dev_info->max_event_port_dequeue_depth = 16;
+ dev_info->max_event_port_enqueue_depth = 16;
+ dev_info->max_num_events = (1ULL << 20);
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_EVENT_QOS;
+}
+
+static int
+skeleton_eventdev_configure(const struct rte_eventdev *dev)
+{
+ struct rte_eventdev_data *data = dev->data;
+ struct rte_event_dev_config *conf = &data->dev_conf;
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(conf);
+ RTE_SET_USED(skel);
+
+ PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
+ return 0;
+}
+
+static int
+skeleton_eventdev_start(struct rte_eventdev *dev)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_stop(struct rte_eventdev *dev)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+}
+
+static int
+skeleton_eventdev_close(struct rte_eventdev *dev)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(queue_id);
+
+ queue_conf->nb_atomic_flows = (1ULL << 20);
+ queue_conf->nb_atomic_order_sequences = (1ULL << 20);
+ queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static void
+skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static int
+skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(queue_conf);
+ RTE_SET_USED(queue_id);
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(port_id);
+
+ port_conf->new_event_threshold = 32 * 1024;
+ port_conf->dequeue_depth = 16;
+ port_conf->enqueue_depth = 16;
+}
+
+static void
+skeleton_eventdev_port_release(void *port)
+{
+ struct skeleton_port *sp = port;
+ PMD_DRV_FUNC_TRACE();
+
+ rte_free(sp);
+}
+
+static int
+skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct skeleton_port *sp;
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(port_conf);
+
+ /* Free memory prior to re-allocation if needed */
+ if (dev->data->ports[port_id] != NULL) {
+ PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+ port_id);
+ skeleton_eventdev_port_release(dev->data->ports[port_id]);
+ dev->data->ports[port_id] = NULL;
+ }
+
+ /* Allocate event port memory */
+ sp = rte_zmalloc_socket("eventdev port",
+ sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
+ dev->data->socket_id);
+ if (sp == NULL) {
+ PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
+ return -ENOMEM;
+ }
+
+ sp->port_id = port_id;
+
+ PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);
+
+ dev->data->ports[port_id] = sp;
+ return 0;
+}
+
+static int
+skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct skeleton_port *sp = port;
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(sp);
+ RTE_SET_USED(queues);
+ RTE_SET_USED(priorities);
+
+ /* Linked all the queues */
+ return (int)nb_links;
+}
+
+static int
+skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct skeleton_port *sp = port;
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(sp);
+ RTE_SET_USED(queues);
+
+ /* Unlinked all the queues */
+ return (int)nb_unlinks;
+
+}
+
+static int
+skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+ uint32_t scale = 1;
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ *timeout_ticks = ns * scale;
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(f);
+}
+
+
+/* Initialize and register event driver with DPDK Application */
+static const struct rte_eventdev_ops skeleton_eventdev_ops = {
+ .dev_infos_get = skeleton_eventdev_info_get,
+ .dev_configure = skeleton_eventdev_configure,
+ .dev_start = skeleton_eventdev_start,
+ .dev_stop = skeleton_eventdev_stop,
+ .dev_close = skeleton_eventdev_close,
+ .queue_def_conf = skeleton_eventdev_queue_def_conf,
+ .queue_setup = skeleton_eventdev_queue_setup,
+ .queue_release = skeleton_eventdev_queue_release,
+ .port_def_conf = skeleton_eventdev_port_def_conf,
+ .port_setup = skeleton_eventdev_port_setup,
+ .port_release = skeleton_eventdev_port_release,
+ .port_link = skeleton_eventdev_port_link,
+ .port_unlink = skeleton_eventdev_port_unlink,
+ .timeout_ticks = skeleton_eventdev_timeout_ticks,
+ .dump = skeleton_eventdev_dump
+};
+
+static int
+skeleton_eventdev_init(struct rte_eventdev *eventdev)
+{
+ struct rte_pci_device *pci_dev;
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
+ int ret = 0;
+
+ PMD_DRV_FUNC_TRACE();
+
+ eventdev->dev_ops = &skeleton_eventdev_ops;
+ eventdev->schedule = NULL;
+ eventdev->enqueue = skeleton_eventdev_enqueue;
+ eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
+ eventdev->dequeue = skeleton_eventdev_dequeue;
+ eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
+
+ skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
+ if (!skel->reg_base) {
+ PMD_DRV_ERR("Failed to map BAR0");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ skel->device_id = pci_dev->id.device_id;
+ skel->vendor_id = pci_dev->id.vendor_id;
+ skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+
+ PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
+ pci_dev->id.vendor_id, pci_dev->id.device_id,
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+
+ PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
+ eventdev->data->dev_id, eventdev->data->socket_id,
+ skel->vendor_id, skel->device_id);
+
+fail:
+ return ret;
+}
+
+/* PCI based event device */
+
+#define EVENTDEV_SKEL_VENDOR_ID 0x177d
+#define EVENTDEV_SKEL_PRODUCT_ID 0x0001
+
+static const struct rte_pci_id pci_id_skeleton_map[] = {
+ {
+ RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
+ EVENTDEV_SKEL_PRODUCT_ID)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_eventdev_driver pci_eventdev_skeleton_pmd = {
+ .pci_drv = {
+ .id_table = pci_id_skeleton_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = rte_event_pmd_pci_probe,
+ .remove = rte_event_pmd_pci_remove,
+ },
+ .eventdev_init = skeleton_eventdev_init,
+ .dev_private_size = sizeof(struct skeleton_eventdev),
+};
+
+RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);
+
+/* VDEV based event device */
+
+static int
+skeleton_eventdev_create(const char *name, int socket_id)
+{
+ struct rte_eventdev *eventdev;
+
+ eventdev = rte_event_pmd_vdev_init(name,
+ sizeof(struct skeleton_eventdev), socket_id);
+ if (eventdev == NULL) {
+ PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
+ goto fail;
+ }
+
+ eventdev->dev_ops = &skeleton_eventdev_ops;
+ eventdev->schedule = NULL;
+ eventdev->enqueue = skeleton_eventdev_enqueue;
+ eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
+ eventdev->dequeue = skeleton_eventdev_dequeue;
+ eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
+
+ return 0;
+fail:
+ return -EFAULT;
+}
+
+static int
+skeleton_eventdev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
+ rte_socket_id());
+ return skeleton_eventdev_create(name, rte_socket_id());
+}
+
+static int
+skeleton_eventdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
+ .probe = skeleton_eventdev_probe,
+ .remove = skeleton_eventdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
diff --git a/lib/librte_eal/common/include/arch/tile/rte_memcpy.h b/drivers/event/skeleton/skeleton_eventdev.h
index e606957c..1ce62da7 100644
--- a/lib/librte_eal/common/include/arch/tile/rte_memcpy.h
+++ b/drivers/event/skeleton/skeleton_eventdev.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright (C) EZchip Semiconductor Ltd. 2015.
+ * Copyright (C) Cavium networks Ltd. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +13,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of EZchip Semiconductor nor the names of its
+ * * Neither the name of Cavium networks nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -28,60 +28,41 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+ */
-#ifndef _RTE_MEMCPY_TILE_H_
-#define _RTE_MEMCPY_TILE_H_
+#ifndef __SKELETON_EVENTDEV_H__
+#define __SKELETON_EVENTDEV_H__
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-#include <string.h>
+#include <rte_eventdev_pmd.h>
-#include "generic/rte_memcpy.h"
+#ifdef RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
+#define PMD_DRV_FUNC_TRACE() do { } while (0)
+#endif
-static inline void
-rte_mov16(uint8_t *dst, const uint8_t *src)
-{
- memcpy(dst, src, 16);
-}
+#define PMD_DRV_ERR(fmt, args...) \
+ RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ## args)
-static inline void
-rte_mov32(uint8_t *dst, const uint8_t *src)
-{
- memcpy(dst, src, 32);
-}
+struct skeleton_eventdev {
+ uintptr_t reg_base;
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t subsystem_device_id;
+ uint16_t subsystem_vendor_id;
+} __rte_cache_aligned;
-static inline void
-rte_mov48(uint8_t *dst, const uint8_t *src)
-{
- memcpy(dst, src, 48);
-}
+struct skeleton_port {
+ uint8_t port_id;
+} __rte_cache_aligned;
-static inline void
-rte_mov64(uint8_t *dst, const uint8_t *src)
+static inline struct skeleton_eventdev *
+skeleton_pmd_priv(const struct rte_eventdev *eventdev)
{
- memcpy(dst, src, 64);
+ return eventdev->data->dev_private;
}
-static inline void
-rte_mov128(uint8_t *dst, const uint8_t *src)
-{
- memcpy(dst, src, 128);
-}
-
-static inline void
-rte_mov256(uint8_t *dst, const uint8_t *src)
-{
- memcpy(dst, src, 256);
-}
-
-#define rte_memcpy(d, s, n) memcpy((d), (s), (n))
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_MEMCPY_TILE_H_ */
+#endif /* __SKELETON_EVENTDEV_H__ */
diff --git a/drivers/event/sw/Makefile b/drivers/event/sw/Makefile
new file mode 100644
index 00000000..857a87cc
--- /dev/null
+++ b/drivers/event/sw/Makefile
@@ -0,0 +1,62 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_sw_event.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+# for older GCC versions, allow us to initialize an event using
+# designated initializers.
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -le 50 && echo 1), 1)
+CFLAGS += -Wno-missing-field-initializers
+endif
+endif
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_evdev_sw_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_worker.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_scheduler.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_xstats.c
+
+# export include files
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/sw/event_ring.h b/drivers/event/sw/event_ring.h
new file mode 100644
index 00000000..cdaee95d
--- /dev/null
+++ b/drivers/event/sw/event_ring.h
@@ -0,0 +1,185 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Generic ring structure for passing events from one core to another.
+ *
+ * Used by the software scheduler for the producer and consumer rings for
+ * each port, i.e. for passing events from worker cores to scheduler and
+ * vice-versa. Designed for single-producer, single-consumer use with two
+ * cores working on each ring.
+ */
+
+#ifndef _EVENT_RING_
+#define _EVENT_RING_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+
+#define QE_RING_NAMESIZE 32
+
+struct qe_ring {
+ char name[QE_RING_NAMESIZE] __rte_cache_aligned;
+ uint32_t ring_size; /* size of memory block allocated to the ring */
+ uint32_t mask; /* mask for read/write values == ring_size -1 */
+ uint32_t size; /* actual usable space in the ring */
+ volatile uint32_t write_idx __rte_cache_aligned;
+ volatile uint32_t read_idx __rte_cache_aligned;
+
+ struct rte_event ring[0] __rte_cache_aligned;
+};
+
+#ifndef force_inline
+#define force_inline inline __attribute__((always_inline))
+#endif
+
+static inline struct qe_ring *
+qe_ring_create(const char *name, unsigned int size, unsigned int socket_id)
+{
+ struct qe_ring *retval;
+ const uint32_t ring_size = rte_align32pow2(size + 1);
+ size_t memsize = sizeof(*retval) +
+ (ring_size * sizeof(retval->ring[0]));
+
+ retval = rte_zmalloc_socket(NULL, memsize, 0, socket_id);
+ if (retval == NULL)
+ goto end;
+
+ snprintf(retval->name, sizeof(retval->name), "EVDEV_RG_%s", name);
+ retval->ring_size = ring_size;
+ retval->mask = ring_size - 1;
+ retval->size = size;
+end:
+ return retval;
+}
+
+static inline void
+qe_ring_destroy(struct qe_ring *r)
+{
+ rte_free(r);
+}
+
+static force_inline unsigned int
+qe_ring_count(const struct qe_ring *r)
+{
+ return r->write_idx - r->read_idx;
+}
+
+static force_inline unsigned int
+qe_ring_free_count(const struct qe_ring *r)
+{
+ return r->size - qe_ring_count(r);
+}
+
+static force_inline unsigned int
+qe_ring_enqueue_burst(struct qe_ring *r, const struct rte_event *qes,
+ unsigned int nb_qes, uint16_t *free_count)
+{
+ const uint32_t size = r->size;
+ const uint32_t mask = r->mask;
+ const uint32_t read = r->read_idx;
+ uint32_t write = r->write_idx;
+ const uint32_t space = read + size - write;
+ uint32_t i;
+
+ if (space < nb_qes)
+ nb_qes = space;
+
+ for (i = 0; i < nb_qes; i++, write++)
+ r->ring[write & mask] = qes[i];
+
+ rte_smp_wmb();
+
+ if (nb_qes != 0)
+ r->write_idx = write;
+
+ *free_count = space - nb_qes;
+
+ return nb_qes;
+}
+
+static force_inline unsigned int
+qe_ring_enqueue_burst_with_ops(struct qe_ring *r, const struct rte_event *qes,
+ unsigned int nb_qes, uint8_t *ops)
+{
+ const uint32_t size = r->size;
+ const uint32_t mask = r->mask;
+ const uint32_t read = r->read_idx;
+ uint32_t write = r->write_idx;
+ const uint32_t space = read + size - write;
+ uint32_t i;
+
+ if (space < nb_qes)
+ nb_qes = space;
+
+ for (i = 0; i < nb_qes; i++, write++) {
+ r->ring[write & mask] = qes[i];
+ r->ring[write & mask].op = ops[i];
+ }
+
+ rte_smp_wmb();
+
+ if (nb_qes != 0)
+ r->write_idx = write;
+
+ return nb_qes;
+}
+
+static force_inline unsigned int
+qe_ring_dequeue_burst(struct qe_ring *r, struct rte_event *qes,
+ unsigned int nb_qes)
+{
+ const uint32_t mask = r->mask;
+ uint32_t read = r->read_idx;
+ const uint32_t write = r->write_idx;
+ const uint32_t items = write - read;
+ uint32_t i;
+
+ if (items < nb_qes)
+ nb_qes = items;
+
+
+ for (i = 0; i < nb_qes; i++, read++)
+ qes[i] = r->ring[read & mask];
+
+ rte_smp_rmb();
+
+ if (nb_qes != 0)
+ r->read_idx += nb_qes;
+
+ return nb_qes;
+}
+
+#endif
diff --git a/drivers/event/sw/iq_ring.h b/drivers/event/sw/iq_ring.h
new file mode 100644
index 00000000..d480d156
--- /dev/null
+++ b/drivers/event/sw/iq_ring.h
@@ -0,0 +1,176 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Ring structure definitions used for the internal ring buffers of the
+ * SW eventdev implementation. These are designed for single-core use only.
+ */
+#ifndef _IQ_RING_
+#define _IQ_RING_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_eventdev.h>
+
+#define IQ_RING_NAMESIZE 12
+#define QID_IQ_DEPTH 512
+#define QID_IQ_MASK (uint16_t)(QID_IQ_DEPTH - 1)
+
+struct iq_ring {
+ char name[IQ_RING_NAMESIZE] __rte_cache_aligned;
+ uint16_t write_idx;
+ uint16_t read_idx;
+
+ struct rte_event ring[QID_IQ_DEPTH];
+};
+
+#ifndef force_inline
+#define force_inline inline __attribute__((always_inline))
+#endif
+
+static inline struct iq_ring *
+iq_ring_create(const char *name, unsigned int socket_id)
+{
+ struct iq_ring *retval;
+
+ retval = rte_malloc_socket(NULL, sizeof(*retval), 0, socket_id);
+ if (retval == NULL)
+ goto end;
+
+ snprintf(retval->name, sizeof(retval->name), "%s", name);
+ retval->write_idx = retval->read_idx = 0;
+end:
+ return retval;
+}
+
+static inline void
+iq_ring_destroy(struct iq_ring *r)
+{
+ rte_free(r);
+}
+
+static force_inline uint16_t
+iq_ring_count(const struct iq_ring *r)
+{
+ return r->write_idx - r->read_idx;
+}
+
+static force_inline uint16_t
+iq_ring_free_count(const struct iq_ring *r)
+{
+ return QID_IQ_MASK - iq_ring_count(r);
+}
+
+static force_inline uint16_t
+iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
+{
+ const uint16_t read = r->read_idx;
+ uint16_t write = r->write_idx;
+ const uint16_t space = read + QID_IQ_MASK - write;
+ uint16_t i;
+
+ if (space < nb_qes)
+ nb_qes = space;
+
+ for (i = 0; i < nb_qes; i++, write++)
+ r->ring[write & QID_IQ_MASK] = qes[i];
+
+ r->write_idx = write;
+
+ return nb_qes;
+}
+
+static force_inline uint16_t
+iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
+{
+ uint16_t read = r->read_idx;
+ const uint16_t write = r->write_idx;
+ const uint16_t items = write - read;
+ uint16_t i;
+
+ for (i = 0; i < nb_qes; i++, read++)
+ qes[i] = r->ring[read & QID_IQ_MASK];
+
+ if (items < nb_qes)
+ nb_qes = items;
+
+ r->read_idx += nb_qes;
+
+ return nb_qes;
+}
+
+/* assumes there is space, from a previous dequeue_burst */
+static force_inline uint16_t
+iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
+{
+ uint16_t i, read = r->read_idx;
+
+ for (i = nb_qes; i-- > 0; )
+ r->ring[--read & QID_IQ_MASK] = qes[i];
+
+ r->read_idx = read;
+ return nb_qes;
+}
+
+static force_inline const struct rte_event *
+iq_ring_peek(const struct iq_ring *r)
+{
+ return &r->ring[r->read_idx & QID_IQ_MASK];
+}
+
+static force_inline void
+iq_ring_pop(struct iq_ring *r)
+{
+ r->read_idx++;
+}
+
+static force_inline int
+iq_ring_enqueue(struct iq_ring *r, const struct rte_event *qe)
+{
+ const uint16_t read = r->read_idx;
+ const uint16_t write = r->write_idx;
+ const uint16_t space = read + QID_IQ_MASK - write;
+
+ if (space == 0)
+ return -1;
+
+ r->ring[write & QID_IQ_MASK] = *qe;
+
+ r->write_idx = write + 1;
+
+ return 0;
+}
+
+#endif
diff --git a/drivers/event/sw/rte_pmd_evdev_sw_version.map b/drivers/event/sw/rte_pmd_evdev_sw_version.map
new file mode 100644
index 00000000..5352e7e3
--- /dev/null
+++ b/drivers/event/sw/rte_pmd_evdev_sw_version.map
@@ -0,0 +1,3 @@
+DPDK_17.05 {
+ local: *;
+};
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
new file mode 100644
index 00000000..a31aaa66
--- /dev/null
+++ b/drivers/event/sw/sw_evdev.c
@@ -0,0 +1,833 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_vdev.h>
+#include <rte_memzone.h>
+#include <rte_kvargs.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+
+#include "sw_evdev.h"
+#include "iq_ring.h"
+#include "event_ring.h"
+
+#define EVENTDEV_NAME_SW_PMD event_sw
+#define NUMA_NODE_ARG "numa_node"
+#define SCHED_QUANTA_ARG "sched_quanta"
+#define CREDIT_QUANTA_ARG "credit_quanta"
+
+static void
+sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
+
+static int
+sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
+ const uint8_t priorities[], uint16_t num)
+{
+ struct sw_port *p = port;
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ int i;
+
+ RTE_SET_USED(priorities);
+ for (i = 0; i < num; i++) {
+ struct sw_qid *q = &sw->qids[queues[i]];
+
+ /* check for qid map overflow */
+ if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+
+ if (p->is_directed && p->num_qids_mapped > 0) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+
+ if (q->type == SW_SCHED_TYPE_DIRECT) {
+ /* check directed qids only map to one port */
+ if (p->num_qids_mapped > 0) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+ /* check port only takes a directed flow */
+ if (num > 1) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+
+ p->is_directed = 1;
+ p->num_qids_mapped = 1;
+ } else if (q->type == RTE_SCHED_TYPE_ORDERED) {
+ p->num_ordered_qids++;
+ p->num_qids_mapped++;
+ } else if (q->type == RTE_SCHED_TYPE_ATOMIC) {
+ p->num_qids_mapped++;
+ }
+
+ q->cq_map[q->cq_num_mapped_cqs] = p->id;
+ rte_smp_wmb();
+ q->cq_num_mapped_cqs++;
+ }
+ return i;
+}
+
+static int
+sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
+ uint16_t nb_unlinks)
+{
+ struct sw_port *p = port;
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ unsigned int i, j;
+
+ int unlinked = 0;
+ for (i = 0; i < nb_unlinks; i++) {
+ struct sw_qid *q = &sw->qids[queues[i]];
+ for (j = 0; j < q->cq_num_mapped_cqs; j++) {
+ if (q->cq_map[j] == p->id) {
+ q->cq_map[j] =
+ q->cq_map[q->cq_num_mapped_cqs - 1];
+ rte_smp_wmb();
+ q->cq_num_mapped_cqs--;
+ unlinked++;
+
+ p->num_qids_mapped--;
+
+ if (q->type == RTE_SCHED_TYPE_ORDERED)
+ p->num_ordered_qids--;
+
+ continue;
+ }
+ }
+ }
+ return unlinked;
+}
+
+static int
+sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *conf)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ struct sw_port *p = &sw->ports[port_id];
+ char buf[QE_RING_NAMESIZE];
+ unsigned int i;
+
+ struct rte_event_dev_info info;
+ sw_info_get(dev, &info);
+
+ /* detect re-configuring and return credits to instance if needed */
+ if (p->initialized) {
+ /* taking credits from pool is done one quanta at a time, and
+ * credits may be spend (counted in p->inflights) or still
+ * available in the port (p->inflight_credits). We must return
+ * the sum to no leak credits
+ */
+ int possible_inflights = p->inflight_credits + p->inflights;
+ rte_atomic32_sub(&sw->inflights, possible_inflights);
+ }
+
+ *p = (struct sw_port){0}; /* zero entire structure */
+ p->id = port_id;
+ p->sw = sw;
+
+ snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
+ "rx_worker_ring");
+ p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
+ dev->data->socket_id);
+ if (p->rx_worker_ring == NULL) {
+ SW_LOG_ERR("Error creating RX worker ring for port %d\n",
+ port_id);
+ return -1;
+ }
+
+ p->inflight_max = conf->new_event_threshold;
+
+ snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
+ "cq_worker_ring");
+ p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
+ dev->data->socket_id);
+ if (p->cq_worker_ring == NULL) {
+ qe_ring_destroy(p->rx_worker_ring);
+ SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
+ port_id);
+ return -1;
+ }
+ sw->cq_ring_space[port_id] = conf->dequeue_depth;
+
+ /* set hist list contents to empty */
+ for (i = 0; i < SW_PORT_HIST_LIST; i++) {
+ p->hist_list[i].fid = -1;
+ p->hist_list[i].qid = -1;
+ }
+ dev->data->ports[port_id] = p;
+
+ rte_smp_wmb();
+ p->initialized = 1;
+ return 0;
+}
+
+static void
+sw_port_release(void *port)
+{
+ struct sw_port *p = (void *)port;
+ if (p == NULL)
+ return;
+
+ qe_ring_destroy(p->rx_worker_ring);
+ qe_ring_destroy(p->cq_worker_ring);
+ memset(p, 0, sizeof(*p));
+}
+
+static int32_t
+qid_init(struct sw_evdev *sw, unsigned int idx, int type,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ unsigned int i;
+ int dev_id = sw->data->dev_id;
+ int socket_id = sw->data->socket_id;
+ char buf[IQ_RING_NAMESIZE];
+ struct sw_qid *qid = &sw->qids[idx];
+
+ for (i = 0; i < SW_IQS_MAX; i++) {
+ snprintf(buf, sizeof(buf), "q_%u_iq_%d", idx, i);
+ qid->iq[i] = iq_ring_create(buf, socket_id);
+ if (!qid->iq[i]) {
+ SW_LOG_DBG("ring create failed");
+ goto cleanup;
+ }
+ }
+
+ /* Initialize the FID structures to no pinning (-1), and zero packets */
+ const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
+ for (i = 0; i < RTE_DIM(qid->fids); i++)
+ qid->fids[i] = fid;
+
+ qid->id = idx;
+ qid->type = type;
+ qid->priority = queue_conf->priority;
+
+ if (qid->type == RTE_SCHED_TYPE_ORDERED) {
+ char ring_name[RTE_RING_NAMESIZE];
+ uint32_t window_size;
+
+ /* rte_ring and window_size_mask require require window_size to
+ * be a power-of-2.
+ */
+ window_size = rte_align32pow2(
+ queue_conf->nb_atomic_order_sequences);
+
+ qid->window_size = window_size - 1;
+
+ if (!window_size) {
+ SW_LOG_DBG(
+ "invalid reorder_window_size for ordered queue\n"
+ );
+ goto cleanup;
+ }
+
+ snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i);
+ qid->reorder_buffer = rte_zmalloc_socket(buf,
+ window_size * sizeof(qid->reorder_buffer[0]),
+ 0, socket_id);
+ if (!qid->reorder_buffer) {
+ SW_LOG_DBG("reorder_buffer malloc failed\n");
+ goto cleanup;
+ }
+
+ memset(&qid->reorder_buffer[0],
+ 0,
+ window_size * sizeof(qid->reorder_buffer[0]));
+
+ snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist",
+ dev_id, idx);
+
+ /* lookup the ring, and if it already exists, free it */
+ struct rte_ring *cleanup = rte_ring_lookup(ring_name);
+ if (cleanup)
+ rte_ring_free(cleanup);
+
+ qid->reorder_buffer_freelist = rte_ring_create(ring_name,
+ window_size,
+ socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (!qid->reorder_buffer_freelist) {
+ SW_LOG_DBG("freelist ring create failed");
+ goto cleanup;
+ }
+
+ /* Populate the freelist with reorder buffer entries. Enqueue
+ * 'window_size - 1' entries because the rte_ring holds only
+ * that many.
+ */
+ for (i = 0; i < window_size - 1; i++) {
+ if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
+ &qid->reorder_buffer[i]) < 0)
+ goto cleanup;
+ }
+
+ qid->reorder_buffer_index = 0;
+ qid->cq_next_tx = 0;
+ }
+
+ qid->initialized = 1;
+
+ return 0;
+
+cleanup:
+ for (i = 0; i < SW_IQS_MAX; i++) {
+ if (qid->iq[i])
+ iq_ring_destroy(qid->iq[i]);
+ }
+
+ if (qid->reorder_buffer) {
+ rte_free(qid->reorder_buffer);
+ qid->reorder_buffer = NULL;
+ }
+
+ if (qid->reorder_buffer_freelist) {
+ rte_ring_free(qid->reorder_buffer_freelist);
+ qid->reorder_buffer_freelist = NULL;
+ }
+
+ return -EINVAL;
+}
+
+static int
+sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *conf)
+{
+ int type;
+
+ /* SINGLE_LINK can be OR-ed with other types, so handle first */
+ if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
+ type = SW_SCHED_TYPE_DIRECT;
+ } else {
+ switch (conf->event_queue_cfg) {
+ case RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY:
+ type = RTE_SCHED_TYPE_ATOMIC;
+ break;
+ case RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:
+ type = RTE_SCHED_TYPE_ORDERED;
+ break;
+ case RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:
+ type = RTE_SCHED_TYPE_PARALLEL;
+ break;
+ case RTE_EVENT_QUEUE_CFG_ALL_TYPES:
+ SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
+ return -ENOTSUP;
+ default:
+ SW_LOG_ERR("Unknown queue type %d requested\n",
+ conf->event_queue_cfg);
+ return -EINVAL;
+ }
+ }
+
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ return qid_init(sw, queue_id, type, conf);
+}
+
+static void
+sw_queue_release(struct rte_eventdev *dev, uint8_t id)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ struct sw_qid *qid = &sw->qids[id];
+ uint32_t i;
+
+ for (i = 0; i < SW_IQS_MAX; i++)
+ iq_ring_destroy(qid->iq[i]);
+
+ if (qid->type == RTE_SCHED_TYPE_ORDERED) {
+ rte_free(qid->reorder_buffer);
+ rte_ring_free(qid->reorder_buffer_freelist);
+ }
+ memset(qid, 0, sizeof(*qid));
+}
+
+static void
+sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ static const struct rte_event_queue_conf default_conf = {
+ .nb_atomic_flows = 4096,
+ .nb_atomic_order_sequences = 1,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ };
+
+ *conf = default_conf;
+}
+
+static void
+sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(port_id);
+
+ port_conf->new_event_threshold = 1024;
+ port_conf->dequeue_depth = 16;
+ port_conf->enqueue_depth = 16;
+}
+
+static int
+sw_dev_configure(const struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ const struct rte_eventdev_data *data = dev->data;
+ const struct rte_event_dev_config *conf = &data->dev_conf;
+
+ sw->qid_count = conf->nb_event_queues;
+ sw->port_count = conf->nb_event_ports;
+ sw->nb_events_limit = conf->nb_events_limit;
+ rte_atomic32_set(&sw->inflights, 0);
+
+ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
+ return -ENOTSUP;
+
+ return 0;
+}
+
+static void
+sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
+{
+ RTE_SET_USED(dev);
+
+ static const struct rte_event_dev_info evdev_sw_info = {
+ .driver_name = SW_PMD_NAME,
+ .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
+ .max_event_queue_flows = SW_QID_NUM_FIDS,
+ .max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
+ .max_event_priority_levels = SW_IQS_MAX,
+ .max_event_ports = SW_PORTS_MAX,
+ .max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
+ .max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
+ .max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
+ .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_EVENT_QOS),
+ };
+
+ *info = evdev_sw_info;
+}
+
+static void
+sw_dump(struct rte_eventdev *dev, FILE *f)
+{
+ const struct sw_evdev *sw = sw_pmd_priv(dev);
+
+ static const char * const q_type_strings[] = {
+ "Ordered", "Atomic", "Parallel", "Directed"
+ };
+ uint32_t i;
+ fprintf(f, "EventDev %s: ports %d, qids %d\n", "todo-fix-name",
+ sw->port_count, sw->qid_count);
+
+ fprintf(f, "\trx %"PRIu64"\n\tdrop %"PRIu64"\n\ttx %"PRIu64"\n",
+ sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts);
+ fprintf(f, "\tsched calls: %"PRIu64"\n", sw->sched_called);
+ fprintf(f, "\tsched cq/qid call: %"PRIu64"\n", sw->sched_cq_qid_called);
+ fprintf(f, "\tsched no IQ enq: %"PRIu64"\n", sw->sched_no_iq_enqueues);
+ fprintf(f, "\tsched no CQ enq: %"PRIu64"\n", sw->sched_no_cq_enqueues);
+ uint32_t inflights = rte_atomic32_read(&sw->inflights);
+ uint32_t credits = sw->nb_events_limit - inflights;
+ fprintf(f, "\tinflight %d, credits: %d\n", inflights, credits);
+
+#define COL_RED "\x1b[31m"
+#define COL_RESET "\x1b[0m"
+
+ for (i = 0; i < sw->port_count; i++) {
+ int max, j;
+ const struct sw_port *p = &sw->ports[i];
+ if (!p->initialized) {
+ fprintf(f, " %sPort %d not initialized.%s\n",
+ COL_RED, i, COL_RESET);
+ continue;
+ }
+ fprintf(f, " Port %d %s\n", i,
+ p->is_directed ? " (SingleCons)" : "");
+ fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64
+ "\t%sinflight %d%s\n", sw->ports[i].stats.rx_pkts,
+ sw->ports[i].stats.rx_dropped,
+ sw->ports[i].stats.tx_pkts,
+ (p->inflights == p->inflight_max) ?
+ COL_RED : COL_RESET,
+ sw->ports[i].inflights, COL_RESET);
+
+ fprintf(f, "\tMax New: %u"
+ "\tAvg cycles PP: %"PRIu64"\tCredits: %u\n",
+ sw->ports[i].inflight_max,
+ sw->ports[i].avg_pkt_ticks,
+ sw->ports[i].inflight_credits);
+ fprintf(f, "\tReceive burst distribution:\n");
+ float zp_percent = p->zero_polls * 100.0 / p->total_polls;
+ fprintf(f, zp_percent < 10 ? "\t\t0:%.02f%% " : "\t\t0:%.0f%% ",
+ zp_percent);
+ for (max = (int)RTE_DIM(p->poll_buckets); max-- > 0;)
+ if (p->poll_buckets[max] != 0)
+ break;
+ for (j = 0; j <= max; j++) {
+ if (p->poll_buckets[j] != 0) {
+ float poll_pc = p->poll_buckets[j] * 100.0 /
+ p->total_polls;
+ fprintf(f, "%u-%u:%.02f%% ",
+ ((j << SW_DEQ_STAT_BUCKET_SHIFT) + 1),
+ ((j+1) << SW_DEQ_STAT_BUCKET_SHIFT),
+ poll_pc);
+ }
+ }
+ fprintf(f, "\n");
+
+ if (p->rx_worker_ring) {
+ uint64_t used = qe_ring_count(p->rx_worker_ring);
+ uint64_t space = qe_ring_free_count(p->rx_worker_ring);
+ const char *col = (space == 0) ? COL_RED : COL_RESET;
+ fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
+ PRIu64 COL_RESET"\n", col, used, space);
+ } else
+ fprintf(f, "\trx ring not initialized.\n");
+
+ if (p->cq_worker_ring) {
+ uint64_t used = qe_ring_count(p->cq_worker_ring);
+ uint64_t space = qe_ring_free_count(p->cq_worker_ring);
+ const char *col = (space == 0) ? COL_RED : COL_RESET;
+ fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
+ PRIu64 COL_RESET"\n", col, used, space);
+ } else
+ fprintf(f, "\tcq ring not initialized.\n");
+ }
+
+ for (i = 0; i < sw->qid_count; i++) {
+ const struct sw_qid *qid = &sw->qids[i];
+ if (!qid->initialized) {
+ fprintf(f, " %sQueue %d not initialized.%s\n",
+ COL_RED, i, COL_RESET);
+ continue;
+ }
+ int affinities_per_port[SW_PORTS_MAX] = {0};
+ uint32_t inflights = 0;
+
+ fprintf(f, " Queue %d (%s)\n", i, q_type_strings[qid->type]);
+ fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64"\n",
+ qid->stats.rx_pkts, qid->stats.rx_dropped,
+ qid->stats.tx_pkts);
+ if (qid->type == RTE_SCHED_TYPE_ORDERED) {
+ struct rte_ring *rob_buf_free =
+ qid->reorder_buffer_freelist;
+ if (rob_buf_free)
+ fprintf(f, "\tReorder entries in use: %u\n",
+ rte_ring_free_count(rob_buf_free));
+ else
+ fprintf(f,
+ "\tReorder buffer not initialized\n");
+ }
+
+ uint32_t flow;
+ for (flow = 0; flow < RTE_DIM(qid->fids); flow++)
+ if (qid->fids[flow].cq != -1) {
+ affinities_per_port[qid->fids[flow].cq]++;
+ inflights += qid->fids[flow].pcount;
+ }
+
+ uint32_t cq;
+ fprintf(f, "\tInflights: %u\tFlows pinned per port: ",
+ inflights);
+ for (cq = 0; cq < sw->port_count; cq++)
+ fprintf(f, "%d ", affinities_per_port[cq]);
+ fprintf(f, "\n");
+
+ uint32_t iq;
+ uint32_t iq_printed = 0;
+ for (iq = 0; iq < SW_IQS_MAX; iq++) {
+ if (!qid->iq[iq]) {
+ fprintf(f, "\tiq %d is not initialized.\n", iq);
+ iq_printed = 1;
+ continue;
+ }
+ uint32_t used = iq_ring_count(qid->iq[iq]);
+ uint32_t free = iq_ring_free_count(qid->iq[iq]);
+ const char *col = (free == 0) ? COL_RED : COL_RESET;
+ if (used > 0) {
+ fprintf(f, "\t%siq %d: Used %d\tFree %d"
+ COL_RESET"\n", col, iq, used, free);
+ iq_printed = 1;
+ }
+ }
+ if (iq_printed == 0)
+ fprintf(f, "\t-- iqs empty --\n");
+ }
+}
+
+static int
+sw_start(struct rte_eventdev *dev)
+{
+ unsigned int i, j;
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ /* check all ports are set up */
+ for (i = 0; i < sw->port_count; i++)
+ if (sw->ports[i].rx_worker_ring == NULL) {
+ SW_LOG_ERR("Port %d not configured\n", i);
+ return -ESTALE;
+ }
+
+ /* check all queues are configured and mapped to ports*/
+ for (i = 0; i < sw->qid_count; i++)
+ if (sw->qids[i].iq[0] == NULL ||
+ sw->qids[i].cq_num_mapped_cqs == 0) {
+ SW_LOG_ERR("Queue %d not configured\n", i);
+ return -ENOLINK;
+ }
+
+ /* build up our prioritized array of qids */
+ /* We don't use qsort here, as if all/multiple entries have the same
+ * priority, the result is non-deterministic. From "man 3 qsort":
+ * "If two members compare as equal, their order in the sorted
+ * array is undefined."
+ */
+ uint32_t qidx = 0;
+ for (j = 0; j <= RTE_EVENT_DEV_PRIORITY_LOWEST; j++) {
+ for (i = 0; i < sw->qid_count; i++) {
+ if (sw->qids[i].priority == j) {
+ sw->qids_prioritized[qidx] = &sw->qids[i];
+ qidx++;
+ }
+ }
+ }
+
+ if (sw_xstats_init(sw) < 0)
+ return -EINVAL;
+
+ rte_smp_wmb();
+ sw->started = 1;
+
+ return 0;
+}
+
+static void
+sw_stop(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ sw_xstats_uninit(sw);
+ sw->started = 0;
+ rte_smp_wmb();
+}
+
+static int
+sw_close(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ uint32_t i;
+
+ for (i = 0; i < sw->qid_count; i++)
+ sw_queue_release(dev, i);
+ sw->qid_count = 0;
+
+ for (i = 0; i < sw->port_count; i++)
+ sw_port_release(&sw->ports[i]);
+ sw->port_count = 0;
+
+ memset(&sw->stats, 0, sizeof(sw->stats));
+ sw->sched_called = 0;
+ sw->sched_no_iq_enqueues = 0;
+ sw->sched_no_cq_enqueues = 0;
+ sw->sched_cq_qid_called = 0;
+
+ return 0;
+}
+
+static int
+assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *socket_id = opaque;
+ *socket_id = atoi(value);
+ if (*socket_id >= RTE_MAX_NUMA_NODES)
+ return -1;
+ return 0;
+}
+
+static int
+set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *quanta = opaque;
+ *quanta = atoi(value);
+ if (*quanta < 0 || *quanta >= 4096)
+ return -1;
+ return 0;
+}
+
+static int
+set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *credit = opaque;
+ *credit = atoi(value);
+ if (*credit < 0 || *credit >= 128)
+ return -1;
+ return 0;
+}
+
+static int
+sw_probe(struct rte_vdev_device *vdev)
+{
+ static const struct rte_eventdev_ops evdev_sw_ops = {
+ .dev_configure = sw_dev_configure,
+ .dev_infos_get = sw_info_get,
+ .dev_close = sw_close,
+ .dev_start = sw_start,
+ .dev_stop = sw_stop,
+ .dump = sw_dump,
+
+ .queue_def_conf = sw_queue_def_conf,
+ .queue_setup = sw_queue_setup,
+ .queue_release = sw_queue_release,
+ .port_def_conf = sw_port_def_conf,
+ .port_setup = sw_port_setup,
+ .port_release = sw_port_release,
+ .port_link = sw_port_link,
+ .port_unlink = sw_port_unlink,
+
+ .xstats_get = sw_xstats_get,
+ .xstats_get_names = sw_xstats_get_names,
+ .xstats_get_by_name = sw_xstats_get_by_name,
+ .xstats_reset = sw_xstats_reset,
+ };
+
+ static const char *const args[] = {
+ NUMA_NODE_ARG,
+ SCHED_QUANTA_ARG,
+ CREDIT_QUANTA_ARG,
+ NULL
+ };
+ const char *name;
+ const char *params;
+ struct rte_eventdev *dev;
+ struct sw_evdev *sw;
+ int socket_id = rte_socket_id();
+ int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
+ int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
+
+ name = rte_vdev_device_name(vdev);
+ params = rte_vdev_device_args(vdev);
+ if (params != NULL && params[0] != '\0') {
+ struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+ if (!kvlist) {
+ SW_LOG_INFO(
+ "Ignoring unsupported parameters when creating device '%s'\n",
+ name);
+ } else {
+ int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
+ assign_numa_node, &socket_id);
+ if (ret != 0) {
+ SW_LOG_ERR(
+ "%s: Error parsing numa node parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
+ set_sched_quanta, &sched_quanta);
+ if (ret != 0) {
+ SW_LOG_ERR(
+ "%s: Error parsing sched quanta parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
+ set_credit_quanta, &credit_quanta);
+ if (ret != 0) {
+ SW_LOG_ERR(
+ "%s: Error parsing credit quanta parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ rte_kvargs_free(kvlist);
+ }
+ }
+
+ SW_LOG_INFO(
+ "Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
+ name, socket_id, sched_quanta, credit_quanta);
+
+ dev = rte_event_pmd_vdev_init(name,
+ sizeof(struct sw_evdev), socket_id);
+ if (dev == NULL) {
+ SW_LOG_ERR("eventdev vdev init() failed");
+ return -EFAULT;
+ }
+ dev->dev_ops = &evdev_sw_ops;
+ dev->enqueue = sw_event_enqueue;
+ dev->enqueue_burst = sw_event_enqueue_burst;
+ dev->dequeue = sw_event_dequeue;
+ dev->dequeue_burst = sw_event_dequeue_burst;
+ dev->schedule = sw_event_schedule;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ sw = dev->data->dev_private;
+ sw->data = dev->data;
+
+ /* copy values passed from vdev command line to instance */
+ sw->credit_update_quanta = credit_quanta;
+ sw->sched_quanta = sched_quanta;
+
+ return 0;
+}
+
+static int
+sw_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ SW_LOG_INFO("Closing eventdev sw device %s\n", name);
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver evdev_sw_pmd_drv = {
+ .probe = sw_probe,
+ .remove = sw_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
+ SCHED_QUANTA_ARG "=<int>" CREDIT_QUANTA_ARG "=<int>");
diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
new file mode 100644
index 00000000..61c671d6
--- /dev/null
+++ b/drivers/event/sw/sw_evdev.h
@@ -0,0 +1,318 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SW_EVDEV_H_
+#define _SW_EVDEV_H_
+
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd.h>
+#include <rte_atomic.h>
+
+#define SW_DEFAULT_CREDIT_QUANTA 32
+#define SW_DEFAULT_SCHED_QUANTA 128
+#define SW_QID_NUM_FIDS 16384
+#define SW_IQS_MAX 4
+#define SW_Q_PRIORITY_MAX 255
+#define SW_PORTS_MAX 64
+#define MAX_SW_CONS_Q_DEPTH 128
+#define SW_INFLIGHT_EVENTS_TOTAL 4096
+/* allow for lots of over-provisioning */
+#define MAX_SW_PROD_Q_DEPTH 4096
+#define SW_FRAGMENTS_MAX 16
+
+/* report dequeue burst sizes in buckets */
+#define SW_DEQ_STAT_BUCKET_SHIFT 2
+/* how many packets pulled from port by sched */
+#define SCHED_DEQUEUE_BURST_SIZE 32
+
+#define SW_PORT_HIST_LIST (MAX_SW_PROD_Q_DEPTH) /* size of our history list */
+#define NUM_SAMPLES 64 /* how many data points use for average stats */
+
+#define EVENTDEV_NAME_SW_PMD event_sw
+#define SW_PMD_NAME RTE_STR(event_sw)
+
+#define SW_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)
+
+#define SW_NUM_POLL_BUCKETS (MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT)
+
+enum {
+ QE_FLAG_VALID_SHIFT = 0,
+ QE_FLAG_COMPLETE_SHIFT,
+ QE_FLAG_NOT_EOP_SHIFT,
+ _QE_FLAG_COUNT
+};
+
+#define QE_FLAG_VALID (1 << QE_FLAG_VALID_SHIFT) /* for NEW FWD, FRAG */
+#define QE_FLAG_COMPLETE (1 << QE_FLAG_COMPLETE_SHIFT) /* set for FWD, DROP */
+#define QE_FLAG_NOT_EOP (1 << QE_FLAG_NOT_EOP_SHIFT) /* set for FRAG only */
+
+static const uint8_t sw_qe_flag_map[] = {
+ QE_FLAG_VALID /* NEW Event */,
+ QE_FLAG_VALID | QE_FLAG_COMPLETE /* FWD Event */,
+ QE_FLAG_COMPLETE /* RELEASE Event */,
+
+ /* Values which can be used for future support for partial
+ * events, i.e. where one event comes back to the scheduler
+ * as multiple which need to be tracked together
+ */
+ QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP,
+};
+
+#ifdef RTE_LIBRTE_PMD_EVDEV_SW_DEBUG
+#define SW_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
+ SW_PMD_NAME, \
+ __func__, __LINE__, ## args)
+
+#define SW_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
+ SW_PMD_NAME, \
+ __func__, __LINE__, ## args)
+#else
+#define SW_LOG_INFO(fmt, args...)
+#define SW_LOG_DBG(fmt, args...)
+#endif
+
+#define SW_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
+ SW_PMD_NAME, \
+ __func__, __LINE__, ## args)
+
+/* Records basic event stats at a given point. Used in port and qid structs */
+struct sw_point_stats {
+ uint64_t rx_pkts;
+ uint64_t rx_dropped;
+ uint64_t tx_pkts;
+};
+
+/* structure used to track what port a flow (FID) is pinned to */
+struct sw_fid_t {
+ /* which CQ this FID is currently pinned to */
+ int32_t cq;
+ /* number of packets gone to the CQ with this FID */
+ uint32_t pcount;
+};
+
+struct reorder_buffer_entry {
+ uint16_t num_fragments; /**< Number of packet fragments */
+ uint16_t fragment_index; /**< Points to the oldest valid frag */
+ uint8_t ready; /**< Entry is ready to be reordered */
+ struct rte_event fragments[SW_FRAGMENTS_MAX];
+};
+
+struct sw_qid {
+ /* set when the QID has been initialized */
+ uint8_t initialized;
+ /* The type of this QID */
+ int8_t type;
+ /* Integer ID representing the queue. This is used in history lists,
+ * to identify the stage of processing.
+ */
+ uint32_t id;
+ struct sw_point_stats stats;
+
+ /* Internal priority rings for packets */
+ struct iq_ring *iq[SW_IQS_MAX];
+ uint32_t iq_pkt_mask; /* A mask to indicate packets in an IQ */
+ uint64_t iq_pkt_count[SW_IQS_MAX];
+
+ /* Information on what CQs are polling this IQ */
+ uint32_t cq_num_mapped_cqs;
+ uint32_t cq_next_tx; /* cq to write next (non-atomic) packet */
+ uint32_t cq_map[SW_PORTS_MAX];
+
+ /* Track flow ids for atomic load balancing */
+ struct sw_fid_t fids[SW_QID_NUM_FIDS];
+
+ /* Track packet order for reordering when needed */
+ struct reorder_buffer_entry *reorder_buffer; /*< pkts await reorder */
+ struct rte_ring *reorder_buffer_freelist; /* available reorder slots */
+ uint32_t reorder_buffer_index; /* oldest valid reorder buffer entry */
+ uint32_t window_size; /* Used to wrap reorder_buffer_index */
+
+ uint8_t priority;
+};
+
+struct sw_hist_list_entry {
+ int32_t qid;
+ int32_t fid;
+ struct reorder_buffer_entry *rob_entry;
+};
+
+struct sw_evdev;
+
+struct sw_port {
+ /* new enqueue / dequeue API doesn't have an instance pointer, only the
+ * pointer to the port being enqueue/dequeued from
+ */
+ struct sw_evdev *sw;
+
+ /* set when the port is initialized */
+ uint8_t initialized;
+ /* A numeric ID for the port */
+ uint8_t id;
+
+ int16_t is_directed; /** Takes from a single directed QID */
+ /**
+ * For loadbalanced we can optimise pulling packets from
+ * producers if there is no reordering involved
+ */
+ int16_t num_ordered_qids;
+
+ /** Ring and buffer for pulling events from workers for scheduling */
+ struct qe_ring *rx_worker_ring __rte_cache_aligned;
+ /** Ring and buffer for pushing packets to workers after scheduling */
+ struct qe_ring *cq_worker_ring;
+
+ /* hole */
+
+ /* num releases yet to be completed on this port */
+ uint16_t outstanding_releases __rte_cache_aligned;
+ uint16_t inflight_max; /* app requested max inflights for this port */
+ uint16_t inflight_credits; /* num credits this port has right now */
+
+ uint16_t last_dequeue_burst_sz; /* how big the burst was */
+ uint64_t last_dequeue_ticks; /* used to track burst processing time */
+ uint64_t avg_pkt_ticks; /* tracks average over NUM_SAMPLES burst */
+ uint64_t total_polls; /* how many polls were counted in stats */
+ uint64_t zero_polls; /* tracks polls returning nothing */
+ uint32_t poll_buckets[SW_NUM_POLL_BUCKETS];
+ /* bucket values in 4s for shorter reporting */
+
+ /* History list structs, containing info on pkts egressed to worker */
+ uint16_t hist_head __rte_cache_aligned;
+ uint16_t hist_tail;
+ uint16_t inflights;
+ struct sw_hist_list_entry hist_list[SW_PORT_HIST_LIST];
+
+ /* track packets in and out of this port */
+ struct sw_point_stats stats;
+
+
+ uint32_t pp_buf_start;
+ uint32_t pp_buf_count;
+ uint16_t cq_buf_count;
+ struct rte_event pp_buf[SCHED_DEQUEUE_BURST_SIZE];
+ struct rte_event cq_buf[MAX_SW_CONS_Q_DEPTH];
+
+ uint8_t num_qids_mapped;
+};
+
+struct sw_evdev {
+ struct rte_eventdev_data *data;
+
+ uint32_t port_count;
+ uint32_t qid_count;
+ uint32_t xstats_count;
+ struct sw_xstats_entry *xstats;
+ uint32_t xstats_count_mode_dev;
+ uint32_t xstats_count_mode_port;
+ uint32_t xstats_count_mode_queue;
+
+ /* Contains all ports - load balanced and directed */
+ struct sw_port ports[SW_PORTS_MAX] __rte_cache_aligned;
+
+ rte_atomic32_t inflights __rte_cache_aligned;
+
+ /*
+ * max events in this instance. Cached here for performance.
+ * (also available in data->conf.nb_events_limit)
+ */
+ uint32_t nb_events_limit;
+
+ /* Internal queues - one per logical queue */
+ struct sw_qid qids[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;
+
+ /* Cache how many packets are in each cq */
+ uint16_t cq_ring_space[SW_PORTS_MAX] __rte_cache_aligned;
+
+ /* Array of pointers to load-balanced QIDs sorted by priority level */
+ struct sw_qid *qids_prioritized[RTE_EVENT_MAX_QUEUES_PER_DEV];
+
+ /* Stats */
+ struct sw_point_stats stats __rte_cache_aligned;
+ uint64_t sched_called;
+ int32_t sched_quanta;
+ uint64_t sched_no_iq_enqueues;
+ uint64_t sched_no_cq_enqueues;
+ uint64_t sched_cq_qid_called;
+
+ uint8_t started;
+ uint32_t credit_update_quanta;
+
+ /* store num stats and offset of the stats for each port */
+ uint16_t xstats_count_per_port[SW_PORTS_MAX];
+ uint16_t xstats_offset_for_port[SW_PORTS_MAX];
+ /* store num stats and offset of the stats for each queue */
+ uint16_t xstats_count_per_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint16_t xstats_offset_for_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
+};
+
+static inline struct sw_evdev *
+sw_pmd_priv(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+static inline const struct sw_evdev *
+sw_pmd_priv_const(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+uint16_t sw_event_enqueue(void *port, const struct rte_event *ev);
+uint16_t sw_event_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t num);
+
+uint16_t sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
+uint16_t sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
+ uint64_t wait);
+void sw_event_schedule(struct rte_eventdev *dev);
+int sw_xstats_init(struct sw_evdev *dev);
+int sw_xstats_uninit(struct sw_evdev *dev);
+int sw_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size);
+int sw_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n);
+uint64_t sw_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id);
+int sw_xstats_reset(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids);
+
+
+#endif /* _SW_EVDEV_H_ */
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
new file mode 100644
index 00000000..a333a6f0
--- /dev/null
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -0,0 +1,601 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ring.h>
+#include <rte_hash_crc.h>
+#include "sw_evdev.h"
+#include "iq_ring.h"
+#include "event_ring.h"
+
+#define SW_IQS_MASK (SW_IQS_MAX-1)
+
+/* Retrieve the highest priority IQ or -1 if no pkts available. Doing the
+ * CLZ twice is faster than caching the value due to data dependencies
+ */
+#define PKT_MASK_TO_IQ(pkts) \
+ (__builtin_ctz(pkts | (1 << SW_IQS_MAX)))
+
+#if SW_IQS_MAX != 4
+#error Misconfigured PRIO_TO_IQ caused by SW_IQS_MAX value change
+#endif
+#define PRIO_TO_IQ(prio) (prio >> 6)
+
+#define MAX_PER_IQ_DEQUEUE 48
+#define FLOWID_MASK (SW_QID_NUM_FIDS-1)
+/* use cheap bit mixing, we only need to lose a few bits */
+#define SW_HASH_FLOWID(f) (((f) ^ (f >> 10)) & FLOWID_MASK)
+
+static inline uint32_t
+sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
+ uint32_t iq_num, unsigned int count)
+{
+ struct rte_event qes[MAX_PER_IQ_DEQUEUE]; /* count <= MAX */
+ struct rte_event blocked_qes[MAX_PER_IQ_DEQUEUE];
+ uint32_t nb_blocked = 0;
+ uint32_t i;
+
+ if (count > MAX_PER_IQ_DEQUEUE)
+ count = MAX_PER_IQ_DEQUEUE;
+
+ /* This is the QID ID. The QID ID is static, hence it can be
+ * used to identify the stage of processing in history lists etc
+ */
+ uint32_t qid_id = qid->id;
+
+ iq_ring_dequeue_burst(qid->iq[iq_num], qes, count);
+ for (i = 0; i < count; i++) {
+ const struct rte_event *qe = &qes[i];
+ const uint16_t flow_id = SW_HASH_FLOWID(qes[i].flow_id);
+ struct sw_fid_t *fid = &qid->fids[flow_id];
+ int cq = fid->cq;
+
+ if (cq < 0) {
+ uint32_t cq_idx = qid->cq_next_tx++;
+ if (qid->cq_next_tx == qid->cq_num_mapped_cqs)
+ qid->cq_next_tx = 0;
+ cq = qid->cq_map[cq_idx];
+
+ /* find least used */
+ int cq_free_cnt = sw->cq_ring_space[cq];
+ for (cq_idx = 0; cq_idx < qid->cq_num_mapped_cqs;
+ cq_idx++) {
+ int test_cq = qid->cq_map[cq_idx];
+ int test_cq_free = sw->cq_ring_space[test_cq];
+ if (test_cq_free > cq_free_cnt) {
+ cq = test_cq;
+ cq_free_cnt = test_cq_free;
+ }
+ }
+
+ fid->cq = cq; /* this pins early */
+ }
+
+ if (sw->cq_ring_space[cq] == 0 ||
+ sw->ports[cq].inflights == SW_PORT_HIST_LIST) {
+ blocked_qes[nb_blocked++] = *qe;
+ continue;
+ }
+
+ struct sw_port *p = &sw->ports[cq];
+
+ /* at this point we can queue up the packet on the cq_buf */
+ fid->pcount++;
+ p->cq_buf[p->cq_buf_count++] = *qe;
+ p->inflights++;
+ sw->cq_ring_space[cq]--;
+
+ int head = (p->hist_head++ & (SW_PORT_HIST_LIST-1));
+ p->hist_list[head].fid = flow_id;
+ p->hist_list[head].qid = qid_id;
+
+ p->stats.tx_pkts++;
+ qid->stats.tx_pkts++;
+
+ /* if we just filled in the last slot, flush the buffer */
+ if (sw->cq_ring_space[cq] == 0) {
+ struct qe_ring *worker = p->cq_worker_ring;
+ qe_ring_enqueue_burst(worker, p->cq_buf,
+ p->cq_buf_count,
+ &sw->cq_ring_space[cq]);
+ p->cq_buf_count = 0;
+ }
+ }
+ iq_ring_put_back(qid->iq[iq_num], blocked_qes, nb_blocked);
+
+ return count - nb_blocked;
+}
+
+static inline uint32_t
+sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
+ uint32_t iq_num, unsigned int count, int keep_order)
+{
+ uint32_t i;
+ uint32_t cq_idx = qid->cq_next_tx;
+
+ /* This is the QID ID. The QID ID is static, hence it can be
+ * used to identify the stage of processing in history lists etc
+ */
+ uint32_t qid_id = qid->id;
+
+ if (count > MAX_PER_IQ_DEQUEUE)
+ count = MAX_PER_IQ_DEQUEUE;
+
+ if (keep_order)
+ /* only schedule as many as we have reorder buffer entries */
+ count = RTE_MIN(count,
+ rte_ring_count(qid->reorder_buffer_freelist));
+
+ for (i = 0; i < count; i++) {
+ const struct rte_event *qe = iq_ring_peek(qid->iq[iq_num]);
+ uint32_t cq_check_count = 0;
+ uint32_t cq;
+
+ /*
+ * for parallel, just send to next available CQ in round-robin
+ * fashion. So scan for an available CQ. If all CQs are full
+ * just return and move on to next QID
+ */
+ do {
+ if (++cq_check_count > qid->cq_num_mapped_cqs)
+ goto exit;
+ cq = qid->cq_map[cq_idx];
+ if (++cq_idx == qid->cq_num_mapped_cqs)
+ cq_idx = 0;
+ } while (qe_ring_free_count(sw->ports[cq].cq_worker_ring) == 0 ||
+ sw->ports[cq].inflights == SW_PORT_HIST_LIST);
+
+ struct sw_port *p = &sw->ports[cq];
+ if (sw->cq_ring_space[cq] == 0 ||
+ p->inflights == SW_PORT_HIST_LIST)
+ break;
+
+ sw->cq_ring_space[cq]--;
+
+ qid->stats.tx_pkts++;
+
+ const int head = (p->hist_head & (SW_PORT_HIST_LIST-1));
+ p->hist_list[head].fid = SW_HASH_FLOWID(qe->flow_id);
+ p->hist_list[head].qid = qid_id;
+
+ if (keep_order)
+ rte_ring_sc_dequeue(qid->reorder_buffer_freelist,
+ (void *)&p->hist_list[head].rob_entry);
+
+ sw->ports[cq].cq_buf[sw->ports[cq].cq_buf_count++] = *qe;
+ iq_ring_pop(qid->iq[iq_num]);
+
+ rte_compiler_barrier();
+ p->inflights++;
+ p->stats.tx_pkts++;
+ p->hist_head++;
+ }
+exit:
+ qid->cq_next_tx = cq_idx;
+ return i;
+}
+
+static uint32_t
+sw_schedule_dir_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
+ uint32_t iq_num, unsigned int count __rte_unused)
+{
+ uint32_t cq_id = qid->cq_map[0];
+ struct sw_port *port = &sw->ports[cq_id];
+
+ /* get max burst enq size for cq_ring */
+ uint32_t count_free = sw->cq_ring_space[cq_id];
+ if (count_free == 0)
+ return 0;
+
+ /* burst dequeue from the QID IQ ring */
+ struct iq_ring *ring = qid->iq[iq_num];
+ uint32_t ret = iq_ring_dequeue_burst(ring,
+ &port->cq_buf[port->cq_buf_count], count_free);
+ port->cq_buf_count += ret;
+
+ /* Update QID, Port and Total TX stats */
+ qid->stats.tx_pkts += ret;
+ port->stats.tx_pkts += ret;
+
+ /* Subtract credits from cached value */
+ sw->cq_ring_space[cq_id] -= ret;
+
+ return ret;
+}
+
+static uint32_t
+sw_schedule_qid_to_cq(struct sw_evdev *sw)
+{
+ uint32_t pkts = 0;
+ uint32_t qid_idx;
+
+ sw->sched_cq_qid_called++;
+
+ for (qid_idx = 0; qid_idx < sw->qid_count; qid_idx++) {
+ struct sw_qid *qid = sw->qids_prioritized[qid_idx];
+
+ int type = qid->type;
+ int iq_num = PKT_MASK_TO_IQ(qid->iq_pkt_mask);
+
+ /* zero mapped CQs indicates directed */
+ if (iq_num >= SW_IQS_MAX)
+ continue;
+
+ uint32_t pkts_done = 0;
+ uint32_t count = iq_ring_count(qid->iq[iq_num]);
+
+ if (count > 0) {
+ if (type == SW_SCHED_TYPE_DIRECT)
+ pkts_done += sw_schedule_dir_to_cq(sw, qid,
+ iq_num, count);
+ else if (type == RTE_SCHED_TYPE_ATOMIC)
+ pkts_done += sw_schedule_atomic_to_cq(sw, qid,
+ iq_num, count);
+ else
+ pkts_done += sw_schedule_parallel_to_cq(sw, qid,
+ iq_num, count,
+ type == RTE_SCHED_TYPE_ORDERED);
+ }
+
+ /* Check if the IQ that was polled is now empty, and unset it
+ * in the IQ mask if its empty.
+ */
+ int all_done = (pkts_done == count);
+
+ qid->iq_pkt_mask &= ~(all_done << (iq_num));
+ pkts += pkts_done;
+ }
+
+ return pkts;
+}
+
+/* This function will perform re-ordering of packets, and injecting into
+ * the appropriate QID IQ. As LB and DIR QIDs are in the same array, but *NOT*
+ * contiguous in that array, this function accepts a "range" of QIDs to scan.
+ */
+static uint16_t
+sw_schedule_reorder(struct sw_evdev *sw, int qid_start, int qid_end)
+{
+ /* Perform egress reordering */
+ struct rte_event *qe;
+ uint32_t pkts_iter = 0;
+
+ for (; qid_start < qid_end; qid_start++) {
+ struct sw_qid *qid = &sw->qids[qid_start];
+ int i, num_entries_in_use;
+
+ if (qid->type != RTE_SCHED_TYPE_ORDERED)
+ continue;
+
+ num_entries_in_use = rte_ring_free_count(
+ qid->reorder_buffer_freelist);
+
+ for (i = 0; i < num_entries_in_use; i++) {
+ struct reorder_buffer_entry *entry;
+ int j;
+
+ entry = &qid->reorder_buffer[qid->reorder_buffer_index];
+
+ if (!entry->ready)
+ break;
+
+ for (j = 0; j < entry->num_fragments; j++) {
+ uint16_t dest_qid;
+ uint16_t dest_iq;
+
+ int idx = entry->fragment_index + j;
+ qe = &entry->fragments[idx];
+
+ dest_qid = qe->queue_id;
+ dest_iq = PRIO_TO_IQ(qe->priority);
+
+ if (dest_qid >= sw->qid_count) {
+ sw->stats.rx_dropped++;
+ continue;
+ }
+
+ struct sw_qid *dest_qid_ptr =
+ &sw->qids[dest_qid];
+ const struct iq_ring *dest_iq_ptr =
+ dest_qid_ptr->iq[dest_iq];
+ if (iq_ring_free_count(dest_iq_ptr) == 0)
+ break;
+
+ pkts_iter++;
+
+ struct sw_qid *q = &sw->qids[dest_qid];
+ struct iq_ring *r = q->iq[dest_iq];
+
+ /* we checked for space above, so enqueue must
+ * succeed
+ */
+ iq_ring_enqueue(r, qe);
+ q->iq_pkt_mask |= (1 << (dest_iq));
+ q->iq_pkt_count[dest_iq]++;
+ q->stats.rx_pkts++;
+ }
+
+ entry->ready = (j != entry->num_fragments);
+ entry->num_fragments -= j;
+ entry->fragment_index += j;
+
+ if (!entry->ready) {
+ entry->fragment_index = 0;
+
+ rte_ring_sp_enqueue(
+ qid->reorder_buffer_freelist,
+ entry);
+
+ qid->reorder_buffer_index++;
+ qid->reorder_buffer_index %= qid->window_size;
+ }
+ }
+ }
+ return pkts_iter;
+}
+
+static inline void __attribute__((always_inline))
+sw_refill_pp_buf(struct sw_evdev *sw, struct sw_port *port)
+{
+ RTE_SET_USED(sw);
+ struct qe_ring *worker = port->rx_worker_ring;
+ port->pp_buf_start = 0;
+ port->pp_buf_count = qe_ring_dequeue_burst(worker, port->pp_buf,
+ RTE_DIM(port->pp_buf));
+}
+
+static inline uint32_t __attribute__((always_inline))
+__pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
+{
+ static struct reorder_buffer_entry dummy_rob;
+ uint32_t pkts_iter = 0;
+ struct sw_port *port = &sw->ports[port_id];
+
+ /* If shadow ring has 0 pkts, pull from worker ring */
+ if (port->pp_buf_count == 0)
+ sw_refill_pp_buf(sw, port);
+
+ while (port->pp_buf_count) {
+ const struct rte_event *qe = &port->pp_buf[port->pp_buf_start];
+ struct sw_hist_list_entry *hist_entry = NULL;
+ uint8_t flags = qe->op;
+ const uint16_t eop = !(flags & QE_FLAG_NOT_EOP);
+ int needs_reorder = 0;
+ /* if no-reordering, having PARTIAL == NEW */
+ if (!allow_reorder && !eop)
+ flags = QE_FLAG_VALID;
+
+ /*
+ * if we don't have space for this packet in an IQ,
+ * then move on to next queue. Technically, for a
+ * packet that needs reordering, we don't need to check
+ * here, but it simplifies things not to special-case
+ */
+ uint32_t iq_num = PRIO_TO_IQ(qe->priority);
+ struct sw_qid *qid = &sw->qids[qe->queue_id];
+
+ if ((flags & QE_FLAG_VALID) &&
+ iq_ring_free_count(qid->iq[iq_num]) == 0)
+ break;
+
+ /* now process based on flags. Note that for directed
+ * queues, the enqueue_flush masks off all but the
+ * valid flag. This makes FWD and PARTIAL enqueues just
+ * NEW type, and makes DROPS no-op calls.
+ */
+ if ((flags & QE_FLAG_COMPLETE) && port->inflights > 0) {
+ const uint32_t hist_tail = port->hist_tail &
+ (SW_PORT_HIST_LIST - 1);
+
+ hist_entry = &port->hist_list[hist_tail];
+ const uint32_t hist_qid = hist_entry->qid;
+ const uint32_t hist_fid = hist_entry->fid;
+
+ struct sw_fid_t *fid =
+ &sw->qids[hist_qid].fids[hist_fid];
+ fid->pcount -= eop;
+ if (fid->pcount == 0)
+ fid->cq = -1;
+
+ if (allow_reorder) {
+ /* set reorder ready if an ordered QID */
+ uintptr_t rob_ptr =
+ (uintptr_t)hist_entry->rob_entry;
+ const uintptr_t valid = (rob_ptr != 0);
+ needs_reorder = valid;
+ rob_ptr |=
+ ((valid - 1) & (uintptr_t)&dummy_rob);
+ struct reorder_buffer_entry *tmp_rob_ptr =
+ (struct reorder_buffer_entry *)rob_ptr;
+ tmp_rob_ptr->ready = eop * needs_reorder;
+ }
+
+ port->inflights -= eop;
+ port->hist_tail += eop;
+ }
+ if (flags & QE_FLAG_VALID) {
+ port->stats.rx_pkts++;
+
+ if (allow_reorder && needs_reorder) {
+ struct reorder_buffer_entry *rob_entry =
+ hist_entry->rob_entry;
+
+ hist_entry->rob_entry = NULL;
+ /* Although fragmentation not currently
+ * supported by eventdev API, we support it
+ * here. Open: How do we alert the user that
+ * they've exceeded max frags?
+ */
+ int num_frag = rob_entry->num_fragments;
+ if (num_frag == SW_FRAGMENTS_MAX)
+ sw->stats.rx_dropped++;
+ else {
+ int idx = rob_entry->num_fragments++;
+ rob_entry->fragments[idx] = *qe;
+ }
+ goto end_qe;
+ }
+
+ /* Use the iq_num from above to push the QE
+ * into the qid at the right priority
+ */
+
+ qid->iq_pkt_mask |= (1 << (iq_num));
+ iq_ring_enqueue(qid->iq[iq_num], qe);
+ qid->iq_pkt_count[iq_num]++;
+ qid->stats.rx_pkts++;
+ pkts_iter++;
+ }
+
+end_qe:
+ port->pp_buf_start++;
+ port->pp_buf_count--;
+ } /* while (avail_qes) */
+
+ return pkts_iter;
+}
+
+static uint32_t
+sw_schedule_pull_port_lb(struct sw_evdev *sw, uint32_t port_id)
+{
+ return __pull_port_lb(sw, port_id, 1);
+}
+
+static uint32_t
+sw_schedule_pull_port_no_reorder(struct sw_evdev *sw, uint32_t port_id)
+{
+ return __pull_port_lb(sw, port_id, 0);
+}
+
+static uint32_t
+sw_schedule_pull_port_dir(struct sw_evdev *sw, uint32_t port_id)
+{
+ uint32_t pkts_iter = 0;
+ struct sw_port *port = &sw->ports[port_id];
+
+ /* If shadow ring has 0 pkts, pull from worker ring */
+ if (port->pp_buf_count == 0)
+ sw_refill_pp_buf(sw, port);
+
+ while (port->pp_buf_count) {
+ const struct rte_event *qe = &port->pp_buf[port->pp_buf_start];
+ uint8_t flags = qe->op;
+
+ if ((flags & QE_FLAG_VALID) == 0)
+ goto end_qe;
+
+ uint32_t iq_num = PRIO_TO_IQ(qe->priority);
+ struct sw_qid *qid = &sw->qids[qe->queue_id];
+ struct iq_ring *iq_ring = qid->iq[iq_num];
+
+ if (iq_ring_free_count(iq_ring) == 0)
+ break; /* move to next port */
+
+ port->stats.rx_pkts++;
+
+ /* Use the iq_num from above to push the QE
+ * into the qid at the right priority
+ */
+ qid->iq_pkt_mask |= (1 << (iq_num));
+ iq_ring_enqueue(iq_ring, qe);
+ qid->iq_pkt_count[iq_num]++;
+ qid->stats.rx_pkts++;
+ pkts_iter++;
+
+end_qe:
+ port->pp_buf_start++;
+ port->pp_buf_count--;
+ } /* while port->pp_buf_count */
+
+ return pkts_iter;
+}
+
+void
+sw_event_schedule(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ uint32_t in_pkts, out_pkts;
+ uint32_t out_pkts_total = 0, in_pkts_total = 0;
+ int32_t sched_quanta = sw->sched_quanta;
+ uint32_t i;
+
+ sw->sched_called++;
+ if (!sw->started)
+ return;
+
+ do {
+ uint32_t in_pkts_this_iteration = 0;
+
+ /* Pull from rx_ring for ports */
+ do {
+ in_pkts = 0;
+ for (i = 0; i < sw->port_count; i++)
+ if (sw->ports[i].is_directed)
+ in_pkts += sw_schedule_pull_port_dir(sw, i);
+ else if (sw->ports[i].num_ordered_qids > 0)
+ in_pkts += sw_schedule_pull_port_lb(sw, i);
+ else
+ in_pkts += sw_schedule_pull_port_no_reorder(sw, i);
+
+ /* QID scan for re-ordered */
+ in_pkts += sw_schedule_reorder(sw, 0,
+ sw->qid_count);
+ in_pkts_this_iteration += in_pkts;
+ } while (in_pkts > 4 &&
+ (int)in_pkts_this_iteration < sched_quanta);
+
+ out_pkts = 0;
+ out_pkts += sw_schedule_qid_to_cq(sw);
+ out_pkts_total += out_pkts;
+ in_pkts_total += in_pkts_this_iteration;
+
+ if (in_pkts == 0 && out_pkts == 0)
+ break;
+ } while ((int)out_pkts_total < sched_quanta);
+
+ /* push all the internal buffered QEs in port->cq_ring to the
+ * worker cores: aka, do the ring transfers batched.
+ */
+ for (i = 0; i < sw->port_count; i++) {
+ struct qe_ring *worker = sw->ports[i].cq_worker_ring;
+ qe_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
+ sw->ports[i].cq_buf_count,
+ &sw->cq_ring_space[i]);
+ sw->ports[i].cq_buf_count = 0;
+ }
+
+ sw->stats.tx_pkts += out_pkts_total;
+ sw->stats.rx_pkts += in_pkts_total;
+
+ sw->sched_no_iq_enqueues += (in_pkts_total == 0);
+ sw->sched_no_cq_enqueues += (out_pkts_total == 0);
+
+}
diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c
new file mode 100644
index 00000000..9cb6bef5
--- /dev/null
+++ b/drivers/event/sw/sw_evdev_worker.c
@@ -0,0 +1,185 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+
+#include "sw_evdev.h"
+#include "event_ring.h"
+
+#define PORT_ENQUEUE_MAX_BURST_SIZE 64
+
+static inline void
+sw_event_release(struct sw_port *p, uint8_t index)
+{
+ /*
+ * Drops the next outstanding event in our history. Used on dequeue
+ * to clear any history before dequeuing more events.
+ */
+ RTE_SET_USED(index);
+
+ /* create drop message */
+ struct rte_event ev;
+ ev.op = sw_qe_flag_map[RTE_EVENT_OP_RELEASE];
+
+ uint16_t free_count;
+ qe_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);
+
+ /* each release returns one credit */
+ p->outstanding_releases--;
+ p->inflight_credits++;
+}
+
+uint16_t
+sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
+{
+ int32_t i;
+ uint8_t new_ops[PORT_ENQUEUE_MAX_BURST_SIZE];
+ struct sw_port *p = port;
+ struct sw_evdev *sw = (void *)p->sw;
+ uint32_t sw_inflights = rte_atomic32_read(&sw->inflights);
+
+ if (unlikely(p->inflight_max < sw_inflights))
+ return 0;
+
+ if (num > PORT_ENQUEUE_MAX_BURST_SIZE)
+ num = PORT_ENQUEUE_MAX_BURST_SIZE;
+
+ if (p->inflight_credits < num) {
+ /* check if event enqueue brings port over max threshold */
+ uint32_t credit_update_quanta = sw->credit_update_quanta;
+ if (sw_inflights + credit_update_quanta > sw->nb_events_limit)
+ return 0;
+
+ rte_atomic32_add(&sw->inflights, credit_update_quanta);
+ p->inflight_credits += (credit_update_quanta);
+
+ if (p->inflight_credits < num)
+ return 0;
+ }
+
+ for (i = 0; i < num; i++) {
+ int op = ev[i].op;
+ int outstanding = p->outstanding_releases > 0;
+ const uint8_t invalid_qid = (ev[i].queue_id >= sw->qid_count);
+
+ p->inflight_credits -= (op == RTE_EVENT_OP_NEW);
+ p->inflight_credits += (op == RTE_EVENT_OP_RELEASE) *
+ outstanding;
+
+ new_ops[i] = sw_qe_flag_map[op];
+ new_ops[i] &= ~(invalid_qid << QE_FLAG_VALID_SHIFT);
+
+ /* FWD and RELEASE packets will both resolve to taken (assuming
+ * correct usage of the API), providing very high correct
+ * prediction rate.
+ */
+ if ((new_ops[i] & QE_FLAG_COMPLETE) && outstanding)
+ p->outstanding_releases--;
+
+ /* error case: branch to avoid touching p->stats */
+ if (unlikely(invalid_qid)) {
+ p->stats.rx_dropped++;
+ p->inflight_credits++;
+ }
+ }
+
+ /* returns number of events actually enqueued */
+ uint32_t enq = qe_ring_enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
+ new_ops);
+ if (p->outstanding_releases == 0 && p->last_dequeue_burst_sz != 0) {
+ uint64_t burst_ticks = rte_get_timer_cycles() -
+ p->last_dequeue_ticks;
+ uint64_t burst_pkt_ticks =
+ burst_ticks / p->last_dequeue_burst_sz;
+ p->avg_pkt_ticks -= p->avg_pkt_ticks / NUM_SAMPLES;
+ p->avg_pkt_ticks += burst_pkt_ticks / NUM_SAMPLES;
+ p->last_dequeue_ticks = 0;
+ }
+ return enq;
+}
+
+uint16_t
+sw_event_enqueue(void *port, const struct rte_event *ev)
+{
+ return sw_event_enqueue_burst(port, ev, 1);
+}
+
+uint16_t
+sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
+ uint64_t wait)
+{
+ RTE_SET_USED(wait);
+ struct sw_port *p = (void *)port;
+ struct sw_evdev *sw = (void *)p->sw;
+ struct qe_ring *ring = p->cq_worker_ring;
+ uint32_t credit_update_quanta = sw->credit_update_quanta;
+
+ /* check that all previous dequeues have been released */
+ if (!p->is_directed) {
+ uint16_t out_rels = p->outstanding_releases;
+ uint16_t i;
+ for (i = 0; i < out_rels; i++)
+ sw_event_release(p, i);
+ }
+
+ /* returns number of events actually dequeued */
+ uint16_t ndeq = qe_ring_dequeue_burst(ring, ev, num);
+ if (unlikely(ndeq == 0)) {
+ p->outstanding_releases = 0;
+ p->zero_polls++;
+ p->total_polls++;
+ goto end;
+ }
+
+ /* only add credits for directed ports - LB ports send RELEASEs */
+ p->inflight_credits += ndeq * p->is_directed;
+ p->outstanding_releases = ndeq;
+ p->last_dequeue_burst_sz = ndeq;
+ p->last_dequeue_ticks = rte_get_timer_cycles();
+ p->poll_buckets[(ndeq - 1) >> SW_DEQ_STAT_BUCKET_SHIFT]++;
+ p->total_polls++;
+
+end:
+ if (p->inflight_credits >= credit_update_quanta * 2 &&
+ p->inflight_credits > credit_update_quanta + ndeq) {
+ rte_atomic32_sub(&sw->inflights, credit_update_quanta);
+ p->inflight_credits -= credit_update_quanta;
+ }
+ return ndeq;
+}
+
+uint16_t
+sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait)
+{
+ return sw_event_dequeue_burst(port, ev, 1, wait);
+}
diff --git a/drivers/event/sw/sw_evdev_xstats.c b/drivers/event/sw/sw_evdev_xstats.c
new file mode 100644
index 00000000..c7b1abe8
--- /dev/null
+++ b/drivers/event/sw/sw_evdev_xstats.c
@@ -0,0 +1,674 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "sw_evdev.h"
+#include "iq_ring.h"
+#include "event_ring.h"
+
+enum xstats_type {
+ /* common stats */
+ rx,
+ tx,
+ dropped,
+ inflight,
+ calls,
+ credits,
+ /* device instance specific */
+ no_iq_enq,
+ no_cq_enq,
+ /* port_specific */
+ rx_used,
+ rx_free,
+ tx_used,
+ tx_free,
+ pkt_cycles,
+ poll_return, /* for zero-count and used also for port bucket loop */
+ /* qid_specific */
+ iq_size,
+ iq_used,
+ /* qid port mapping specific */
+ pinned,
+};
+
+typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
+ uint16_t obj_idx, /* port or queue id */
+ enum xstats_type stat, int extra_arg);
+
+struct sw_xstats_entry {
+ struct rte_event_dev_xstats_name name;
+ xstats_fn fn;
+ uint16_t obj_idx;
+ enum xstats_type stat;
+ enum rte_event_dev_xstats_mode mode;
+ int extra_arg;
+ uint8_t reset_allowed; /* when set, this value can be reset */
+ uint64_t reset_value; /* an offset to be taken away to emulate resets */
+};
+
+static uint64_t
+get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
+ enum xstats_type type, int extra_arg __rte_unused)
+{
+ switch (type) {
+ case rx: return sw->stats.rx_pkts;
+ case tx: return sw->stats.tx_pkts;
+ case dropped: return sw->stats.rx_dropped;
+ case calls: return sw->sched_called;
+ case no_iq_enq: return sw->sched_no_iq_enqueues;
+ case no_cq_enq: return sw->sched_no_cq_enqueues;
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg __rte_unused)
+{
+ const struct sw_port *p = &sw->ports[obj_idx];
+
+ switch (type) {
+ case rx: return p->stats.rx_pkts;
+ case tx: return p->stats.tx_pkts;
+ case dropped: return p->stats.rx_dropped;
+ case inflight: return p->inflights;
+ case pkt_cycles: return p->avg_pkt_ticks;
+ case calls: return p->total_polls;
+ case credits: return p->inflight_credits;
+ case poll_return: return p->zero_polls;
+ case rx_used: return qe_ring_count(p->rx_worker_ring);
+ case rx_free: return qe_ring_free_count(p->rx_worker_ring);
+ case tx_used: return qe_ring_count(p->cq_worker_ring);
+ case tx_free: return qe_ring_free_count(p->cq_worker_ring);
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_port_bucket_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg)
+{
+ const struct sw_port *p = &sw->ports[obj_idx];
+
+ switch (type) {
+ case poll_return: return p->poll_buckets[extra_arg];
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_qid_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg __rte_unused)
+{
+ const struct sw_qid *qid = &sw->qids[obj_idx];
+
+ switch (type) {
+ case rx: return qid->stats.rx_pkts;
+ case tx: return qid->stats.tx_pkts;
+ case dropped: return qid->stats.rx_dropped;
+ case inflight:
+ do {
+ uint64_t infl = 0;
+ unsigned int i;
+ for (i = 0; i < RTE_DIM(qid->fids); i++)
+ infl += qid->fids[i].pcount;
+ return infl;
+ } while (0);
+ break;
+ case iq_size: return RTE_DIM(qid->iq[0]->ring);
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_qid_iq_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg)
+{
+ const struct sw_qid *qid = &sw->qids[obj_idx];
+ const int iq_idx = extra_arg;
+
+ switch (type) {
+ case iq_used: return iq_ring_count(qid->iq[iq_idx]);
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_qid_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg)
+{
+ const struct sw_qid *qid = &sw->qids[obj_idx];
+ uint16_t port = extra_arg;
+
+ switch (type) {
+ case pinned:
+ do {
+ uint64_t pin = 0;
+ unsigned int i;
+ for (i = 0; i < RTE_DIM(qid->fids); i++)
+ if (qid->fids[i].cq == port)
+ pin++;
+ return pin;
+ } while (0);
+ break;
+ default: return -1;
+ }
+}
+
+int
+sw_xstats_init(struct sw_evdev *sw)
+{
+ /*
+ * define the stats names and types. Used to build up the device
+ * xstats array
+ * There are multiple set of stats:
+ * - device-level,
+ * - per-port,
+ * - per-port-dequeue-burst-sizes
+ * - per-qid,
+ * - per-iq
+ * - per-port-per-qid
+ *
+ * For each of these sets, we have three parallel arrays, one for the
+ * names, the other for the stat type parameter to be passed in the fn
+ * call to get that stat. The third array allows resetting or not.
+ * All these arrays must be kept in sync
+ */
+ static const char * const dev_stats[] = { "rx", "tx", "drop",
+ "sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
+ };
+ static const enum xstats_type dev_types[] = { rx, tx, dropped,
+ calls, no_iq_enq, no_cq_enq,
+ };
+ /* all device stats are allowed to be reset */
+
+ static const char * const port_stats[] = {"rx", "tx", "drop",
+ "inflight", "avg_pkt_cycles", "credits",
+ "rx_ring_used", "rx_ring_free",
+ "cq_ring_used", "cq_ring_free",
+ "dequeue_calls", "dequeues_returning_0",
+ };
+ static const enum xstats_type port_types[] = { rx, tx, dropped,
+ inflight, pkt_cycles, credits,
+ rx_used, rx_free, tx_used, tx_free,
+ calls, poll_return,
+ };
+ static const uint8_t port_reset_allowed[] = {1, 1, 1,
+ 0, 1, 0,
+ 0, 0, 0, 0,
+ 1, 1,
+ };
+
+ static const char * const port_bucket_stats[] = {
+ "dequeues_returning" };
+ static const enum xstats_type port_bucket_types[] = { poll_return };
+ /* all bucket dequeues are allowed to be reset, handled in loop below */
+
+ static const char * const qid_stats[] = {"rx", "tx", "drop",
+ "inflight", "iq_size"
+ };
+ static const enum xstats_type qid_types[] = { rx, tx, dropped,
+ inflight, iq_size
+ };
+ static const uint8_t qid_reset_allowed[] = {1, 1, 1,
+ 0, 0
+ };
+
+ static const char * const qid_iq_stats[] = { "used" };
+ static const enum xstats_type qid_iq_types[] = { iq_used };
+ /* reset allowed */
+
+ static const char * const qid_port_stats[] = { "pinned_flows" };
+ static const enum xstats_type qid_port_types[] = { pinned };
+ /* reset allowed */
+ /* ---- end of stat definitions ---- */
+
+ /* check sizes, since a missed comma can lead to strings being
+ * joined by the compiler.
+ */
+ RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_iq_stats) != RTE_DIM(qid_iq_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_port_stats) != RTE_DIM(qid_port_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(port_bucket_stats) !=
+ RTE_DIM(port_bucket_types));
+
+ RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));
+
+ /* other vars */
+ const uint32_t cons_bkt_shift =
+ (MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT);
+ const unsigned int count = RTE_DIM(dev_stats) +
+ sw->port_count * RTE_DIM(port_stats) +
+ sw->port_count * RTE_DIM(port_bucket_stats) *
+ (cons_bkt_shift + 1) +
+ sw->qid_count * RTE_DIM(qid_stats) +
+ sw->qid_count * SW_IQS_MAX * RTE_DIM(qid_iq_stats) +
+ sw->qid_count * sw->port_count *
+ RTE_DIM(qid_port_stats);
+ unsigned int i, port, qid, iq, bkt, stat = 0;
+
+ sw->xstats = rte_zmalloc_socket(NULL, sizeof(sw->xstats[0]) * count, 0,
+ sw->data->socket_id);
+ if (sw->xstats == NULL)
+ return -ENOMEM;
+
+#define sname sw->xstats[stat].name.name
+ for (i = 0; i < RTE_DIM(dev_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_dev_stat,
+ .stat = dev_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_DEVICE,
+ .reset_allowed = 1,
+ };
+ snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
+ }
+ sw->xstats_count_mode_dev = stat;
+
+ for (port = 0; port < sw->port_count; port++) {
+ sw->xstats_offset_for_port[port] = stat;
+
+ uint32_t count_offset = stat;
+
+ for (i = 0; i < RTE_DIM(port_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_port_stat,
+ .obj_idx = port,
+ .stat = port_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_PORT,
+ .reset_allowed = port_reset_allowed[i],
+ };
+ snprintf(sname, sizeof(sname), "port_%u_%s",
+ port, port_stats[i]);
+ }
+
+ for (bkt = 0; bkt < (sw->ports[port].cq_worker_ring->size >>
+ SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
+ for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_port_bucket_stat,
+ .obj_idx = port,
+ .stat = port_bucket_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_PORT,
+ .extra_arg = bkt,
+ .reset_allowed = 1,
+ };
+ snprintf(sname, sizeof(sname),
+ "port_%u_%s_%u-%u",
+ port, port_bucket_stats[i],
+ (bkt << SW_DEQ_STAT_BUCKET_SHIFT) + 1,
+ (bkt + 1) << SW_DEQ_STAT_BUCKET_SHIFT);
+ stat++;
+ }
+ }
+
+ sw->xstats_count_per_port[port] = stat - count_offset;
+ }
+
+ sw->xstats_count_mode_port = stat - sw->xstats_count_mode_dev;
+
+ for (qid = 0; qid < sw->qid_count; qid++) {
+ uint32_t count_offset = stat;
+ sw->xstats_offset_for_qid[qid] = stat;
+
+ for (i = 0; i < RTE_DIM(qid_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_qid_stat,
+ .obj_idx = qid,
+ .stat = qid_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+ .reset_allowed = qid_reset_allowed[i],
+ };
+ snprintf(sname, sizeof(sname), "qid_%u_%s",
+ qid, qid_stats[i]);
+ }
+ for (iq = 0; iq < SW_IQS_MAX; iq++)
+ for (i = 0; i < RTE_DIM(qid_iq_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_qid_iq_stat,
+ .obj_idx = qid,
+ .stat = qid_iq_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+ .extra_arg = iq,
+ .reset_allowed = 0,
+ };
+ snprintf(sname, sizeof(sname),
+ "qid_%u_iq_%u_%s",
+ qid, iq,
+ qid_iq_stats[i]);
+ }
+
+ for (port = 0; port < sw->port_count; port++)
+ for (i = 0; i < RTE_DIM(qid_port_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_qid_port_stat,
+ .obj_idx = qid,
+ .stat = qid_port_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+ .extra_arg = port,
+ .reset_allowed = 0,
+ };
+ snprintf(sname, sizeof(sname),
+ "qid_%u_port_%u_%s",
+ qid, port,
+ qid_port_stats[i]);
+ }
+
+ sw->xstats_count_per_qid[qid] = stat - count_offset;
+ }
+
+ sw->xstats_count_mode_queue = stat -
+ (sw->xstats_count_mode_dev + sw->xstats_count_mode_port);
+#undef sname
+
+ sw->xstats_count = stat;
+
+ return stat;
+}
+
+int
+sw_xstats_uninit(struct sw_evdev *sw)
+{
+ rte_free(sw->xstats);
+ sw->xstats_count = 0;
+ return 0;
+}
+
+int
+sw_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ const struct sw_evdev *sw = sw_pmd_priv_const(dev);
+ unsigned int i;
+ unsigned int xidx = 0;
+ RTE_SET_USED(mode);
+ RTE_SET_USED(queue_port_id);
+
+ uint32_t xstats_mode_count = 0;
+ uint32_t start_offset = 0;
+
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ xstats_mode_count = sw->xstats_count_mode_dev;
+ break;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id >= (signed int)sw->port_count)
+ break;
+ xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
+ start_offset = sw->xstats_offset_for_port[queue_port_id];
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ if (queue_port_id >= (signed int)sw->qid_count)
+ break;
+ xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
+ start_offset = sw->xstats_offset_for_qid[queue_port_id];
+ break;
+ default:
+ SW_LOG_ERR("Invalid mode received in sw_xstats_get_names()\n");
+ return -EINVAL;
+ };
+
+ if (xstats_mode_count > size || !ids || !xstats_names)
+ return xstats_mode_count;
+
+ for (i = 0; i < sw->xstats_count && xidx < size; i++) {
+ if (sw->xstats[i].mode != mode)
+ continue;
+
+ if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
+ queue_port_id != sw->xstats[i].obj_idx)
+ continue;
+
+ xstats_names[xidx] = sw->xstats[i].name;
+ if (ids)
+ ids[xidx] = start_offset + xidx;
+ xidx++;
+ }
+ return xidx;
+}
+
+static int
+sw_xstats_update(struct sw_evdev *sw, enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id, const unsigned int ids[],
+ uint64_t values[], unsigned int n, const uint32_t reset,
+ const uint32_t ret_if_n_lt_nstats)
+{
+ unsigned int i;
+ unsigned int xidx = 0;
+ RTE_SET_USED(mode);
+ RTE_SET_USED(queue_port_id);
+
+ uint32_t xstats_mode_count = 0;
+
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ xstats_mode_count = sw->xstats_count_mode_dev;
+ break;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id >= (signed int)sw->port_count)
+ goto invalid_value;
+ xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ if (queue_port_id >= (signed int)sw->qid_count)
+ goto invalid_value;
+ xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
+ break;
+ default:
+ SW_LOG_ERR("Invalid mode received in sw_xstats_get()\n");
+ goto invalid_value;
+ };
+
+ /* this function can check num stats and return them (xstats_get() style
+ * behaviour) or ignore n for reset() of a single stat style behaviour.
+ */
+ if (ret_if_n_lt_nstats && xstats_mode_count > n)
+ return xstats_mode_count;
+
+ for (i = 0; i < n && xidx < xstats_mode_count; i++) {
+ struct sw_xstats_entry *xs = &sw->xstats[ids[i]];
+ if (ids[i] > sw->xstats_count || xs->mode != mode)
+ continue;
+
+ if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
+ queue_port_id != xs->obj_idx)
+ continue;
+
+ uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
+ - xs->reset_value;
+
+ if (values)
+ values[xidx] = val;
+
+ if (xs->reset_allowed && reset)
+ xs->reset_value = val;
+
+ xidx++;
+ }
+
+ return xidx;
+invalid_value:
+ return -EINVAL;
+}
+
+int
+sw_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ const uint32_t reset = 0;
+ const uint32_t ret_n_lt_stats = 1;
+ return sw_xstats_update(sw, mode, queue_port_id, ids, values, n,
+ reset, ret_n_lt_stats);
+}
+
+uint64_t
+sw_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id)
+{
+ const struct sw_evdev *sw = sw_pmd_priv_const(dev);
+ unsigned int i;
+
+ for (i = 0; i < sw->xstats_count; i++) {
+ struct sw_xstats_entry *xs = &sw->xstats[i];
+ if (strncmp(xs->name.name, name,
+ RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0){
+ if (id != NULL)
+ *id = i;
+ return xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
+ - xs->reset_value;
+ }
+ }
+ if (id != NULL)
+ *id = (uint32_t)-1;
+ return (uint64_t)-1;
+}
+
+static void
+sw_xstats_reset_range(struct sw_evdev *sw, uint32_t start, uint32_t num)
+{
+ uint32_t i;
+ for (i = start; i < start + num; i++) {
+ struct sw_xstats_entry *xs = &sw->xstats[i];
+ if (!xs->reset_allowed)
+ continue;
+
+ uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
+ - xs->reset_value;
+ xs->reset_value = val;
+ }
+}
+
+static int
+sw_xstats_reset_queue(struct sw_evdev *sw, uint8_t queue_id,
+ const uint32_t ids[], uint32_t nb_ids)
+{
+ const uint32_t reset = 1;
+ const uint32_t ret_n_lt_stats = 0;
+ if (ids) {
+ uint32_t nb_reset = sw_xstats_update(sw,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ queue_id, ids, NULL, nb_ids,
+ reset, ret_n_lt_stats);
+ return nb_reset == nb_ids ? 0 : -EINVAL;
+ }
+
+ if (ids == NULL)
+ sw_xstats_reset_range(sw, sw->xstats_offset_for_qid[queue_id],
+ sw->xstats_count_per_qid[queue_id]);
+
+ return 0;
+}
+
+static int
+sw_xstats_reset_port(struct sw_evdev *sw, uint8_t port_id,
+ const uint32_t ids[], uint32_t nb_ids)
+{
+ const uint32_t reset = 1;
+ const uint32_t ret_n_lt_stats = 0;
+ int offset = sw->xstats_offset_for_port[port_id];
+ int nb_stat = sw->xstats_count_per_port[port_id];
+
+ if (ids) {
+ uint32_t nb_reset = sw_xstats_update(sw,
+ RTE_EVENT_DEV_XSTATS_PORT, port_id,
+ ids, NULL, nb_ids,
+ reset, ret_n_lt_stats);
+ return nb_reset == nb_ids ? 0 : -EINVAL;
+ }
+
+ sw_xstats_reset_range(sw, offset, nb_stat);
+ return 0;
+}
+
+static int
+sw_xstats_reset_dev(struct sw_evdev *sw, const uint32_t ids[], uint32_t nb_ids)
+{
+ uint32_t i;
+ if (ids) {
+ for (i = 0; i < nb_ids; i++) {
+ uint32_t id = ids[i];
+ if (id >= sw->xstats_count_mode_dev)
+ return -EINVAL;
+ sw_xstats_reset_range(sw, id, 1);
+ }
+ } else {
+ for (i = 0; i < sw->xstats_count_mode_dev; i++)
+ sw_xstats_reset_range(sw, i, 1);
+ }
+
+ return 0;
+}
+
+int
+sw_xstats_reset(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ uint32_t i, err;
+
+ /* handle -1 for queue_port_id here, looping over all ports/queues */
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ sw_xstats_reset_dev(sw, ids, nb_ids);
+ break;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id == -1) {
+ for (i = 0; i < sw->port_count; i++) {
+ err = sw_xstats_reset_port(sw, i, ids, nb_ids);
+ if (err)
+ return -EINVAL;
+ }
+ } else if (queue_port_id < (int16_t)sw->port_count)
+ sw_xstats_reset_port(sw, queue_port_id, ids, nb_ids);
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ if (queue_port_id == -1) {
+ for (i = 0; i < sw->qid_count; i++) {
+ err = sw_xstats_reset_queue(sw, i, ids, nb_ids);
+ if (err)
+ return -EINVAL;
+ }
+ } else if (queue_port_id < (int16_t)sw->qid_count)
+ sw_xstats_reset_queue(sw, queue_port_id, ids, nb_ids);
+ break;
+ };
+
+ return 0;
+}
diff --git a/drivers/mempool/Makefile b/drivers/mempool/Makefile
new file mode 100644
index 00000000..8fd40e1b
--- /dev/null
+++ b/drivers/mempool/Makefile
@@ -0,0 +1,43 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 NXP. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+core-libs := librte_eal librte_mempool librte_ring
+
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2
+DEPDIRS-dpaa2 = $(core-libs)
+DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += ring
+DEPDIRS-ring = $(core-libs)
+DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += stack
+DEPDIRS-stack = $(core-libs)
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/mempool/dpaa2/Makefile b/drivers/mempool/dpaa2/Makefile
new file mode 100644
index 00000000..26ccebde
--- /dev/null
+++ b/drivers/mempool/dpaa2/Makefile
@@ -0,0 +1,63 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 NXP. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_dpaa2.a
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
+CFLAGS += -O0 -g
+CFLAGS += "-Wno-error"
+else
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_mempool_dpaa2_version.map
+
+# Lbrary version
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c
+
+LDLIBS += -lrte_bus_fslmc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
new file mode 100644
index 00000000..5a5d6aaa
--- /dev/null
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -0,0 +1,373 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_logs.h>
+#include <mc/fsl_dpbp.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+#include "dpaa2_hw_mempool.h"
+
+struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
+static struct dpaa2_bp_list *h_bp_list;
+
+static int
+rte_hw_mbuf_create_pool(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_list *bp_list;
+ struct dpaa2_dpbp_dev *avail_dpbp;
+ struct dpbp_attr dpbp_attr;
+ uint32_t bpid;
+ int ret, p_ret;
+
+ avail_dpbp = dpaa2_alloc_dpbp_dev();
+
+ if (!avail_dpbp) {
+ PMD_DRV_LOG(ERR, "DPAA2 resources not available");
+ return -ENOENT;
+ }
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ return ret;
+ }
+ }
+
+ ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Resource enable failure with"
+ " err code: %d\n", ret);
+ return ret;
+ }
+
+ ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
+ avail_dpbp->token, &dpbp_attr);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Resource read failure with"
+ " err code: %d\n", ret);
+ p_ret = ret;
+ ret = dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW,
+ avail_dpbp->token);
+ return p_ret;
+ }
+
+ /* Allocate the bp_list which will be added into global_bp_list */
+ bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
+ RTE_CACHE_LINE_SIZE);
+ if (!bp_list) {
+ PMD_INIT_LOG(ERR, "No heap memory available");
+ return -ENOMEM;
+ }
+
+ /* Set parameters of buffer pool list */
+ bp_list->buf_pool.num_bufs = mp->size;
+ bp_list->buf_pool.size = mp->elt_size
+ - sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
+ bp_list->buf_pool.bpid = dpbp_attr.bpid;
+ bp_list->buf_pool.h_bpool_mem = NULL;
+ bp_list->buf_pool.dpbp_node = avail_dpbp;
+ /* Identification for our offloaded pool_data structure */
+ bp_list->dpaa2_ops_index = mp->ops_index;
+ bp_list->next = h_bp_list;
+ bp_list->mp = mp;
+
+ bpid = dpbp_attr.bpid;
+
+ rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
+ + rte_pktmbuf_priv_size(mp);
+ rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
+ rte_dpaa2_bpid_info[bpid].bpid = bpid;
+
+ mp->pool_data = (void *)&rte_dpaa2_bpid_info[bpid];
+
+ PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid);
+
+ h_bp_list = bp_list;
+ return 0;
+}
+
+static void
+rte_hw_mbuf_free_pool(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_info *bpinfo;
+ struct dpaa2_bp_list *bp;
+ struct dpaa2_dpbp_dev *dpbp_node;
+
+ if (!mp->pool_data) {
+ PMD_DRV_LOG(ERR, "Not a valid dpaa22 pool");
+ return;
+ }
+
+ bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
+ bp = bpinfo->bp_list;
+ dpbp_node = bp->buf_pool.dpbp_node;
+
+ dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);
+
+ if (h_bp_list == bp) {
+ h_bp_list = h_bp_list->next;
+ } else { /* if it is not the first node */
+ struct dpaa2_bp_list *prev = h_bp_list, *temp;
+ temp = h_bp_list->next;
+ while (temp) {
+ if (temp == bp) {
+ prev->next = temp->next;
+ free(bp);
+ break;
+ }
+ prev = temp;
+ temp = temp->next;
+ }
+ }
+
+ dpaa2_free_dpbp_dev(dpbp_node);
+}
+
+static void
+rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
+ void * const *obj_table,
+ uint32_t bpid,
+ uint32_t meta_data_size,
+ int count)
+{
+ struct qbman_release_desc releasedesc;
+ struct qbman_swp *swp;
+ int ret;
+ int i, n;
+ uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret != 0) {
+ RTE_LOG(ERR, PMD, "Failed to allocate IO portal");
+ return;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ /* Create a release descriptor required for releasing
+ * buffers into QBMAN
+ */
+ qbman_release_desc_clear(&releasedesc);
+ qbman_release_desc_set_bpid(&releasedesc, bpid);
+
+ n = count % DPAA2_MBUF_MAX_ACQ_REL;
+ if (unlikely(!n))
+ goto aligned;
+
+ /* convert mbuf to buffers for the remainder */
+ for (i = 0; i < n ; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ bufs[i] = (uint64_t)rte_mempool_virt2phy(pool, obj_table[i])
+ + meta_data_size;
+#else
+ bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
+#endif
+ }
+
+ /* feed them to bman */
+ do {
+ ret = qbman_swp_release(swp, &releasedesc, bufs, n);
+ } while (ret == -EBUSY);
+
+aligned:
+ /* if there are more buffers to free */
+ while (n < count) {
+ /* convert mbuf to buffers */
+ for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ bufs[i] = (uint64_t)
+ rte_mempool_virt2phy(pool, obj_table[n + i])
+ + meta_data_size;
+#else
+ bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
+#endif
+ }
+
+ do {
+ ret = qbman_swp_release(swp, &releasedesc, bufs,
+ DPAA2_MBUF_MAX_ACQ_REL);
+ } while (ret == -EBUSY);
+ n += DPAA2_MBUF_MAX_ACQ_REL;
+ }
+}
+
+int
+rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
+ void **obj_table, unsigned int count)
+{
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
+ static int alloc;
+#endif
+ struct qbman_swp *swp;
+ uint16_t bpid;
+ uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+ int i, ret;
+ unsigned int n = 0;
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(pool);
+
+ if (!(bp_info->bp_list)) {
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ return -ENOENT;
+ }
+
+ bpid = bp_info->bpid;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret != 0) {
+ RTE_LOG(ERR, PMD, "Failed to allocate IO portal");
+ return ret;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ while (n < count) {
+ /* Acquire is all-or-nothing, so we drain in 7s,
+ * then the remainder.
+ */
+ if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
+ ret = qbman_swp_acquire(swp, bpid, bufs,
+ DPAA2_MBUF_MAX_ACQ_REL);
+ } else {
+ ret = qbman_swp_acquire(swp, bpid, bufs,
+ count - n);
+ }
+ /* In case of less than requested number of buffers available
+ * in pool, qbman_swp_acquire returns 0
+ */
+ if (ret <= 0) {
+ PMD_TX_LOG(ERR, "Buffer acquire failed with"
+ " err code: %d", ret);
+ /* The API expect the exact number of requested bufs */
+ /* Releasing all buffers allocated */
+ rte_dpaa2_mbuf_release(pool, obj_table, bpid,
+ bp_info->meta_data_size, n);
+ return ret;
+ }
+ /* assigning mbuf from the acquired objects */
+ for (i = 0; (i < ret) && bufs[i]; i++) {
+ DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
+ obj_table[n] = (struct rte_mbuf *)
+ (bufs[i] - bp_info->meta_data_size);
+ PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN",
+ (void *)bufs[i], (void *)obj_table[n]);
+ n++;
+ }
+ }
+
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
+ alloc += n;
+ PMD_TX_LOG(DEBUG, "Total = %d , req = %d done = %d",
+ alloc, count, n);
+#endif
+ return 0;
+}
+
+static int
+rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
+ void * const *obj_table, unsigned int n)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(pool);
+ if (!(bp_info->bp_list)) {
+ RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured");
+ return -ENOENT;
+ }
+ rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
+ bp_info->meta_data_size, n);
+
+ return 0;
+}
+
+static unsigned int
+rte_hw_mbuf_get_count(const struct rte_mempool *mp)
+{
+ int ret;
+ unsigned int num_of_bufs = 0;
+ struct dpaa2_bp_info *bp_info;
+ struct dpaa2_dpbp_dev *dpbp_node;
+
+ if (!mp || !mp->pool_data) {
+ RTE_LOG(ERR, PMD, "Invalid mempool provided");
+ return 0;
+ }
+
+ bp_info = (struct dpaa2_bp_info *)mp->pool_data;
+ dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;
+
+ ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
+ dpbp_node->token, &num_of_bufs);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)",
+ ret);
+ return 0;
+ }
+
+ RTE_LOG(DEBUG, PMD, "Free bufs = %u", num_of_bufs);
+
+ return num_of_bufs;
+}
+
+struct rte_mempool_ops dpaa2_mpool_ops = {
+ .name = "dpaa2",
+ .alloc = rte_hw_mbuf_create_pool,
+ .free = rte_hw_mbuf_free_pool,
+ .enqueue = rte_hw_mbuf_free_bulk,
+ .dequeue = rte_dpaa2_mbuf_alloc_bulk,
+ .get_count = rte_hw_mbuf_get_count,
+};
+
+MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
new file mode 100644
index 00000000..c4d7fc02
--- /dev/null
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
@@ -0,0 +1,91 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_HW_DPBP_H_
+#define _DPAA2_HW_DPBP_H_
+
+#define DPAA2_MAX_BUF_POOLS 8
+
+struct buf_pool_cfg {
+ void *addr;
+ /**< The address from where DPAA2 will carve out the buffers */
+ phys_addr_t phys_addr;
+ /**< Physical address of the memory provided in addr */
+ uint32_t num;
+ /**< Number of buffers */
+ uint32_t size;
+ /**< Size including headroom for each buffer */
+ uint16_t align;
+ /**< Buffer alignment (in bytes) */
+ uint16_t bpid;
+ /**< Autogenerated buffer pool ID for internal use */
+};
+
+struct buf_pool {
+ uint32_t size; /**< Size of the Pool */
+ uint32_t num_bufs; /**< Number of buffers in Pool */
+ uint16_t bpid; /**< Pool ID, from pool configuration */
+ uint8_t *h_bpool_mem; /**< Internal context data */
+ struct dpaa2_dpbp_dev *dpbp_node; /**< Hardware context */
+};
+
+/*!
+ * Buffer pool list configuration structure. User need to give DPAA2 the
+ * valid number of 'num_buf_pools'.
+ */
+struct dpaa2_bp_list_cfg {
+ struct buf_pool_cfg buf_pool; /* Configuration of each buffer pool*/
+};
+
+struct dpaa2_bp_list {
+ struct dpaa2_bp_list *next;
+ struct rte_mempool *mp; /**< DPDK RTE EAL pool reference */
+ int32_t dpaa2_ops_index; /**< Index into DPDK Mempool ops table */
+ struct buf_pool buf_pool;
+};
+
+struct dpaa2_bp_info {
+ uint32_t meta_data_size;
+ uint32_t bpid;
+ struct dpaa2_bp_list *bp_list;
+};
+
+#define mempool_to_bpinfo(mp) ((struct dpaa2_bp_info *)(mp)->pool_data)
+#define mempool_to_bpid(mp) ((mempool_to_bpinfo(mp))->bpid)
+
+extern struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
+
+int rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
+ void **obj_table, unsigned int count);
+
+#endif /* _DPAA2_HW_DPBP_H_ */
diff --git a/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
new file mode 100644
index 00000000..a8aa685c
--- /dev/null
+++ b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
@@ -0,0 +1,8 @@
+DPDK_17.05 {
+ global:
+
+ rte_dpaa2_bpid_info;
+ rte_dpaa2_mbuf_alloc_bulk;
+
+ local: *;
+};
diff --git a/drivers/mempool/ring/Makefile b/drivers/mempool/ring/Makefile
new file mode 100644
index 00000000..54630d99
--- /dev/null
+++ b/drivers/mempool/ring/Makefile
@@ -0,0 +1,48 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 NXP.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_ring.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_mempool_ring_version.map
+
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += rte_mempool_ring.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mempool/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
index b9aa64dd..5c132bf6 100644
--- a/lib/librte_mempool/rte_mempool_ring.c
+++ b/drivers/mempool/ring/rte_mempool_ring.c
@@ -42,26 +42,30 @@ static int
common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_mp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mp_enqueue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}
static int
common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
unsigned n)
{
- return rte_ring_sp_enqueue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sp_enqueue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}
static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_mc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_mc_dequeue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}
static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
- return rte_ring_sc_dequeue_bulk(mp->pool_data, obj_table, n);
+ return rte_ring_sc_dequeue_bulk(mp->pool_data,
+ obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}
static unsigned
diff --git a/drivers/mempool/ring/rte_mempool_ring_version.map b/drivers/mempool/ring/rte_mempool_ring_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/drivers/mempool/ring/rte_mempool_ring_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/drivers/mempool/stack/Makefile b/drivers/mempool/stack/Makefile
new file mode 100644
index 00000000..8f3125c7
--- /dev/null
+++ b/drivers/mempool/stack/Makefile
@@ -0,0 +1,51 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 NXP.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of NXP nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_stack.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# Headers
+CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
+
+EXPORT_MAP := rte_mempool_stack_version.map
+
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += rte_mempool_stack.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mempool/rte_mempool_stack.c b/drivers/mempool/stack/rte_mempool_stack.c
index 817f77e6..817f77e6 100644
--- a/lib/librte_mempool/rte_mempool_stack.c
+++ b/drivers/mempool/stack/rte_mempool_stack.c
diff --git a/drivers/mempool/stack/rte_mempool_stack_version.map b/drivers/mempool/stack/rte_mempool_stack_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/drivers/mempool/stack/rte_mempool_stack_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index bc932309..35ed8135 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -31,33 +31,81 @@
include $(RTE_SDK)/mk/rte.vars.mk
+# set in mk/toolchain/xxx/rte.toolchain-compat.mk
+ifeq ($(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD),d)
+ $(warning thunderx pmd is not supported by old compilers)
+endif
+
+core-libs := librte_eal librte_mbuf librte_mempool librte_ring librte_ether
+core-libs += librte_net librte_kvargs
+
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
+DEPDIRS-af_packet = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
+DEPDIRS-ark = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp
+DEPDIRS-avp = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x
+DEPDIRS-bnx2x = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding
+DEPDIRS-bonding = $(core-libs) librte_cmdline
DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe
+DEPDIRS-cxgbe = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2
+DEPDIRS-dpaa2 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000
+DEPDIRS-e1000 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena
+DEPDIRS-ena = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic
+DEPDIRS-enic = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k
+DEPDIRS-fm10k = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e
+DEPDIRS-i40e = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
+DEPDIRS-ixgbe = $(core-libs) librte_hash
+DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio
+DEPDIRS-liquidio = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
+DEPDIRS-mlx4 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5
-DIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += mpipe
+DEPDIRS-mlx5 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp
+DEPDIRS-nfp = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt
+DEPDIRS-bnxt = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null
+DEPDIRS-null = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += pcap
+DEPDIRS-pcap = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede
+DEPDIRS-qede = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += ring
+DEPDIRS-ring = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc
+DEPDIRS-sfc = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2
+DEPDIRS-szedata2 = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap
+DEPDIRS-tap = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += thunderx
+DEPDIRS-thunderx = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio
+DEPDIRS-virtio = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3
+DEPDIRS-vmxnet3 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += xenvirt
+DEPDIRS-xenvirt = $(core-libs) librte_cmdline
+
+ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_KNI) += kni
+endif
+DEPDIRS-kni = $(core-libs) librte_kni
ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost
endif # $(CONFIG_RTE_LIBRTE_VHOST)
+DEPDIRS-vhost = $(core-libs) librte_vhost
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/net/af_packet/Makefile b/drivers/net/af_packet/Makefile
index e14d6d0c..70d517c1 100644
--- a/drivers/net/af_packet/Makefile
+++ b/drivers/net/af_packet/Makefile
@@ -50,11 +50,4 @@ CFLAGS += $(WERROR_FLAGS)
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += rte_eth_af_packet.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index 45c6519f..68de45c3 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -38,6 +38,7 @@
#include <rte_mbuf.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_vdev.h>
@@ -83,6 +84,7 @@ struct pkt_rx_queue {
struct pkt_tx_queue {
int sockfd;
+ unsigned int frame_data_size;
struct iovec *rd;
uint8_t *map;
@@ -98,6 +100,7 @@ struct pmd_internals {
unsigned nb_queues;
int if_index;
+ char *if_name;
struct ether_addr eth_addr;
struct tpacket_req req;
@@ -115,8 +118,6 @@ static const char *valid_arguments[] = {
NULL
};
-static const char *drivername = "AF_PACKET PMD";
-
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
@@ -161,6 +162,12 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
pbuf = (uint8_t *) ppd + ppd->tp_mac;
memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf));
+ /* check for vlan info */
+ if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
+ mbuf->vlan_tci = ppd->tp_vlan_tci;
+ mbuf->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
+ }
+
/* release incoming frame and advance ring buffer */
ppd->tp_status = TP_STATUS_KERNEL;
if (++framenum >= framecount)
@@ -206,13 +213,28 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
framenum = pkt_q->framenum;
ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
for (i = 0; i < nb_pkts; i++) {
+ mbuf = *bufs++;
+
+ /* drop oversized packets */
+ if (rte_pktmbuf_data_len(mbuf) > pkt_q->frame_data_size) {
+ rte_pktmbuf_free(mbuf);
+ continue;
+ }
+
+ /* insert vlan info if necessary */
+ if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (rte_vlan_insert(&mbuf)) {
+ rte_pktmbuf_free(mbuf);
+ continue;
+ }
+ }
+
/* point at the next incoming frame */
if ((ppd->tp_status != TP_STATUS_AVAILABLE) &&
(poll(&pfd, 1, -1) < 0))
- continue;
+ break;
/* copy the tx frame data */
- mbuf = bufs[num_tx];
pbuf = (uint8_t *) ppd + TPACKET2_HDRLEN -
sizeof(struct sockaddr_ll);
memcpy(pbuf, rte_pktmbuf_mtod(mbuf, void*), rte_pktmbuf_data_len(mbuf));
@@ -231,13 +253,13 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/* kick-off transmits */
if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1)
- return 0; /* error sending -- no packets transmitted */
+ num_tx = 0; /* error sending -- no packets transmitted */
pkt_q->framenum = framenum;
pkt_q->tx_pkts += num_tx;
- pkt_q->err_pkts += nb_pkts - num_tx;
+ pkt_q->err_pkts += i - num_tx;
pkt_q->tx_bytes += num_tx_bytes;
- return num_tx;
+ return i;
}
static int
@@ -287,14 +309,12 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
- dev_info->driver_name = drivername;
dev_info->if_index = internals->if_index;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
}
static void
@@ -377,18 +397,20 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
{
struct pmd_internals *internals = dev->data->dev_private;
struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id];
- uint16_t buf_size;
+ unsigned int buf_size, data_size;
pkt_q->mb_pool = mb_pool;
/* Now get the space available for data in the mbuf */
- buf_size = (uint16_t)(rte_pktmbuf_data_room_size(pkt_q->mb_pool) -
- RTE_PKTMBUF_HEADROOM);
+ buf_size = rte_pktmbuf_data_room_size(pkt_q->mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+ data_size = internals->req.tp_frame_size;
+ data_size -= TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);
- if (ETH_FRAME_LEN > buf_size) {
+ if (data_size > buf_size) {
RTE_LOG(ERR, PMD,
"%s: %d bytes will not fit in mbuf (%d bytes)\n",
- dev->data->name, ETH_FRAME_LEN, buf_size);
+ dev->data->name, data_size, buf_size);
return -ENOMEM;
}
@@ -412,12 +434,80 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
+static int
+eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_mtu = mtu };
+ int ret;
+ int s;
+ unsigned int data_size = internals->req.tp_frame_size -
+ TPACKET2_HDRLEN -
+ sizeof(struct sockaddr_ll);
+
+ if (mtu > data_size)
+ return -EINVAL;
+
+ s = socket(PF_INET, SOCK_DGRAM, 0);
+ if (s < 0)
+ return -EINVAL;
+
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", internals->if_name);
+ ret = ioctl(s, SIOCSIFMTU, &ifr);
+ close(s);
+
+ if (ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
+{
+ struct ifreq ifr;
+ int s;
+
+ s = socket(PF_INET, SOCK_DGRAM, 0);
+ if (s < 0)
+ return;
+
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", if_name);
+ if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0)
+ goto out;
+ ifr.ifr_flags &= mask;
+ ifr.ifr_flags |= flags;
+ if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0)
+ goto out;
+out:
+ close(s);
+}
+
+static void
+eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
+}
+
+static void
+eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
+}
+
static const struct eth_dev_ops ops = {
.dev_start = eth_dev_start,
.dev_stop = eth_dev_stop,
.dev_close = eth_dev_close,
.dev_configure = eth_dev_configure,
.dev_infos_get = eth_dev_info,
+ .mtu_set = eth_dev_mtu_set,
+ .promiscuous_enable = eth_dev_promiscuous_enable,
+ .promiscuous_disable = eth_dev_promiscuous_disable,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
.rx_queue_release = eth_queue_release,
@@ -447,19 +537,22 @@ open_packet_iface(const char *key __rte_unused,
return 0;
}
+static struct rte_vdev_driver pmd_af_packet_drv;
+
static int
-rte_pmd_init_internals(const char *name,
+rte_pmd_init_internals(struct rte_vdev_device *dev,
const int sockfd,
const unsigned nb_queues,
unsigned int blocksize,
unsigned int blockcnt,
unsigned int framesize,
unsigned int framecnt,
- const unsigned numa_node,
struct pmd_internals **internals,
struct rte_eth_dev **eth_dev,
struct rte_kvargs *kvlist)
{
+ const char *name = rte_vdev_device_name(dev);
+ const unsigned int numa_node = dev->device.numa_node;
struct rte_eth_dev_data *data = NULL;
struct rte_kvargs_pair *pair = NULL;
struct ifreq ifr;
@@ -531,6 +624,7 @@ rte_pmd_init_internals(const char *name,
name);
goto error_early;
}
+ (*internals)->if_name = strdup(pair->value);
(*internals)->if_index = ifr.ifr_ifindex;
if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
@@ -640,6 +734,9 @@ rte_pmd_init_internals(const char *name,
tx_queue = &((*internals)->tx_queue[q]);
tx_queue->framecount = req->tp_frame_nr;
+ tx_queue->frame_data_size = req->tp_frame_size;
+ tx_queue->frame_data_size -= TPACKET2_HDRLEN -
+ sizeof(struct sockaddr_ll);
tx_queue->map = rx_queue->map + req->tp_block_size * req->tp_block_nr;
@@ -673,7 +770,7 @@ rte_pmd_init_internals(const char *name,
}
/* reserve an ethdev entry */
- *eth_dev = rte_eth_dev_allocate(name);
+ *eth_dev = rte_eth_vdev_allocate(dev, 0);
if (*eth_dev == NULL)
goto error;
@@ -687,22 +784,16 @@ rte_pmd_init_internals(const char *name,
(*internals)->nb_queues = nb_queues;
+ rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
data->dev_private = *internals;
- data->port_id = (*eth_dev)->data->port_id;
data->nb_rx_queues = (uint16_t)nb_queues;
data->nb_tx_queues = (uint16_t)nb_queues;
data->dev_link = pmd_link;
data->mac_addrs = &(*internals)->eth_addr;
- strncpy(data->name,
- (*eth_dev)->data->name, strlen((*eth_dev)->data->name));
(*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
- (*eth_dev)->driver = NULL;
(*eth_dev)->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
- (*eth_dev)->data->drv_name = drivername;
- (*eth_dev)->data->kdrv = RTE_KDRV_NONE;
- (*eth_dev)->data->numa_node = numa_node;
return 0;
@@ -719,6 +810,7 @@ error:
((*internals)->rx_queue[q].sockfd != qsockfd))
close((*internals)->rx_queue[q].sockfd);
}
+ free((*internals)->if_name);
rte_free(*internals);
error_early:
rte_free(data);
@@ -726,11 +818,11 @@ error_early:
}
static int
-rte_eth_from_packet(const char *name,
+rte_eth_from_packet(struct rte_vdev_device *dev,
int const *sockfd,
- const unsigned numa_node,
struct rte_kvargs *kvlist)
{
+ const char *name = rte_vdev_device_name(dev);
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
struct rte_kvargs_pair *pair = NULL;
@@ -813,11 +905,11 @@ rte_eth_from_packet(const char *name,
RTE_LOG(INFO, PMD, "%s:\tframe size %d\n", name, framesize);
RTE_LOG(INFO, PMD, "%s:\tframe count %d\n", name, framecount);
- if (rte_pmd_init_internals(name, *sockfd, qpairs,
- blocksize, blockcount,
- framesize, framecount,
- numa_node, &internals, &eth_dev,
- kvlist) < 0)
+ if (rte_pmd_init_internals(dev, *sockfd, qpairs,
+ blocksize, blockcount,
+ framesize, framecount,
+ &internals, &eth_dev,
+ kvlist) < 0)
return -1;
eth_dev->rx_pkt_burst = eth_af_packet_rx;
@@ -827,18 +919,16 @@ rte_eth_from_packet(const char *name,
}
static int
-rte_pmd_af_packet_probe(const char *name, const char *params)
+rte_pmd_af_packet_probe(struct rte_vdev_device *dev)
{
- unsigned numa_node;
int ret = 0;
struct rte_kvargs *kvlist;
int sockfd = -1;
- RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n", name);
+ RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n",
+ rte_vdev_device_name(dev));
- numa_node = rte_socket_id();
-
- kvlist = rte_kvargs_parse(params, valid_arguments);
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
if (kvlist == NULL) {
ret = -1;
goto exit;
@@ -856,7 +946,10 @@ rte_pmd_af_packet_probe(const char *name, const char *params)
goto exit;
}
- ret = rte_eth_from_packet(name, &sockfd, numa_node, kvlist);
+ if (dev->device.numa_node == SOCKET_ID_ANY)
+ dev->device.numa_node = rte_socket_id();
+
+ ret = rte_eth_from_packet(dev, &sockfd, kvlist);
close(sockfd); /* no longer needed */
exit:
@@ -865,7 +958,7 @@ exit:
}
static int
-rte_pmd_af_packet_remove(const char *name)
+rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
{
struct rte_eth_dev *eth_dev = NULL;
struct pmd_internals *internals;
@@ -874,11 +967,11 @@ rte_pmd_af_packet_remove(const char *name)
RTE_LOG(INFO, PMD, "Closing AF_PACKET ethdev on numa socket %u\n",
rte_socket_id());
- if (name == NULL)
+ if (dev == NULL)
return -1;
/* find the ethdev entry */
- eth_dev = rte_eth_dev_allocated(name);
+ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
if (eth_dev == NULL)
return -1;
@@ -887,6 +980,7 @@ rte_pmd_af_packet_remove(const char *name)
rte_free(internals->rx_queue[q].rd);
rte_free(internals->tx_queue[q].rd);
}
+ free(internals->if_name);
rte_free(eth_dev->data->dev_private);
rte_free(eth_dev->data);
diff --git a/drivers/net/ark/Makefile b/drivers/net/ark/Makefile
new file mode 100644
index 00000000..ca64b195
--- /dev/null
+++ b/drivers/net/ark/Makefile
@@ -0,0 +1,66 @@
+# BSD LICENSE
+#
+# Copyright (c) 2015-2017 Atomic Rules LLC
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ark.a
+
+CFLAGS += -O3 -I./
+CFLAGS += $(WERROR_FLAGS) -Werror
+
+EXPORT_MAP := rte_pmd_ark_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ddm.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_mpu.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktchkr.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktgen.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_rqp.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_udm.c
+
+# this lib depends upon:
+LDLIBS += -lpthread
+ifdef CONFIG_RTE_EXEC_ENV_LINUXAPP
+LDLIBS += -ldl
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ark/ark_ddm.c b/drivers/net/ark/ark_ddm.c
new file mode 100644
index 00000000..221460c7
--- /dev/null
+++ b/drivers/net/ark/ark_ddm.c
@@ -0,0 +1,151 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_logs.h"
+#include "ark_ddm.h"
+
+/* ************************************************************************* */
+int
+ark_ddm_verify(struct ark_ddm_t *ddm)
+{
+ if (sizeof(struct ark_ddm_t) != ARK_DDM_EXPECTED_SIZE) {
+ PMD_DRV_LOG(ERR, "ARK: DDM structure looks incorrect %d vs %zd\n",
+ ARK_DDM_EXPECTED_SIZE, sizeof(struct ark_ddm_t));
+ return -1;
+ }
+
+ if (ddm->cfg.const0 != ARK_DDM_CONST) {
+ PMD_DRV_LOG(ERR, "ARK: DDM module not found as expected 0x%08x\n",
+ ddm->cfg.const0);
+ return -1;
+ }
+ return 0;
+}
+
+void
+ark_ddm_start(struct ark_ddm_t *ddm)
+{
+ ddm->cfg.command = 1;
+}
+
+int
+ark_ddm_stop(struct ark_ddm_t *ddm, const int wait)
+{
+ int cnt = 0;
+
+ ddm->cfg.command = 2;
+ while (wait && (ddm->cfg.stop_flushed & 0x01) == 0) {
+ if (cnt++ > 1000)
+ return 1;
+
+ usleep(10);
+ }
+ return 0;
+}
+
+void
+ark_ddm_reset(struct ark_ddm_t *ddm)
+{
+ int status;
+
+ /* reset only works if ddm has stopped properly. */
+ status = ark_ddm_stop(ddm, 1);
+
+ if (status != 0) {
+ PMD_DEBUG_LOG(INFO, "%s stop failed doing forced reset\n",
+ __func__);
+ ddm->cfg.command = 4;
+ usleep(10);
+ }
+ ddm->cfg.command = 3;
+}
+
+void
+ark_ddm_setup(struct ark_ddm_t *ddm, phys_addr_t cons_addr, uint32_t interval)
+{
+ ddm->setup.cons_write_index_addr = cons_addr;
+ ddm->setup.write_index_interval = interval / 4; /* 4 ns period */
+}
+
+void
+ark_ddm_stats_reset(struct ark_ddm_t *ddm)
+{
+ ddm->cfg.tlp_stats_clear = 1;
+}
+
+void
+ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg)
+{
+ PMD_FUNC_LOG(DEBUG, "%s Stopped: %d\n", msg,
+ ark_ddm_is_stopped(ddm)
+ );
+}
+
+void
+ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg)
+{
+ struct ark_ddm_stats_t *stats = &ddm->stats;
+
+ PMD_STATS_LOG(INFO, "DDM Stats: %s"
+ ARK_SU64 ARK_SU64 ARK_SU64
+ "\n", msg,
+ "Bytes:", stats->tx_byte_count,
+ "Packets:", stats->tx_pkt_count,
+ "MBufs", stats->tx_mbuf_count);
+}
+
+int
+ark_ddm_is_stopped(struct ark_ddm_t *ddm)
+{
+ return (ddm->cfg.stop_flushed & 0x01) != 0;
+}
+
+uint64_t
+ark_ddm_queue_byte_count(struct ark_ddm_t *ddm)
+{
+ return ddm->queue_stats.byte_count;
+}
+
+uint64_t
+ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm)
+{
+ return ddm->queue_stats.pkt_count;
+}
+
+void
+ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm)
+{
+ ddm->queue_stats.byte_count = 1;
+}
diff --git a/drivers/net/ark/ark_ddm.h b/drivers/net/ark/ark_ddm.h
new file mode 100644
index 00000000..de61926c
--- /dev/null
+++ b/drivers/net/ark/ark_ddm.h
@@ -0,0 +1,177 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_DDM_H_
+#define _ARK_DDM_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+
+/* The DDM or Downstream Data Mover is an internal Arkville hardware
+ * module for moving packet from host memory to the TX packet streams.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/* struct defining Tx meta data -- fixed in FPGA -- 16 bytes */
+struct ark_tx_meta {
+ uint64_t physaddr;
+ uint32_t delta_ns;
+ uint16_t data_len; /* of this MBUF */
+#define ARK_DDM_EOP 0x01
+#define ARK_DDM_SOP 0x02
+ uint8_t flags; /* bit 0 indicates last mbuf in chain. */
+ uint8_t reserved[1];
+};
+
+
+/*
+ * DDM core hardware structures
+ * These are overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+#define ARK_DDM_CFG 0x0000
+#define ARK_DDM_CONST 0xfacecafe
+struct ark_ddm_cfg_t {
+ uint32_t r0;
+ volatile uint32_t tlp_stats_clear;
+ uint32_t const0;
+ volatile uint32_t tag_max;
+ volatile uint32_t command;
+ volatile uint32_t stop_flushed;
+};
+
+#define ARK_DDM_STATS 0x0020
+struct ark_ddm_stats_t {
+ volatile uint64_t tx_byte_count;
+ volatile uint64_t tx_pkt_count;
+ volatile uint64_t tx_mbuf_count;
+};
+
+#define ARK_DDM_MRDQ 0x0040
+struct ark_ddm_mrdq_t {
+ volatile uint32_t mrd_q1;
+ volatile uint32_t mrd_q2;
+ volatile uint32_t mrd_q3;
+ volatile uint32_t mrd_q4;
+ volatile uint32_t mrd_full;
+};
+
+#define ARK_DDM_CPLDQ 0x0068
+struct ark_ddm_cpldq_t {
+ volatile uint32_t cpld_q1;
+ volatile uint32_t cpld_q2;
+ volatile uint32_t cpld_q3;
+ volatile uint32_t cpld_q4;
+ volatile uint32_t cpld_full;
+};
+
+#define ARK_DDM_MRD_PS 0x0090
+struct ark_ddm_mrd_ps_t {
+ volatile uint32_t mrd_ps_min;
+ volatile uint32_t mrd_ps_max;
+ volatile uint32_t mrd_full_ps_min;
+ volatile uint32_t mrd_full_ps_max;
+ volatile uint32_t mrd_dw_ps_min;
+ volatile uint32_t mrd_dw_ps_max;
+};
+
+#define ARK_DDM_QUEUE_STATS 0x00a8
+struct ark_ddm_qstats_t {
+ volatile uint64_t byte_count;
+ volatile uint64_t pkt_count;
+ volatile uint64_t mbuf_count;
+};
+
+#define ARK_DDM_CPLD_PS 0x00c0
+struct ark_ddm_cpld_ps_t {
+ volatile uint32_t cpld_ps_min;
+ volatile uint32_t cpld_ps_max;
+ volatile uint32_t cpld_full_ps_min;
+ volatile uint32_t cpld_full_ps_max;
+ volatile uint32_t cpld_dw_ps_min;
+ volatile uint32_t cpld_dw_ps_max;
+};
+
+#define ARK_DDM_SETUP 0x00e0
+struct ark_ddm_setup_t {
+ phys_addr_t cons_write_index_addr;
+ uint32_t write_index_interval; /* 4ns each */
+ volatile uint32_t cons_index;
+};
+
+#define ARK_DDM_EXPECTED_SIZE 256
+#define ARK_DDM_QOFFSET ARK_DDM_EXPECTED_SIZE
+/* Consolidated structure */
+struct ark_ddm_t {
+ struct ark_ddm_cfg_t cfg;
+ uint8_t reserved0[(ARK_DDM_STATS - ARK_DDM_CFG) -
+ sizeof(struct ark_ddm_cfg_t)];
+ struct ark_ddm_stats_t stats;
+ uint8_t reserved1[(ARK_DDM_MRDQ - ARK_DDM_STATS) -
+ sizeof(struct ark_ddm_stats_t)];
+ struct ark_ddm_mrdq_t mrdq;
+ uint8_t reserved2[(ARK_DDM_CPLDQ - ARK_DDM_MRDQ) -
+ sizeof(struct ark_ddm_mrdq_t)];
+ struct ark_ddm_cpldq_t cpldq;
+ uint8_t reserved3[(ARK_DDM_MRD_PS - ARK_DDM_CPLDQ) -
+ sizeof(struct ark_ddm_cpldq_t)];
+ struct ark_ddm_mrd_ps_t mrd_ps;
+ struct ark_ddm_qstats_t queue_stats;
+ struct ark_ddm_cpld_ps_t cpld_ps;
+ uint8_t reserved5[(ARK_DDM_SETUP - ARK_DDM_CPLD_PS) -
+ sizeof(struct ark_ddm_cpld_ps_t)];
+ struct ark_ddm_setup_t setup;
+ uint8_t reserved_p[(ARK_DDM_EXPECTED_SIZE - ARK_DDM_SETUP) -
+ sizeof(struct ark_ddm_setup_t)];
+};
+
+
+/* DDM function prototype */
+int ark_ddm_verify(struct ark_ddm_t *ddm);
+void ark_ddm_start(struct ark_ddm_t *ddm);
+int ark_ddm_stop(struct ark_ddm_t *ddm, const int wait);
+void ark_ddm_reset(struct ark_ddm_t *ddm);
+void ark_ddm_stats_reset(struct ark_ddm_t *ddm);
+void ark_ddm_setup(struct ark_ddm_t *ddm, phys_addr_t cons_addr,
+ uint32_t interval);
+void ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg);
+void ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg);
+int ark_ddm_is_stopped(struct ark_ddm_t *ddm);
+uint64_t ark_ddm_queue_byte_count(struct ark_ddm_t *ddm);
+uint64_t ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm);
+void ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm);
+
+#endif
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
new file mode 100644
index 00000000..995c93d3
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev.c
@@ -0,0 +1,1001 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <sys/stat.h>
+#include <dlfcn.h>
+
+#include <rte_ethdev_pci.h>
+#include <rte_kvargs.h>
+
+#include "ark_global.h"
+#include "ark_logs.h"
+#include "ark_ethdev.h"
+#include "ark_ethdev_tx.h"
+#include "ark_ethdev_rx.h"
+#include "ark_mpu.h"
+#include "ark_ddm.h"
+#include "ark_udm.h"
+#include "ark_rqp.h"
+#include "ark_pktdir.h"
+#include "ark_pktgen.h"
+#include "ark_pktchkr.h"
+
+/* Internal prototypes */
+static int eth_ark_check_args(struct ark_adapter *ark, const char *params);
+static int eth_ark_dev_init(struct rte_eth_dev *dev);
+static int ark_config_device(struct rte_eth_dev *dev);
+static int eth_ark_dev_uninit(struct rte_eth_dev *eth_dev);
+static int eth_ark_dev_configure(struct rte_eth_dev *dev);
+static int eth_ark_dev_start(struct rte_eth_dev *dev);
+static void eth_ark_dev_stop(struct rte_eth_dev *dev);
+static void eth_ark_dev_close(struct rte_eth_dev *dev);
+static void eth_ark_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int eth_ark_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev);
+static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev);
+static void eth_ark_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev);
+static void eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+static int eth_ark_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool);
+static void eth_ark_macaddr_remove(struct rte_eth_dev *dev,
+ uint32_t index);
+
+/*
+ * The packet generator is a functional block used to generate packet
+ * patterns for testing. It is not intended for nominal use.
+ */
+#define ARK_PKTGEN_ARG "Pkt_gen"
+
+/*
+ * The packet checker is a functional block used to verify packet
+ * patterns for testing. It is not intended for nominal use.
+ */
+#define ARK_PKTCHKR_ARG "Pkt_chkr"
+
+/*
+ * The packet director is used to select the internal ingress and
+ * egress packets paths during testing. It is not intended for
+ * nominal use.
+ */
+#define ARK_PKTDIR_ARG "Pkt_dir"
+
+/* Devinfo configurations */
+#define ARK_RX_MAX_QUEUE (4096 * 4)
+#define ARK_RX_MIN_QUEUE (512)
+#define ARK_RX_MAX_PKT_LEN ((16 * 1024) - 128)
+#define ARK_RX_MIN_BUFSIZE (1024)
+
+#define ARK_TX_MAX_QUEUE (4096 * 4)
+#define ARK_TX_MIN_QUEUE (256)
+
+static const char * const valid_arguments[] = {
+ ARK_PKTGEN_ARG,
+ ARK_PKTCHKR_ARG,
+ ARK_PKTDIR_ARG,
+ NULL
+};
+
+static const struct rte_pci_id pci_id_ark_map[] = {
+ {RTE_PCI_DEVICE(0x1d6c, 0x100d)},
+ {RTE_PCI_DEVICE(0x1d6c, 0x100e)},
+ {.vendor_id = 0, /* sentinel */ },
+};
+
+static int
+eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct ark_adapter));
+
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ ret = eth_ark_dev_init(eth_dev);
+ if (ret)
+ rte_eth_dev_pci_release(eth_dev);
+
+ return ret;
+}
+
+static int
+eth_ark_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ark_dev_uninit);
+}
+
+static struct rte_pci_driver rte_ark_pmd = {
+ .id_table = pci_id_ark_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_ark_pci_probe,
+ .remove = eth_ark_pci_remove,
+};
+
+static const struct eth_dev_ops ark_eth_dev_ops = {
+ .dev_configure = eth_ark_dev_configure,
+ .dev_start = eth_ark_dev_start,
+ .dev_stop = eth_ark_dev_stop,
+ .dev_close = eth_ark_dev_close,
+
+ .dev_infos_get = eth_ark_dev_info_get,
+
+ .rx_queue_setup = eth_ark_dev_rx_queue_setup,
+ .rx_queue_count = eth_ark_dev_rx_queue_count,
+ .tx_queue_setup = eth_ark_tx_queue_setup,
+
+ .link_update = eth_ark_dev_link_update,
+ .dev_set_link_up = eth_ark_dev_set_link_up,
+ .dev_set_link_down = eth_ark_dev_set_link_down,
+
+ .rx_queue_start = eth_ark_rx_start_queue,
+ .rx_queue_stop = eth_ark_rx_stop_queue,
+
+ .tx_queue_start = eth_ark_tx_queue_start,
+ .tx_queue_stop = eth_ark_tx_queue_stop,
+
+ .stats_get = eth_ark_dev_stats_get,
+ .stats_reset = eth_ark_dev_stats_reset,
+
+ .mac_addr_add = eth_ark_macaddr_add,
+ .mac_addr_remove = eth_ark_macaddr_remove,
+ .mac_addr_set = eth_ark_set_default_mac_addr,
+};
+
+static int
+check_for_ext(struct ark_adapter *ark)
+{
+ int found = 0;
+
+ /* Get the env */
+ const char *dllpath = getenv("ARK_EXT_PATH");
+
+ if (dllpath == NULL) {
+ PMD_DEBUG_LOG(DEBUG, "ARK EXT NO dll path specified\n");
+ return 0;
+ }
+ PMD_DRV_LOG(INFO, "ARK EXT found dll path at %s\n", dllpath);
+
+ /* Open and load the .so */
+ ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY);
+ if (ark->d_handle == NULL) {
+ PMD_DRV_LOG(ERR, "Could not load user extension %s\n",
+ dllpath);
+ return -1;
+ }
+ PMD_DRV_LOG(INFO, "SUCCESS: loaded user extension %s\n",
+ dllpath);
+
+ /* Get the entry points */
+ ark->user_ext.dev_init =
+ (void *(*)(struct rte_eth_dev *, void *, int))
+ dlsym(ark->d_handle, "dev_init");
+ PMD_DEBUG_LOG(DEBUG, "device ext init pointer = %p\n",
+ ark->user_ext.dev_init);
+ ark->user_ext.dev_get_port_count =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_get_port_count");
+ ark->user_ext.dev_uninit =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_uninit");
+ ark->user_ext.dev_configure =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_configure");
+ ark->user_ext.dev_start =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_start");
+ ark->user_ext.dev_stop =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_stop");
+ ark->user_ext.dev_close =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_close");
+ ark->user_ext.link_update =
+ (int (*)(struct rte_eth_dev *, int, void *))
+ dlsym(ark->d_handle, "link_update");
+ ark->user_ext.dev_set_link_up =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_set_link_up");
+ ark->user_ext.dev_set_link_down =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_set_link_down");
+ ark->user_ext.stats_get =
+ (void (*)(struct rte_eth_dev *, struct rte_eth_stats *,
+ void *))
+ dlsym(ark->d_handle, "stats_get");
+ ark->user_ext.stats_reset =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "stats_reset");
+ ark->user_ext.mac_addr_add =
+ (void (*)(struct rte_eth_dev *, struct ether_addr *, uint32_t,
+ uint32_t, void *))
+ dlsym(ark->d_handle, "mac_addr_add");
+ ark->user_ext.mac_addr_remove =
+ (void (*)(struct rte_eth_dev *, uint32_t, void *))
+ dlsym(ark->d_handle, "mac_addr_remove");
+ ark->user_ext.mac_addr_set =
+ (void (*)(struct rte_eth_dev *, struct ether_addr *,
+ void *))
+ dlsym(ark->d_handle, "mac_addr_set");
+
+ return found;
+}
+
+static int
+eth_ark_dev_init(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ struct rte_pci_device *pci_dev;
+ int ret;
+ int port_count = 1;
+ int p;
+
+ ark->eth_dev = dev;
+
+ PMD_FUNC_LOG(DEBUG, "\n");
+
+ /* Check to see if there is an extension that we need to load */
+ ret = check_for_ext(ark);
+ if (ret)
+ return ret;
+ pci_dev = ARK_DEV_TO_PCI(dev);
+ rte_eth_copy_pci_info(dev, pci_dev);
+
+ /* Use dummy function until setup */
+ dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
+ dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;
+
+ ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
+ ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;
+
+ ark->sysctrl.v = (void *)&ark->bar0[ARK_SYSCTRL_BASE];
+ ark->mpurx.v = (void *)&ark->bar0[ARK_MPU_RX_BASE];
+ ark->udm.v = (void *)&ark->bar0[ARK_UDM_BASE];
+ ark->mputx.v = (void *)&ark->bar0[ARK_MPU_TX_BASE];
+ ark->ddm.v = (void *)&ark->bar0[ARK_DDM_BASE];
+ ark->cmac.v = (void *)&ark->bar0[ARK_CMAC_BASE];
+ ark->external.v = (void *)&ark->bar0[ARK_EXTERNAL_BASE];
+ ark->pktdir.v = (void *)&ark->bar0[ARK_PKTDIR_BASE];
+ ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE];
+ ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE];
+
+ ark->rqpacing =
+ (struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
+ ark->started = 0;
+
+ PMD_DEBUG_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n",
+ ark->sysctrl.t32[4],
+ rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
+ PMD_DRV_LOG(INFO, "Arkville HW Commit_ID: %08x\n",
+ rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
+
+ /* If HW sanity test fails, return an error */
+ if (ark->sysctrl.t32[4] != 0xcafef00d) {
+ PMD_DRV_LOG(ERR,
+ "HW Sanity test has failed, expected constant"
+ " 0x%x, read 0x%x (%s)\n",
+ 0xcafef00d,
+ ark->sysctrl.t32[4], __func__);
+ return -1;
+ }
+ if (ark->sysctrl.t32[3] != 0) {
+ if (ark_rqp_lasped(ark->rqpacing)) {
+ PMD_DRV_LOG(ERR, "Arkville Evaluation System - "
+ "Timer has Expired\n");
+ return -1;
+ }
+ PMD_DRV_LOG(WARNING, "Arkville Evaluation System - "
+ "Timer is Running\n");
+ }
+
+ PMD_DRV_LOG(INFO,
+ "HW Sanity test has PASSED, expected constant"
+ " 0x%x, read 0x%x (%s)\n",
+ 0xcafef00d, ark->sysctrl.t32[4], __func__);
+
+ /* We are a single function multi-port device. */
+ ret = ark_config_device(dev);
+ dev->dev_ops = &ark_eth_dev_ops;
+ dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
+ dev->data->mac_addrs = rte_zmalloc("ark", ETHER_ADDR_LEN, 0);
+ if (!dev->data->mac_addrs) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocated memory for storing mac address"
+ );
+ }
+
+ if (ark->user_ext.dev_init) {
+ ark->user_data = ark->user_ext.dev_init(dev, ark->a_bar, 0);
+ if (!ark->user_data) {
+ PMD_DRV_LOG(INFO,
+ "Failed to initialize PMD extension!"
+ " continuing without it\n");
+ memset(&ark->user_ext, 0, sizeof(struct ark_user_ext));
+ dlclose(ark->d_handle);
+ }
+ }
+
+ if (pci_dev->device.devargs)
+ ret = eth_ark_check_args(ark, pci_dev->device.devargs->args);
+ else
+ PMD_DRV_LOG(INFO, "No Device args found\n");
+
+ if (ret)
+ goto error;
+ /*
+ * We will create additional devices based on the number of requested
+ * ports
+ */
+ if (ark->user_ext.dev_get_port_count)
+ port_count =
+ ark->user_ext.dev_get_port_count(dev, ark->user_data);
+ ark->num_ports = port_count;
+
+ for (p = 0; p < port_count; p++) {
+ struct rte_eth_dev *eth_dev;
+ char name[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(name, sizeof(name), "arketh%d",
+ dev->data->port_id + p);
+
+ if (p == 0) {
+ /* First port is already allocated by DPDK */
+ eth_dev = ark->eth_dev;
+ continue;
+ }
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(ERR,
+ "Could not allocate eth_dev for port %d\n",
+ p);
+ goto error;
+ }
+
+ eth_dev->device = &pci_dev->device;
+ eth_dev->data->dev_private = ark;
+ eth_dev->dev_ops = ark->eth_dev->dev_ops;
+ eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst;
+ eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
+ if (!eth_dev->data->mac_addrs) {
+ PMD_DRV_LOG(ERR,
+ "Memory allocation for MAC failed!"
+ " Exiting.\n");
+ goto error;
+ }
+
+ if (ark->user_ext.dev_init)
+ ark->user_data =
+ ark->user_ext.dev_init(dev, ark->a_bar, p);
+ }
+
+ return ret;
+
+ error:
+ if (dev->data->mac_addrs)
+ rte_free(dev->data->mac_addrs);
+ return -1;
+}
+
+/*
+ *Initial device configuration when device is opened
+ * setup the DDM, and UDM
+ * Called once per PCIE device
+ */
+static int
+ark_config_device(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ uint16_t num_q, i;
+ struct ark_mpu_t *mpu;
+
+ /*
+ * Make sure that the packet director, generator and checker are in a
+ * known state
+ */
+ ark->start_pg = 0;
+ ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1);
+ ark_pktgen_reset(ark->pg);
+ ark->pc = ark_pktchkr_init(ark->pktchkr.v, 0, 1);
+ ark_pktchkr_stop(ark->pc);
+ ark->pd = ark_pktdir_init(ark->pktdir.v);
+
+ /* Verify HW */
+ if (ark_udm_verify(ark->udm.v))
+ return -1;
+ if (ark_ddm_verify(ark->ddm.v))
+ return -1;
+
+ /* UDM */
+ if (ark_udm_reset(ark->udm.v)) {
+ PMD_DRV_LOG(ERR, "Unable to stop and reset UDM\n");
+ return -1;
+ }
+ /* Keep in reset until the MPU are cleared */
+
+ /* MPU reset */
+ mpu = ark->mpurx.v;
+ num_q = ark_api_num_queues(mpu);
+ ark->rx_queues = num_q;
+ for (i = 0; i < num_q; i++) {
+ ark_mpu_reset(mpu);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+
+ ark_udm_stop(ark->udm.v, 0);
+ ark_udm_configure(ark->udm.v,
+ RTE_PKTMBUF_HEADROOM,
+ RTE_MBUF_DEFAULT_DATAROOM,
+ ARK_RX_WRITE_TIME_NS);
+ ark_udm_stats_reset(ark->udm.v);
+ ark_udm_stop(ark->udm.v, 0);
+
+ /* TX -- DDM */
+ if (ark_ddm_stop(ark->ddm.v, 1))
+ PMD_DRV_LOG(ERR, "Unable to stop DDM\n");
+
+ mpu = ark->mputx.v;
+ num_q = ark_api_num_queues(mpu);
+ ark->tx_queues = num_q;
+ for (i = 0; i < num_q; i++) {
+ ark_mpu_reset(mpu);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+
+ ark_ddm_reset(ark->ddm.v);
+ ark_ddm_stats_reset(ark->ddm.v);
+
+ ark_ddm_stop(ark->ddm.v, 0);
+ ark_rqp_stats_reset(ark->rqpacing);
+
+ return 0;
+}
+
+static int
+eth_ark_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (ark->user_ext.dev_uninit)
+ ark->user_ext.dev_uninit(dev, ark->user_data);
+
+ ark_pktgen_uninit(ark->pg);
+ ark_pktchkr_uninit(ark->pc);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+ if (dev->data->mac_addrs)
+ rte_free(dev->data->mac_addrs);
+ if (dev->data)
+ rte_free(dev->data);
+
+ return 0;
+}
+
+static int
+eth_ark_dev_configure(struct rte_eth_dev *dev)
+{
+ PMD_FUNC_LOG(DEBUG, "\n");
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ eth_ark_dev_set_link_up(dev);
+ if (ark->user_ext.dev_configure)
+ return ark->user_ext.dev_configure(dev, ark->user_data);
+ return 0;
+}
+
+static void *
+delay_pg_start(void *arg)
+{
+ struct ark_adapter *ark = (struct ark_adapter *)arg;
+
+ /* This function is used exclusively for regression testing, We
+ * perform a blind sleep here to ensure that the external test
+ * application has time to setup the test before we generate packets
+ */
+ usleep(100000);
+ ark_pktgen_run(ark->pg);
+ return NULL;
+}
+
+static int
+eth_ark_dev_start(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ int i;
+
+ PMD_FUNC_LOG(DEBUG, "\n");
+
+ /* RX Side */
+ /* start UDM */
+ ark_udm_start(ark->udm.v);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_ark_rx_start_queue(dev, i);
+
+ /* TX Side */
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ eth_ark_tx_queue_start(dev, i);
+
+ /* start DDM */
+ ark_ddm_start(ark->ddm.v);
+
+ ark->started = 1;
+ /* set xmit and receive function */
+ dev->rx_pkt_burst = &eth_ark_recv_pkts;
+ dev->tx_pkt_burst = &eth_ark_xmit_pkts;
+
+ if (ark->start_pg)
+ ark_pktchkr_run(ark->pc);
+
+ if (ark->start_pg && (dev->data->port_id == 0)) {
+ pthread_t thread;
+
+ /* Delay packet generatpr start allow the hardware to be ready
+ * This is only used for sanity checking with internal generator
+ */
+ pthread_create(&thread, NULL, delay_pg_start, ark);
+ }
+
+ if (ark->user_ext.dev_start)
+ ark->user_ext.dev_start(dev, ark->user_data);
+
+ return 0;
+}
+
+static void
+eth_ark_dev_stop(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ int status;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ struct ark_mpu_t *mpu;
+
+ PMD_FUNC_LOG(DEBUG, "\n");
+
+ if (ark->started == 0)
+ return;
+ ark->started = 0;
+
+ /* Stop the extension first */
+ if (ark->user_ext.dev_stop)
+ ark->user_ext.dev_stop(dev, ark->user_data);
+
+ /* Stop the packet generator */
+ if (ark->start_pg)
+ ark_pktgen_pause(ark->pg);
+
+ dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
+ dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;
+
+ /* STOP TX Side */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ status = eth_ark_tx_queue_stop(dev, i);
+ if (status != 0) {
+ uint8_t port = dev->data->port_id;
+ PMD_DRV_LOG(ERR,
+ "tx_queue stop anomaly"
+ " port %u, queue %u\n",
+ port, i);
+ }
+ }
+
+ /* Stop DDM */
+ /* Wait up to 0.1 second. each stop is upto 1000 * 10 useconds */
+ for (i = 0; i < 10; i++) {
+ status = ark_ddm_stop(ark->ddm.v, 1);
+ if (status == 0)
+ break;
+ }
+ if (status || i != 0) {
+ PMD_DRV_LOG(ERR, "DDM stop anomaly. status:"
+ " %d iter: %u. (%s)\n",
+ status,
+ i,
+ __func__);
+ ark_ddm_dump(ark->ddm.v, "Stop anomaly");
+
+ mpu = ark->mputx.v;
+ for (i = 0; i < ark->tx_queues; i++) {
+ ark_mpu_dump(mpu, "DDM failure dump", i);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+ }
+
+ /* STOP RX Side */
+ /* Stop UDM multiple tries attempted */
+ for (i = 0; i < 10; i++) {
+ status = ark_udm_stop(ark->udm.v, 1);
+ if (status == 0)
+ break;
+ }
+ if (status || i != 0) {
+ PMD_DRV_LOG(ERR, "UDM stop anomaly. status %d iter: %u. (%s)\n",
+ status, i, __func__);
+ ark_udm_dump(ark->udm.v, "Stop anomaly");
+
+ mpu = ark->mpurx.v;
+ for (i = 0; i < ark->rx_queues; i++) {
+ ark_mpu_dump(mpu, "UDM Stop anomaly", i);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+ }
+
+ ark_udm_dump_stats(ark->udm.v, "Post stop");
+ ark_udm_dump_perf(ark->udm.v, "Post stop");
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ eth_ark_rx_dump_queue(dev, i, __func__);
+
+ /* Stop the packet checker if it is running */
+ if (ark->start_pg) {
+ ark_pktchkr_dump_stats(ark->pc);
+ ark_pktchkr_stop(ark->pc);
+ }
+}
+
+static void
+eth_ark_dev_close(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ uint16_t i;
+
+ if (ark->user_ext.dev_close)
+ ark->user_ext.dev_close(dev, ark->user_data);
+
+ eth_ark_dev_stop(dev);
+ eth_ark_udm_force_close(dev);
+
+ /*
+ * TODO This should only be called once for the device during shutdown
+ */
+ ark_rqp_dump(ark->rqpacing);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ eth_ark_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = 0;
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ eth_ark_dev_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = 0;
+ }
+}
+
+static void
+eth_ark_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE);
+ struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE);
+ uint16_t ports = ark->num_ports;
+
+ dev_info->max_rx_pktlen = ARK_RX_MAX_PKT_LEN;
+ dev_info->min_rx_bufsize = ARK_RX_MIN_BUFSIZE;
+
+ dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports);
+ dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports);
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ARK_RX_MAX_QUEUE,
+ .nb_min = ARK_RX_MIN_QUEUE,
+ .nb_align = ARK_RX_MIN_QUEUE}; /* power of 2 */
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ARK_TX_MAX_QUEUE,
+ .nb_min = ARK_TX_MIN_QUEUE,
+ .nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
+
+ /* ARK PMD supports all line rates, how do we indicate that here ?? */
+ dev_info->speed_capa = (ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G |
+ ETH_LINK_SPEED_100G);
+ dev_info->pci_dev = ARK_DEV_TO_PCI(dev);
+}
+
+static int
+eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ PMD_DEBUG_LOG(DEBUG, "link status = %d\n",
+ dev->data->dev_link.link_status);
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.link_update) {
+ return ark->user_ext.link_update
+ (dev, wait_to_complete,
+ ark->user_data);
+ }
+ return 0;
+}
+
+static int
+eth_ark_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = 1;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.dev_set_link_up)
+ return ark->user_ext.dev_set_link_up(dev, ark->user_data);
+ return 0;
+}
+
+static int
+eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = 0;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.dev_set_link_down)
+ return ark->user_ext.dev_set_link_down(dev, ark->user_data);
+ return 0;
+}
+
+static void
+eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ uint16_t i;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ stats->ipackets = 0;
+ stats->ibytes = 0;
+ stats->opackets = 0;
+ stats->obytes = 0;
+ stats->imissed = 0;
+ stats->oerrors = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ eth_tx_queue_stats_get(dev->data->tx_queues[i], stats);
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_rx_queue_stats_get(dev->data->rx_queues[i], stats);
+ if (ark->user_ext.stats_get)
+ ark->user_ext.stats_get(dev, stats, ark->user_data);
+}
+
+static void
+eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ eth_tx_queue_stats_reset(dev->data->rx_queues[i]);
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_rx_queue_stats_reset(dev->data->rx_queues[i]);
+ if (ark->user_ext.stats_reset)
+ ark->user_ext.stats_reset(dev, ark->user_data);
+}
+
+static int
+eth_ark_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.mac_addr_add) {
+ ark->user_ext.mac_addr_add(dev,
+ mac_addr,
+ index,
+ pool,
+ ark->user_data);
+ return 0;
+ }
+ return -ENOTSUP;
+}
+
+static void
+eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.mac_addr_remove)
+ ark->user_ext.mac_addr_remove(dev, index, ark->user_data);
+}
+
+static void
+eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.mac_addr_set)
+ ark->user_ext.mac_addr_set(dev, mac_addr, ark->user_data);
+}
+
+static inline int
+process_pktdir_arg(const char *key, const char *value,
+ void *extra_args)
+{
+ PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
+ key, value);
+ struct ark_adapter *ark =
+ (struct ark_adapter *)extra_args;
+
+ ark->pkt_dir_v = strtol(value, NULL, 16);
+ PMD_FUNC_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v);
+ return 0;
+}
+
+static inline int
+process_file_args(const char *key, const char *value, void *extra_args)
+{
+ PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
+ key, value);
+ char *args = (char *)extra_args;
+
+ /* Open the configuration file */
+ FILE *file = fopen(value, "r");
+ char line[ARK_MAX_ARG_LEN];
+ int size = 0;
+ int first = 1;
+
+ while (fgets(line, sizeof(line), file)) {
+ size += strlen(line);
+ if (size >= ARK_MAX_ARG_LEN) {
+ PMD_DRV_LOG(ERR, "Unable to parse file %s args, "
+ "parameter list is too long\n", value);
+ fclose(file);
+ return -1;
+ }
+ if (first) {
+ strncpy(args, line, ARK_MAX_ARG_LEN);
+ first = 0;
+ } else {
+ strncat(args, line, ARK_MAX_ARG_LEN);
+ }
+ }
+ PMD_FUNC_LOG(DEBUG, "file = %s\n", args);
+ fclose(file);
+ return 0;
+}
+
+static int
+eth_ark_check_args(struct ark_adapter *ark, const char *params)
+{
+ struct rte_kvargs *kvlist;
+ unsigned int k_idx;
+ struct rte_kvargs_pair *pair = NULL;
+ int ret = -1;
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist == NULL)
+ return 0;
+
+ ark->pkt_gen_args[0] = 0;
+ ark->pkt_chkr_args[0] = 0;
+
+ for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
+ pair = &kvlist->pairs[k_idx];
+ PMD_FUNC_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n",
+ pair->key,
+ pair->value);
+ }
+
+ if (rte_kvargs_process(kvlist,
+ ARK_PKTDIR_ARG,
+ &process_pktdir_arg,
+ ark) != 0) {
+ PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG);
+ goto free_kvlist;
+ }
+
+ if (rte_kvargs_process(kvlist,
+ ARK_PKTGEN_ARG,
+ &process_file_args,
+ ark->pkt_gen_args) != 0) {
+ PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG);
+ goto free_kvlist;
+ }
+
+ if (rte_kvargs_process(kvlist,
+ ARK_PKTCHKR_ARG,
+ &process_file_args,
+ ark->pkt_chkr_args) != 0) {
+ PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG);
+ goto free_kvlist;
+ }
+
+ PMD_DRV_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v);
+ /* Setup the packet director */
+ ark_pktdir_setup(ark->pd, ark->pkt_dir_v);
+
+ /* Setup the packet generator */
+ if (ark->pkt_gen_args[0]) {
+ PMD_DRV_LOG(INFO, "Setting up the packet generator\n");
+ ark_pktgen_parse(ark->pkt_gen_args);
+ ark_pktgen_reset(ark->pg);
+ ark_pktgen_setup(ark->pg);
+ ark->start_pg = 1;
+ }
+
+ /* Setup the packet checker */
+ if (ark->pkt_chkr_args[0]) {
+ ark_pktchkr_parse(ark->pkt_chkr_args);
+ ark_pktchkr_setup(ark->pc);
+ }
+
+ ret = 0;
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+RTE_PMD_REGISTER_PCI(net_ark, rte_ark_pmd);
+RTE_PMD_REGISTER_KMOD_DEP(net_ark, "* igb_uio | uio_pci_generic ");
+RTE_PMD_REGISTER_PCI_TABLE(net_ark, pci_id_ark_map);
+RTE_PMD_REGISTER_PARAM_STRING(net_ark,
+ ARK_PKTGEN_ARG "=<filename> "
+ ARK_PKTCHKR_ARG "=<filename> "
+ ARK_PKTDIR_ARG "=<bitmap>");
diff --git a/drivers/net/ark/ark_ethdev.h b/drivers/net/ark/ark_ethdev.h
new file mode 100644
index 00000000..9f8d32fc
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev.h
@@ -0,0 +1,41 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_ETHDEV_H_
+#define _ARK_ETHDEV_H_
+
+#define ARK_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
+
+
+#endif
diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c
new file mode 100644
index 00000000..f39e6f68
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev_rx.c
@@ -0,0 +1,673 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_ethdev_rx.h"
+#include "ark_global.h"
+#include "ark_logs.h"
+#include "ark_ethdev.h"
+#include "ark_mpu.h"
+#include "ark_udm.h"
+
+#define ARK_RX_META_SIZE 32
+#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)
+#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+
+/* Forward declarations */
+struct ark_rx_queue;
+struct ark_rx_meta;
+
+static void dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi);
+static void ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue);
+static uint32_t eth_ark_rx_jumbo(struct ark_rx_queue *queue,
+ struct ark_rx_meta *meta,
+ struct rte_mbuf *mbuf0,
+ uint32_t cons_index);
+static inline int eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue);
+
+/* ************************************************************************* */
+struct ark_rx_queue {
+ /* array of mbufs to populate */
+ struct rte_mbuf **reserve_q;
+ /* array of physical addresses of the mbuf data pointer */
+ /* This point is a virtual address */
+ phys_addr_t *paddress_q;
+ struct rte_mempool *mb_pool;
+
+ struct ark_udm_t *udm;
+ struct ark_mpu_t *mpu;
+
+ uint32_t queue_size;
+ uint32_t queue_mask;
+
+ uint32_t seed_index; /* step 1 set with empty mbuf */
+ uint32_t cons_index; /* step 3 consumed by driver */
+
+ /* The queue Id is used to identify the HW Q */
+ uint16_t phys_qid;
+
+ /* The queue Index is used within the dpdk device structures */
+ uint16_t queue_index;
+
+ uint32_t pad1;
+
+ /* separate cache line */
+ /* second cache line - fields only used in slow path */
+ MARKER cacheline1 __rte_cache_min_aligned;
+
+ volatile uint32_t prod_index; /* step 2 filled by FPGA */
+} __rte_cache_aligned;
+
+
+/* ************************************************************************* */
+static int
+eth_ark_rx_hw_setup(struct rte_eth_dev *dev,
+ struct ark_rx_queue *queue,
+ uint16_t rx_queue_id __rte_unused, uint16_t rx_queue_idx)
+{
+ phys_addr_t queue_base;
+ phys_addr_t phys_addr_q_base;
+ phys_addr_t phys_addr_prod_index;
+
+ queue_base = rte_malloc_virt2phy(queue);
+ phys_addr_prod_index = queue_base +
+ offsetof(struct ark_rx_queue, prod_index);
+
+ phys_addr_q_base = rte_malloc_virt2phy(queue->paddress_q);
+
+ /* Verify HW */
+ if (ark_mpu_verify(queue->mpu, sizeof(phys_addr_t))) {
+ PMD_DRV_LOG(ERR, "Illegal configuration rx queue\n");
+ return -1;
+ }
+
+ /* Stop and Reset and configure MPU */
+ ark_mpu_configure(queue->mpu, phys_addr_q_base, queue->queue_size, 0);
+
+ ark_udm_write_addr(queue->udm, phys_addr_prod_index);
+
+ /* advance the valid pointer, but don't start until the queue starts */
+ ark_mpu_reset_stats(queue->mpu);
+
+ /* The seed is the producer index for the HW */
+ ark_mpu_set_producer(queue->mpu, queue->seed_index);
+ dev->data->rx_queue_state[rx_queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static inline void
+eth_ark_rx_update_cons_index(struct ark_rx_queue *queue, uint32_t cons_index)
+{
+ queue->cons_index = cons_index;
+ eth_ark_rx_seed_mbufs(queue);
+ ark_mpu_set_producer(queue->mpu, queue->seed_index);
+}
+
+/* ************************************************************************* */
+int
+eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ static int warning1; /* = 0 */
+ struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
+
+ struct ark_rx_queue *queue;
+ uint32_t i;
+ int status;
+
+ /* Future works: divide the Q's evenly with multi-ports */
+ int port = dev->data->port_id;
+ int qidx = port + queue_idx;
+
+ /* We may already be setup, free memory prior to re-allocation */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ eth_ark_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ if (rx_conf != NULL && warning1 == 0) {
+ warning1 = 1;
+ PMD_DRV_LOG(INFO,
+ "Arkville ignores rte_eth_rxconf argument.\n");
+ }
+
+ if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
+ PMD_DRV_LOG(ERR,
+ "Error: DPDK Arkville requires head room > %d bytes (%s)\n",
+ ARK_RX_META_SIZE, __func__);
+ return -1; /* ERROR CODE */
+ }
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_DRV_LOG(ERR,
+ "DPDK Arkville configuration queue size must be power of two %u (%s)\n",
+ nb_desc, __func__);
+ return -1; /* ERROR CODE */
+ }
+
+ /* Allocate queue struct */
+ queue = rte_zmalloc_socket("Ark_rxqueue",
+ sizeof(struct ark_rx_queue),
+ 64,
+ socket_id);
+ if (queue == 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory in %s\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* NOTE zmalloc is used, no need to 0 indexes, etc. */
+ queue->mb_pool = mb_pool;
+ queue->phys_qid = qidx;
+ queue->queue_index = queue_idx;
+ queue->queue_size = nb_desc;
+ queue->queue_mask = nb_desc - 1;
+
+ queue->reserve_q =
+ rte_zmalloc_socket("Ark_rx_queue mbuf",
+ nb_desc * sizeof(struct rte_mbuf *),
+ 64,
+ socket_id);
+ queue->paddress_q =
+ rte_zmalloc_socket("Ark_rx_queue paddr",
+ nb_desc * sizeof(phys_addr_t),
+ 64,
+ socket_id);
+
+ if (queue->reserve_q == 0 || queue->paddress_q == 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate queue memory in %s\n",
+ __func__);
+ rte_free(queue->reserve_q);
+ rte_free(queue->paddress_q);
+ rte_free(queue);
+ return -ENOMEM;
+ }
+
+ dev->data->rx_queues[queue_idx] = queue;
+ queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET);
+ queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET);
+
+ /* populate mbuf reserve */
+ status = eth_ark_rx_seed_mbufs(queue);
+
+ /* MPU Setup */
+ if (status == 0)
+ status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx);
+
+ if (unlikely(status != 0)) {
+ struct rte_mbuf *mbuf;
+
+ PMD_DRV_LOG(ERR, "Failed to initialize RX queue %d %s\n",
+ qidx,
+ __func__);
+ /* Free the mbufs allocated */
+ for (i = 0, mbuf = queue->reserve_q[0];
+ i < nb_desc; ++i, mbuf++) {
+ rte_pktmbuf_free(mbuf);
+ }
+ rte_free(queue->reserve_q);
+ rte_free(queue->paddress_q);
+ rte_free(queue);
+ return -1; /* ERROR CODE */
+ }
+
+ return 0;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_recv_pkts_noop(void *rx_queue __rte_unused,
+ struct rte_mbuf **rx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ark_rx_queue *queue;
+ register uint32_t cons_index, prod_index;
+ uint16_t nb;
+ struct rte_mbuf *mbuf;
+ struct ark_rx_meta *meta;
+
+ queue = (struct ark_rx_queue *)rx_queue;
+ if (unlikely(queue == 0))
+ return 0;
+ if (unlikely(nb_pkts == 0))
+ return 0;
+ prod_index = queue->prod_index;
+ cons_index = queue->cons_index;
+ nb = 0;
+
+ while (prod_index != cons_index) {
+ mbuf = queue->reserve_q[cons_index & queue->queue_mask];
+ /* prefetch mbuf */
+ rte_mbuf_prefetch_part1(mbuf);
+ rte_mbuf_prefetch_part2(mbuf);
+
+ /* META DATA embedded in headroom */
+ meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);
+
+ mbuf->port = meta->port;
+ mbuf->pkt_len = meta->pkt_len;
+ mbuf->data_len = meta->pkt_len;
+ mbuf->timestamp = meta->timestamp;
+ mbuf->udata64 = meta->user_data;
+
+ if (ARK_RX_DEBUG) { /* debug sanity checks */
+ if ((meta->pkt_len > (1024 * 16)) ||
+ (meta->pkt_len == 0)) {
+ PMD_RX_LOG(DEBUG, "RX: Bad Meta Q: %u"
+ " cons: %" PRIU32
+ " prod: %" PRIU32
+ " seed_index %" PRIU32
+ "\n",
+ queue->phys_qid,
+ cons_index,
+ queue->prod_index,
+ queue->seed_index);
+
+
+ PMD_RX_LOG(DEBUG, " : UDM"
+ " prod: %" PRIU32
+ " len: %u\n",
+ queue->udm->rt_cfg.prod_idx,
+ meta->pkt_len);
+ ark_mpu_dump(queue->mpu,
+ " ",
+ queue->phys_qid);
+ dump_mbuf_data(mbuf, 0, 256);
+ /* its FUBAR so fix it */
+ mbuf->pkt_len = 63;
+ meta->pkt_len = 63;
+ }
+ /* seqn is only set under debug */
+ mbuf->seqn = cons_index;
+ }
+
+ if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN))
+ cons_index = eth_ark_rx_jumbo
+ (queue, meta, mbuf, cons_index + 1);
+ else
+ cons_index += 1;
+
+ rx_pkts[nb] = mbuf;
+ nb++;
+ if (nb >= nb_pkts)
+ break;
+ }
+
+ if (unlikely(nb != 0))
+ /* report next free to FPGA */
+ eth_ark_rx_update_cons_index(queue, cons_index);
+
+ return nb;
+}
+
+/* ************************************************************************* */
+static uint32_t
+eth_ark_rx_jumbo(struct ark_rx_queue *queue,
+ struct ark_rx_meta *meta,
+ struct rte_mbuf *mbuf0,
+ uint32_t cons_index)
+{
+ struct rte_mbuf *mbuf_prev;
+ struct rte_mbuf *mbuf;
+
+ uint16_t remaining;
+ uint16_t data_len;
+ uint8_t segments;
+
+ /* first buf populated by called */
+ mbuf_prev = mbuf0;
+ segments = 1;
+ data_len = RTE_MIN(meta->pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
+ remaining = meta->pkt_len - data_len;
+ mbuf0->data_len = data_len;
+
+ /* HW guarantees that the data does not exceed prod_index! */
+ while (remaining != 0) {
+ data_len = RTE_MIN(remaining,
+ RTE_MBUF_DEFAULT_DATAROOM +
+ RTE_PKTMBUF_HEADROOM);
+
+ remaining -= data_len;
+ segments += 1;
+
+ mbuf = queue->reserve_q[cons_index & queue->queue_mask];
+ mbuf_prev->next = mbuf;
+ mbuf_prev = mbuf;
+ mbuf->data_len = data_len;
+ mbuf->data_off = 0;
+ if (ARK_RX_DEBUG)
+ mbuf->seqn = cons_index; /* for debug only */
+
+ cons_index += 1;
+ }
+
+ mbuf0->nb_segs = segments;
+ return cons_index;
+}
+
+/* Drain the internal queue allowing hw to clear out. */
+static void
+eth_ark_rx_queue_drain(struct ark_rx_queue *queue)
+{
+ register uint32_t cons_index;
+ struct rte_mbuf *mbuf;
+
+ cons_index = queue->cons_index;
+
+ /* NOT performance optimized, since this is a one-shot call */
+ while ((cons_index ^ queue->prod_index) & queue->queue_mask) {
+ mbuf = queue->reserve_q[cons_index & queue->queue_mask];
+ rte_pktmbuf_free(mbuf);
+ cons_index++;
+ eth_ark_rx_update_cons_index(queue, cons_index);
+ }
+}
+
+uint32_t
+eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+ return (queue->prod_index - queue->cons_index); /* mod arith */
+}
+
+/* ************************************************************************* */
+int
+eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+ if (queue == 0)
+ return -1;
+
+ dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ ark_mpu_set_producer(queue->mpu, queue->seed_index);
+ ark_mpu_start(queue->mpu);
+
+ ark_udm_queue_enable(queue->udm, 1);
+
+ return 0;
+}
+
+/* ************************************************************************* */
+
+/* Queue can be restarted. data remains
+ */
+int
+eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+ if (queue == 0)
+ return -1;
+
+ ark_udm_queue_enable(queue->udm, 0);
+
+ dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+/* ************************************************************************* */
+static inline int
+eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
+{
+ uint32_t limit = queue->cons_index + queue->queue_size;
+ uint32_t seed_index = queue->seed_index;
+
+ uint32_t count = 0;
+ uint32_t seed_m = queue->seed_index & queue->queue_mask;
+
+ uint32_t nb = limit - seed_index;
+
+ /* Handle wrap around -- remainder is filled on the next call */
+ if (unlikely(seed_m + nb > queue->queue_size))
+ nb = queue->queue_size - seed_m;
+
+ struct rte_mbuf **mbufs = &queue->reserve_q[seed_m];
+ int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb);
+
+ if (unlikely(status != 0))
+ return -1;
+
+ if (ARK_RX_DEBUG) { /* DEBUG */
+ while (count != nb) {
+ struct rte_mbuf *mbuf_init =
+ queue->reserve_q[seed_m + count];
+
+ memset(mbuf_init->buf_addr, -1, 512);
+ *((uint32_t *)mbuf_init->buf_addr) =
+ seed_index + count;
+ *(uint16_t *)RTE_PTR_ADD(mbuf_init->buf_addr, 4) =
+ queue->phys_qid;
+ count++;
+ }
+ count = 0;
+ } /* DEBUG */
+ queue->seed_index += nb;
+
+ /* Duff's device https://en.wikipedia.org/wiki/Duff's_device */
+ switch (nb % 4) {
+ case 0:
+ while (count != nb) {
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_physaddr;
+ count++;
+ /* FALLTHROUGH */
+ case 3:
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_physaddr;
+ count++;
+ /* FALLTHROUGH */
+ case 2:
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_physaddr;
+ count++;
+ /* FALLTHROUGH */
+ case 1:
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_physaddr;
+ count++;
+ /* FALLTHROUGH */
+
+ } /* while (count != nb) */
+ } /* switch */
+
+ return 0;
+}
+
+void
+eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
+ const char *msg)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+
+ ark_ethdev_rx_dump(msg, queue);
+}
+
+/* ************************************************************************* */
+/* Call on device closed no user API, queue is stopped */
+void
+eth_ark_dev_rx_queue_release(void *vqueue)
+{
+ struct ark_rx_queue *queue;
+ uint32_t i;
+
+ queue = (struct ark_rx_queue *)vqueue;
+ if (queue == 0)
+ return;
+
+ ark_udm_queue_enable(queue->udm, 0);
+ /* Stop the MPU since pointer are going away */
+ ark_mpu_stop(queue->mpu);
+
+ /* Need to clear out mbufs here, dropping packets along the way */
+ eth_ark_rx_queue_drain(queue);
+
+ for (i = 0; i < queue->queue_size; ++i)
+ rte_pktmbuf_free(queue->reserve_q[i]);
+
+ rte_free(queue->reserve_q);
+ rte_free(queue->paddress_q);
+ rte_free(queue);
+}
+
+void
+eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
+{
+ struct ark_rx_queue *queue;
+ struct ark_udm_t *udm;
+
+ queue = vqueue;
+ if (queue == 0)
+ return;
+ udm = queue->udm;
+
+ uint64_t ibytes = ark_udm_bytes(udm);
+ uint64_t ipackets = ark_udm_packets(udm);
+ uint64_t idropped = ark_udm_dropped(queue->udm);
+
+ stats->q_ipackets[queue->queue_index] = ipackets;
+ stats->q_ibytes[queue->queue_index] = ibytes;
+ stats->q_errors[queue->queue_index] = idropped;
+ stats->ipackets += ipackets;
+ stats->ibytes += ibytes;
+ stats->imissed += idropped;
+}
+
+void
+eth_rx_queue_stats_reset(void *vqueue)
+{
+ struct ark_rx_queue *queue;
+
+ queue = vqueue;
+ if (queue == 0)
+ return;
+
+ ark_mpu_reset_stats(queue->mpu);
+ ark_udm_queue_stats_reset(queue->udm);
+}
+
+void
+eth_ark_udm_force_close(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
+ struct ark_rx_queue *queue;
+ uint32_t index;
+ uint16_t i;
+
+ if (!ark_udm_is_flushed(ark->udm.v)) {
+ /* restart the MPUs */
+ PMD_DRV_LOG(ERR, "ARK: %s UDM not flushed\n", __func__);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ queue = (struct ark_rx_queue *)dev->data->rx_queues[i];
+ if (queue == 0)
+ continue;
+
+ ark_mpu_start(queue->mpu);
+ /* Add some buffers */
+ index = 100000 + queue->seed_index;
+ ark_mpu_set_producer(queue->mpu, index);
+ }
+ /* Wait to allow data to pass */
+ usleep(100);
+
+ PMD_DEBUG_LOG(DEBUG, "UDM forced flush attempt, stopped = %d\n",
+ ark_udm_is_flushed(ark->udm.v));
+ }
+ ark_udm_reset(ark->udm.v);
+}
+
+static void
+ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue)
+{
+ if (queue == NULL)
+ return;
+ PMD_DEBUG_LOG(DEBUG, "RX QUEUE %d -- %s", queue->phys_qid, name);
+ PMD_DEBUG_LOG(DEBUG, ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n",
+ "queue_size", queue->queue_size,
+ "seed_index", queue->seed_index,
+ "prod_index", queue->prod_index,
+ "cons_index", queue->cons_index);
+
+ ark_mpu_dump(queue->mpu, name, queue->phys_qid);
+ ark_mpu_dump_setup(queue->mpu, queue->phys_qid);
+ ark_udm_dump(queue->udm, name);
+ ark_udm_dump_setup(queue->udm, queue->phys_qid);
+}
+
+/* Only used in debug.
+ * This function is a raw memory dump of a portion of an mbuf's memory
+ * region. The usual function, rte_pktmbuf_dump() only shows data
+ * with respect to the data_off field. This function show data
+ * anywhere in the mbuf's buffer. This is useful for examining
+ * data in the headroom or tailroom portion of an mbuf.
+ */
+static void
+dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi)
+{
+ uint16_t i, j;
+
+ PMD_DRV_LOG(INFO, " MBUF: %p len %d, off: %d, seq: %" PRIU32 "\n", mbuf,
+ mbuf->pkt_len, mbuf->data_off, mbuf->seqn);
+ for (i = lo; i < hi; i += 16) {
+ uint8_t *dp = RTE_PTR_ADD(mbuf->buf_addr, i);
+
+ PMD_DRV_LOG(INFO, " %6d: ", i);
+ for (j = 0; j < 16; j++)
+ PMD_DRV_LOG(INFO, " %02x", dp[j]);
+
+ PMD_DRV_LOG(INFO, "\n");
+ }
+}
diff --git a/drivers/net/ark/ark_ethdev_rx.h b/drivers/net/ark/ark_ethdev_rx.h
new file mode 100644
index 00000000..3a54a4c9
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev_rx.h
@@ -0,0 +1,65 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_ETHDEV_RX_H_
+#define _ARK_ETHDEV_RX_H_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_ethdev.h>
+
+
+int eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+uint32_t eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+int eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id);
+int eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id);
+uint16_t eth_ark_recv_pkts_noop(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t eth_ark_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+void eth_ark_dev_rx_queue_release(void *rx_queue);
+void eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats);
+void eth_rx_queue_stats_reset(void *vqueue);
+void eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
+ const char *msg);
+void eth_ark_udm_force_close(struct rte_eth_dev *dev);
+
+#endif
diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c
new file mode 100644
index 00000000..9ae7ae0e
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev_tx.c
@@ -0,0 +1,468 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_ethdev_tx.h"
+#include "ark_global.h"
+#include "ark_mpu.h"
+#include "ark_ddm.h"
+#include "ark_ethdev.h"
+#include "ark_logs.h"
+
+#define ARK_TX_META_SIZE 32
+#define ARK_TX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_TX_META_SIZE)
+#define ARK_TX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+
+
+/* ************************************************************************* */
+struct ark_tx_queue {
+ struct ark_tx_meta *meta_q;
+ struct rte_mbuf **bufs;
+
+ /* handles for hw objects */
+ struct ark_mpu_t *mpu;
+ struct ark_ddm_t *ddm;
+
+ /* Stats HW tracks bytes and packets, need to count send errors */
+ uint64_t tx_errors;
+
+ uint32_t queue_size;
+ uint32_t queue_mask;
+
+ /* 3 indexes to the paired data rings. */
+ uint32_t prod_index; /* where to put the next one */
+ uint32_t free_index; /* mbuf has been freed */
+
+ /* The queue Id is used to identify the HW Q */
+ uint16_t phys_qid;
+ /* The queue Index within the dpdk device structures */
+ uint16_t queue_index;
+
+ uint32_t pad[1];
+
+ /* second cache line - fields only used in slow path */
+ MARKER cacheline1 __rte_cache_min_aligned;
+ uint32_t cons_index; /* hw is done, can be freed */
+} __rte_cache_aligned;
+
+/* Forward declarations */
+static uint32_t eth_ark_tx_jumbo(struct ark_tx_queue *queue,
+ struct rte_mbuf *mbuf);
+static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
+static void free_completed_tx(struct ark_tx_queue *queue);
+
+static inline void
+ark_tx_hw_queue_stop(struct ark_tx_queue *queue)
+{
+ ark_mpu_stop(queue->mpu);
+}
+
+/* ************************************************************************* */
+static inline void
+eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta,
+ const struct rte_mbuf *mbuf,
+ uint8_t flags)
+{
+ meta->physaddr = rte_mbuf_data_dma_addr(mbuf);
+ meta->delta_ns = 0;
+ meta->data_len = rte_pktmbuf_data_len(mbuf);
+ meta->flags = flags;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_xmit_pkts_noop(void *vtxq __rte_unused,
+ struct rte_mbuf **tx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct ark_tx_queue *queue;
+ struct rte_mbuf *mbuf;
+ struct ark_tx_meta *meta;
+
+ uint32_t idx;
+ uint32_t prod_index_limit;
+ int stat;
+ uint16_t nb;
+
+ queue = (struct ark_tx_queue *)vtxq;
+
+ /* free any packets after the HW is done with them */
+ free_completed_tx(queue);
+
+ prod_index_limit = queue->queue_size + queue->free_index;
+
+ for (nb = 0;
+ (nb < nb_pkts) && (queue->prod_index != prod_index_limit);
+ ++nb) {
+ mbuf = tx_pkts[nb];
+
+ if (ARK_TX_PAD_TO_60) {
+ if (unlikely(rte_pktmbuf_pkt_len(mbuf) < 60)) {
+ /* this packet even if it is small can be split,
+ * be sure to add to the end mbuf
+ */
+ uint16_t to_add =
+ 60 - rte_pktmbuf_pkt_len(mbuf);
+ char *appended =
+ rte_pktmbuf_append(mbuf, to_add);
+
+ if (appended == 0) {
+ /* This packet is in error,
+ * we cannot send it so just
+ * count it and delete it.
+ */
+ queue->tx_errors += 1;
+ rte_pktmbuf_free(mbuf);
+ continue;
+ }
+ memset(appended, 0, to_add);
+ }
+ }
+
+ if (unlikely(mbuf->nb_segs != 1)) {
+ stat = eth_ark_tx_jumbo(queue, mbuf);
+ if (unlikely(stat != 0))
+ break; /* Queue is full */
+ } else {
+ idx = queue->prod_index & queue->queue_mask;
+ queue->bufs[idx] = mbuf;
+ meta = &queue->meta_q[idx];
+ eth_ark_tx_meta_from_mbuf(meta,
+ mbuf,
+ ARK_DDM_SOP |
+ ARK_DDM_EOP);
+ queue->prod_index++;
+ }
+ }
+
+ if (ARK_TX_DEBUG && (nb != nb_pkts)) {
+ PMD_TX_LOG(DEBUG, "TX: Failure to send:"
+ " req: %" PRIU32
+ " sent: %" PRIU32
+ " prod: %" PRIU32
+ " cons: %" PRIU32
+ " free: %" PRIU32 "\n",
+ nb_pkts, nb,
+ queue->prod_index,
+ queue->cons_index,
+ queue->free_index);
+ ark_mpu_dump(queue->mpu,
+ "TX Failure MPU: ",
+ queue->phys_qid);
+ }
+
+ /* let FPGA know producer index. */
+ if (likely(nb != 0))
+ ark_mpu_set_producer(queue->mpu, queue->prod_index);
+
+ return nb;
+}
+
+/* ************************************************************************* */
+static uint32_t
+eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf)
+{
+ struct rte_mbuf *next;
+ struct ark_tx_meta *meta;
+ uint32_t free_queue_space;
+ uint32_t idx;
+ uint8_t flags = ARK_DDM_SOP;
+
+ free_queue_space = queue->queue_mask -
+ (queue->prod_index - queue->free_index);
+ if (unlikely(free_queue_space < mbuf->nb_segs))
+ return -1;
+
+ while (mbuf != NULL) {
+ next = mbuf->next;
+
+ idx = queue->prod_index & queue->queue_mask;
+ queue->bufs[idx] = mbuf;
+ meta = &queue->meta_q[idx];
+
+ flags |= (next == NULL) ? ARK_DDM_EOP : 0;
+ eth_ark_tx_meta_from_mbuf(meta, mbuf, flags);
+ queue->prod_index++;
+
+ flags &= ~ARK_DDM_SOP; /* drop SOP flags */
+ mbuf = next;
+ }
+
+ return 0;
+}
+
+/* ************************************************************************* */
+int
+eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
+ struct ark_tx_queue *queue;
+ int status;
+
+ /* Future: divide the Q's evenly with multi-ports */
+ int port = dev->data->port_id;
+ int qidx = port + queue_idx;
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_DRV_LOG(ERR,
+ "DPDK Arkville configuration queue size"
+ " must be power of two %u (%s)\n",
+ nb_desc, __func__);
+ return -1;
+ }
+
+ /* Allocate queue struct */
+ queue = rte_zmalloc_socket("Ark_txqueue",
+ sizeof(struct ark_tx_queue),
+ 64,
+ socket_id);
+ if (queue == 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate tx "
+ "queue memory in %s\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* we use zmalloc no need to initialize fields */
+ queue->queue_size = nb_desc;
+ queue->queue_mask = nb_desc - 1;
+ queue->phys_qid = qidx;
+ queue->queue_index = queue_idx;
+ dev->data->tx_queues[queue_idx] = queue;
+
+ queue->meta_q =
+ rte_zmalloc_socket("Ark_txqueue meta",
+ nb_desc * sizeof(struct ark_tx_meta),
+ 64,
+ socket_id);
+ queue->bufs =
+ rte_zmalloc_socket("Ark_txqueue bufs",
+ nb_desc * sizeof(struct rte_mbuf *),
+ 64,
+ socket_id);
+
+ if (queue->meta_q == 0 || queue->bufs == 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate "
+ "queue memory in %s\n", __func__);
+ rte_free(queue->meta_q);
+ rte_free(queue->bufs);
+ rte_free(queue);
+ return -ENOMEM;
+ }
+
+ queue->ddm = RTE_PTR_ADD(ark->ddm.v, qidx * ARK_DDM_QOFFSET);
+ queue->mpu = RTE_PTR_ADD(ark->mputx.v, qidx * ARK_MPU_QOFFSET);
+
+ status = eth_ark_tx_hw_queue_config(queue);
+
+ if (unlikely(status != 0)) {
+ rte_free(queue->meta_q);
+ rte_free(queue->bufs);
+ rte_free(queue);
+ return -1; /* ERROR CODE */
+ }
+
+ return 0;
+}
+
+/* ************************************************************************* */
+static int
+eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
+{
+ phys_addr_t queue_base, ring_base, cons_index_addr;
+ uint32_t write_interval_ns;
+
+ /* Verify HW -- MPU */
+ if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))
+ return -1;
+
+ queue_base = rte_malloc_virt2phy(queue);
+ ring_base = rte_malloc_virt2phy(queue->meta_q);
+ cons_index_addr =
+ queue_base + offsetof(struct ark_tx_queue, cons_index);
+
+ ark_mpu_stop(queue->mpu);
+ ark_mpu_reset(queue->mpu);
+
+ /* Stop and Reset and configure MPU */
+ ark_mpu_configure(queue->mpu, ring_base, queue->queue_size, 1);
+
+ /*
+ * Adjust the write interval based on queue size --
+ * increase pcie traffic when low mbuf count
+ * Queue sizes less than 128 are not allowed
+ */
+ switch (queue->queue_size) {
+ case 128:
+ write_interval_ns = 500;
+ break;
+ case 256:
+ write_interval_ns = 500;
+ break;
+ case 512:
+ write_interval_ns = 1000;
+ break;
+ default:
+ write_interval_ns = 2000;
+ break;
+ }
+
+ /* Completion address in UDM */
+ ark_ddm_setup(queue->ddm, cons_index_addr, write_interval_ns);
+
+ return 0;
+}
+
+/* ************************************************************************* */
+void
+eth_ark_tx_queue_release(void *vtx_queue)
+{
+ struct ark_tx_queue *queue;
+
+ queue = (struct ark_tx_queue *)vtx_queue;
+
+ ark_tx_hw_queue_stop(queue);
+
+ queue->cons_index = queue->prod_index;
+ free_completed_tx(queue);
+
+ rte_free(queue->meta_q);
+ rte_free(queue->bufs);
+ rte_free(queue);
+}
+
+/* ************************************************************************* */
+int
+eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_tx_queue *queue;
+ int cnt = 0;
+
+ queue = dev->data->tx_queues[queue_id];
+
+ /* Wait for DDM to send out all packets. */
+ while (queue->cons_index != queue->prod_index) {
+ usleep(100);
+ if (cnt++ > 10000)
+ return -1;
+ }
+
+ ark_mpu_stop(queue->mpu);
+ free_completed_tx(queue);
+
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+int
+eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_tx_queue *queue;
+
+ queue = dev->data->tx_queues[queue_id];
+ if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ ark_mpu_start(queue->mpu);
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/* ************************************************************************* */
+static void
+free_completed_tx(struct ark_tx_queue *queue)
+{
+ struct rte_mbuf *mbuf;
+ struct ark_tx_meta *meta;
+ uint32_t top_index;
+
+ top_index = queue->cons_index; /* read once */
+ while (queue->free_index != top_index) {
+ meta = &queue->meta_q[queue->free_index & queue->queue_mask];
+ mbuf = queue->bufs[queue->free_index & queue->queue_mask];
+
+ if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
+ /* ref count of the mbuf is checked in this call. */
+ rte_pktmbuf_free(mbuf);
+ }
+ queue->free_index++;
+ }
+}
+
+/* ************************************************************************* */
+void
+eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
+{
+ struct ark_tx_queue *queue;
+ struct ark_ddm_t *ddm;
+ uint64_t bytes, pkts;
+
+ queue = vqueue;
+ ddm = queue->ddm;
+
+ bytes = ark_ddm_queue_byte_count(ddm);
+ pkts = ark_ddm_queue_pkt_count(ddm);
+
+ stats->q_opackets[queue->queue_index] = pkts;
+ stats->q_obytes[queue->queue_index] = bytes;
+ stats->opackets += pkts;
+ stats->obytes += bytes;
+ stats->oerrors += queue->tx_errors;
+}
+
+void
+eth_tx_queue_stats_reset(void *vqueue)
+{
+ struct ark_tx_queue *queue;
+ struct ark_ddm_t *ddm;
+
+ queue = vqueue;
+ ddm = queue->ddm;
+
+ ark_ddm_queue_reset_stats(ddm);
+ queue->tx_errors = 0;
+}
diff --git a/drivers/net/ark/ark_ethdev_tx.h b/drivers/net/ark/ark_ethdev_tx.h
new file mode 100644
index 00000000..8aaafc22
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev_tx.h
@@ -0,0 +1,59 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_ETHDEV_TX_H_
+#define _ARK_ETHDEV_TX_H_
+
+#include <stdint.h>
+
+#include <rte_ethdev.h>
+
+
+uint16_t eth_ark_xmit_pkts_noop(void *vtxq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t eth_ark_xmit_pkts(void *vtxq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void eth_ark_tx_queue_release(void *vtx_queue);
+int eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
+int eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
+void eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats);
+void eth_tx_queue_stats_reset(void *vqueue);
+
+#endif
diff --git a/drivers/net/ark/ark_ext.h b/drivers/net/ark/ark_ext.h
new file mode 100644
index 00000000..f805f64f
--- /dev/null
+++ b/drivers/net/ark/ark_ext.h
@@ -0,0 +1,115 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_EXT_H_
+#define _ARK_EXT_H_
+
+#include <rte_ethdev.h>
+
+/*
+ * This is the template file for users who which to define a dynamic
+ * extension to the Arkville PMD. User's who create an extension
+ * should include this file and define the necessary and desired
+ * functions.
+ * Only 1 function is required for an extension, dev_init(); all other
+ * functions prototyped in this file are optional.
+ */
+
+/*
+ * Called post PMD init.
+ * The implementation returns its private data that gets passed into
+ * all other functions as user_data
+ * The ARK extension implementation MUST implement this function
+ */
+void *dev_init(struct rte_eth_dev *dev, void *a_bar, int port_id);
+
+/* Called during device shutdown */
+void dev_uninit(struct rte_eth_dev *dev, void *user_data);
+
+/* This call is optional and allows the
+ * extension to specify the number of supported ports.
+ */
+uint8_t dev_get_port_count(struct rte_eth_dev *dev,
+ void *user_data);
+
+/*
+ * The following functions are optional and are directly mapped
+ * from the DPDK PMD ops structure.
+ * Each function if implemented is called after the ARK PMD
+ * implementation executes.
+ */
+
+int dev_configure(struct rte_eth_dev *dev,
+ void *user_data);
+
+int dev_start(struct rte_eth_dev *dev,
+ void *user_data);
+
+void dev_stop(struct rte_eth_dev *dev,
+ void *user_data);
+
+void dev_close(struct rte_eth_dev *dev,
+ void *user_data);
+
+int link_update(struct rte_eth_dev *dev,
+ int wait_to_complete,
+ void *user_data);
+
+int dev_set_link_up(struct rte_eth_dev *dev,
+ void *user_data);
+
+int dev_set_link_down(struct rte_eth_dev *dev,
+ void *user_data);
+
+void stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats,
+ void *user_data);
+
+void stats_reset(struct rte_eth_dev *dev,
+ void *user_data);
+
+void mac_addr_add(struct rte_eth_dev *dev,
+ struct ether_addr *macadr,
+ uint32_t index,
+ uint32_t pool,
+ void *user_data);
+
+void mac_addr_remove(struct rte_eth_dev *dev,
+ uint32_t index,
+ void *user_data);
+
+void mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ void *user_data);
+
+#endif
diff --git a/drivers/net/ark/ark_global.h b/drivers/net/ark/ark_global.h
new file mode 100644
index 00000000..a2e9e8ff
--- /dev/null
+++ b/drivers/net/ark/ark_global.h
@@ -0,0 +1,161 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_GLOBAL_H_
+#define _ARK_GLOBAL_H_
+
+#include <time.h>
+#include <assert.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_version.h>
+
+#include "ark_pktdir.h"
+#include "ark_pktgen.h"
+#include "ark_pktchkr.h"
+
+#define ETH_ARK_ARG_MAXLEN 64
+#define ARK_SYSCTRL_BASE 0x0
+#define ARK_PKTGEN_BASE 0x10000
+#define ARK_MPU_RX_BASE 0x20000
+#define ARK_UDM_BASE 0x30000
+#define ARK_MPU_TX_BASE 0x40000
+#define ARK_DDM_BASE 0x60000
+#define ARK_CMAC_BASE 0x80000
+#define ARK_PKTDIR_BASE 0xa0000
+#define ARK_PKTCHKR_BASE 0x90000
+#define ARK_RCPACING_BASE 0xb0000
+#define ARK_EXTERNAL_BASE 0x100000
+#define ARK_MPU_QOFFSET 0x00100
+#define ARK_MAX_PORTS 8
+
+#define offset8(n) n
+#define offset16(n) ((n) / 2)
+#define offset32(n) ((n) / 4)
+#define offset64(n) ((n) / 8)
+
+/* Maximum length of arg list in bytes */
+#define ARK_MAX_ARG_LEN 256
+
+/*
+ * Structure to store private data for each PF/VF instance.
+ */
+#define def_ptr(type, name) \
+ union type { \
+ uint64_t *t64; \
+ uint32_t *t32; \
+ uint16_t *t16; \
+ uint8_t *t8; \
+ void *v; \
+ } name
+
+struct ark_user_ext {
+ void *(*dev_init)(struct rte_eth_dev *, void *abar, int port_id);
+ void (*dev_uninit)(struct rte_eth_dev *, void *);
+ int (*dev_get_port_count)(struct rte_eth_dev *, void *);
+ int (*dev_configure)(struct rte_eth_dev *, void *);
+ int (*dev_start)(struct rte_eth_dev *, void *);
+ void (*dev_stop)(struct rte_eth_dev *, void *);
+ void (*dev_close)(struct rte_eth_dev *, void *);
+ int (*link_update)(struct rte_eth_dev *, int wait_to_complete, void *);
+ int (*dev_set_link_up)(struct rte_eth_dev *, void *);
+ int (*dev_set_link_down)(struct rte_eth_dev *, void *);
+ void (*stats_get)(struct rte_eth_dev *, struct rte_eth_stats *, void *);
+ void (*stats_reset)(struct rte_eth_dev *, void *);
+ void (*mac_addr_add)(struct rte_eth_dev *,
+ struct ether_addr *,
+ uint32_t,
+ uint32_t,
+ void *);
+ void (*mac_addr_remove)(struct rte_eth_dev *, uint32_t, void *);
+ void (*mac_addr_set)(struct rte_eth_dev *, struct ether_addr *, void *);
+};
+
+struct ark_adapter {
+ /* User extension private data */
+ void *user_data;
+
+ /* Pointers to packet generator and checker */
+ int start_pg;
+ ark_pkt_gen_t pg;
+ ark_pkt_chkr_t pc;
+ ark_pkt_dir_t pd;
+
+ int num_ports;
+
+ /* Packet generator/checker args */
+ char pkt_gen_args[ARK_MAX_ARG_LEN];
+ char pkt_chkr_args[ARK_MAX_ARG_LEN];
+ uint32_t pkt_dir_v;
+
+ /* eth device */
+ struct rte_eth_dev *eth_dev;
+
+ void *d_handle;
+ struct ark_user_ext user_ext;
+
+ /* Our Bar 0 */
+ uint8_t *bar0;
+
+ /* Application Bar */
+ uint8_t *a_bar;
+
+ /* Arkville demo block offsets */
+ def_ptr(sys_ctrl, sysctrl);
+ def_ptr(pkt_gen, pktgen);
+ def_ptr(mpu_rx, mpurx);
+ def_ptr(UDM, udm);
+ def_ptr(mpu_tx, mputx);
+ def_ptr(DDM, ddm);
+ def_ptr(CMAC, cmac);
+ def_ptr(external, external);
+ def_ptr(pkt_dir, pktdir);
+ def_ptr(pkt_chkr, pktchkr);
+
+ int started;
+ uint16_t rx_queues;
+ uint16_t tx_queues;
+
+ struct ark_rqpace_t *rqpacing;
+};
+
+typedef uint32_t *ark_t;
+
+#endif
diff --git a/drivers/net/ark/ark_logs.h b/drivers/net/ark/ark_logs.h
new file mode 100644
index 00000000..8aff2963
--- /dev/null
+++ b/drivers/net/ark/ark_logs.h
@@ -0,0 +1,119 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_DEBUG_H_
+#define _ARK_DEBUG_H_
+
+#include <inttypes.h>
+#include <rte_log.h>
+
+
+/* Configuration option to pad TX packets to 60 bytes */
+#ifdef RTE_LIBRTE_ARK_PAD_TX
+#define ARK_TX_PAD_TO_60 1
+#else
+#define ARK_TX_PAD_TO_60 0
+#endif
+
+/* system camel case definition changed to upper case */
+#define PRIU32 PRIu32
+#define PRIU64 PRIu64
+
+/* Format specifiers for string data pairs */
+#define ARK_SU32 "\n\t%-20s %'20" PRIU32
+#define ARK_SU64 "\n\t%-20s %'20" PRIU64
+#define ARK_SU64X "\n\t%-20s %#20" PRIx64
+#define ARK_SPTR "\n\t%-20s %20p"
+
+
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, fmt, ## args)
+
+/* Conditional trace definitions */
+#define ARK_TRACE_ON(level, fmt, ...) \
+ RTE_LOG(level, PMD, fmt, ##__VA_ARGS__)
+
+/* This pattern allows compiler check arguments even if disabled */
+#define ARK_TRACE_OFF(level, fmt, ...) \
+ do {if (0) RTE_LOG(level, PMD, fmt, ##__VA_ARGS__); } \
+ while (0)
+
+
+/* tracing including the function name */
+#define ARK_FUNC_ON(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+
+/* tracing including the function name */
+#define ARK_FUNC_OFF(level, fmt, args...) \
+ do { if (0) RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args); } \
+ while (0)
+
+
+/* Debug macro for tracing full behavior, function tracing and messages*/
+#ifdef RTE_LIBRTE_ARK_DEBUG_TRACE
+#define PMD_FUNC_LOG(level, fmt, ...) ARK_FUNC_ON(level, fmt, ##__VA_ARGS__)
+#define PMD_DEBUG_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define PMD_FUNC_LOG(level, fmt, ...) ARK_FUNC_OFF(level, fmt, ##__VA_ARGS__)
+#define PMD_DEBUG_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+
+/* Debug macro for reporting FPGA statistics */
+#ifdef RTE_LIBRTE_ARK_DEBUG_STATS
+#define PMD_STATS_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define PMD_STATS_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+
+/* Debug macro for RX path */
+#ifdef RTE_LIBRTE_ARK_DEBUG_RX
+#define ARK_RX_DEBUG 1
+#define PMD_RX_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define ARK_RX_DEBUG 0
+#define PMD_RX_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+/* Debug macro for TX path */
+#ifdef RTE_LIBRTE_ARK_DEBUG_TX
+#define ARK_TX_DEBUG 1
+#define PMD_TX_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define ARK_TX_DEBUG 0
+#define PMD_TX_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+#endif
diff --git a/drivers/net/ark/ark_mpu.c b/drivers/net/ark/ark_mpu.c
new file mode 100644
index 00000000..cd2c0788
--- /dev/null
+++ b/drivers/net/ark/ark_mpu.c
@@ -0,0 +1,181 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_logs.h"
+#include "ark_mpu.h"
+
+uint16_t
+ark_api_num_queues(struct ark_mpu_t *mpu)
+{
+ return mpu->hw.num_queues;
+}
+
+uint16_t
+ark_api_num_queues_per_port(struct ark_mpu_t *mpu, uint16_t ark_ports)
+{
+ return mpu->hw.num_queues / ark_ports;
+}
+
+int
+ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size)
+{
+ uint32_t version;
+
+ version = mpu->id.vernum & 0x0000fF00;
+ if ((mpu->id.idnum != 0x2055504d) ||
+ (mpu->hw.obj_size != obj_size) ||
+ (version != 0x00003100)) {
+ PMD_DRV_LOG(ERR,
+ " MPU module not found as expected %08x"
+ " \"%c%c%c%c %c%c%c%c\"\n",
+ mpu->id.idnum,
+ mpu->id.id[0], mpu->id.id[1],
+ mpu->id.id[2], mpu->id.id[3],
+ mpu->id.ver[0], mpu->id.ver[1],
+ mpu->id.ver[2], mpu->id.ver[3]);
+ PMD_DRV_LOG(ERR,
+ " MPU HW num_queues: %u hw_depth %u,"
+ " obj_size: %u, obj_per_mrr: %u"
+ " Expected size %u\n",
+ mpu->hw.num_queues,
+ mpu->hw.hw_depth,
+ mpu->hw.obj_size,
+ mpu->hw.obj_per_mrr,
+ obj_size);
+ return -1;
+ }
+ return 0;
+}
+
+void
+ark_mpu_stop(struct ark_mpu_t *mpu)
+{
+ mpu->cfg.command = MPU_CMD_STOP;
+}
+
+void
+ark_mpu_start(struct ark_mpu_t *mpu)
+{
+ mpu->cfg.command = MPU_CMD_RUN;
+}
+
+int
+ark_mpu_reset(struct ark_mpu_t *mpu)
+{
+ int cnt = 0;
+
+ mpu->cfg.command = MPU_CMD_RESET;
+
+ while (mpu->cfg.command != MPU_CMD_IDLE) {
+ if (cnt++ > 1000)
+ break;
+ usleep(10);
+ }
+ if (mpu->cfg.command != MPU_CMD_IDLE) {
+ mpu->cfg.command = MPU_CMD_FORCE_RESET;
+ usleep(10);
+ }
+ ark_mpu_reset_stats(mpu);
+ return mpu->cfg.command != MPU_CMD_IDLE;
+}
+
+void
+ark_mpu_reset_stats(struct ark_mpu_t *mpu)
+{
+ mpu->stats.pci_request = 1; /* reset stats */
+}
+
+int
+ark_mpu_configure(struct ark_mpu_t *mpu, phys_addr_t ring, uint32_t ring_size,
+ int is_tx)
+{
+ ark_mpu_reset(mpu);
+
+ if (!rte_is_power_of_2(ring_size)) {
+ PMD_DRV_LOG(ERR, "ARK: Invalid ring size for MPU %d\n",
+ ring_size);
+ return -1;
+ }
+
+ mpu->cfg.ring_base = ring;
+ mpu->cfg.ring_size = ring_size;
+ mpu->cfg.ring_mask = ring_size - 1;
+ mpu->cfg.min_host_move = is_tx ? 1 : mpu->hw.obj_per_mrr;
+ mpu->cfg.min_hw_move = mpu->hw.obj_per_mrr;
+ mpu->cfg.sw_prod_index = 0;
+ mpu->cfg.hw_cons_index = 0;
+ return 0;
+}
+
+void
+ark_mpu_dump(struct ark_mpu_t *mpu, const char *code, uint16_t qid)
+{
+ /* DUMP to see that we have started */
+ PMD_DEBUG_LOG(DEBUG, "MPU: %s Q: %3u sw_prod %u, hw_cons: %u\n",
+ code, qid,
+ mpu->cfg.sw_prod_index, mpu->cfg.hw_cons_index);
+ PMD_DEBUG_LOG(DEBUG, "MPU: %s state: %d count %d, reserved %d"
+ " data 0x%08x_%08x 0x%08x_%08x\n",
+ code,
+ mpu->debug.state, mpu->debug.count,
+ mpu->debug.reserved,
+ mpu->debug.peek[1],
+ mpu->debug.peek[0],
+ mpu->debug.peek[3],
+ mpu->debug.peek[2]
+ );
+ PMD_STATS_LOG(INFO, "MPU: %s Q: %3u"
+ ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64
+ ARK_SU64 ARK_SU64 ARK_SU64 "\n",
+ code, qid,
+ "PCI Request:", mpu->stats.pci_request,
+ "Queue_empty", mpu->stats.q_empty,
+ "Queue_q1", mpu->stats.q_q1,
+ "Queue_q2", mpu->stats.q_q2,
+ "Queue_q3", mpu->stats.q_q3,
+ "Queue_q4", mpu->stats.q_q4,
+ "Queue_full", mpu->stats.q_full
+ );
+}
+
+void
+ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t q_id)
+{
+ PMD_DEBUG_LOG(DEBUG, "MPU Setup Q: %u"
+ ARK_SU64X "\n",
+ q_id,
+ "ring_base", mpu->cfg.ring_base
+ );
+}
diff --git a/drivers/net/ark/ark_mpu.h b/drivers/net/ark/ark_mpu.h
new file mode 100644
index 00000000..a0171dbd
--- /dev/null
+++ b/drivers/net/ark/ark_mpu.h
@@ -0,0 +1,154 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_MPU_H_
+#define _ARK_MPU_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/* The MPU or Memory Prefetch Unit is an internal Arkville hardware
+ * module for moving data between host memory and the hardware FPGA.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * MPU hardware structures
+ * These are overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+
+#define ARK_MPU_ID 0x00
+struct ark_mpu_id_t {
+ union {
+ char id[4];
+ uint32_t idnum;
+ };
+ union {
+ char ver[4];
+ uint32_t vernum;
+ };
+ uint32_t phys_id;
+ uint32_t mrr_code;
+};
+
+#define ARK_MPU_HW 0x010
+struct ark_mpu_hw_t {
+ uint16_t num_queues;
+ uint16_t reserved;
+ uint32_t hw_depth;
+ uint32_t obj_size;
+ uint32_t obj_per_mrr;
+};
+
+#define ARK_MPU_CFG 0x040
+struct ark_mpu_cfg_t {
+ phys_addr_t ring_base; /* phys_addr_t is a uint64_t */
+ uint32_t ring_size;
+ uint32_t ring_mask;
+ uint32_t min_host_move;
+ uint32_t min_hw_move;
+ volatile uint32_t sw_prod_index;
+ volatile uint32_t hw_cons_index;
+ volatile uint32_t command;
+};
+enum ARK_MPU_COMMAND {
+ MPU_CMD_IDLE = 1,
+ MPU_CMD_RUN = 2,
+ MPU_CMD_STOP = 4,
+ MPU_CMD_RESET = 8,
+ MPU_CMD_FORCE_RESET = 16,
+ MPU_COMMAND_LIMIT = 0xfFFFFFFF
+};
+
+#define ARK_MPU_STATS 0x080
+struct ark_mpu_stats_t {
+ volatile uint64_t pci_request;
+ volatile uint64_t q_empty;
+ volatile uint64_t q_q1;
+ volatile uint64_t q_q2;
+ volatile uint64_t q_q3;
+ volatile uint64_t q_q4;
+ volatile uint64_t q_full;
+};
+
+#define ARK_MPU_DEBUG 0x0C0
+struct ark_mpu_debug_t {
+ volatile uint32_t state;
+ uint32_t reserved;
+ volatile uint32_t count;
+ volatile uint32_t take;
+ volatile uint32_t peek[4];
+};
+
+/* Consolidated structure */
+struct ark_mpu_t {
+ struct ark_mpu_id_t id;
+ uint8_t reserved0[(ARK_MPU_HW - ARK_MPU_ID)
+ - sizeof(struct ark_mpu_id_t)];
+ struct ark_mpu_hw_t hw;
+ uint8_t reserved1[(ARK_MPU_CFG - ARK_MPU_HW) -
+ sizeof(struct ark_mpu_hw_t)];
+ struct ark_mpu_cfg_t cfg;
+ uint8_t reserved2[(ARK_MPU_STATS - ARK_MPU_CFG) -
+ sizeof(struct ark_mpu_cfg_t)];
+ struct ark_mpu_stats_t stats;
+ uint8_t reserved3[(ARK_MPU_DEBUG - ARK_MPU_STATS) -
+ sizeof(struct ark_mpu_stats_t)];
+ struct ark_mpu_debug_t debug;
+};
+
+uint16_t ark_api_num_queues(struct ark_mpu_t *mpu);
+uint16_t ark_api_num_queues_per_port(struct ark_mpu_t *mpu,
+ uint16_t ark_ports);
+int ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size);
+void ark_mpu_stop(struct ark_mpu_t *mpu);
+void ark_mpu_start(struct ark_mpu_t *mpu);
+int ark_mpu_reset(struct ark_mpu_t *mpu);
+int ark_mpu_configure(struct ark_mpu_t *mpu, phys_addr_t ring,
+ uint32_t ring_size, int is_tx);
+
+void ark_mpu_dump(struct ark_mpu_t *mpu, const char *msg, uint16_t idx);
+void ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t qid);
+void ark_mpu_reset_stats(struct ark_mpu_t *mpu);
+
+/* this action is in a performance critical path */
+static inline void
+ark_mpu_set_producer(struct ark_mpu_t *mpu, uint32_t idx)
+{
+ mpu->cfg.sw_prod_index = idx;
+}
+
+#endif
diff --git a/drivers/net/ark/ark_pktchkr.c b/drivers/net/ark/ark_pktchkr.c
new file mode 100644
index 00000000..62b3673b
--- /dev/null
+++ b/drivers/net/ark/ark_pktchkr.c
@@ -0,0 +1,474 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <getopt.h>
+#include <sys/time.h>
+#include <locale.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "ark_pktchkr.h"
+#include "ark_logs.h"
+
+static int set_arg(char *arg, char *val);
+static int ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle);
+
+#define ARK_MAX_STR_LEN 64
+union OPTV {
+ int INT;
+ int BOOL;
+ uint64_t LONG;
+ char STR[ARK_MAX_STR_LEN];
+};
+
+enum OPTYPE {
+ OTINT,
+ OTLONG,
+ OTBOOL,
+ OTSTRING
+};
+
+struct OPTIONS {
+ char opt[ARK_MAX_STR_LEN];
+ enum OPTYPE t;
+ union OPTV v;
+};
+
+static struct OPTIONS toptions[] = {
+ {{"configure"}, OTBOOL, {1} },
+ {{"port"}, OTINT, {0} },
+ {{"mac-dump"}, OTBOOL, {0} },
+ {{"dg-mode"}, OTBOOL, {1} },
+ {{"run"}, OTBOOL, {0} },
+ {{"stop"}, OTBOOL, {0} },
+ {{"dump"}, OTBOOL, {0} },
+ {{"en_resync"}, OTBOOL, {0} },
+ {{"tuser_err_val"}, OTINT, {1} },
+ {{"gen_forever"}, OTBOOL, {0} },
+ {{"en_slaved_start"}, OTBOOL, {0} },
+ {{"vary_length"}, OTBOOL, {0} },
+ {{"incr_payload"}, OTINT, {0} },
+ {{"incr_first_byte"}, OTBOOL, {0} },
+ {{"ins_seq_num"}, OTBOOL, {0} },
+ {{"ins_time_stamp"}, OTBOOL, {1} },
+ {{"ins_udp_hdr"}, OTBOOL, {0} },
+ {{"num_pkts"}, OTLONG, .v.LONG = 10000000000000L},
+ {{"payload_byte"}, OTINT, {0x55} },
+ {{"pkt_spacing"}, OTINT, {60} },
+ {{"pkt_size_min"}, OTINT, {2005} },
+ {{"pkt_size_max"}, OTINT, {1514} },
+ {{"pkt_size_incr"}, OTINT, {1} },
+ {{"eth_type"}, OTINT, {0x0800} },
+ {{"src_mac_addr"}, OTLONG, .v.LONG = 0xdC3cF6425060L},
+ {{"dst_mac_addr"}, OTLONG, .v.LONG = 0x112233445566L},
+ {{"hdr_dW0"}, OTINT, {0x0016e319} },
+ {{"hdr_dW1"}, OTINT, {0x27150004} },
+ {{"hdr_dW2"}, OTINT, {0x76967bda} },
+ {{"hdr_dW3"}, OTINT, {0x08004500} },
+ {{"hdr_dW4"}, OTINT, {0x005276ed} },
+ {{"hdr_dW5"}, OTINT, {0x40004006} },
+ {{"hdr_dW6"}, OTINT, {0x56cfc0a8} },
+ {{"start_offset"}, OTINT, {0} },
+ {{"dst_ip"}, OTSTRING, .v.STR = "169.254.10.240"},
+ {{"dst_port"}, OTINT, {65536} },
+ {{"src_port"}, OTINT, {65536} },
+};
+
+ark_pkt_chkr_t
+ark_pktchkr_init(void *addr, int ord, int l2_mode)
+{
+ struct ark_pkt_chkr_inst *inst =
+ rte_malloc("ark_pkt_chkr_inst",
+ sizeof(struct ark_pkt_chkr_inst), 0);
+ inst->sregs = (struct ark_pkt_chkr_stat_regs *)addr;
+ inst->cregs =
+ (struct ark_pkt_chkr_ctl_regs *)(((uint8_t *)addr) + 0x100);
+ inst->ordinal = ord;
+ inst->l2_mode = l2_mode;
+ return inst;
+}
+
+void
+ark_pktchkr_uninit(ark_pkt_chkr_t handle)
+{
+ rte_free(handle);
+}
+
+void
+ark_pktchkr_run(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->sregs->pkt_start_stop = 0;
+ inst->sregs->pkt_start_stop = 0x1;
+}
+
+int
+ark_pktchkr_stopped(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = inst->sregs->pkt_start_stop;
+
+ return (((r >> 16) & 1) == 1);
+}
+
+void
+ark_pktchkr_stop(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ int wait_cycle = 10;
+
+ inst->sregs->pkt_start_stop = 0;
+ while (!ark_pktchkr_stopped(handle) && (wait_cycle > 0)) {
+ usleep(1000);
+ wait_cycle--;
+ PMD_DEBUG_LOG(DEBUG, "Waiting for pktchk %d to stop...\n",
+ inst->ordinal);
+ }
+ PMD_DEBUG_LOG(DEBUG, "Pktchk %d stopped.\n", inst->ordinal);
+}
+
+int
+ark_pktchkr_is_running(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = inst->sregs->pkt_start_stop;
+
+ return ((r & 1) == 1);
+}
+
+static void
+ark_pktchkr_set_pkt_ctrl(ark_pkt_chkr_t handle,
+ uint32_t gen_forever,
+ uint32_t vary_length,
+ uint32_t incr_payload,
+ uint32_t incr_first_byte,
+ uint32_t ins_seq_num,
+ uint32_t ins_udp_hdr,
+ uint32_t en_resync,
+ uint32_t tuser_err_val,
+ uint32_t ins_time_stamp)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = (tuser_err_val << 16) | (en_resync << 0);
+
+ inst->sregs->pkt_ctrl = r;
+ if (!inst->l2_mode)
+ ins_udp_hdr = 0;
+ r = ((gen_forever << 24) |
+ (vary_length << 16) |
+ (incr_payload << 12) |
+ (incr_first_byte << 8) |
+ (ins_time_stamp << 5) |
+ (ins_seq_num << 4) |
+ ins_udp_hdr);
+ inst->cregs->pkt_ctrl = r;
+}
+
+static
+int
+ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = inst->cregs->pkt_ctrl;
+
+ return (((r >> 24) & 1) == 1);
+}
+
+int
+ark_pktchkr_wait_done(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ if (ark_pktchkr_is_gen_forever(handle)) {
+ PMD_DEBUG_LOG(ERR, "Pktchk wait_done will not terminate"
+ " because gen_forever=1\n");
+ return -1;
+ }
+ int wait_cycle = 10;
+
+ while (!ark_pktchkr_stopped(handle) && (wait_cycle > 0)) {
+ usleep(1000);
+ wait_cycle--;
+ PMD_DEBUG_LOG(DEBUG, "Waiting for packet checker %d's"
+ " internal pktgen to finish sending...\n",
+ inst->ordinal);
+ PMD_DEBUG_LOG(DEBUG, "Pktchk %d's pktgen done.\n",
+ inst->ordinal);
+ }
+ return 0;
+}
+
+int
+ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ return inst->cregs->pkts_sent;
+}
+
+void
+ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_payload = b;
+}
+
+void
+ark_pktchkr_set_pkt_size_min(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_size_min = x;
+}
+
+void
+ark_pktchkr_set_pkt_size_max(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_size_max = x;
+}
+
+void
+ark_pktchkr_set_pkt_size_incr(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_size_incr = x;
+}
+
+void
+ark_pktchkr_set_num_pkts(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->num_pkts = x;
+}
+
+void
+ark_pktchkr_set_src_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->src_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->cregs->src_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktchkr_set_dst_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->dst_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->cregs->dst_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktchkr_set_eth_type(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->eth_type = x;
+}
+
+void
+ark_pktchkr_set_hdr_dW(ark_pkt_chkr_t handle, uint32_t *hdr)
+{
+ uint32_t i;
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ for (i = 0; i < 7; i++)
+ inst->cregs->hdr_dw[i] = hdr[i];
+}
+
+void
+ark_pktchkr_dump_stats(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ PMD_STATS_LOG(INFO, "pkts_rcvd = (%'u)\n",
+ inst->sregs->pkts_rcvd);
+ PMD_STATS_LOG(INFO, "bytes_rcvd = (%'" PRIU64 ")\n",
+ inst->sregs->bytes_rcvd);
+ PMD_STATS_LOG(INFO, "pkts_ok = (%'u)\n",
+ inst->sregs->pkts_ok);
+ PMD_STATS_LOG(INFO, "pkts_mismatch = (%'u)\n",
+ inst->sregs->pkts_mismatch);
+ PMD_STATS_LOG(INFO, "pkts_err = (%'u)\n",
+ inst->sregs->pkts_err);
+ PMD_STATS_LOG(INFO, "first_mismatch = (%'u)\n",
+ inst->sregs->first_mismatch);
+ PMD_STATS_LOG(INFO, "resync_events = (%'u)\n",
+ inst->sregs->resync_events);
+ PMD_STATS_LOG(INFO, "pkts_missing = (%'u)\n",
+ inst->sregs->pkts_missing);
+ PMD_STATS_LOG(INFO, "min_latency = (%'u)\n",
+ inst->sregs->min_latency);
+ PMD_STATS_LOG(INFO, "max_latency = (%'u)\n",
+ inst->sregs->max_latency);
+}
+
+static struct OPTIONS *
+options(const char *id)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof(toptions) / sizeof(struct OPTIONS); i++) {
+ if (strcmp(id, toptions[i].opt) == 0)
+ return &toptions[i];
+ }
+ PMD_DRV_LOG(ERR,
+ "pktchkr: Could not find requested option!, option = %s\n",
+ id);
+ return NULL;
+}
+
+static int
+set_arg(char *arg, char *val)
+{
+ struct OPTIONS *o = options(arg);
+
+ if (o) {
+ switch (o->t) {
+ case OTINT:
+ case OTBOOL:
+ o->v.INT = atoi(val);
+ break;
+ case OTLONG:
+ o->v.INT = atoll(val);
+ break;
+ case OTSTRING:
+ strncpy(o->v.STR, val, ARK_MAX_STR_LEN);
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/******
+ * Arg format = "opt0=v,opt_n=v ..."
+ ******/
+void
+ark_pktchkr_parse(char *args)
+{
+ char *argv, *v;
+ const char toks[] = "=\n\t\v\f \r";
+ argv = strtok(args, toks);
+ v = strtok(NULL, toks);
+ while (argv && v) {
+ set_arg(argv, v);
+ argv = strtok(NULL, toks);
+ v = strtok(NULL, toks);
+ }
+}
+
+static int32_t parse_ipv4_string(char const *ip_address);
+static int32_t
+parse_ipv4_string(char const *ip_address)
+{
+ unsigned int ip[4];
+
+ if (sscanf(ip_address, "%u.%u.%u.%u",
+ &ip[0], &ip[1], &ip[2], &ip[3]) != 4)
+ return 0;
+ return ip[3] + ip[2] * 0x100 + ip[1] * 0x10000ul + ip[0] * 0x1000000ul;
+}
+
+void
+ark_pktchkr_setup(ark_pkt_chkr_t handle)
+{
+ uint32_t hdr[7];
+ int32_t dst_ip = parse_ipv4_string(options("dst_ip")->v.STR);
+
+ if (!options("stop")->v.BOOL && options("configure")->v.BOOL) {
+ ark_pktchkr_set_payload_byte(handle,
+ options("payload_byte")->v.INT);
+ ark_pktchkr_set_src_mac_addr(handle,
+ options("src_mac_addr")->v.INT);
+ ark_pktchkr_set_dst_mac_addr(handle,
+ options("dst_mac_addr")->v.LONG);
+
+ ark_pktchkr_set_eth_type(handle,
+ options("eth_type")->v.INT);
+ if (options("dg-mode")->v.BOOL) {
+ hdr[0] = options("hdr_dW0")->v.INT;
+ hdr[1] = options("hdr_dW1")->v.INT;
+ hdr[2] = options("hdr_dW2")->v.INT;
+ hdr[3] = options("hdr_dW3")->v.INT;
+ hdr[4] = options("hdr_dW4")->v.INT;
+ hdr[5] = options("hdr_dW5")->v.INT;
+ hdr[6] = options("hdr_dW6")->v.INT;
+ } else {
+ hdr[0] = dst_ip;
+ hdr[1] = options("dst_port")->v.INT;
+ hdr[2] = options("src_port")->v.INT;
+ hdr[3] = 0;
+ hdr[4] = 0;
+ hdr[5] = 0;
+ hdr[6] = 0;
+ }
+ ark_pktchkr_set_hdr_dW(handle, hdr);
+ ark_pktchkr_set_num_pkts(handle,
+ options("num_pkts")->v.INT);
+ ark_pktchkr_set_pkt_size_min(handle,
+ options("pkt_size_min")->v.INT);
+ ark_pktchkr_set_pkt_size_max(handle,
+ options("pkt_size_max")->v.INT);
+ ark_pktchkr_set_pkt_size_incr(handle,
+ options("pkt_size_incr")->v.INT);
+ ark_pktchkr_set_pkt_ctrl(handle,
+ options("gen_forever")->v.BOOL,
+ options("vary_length")->v.BOOL,
+ options("incr_payload")->v.BOOL,
+ options("incr_first_byte")->v.BOOL,
+ options("ins_seq_num")->v.INT,
+ options("ins_udp_hdr")->v.BOOL,
+ options("en_resync")->v.BOOL,
+ options("tuser_err_val")->v.INT,
+ options("ins_time_stamp")->v.INT);
+ }
+
+ if (options("stop")->v.BOOL)
+ ark_pktchkr_stop(handle);
+
+ if (options("run")->v.BOOL) {
+ PMD_DEBUG_LOG(DEBUG, "Starting packet checker on port %d\n",
+ options("port")->v.INT);
+ ark_pktchkr_run(handle);
+ }
+}
diff --git a/drivers/net/ark/ark_pktchkr.h b/drivers/net/ark/ark_pktchkr.h
new file mode 100644
index 00000000..f4025dd6
--- /dev/null
+++ b/drivers/net/ark/ark_pktchkr.h
@@ -0,0 +1,117 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_PKTCHKR_H_
+#define _ARK_PKTCHKR_H_
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#define ARK_PKTCHKR_BASE_ADR 0x90000
+
+typedef void *ark_pkt_chkr_t;
+
+/* The packet checker is an internal Arkville hardware module, which
+ * verifies packet streams generated from the corresponding packet
+ * generator. This module is used for Arkville testing.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * This are overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+struct ark_pkt_chkr_stat_regs {
+ uint32_t r0;
+ uint32_t pkt_start_stop;
+ uint32_t pkt_ctrl;
+ uint32_t pkts_rcvd;
+ uint64_t bytes_rcvd;
+ uint32_t pkts_ok;
+ uint32_t pkts_mismatch;
+ uint32_t pkts_err;
+ uint32_t first_mismatch;
+ uint32_t resync_events;
+ uint32_t pkts_missing;
+ uint32_t min_latency;
+ uint32_t max_latency;
+} __attribute__ ((packed));
+
+struct ark_pkt_chkr_ctl_regs {
+ uint32_t pkt_ctrl;
+ uint32_t pkt_payload;
+ uint32_t pkt_size_min;
+ uint32_t pkt_size_max;
+ uint32_t pkt_size_incr;
+ uint32_t num_pkts;
+ uint32_t pkts_sent;
+ uint32_t src_mac_addr_l;
+ uint32_t src_mac_addr_h;
+ uint32_t dst_mac_addr_l;
+ uint32_t dst_mac_addr_h;
+ uint32_t eth_type;
+ uint32_t hdr_dw[7];
+} __attribute__ ((packed));
+
+struct ark_pkt_chkr_inst {
+ struct rte_eth_dev_info *dev_info;
+ volatile struct ark_pkt_chkr_stat_regs *sregs;
+ volatile struct ark_pkt_chkr_ctl_regs *cregs;
+ int l2_mode;
+ int ordinal;
+};
+
+/* packet checker functions */
+ark_pkt_chkr_t ark_pktchkr_init(void *addr, int ord, int l2_mode);
+void ark_pktchkr_uninit(ark_pkt_chkr_t handle);
+void ark_pktchkr_run(ark_pkt_chkr_t handle);
+int ark_pktchkr_stopped(ark_pkt_chkr_t handle);
+void ark_pktchkr_stop(ark_pkt_chkr_t handle);
+int ark_pktchkr_is_running(ark_pkt_chkr_t handle);
+int ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle);
+void ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b);
+void ark_pktchkr_set_pkt_size_min(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_pkt_size_max(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_pkt_size_incr(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_num_pkts(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_src_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr);
+void ark_pktchkr_set_dst_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr);
+void ark_pktchkr_set_eth_type(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_hdr_dW(ark_pkt_chkr_t handle, uint32_t *hdr);
+void ark_pktchkr_parse(char *args);
+void ark_pktchkr_setup(ark_pkt_chkr_t handle);
+void ark_pktchkr_dump_stats(ark_pkt_chkr_t handle);
+int ark_pktchkr_wait_done(ark_pkt_chkr_t handle);
+
+#endif
diff --git a/drivers/net/ark/ark_pktdir.c b/drivers/net/ark/ark_pktdir.c
new file mode 100644
index 00000000..66e5ce24
--- /dev/null
+++ b/drivers/net/ark/ark_pktdir.c
@@ -0,0 +1,80 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ark_pktdir.h"
+#include "ark_global.h"
+
+
+ark_pkt_dir_t
+ark_pktdir_init(void *base)
+{
+ struct ark_pkt_dir_inst *inst =
+ rte_malloc("ark_pkt_dir_inst",
+ sizeof(struct ark_pkt_dir_inst),
+ 0);
+ inst->regs = (struct ark_pkt_dir_regs *)base;
+ inst->regs->ctrl = 0x00110110; /* POR state */
+ return inst;
+}
+
+void
+ark_pktdir_uninit(ark_pkt_dir_t handle)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+
+ rte_free(inst);
+}
+
+void
+ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+ inst->regs->ctrl = v;
+}
+
+uint32_t
+ark_pktdir_status(ark_pkt_dir_t handle)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+ return inst->regs->ctrl;
+}
+
+uint32_t
+ark_pktdir_stall_cnt(ark_pkt_dir_t handle)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+ return inst->regs->stall_cnt;
+}
diff --git a/drivers/net/ark/ark_pktdir.h b/drivers/net/ark/ark_pktdir.h
new file mode 100644
index 00000000..e13fe821
--- /dev/null
+++ b/drivers/net/ark/ark_pktdir.h
@@ -0,0 +1,70 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_PKTDIR_H_
+#define _ARK_PKTDIR_H_
+
+#include <stdint.h>
+
+#define ARK_PKTDIR_BASE_ADR 0xa0000
+
+typedef void *ark_pkt_dir_t;
+
+
+/* The packet director is an internal Arkville hardware module for
+ * directing packet data in non-typical flows, such as testing.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * This is an overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+struct ark_pkt_dir_regs {
+ uint32_t ctrl;
+ uint32_t status;
+ uint32_t stall_cnt;
+} __attribute__ ((packed));
+
+struct ark_pkt_dir_inst {
+ volatile struct ark_pkt_dir_regs *regs;
+};
+
+ark_pkt_dir_t ark_pktdir_init(void *base);
+void ark_pktdir_uninit(ark_pkt_dir_t handle);
+void ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v);
+uint32_t ark_pktdir_stall_cnt(ark_pkt_dir_t handle);
+uint32_t ark_pktdir_status(ark_pkt_dir_t handle);
+
+#endif
diff --git a/drivers/net/ark/ark_pktgen.c b/drivers/net/ark/ark_pktgen.c
new file mode 100644
index 00000000..bdac054e
--- /dev/null
+++ b/drivers/net/ark/ark_pktgen.c
@@ -0,0 +1,496 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <getopt.h>
+#include <sys/time.h>
+#include <locale.h>
+#include <unistd.h>
+
+#include <rte_eal.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "ark_pktgen.h"
+#include "ark_logs.h"
+
+#define ARK_MAX_STR_LEN 64
+union OPTV {
+ int INT;
+ int BOOL;
+ uint64_t LONG;
+ char STR[ARK_MAX_STR_LEN];
+};
+
+enum OPTYPE {
+ OTINT,
+ OTLONG,
+ OTBOOL,
+ OTSTRING
+};
+
+struct OPTIONS {
+ char opt[ARK_MAX_STR_LEN];
+ enum OPTYPE t;
+ union OPTV v;
+};
+
+static struct OPTIONS toptions[] = {
+ {{"configure"}, OTBOOL, {1} },
+ {{"dg-mode"}, OTBOOL, {1} },
+ {{"run"}, OTBOOL, {0} },
+ {{"pause"}, OTBOOL, {0} },
+ {{"reset"}, OTBOOL, {0} },
+ {{"dump"}, OTBOOL, {0} },
+ {{"gen_forever"}, OTBOOL, {0} },
+ {{"en_slaved_start"}, OTBOOL, {0} },
+ {{"vary_length"}, OTBOOL, {0} },
+ {{"incr_payload"}, OTBOOL, {0} },
+ {{"incr_first_byte"}, OTBOOL, {0} },
+ {{"ins_seq_num"}, OTBOOL, {0} },
+ {{"ins_time_stamp"}, OTBOOL, {1} },
+ {{"ins_udp_hdr"}, OTBOOL, {0} },
+ {{"num_pkts"}, OTLONG, .v.LONG = 100000000},
+ {{"payload_byte"}, OTINT, {0x55} },
+ {{"pkt_spacing"}, OTINT, {130} },
+ {{"pkt_size_min"}, OTINT, {2006} },
+ {{"pkt_size_max"}, OTINT, {1514} },
+ {{"pkt_size_incr"}, OTINT, {1} },
+ {{"eth_type"}, OTINT, {0x0800} },
+ {{"src_mac_addr"}, OTLONG, .v.LONG = 0xdC3cF6425060L},
+ {{"dst_mac_addr"}, OTLONG, .v.LONG = 0x112233445566L},
+ {{"hdr_dW0"}, OTINT, {0x0016e319} },
+ {{"hdr_dW1"}, OTINT, {0x27150004} },
+ {{"hdr_dW2"}, OTINT, {0x76967bda} },
+ {{"hdr_dW3"}, OTINT, {0x08004500} },
+ {{"hdr_dW4"}, OTINT, {0x005276ed} },
+ {{"hdr_dW5"}, OTINT, {0x40004006} },
+ {{"hdr_dW6"}, OTINT, {0x56cfc0a8} },
+ {{"start_offset"}, OTINT, {0} },
+ {{"bytes_per_cycle"}, OTINT, {10} },
+ {{"shaping"}, OTBOOL, {0} },
+ {{"dst_ip"}, OTSTRING, .v.STR = "169.254.10.240"},
+ {{"dst_port"}, OTINT, {65536} },
+ {{"src_port"}, OTINT, {65536} },
+};
+
+ark_pkt_gen_t
+ark_pktgen_init(void *adr, int ord, int l2_mode)
+{
+ struct ark_pkt_gen_inst *inst =
+ rte_malloc("ark_pkt_gen_inst_pmd",
+ sizeof(struct ark_pkt_gen_inst), 0);
+ inst->regs = (struct ark_pkt_gen_regs *)adr;
+ inst->ordinal = ord;
+ inst->l2_mode = l2_mode;
+ return inst;
+}
+
+void
+ark_pktgen_uninit(ark_pkt_gen_t handle)
+{
+ rte_free(handle);
+}
+
+void
+ark_pktgen_run(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ inst->regs->pkt_start_stop = 1;
+}
+
+uint32_t
+ark_pktgen_paused(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_start_stop;
+
+ return (((r >> 16) & 1) == 1);
+}
+
+void
+ark_pktgen_pause(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ int cnt = 0;
+
+ inst->regs->pkt_start_stop = 0;
+
+ while (!ark_pktgen_paused(handle)) {
+ usleep(1000);
+ if (cnt++ > 100) {
+ PMD_DRV_LOG(ERR, "Pktgen %d failed to pause.\n",
+ inst->ordinal);
+ break;
+ }
+ }
+ PMD_DEBUG_LOG(DEBUG, "Pktgen %d paused.\n", inst->ordinal);
+}
+
+void
+ark_pktgen_reset(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ if (!ark_pktgen_is_running(handle) &&
+ !ark_pktgen_paused(handle)) {
+ PMD_DEBUG_LOG(DEBUG, "Pktgen %d is not running"
+ " and is not paused. No need to reset.\n",
+ inst->ordinal);
+ return;
+ }
+
+ if (ark_pktgen_is_running(handle) &&
+ !ark_pktgen_paused(handle)) {
+ PMD_DEBUG_LOG(DEBUG,
+ "Pktgen %d is not paused. Pausing first.\n",
+ inst->ordinal);
+ ark_pktgen_pause(handle);
+ }
+
+ PMD_DEBUG_LOG(DEBUG, "Resetting pktgen %d.\n", inst->ordinal);
+ inst->regs->pkt_start_stop = (1 << 8);
+}
+
+uint32_t
+ark_pktgen_tx_done(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_start_stop;
+
+ return (((r >> 24) & 1) == 1);
+}
+
+uint32_t
+ark_pktgen_is_running(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_start_stop;
+
+ return ((r & 1) == 1);
+}
+
+uint32_t
+ark_pktgen_is_gen_forever(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_ctrl;
+
+ return (((r >> 24) & 1) == 1);
+}
+
+void
+ark_pktgen_wait_done(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ int wait_cycle = 10;
+
+ if (ark_pktgen_is_gen_forever(handle))
+ PMD_DRV_LOG(ERR, "Pktgen wait_done will not terminate"
+ " because gen_forever=1\n");
+
+ while (!ark_pktgen_tx_done(handle) && (wait_cycle > 0)) {
+ usleep(1000);
+ wait_cycle--;
+ PMD_DEBUG_LOG(DEBUG,
+ "Waiting for pktgen %d to finish sending...\n",
+ inst->ordinal);
+ }
+ PMD_DEBUG_LOG(DEBUG, "Pktgen %d done.\n", inst->ordinal);
+}
+
+uint32_t
+ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ return inst->regs->pkts_sent;
+}
+
+void
+ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_payload = b;
+}
+
+void
+ark_pktgen_set_pkt_spacing(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_spacing = x;
+}
+
+void
+ark_pktgen_set_pkt_size_min(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_size_min = x;
+}
+
+void
+ark_pktgen_set_pkt_size_max(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_size_max = x;
+}
+
+void
+ark_pktgen_set_pkt_size_incr(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_size_incr = x;
+}
+
+void
+ark_pktgen_set_num_pkts(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->num_pkts = x;
+}
+
+void
+ark_pktgen_set_src_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->src_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->regs->src_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktgen_set_dst_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->dst_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->regs->dst_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktgen_set_eth_type(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->eth_type = x;
+}
+
+void
+ark_pktgen_set_hdr_dW(ark_pkt_gen_t handle, uint32_t *hdr)
+{
+ uint32_t i;
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ for (i = 0; i < 7; i++)
+ inst->regs->hdr_dw[i] = hdr[i];
+}
+
+void
+ark_pktgen_set_start_offset(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ inst->regs->start_offset = x;
+}
+
+static struct OPTIONS *
+options(const char *id)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof(toptions) / sizeof(struct OPTIONS); i++) {
+ if (strcmp(id, toptions[i].opt) == 0)
+ return &toptions[i];
+ }
+
+ PMD_DRV_LOG(ERR,
+ "Pktgen: Could not find requested option!, "
+ "option = %s\n",
+ id
+ );
+ return NULL;
+}
+
+static int pmd_set_arg(char *arg, char *val);
+static int
+pmd_set_arg(char *arg, char *val)
+{
+ struct OPTIONS *o = options(arg);
+
+ if (o) {
+ switch (o->t) {
+ case OTINT:
+ case OTBOOL:
+ o->v.INT = atoi(val);
+ break;
+ case OTLONG:
+ o->v.INT = atoll(val);
+ break;
+ case OTSTRING:
+ strncpy(o->v.STR, val, ARK_MAX_STR_LEN);
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/******
+ * Arg format = "opt0=v,opt_n=v ..."
+ ******/
+void
+ark_pktgen_parse(char *args)
+{
+ char *argv, *v;
+ const char toks[] = " =\n\t\v\f \r";
+ argv = strtok(args, toks);
+ v = strtok(NULL, toks);
+ while (argv && v) {
+ pmd_set_arg(argv, v);
+ argv = strtok(NULL, toks);
+ v = strtok(NULL, toks);
+ }
+}
+
+static int32_t parse_ipv4_string(char const *ip_address);
+static int32_t
+parse_ipv4_string(char const *ip_address)
+{
+ unsigned int ip[4];
+
+ if (sscanf(ip_address, "%u.%u.%u.%u",
+ &ip[0], &ip[1], &ip[2], &ip[3]) != 4)
+ return 0;
+ return ip[3] + ip[2] * 0x100 + ip[1] * 0x10000ul + ip[0] * 0x1000000ul;
+}
+
+static void
+ark_pktgen_set_pkt_ctrl(ark_pkt_gen_t handle,
+ uint32_t gen_forever,
+ uint32_t en_slaved_start,
+ uint32_t vary_length,
+ uint32_t incr_payload,
+ uint32_t incr_first_byte,
+ uint32_t ins_seq_num,
+ uint32_t ins_udp_hdr,
+ uint32_t ins_time_stamp)
+{
+ uint32_t r;
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ if (!inst->l2_mode)
+ ins_udp_hdr = 0;
+
+ r = ((gen_forever << 24) |
+ (en_slaved_start << 20) |
+ (vary_length << 16) |
+ (incr_payload << 12) |
+ (incr_first_byte << 8) |
+ (ins_time_stamp << 5) |
+ (ins_seq_num << 4) |
+ ins_udp_hdr);
+
+ inst->regs->bytes_per_cycle = options("bytes_per_cycle")->v.INT;
+ if (options("shaping")->v.BOOL)
+ r = r | (1 << 28); /* enable shaping */
+
+ inst->regs->pkt_ctrl = r;
+}
+
+void
+ark_pktgen_setup(ark_pkt_gen_t handle)
+{
+ uint32_t hdr[7];
+ int32_t dst_ip = parse_ipv4_string(options("dst_ip")->v.STR);
+
+ if (!options("pause")->v.BOOL &&
+ (!options("reset")->v.BOOL &&
+ (options("configure")->v.BOOL))) {
+ ark_pktgen_set_payload_byte(handle,
+ options("payload_byte")->v.INT);
+ ark_pktgen_set_src_mac_addr(handle,
+ options("src_mac_addr")->v.INT);
+ ark_pktgen_set_dst_mac_addr(handle,
+ options("dst_mac_addr")->v.LONG);
+ ark_pktgen_set_eth_type(handle,
+ options("eth_type")->v.INT);
+
+ if (options("dg-mode")->v.BOOL) {
+ hdr[0] = options("hdr_dW0")->v.INT;
+ hdr[1] = options("hdr_dW1")->v.INT;
+ hdr[2] = options("hdr_dW2")->v.INT;
+ hdr[3] = options("hdr_dW3")->v.INT;
+ hdr[4] = options("hdr_dW4")->v.INT;
+ hdr[5] = options("hdr_dW5")->v.INT;
+ hdr[6] = options("hdr_dW6")->v.INT;
+ } else {
+ hdr[0] = dst_ip;
+ hdr[1] = options("dst_port")->v.INT;
+ hdr[2] = options("src_port")->v.INT;
+ hdr[3] = 0;
+ hdr[4] = 0;
+ hdr[5] = 0;
+ hdr[6] = 0;
+ }
+ ark_pktgen_set_hdr_dW(handle, hdr);
+ ark_pktgen_set_num_pkts(handle,
+ options("num_pkts")->v.INT);
+ ark_pktgen_set_pkt_size_min(handle,
+ options("pkt_size_min")->v.INT);
+ ark_pktgen_set_pkt_size_max(handle,
+ options("pkt_size_max")->v.INT);
+ ark_pktgen_set_pkt_size_incr(handle,
+ options("pkt_size_incr")->v.INT);
+ ark_pktgen_set_pkt_spacing(handle,
+ options("pkt_spacing")->v.INT);
+ ark_pktgen_set_start_offset(handle,
+ options("start_offset")->v.INT);
+ ark_pktgen_set_pkt_ctrl(handle,
+ options("gen_forever")->v.BOOL,
+ options("en_slaved_start")->v.BOOL,
+ options("vary_length")->v.BOOL,
+ options("incr_payload")->v.BOOL,
+ options("incr_first_byte")->v.BOOL,
+ options("ins_seq_num")->v.INT,
+ options("ins_udp_hdr")->v.BOOL,
+ options("ins_time_stamp")->v.INT);
+ }
+
+ if (options("pause")->v.BOOL)
+ ark_pktgen_pause(handle);
+
+ if (options("reset")->v.BOOL)
+ ark_pktgen_reset(handle);
+ if (options("run")->v.BOOL) {
+ PMD_DEBUG_LOG(DEBUG, "Starting packet generator on port %d\n",
+ options("port")->v.INT);
+ ark_pktgen_run(handle);
+ }
+}
diff --git a/drivers/net/ark/ark_pktgen.h b/drivers/net/ark/ark_pktgen.h
new file mode 100644
index 00000000..bf5a241b
--- /dev/null
+++ b/drivers/net/ark/ark_pktgen.h
@@ -0,0 +1,108 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_PKTGEN_H_
+#define _ARK_PKTGEN_H_
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#define ARK_PKTGEN_BASE_ADR 0x10000
+
+typedef void *ark_pkt_gen_t;
+
+/* The packet generator is an internal Arkville hardware module, which
+ * generates known packets for use in integrity and line-rate testing.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * This is an overlay structure to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+struct ark_pkt_gen_regs {
+ uint32_t r0;
+ volatile uint32_t pkt_start_stop;
+ volatile uint32_t pkt_ctrl;
+ uint32_t pkt_payload;
+ uint32_t pkt_spacing;
+ uint32_t pkt_size_min;
+ uint32_t pkt_size_max;
+ uint32_t pkt_size_incr;
+ volatile uint32_t num_pkts;
+ volatile uint32_t pkts_sent;
+ uint32_t src_mac_addr_l;
+ uint32_t src_mac_addr_h;
+ uint32_t dst_mac_addr_l;
+ uint32_t dst_mac_addr_h;
+ uint32_t eth_type;
+ uint32_t hdr_dw[7];
+ uint32_t start_offset;
+ uint32_t bytes_per_cycle;
+} __attribute__ ((packed));
+
+struct ark_pkt_gen_inst {
+ struct rte_eth_dev_info *dev_info;
+ struct ark_pkt_gen_regs *regs;
+ int l2_mode;
+ int ordinal;
+};
+
+/* packet generator functions */
+ark_pkt_gen_t ark_pktgen_init(void *arg, int ord, int l2_mode);
+void ark_pktgen_uninit(ark_pkt_gen_t handle);
+void ark_pktgen_run(ark_pkt_gen_t handle);
+void ark_pktgen_pause(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_paused(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_is_gen_forever(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_is_running(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_tx_done(ark_pkt_gen_t handle);
+void ark_pktgen_reset(ark_pkt_gen_t handle);
+void ark_pktgen_wait_done(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle);
+void ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b);
+void ark_pktgen_set_pkt_spacing(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_pkt_size_min(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_pkt_size_max(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_pkt_size_incr(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_num_pkts(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_src_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr);
+void ark_pktgen_set_dst_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr);
+void ark_pktgen_set_eth_type(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_hdr_dW(ark_pkt_gen_t handle, uint32_t *hdr);
+void ark_pktgen_set_start_offset(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_parse(char *argv);
+void ark_pktgen_setup(ark_pkt_gen_t handle);
+
+#endif
diff --git a/drivers/net/ark/ark_rqp.c b/drivers/net/ark/ark_rqp.c
new file mode 100644
index 00000000..41c497b0
--- /dev/null
+++ b/drivers/net/ark/ark_rqp.c
@@ -0,0 +1,97 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_rqp.h"
+#include "ark_logs.h"
+
+/* ************************************************************************* */
+void
+ark_rqp_stats_reset(struct ark_rqpace_t *rqp)
+{
+ rqp->stats_clear = 1;
+ /* POR 992 */
+ /* rqp->cpld_max = 992; */
+ /* POR 64 */
+ /* rqp->cplh_max = 64; */
+}
+
+/* ************************************************************************* */
+void
+ark_rqp_dump(struct ark_rqpace_t *rqp)
+{
+ if (rqp->err_count_other != 0)
+ PMD_DRV_LOG(ERR,
+ "RQP Errors noted: ctrl: %d cplh_hmax %d cpld_max %d"
+ ARK_SU32
+ ARK_SU32 "\n",
+ rqp->ctrl, rqp->cplh_max, rqp->cpld_max,
+ "Error Count", rqp->err_cnt,
+ "Error General", rqp->err_count_other);
+
+ PMD_STATS_LOG(INFO, "RQP Dump: ctrl: %d cplh_hmax %d cpld_max %d"
+ ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n",
+ rqp->ctrl, rqp->cplh_max, rqp->cpld_max,
+ "Error Count", rqp->err_cnt,
+ "Error General", rqp->err_count_other,
+ "stall_pS", rqp->stall_ps,
+ "stall_pS Min", rqp->stall_ps_min,
+ "stall_pS Max", rqp->stall_ps_max,
+ "req_pS", rqp->req_ps,
+ "req_pS Min", rqp->req_ps_min,
+ "req_pS Max", rqp->req_ps_max,
+ "req_dWPS", rqp->req_dw_ps,
+ "req_dWPS Min", rqp->req_dw_ps_min,
+ "req_dWPS Max", rqp->req_dw_ps_max,
+ "cpl_pS", rqp->cpl_ps,
+ "cpl_pS Min", rqp->cpl_ps_min,
+ "cpl_pS Max", rqp->cpl_ps_max,
+ "cpl_dWPS", rqp->cpl_dw_ps,
+ "cpl_dWPS Min", rqp->cpl_dw_ps_min,
+ "cpl_dWPS Max", rqp->cpl_dw_ps_max,
+ "cplh pending", rqp->cplh_pending,
+ "cpld pending", rqp->cpld_pending,
+ "cplh pending max", rqp->cplh_pending_max,
+ "cpld pending max", rqp->cpld_pending_max);
+}
+
+int
+ark_rqp_lasped(struct ark_rqpace_t *rqp)
+{
+ return rqp->lasped;
+}
diff --git a/drivers/net/ark/ark_rqp.h b/drivers/net/ark/ark_rqp.h
new file mode 100644
index 00000000..0c380071
--- /dev/null
+++ b/drivers/net/ark/ark_rqp.h
@@ -0,0 +1,86 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_RQP_H_
+#define _ARK_RQP_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/* The RQP or ReQuest Pacer is an internal Arkville hardware module
+ * which limits the PCIE data flow to insure correct operation for the
+ * particular hardware PCIE endpoint.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * RQ Pacing core hardware structure
+ * This is an overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+struct ark_rqpace_t {
+ volatile uint32_t ctrl;
+ volatile uint32_t stats_clear;
+ volatile uint32_t cplh_max;
+ volatile uint32_t cpld_max;
+ volatile uint32_t err_cnt;
+ volatile uint32_t stall_ps;
+ volatile uint32_t stall_ps_min;
+ volatile uint32_t stall_ps_max;
+ volatile uint32_t req_ps;
+ volatile uint32_t req_ps_min;
+ volatile uint32_t req_ps_max;
+ volatile uint32_t req_dw_ps;
+ volatile uint32_t req_dw_ps_min;
+ volatile uint32_t req_dw_ps_max;
+ volatile uint32_t cpl_ps;
+ volatile uint32_t cpl_ps_min;
+ volatile uint32_t cpl_ps_max;
+ volatile uint32_t cpl_dw_ps;
+ volatile uint32_t cpl_dw_ps_min;
+ volatile uint32_t cpl_dw_ps_max;
+ volatile uint32_t cplh_pending;
+ volatile uint32_t cpld_pending;
+ volatile uint32_t cplh_pending_max;
+ volatile uint32_t cpld_pending_max;
+ volatile uint32_t err_count_other;
+ char eval[4];
+ volatile int lasped;
+};
+
+void ark_rqp_dump(struct ark_rqpace_t *rqp);
+void ark_rqp_stats_reset(struct ark_rqpace_t *rqp);
+int ark_rqp_lasped(struct ark_rqpace_t *rqp);
+#endif
diff --git a/drivers/net/ark/ark_udm.c b/drivers/net/ark/ark_udm.c
new file mode 100644
index 00000000..1ba7d26d
--- /dev/null
+++ b/drivers/net/ark/ark_udm.c
@@ -0,0 +1,226 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_logs.h"
+#include "ark_udm.h"
+
+int
+ark_udm_verify(struct ark_udm_t *udm)
+{
+ if (sizeof(struct ark_udm_t) != ARK_UDM_EXPECT_SIZE) {
+ PMD_DRV_LOG(ERR,
+ "ARK: UDM structure looks incorrect %d vs %zd\n",
+ ARK_UDM_EXPECT_SIZE, sizeof(struct ark_udm_t));
+ return -1;
+ }
+
+ if (udm->setup.const0 != ARK_UDM_CONST) {
+ PMD_DRV_LOG(ERR,
+ "ARK: UDM module not found as expected 0x%08x\n",
+ udm->setup.const0);
+ return -1;
+ }
+ return 0;
+}
+
+int
+ark_udm_stop(struct ark_udm_t *udm, const int wait)
+{
+ int cnt = 0;
+
+ udm->cfg.command = 2;
+
+ while (wait && (udm->cfg.stop_flushed & 0x01) == 0) {
+ if (cnt++ > 1000)
+ return 1;
+
+ usleep(10);
+ }
+ return 0;
+}
+
+int
+ark_udm_reset(struct ark_udm_t *udm)
+{
+ int status;
+
+ status = ark_udm_stop(udm, 1);
+ if (status != 0) {
+ PMD_DEBUG_LOG(INFO, "%s stop failed doing forced reset\n",
+ __func__);
+ udm->cfg.command = 4;
+ usleep(10);
+ udm->cfg.command = 3;
+ status = ark_udm_stop(udm, 0);
+ PMD_DEBUG_LOG(INFO, "%s stop status %d post failure"
+ " and forced reset\n",
+ __func__, status);
+ } else {
+ udm->cfg.command = 3;
+ }
+
+ return status;
+}
+
+void
+ark_udm_start(struct ark_udm_t *udm)
+{
+ udm->cfg.command = 1;
+}
+
+void
+ark_udm_stats_reset(struct ark_udm_t *udm)
+{
+ udm->pcibp.pci_clear = 1;
+ udm->tlp_ps.tlp_clear = 1;
+}
+
+void
+ark_udm_configure(struct ark_udm_t *udm,
+ uint32_t headroom,
+ uint32_t dataroom,
+ uint32_t write_interval_ns)
+{
+ /* headroom and data room are in DWords in the UDM */
+ udm->cfg.dataroom = dataroom / 4;
+ udm->cfg.headroom = headroom / 4;
+
+ /* 4 NS period ns */
+ udm->rt_cfg.write_interval = write_interval_ns / 4;
+}
+
+void
+ark_udm_write_addr(struct ark_udm_t *udm, phys_addr_t addr)
+{
+ udm->rt_cfg.hw_prod_addr = addr;
+}
+
+int
+ark_udm_is_flushed(struct ark_udm_t *udm)
+{
+ return (udm->cfg.stop_flushed & 0x01) != 0;
+}
+
+uint64_t
+ark_udm_dropped(struct ark_udm_t *udm)
+{
+ return udm->qstats.q_pkt_drop;
+}
+
+uint64_t
+ark_udm_bytes(struct ark_udm_t *udm)
+{
+ return udm->qstats.q_byte_count;
+}
+
+uint64_t
+ark_udm_packets(struct ark_udm_t *udm)
+{
+ return udm->qstats.q_ff_packet_count;
+}
+
+void
+ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg)
+{
+ PMD_STATS_LOG(INFO, "UDM Stats: %s"
+ ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 "\n",
+ msg,
+ "Pkts Received", udm->stats.rx_packet_count,
+ "Pkts Finalized", udm->stats.rx_sent_packets,
+ "Pkts Dropped", udm->tlp.pkt_drop,
+ "Bytes Count", udm->stats.rx_byte_count,
+ "MBuf Count", udm->stats.rx_mbuf_count);
+}
+
+void
+ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg, uint16_t qid)
+{
+ PMD_STATS_LOG(INFO, "UDM Queue %3u Stats: %s"
+ ARK_SU64 ARK_SU64
+ ARK_SU64 ARK_SU64
+ ARK_SU64 "\n",
+ qid, msg,
+ "Pkts Received", udm->qstats.q_packet_count,
+ "Pkts Finalized", udm->qstats.q_ff_packet_count,
+ "Pkts Dropped", udm->qstats.q_pkt_drop,
+ "Bytes Count", udm->qstats.q_byte_count,
+ "MBuf Count", udm->qstats.q_mbuf_count);
+}
+
+void
+ark_udm_dump(struct ark_udm_t *udm, const char *msg)
+{
+ PMD_DEBUG_LOG(DEBUG, "UDM Dump: %s Stopped: %d\n", msg,
+ udm->cfg.stop_flushed);
+}
+
+void
+ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id)
+{
+ PMD_DEBUG_LOG(DEBUG, "UDM Setup Q: %u"
+ ARK_SU64X ARK_SU32 "\n",
+ q_id,
+ "hw_prod_addr", udm->rt_cfg.hw_prod_addr,
+ "prod_idx", udm->rt_cfg.prod_idx);
+}
+
+void
+ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg)
+{
+ struct ark_udm_pcibp_t *bp = &udm->pcibp;
+
+ PMD_STATS_LOG(INFO, "UDM Performance %s"
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32
+ "\n",
+ msg,
+ "PCI Empty", bp->pci_empty,
+ "PCI Q1", bp->pci_q1,
+ "PCI Q2", bp->pci_q2,
+ "PCI Q3", bp->pci_q3,
+ "PCI Q4", bp->pci_q4,
+ "PCI Full", bp->pci_full);
+}
+
+void
+ark_udm_queue_stats_reset(struct ark_udm_t *udm)
+{
+ udm->qstats.q_byte_count = 1;
+}
+
+void
+ark_udm_queue_enable(struct ark_udm_t *udm, int enable)
+{
+ udm->qstats.q_enable = enable ? 1 : 0;
+}
diff --git a/drivers/net/ark/ark_udm.h b/drivers/net/ark/ark_udm.h
new file mode 100644
index 00000000..29bf1e8f
--- /dev/null
+++ b/drivers/net/ark/ark_udm.h
@@ -0,0 +1,192 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_UDM_H_
+#define _ARK_UDM_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/* The UDM or Upstream Data Mover is an internal Arkville hardware
+ * module for moving packet from the RX packet streams to host memory.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/* Meta data structure apssed from FPGA, must match layout in FPGA */
+struct ark_rx_meta {
+ uint64_t timestamp;
+ uint64_t user_data;
+ uint8_t port;
+ uint8_t dst_queue;
+ uint16_t pkt_len;
+};
+
+/*
+ * UDM hardware structures
+ * These are overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+
+#define ARK_RX_WRITE_TIME_NS 2500
+#define ARK_UDM_SETUP 0
+#define ARK_UDM_CONST 0xbACECACE
+struct ark_udm_setup_t {
+ uint32_t r0;
+ uint32_t r4;
+ volatile uint32_t cycle_count;
+ uint32_t const0;
+};
+
+#define ARK_UDM_CFG 0x010
+struct ark_udm_cfg_t {
+ volatile uint32_t stop_flushed; /* RO */
+ volatile uint32_t command;
+ uint32_t dataroom;
+ uint32_t headroom;
+};
+
+typedef enum {
+ ARK_UDM_START = 0x1,
+ ARK_UDM_STOP = 0x2,
+ ARK_UDM_RESET = 0x3
+} ark_udm_commands;
+
+#define ARK_UDM_STATS 0x020
+struct ark_udm_stats_t {
+ volatile uint64_t rx_byte_count;
+ volatile uint64_t rx_packet_count;
+ volatile uint64_t rx_mbuf_count;
+ volatile uint64_t rx_sent_packets;
+};
+
+#define ARK_UDM_PQ 0x040
+struct ark_udm_queue_stats_t {
+ volatile uint64_t q_byte_count;
+ volatile uint64_t q_packet_count; /* includes drops */
+ volatile uint64_t q_mbuf_count;
+ volatile uint64_t q_ff_packet_count;
+ volatile uint64_t q_pkt_drop;
+ uint32_t q_enable;
+};
+
+#define ARK_UDM_TLP 0x0070
+struct ark_udm_tlp_t {
+ volatile uint64_t pkt_drop; /* global */
+ volatile uint32_t tlp_q1;
+ volatile uint32_t tlp_q2;
+ volatile uint32_t tlp_q3;
+ volatile uint32_t tlp_q4;
+ volatile uint32_t tlp_full;
+};
+
+#define ARK_UDM_PCIBP 0x00a0
+struct ark_udm_pcibp_t {
+ volatile uint32_t pci_clear;
+ volatile uint32_t pci_empty;
+ volatile uint32_t pci_q1;
+ volatile uint32_t pci_q2;
+ volatile uint32_t pci_q3;
+ volatile uint32_t pci_q4;
+ volatile uint32_t pci_full;
+};
+
+#define ARK_UDM_TLP_PS 0x00bc
+struct ark_udm_tlp_ps_t {
+ volatile uint32_t tlp_clear;
+ volatile uint32_t tlp_ps_min;
+ volatile uint32_t tlp_ps_max;
+ volatile uint32_t tlp_full_ps_min;
+ volatile uint32_t tlp_full_ps_max;
+ volatile uint32_t tlp_dw_ps_min;
+ volatile uint32_t tlp_dw_ps_max;
+ volatile uint32_t tlp_pldw_ps_min;
+ volatile uint32_t tlp_pldw_ps_max;
+};
+
+#define ARK_UDM_RT_CFG 0x00e0
+struct ark_udm_rt_cfg_t {
+ phys_addr_t hw_prod_addr;
+ uint32_t write_interval; /* 4ns cycles */
+ volatile uint32_t prod_idx; /* RO */
+};
+
+/* Consolidated structure */
+#define ARK_UDM_EXPECT_SIZE (0x00fc + 4)
+#define ARK_UDM_QOFFSET ARK_UDM_EXPECT_SIZE
+struct ark_udm_t {
+ struct ark_udm_setup_t setup;
+ struct ark_udm_cfg_t cfg;
+ struct ark_udm_stats_t stats;
+ struct ark_udm_queue_stats_t qstats;
+ uint8_t reserved1[(ARK_UDM_TLP - ARK_UDM_PQ) -
+ sizeof(struct ark_udm_queue_stats_t)];
+ struct ark_udm_tlp_t tlp;
+ uint8_t reserved2[(ARK_UDM_PCIBP - ARK_UDM_TLP) -
+ sizeof(struct ark_udm_tlp_t)];
+ struct ark_udm_pcibp_t pcibp;
+ struct ark_udm_tlp_ps_t tlp_ps;
+ struct ark_udm_rt_cfg_t rt_cfg;
+ int8_t reserved3[(ARK_UDM_EXPECT_SIZE - ARK_UDM_RT_CFG) -
+ sizeof(struct ark_udm_rt_cfg_t)];
+};
+
+
+int ark_udm_verify(struct ark_udm_t *udm);
+int ark_udm_stop(struct ark_udm_t *udm, int wait);
+void ark_udm_start(struct ark_udm_t *udm);
+int ark_udm_reset(struct ark_udm_t *udm);
+void ark_udm_configure(struct ark_udm_t *udm,
+ uint32_t headroom,
+ uint32_t dataroom,
+ uint32_t write_interval_ns);
+void ark_udm_write_addr(struct ark_udm_t *udm, phys_addr_t addr);
+void ark_udm_stats_reset(struct ark_udm_t *udm);
+void ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg,
+ uint16_t qid);
+void ark_udm_dump(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id);
+int ark_udm_is_flushed(struct ark_udm_t *udm);
+
+/* Per queue data */
+uint64_t ark_udm_dropped(struct ark_udm_t *udm);
+uint64_t ark_udm_bytes(struct ark_udm_t *udm);
+uint64_t ark_udm_packets(struct ark_udm_t *udm);
+
+void ark_udm_queue_stats_reset(struct ark_udm_t *udm);
+void ark_udm_queue_enable(struct ark_udm_t *udm, int enable);
+
+#endif
diff --git a/drivers/net/ark/rte_pmd_ark_version.map b/drivers/net/ark/rte_pmd_ark_version.map
new file mode 100644
index 00000000..1062e042
--- /dev/null
+++ b/drivers/net/ark/rte_pmd_ark_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+ local: *;
+
+};
diff --git a/drivers/net/avp/Makefile b/drivers/net/avp/Makefile
new file mode 100644
index 00000000..cd465aac
--- /dev/null
+++ b/drivers/net/avp/Makefile
@@ -0,0 +1,57 @@
+# BSD LICENSE
+#
+# Copyright(c) 2013-2017, Wind River Systems, Inc. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Wind River Systems nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_avp.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+EXPORT_MAP := rte_pmd_avp_version.map
+
+LIBABIVER := 1
+
+# install public header files to enable compilation of the hypervisor level
+# dpdk application
+SYMLINK-$(CONFIG_RTE_LIBRTE_AVP_PMD)-include += rte_avp_common.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_AVP_PMD)-include += rte_avp_fifo.h
+
+#
+# all source files are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp_ethdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
new file mode 100644
index 00000000..fe6849f5
--- /dev/null
+++ b/drivers/net/avp/avp_ethdev.c
@@ -0,0 +1,2312 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (c) 2013-2017, Wind River Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2) Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3) Neither the name of Wind River Systems nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_dev.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_io.h>
+
+#include "rte_avp_common.h"
+#include "rte_avp_fifo.h"
+
+#include "avp_logs.h"
+
+
+static int avp_dev_create(struct rte_pci_device *pci_dev,
+ struct rte_eth_dev *eth_dev);
+
+static int avp_dev_configure(struct rte_eth_dev *dev);
+static int avp_dev_start(struct rte_eth_dev *dev);
+static void avp_dev_stop(struct rte_eth_dev *dev);
+static void avp_dev_close(struct rte_eth_dev *dev);
+static void avp_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int avp_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete);
+static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);
+
+static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *pool);
+
+static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+static uint16_t avp_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+static uint16_t avp_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+static uint16_t avp_xmit_scattered_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+static uint16_t avp_xmit_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+static void avp_dev_rx_queue_release(void *rxq);
+static void avp_dev_tx_queue_release(void *txq);
+
+static void avp_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void avp_dev_stats_reset(struct rte_eth_dev *dev);
+
+
+#define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
+
+
+#define AVP_MAX_RX_BURST 64
+#define AVP_MAX_TX_BURST 64
+#define AVP_MAX_MAC_ADDRS 1
+#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN
+
+
+/*
+ * Defines the number of microseconds to wait before checking the response
+ * queue for completion.
+ */
+#define AVP_REQUEST_DELAY_USECS (5000)
+
+/*
+ * Defines the number times to check the response queue for completion before
+ * declaring a timeout.
+ */
+#define AVP_MAX_REQUEST_RETRY (100)
+
+/* Defines the current PCI driver version number */
+#define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_avp_map[] = {
+ { .vendor_id = RTE_AVP_PCI_VENDOR_ID,
+ .device_id = RTE_AVP_PCI_DEVICE_ID,
+ .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
+ .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
+ .class_id = RTE_CLASS_ANY_ID,
+ },
+
+ { .vendor_id = 0, /* sentinel */
+ },
+};
+
+/*
+ * dev_ops for avp, bare necessities for basic operation
+ */
+static const struct eth_dev_ops avp_eth_dev_ops = {
+ .dev_configure = avp_dev_configure,
+ .dev_start = avp_dev_start,
+ .dev_stop = avp_dev_stop,
+ .dev_close = avp_dev_close,
+ .dev_infos_get = avp_dev_info_get,
+ .vlan_offload_set = avp_vlan_offload_set,
+ .stats_get = avp_dev_stats_get,
+ .stats_reset = avp_dev_stats_reset,
+ .link_update = avp_dev_link_update,
+ .promiscuous_enable = avp_dev_promiscuous_enable,
+ .promiscuous_disable = avp_dev_promiscuous_disable,
+ .rx_queue_setup = avp_dev_rx_queue_setup,
+ .rx_queue_release = avp_dev_rx_queue_release,
+ .tx_queue_setup = avp_dev_tx_queue_setup,
+ .tx_queue_release = avp_dev_tx_queue_release,
+};
+
+/**@{ AVP device flags */
+#define AVP_F_PROMISC (1 << 1)
+#define AVP_F_CONFIGURED (1 << 2)
+#define AVP_F_LINKUP (1 << 3)
+#define AVP_F_DETACHED (1 << 4)
+/**@} */
+
+/* Ethernet device validation marker */
+#define AVP_ETHDEV_MAGIC 0x92972862
+
+/*
+ * Defines the AVP device attributes which are attached to an RTE ethernet
+ * device
+ */
+struct avp_dev {
+ uint32_t magic; /**< Memory validation marker */
+ uint64_t device_id; /**< Unique system identifier */
+ struct ether_addr ethaddr; /**< Host specified MAC address */
+ struct rte_eth_dev_data *dev_data;
+ /**< Back pointer to ethernet device data */
+ volatile uint32_t flags; /**< Device operational flags */
+ uint8_t port_id; /**< Ethernet port identifier */
+ struct rte_mempool *pool; /**< pkt mbuf mempool */
+ unsigned int guest_mbuf_size; /**< local pool mbuf size */
+ unsigned int host_mbuf_size; /**< host mbuf size */
+ unsigned int max_rx_pkt_len; /**< maximum receive unit */
+ uint32_t host_features; /**< Supported feature bitmap */
+ uint32_t features; /**< Enabled feature bitmap */
+ unsigned int num_tx_queues; /**< Negotiated number of transmit queues */
+ unsigned int max_tx_queues; /**< Maximum number of transmit queues */
+ unsigned int num_rx_queues; /**< Negotiated number of receive queues */
+ unsigned int max_rx_queues; /**< Maximum number of receive queues */
+
+ struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */
+ struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */
+ struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
+ /**< Allocated mbufs queue */
+ struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
+ /**< To be freed mbufs queue */
+
+ /* mutual exclusion over the 'flag' and 'resp_q/req_q' fields */
+ rte_spinlock_t lock;
+
+ /* For request & response */
+ struct rte_avp_fifo *req_q; /**< Request queue */
+ struct rte_avp_fifo *resp_q; /**< Response queue */
+ void *host_sync_addr; /**< (host) Req/Resp Mem address */
+ void *sync_addr; /**< Req/Resp Mem address */
+ void *host_mbuf_addr; /**< (host) MBUF pool start address */
+ void *mbuf_addr; /**< MBUF pool start address */
+} __rte_cache_aligned;
+
+/* RTE ethernet private data */
+struct avp_adapter {
+ struct avp_dev avp;
+} __rte_cache_aligned;
+
+
+/* 32-bit MMIO register write */
+#define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))
+
+/* 32-bit MMIO register read */
+#define AVP_READ32(_addr) rte_read32_relaxed((_addr))
+
+/* Macro to cast the ethernet device private data to a AVP object */
+#define AVP_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct avp_adapter *)adapter)->avp)
+
+/*
+ * Defines the structure of a AVP device queue for the purpose of handling the
+ * receive and transmit burst callback functions
+ */
+struct avp_queue {
+ struct rte_eth_dev_data *dev_data;
+ /**< Backpointer to ethernet device data */
+ struct avp_dev *avp; /**< Backpointer to AVP device */
+ uint16_t queue_id;
+ /**< Queue identifier used for indexing current queue */
+ uint16_t queue_base;
+ /**< Base queue identifier for queue servicing */
+ uint16_t queue_limit;
+ /**< Maximum queue identifier for queue servicing */
+
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+};
+
+/* send a request and wait for a response
+ *
+ * @warning must be called while holding the avp->lock spinlock.
+ */
+static int
+avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
+{
+ unsigned int retry = AVP_MAX_REQUEST_RETRY;
+ void *resp_addr = NULL;
+ unsigned int count;
+ int ret;
+
+ PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);
+
+ request->result = -ENOTSUP;
+
+ /* Discard any stale responses before starting a new request */
+ while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
+ PMD_DRV_LOG(DEBUG, "Discarding stale response\n");
+
+ rte_memcpy(avp->sync_addr, request, sizeof(*request));
+ count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
+ if (count < 1) {
+ PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
+ request->req_id);
+ ret = -EBUSY;
+ goto done;
+ }
+
+ while (retry--) {
+ /* wait for a response */
+ usleep(AVP_REQUEST_DELAY_USECS);
+
+ count = avp_fifo_count(avp->resp_q);
+ if (count >= 1) {
+ /* response received */
+ break;
+ }
+
+ if ((count < 1) && (retry == 0)) {
+ PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
+ request->req_id);
+ ret = -ETIME;
+ goto done;
+ }
+ }
+
+ /* retrieve the response */
+ count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
+ if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
+ PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
+ count, resp_addr, avp->host_sync_addr);
+ ret = -ENODATA;
+ goto done;
+ }
+
+ /* copy to user buffer */
+ rte_memcpy(request, avp->sync_addr, sizeof(*request));
+ ret = 0;
+
+ PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
+ request->result, request->req_id);
+
+done:
+ return ret;
+}
+
+static int
+avp_dev_ctrl_set_link_state(struct rte_eth_dev *eth_dev, unsigned int state)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_request request;
+ int ret;
+
+ /* setup a link state change request */
+ memset(&request, 0, sizeof(request));
+ request.req_id = RTE_AVP_REQ_CFG_NETWORK_IF;
+ request.if_up = state;
+
+ ret = avp_dev_process_request(avp, &request);
+
+ return ret == 0 ? request.result : ret;
+}
+
+static int
+avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
+ struct rte_avp_device_config *config)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_request request;
+ int ret;
+
+ /* setup a configure request */
+ memset(&request, 0, sizeof(request));
+ request.req_id = RTE_AVP_REQ_CFG_DEVICE;
+ memcpy(&request.config, config, sizeof(request.config));
+
+ ret = avp_dev_process_request(avp, &request);
+
+ return ret == 0 ? request.result : ret;
+}
+
+static int
+avp_dev_ctrl_shutdown(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_request request;
+ int ret;
+
+ /* setup a shutdown request */
+ memset(&request, 0, sizeof(request));
+ request.req_id = RTE_AVP_REQ_SHUTDOWN_DEVICE;
+
+ ret = avp_dev_process_request(avp, &request);
+
+ return ret == 0 ? request.result : ret;
+}
+
+/* translate from host mbuf virtual address to guest virtual address */
+static inline void *
+avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
+{
+ return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
+ (uintptr_t)avp->host_mbuf_addr),
+ (uintptr_t)avp->mbuf_addr);
+}
+
+/* translate from host physical address to guest virtual address */
+static void *
+avp_dev_translate_address(struct rte_eth_dev *eth_dev,
+ phys_addr_t host_phys_addr)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_mem_resource *resource;
+ struct rte_avp_memmap_info *info;
+ struct rte_avp_memmap *map;
+ off_t offset;
+ void *addr;
+ unsigned int i;
+
+ addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
+ resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
+ info = (struct rte_avp_memmap_info *)resource->addr;
+
+ offset = 0;
+ for (i = 0; i < info->nb_maps; i++) {
+ /* search all segments looking for a matching address */
+ map = &info->maps[i];
+
+ if ((host_phys_addr >= map->phys_addr) &&
+ (host_phys_addr < (map->phys_addr + map->length))) {
+ /* address is within this segment */
+ offset += (host_phys_addr - map->phys_addr);
+ addr = RTE_PTR_ADD(addr, offset);
+
+ PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
+ host_phys_addr, addr);
+
+ return addr;
+ }
+ offset += map->length;
+ }
+
+ return NULL;
+}
+
+/* verify that the incoming device version is compatible with our version */
+static int
+avp_dev_version_check(uint32_t version)
+{
+ uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
+ uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);
+
+ if (device <= driver) {
+ /* the host driver version is less than or equal to ours */
+ return 0;
+ }
+
+ return 1;
+}
+
+/* verify that memory regions have expected version and validation markers */
+static int
+avp_dev_check_regions(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_avp_memmap_info *memmap;
+ struct rte_avp_device_info *info;
+ struct rte_mem_resource *resource;
+ unsigned int i;
+
+ /* Dump resource info for debug */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ resource = &pci_dev->mem_resource[i];
+ if ((resource->phys_addr == 0) || (resource->len == 0))
+ continue;
+
+ PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
+ i, resource->phys_addr,
+ resource->len, resource->addr);
+
+ switch (i) {
+ case RTE_AVP_PCI_MEMMAP_BAR:
+ memmap = (struct rte_avp_memmap_info *)resource->addr;
+ if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
+ (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
+ PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
+ memmap->magic, memmap->version);
+ return -EINVAL;
+ }
+ break;
+
+ case RTE_AVP_PCI_DEVICE_BAR:
+ info = (struct rte_avp_device_info *)resource->addr;
+ if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
+ avp_dev_version_check(info->version)) {
+ PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
+ info->magic, info->version,
+ AVP_DPDK_DRIVER_VERSION);
+ return -EINVAL;
+ }
+ break;
+
+ case RTE_AVP_PCI_MEMORY_BAR:
+ case RTE_AVP_PCI_MMIO_BAR:
+ if (resource->addr == NULL) {
+ PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
+ i);
+ return -EINVAL;
+ }
+ break;
+
+ case RTE_AVP_PCI_MSIX_BAR:
+ default:
+ /* no validation required */
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+avp_dev_detach(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int ret;
+
+ PMD_DRV_LOG(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "\n",
+ eth_dev->data->port_id, avp->device_id);
+
+ rte_spinlock_lock(&avp->lock);
+
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(NOTICE, "port %u already detached\n",
+ eth_dev->data->port_id);
+ ret = 0;
+ goto unlock;
+ }
+
+ /* shutdown the device first so the host stops sending us packets. */
+ ret = avp_dev_ctrl_shutdown(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to send/recv shutdown to host, ret=%d\n",
+ ret);
+ avp->flags &= ~AVP_F_DETACHED;
+ goto unlock;
+ }
+
+ avp->flags |= AVP_F_DETACHED;
+ rte_wmb();
+
+ /* wait for queues to acknowledge the presence of the detach flag */
+ rte_delay_ms(1);
+
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+ return ret;
+}
+
+static void
+_avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+ struct avp_dev *avp =
+ AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct avp_queue *rxq;
+ uint16_t queue_count;
+ uint16_t remainder;
+
+ rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id];
+
+ /*
+ * Must map all AVP fifos as evenly as possible between the configured
+ * device queues. Each device queue will service a subset of the AVP
+ * fifos. If there is an odd number of device queues the first set of
+ * device queues will get the extra AVP fifos.
+ */
+ queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues;
+ remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues;
+ if (rx_queue_id < remainder) {
+ /* these queues must service one extra FIFO */
+ rxq->queue_base = rx_queue_id * (queue_count + 1);
+ rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1;
+ } else {
+ /* these queues service the regular number of FIFO */
+ rxq->queue_base = ((remainder * (queue_count + 1)) +
+ ((rx_queue_id - remainder) * queue_count));
+ rxq->queue_limit = rxq->queue_base + queue_count - 1;
+ }
+
+ PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
+ rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);
+
+ rxq->queue_id = rxq->queue_base;
+}
+
+static void
+_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_device_info *host_info;
+ void *addr;
+
+ addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
+ host_info = (struct rte_avp_device_info *)addr;
+
+ /*
+ * the transmit direction is not negotiated beyond respecting the max
+ * number of queues because the host can handle arbitrary guest tx
+ * queues (host rx queues).
+ */
+ avp->num_tx_queues = eth_dev->data->nb_tx_queues;
+
+ /*
+ * the receive direction is more restrictive. The host requires a
+ * minimum number of guest rx queues (host tx queues) therefore
+ * negotiate a value that is at least as large as the host minimum
+ * requirement. If the host and guest values are not identical then a
+ * mapping will be established in the receive_queue_setup function.
+ */
+ avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
+ eth_dev->data->nb_rx_queues);
+
+ PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
+ avp->num_tx_queues, avp->num_rx_queues);
+}
+
+static int
+avp_dev_attach(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_device_config config;
+ unsigned int i;
+ int ret;
+
+ PMD_DRV_LOG(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "\n",
+ eth_dev->data->port_id, avp->device_id);
+
+ rte_spinlock_lock(&avp->lock);
+
+ if (!(avp->flags & AVP_F_DETACHED)) {
+ PMD_DRV_LOG(NOTICE, "port %u already attached\n",
+ eth_dev->data->port_id);
+ ret = 0;
+ goto unlock;
+ }
+
+ /*
+ * make sure that the detached flag is set prior to reconfiguring the
+ * queues.
+ */
+ avp->flags |= AVP_F_DETACHED;
+ rte_wmb();
+
+ /*
+ * re-run the device create utility which will parse the new host info
+ * and setup the AVP device queue pointers.
+ */
+ ret = avp_dev_create(AVP_DEV_TO_PCI(eth_dev), eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
+ ret);
+ goto unlock;
+ }
+
+ if (avp->flags & AVP_F_CONFIGURED) {
+ /*
+ * Update the receive queue mapping to handle cases where the
+ * source and destination hosts have different queue
+ * requirements. As long as the DETACHED flag is asserted the
+ * queue table should not be referenced so it should be safe to
+ * update it.
+ */
+ _avp_set_queue_counts(eth_dev);
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+ _avp_set_rx_queue_mappings(eth_dev, i);
+
+ /*
+ * Update the host with our config details so that it knows the
+ * device is active.
+ */
+ memset(&config, 0, sizeof(config));
+ config.device_id = avp->device_id;
+ config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
+ config.driver_version = AVP_DPDK_DRIVER_VERSION;
+ config.features = avp->features;
+ config.num_tx_queues = avp->num_tx_queues;
+ config.num_rx_queues = avp->num_rx_queues;
+ config.if_up = !!(avp->flags & AVP_F_LINKUP);
+
+ ret = avp_dev_ctrl_set_config(eth_dev, &config);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
+ ret);
+ goto unlock;
+ }
+ }
+
+ rte_wmb();
+ avp->flags &= ~AVP_F_DETACHED;
+
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+ return ret;
+}
+
+static void
+avp_dev_interrupt_handler(void *data)
+{
+ struct rte_eth_dev *eth_dev = data;
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ uint32_t status, value;
+ int ret;
+
+ if (registers == NULL)
+ rte_panic("no mapped MMIO register space\n");
+
+ /* read the interrupt status register
+ * note: this register clears on read so all raised interrupts must be
+ * handled or remembered for later processing
+ */
+ status = AVP_READ32(
+ RTE_PTR_ADD(registers,
+ RTE_AVP_INTERRUPT_STATUS_OFFSET));
+
+ if (status & RTE_AVP_MIGRATION_INTERRUPT_MASK) {
+ /* handle interrupt based on current status */
+ value = AVP_READ32(
+ RTE_PTR_ADD(registers,
+ RTE_AVP_MIGRATION_STATUS_OFFSET));
+ switch (value) {
+ case RTE_AVP_MIGRATION_DETACHED:
+ ret = avp_dev_detach(eth_dev);
+ break;
+ case RTE_AVP_MIGRATION_ATTACHED:
+ ret = avp_dev_attach(eth_dev);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unexpected migration status, status=%u\n",
+ value);
+ ret = -EINVAL;
+ }
+
+ /* acknowledge the request by writing out our current status */
+ value = (ret == 0 ? value : RTE_AVP_MIGRATION_ERROR);
+ AVP_WRITE32(value,
+ RTE_PTR_ADD(registers,
+ RTE_AVP_MIGRATION_ACK_OFFSET));
+
+ PMD_DRV_LOG(NOTICE, "AVP migration interrupt handled\n");
+ }
+
+ if (status & ~RTE_AVP_MIGRATION_INTERRUPT_MASK)
+ PMD_DRV_LOG(WARNING, "AVP unexpected interrupt, status=0x%08x\n",
+ status);
+
+ /* re-enable UIO interrupt handling */
+ ret = rte_intr_enable(&pci_dev->intr_handle);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
+ ret);
+ /* continue */
+ }
+}
+
+static int
+avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ int ret;
+
+ if (registers == NULL)
+ return -EINVAL;
+
+ /* enable UIO interrupt handling */
+ ret = rte_intr_enable(&pci_dev->intr_handle);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ /* inform the device that all interrupts are enabled */
+ AVP_WRITE32(RTE_AVP_APP_INTERRUPTS_MASK,
+ RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
+
+ return 0;
+}
+
+static int
+avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ int ret;
+
+ if (registers == NULL)
+ return 0;
+
+ /* inform the device that all interrupts are disabled */
+ AVP_WRITE32(RTE_AVP_NO_INTERRUPTS_MASK,
+ RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
+
+ /* enable UIO interrupt handling */
+ ret = rte_intr_disable(&pci_dev->intr_handle);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ int ret;
+
+ /* register a callback handler with UIO for interrupt notifications */
+ ret = rte_intr_callback_register(&pci_dev->intr_handle,
+ avp_dev_interrupt_handler,
+ (void *)eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to register UIO interrupt callback, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ /* enable interrupt processing */
+ return avp_dev_enable_interrupts(eth_dev);
+}
+
+static int
+avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ uint32_t value;
+
+ if (registers == NULL)
+ return 0;
+
+ value = AVP_READ32(RTE_PTR_ADD(registers,
+ RTE_AVP_MIGRATION_STATUS_OFFSET));
+ if (value == RTE_AVP_MIGRATION_DETACHED) {
+ /* migration is in progress; ack it if we have not already */
+ AVP_WRITE32(value,
+ RTE_PTR_ADD(registers,
+ RTE_AVP_MIGRATION_ACK_OFFSET));
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * create a AVP device using the supplied device info by first translating it
+ * to guest address space(s).
+ */
+static int
+avp_dev_create(struct rte_pci_device *pci_dev,
+ struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_device_info *host_info;
+ struct rte_mem_resource *resource;
+ unsigned int i;
+
+ resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
+ if (resource->addr == NULL) {
+ PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
+ RTE_AVP_PCI_DEVICE_BAR);
+ return -EFAULT;
+ }
+ host_info = (struct rte_avp_device_info *)resource->addr;
+
+ if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
+ avp_dev_version_check(host_info->version)) {
+ PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
+ host_info->magic, host_info->version,
+ AVP_DPDK_DRIVER_VERSION);
+ return -EINVAL;
+ }
+
+ PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
+ RTE_AVP_GET_RELEASE_VERSION(host_info->version),
+ RTE_AVP_GET_MAJOR_VERSION(host_info->version),
+ RTE_AVP_GET_MINOR_VERSION(host_info->version));
+
+ PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
+ host_info->min_tx_queues, host_info->max_tx_queues);
+ PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
+ host_info->min_rx_queues, host_info->max_rx_queues);
+ PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
+ host_info->features);
+
+ if (avp->magic != AVP_ETHDEV_MAGIC) {
+ /*
+ * First time initialization (i.e., not during a VM
+ * migration)
+ */
+ memset(avp, 0, sizeof(*avp));
+ avp->magic = AVP_ETHDEV_MAGIC;
+ avp->dev_data = eth_dev->data;
+ avp->port_id = eth_dev->data->port_id;
+ avp->host_mbuf_size = host_info->mbuf_size;
+ avp->host_features = host_info->features;
+ rte_spinlock_init(&avp->lock);
+ memcpy(&avp->ethaddr.addr_bytes[0],
+ host_info->ethaddr, ETHER_ADDR_LEN);
+ /* adjust max values to not exceed our max */
+ avp->max_tx_queues =
+ RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
+ avp->max_rx_queues =
+ RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
+ } else {
+ /* Re-attaching during migration */
+
+ /* TODO... requires validation of host values */
+ if ((host_info->features & avp->features) != avp->features) {
+ PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
+ avp->features, host_info->features);
+ /* this should not be possible; continue for now */
+ }
+ }
+
+ /* the device id is allowed to change over migrations */
+ avp->device_id = host_info->device_id;
+
+ /* translate incoming host addresses to guest address space */
+ PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
+ host_info->tx_phys);
+ PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
+ host_info->alloc_phys);
+ for (i = 0; i < avp->max_tx_queues; i++) {
+ avp->tx_q[i] = avp_dev_translate_address(eth_dev,
+ host_info->tx_phys + (i * host_info->tx_size));
+
+ avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
+ host_info->alloc_phys + (i * host_info->alloc_size));
+ }
+
+ PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
+ host_info->rx_phys);
+ PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
+ host_info->free_phys);
+ for (i = 0; i < avp->max_rx_queues; i++) {
+ avp->rx_q[i] = avp_dev_translate_address(eth_dev,
+ host_info->rx_phys + (i * host_info->rx_size));
+ avp->free_q[i] = avp_dev_translate_address(eth_dev,
+ host_info->free_phys + (i * host_info->free_size));
+ }
+
+ PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
+ host_info->req_phys);
+ PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
+ host_info->resp_phys);
+ PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
+ host_info->sync_phys);
+ PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
+ host_info->mbuf_phys);
+ avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
+ avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
+ avp->sync_addr =
+ avp_dev_translate_address(eth_dev, host_info->sync_phys);
+ avp->mbuf_addr =
+ avp_dev_translate_address(eth_dev, host_info->mbuf_phys);
+
+ /*
+ * store the host mbuf virtual address so that we can calculate
+ * relative offsets for each mbuf as they are processed
+ */
+ avp->host_mbuf_addr = host_info->mbuf_va;
+ avp->host_sync_addr = host_info->sync_va;
+
+ /*
+ * store the maximum packet length that is supported by the host.
+ */
+ avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
+ PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
+ host_info->max_rx_pkt_len);
+
+ return 0;
+}
+
+/*
+ * This function is based on probe() function in avp_pci.c
+ * It returns 0 on success.
+ */
+static int
+eth_avp_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp =
+ AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev;
+ int ret;
+
+ pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ eth_dev->dev_ops = &avp_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &avp_recv_pkts;
+ eth_dev->tx_pkt_burst = &avp_xmit_pkts;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ /*
+ * no setup required on secondary processes. All data is saved
+ * in dev_private by the primary process. All resource should
+ * be mapped to the same virtual address so all pointers should
+ * be valid.
+ */
+ if (eth_dev->data->scattered_rx) {
+ PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
+ eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
+ eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
+ }
+ return 0;
+ }
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
+ /* Check current migration status */
+ if (avp_dev_migration_pending(eth_dev)) {
+ PMD_DRV_LOG(ERR, "VM live migration operation in progress\n");
+ return -EBUSY;
+ }
+
+ /* Check BAR resources */
+ ret = avp_dev_check_regions(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ /* Enable interrupts */
+ ret = avp_dev_setup_interrupts(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to enable interrupts, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Handle each subtype */
+ ret = avp_dev_create(pci_dev, eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
+ ETHER_ADDR_LEN);
+ return -ENOMEM;
+ }
+
+ /* Get a mac from device config */
+ ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);
+
+ return 0;
+}
+
+static int
+eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ int ret;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (eth_dev->data == NULL)
+ return 0;
+
+ ret = avp_dev_disable_interrupts(eth_dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to disable interrupts, ret=%d\n", ret);
+ return ret;
+ }
+
+ if (eth_dev->data->mac_addrs != NULL) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ }
+
+ return 0;
+}
+
+static int
+eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ eth_dev = rte_eth_dev_pci_allocate(pci_dev,
+ sizeof(struct avp_adapter));
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ ret = eth_avp_dev_init(eth_dev);
+ if (ret)
+ rte_eth_dev_pci_release(eth_dev);
+
+ return ret;
+}
+
+static int
+eth_avp_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ eth_avp_dev_uninit);
+}
+
+static struct rte_pci_driver rte_avp_pmd = {
+ .id_table = pci_id_avp_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_avp_pci_probe,
+ .remove = eth_avp_pci_remove,
+};
+
+static int
+avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
+ struct avp_dev *avp)
+{
+ unsigned int max_rx_pkt_len;
+
+ max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
+ (max_rx_pkt_len > avp->host_mbuf_size)) {
+ /*
+ * If the guest MTU is greater than either the host or guest
+ * buffers then chained mbufs have to be enabled in the TX
+ * direction. It is assumed that the application will not need
+ * to send packets larger than their max_rx_pkt_len (MRU).
+ */
+ return 1;
+ }
+
+ if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
+ (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
+ /*
+ * If the host MRU is greater than its own mbuf size or the
+ * guest mbuf size then chained mbufs have to be enabled in the
+ * RX direction.
+ */
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *pool)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct avp_queue *rxq;
+
+ if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
+ rx_queue_id, eth_dev->data->nb_rx_queues);
+ return -EINVAL;
+ }
+
+ /* Save mbuf pool pointer */
+ avp->pool = pool;
+
+ /* Save the local mbuf size */
+ mbp_priv = rte_mempool_get_priv(pool);
+ avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
+ avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;
+
+ if (avp_dev_enable_scattered(eth_dev, avp)) {
+ if (!eth_dev->data->scattered_rx) {
+ PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
+ eth_dev->data->scattered_rx = 1;
+ eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
+ eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
+ }
+ }
+
+ PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
+ avp->max_rx_pkt_len,
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ avp->host_mbuf_size,
+ avp->guest_mbuf_size);
+
+ /* allocate a queue object */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
+ return -ENOMEM;
+ }
+
+ /* save back pointers to AVP and Ethernet devices */
+ rxq->avp = avp;
+ rxq->dev_data = eth_dev->data;
+ eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq;
+
+ /* setup the queue receive mapping for the current queue. */
+ _avp_set_rx_queue_mappings(eth_dev, rx_queue_id);
+
+ PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);
+
+ (void)nb_rx_desc;
+ (void)rx_conf;
+ return 0;
+}
+
+static int
+avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct avp_queue *txq;
+
+ if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
+ PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
+ tx_queue_id, eth_dev->data->nb_tx_queues);
+ return -EINVAL;
+ }
+
+ /* allocate a queue object */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
+ return -ENOMEM;
+ }
+
+ /* only the configured set of transmit queues are used */
+ txq->queue_id = tx_queue_id;
+ txq->queue_base = tx_queue_id;
+ txq->queue_limit = tx_queue_id;
+
+ /* save back pointers to AVP and Ethernet devices */
+ txq->avp = avp;
+ txq->dev_data = eth_dev->data;
+ eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;
+
+ PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);
+
+ (void)nb_tx_desc;
+ (void)tx_conf;
+ return 0;
+}
+
+static inline int
+_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
+{
+ uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
+ uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
+ return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
+}
+
+static inline int
+_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
+{
+ struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+ if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
+ /* allow all packets destined to our address */
+ return 0;
+ }
+
+ if (likely(is_broadcast_ether_addr(&eth->d_addr))) {
+ /* allow all broadcast packets */
+ return 0;
+ }
+
+ if (likely(is_multicast_ether_addr(&eth->d_addr))) {
+ /* allow all multicast packets */
+ return 0;
+ }
+
+ if (avp->flags & AVP_F_PROMISC) {
+ /* allow all packets when in promiscuous mode */
+ return 0;
+ }
+
+ return -1;
+}
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
+static inline void
+__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)
+{
+ struct rte_avp_desc *first_buf;
+ struct rte_avp_desc *pkt_buf;
+ unsigned int pkt_len;
+ unsigned int nb_segs;
+ void *pkt_data;
+ unsigned int i;
+
+ first_buf = avp_dev_translate_buffer(avp, buf);
+
+ i = 0;
+ pkt_len = 0;
+ nb_segs = first_buf->nb_segs;
+ do {
+ /* Adjust pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ if (pkt_buf == NULL)
+ rte_panic("bad buffer: segment %u has an invalid address %p\n",
+ i, buf);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ if (pkt_data == NULL)
+ rte_panic("bad buffer: segment %u has a NULL data pointer\n",
+ i);
+ if (pkt_buf->data_len == 0)
+ rte_panic("bad buffer: segment %u has 0 data length\n",
+ i);
+ pkt_len += pkt_buf->data_len;
+ nb_segs--;
+ i++;
+
+ } while (nb_segs && (buf = pkt_buf->next) != NULL);
+
+ if (nb_segs != 0)
+ rte_panic("bad buffer: expected %u segments found %u\n",
+ first_buf->nb_segs, (first_buf->nb_segs - nb_segs));
+ if (pkt_len != first_buf->pkt_len)
+ rte_panic("bad buffer: expected length %u found %u\n",
+ first_buf->pkt_len, pkt_len);
+}
+
+#define avp_dev_buffer_sanity_check(a, b) \
+ __avp_dev_buffer_sanity_check((a), (b))
+
+#else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */
+
+#define avp_dev_buffer_sanity_check(a, b) do {} while (0)
+
+#endif
+
+/*
+ * Copy a host buffer chain to a set of mbufs. This function assumes that
+ * there exactly the required number of mbufs to copy all source bytes.
+ */
+static inline struct rte_mbuf *
+avp_dev_copy_from_buffers(struct avp_dev *avp,
+ struct rte_avp_desc *buf,
+ struct rte_mbuf **mbufs,
+ unsigned int count)
+{
+ struct rte_mbuf *m_previous = NULL;
+ struct rte_avp_desc *pkt_buf;
+ unsigned int total_length = 0;
+ unsigned int copy_length;
+ unsigned int src_offset;
+ struct rte_mbuf *m;
+ uint16_t ol_flags;
+ uint16_t vlan_tci;
+ void *pkt_data;
+ unsigned int i;
+
+ avp_dev_buffer_sanity_check(avp, buf);
+
+ /* setup the first source buffer */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ total_length = pkt_buf->pkt_len;
+ src_offset = 0;
+
+ if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
+ ol_flags = PKT_RX_VLAN_PKT;
+ vlan_tci = pkt_buf->vlan_tci;
+ } else {
+ ol_flags = 0;
+ vlan_tci = 0;
+ }
+
+ for (i = 0; (i < count) && (buf != NULL); i++) {
+ /* fill each destination buffer */
+ m = mbufs[i];
+
+ if (m_previous != NULL)
+ m_previous->next = m;
+
+ m_previous = m;
+
+ do {
+ /*
+ * Copy as many source buffers as will fit in the
+ * destination buffer.
+ */
+ copy_length = RTE_MIN((avp->guest_mbuf_size -
+ rte_pktmbuf_data_len(m)),
+ (pkt_buf->data_len -
+ src_offset));
+ rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
+ rte_pktmbuf_data_len(m)),
+ RTE_PTR_ADD(pkt_data, src_offset),
+ copy_length);
+ rte_pktmbuf_data_len(m) += copy_length;
+ src_offset += copy_length;
+
+ if (likely(src_offset == pkt_buf->data_len)) {
+ /* need a new source buffer */
+ buf = pkt_buf->next;
+ if (buf != NULL) {
+ pkt_buf = avp_dev_translate_buffer(
+ avp, buf);
+ pkt_data = avp_dev_translate_buffer(
+ avp, pkt_buf->data);
+ src_offset = 0;
+ }
+ }
+
+ if (unlikely(rte_pktmbuf_data_len(m) ==
+ avp->guest_mbuf_size)) {
+ /* need a new destination mbuf */
+ break;
+ }
+
+ } while (buf != NULL);
+ }
+
+ m = mbufs[0];
+ m->ol_flags = ol_flags;
+ m->nb_segs = count;
+ rte_pktmbuf_pkt_len(m) = total_length;
+ m->vlan_tci = vlan_tci;
+
+ __rte_mbuf_sanity_check(m, 1);
+
+ return m;
+}
+
+static uint16_t
+avp_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avp_queue *rxq = (struct avp_queue *)rx_queue;
+ struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
+ struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];
+ struct avp_dev *avp = rxq->avp;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_fifo *free_q;
+ struct rte_avp_fifo *rx_q;
+ struct rte_avp_desc *buf;
+ unsigned int count, avail, n;
+ unsigned int guest_mbuf_size;
+ struct rte_mbuf *m;
+ unsigned int required;
+ unsigned int buf_len;
+ unsigned int port_id;
+ unsigned int i;
+
+ if (unlikely(avp->flags & AVP_F_DETACHED)) {
+ /* VM live migration in progress */
+ return 0;
+ }
+
+ guest_mbuf_size = avp->guest_mbuf_size;
+ port_id = avp->port_id;
+ rx_q = avp->rx_q[rxq->queue_id];
+ free_q = avp->free_q[rxq->queue_id];
+
+ /* setup next queue to service */
+ rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
+ (rxq->queue_id + 1) : rxq->queue_base;
+
+ /* determine how many slots are available in the free queue */
+ count = avp_fifo_free_count(free_q);
+
+ /* determine how many packets are available in the rx queue */
+ avail = avp_fifo_count(rx_q);
+
+ /* determine how many packets can be received */
+ count = RTE_MIN(count, avail);
+ count = RTE_MIN(count, nb_pkts);
+ count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);
+
+ if (unlikely(count == 0)) {
+ /* no free buffers, or no buffers on the rx queue */
+ return 0;
+ }
+
+ /* retrieve pending packets */
+ n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
+ PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
+ count, rx_q);
+
+ count = 0;
+ for (i = 0; i < n; i++) {
+ /* prefetch next entry while processing current one */
+ if (i + 1 < n) {
+ pkt_buf = avp_dev_translate_buffer(avp,
+ avp_bufs[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+ buf = avp_bufs[i];
+
+ /* Peek into the first buffer to determine the total length */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ buf_len = pkt_buf->pkt_len;
+
+ /* Allocate enough mbufs to receive the entire packet */
+ required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
+ if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {
+ rxq->dev_data->rx_mbuf_alloc_failed++;
+ continue;
+ }
+
+ /* Copy the data from the buffers to our mbufs */
+ m = avp_dev_copy_from_buffers(avp, buf, mbufs, required);
+
+ /* finalize mbuf */
+ m->port = port_id;
+
+ if (_avp_mac_filter(avp, m) != 0) {
+ /* silently discard packets not destined to our MAC */
+ rte_pktmbuf_free(m);
+ continue;
+ }
+
+ /* return new mbuf to caller */
+ rx_pkts[count++] = m;
+ rxq->bytes += buf_len;
+ }
+
+ rxq->packets += count;
+
+ /* return the buffers to the free queue */
+ avp_fifo_put(free_q, (void **)&avp_bufs[0], n);
+
+ return count;
+}
+
+
+static uint16_t
+avp_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avp_queue *rxq = (struct avp_queue *)rx_queue;
+ struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
+ struct avp_dev *avp = rxq->avp;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_fifo *free_q;
+ struct rte_avp_fifo *rx_q;
+ unsigned int count, avail, n;
+ unsigned int pkt_len;
+ struct rte_mbuf *m;
+ char *pkt_data;
+ unsigned int i;
+
+ if (unlikely(avp->flags & AVP_F_DETACHED)) {
+ /* VM live migration in progress */
+ return 0;
+ }
+
+ rx_q = avp->rx_q[rxq->queue_id];
+ free_q = avp->free_q[rxq->queue_id];
+
+ /* setup next queue to service */
+ rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
+ (rxq->queue_id + 1) : rxq->queue_base;
+
+ /* determine how many slots are available in the free queue */
+ count = avp_fifo_free_count(free_q);
+
+ /* determine how many packets are available in the rx queue */
+ avail = avp_fifo_count(rx_q);
+
+ /* determine how many packets can be received */
+ count = RTE_MIN(count, avail);
+ count = RTE_MIN(count, nb_pkts);
+ count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);
+
+ if (unlikely(count == 0)) {
+ /* no free buffers, or no buffers on the rx queue */
+ return 0;
+ }
+
+ /* retrieve pending packets */
+ n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
+ PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
+ count, rx_q);
+
+ count = 0;
+ for (i = 0; i < n; i++) {
+ /* prefetch next entry while processing current one */
+ if (i < n - 1) {
+ pkt_buf = avp_dev_translate_buffer(avp,
+ avp_bufs[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+
+ /* Adjust host pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ pkt_len = pkt_buf->pkt_len;
+
+ if (unlikely((pkt_len > avp->guest_mbuf_size) ||
+ (pkt_buf->nb_segs > 1))) {
+ /*
+ * application should be using the scattered receive
+ * function
+ */
+ rxq->errors++;
+ continue;
+ }
+
+ /* process each packet to be transmitted */
+ m = rte_pktmbuf_alloc(avp->pool);
+ if (unlikely(m == NULL)) {
+ rxq->dev_data->rx_mbuf_alloc_failed++;
+ continue;
+ }
+
+ /* copy data out of the host buffer to our buffer */
+ m->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);
+
+ /* initialize the local mbuf */
+ rte_pktmbuf_data_len(m) = pkt_len;
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+ m->port = avp->port_id;
+
+ if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
+ m->ol_flags = PKT_RX_VLAN_PKT;
+ m->vlan_tci = pkt_buf->vlan_tci;
+ }
+
+ if (_avp_mac_filter(avp, m) != 0) {
+ /* silently discard packets not destined to our MAC */
+ rte_pktmbuf_free(m);
+ continue;
+ }
+
+ /* return new mbuf to caller */
+ rx_pkts[count++] = m;
+ rxq->bytes += pkt_len;
+ }
+
+ rxq->packets += count;
+
+ /* return the buffers to the free queue */
+ avp_fifo_put(free_q, (void **)&avp_bufs[0], n);
+
+ return count;
+}
+
+/*
+ * Copy a chained mbuf to a set of host buffers. This function assumes that
+ * there are sufficient destination buffers to contain the entire source
+ * packet.
+ */
+static inline uint16_t
+avp_dev_copy_to_buffers(struct avp_dev *avp,
+ struct rte_mbuf *mbuf,
+ struct rte_avp_desc **buffers,
+ unsigned int count)
+{
+ struct rte_avp_desc *previous_buf = NULL;
+ struct rte_avp_desc *first_buf = NULL;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_desc *buf;
+ size_t total_length;
+ struct rte_mbuf *m;
+ size_t copy_length;
+ size_t src_offset;
+ char *pkt_data;
+ unsigned int i;
+
+ __rte_mbuf_sanity_check(mbuf, 1);
+
+ m = mbuf;
+ src_offset = 0;
+ total_length = rte_pktmbuf_pkt_len(m);
+ for (i = 0; (i < count) && (m != NULL); i++) {
+ /* fill each destination buffer */
+ buf = buffers[i];
+
+ if (i < count - 1) {
+ /* prefetch next entry while processing this one */
+ pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+
+ /* Adjust pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+
+ /* setup the buffer chain */
+ if (previous_buf != NULL)
+ previous_buf->next = buf;
+ else
+ first_buf = pkt_buf;
+
+ previous_buf = pkt_buf;
+
+ do {
+ /*
+ * copy as many source mbuf segments as will fit in the
+ * destination buffer.
+ */
+ copy_length = RTE_MIN((avp->host_mbuf_size -
+ pkt_buf->data_len),
+ (rte_pktmbuf_data_len(m) -
+ src_offset));
+ rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len),
+ RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
+ src_offset),
+ copy_length);
+ pkt_buf->data_len += copy_length;
+ src_offset += copy_length;
+
+ if (likely(src_offset == rte_pktmbuf_data_len(m))) {
+ /* need a new source buffer */
+ m = m->next;
+ src_offset = 0;
+ }
+
+ if (unlikely(pkt_buf->data_len ==
+ avp->host_mbuf_size)) {
+ /* need a new destination buffer */
+ break;
+ }
+
+ } while (m != NULL);
+ }
+
+ first_buf->nb_segs = count;
+ first_buf->pkt_len = total_length;
+
+ if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
+ first_buf->vlan_tci = mbuf->vlan_tci;
+ }
+
+ avp_dev_buffer_sanity_check(avp, buffers[0]);
+
+ return total_length;
+}
+
+
+static uint16_t
+avp_xmit_scattered_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST *
+ RTE_AVP_MAX_MBUF_SEGMENTS)];
+ struct avp_queue *txq = (struct avp_queue *)tx_queue;
+ struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST];
+ struct avp_dev *avp = txq->avp;
+ struct rte_avp_fifo *alloc_q;
+ struct rte_avp_fifo *tx_q;
+ unsigned int count, avail, n;
+ unsigned int orig_nb_pkts;
+ struct rte_mbuf *m;
+ unsigned int required;
+ unsigned int segments;
+ unsigned int tx_bytes;
+ unsigned int i;
+
+ orig_nb_pkts = nb_pkts;
+ if (unlikely(avp->flags & AVP_F_DETACHED)) {
+ /* VM live migration in progress */
+ /* TODO ... buffer for X packets then drop? */
+ txq->errors += nb_pkts;
+ return 0;
+ }
+
+ tx_q = avp->tx_q[txq->queue_id];
+ alloc_q = avp->alloc_q[txq->queue_id];
+
+ /* limit the number of transmitted packets to the max burst size */
+ if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
+ nb_pkts = AVP_MAX_TX_BURST;
+
+ /* determine how many buffers are available to copy into */
+ avail = avp_fifo_count(alloc_q);
+ if (unlikely(avail > (AVP_MAX_TX_BURST *
+ RTE_AVP_MAX_MBUF_SEGMENTS)))
+ avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS;
+
+ /* determine how many slots are available in the transmit queue */
+ count = avp_fifo_free_count(tx_q);
+
+ /* determine how many packets can be sent */
+ nb_pkts = RTE_MIN(count, nb_pkts);
+
+ /* determine how many packets will fit in the available buffers */
+ count = 0;
+ segments = 0;
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ if (likely(i < (unsigned int)nb_pkts - 1)) {
+ /* prefetch next entry while processing this one */
+ rte_prefetch0(tx_pkts[i + 1]);
+ }
+ required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
+ avp->host_mbuf_size;
+
+ if (unlikely((required == 0) ||
+ (required > RTE_AVP_MAX_MBUF_SEGMENTS)))
+ break;
+ else if (unlikely(required + segments > avail))
+ break;
+ segments += required;
+ count++;
+ }
+ nb_pkts = count;
+
+ if (unlikely(nb_pkts == 0)) {
+ /* no available buffers, or no space on the tx queue */
+ txq->errors += orig_nb_pkts;
+ return 0;
+ }
+
+ PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
+ nb_pkts, tx_q);
+
+ /* retrieve sufficient send buffers */
+ n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments);
+ if (unlikely(n != segments)) {
+ PMD_TX_LOG(DEBUG, "Failed to allocate buffers "
+ "n=%u, segments=%u, orig=%u\n",
+ n, segments, orig_nb_pkts);
+ txq->errors += orig_nb_pkts;
+ return 0;
+ }
+
+ tx_bytes = 0;
+ count = 0;
+ for (i = 0; i < nb_pkts; i++) {
+ /* process each packet to be transmitted */
+ m = tx_pkts[i];
+
+ /* determine how many buffers are required for this packet */
+ required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
+ avp->host_mbuf_size;
+
+ tx_bytes += avp_dev_copy_to_buffers(avp, m,
+ &avp_bufs[count], required);
+ tx_bufs[i] = avp_bufs[count];
+ count += required;
+
+ /* free the original mbuf */
+ rte_pktmbuf_free(m);
+ }
+
+ txq->packets += nb_pkts;
+ txq->bytes += tx_bytes;
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
+ for (i = 0; i < nb_pkts; i++)
+ avp_dev_buffer_sanity_check(avp, tx_bufs[i]);
+#endif
+
+ /* send the packets */
+ n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts);
+ if (unlikely(n != orig_nb_pkts))
+ txq->errors += (orig_nb_pkts - n);
+
+ return n;
+}
+
+
+static uint16_t
+avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct avp_queue *txq = (struct avp_queue *)tx_queue;
+ struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST];
+ struct avp_dev *avp = txq->avp;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_fifo *alloc_q;
+ struct rte_avp_fifo *tx_q;
+ unsigned int count, avail, n;
+ struct rte_mbuf *m;
+ unsigned int pkt_len;
+ unsigned int tx_bytes;
+ char *pkt_data;
+ unsigned int i;
+
+ if (unlikely(avp->flags & AVP_F_DETACHED)) {
+ /* VM live migration in progress */
+ /* TODO ... buffer for X packets then drop?! */
+ txq->errors++;
+ return 0;
+ }
+
+ tx_q = avp->tx_q[txq->queue_id];
+ alloc_q = avp->alloc_q[txq->queue_id];
+
+ /* limit the number of transmitted packets to the max burst size */
+ if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
+ nb_pkts = AVP_MAX_TX_BURST;
+
+ /* determine how many buffers are available to copy into */
+ avail = avp_fifo_count(alloc_q);
+
+ /* determine how many slots are available in the transmit queue */
+ count = avp_fifo_free_count(tx_q);
+
+ /* determine how many packets can be sent */
+ count = RTE_MIN(count, avail);
+ count = RTE_MIN(count, nb_pkts);
+
+ if (unlikely(count == 0)) {
+ /* no available buffers, or no space on the tx queue */
+ txq->errors += nb_pkts;
+ return 0;
+ }
+
+ PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
+ count, tx_q);
+
+ /* retrieve sufficient send buffers */
+ n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count);
+ if (unlikely(n != count)) {
+ txq->errors++;
+ return 0;
+ }
+
+ tx_bytes = 0;
+ for (i = 0; i < count; i++) {
+ /* prefetch next entry while processing the current one */
+ if (i < count - 1) {
+ pkt_buf = avp_dev_translate_buffer(avp,
+ avp_bufs[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+
+ /* process each packet to be transmitted */
+ m = tx_pkts[i];
+
+ /* Adjust pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ pkt_len = rte_pktmbuf_pkt_len(m);
+
+ if (unlikely((pkt_len > avp->guest_mbuf_size) ||
+ (pkt_len > avp->host_mbuf_size))) {
+ /*
+ * application should be using the scattered transmit
+ * function; send it truncated to avoid the performance
+ * hit of having to manage returning the already
+ * allocated buffer to the free list. This should not
+ * happen since the application should have set the
+ * max_rx_pkt_len based on its MTU and it should be
+ * policing its own packet sizes.
+ */
+ txq->errors++;
+ pkt_len = RTE_MIN(avp->guest_mbuf_size,
+ avp->host_mbuf_size);
+ }
+
+ /* copy data out of our mbuf and into the AVP buffer */
+ rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len);
+ pkt_buf->pkt_len = pkt_len;
+ pkt_buf->data_len = pkt_len;
+ pkt_buf->nb_segs = 1;
+ pkt_buf->next = NULL;
+
+ if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
+ pkt_buf->vlan_tci = m->vlan_tci;
+ }
+
+ tx_bytes += pkt_len;
+
+ /* free the original mbuf */
+ rte_pktmbuf_free(m);
+ }
+
+ txq->packets += count;
+ txq->bytes += tx_bytes;
+
+ /* send the packets */
+ n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count);
+
+ return n;
+}
+
+static void
+avp_dev_rx_queue_release(void *rx_queue)
+{
+ struct avp_queue *rxq = (struct avp_queue *)rx_queue;
+ struct avp_dev *avp = rxq->avp;
+ struct rte_eth_dev_data *data = avp->dev_data;
+ unsigned int i;
+
+ for (i = 0; i < avp->num_rx_queues; i++) {
+ if (data->rx_queues[i] == rxq)
+ data->rx_queues[i] = NULL;
+ }
+}
+
+static void
+avp_dev_tx_queue_release(void *tx_queue)
+{
+ struct avp_queue *txq = (struct avp_queue *)tx_queue;
+ struct avp_dev *avp = txq->avp;
+ struct rte_eth_dev_data *data = avp->dev_data;
+ unsigned int i;
+
+ for (i = 0; i < avp->num_tx_queues; i++) {
+ if (data->tx_queues[i] == txq)
+ data->tx_queues[i] = NULL;
+ }
+}
+
+static int
+avp_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_device_info *host_info;
+ struct rte_avp_device_config config;
+ int mask = 0;
+ void *addr;
+ int ret;
+
+ rte_spinlock_lock(&avp->lock);
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ ret = -ENOTSUP;
+ goto unlock;
+ }
+
+ addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
+ host_info = (struct rte_avp_device_info *)addr;
+
+ /* Setup required number of queues */
+ _avp_set_queue_counts(eth_dev);
+
+ mask = (ETH_VLAN_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK);
+ avp_vlan_offload_set(eth_dev, mask);
+
+ /* update device config */
+ memset(&config, 0, sizeof(config));
+ config.device_id = host_info->device_id;
+ config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
+ config.driver_version = AVP_DPDK_DRIVER_VERSION;
+ config.features = avp->features;
+ config.num_tx_queues = avp->num_tx_queues;
+ config.num_rx_queues = avp->num_rx_queues;
+
+ ret = avp_dev_ctrl_set_config(eth_dev, &config);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
+ ret);
+ goto unlock;
+ }
+
+ avp->flags |= AVP_F_CONFIGURED;
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+ return ret;
+}
+
+static int
+avp_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&avp->lock);
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ ret = -ENOTSUP;
+ goto unlock;
+ }
+
+ /* disable features that we do not support */
+ eth_dev->data->dev_conf.rxmode.hw_ip_checksum = 0;
+ eth_dev->data->dev_conf.rxmode.hw_vlan_filter = 0;
+ eth_dev->data->dev_conf.rxmode.hw_vlan_extend = 0;
+ eth_dev->data->dev_conf.rxmode.hw_strip_crc = 0;
+
+ /* update link state */
+ ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
+ ret);
+ goto unlock;
+ }
+
+ /* remember current link state */
+ avp->flags |= AVP_F_LINKUP;
+
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+ return ret;
+}
+
+static void
+avp_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&avp->lock);
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ goto unlock;
+ }
+
+ /* remember current link state */
+ avp->flags &= ~AVP_F_LINKUP;
+
+ /* update link state */
+ ret = avp_dev_ctrl_set_link_state(eth_dev, 0);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
+ ret);
+ }
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+}
+
+static void
+avp_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&avp->lock);
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ goto unlock;
+ }
+
+ /* remember current link state */
+ avp->flags &= ~AVP_F_LINKUP;
+ avp->flags &= ~AVP_F_CONFIGURED;
+
+ ret = avp_dev_disable_interrupts(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to disable interrupts\n");
+ /* continue */
+ }
+
+ /* update device state */
+ ret = avp_dev_ctrl_shutdown(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Device shutdown failed by host, ret=%d\n",
+ ret);
+ /* continue */
+ }
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+}
+
+static int
+avp_dev_link_update(struct rte_eth_dev *eth_dev,
+ __rte_unused int wait_to_complete)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_eth_link *link = &eth_dev->data->dev_link;
+
+ link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_status = !!(avp->flags & AVP_F_LINKUP);
+
+ return -1;
+}
+
+static void
+avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ rte_spinlock_lock(&avp->lock);
+ if ((avp->flags & AVP_F_PROMISC) == 0) {
+ avp->flags |= AVP_F_PROMISC;
+ PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
+ eth_dev->data->port_id);
+ }
+ rte_spinlock_unlock(&avp->lock);
+}
+
+static void
+avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ rte_spinlock_lock(&avp->lock);
+ if ((avp->flags & AVP_F_PROMISC) != 0) {
+ avp->flags &= ~AVP_F_PROMISC;
+ PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
+ eth_dev->data->port_id);
+ }
+ rte_spinlock_unlock(&avp->lock);
+}
+
+static void
+avp_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ dev_info->driver_name = "rte_avp_pmd";
+ dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->max_rx_queues = avp->max_rx_queues;
+ dev_info->max_tx_queues = avp->max_tx_queues;
+ dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
+ dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
+ dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
+ if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+ }
+}
+
+static void
+avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
+ if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
+ else
+ avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
+ } else {
+ PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
+ }
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
+ PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
+ PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
+ }
+}
+
+static void
+avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ unsigned int i;
+
+ for (i = 0; i < avp->num_rx_queues; i++) {
+ struct avp_queue *rxq = avp->dev_data->rx_queues[i];
+
+ if (rxq) {
+ stats->ipackets += rxq->packets;
+ stats->ibytes += rxq->bytes;
+ stats->ierrors += rxq->errors;
+
+ stats->q_ipackets[i] += rxq->packets;
+ stats->q_ibytes[i] += rxq->bytes;
+ stats->q_errors[i] += rxq->errors;
+ }
+ }
+
+ for (i = 0; i < avp->num_tx_queues; i++) {
+ struct avp_queue *txq = avp->dev_data->tx_queues[i];
+
+ if (txq) {
+ stats->opackets += txq->packets;
+ stats->obytes += txq->bytes;
+ stats->oerrors += txq->errors;
+
+ stats->q_opackets[i] += txq->packets;
+ stats->q_obytes[i] += txq->bytes;
+ stats->q_errors[i] += txq->errors;
+ }
+ }
+}
+
+static void
+avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ unsigned int i;
+
+ for (i = 0; i < avp->num_rx_queues; i++) {
+ struct avp_queue *rxq = avp->dev_data->rx_queues[i];
+
+ if (rxq) {
+ rxq->bytes = 0;
+ rxq->packets = 0;
+ rxq->errors = 0;
+ }
+ }
+
+ for (i = 0; i < avp->num_tx_queues; i++) {
+ struct avp_queue *txq = avp->dev_data->tx_queues[i];
+
+ if (txq) {
+ txq->bytes = 0;
+ txq->packets = 0;
+ txq->errors = 0;
+ }
+ }
+}
+
+RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);
diff --git a/drivers/net/avp/avp_logs.h b/drivers/net/avp/avp_logs.h
new file mode 100644
index 00000000..252cab7d
--- /dev/null
+++ b/drivers/net/avp/avp_logs.h
@@ -0,0 +1,59 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (c) 2013-2015, Wind River Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2) Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3) Neither the name of Wind River Systems nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AVP_LOGS_H_
+#define _AVP_LOGS_H_
+
+#include <rte_log.h>
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() rx: " fmt, __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() tx: " fmt, __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_DRIVER
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#endif /* _AVP_LOGS_H_ */
diff --git a/drivers/net/avp/rte_avp_common.h b/drivers/net/avp/rte_avp_common.h
new file mode 100644
index 00000000..488d7216
--- /dev/null
+++ b/drivers/net/avp/rte_avp_common.h
@@ -0,0 +1,432 @@
+/*-
+ * This file is provided under a dual BSD/LGPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GNU LESSER GENERAL PUBLIC LICENSE
+ *
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * Contact Information:
+ * Wind River Systems, Inc.
+ *
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _RTE_AVP_COMMON_H_
+#define _RTE_AVP_COMMON_H_
+
+#ifdef __KERNEL__
+#include <linux/if.h>
+#define RTE_STD_C11
+#else
+#include <stdint.h>
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_ether.h>
+#include <rte_atomic.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * AVP name is part of network device name.
+ */
+#define RTE_AVP_NAMESIZE 32
+
+/**
+ * AVP alias is a user-defined value used for lookups from secondary
+ * processes. Typically, this is a UUID.
+ */
+#define RTE_AVP_ALIASSIZE 128
+
+/*
+ * Request id.
+ */
+enum rte_avp_req_id {
+ RTE_AVP_REQ_UNKNOWN = 0,
+ RTE_AVP_REQ_CHANGE_MTU,
+ RTE_AVP_REQ_CFG_NETWORK_IF,
+ RTE_AVP_REQ_CFG_DEVICE,
+ RTE_AVP_REQ_SHUTDOWN_DEVICE,
+ RTE_AVP_REQ_MAX,
+};
+
+/**@{ AVP device driver types */
+#define RTE_AVP_DRIVER_TYPE_UNKNOWN 0
+#define RTE_AVP_DRIVER_TYPE_DPDK 1
+#define RTE_AVP_DRIVER_TYPE_KERNEL 2
+#define RTE_AVP_DRIVER_TYPE_QEMU 3
+/**@} */
+
+/**@{ AVP device operational modes */
+#define RTE_AVP_MODE_HOST 0 /**< AVP interface created in host */
+#define RTE_AVP_MODE_GUEST 1 /**< AVP interface created for export to guest */
+#define RTE_AVP_MODE_TRACE 2 /**< AVP interface created for packet tracing */
+/**@} */
+
+/*
+ * Structure for AVP queue configuration query request/result
+ */
+struct rte_avp_device_config {
+ uint64_t device_id; /**< Unique system identifier */
+ uint32_t driver_type; /**< Device Driver type */
+ uint32_t driver_version; /**< Device Driver version */
+ uint32_t features; /**< Negotiated features */
+ uint16_t num_tx_queues; /**< Number of active transmit queues */
+ uint16_t num_rx_queues; /**< Number of active receive queues */
+ uint8_t if_up; /**< 1: interface up, 0: interface down */
+} __attribute__ ((__packed__));
+
+/*
+ * Structure for AVP request.
+ */
+struct rte_avp_request {
+ uint32_t req_id; /**< Request id */
+ RTE_STD_C11
+ union {
+ uint32_t new_mtu; /**< New MTU */
+ uint8_t if_up; /**< 1: interface up, 0: interface down */
+ struct rte_avp_device_config config; /**< Queue configuration */
+ };
+ int32_t result; /**< Result for processing request */
+} __attribute__ ((__packed__));
+
+/*
+ * FIFO struct mapped in a shared memory. It describes a circular buffer FIFO
+ * Write and read should wrap around. FIFO is empty when write == read
+ * Writing should never overwrite the read position
+ */
+struct rte_avp_fifo {
+ volatile unsigned int write; /**< Next position to be written*/
+ volatile unsigned int read; /**< Next position to be read */
+ unsigned int len; /**< Circular buffer length */
+ unsigned int elem_size; /**< Pointer size - for 32/64 bit OS */
+ void *volatile buffer[]; /**< The buffer contains mbuf pointers */
+};
+
+
+/*
+ * AVP packet buffer header used to define the exchange of packet data.
+ */
+struct rte_avp_desc {
+ uint64_t pad0;
+ void *pkt_mbuf; /**< Reference to packet mbuf */
+ uint8_t pad1[14];
+ uint16_t ol_flags; /**< Offload features. */
+ void *next; /**< Reference to next buffer in chain */
+ void *data; /**< Start address of data in segment buffer. */
+ uint16_t data_len; /**< Amount of data in segment buffer. */
+ uint8_t nb_segs; /**< Number of segments */
+ uint8_t pad2;
+ uint16_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
+ uint32_t pad3;
+ uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order). */
+ uint32_t pad4;
+} __attribute__ ((__aligned__(RTE_CACHE_LINE_SIZE), __packed__));
+
+
+/**{ AVP device features */
+#define RTE_AVP_FEATURE_VLAN_OFFLOAD (1 << 0) /**< Emulated HW VLAN offload */
+/**@} */
+
+
+/**@{ Offload feature flags */
+#define RTE_AVP_TX_VLAN_PKT 0x0001 /**< TX packet is a 802.1q VLAN packet. */
+#define RTE_AVP_RX_VLAN_PKT 0x0800 /**< RX packet is a 802.1q VLAN packet. */
+/**@} */
+
+
+/**@{ AVP PCI identifiers */
+#define RTE_AVP_PCI_VENDOR_ID 0x1af4
+#define RTE_AVP_PCI_DEVICE_ID 0x1110
+/**@} */
+
+/**@{ AVP PCI subsystem identifiers */
+#define RTE_AVP_PCI_SUB_VENDOR_ID RTE_AVP_PCI_VENDOR_ID
+#define RTE_AVP_PCI_SUB_DEVICE_ID 0x1104
+/**@} */
+
+/**@{ AVP PCI BAR definitions */
+#define RTE_AVP_PCI_MMIO_BAR 0
+#define RTE_AVP_PCI_MSIX_BAR 1
+#define RTE_AVP_PCI_MEMORY_BAR 2
+#define RTE_AVP_PCI_MEMMAP_BAR 4
+#define RTE_AVP_PCI_DEVICE_BAR 5
+#define RTE_AVP_PCI_MAX_BAR 6
+/**@} */
+
+/**@{ AVP PCI BAR name definitions */
+#define RTE_AVP_MMIO_BAR_NAME "avp-mmio"
+#define RTE_AVP_MSIX_BAR_NAME "avp-msix"
+#define RTE_AVP_MEMORY_BAR_NAME "avp-memory"
+#define RTE_AVP_MEMMAP_BAR_NAME "avp-memmap"
+#define RTE_AVP_DEVICE_BAR_NAME "avp-device"
+/**@} */
+
+/**@{ AVP PCI MSI-X vectors */
+#define RTE_AVP_MIGRATION_MSIX_VECTOR 0 /**< Migration interrupts */
+#define RTE_AVP_MAX_MSIX_VECTORS 1
+/**@} */
+
+/**@} AVP Migration status/ack register values */
+#define RTE_AVP_MIGRATION_NONE 0 /**< Migration never executed */
+#define RTE_AVP_MIGRATION_DETACHED 1 /**< Device attached during migration */
+#define RTE_AVP_MIGRATION_ATTACHED 2 /**< Device reattached during migration */
+#define RTE_AVP_MIGRATION_ERROR 3 /**< Device failed to attach/detach */
+/**@} */
+
+/**@} AVP MMIO Register Offsets */
+#define RTE_AVP_REGISTER_BASE 0
+#define RTE_AVP_INTERRUPT_MASK_OFFSET (RTE_AVP_REGISTER_BASE + 0)
+#define RTE_AVP_INTERRUPT_STATUS_OFFSET (RTE_AVP_REGISTER_BASE + 4)
+#define RTE_AVP_MIGRATION_STATUS_OFFSET (RTE_AVP_REGISTER_BASE + 8)
+#define RTE_AVP_MIGRATION_ACK_OFFSET (RTE_AVP_REGISTER_BASE + 12)
+/**@} */
+
+/**@} AVP Interrupt Status Mask */
+#define RTE_AVP_MIGRATION_INTERRUPT_MASK (1 << 1)
+#define RTE_AVP_APP_INTERRUPTS_MASK 0xFFFFFFFF
+#define RTE_AVP_NO_INTERRUPTS_MASK 0
+/**@} */
+
+/*
+ * Maximum number of memory regions to export
+ */
+#define RTE_AVP_MAX_MAPS 2048
+
+/*
+ * Description of a single memory region
+ */
+struct rte_avp_memmap {
+ void *addr;
+ phys_addr_t phys_addr;
+ uint64_t length;
+};
+
+/*
+ * AVP memory mapping validation marker
+ */
+#define RTE_AVP_MEMMAP_MAGIC 0x20131969
+
+/**@{ AVP memory map versions */
+#define RTE_AVP_MEMMAP_VERSION_1 1
+#define RTE_AVP_MEMMAP_VERSION RTE_AVP_MEMMAP_VERSION_1
+/**@} */
+
+/*
+ * Defines a list of memory regions exported from the host to the guest
+ */
+struct rte_avp_memmap_info {
+ uint32_t magic; /**< Memory validation marker */
+ uint32_t version; /**< Data format version */
+ uint32_t nb_maps;
+ struct rte_avp_memmap maps[RTE_AVP_MAX_MAPS];
+};
+
+/*
+ * AVP device memory validation marker
+ */
+#define RTE_AVP_DEVICE_MAGIC 0x20131975
+
+/**@{ AVP device map versions
+ * WARNING: do not change the format or names of these variables. They are
+ * automatically parsed from the build system to generate the SDK package
+ * name.
+ **/
+#define RTE_AVP_RELEASE_VERSION_1 1
+#define RTE_AVP_RELEASE_VERSION RTE_AVP_RELEASE_VERSION_1
+#define RTE_AVP_MAJOR_VERSION_0 0
+#define RTE_AVP_MAJOR_VERSION_1 1
+#define RTE_AVP_MAJOR_VERSION_2 2
+#define RTE_AVP_MAJOR_VERSION RTE_AVP_MAJOR_VERSION_2
+#define RTE_AVP_MINOR_VERSION_0 0
+#define RTE_AVP_MINOR_VERSION_1 1
+#define RTE_AVP_MINOR_VERSION_13 13
+#define RTE_AVP_MINOR_VERSION RTE_AVP_MINOR_VERSION_13
+/**@} */
+
+
+/**
+ * Generates a 32-bit version number from the specified version number
+ * components
+ */
+#define RTE_AVP_MAKE_VERSION(_release, _major, _minor) \
+((((_release) & 0xffff) << 16) | (((_major) & 0xff) << 8) | ((_minor) & 0xff))
+
+
+/**
+ * Represents the current version of the AVP host driver
+ * WARNING: in the current development branch the host and guest driver
+ * version should always be the same. When patching guest features back to
+ * GA releases the host version number should not be updated unless there was
+ * an actual change made to the host driver.
+ */
+#define RTE_AVP_CURRENT_HOST_VERSION \
+RTE_AVP_MAKE_VERSION(RTE_AVP_RELEASE_VERSION_1, \
+ RTE_AVP_MAJOR_VERSION_0, \
+ RTE_AVP_MINOR_VERSION_1)
+
+
+/**
+ * Represents the current version of the AVP guest drivers
+ */
+#define RTE_AVP_CURRENT_GUEST_VERSION \
+RTE_AVP_MAKE_VERSION(RTE_AVP_RELEASE_VERSION_1, \
+ RTE_AVP_MAJOR_VERSION_2, \
+ RTE_AVP_MINOR_VERSION_13)
+
+/**
+ * Access AVP device version values
+ */
+#define RTE_AVP_GET_RELEASE_VERSION(_version) (((_version) >> 16) & 0xffff)
+#define RTE_AVP_GET_MAJOR_VERSION(_version) (((_version) >> 8) & 0xff)
+#define RTE_AVP_GET_MINOR_VERSION(_version) ((_version) & 0xff)
+/**@}*/
+
+
+/**
+ * Remove the minor version number so that only the release and major versions
+ * are used for comparisons.
+ */
+#define RTE_AVP_STRIP_MINOR_VERSION(_version) ((_version) >> 8)
+
+
+/**
+ * Defines the number of mbuf pools supported per device (1 per socket)
+ */
+#define RTE_AVP_MAX_MEMPOOLS 8
+
+/*
+ * Defines address translation parameters for each support mbuf pool
+ */
+struct rte_avp_mempool_info {
+ void *addr;
+ phys_addr_t phys_addr;
+ uint64_t length;
+};
+
+/*
+ * Struct used to create a AVP device. Passed to the kernel in IOCTL call or
+ * via inter-VM shared memory when used in a guest.
+ */
+struct rte_avp_device_info {
+ uint32_t magic; /**< Memory validation marker */
+ uint32_t version; /**< Data format version */
+
+ char ifname[RTE_AVP_NAMESIZE]; /**< Network device name for AVP */
+
+ phys_addr_t tx_phys;
+ phys_addr_t rx_phys;
+ phys_addr_t alloc_phys;
+ phys_addr_t free_phys;
+
+ uint32_t features; /**< Supported feature bitmap */
+ uint8_t min_rx_queues; /**< Minimum supported receive/free queues */
+ uint8_t num_rx_queues; /**< Recommended number of receive/free queues */
+ uint8_t max_rx_queues; /**< Maximum supported receive/free queues */
+ uint8_t min_tx_queues; /**< Minimum supported transmit/alloc queues */
+ uint8_t num_tx_queues;
+ /**< Recommended number of transmit/alloc queues */
+ uint8_t max_tx_queues; /**< Maximum supported transmit/alloc queues */
+
+ uint32_t tx_size; /**< Size of each transmit queue */
+ uint32_t rx_size; /**< Size of each receive queue */
+ uint32_t alloc_size; /**< Size of each alloc queue */
+ uint32_t free_size; /**< Size of each free queue */
+
+ /* Used by Ethtool */
+ phys_addr_t req_phys;
+ phys_addr_t resp_phys;
+ phys_addr_t sync_phys;
+ void *sync_va;
+
+ /* mbuf mempool (used when a single memory area is supported) */
+ void *mbuf_va;
+ phys_addr_t mbuf_phys;
+
+ /* mbuf mempools */
+ struct rte_avp_mempool_info pool[RTE_AVP_MAX_MEMPOOLS];
+
+#ifdef __KERNEL__
+ /* Ethernet info */
+ char ethaddr[ETH_ALEN];
+#else
+ char ethaddr[ETHER_ADDR_LEN];
+#endif
+
+ uint8_t mode; /**< device mode, i.e guest, host, trace */
+
+ /* mbuf size */
+ unsigned int mbuf_size;
+
+ /*
+ * unique id to differentiate between two instantiations of the same
+ * AVP device (i.e., the guest needs to know if the device has been
+ * deleted and recreated).
+ */
+ uint64_t device_id;
+
+ uint32_t max_rx_pkt_len; /**< Maximum receive unit size */
+};
+
+#define RTE_AVP_MAX_QUEUES 8 /**< Maximum number of queues per device */
+
+/** Maximum number of chained mbufs in a packet */
+#define RTE_AVP_MAX_MBUF_SEGMENTS 5
+
+#define RTE_AVP_DEVICE "avp"
+
+#define RTE_AVP_IOCTL_TEST _IOWR(0, 1, int)
+#define RTE_AVP_IOCTL_CREATE _IOWR(0, 2, struct rte_avp_device_info)
+#define RTE_AVP_IOCTL_RELEASE _IOWR(0, 3, struct rte_avp_device_info)
+#define RTE_AVP_IOCTL_QUERY _IOWR(0, 4, struct rte_avp_device_config)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_AVP_COMMON_H_ */
diff --git a/drivers/net/avp/rte_avp_fifo.h b/drivers/net/avp/rte_avp_fifo.h
new file mode 100644
index 00000000..803eb80a
--- /dev/null
+++ b/drivers/net/avp/rte_avp_fifo.h
@@ -0,0 +1,169 @@
+/*-
+ * This file is provided under a dual BSD/LGPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GNU LESSER GENERAL PUBLIC LICENSE
+ *
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 Wind River Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * Contact Information:
+ * Wind River Systems, Inc.
+ *
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013-2017 Wind River Systems, Inc. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _RTE_AVP_FIFO_H_
+#define _RTE_AVP_FIFO_H_
+
+#include "rte_avp_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __KERNEL__
+/* Write memory barrier for kernel compiles */
+#define AVP_WMB() smp_wmb()
+/* Read memory barrier for kernel compiles */
+#define AVP_RMB() smp_rmb()
+#else
+/* Write memory barrier for userspace compiles */
+#define AVP_WMB() rte_wmb()
+/* Read memory barrier for userspace compiles */
+#define AVP_RMB() rte_rmb()
+#endif
+
+#ifndef __KERNEL__
+#include <rte_debug.h>
+
+/**
+ * Initializes the avp fifo structure
+ */
+static inline void
+avp_fifo_init(struct rte_avp_fifo *fifo, unsigned int size)
+{
+ /* Ensure size is power of 2 */
+ if (size & (size - 1))
+ rte_panic("AVP fifo size must be power of 2\n");
+
+ fifo->write = 0;
+ fifo->read = 0;
+ fifo->len = size;
+ fifo->elem_size = sizeof(void *);
+}
+#endif
+
+/**
+ * Adds num elements into the fifo. Return the number actually written
+ */
+static inline unsigned
+avp_fifo_put(struct rte_avp_fifo *fifo, void **data, unsigned int num)
+{
+ unsigned int i = 0;
+ unsigned int fifo_write = fifo->write;
+ unsigned int fifo_read = fifo->read;
+ unsigned int new_write = fifo_write;
+
+ for (i = 0; i < num; i++) {
+ new_write = (new_write + 1) & (fifo->len - 1);
+
+ if (new_write == fifo_read)
+ break;
+ fifo->buffer[fifo_write] = data[i];
+ fifo_write = new_write;
+ }
+ AVP_WMB();
+ fifo->write = fifo_write;
+ return i;
+}
+
+/**
+ * Get up to num elements from the fifo. Return the number actually read
+ */
+static inline unsigned int
+avp_fifo_get(struct rte_avp_fifo *fifo, void **data, unsigned int num)
+{
+ unsigned int i = 0;
+ unsigned int new_read = fifo->read;
+ unsigned int fifo_write = fifo->write;
+
+ if (new_read == fifo_write)
+ return 0; /* empty */
+
+ for (i = 0; i < num; i++) {
+ if (new_read == fifo_write)
+ break;
+
+ data[i] = fifo->buffer[new_read];
+ new_read = (new_read + 1) & (fifo->len - 1);
+ }
+ AVP_RMB();
+ fifo->read = new_read;
+ return i;
+}
+
+/**
+ * Get the num of elements in the fifo
+ */
+static inline unsigned int
+avp_fifo_count(struct rte_avp_fifo *fifo)
+{
+ return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
+}
+
+/**
+ * Get the num of available elements in the fifo
+ */
+static inline unsigned int
+avp_fifo_free_count(struct rte_avp_fifo *fifo)
+{
+ return (fifo->read - fifo->write - 1) & (fifo->len - 1);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_AVP_FIFO_H_ */
diff --git a/drivers/net/avp/rte_pmd_avp_version.map b/drivers/net/avp/rte_pmd_avp_version.map
new file mode 100644
index 00000000..af8f3f47
--- /dev/null
+++ b/drivers/net/avp/rte_pmd_avp_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index e971fb66..e1231069 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -29,8 +29,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += ecore_sp.c
SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += elink.c
SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_vfpf.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += lib/librte_mempool lib/librte_mbuf
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 0d16a737..1a7e1c8e 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -31,7 +31,7 @@
#define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
#define BNX2X_PMD_VERSION_MAJOR 1
#define BNX2X_PMD_VERSION_MINOR 0
-#define BNX2X_PMD_VERSION_REVISION 1
+#define BNX2X_PMD_VERSION_REVISION 5
#define BNX2X_PMD_VERSION_PATCH 1
static inline const char *
@@ -2220,7 +2220,7 @@ int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0)
}
PMD_TX_LOG(DEBUG,
- "start bd: nbytes %d flags %x vlan %x\n",
+ "start bd: nbytes %d flags %x vlan %x",
tx_start_bd->nbytes,
tx_start_bd->bd_flags.as_bitfield,
tx_start_bd->vlan_or_ethertype);
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 57093054..91c5aec2 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -18,6 +18,7 @@
#include <rte_byteorder.h>
#include <rte_spinlock.h>
+#include <rte_io.h>
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#ifndef __LITTLE_ENDIAN
@@ -1420,8 +1421,7 @@ bnx2x_reg_write8(struct bnx2x_softc *sc, size_t offset, uint8_t val)
{
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x",
(unsigned long)offset, val);
- *((volatile uint8_t*)
- ((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val;
+ rte_write8(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
}
static inline void
@@ -1434,8 +1434,8 @@ bnx2x_reg_write16(struct bnx2x_softc *sc, size_t offset, uint16_t val)
#endif
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%04x",
(unsigned long)offset, val);
- *((volatile uint16_t*)
- ((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val;
+ rte_write16(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
+
}
static inline void
@@ -1449,8 +1449,7 @@ bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val)
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
(unsigned long)offset, val);
- *((volatile uint32_t*)
- ((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val;
+ rte_write32(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
}
static inline uint8_t
@@ -1458,8 +1457,7 @@ bnx2x_reg_read8(struct bnx2x_softc *sc, size_t offset)
{
uint8_t val;
- val = (uint8_t)(*((volatile uint8_t*)
- ((uintptr_t)sc->bar[BAR0].base_addr + offset)));
+ val = rte_read8((uint8_t *)sc->bar[BAR0].base_addr + offset);
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x",
(unsigned long)offset, val);
@@ -1477,8 +1475,7 @@ bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset)
(unsigned long)offset);
#endif
- val = (uint16_t)(*((volatile uint16_t*)
- ((uintptr_t)sc->bar[BAR0].base_addr + offset)));
+ val = rte_read16(((uint8_t *)sc->bar[BAR0].base_addr + offset));
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
(unsigned long)offset, val);
@@ -1496,8 +1493,7 @@ bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset)
(unsigned long)offset);
#endif
- val = (uint32_t)(*((volatile uint32_t*)
- ((uintptr_t)sc->bar[BAR0].base_addr + offset)));
+ val = rte_read32(((uint8_t *)sc->bar[BAR0].base_addr + offset));
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
(unsigned long)offset, val);
@@ -1561,11 +1557,9 @@ bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset)
#define DPM_TRIGGER_TYPE 0x40
/* Doorbell macro */
-#define BNX2X_DB_WRITE(db_bar, val) \
- *((volatile uint32_t *)(db_bar)) = (val)
+#define BNX2X_DB_WRITE(db_bar, val) rte_write32_relaxed((val), (db_bar))
-#define BNX2X_DB_READ(db_bar) \
- *((volatile uint32_t *)(db_bar))
+#define BNX2X_DB_READ(db_bar) rte_read32_relaxed(db_bar)
#define DOORBELL_ADDR(sc, offset) \
(volatile uint32_t *)(((char *)(sc)->bar[BAR1].base_addr + (offset)))
@@ -1983,7 +1977,7 @@ bnx2x_set_rx_mode(struct bnx2x_softc *sc)
static inline int pci_read(struct bnx2x_softc *sc, size_t addr,
void *val, uint8_t size)
{
- if (rte_eal_pci_read_config(sc->pci_dev, val, size, addr) <= 0) {
+ if (rte_pci_read_config(sc->pci_dev, val, size, addr) <= 0) {
PMD_DRV_LOG(ERR, "Can't read from PCI config space");
return ENXIO;
}
@@ -1995,7 +1989,7 @@ static inline int pci_write_word(struct bnx2x_softc *sc, size_t addr, off_t val)
{
uint16_t val16 = val;
- if (rte_eal_pci_write_config(sc->pci_dev, &val16,
+ if (rte_pci_write_config(sc->pci_dev, &val16,
sizeof(val16), addr) <= 0) {
PMD_DRV_LOG(ERR, "Can't write to PCI config space");
return ENXIO;
@@ -2007,7 +2001,7 @@ static inline int pci_write_word(struct bnx2x_softc *sc, size_t addr, off_t val)
static inline int pci_write_long(struct bnx2x_softc *sc, size_t addr, off_t val)
{
uint32_t val32 = val;
- if (rte_eal_pci_write_config(sc->pci_dev, &val32,
+ if (rte_pci_write_config(sc->pci_dev, &val32,
sizeof(val32), addr) <= 0) {
PMD_DRV_LOG(ERR, "Can't write to PCI config space");
return ENXIO;
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index a8aebbe3..b79cfdb0 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -12,12 +12,13 @@
#include "bnx2x_rxtx.h"
#include <rte_dev.h>
+#include <rte_ethdev_pci.h>
/*
* The set of PCI devices this driver supports
*/
#define BROADCOM_PCI_VENDOR_ID 0x14E4
-static struct rte_pci_id pci_id_bnx2x_map[] = {
+static const struct rte_pci_id pci_id_bnx2x_map[] = {
{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) },
@@ -33,7 +34,7 @@ static struct rte_pci_id pci_id_bnx2x_map[] = {
{ .vendor_id = 0, }
};
-static struct rte_pci_id pci_id_bnx2xvf_map[] = {
+static const struct rte_pci_id pci_id_bnx2xvf_map[] = {
{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) },
{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
@@ -119,12 +120,13 @@ bnx2x_interrupt_action(struct rte_eth_dev *dev)
}
static __rte_unused void
-bnx2x_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
+bnx2x_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct bnx2x_softc *sc = dev->data->dev_private;
bnx2x_interrupt_action(dev);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(&sc->pci_dev->intr_handle);
}
/*
@@ -187,10 +189,10 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
}
if (IS_PF(sc)) {
- rte_intr_callback_register(&(dev->pci_dev->intr_handle),
+ rte_intr_callback_register(&sc->pci_dev->intr_handle,
bnx2x_interrupt_handler, (void *)dev);
- if(rte_intr_enable(&(dev->pci_dev->intr_handle)))
+ if (rte_intr_enable(&sc->pci_dev->intr_handle))
PMD_DRV_LOG(ERR, "rte_intr_enable failed");
}
@@ -215,8 +217,8 @@ bnx2x_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
if (IS_PF(sc)) {
- rte_intr_disable(&(dev->pci_dev->intr_handle));
- rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
+ rte_intr_disable(&sc->pci_dev->intr_handle);
+ rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
bnx2x_interrupt_handler, (void *)dev);
}
@@ -440,6 +442,7 @@ static void
bnx2x_dev_infos_get(struct rte_eth_dev *dev, __rte_unused struct rte_eth_dev_info *dev_info)
{
struct bnx2x_softc *sc = dev->data->dev_private;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->max_rx_queues = sc->max_rx_queues;
dev_info->max_tx_queues = sc->max_tx_queues;
dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
@@ -448,14 +451,17 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, __rte_unused struct rte_eth_dev_inf
dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
}
-static void
+static int
bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- if (sc->mac_ops.mac_addr_add)
+ if (sc->mac_ops.mac_addr_add) {
sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
+ return 0;
+ }
+ return -ENOTSUP;
}
static void
@@ -525,7 +531,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -625,32 +631,57 @@ eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
return bnx2x_common_dev_init(eth_dev, 1);
}
-static struct eth_driver rte_bnx2x_pmd = {
- .pci_drv = {
- .id_table = pci_id_bnx2x_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_bnx2x_dev_init,
- .dev_private_size = sizeof(struct bnx2x_softc),
+static struct rte_pci_driver rte_bnx2x_pmd;
+static struct rte_pci_driver rte_bnx2xvf_pmd;
+
+static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct bnx2x_softc));
+ if (!eth_dev)
+ return -ENOMEM;
+
+ if (pci_drv == &rte_bnx2x_pmd)
+ ret = eth_bnx2x_dev_init(eth_dev);
+ else if (pci_drv == &rte_bnx2xvf_pmd)
+ ret = eth_bnx2xvf_dev_init(eth_dev);
+ else
+ ret = -EINVAL;
+
+ if (ret)
+ rte_eth_dev_pci_release(eth_dev);
+
+ return ret;
+}
+
+static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_bnx2x_pmd = {
+ .id_table = pci_id_bnx2x_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_bnx2x_pci_probe,
+ .remove = eth_bnx2x_pci_remove,
};
/*
* virtual function driver struct
*/
-static struct eth_driver rte_bnx2xvf_pmd = {
- .pci_drv = {
- .id_table = pci_id_bnx2xvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_bnx2xvf_dev_init,
- .dev_private_size = sizeof(struct bnx2x_softc),
+static struct rte_pci_driver rte_bnx2xvf_pmd = {
+ .id_table = pci_id_bnx2xvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_bnx2x_pci_probe,
+ .remove = eth_bnx2x_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
-RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd.pci_drv);
+RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio");
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 170e48fb..5dd4aee7 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -19,7 +19,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.driver.name, ring_name,
+ dev->device->driver->name, ring_name,
dev->data->port_id, queue_id);
mz = rte_memzone_lookup(z_name);
@@ -273,6 +273,8 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->tx_free_thresh = tx_conf->tx_free_thresh ?
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
+ txq->tx_free_thresh = min(txq->tx_free_thresh,
+ txq->nb_tx_desc - BDS_PER_TX_PKT);
PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
"total_bd=%lu, tx_pages=%u",
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h
index dd251aaf..2e38ec26 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.h
+++ b/drivers/net/bnx2x/bnx2x_rxtx.h
@@ -11,7 +11,7 @@
#ifndef _BNX2X_RXTX_H_
#define _BNX2X_RXTX_H_
-#define DEFAULT_TX_FREE_THRESH 512
+#define DEFAULT_TX_FREE_THRESH 64
#define RTE_PMD_BNX2X_TX_MAX_BURST 1
/**
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index e6fecd88..22f2dc95 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -2725,7 +2725,7 @@ static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
/* DEL command deletes all currently configured MACs */
case ECORE_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* Don't break */
+ /* fall-through */
/* RESTORE command will restore the entire multicast configuration */
case ECORE_MCAST_CMD_RESTORE:
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index 53293962..9ffa7dc6 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -5898,6 +5898,7 @@ elink_status_t elink_set_led(struct elink_params *params,
*/
if (!vars->link_up)
break;
+ /* fall-through */
case ELINK_LED_MODE_ON:
if (((params->phy[ELINK_EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727) ||
@@ -11534,11 +11535,13 @@ static void elink_phy_def_cfg(struct elink_params *params,
switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
case PORT_FEATURE_LINK_SPEED_10M_HALF:
phy->req_duplex = DUPLEX_HALF;
+ /* fall-through */
case PORT_FEATURE_LINK_SPEED_10M_FULL:
phy->req_line_speed = ELINK_SPEED_10;
break;
case PORT_FEATURE_LINK_SPEED_100M_HALF:
phy->req_duplex = DUPLEX_HALF;
+ /* fall-through */
case PORT_FEATURE_LINK_SPEED_100M_FULL:
phy->req_line_speed = ELINK_SPEED_100;
break;
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile
index 65aaa929..0fffe356 100644
--- a/drivers/net/bnxt/Makefile
+++ b/drivers/net/bnxt/Makefile
@@ -66,10 +66,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c
#
SYMLINK-y-include +=
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index f9f2adb4..83e53761 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -34,6 +34,8 @@
#ifndef _BNXT_CPR_H_
#define _BNXT_CPR_H_
+#include <rte_io.h>
+
#define CMP_VALID(cmp, raw_cons, ring) \
(!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == \
!((raw_cons) & ((ring)->ring_size)))
@@ -50,13 +52,14 @@
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define B_CP_DB_REARM(cpr, raw_cons) \
- (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_REARM_FLAGS | \
- RING_CMP(cpr->cp_ring_struct, raw_cons)))
+ rte_write32((DB_CP_REARM_FLAGS | \
+ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
+ ((cpr)->cp_doorbell))
#define B_CP_DIS_DB(cpr, raw_cons) \
- rte_smp_wmb(); \
- (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_FLAGS | \
- RING_CMP(cpr->cp_ring_struct, raw_cons)))
+ rte_write32((DB_CP_FLAGS | \
+ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
+ ((cpr)->cp_doorbell))
struct bnxt_ring;
struct bnxt_cp_ring_info {
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 035fe07a..bb873615 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -36,6 +36,7 @@
#include <rte_dev.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
@@ -59,25 +60,44 @@ static const char bnxt_version[] =
#define PCI_VENDOR_ID_BROADCOM 0x14E4
+#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
+#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
+#define BROADCOM_DEV_ID_57417_MF 0x16cc
#define BROADCOM_DEV_ID_NS2 0x16cd
+#define BROADCOM_DEV_ID_57311 0x16ce
+#define BROADCOM_DEV_ID_57312 0x16cf
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57402_MF 0x16d4
#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
+#define BROADCOM_DEV_ID_57412 0x16d6
+#define BROADCOM_DEV_ID_57414 0x16d7
+#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
+#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
#define BROADCOM_DEV_ID_5741X_VF 0x16dc
+#define BROADCOM_DEV_ID_57412_MF 0x16de
+#define BROADCOM_DEV_ID_57314 0x16df
+#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
#define BROADCOM_DEV_ID_5731X_VF 0x16e1
+#define BROADCOM_DEV_ID_57417_SFP 0x16e2
+#define BROADCOM_DEV_ID_57416_SFP 0x16e3
+#define BROADCOM_DEV_ID_57317_SFP 0x16e4
#define BROADCOM_DEV_ID_57404_MF 0x16e7
#define BROADCOM_DEV_ID_57406_MF 0x16e8
#define BROADCOM_DEV_ID_57407_SFP 0x16e9
#define BROADCOM_DEV_ID_57407_MF 0x16ea
+#define BROADCOM_DEV_ID_57414_MF 0x16ec
+#define BROADCOM_DEV_ID_57416_MF 0x16ee
-static struct rte_pci_id bnxt_pci_id_map[] = {
+static const struct rte_pci_id bnxt_pci_id_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
@@ -95,6 +115,21 @@ static struct rte_pci_id bnxt_pci_id_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -303,6 +338,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
uint16_t max_vnics, i, j, vpool, vrxq;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+
/* MAC Specifics */
dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
dev_info->max_hash_mac_addrs = 0;
@@ -581,9 +618,9 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
}
}
-static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
- struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool)
+static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
@@ -591,30 +628,30 @@ static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
if (BNXT_VF(bp)) {
RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
- return;
+ return -ENOTSUP;
}
if (!vnic) {
RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
- return;
+ return -EINVAL;
}
/* Attach requested MAC address to the new l2_filter */
STAILQ_FOREACH(filter, &vnic->filter, next) {
if (filter->mac_index == index) {
RTE_LOG(ERR, PMD,
"MAC addr already existed for pool %d\n", pool);
- return;
+ return -EINVAL;
}
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
- return;
+ return -ENODEV;
}
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
filter->mac_index = index;
memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
- bnxt_hwrm_set_filter(bp, vnic, filter);
+ return bnxt_hwrm_set_filter(bp, vnic, filter);
}
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
@@ -743,6 +780,8 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct rte_intr_handle *intr_handle
+ = &bp->pdev->intr_handle;
/* Retrieve from the default VNIC */
if (!vnic)
@@ -759,7 +798,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
/* EW - need to revisit here copying from u64 to u16 */
memcpy(reta_conf, vnic->rss_table, reta_size);
- if (rte_intr_allow_others(&eth_dev->pci_dev->intr_handle)) {
+ if (rte_intr_allow_others(intr_handle)) {
if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
bnxt_dev_lsc_intr_setup(eth_dev);
}
@@ -968,7 +1007,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
* Initialization
*/
-static struct eth_dev_ops bnxt_dev_ops = {
+static const struct eth_dev_ops bnxt_dev_ops = {
.dev_infos_get = bnxt_dev_info_get_op,
.dev_close = bnxt_dev_close_op,
.dev_configure = bnxt_dev_configure_op,
@@ -1009,11 +1048,12 @@ static bool bnxt_vf_pciid(uint16_t id)
static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
- int rc;
struct bnxt *bp = eth_dev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ int rc;
/* enable device (incl. PCI PM wakeup), and bus-mastering */
- if (!eth_dev->pci_dev->mem_resource[0].addr) {
+ if (!pci_dev->mem_resource[0].addr) {
RTE_LOG(ERR, PMD,
"Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
@@ -1021,9 +1061,9 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
}
bp->eth_dev = eth_dev;
- bp->pdev = eth_dev->pci_dev;
+ bp->pdev = pci_dev;
- bp->bar0 = (void *)eth_dev->pci_dev->mem_resource[0].addr;
+ bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
if (!bp->bar0) {
RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
rc = -ENOMEM;
@@ -1040,9 +1080,12 @@ init_err_disable:
return rc;
}
+static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
+
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
static int version_printed;
struct bnxt *bp;
int rc;
@@ -1050,10 +1093,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
if (version_printed++ == 0)
RTE_LOG(INFO, PMD, "%s", bnxt_version);
- rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
bp = eth_dev->data->dev_private;
- if (bnxt_vf_pciid(eth_dev->pci_dev->id.device_id))
+ if (bnxt_vf_pciid(pci_dev->id.device_id))
bp->flags |= BNXT_FLAG_VF;
rc = bnxt_init_board(eth_dev);
@@ -1121,15 +1166,15 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
RTE_LOG(INFO, PMD,
DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
- eth_dev->pci_dev->mem_resource[0].phys_addr,
- eth_dev->pci_dev->mem_resource[0].addr);
+ pci_dev->mem_resource[0].phys_addr,
+ pci_dev->mem_resource[0].addr);
bp->dev_stopped = 0;
return 0;
error_free:
- eth_dev->driver->eth_dev_uninit(eth_dev);
+ bnxt_dev_uninit(eth_dev);
error:
return rc;
}
@@ -1158,18 +1203,26 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
return rc;
}
-static struct eth_driver bnxt_rte_pmd = {
- .pci_drv = {
- .id_table = bnxt_pci_id_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
- RTE_PCI_DRV_DETACHABLE | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove
- },
- .eth_dev_init = bnxt_dev_init,
- .eth_dev_uninit = bnxt_dev_uninit,
- .dev_private_size = sizeof(struct bnxt),
+static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
+ bnxt_dev_init);
+}
+
+static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
+}
+
+static struct rte_pci_driver bnxt_rte_pmd = {
+ .id_table = bnxt_pci_id_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
+ RTE_PCI_DRV_INTR_LSC,
+ .probe = bnxt_pci_probe,
+ .remove = bnxt_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 07e71241..3849d1a6 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -50,6 +50,8 @@
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
+#include <rte_io.h>
+
#define HWRM_CMD_TIMEOUT 2000
/*
@@ -72,19 +74,19 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
/* Write request msg to hwrm channel */
for (i = 0; i < msg_len; i += 4) {
bar = (uint8_t *)bp->bar0 + i;
- *(volatile uint32_t *)bar = *data;
+ rte_write32(*data, bar);
data++;
}
/* Zero the rest of the request space */
for (; i < bp->max_req_len; i += 4) {
bar = (uint8_t *)bp->bar0 + i;
- *(volatile uint32_t *)bar = 0;
+ rte_write32(0, bar);
}
/* Ring channel doorbell */
bar = (uint8_t *)bp->bar0 + 0x100;
- *(volatile uint32_t *)bar = 1;
+ rte_write32(1, bar);
/* Poll for the valid bit */
for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index e93585a0..20e17ff5 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -45,8 +45,7 @@
* Interrupts
*/
-static void bnxt_int_handler(struct rte_intr_handle *handle __rte_unused,
- void *param)
+static void bnxt_int_handler(void *param)
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 3f81ffcc..0fafa13f 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -209,6 +209,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
*/
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
+ struct rte_pci_device *pci_dev = bp->pdev;
unsigned int i;
int rc = 0;
@@ -222,8 +223,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
0, HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
- cpr->cp_doorbell =
- (char *)bp->eth_dev->pci_dev->mem_resource[2].addr;
+ cpr->cp_doorbell = pci_dev->mem_resource[2].addr;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
}
@@ -242,8 +242,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
idx, HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
- cpr->cp_doorbell =
- (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
@@ -255,8 +254,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
if (rc)
goto err_out;
rxr->rx_prod = 0;
- rxr->rx_doorbell =
- (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
bp->grp_info[idx].rx_fw_ring_id = ring->fw_ring_id;
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
@@ -283,8 +281,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
if (rc)
goto err_out;
- cpr->cp_doorbell =
- (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
@@ -296,8 +293,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
if (rc)
goto err_out;
- txr->tx_doorbell =
- (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ txr->tx_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
}
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 8bf8fee3..0d15bb1e 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -298,7 +298,7 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
nb_tx_pkts++;
else
- RTE_LOG(DEBUG, PMD,
+ RTE_LOG_DP(DEBUG, PMD,
"Unhandled CMP type %02x\n",
CMP_TYPE(txcmp));
raw_cons = NEXT_RAW_CMP(raw_cons);
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 4c16101b..5b097114 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -34,12 +34,12 @@
#ifndef _BNXT_TXR_H_
#define _BNXT_TXR_H_
+#include <rte_io.h>
+
#define MAX_TX_RINGS 16
#define BNXT_TX_PUSH_THRESH 92
-#define B_TX_DB(db, prod) \
- rte_smp_wmb(); \
- (*(uint32_t *)db = (DB_KEY_TX | prod))
+#define B_TX_DB(db, prod) rte_write32((DB_KEY_TX | (prod)), db)
struct bnxt_tx_ring_info {
uint16_t tx_prod;
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 504f2e8b..910c932d 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -58,13 +58,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_alb.c
SYMLINK-y-include += rte_eth_bond.h
SYMLINK-y-include += rte_eth_bond_8023ad.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_kvargs
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_cmdline
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_ring
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 2f7ae70c..7b863d6e 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -888,8 +888,8 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
RTE_ASSERT(port->tx_ring == NULL);
socket_id = rte_eth_devices[slave_id].data->numa_node;
- element_size = sizeof(struct slow_protocol_frame) + sizeof(struct rte_mbuf)
- + RTE_PKTMBUF_HEADROOM;
+ element_size = sizeof(struct slow_protocol_frame) +
+ RTE_PKTMBUF_HEADROOM;
/* The size of the mempool should be at least:
* the sum of the TX descriptors + BOND_MODE_8023AX_SLAVE_TX_PKTS */
@@ -900,11 +900,10 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
}
snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
- port->mbuf_pool = rte_mempool_create(mem_name,
- total_tx_desc, element_size,
- RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ? 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
- sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init,
- NULL, rte_pktmbuf_init, NULL, socket_id, MEMPOOL_F_NO_SPREAD);
+ port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc,
+ RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
+ 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
+ 0, element_size, socket_id);
/* Any memory allocation failure in initalization is critical because
* resources can't be free, so reinitialization is impossible. */
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 2a3893a1..36ec65d6 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -37,15 +37,13 @@
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
+#include <rte_vdev.h>
+#include <rte_kvargs.h>
#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"
-#define DEFAULT_POLLING_INTERVAL_10_MS (10)
-
-const char pmd_bond_driver_name[] = "rte_bond_pmd";
-
int
check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
{
@@ -54,7 +52,7 @@ check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
return -1;
/* return 0 if driver name matches */
- return eth_dev->data->drv_name != pmd_bond_driver_name;
+ return eth_dev->data->drv_name != pmd_bond_drv.driver.name;
}
int
@@ -164,172 +162,45 @@ number_of_sockets(void)
int
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
- struct bond_dev_private *internals = NULL;
- struct rte_eth_dev *eth_dev = NULL;
- uint32_t vlan_filter_bmp_size;
-
- /* now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
- */
+ struct bond_dev_private *internals;
+ char devargs[52];
+ uint8_t port_id;
+ int ret;
if (name == NULL) {
RTE_BOND_LOG(ERR, "Invalid name specified");
- goto err;
- }
-
- if (socket_id >= number_of_sockets()) {
- RTE_BOND_LOG(ERR,
- "Invalid socket id specified to create bonded device on.");
- goto err;
- }
-
- internals = rte_zmalloc_socket(name, sizeof(*internals), 0, socket_id);
- if (internals == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc internals on socket");
- goto err;
- }
-
- /* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name);
- if (eth_dev == NULL) {
- RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
- goto err;
- }
-
- eth_dev->data->dev_private = internals;
- eth_dev->data->nb_rx_queues = (uint16_t)1;
- eth_dev->data->nb_tx_queues = (uint16_t)1;
-
- TAILQ_INIT(&(eth_dev->link_intr_cbs));
-
- eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
-
- eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
- socket_id);
- if (eth_dev->data->mac_addrs == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
- goto err;
+ return -EINVAL;
}
- eth_dev->data->dev_started = 0;
- eth_dev->data->promiscuous = 0;
- eth_dev->data->scattered_rx = 0;
- eth_dev->data->all_multicast = 0;
-
- eth_dev->dev_ops = &default_dev_ops;
- eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
- RTE_ETH_DEV_DETACHABLE;
- eth_dev->driver = NULL;
- eth_dev->data->kdrv = RTE_KDRV_NONE;
- eth_dev->data->drv_name = pmd_bond_driver_name;
- eth_dev->data->numa_node = socket_id;
-
- rte_spinlock_init(&internals->lock);
-
- internals->port_id = eth_dev->data->port_id;
- internals->mode = BONDING_MODE_INVALID;
- internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
- internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
- internals->xmit_hash = xmit_l2_hash;
- internals->user_defined_mac = 0;
- internals->link_props_set = 0;
-
- internals->link_status_polling_enabled = 0;
-
- internals->link_status_polling_interval_ms = DEFAULT_POLLING_INTERVAL_10_MS;
- internals->link_down_delay_ms = 0;
- internals->link_up_delay_ms = 0;
-
- internals->slave_count = 0;
- internals->active_slave_count = 0;
- internals->rx_offload_capa = 0;
- internals->tx_offload_capa = 0;
- internals->candidate_max_rx_pktlen = 0;
- internals->max_rx_pktlen = 0;
+ ret = snprintf(devargs, sizeof(devargs),
+ "driver=net_bonding,mode=%d,socket_id=%d", mode, socket_id);
+ if (ret < 0 || ret >= (int)sizeof(devargs))
+ return -ENOMEM;
- /* Initially allow to choose any offload type */
- internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+ ret = rte_vdev_init(name, devargs);
+ if (ret)
+ return -ENOMEM;
- memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
- memset(internals->slaves, 0, sizeof(internals->slaves));
+ ret = rte_eth_dev_get_port_by_name(name, &port_id);
+ RTE_ASSERT(!ret);
- /* Set mode 4 default configuration */
- bond_mode_8023ad_setup(eth_dev, NULL);
- if (bond_ethdev_mode_set(eth_dev, mode)) {
- RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d",
- eth_dev->data->port_id, mode);
- goto err;
- }
-
- vlan_filter_bmp_size =
- rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
- internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
- RTE_CACHE_LINE_SIZE);
- if (internals->vlan_filter_bmpmem == NULL) {
- RTE_BOND_LOG(ERR,
- "Failed to allocate vlan bitmap for bonded device %u\n",
- eth_dev->data->port_id);
- goto err;
- }
-
- internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
- internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
- if (internals->vlan_filter_bmp == NULL) {
- RTE_BOND_LOG(ERR,
- "Failed to init vlan bitmap for bonded device %u\n",
- eth_dev->data->port_id);
- rte_free(internals->vlan_filter_bmpmem);
- goto err;
- }
-
- return eth_dev->data->port_id;
+ /*
+ * To make bond_ethdev_configure() happy we need to free the
+ * internals->kvlist here.
+ *
+ * Also see comment in bond_ethdev_configure().
+ */
+ internals = rte_eth_devices[port_id].data->dev_private;
+ rte_kvargs_free(internals->kvlist);
+ internals->kvlist = NULL;
-err:
- rte_free(internals);
- if (eth_dev != NULL) {
- rte_free(eth_dev->data->mac_addrs);
- rte_eth_dev_release_port(eth_dev);
- }
- return -1;
+ return port_id;
}
int
rte_eth_bond_free(const char *name)
{
- struct rte_eth_dev *eth_dev = NULL;
- struct bond_dev_private *internals;
-
- /* now free all data allocation - for eth_dev structure,
- * dummy pci driver and internal (private) data
- */
-
- /* find an ethdev entry */
- eth_dev = rte_eth_dev_allocated(name);
- if (eth_dev == NULL)
- return -ENODEV;
-
- internals = eth_dev->data->dev_private;
- if (internals->slave_count != 0)
- return -EBUSY;
-
- if (eth_dev->data->dev_started == 1) {
- bond_ethdev_stop(eth_dev);
- bond_ethdev_close(eth_dev);
- }
-
- eth_dev->dev_ops = NULL;
- eth_dev->rx_pkt_burst = NULL;
- eth_dev->tx_pkt_burst = NULL;
-
- internals = eth_dev->data->dev_private;
- rte_bitmap_free(internals->vlan_filter_bmp);
- rte_free(internals->vlan_filter_bmpmem);
- rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data->mac_addrs);
-
- rte_eth_dev_release_port(eth_dev);
-
- return 0;
+ return rte_vdev_uninit(name);
}
static int
diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c
index 02ecde64..e3bdad9d 100644
--- a/drivers/net/bonding/rte_eth_bond_args.c
+++ b/drivers/net/bonding/rte_eth_bond_args.c
@@ -47,22 +47,30 @@ const char *pmd_bond_init_valid_arguments[] = {
PMD_BOND_XMIT_POLICY_KVARG,
PMD_BOND_SOCKET_ID_KVARG,
PMD_BOND_MAC_ADDR_KVARG,
-
+ "driver",
NULL
};
static inline int
find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr)
{
+ struct rte_pci_device *pci_dev;
struct rte_pci_addr *eth_pci_addr;
unsigned i;
for (i = 0; i < rte_eth_dev_count(); i++) {
- if (rte_eth_devices[i].pci_dev == NULL)
+ /* Currently populated by rte_eth_copy_pci_info().
+ *
+ * TODO: Once the PCI bus has arrived we should have a better
+ * way to test for being a PCI device or not.
+ */
+ if (rte_eth_devices[i].data->kdrv == RTE_KDRV_UNKNOWN ||
+ rte_eth_devices[i].data->kdrv == RTE_KDRV_NONE)
continue;
- eth_pci_addr = &(rte_eth_devices[i].pci_dev->addr);
+ pci_dev = RTE_DEV_TO_PCI(rte_eth_devices[i].device);
+ eth_pci_addr = &pci_dev->addr;
if (pci_addr->bus == eth_pci_addr->bus &&
pci_addr->devid == eth_pci_addr->devid &&
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index a80b6fa9..82959abc 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,6 +36,7 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
@@ -51,6 +52,7 @@
#include "rte_eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10
+#define DEFAULT_POLLING_INTERVAL_10_MS (10)
#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
@@ -145,7 +147,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
uint16_t num_rx_total = 0; /* Total number of received packets */
uint8_t slaves[RTE_MAX_ETHPORTS];
- uint8_t slave_count;
+ uint8_t slave_count, idx;
uint8_t collecting; /* current slave collecting status */
const uint8_t promisc = internals->promiscuous_en;
@@ -159,12 +161,18 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
memcpy(slaves, internals->active_slaves,
sizeof(internals->active_slaves[0]) * slave_count);
+ idx = internals->active_slave;
+ if (idx >= slave_count) {
+ internals->active_slave = 0;
+ idx = 0;
+ }
for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
j = num_rx_total;
- collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);
+ collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[idx]],
+ COLLECTING);
/* Read packets from this slave */
- num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
+ num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
&bufs[num_rx_total], nb_pkts - num_rx_total);
for (k = j; k < 2 && k < num_rx_total; k++)
@@ -187,8 +195,8 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
if (hdr->ether_type == ether_type_slow_be) {
- bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
- bufs[j]);
+ bond_mode_8023ad_handle_slow_pkt(
+ internals, slaves[idx], bufs[j]);
} else
rte_pktmbuf_free(bufs[j]);
@@ -201,8 +209,11 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
} else
j++;
}
+ if (unlikely(++idx == slave_count))
+ idx = 0;
}
+ internals->active_slave = idx;
return num_rx_total;
}
@@ -900,7 +911,6 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
num_tx_total += num_send;
- num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
}
return num_tx_total;
@@ -1009,7 +1019,8 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
struct port *port = &mode_8023ad_ports[slaves[i]];
slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
- slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
+ slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS,
+ NULL);
slave_nb_pkts[i] = slave_slow_nb_pkts[i];
for (j = 0; j < slave_slow_nb_pkts[i]; j++)
@@ -1317,8 +1328,6 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
struct bond_rx_queue *bd_rx_q;
struct bond_tx_queue *bd_tx_q;
- uint16_t old_nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
- uint16_t old_nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
int errval;
uint16_t q_id;
@@ -1362,9 +1371,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
/* Setup Rx Queues */
- /* Use existing queues, if any */
- for (q_id = old_nb_rx_queues;
- q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
+ for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
@@ -1380,9 +1387,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
/* Setup Tx Queues */
- /* Use existing queues, if any */
- for (q_id = old_nb_tx_queues;
- q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
+ for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
@@ -1430,9 +1435,11 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
/* If lsc interrupt is set, check initial slave's link status */
- if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
+ slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
+ }
return 0;
}
@@ -1454,6 +1461,9 @@ slave_remove(struct bond_dev_private *internals,
(internals->slave_count - i - 1));
internals->slave_count--;
+
+ /* force reconfiguration of slave interfaces */
+ _rte_eth_dev_reset(slave_eth_dev);
}
static void
@@ -1653,7 +1663,22 @@ void
bond_ethdev_close(struct rte_eth_dev *dev)
{
struct bond_dev_private *internals = dev->data->dev_private;
+ uint8_t bond_port_id = internals->port_id;
+ int skipped = 0;
+
+ RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->data->name);
+ while (internals->slave_count != skipped) {
+ uint8_t port_id = internals->slaves[skipped].port_id;
+
+ rte_eth_dev_stop(port_id);
+ if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to remove port %d from bonded device "
+ "%s\n", port_id, dev->data->name);
+ skipped++;
+ }
+ }
bond_ethdev_free_queues(dev);
rte_bitmap_reset(internals->vlan_filter_bmp);
}
@@ -1668,14 +1693,14 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mac_addrs = 1;
- dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
- internals->candidate_max_rx_pktlen : 2048;
+ dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen
+ ? internals->candidate_max_rx_pktlen
+ : ETHER_MAX_JUMBO_FRAME_LEN;
dev_info->max_rx_queues = (uint16_t)128;
dev_info->max_tx_queues = (uint16_t)512;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
dev_info->rx_offload_capa = internals->rx_offload_capa;
dev_info->tx_offload_capa = internals->tx_offload_capa;
@@ -2235,16 +2260,133 @@ const struct eth_dev_ops default_dev_ops = {
};
static int
-bond_probe(const char *name, const char *params)
+bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
+{
+ const char *name = rte_vdev_device_name(dev);
+ uint8_t socket_id = dev->device.numa_node;
+ struct bond_dev_private *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ uint32_t vlan_filter_bmp_size;
+
+ /* now do all data allocation - for eth_dev structure, dummy pci driver
+ * and internal (private) data
+ */
+
+ if (socket_id >= number_of_sockets()) {
+ RTE_BOND_LOG(ERR,
+ "Invalid socket id specified to create bonded device on.");
+ goto err;
+ }
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
+ if (eth_dev == NULL) {
+ RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
+ goto err;
+ }
+
+ internals = eth_dev->data->dev_private;
+ eth_dev->data->nb_rx_queues = (uint16_t)1;
+ eth_dev->data->nb_tx_queues = (uint16_t)1;
+
+ eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
+ socket_id);
+ if (eth_dev->data->mac_addrs == NULL) {
+ RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
+ goto err;
+ }
+
+ eth_dev->dev_ops = &default_dev_ops;
+ eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+ RTE_ETH_DEV_DETACHABLE;
+
+ rte_spinlock_init(&internals->lock);
+
+ internals->port_id = eth_dev->data->port_id;
+ internals->mode = BONDING_MODE_INVALID;
+ internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
+ internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
+ internals->xmit_hash = xmit_l2_hash;
+ internals->user_defined_mac = 0;
+ internals->link_props_set = 0;
+
+ internals->link_status_polling_enabled = 0;
+
+ internals->link_status_polling_interval_ms =
+ DEFAULT_POLLING_INTERVAL_10_MS;
+ internals->link_down_delay_ms = 0;
+ internals->link_up_delay_ms = 0;
+
+ internals->slave_count = 0;
+ internals->active_slave_count = 0;
+ internals->rx_offload_capa = 0;
+ internals->tx_offload_capa = 0;
+ internals->candidate_max_rx_pktlen = 0;
+ internals->max_rx_pktlen = 0;
+
+ /* Initially allow to choose any offload type */
+ internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+
+ memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
+ memset(internals->slaves, 0, sizeof(internals->slaves));
+
+ /* Set mode 4 default configuration */
+ bond_mode_8023ad_setup(eth_dev, NULL);
+ if (bond_ethdev_mode_set(eth_dev, mode)) {
+ RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d",
+ eth_dev->data->port_id, mode);
+ goto err;
+ }
+
+ vlan_filter_bmp_size =
+ rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
+ internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
+ RTE_CACHE_LINE_SIZE);
+ if (internals->vlan_filter_bmpmem == NULL) {
+ RTE_BOND_LOG(ERR,
+ "Failed to allocate vlan bitmap for bonded device %u\n",
+ eth_dev->data->port_id);
+ goto err;
+ }
+
+ internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
+ internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
+ if (internals->vlan_filter_bmp == NULL) {
+ RTE_BOND_LOG(ERR,
+ "Failed to init vlan bitmap for bonded device %u\n",
+ eth_dev->data->port_id);
+ rte_free(internals->vlan_filter_bmpmem);
+ goto err;
+ }
+
+ return eth_dev->data->port_id;
+
+err:
+ rte_free(internals);
+ if (eth_dev != NULL) {
+ rte_free(eth_dev->data->mac_addrs);
+ rte_eth_dev_release_port(eth_dev);
+ }
+ return -1;
+}
+
+static int
+bond_probe(struct rte_vdev_device *dev)
{
+ const char *name;
struct bond_dev_private *internals;
struct rte_kvargs *kvlist;
uint8_t bonding_mode, socket_id;
int arg_count, port_id;
+ if (!dev)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(dev);
RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);
- kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
+ pmd_bond_init_valid_arguments);
if (kvlist == NULL)
return -1;
@@ -2281,8 +2423,10 @@ bond_probe(const char *name, const char *params)
socket_id = rte_socket_id();
}
+ dev->device.numa_node = socket_id;
+
/* Create link bonding eth device */
- port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
+ port_id = bond_alloc(dev, bonding_mode);
if (port_id < 0) {
RTE_LOG(ERR, EAL, "Failed to create socket %s in mode %u on "
"socket %u.\n", name, bonding_mode, socket_id);
@@ -2302,21 +2446,51 @@ parse_error:
}
static int
-bond_remove(const char *name)
+bond_remove(struct rte_vdev_device *dev)
{
- int ret;
+ struct rte_eth_dev *eth_dev;
+ struct bond_dev_private *internals;
+ const char *name;
- if (name == NULL)
+ if (!dev)
return -EINVAL;
+ name = rte_vdev_device_name(dev);
RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);
- /* free link bonding eth device */
- ret = rte_eth_bond_free(name);
- if (ret < 0)
- RTE_LOG(ERR, EAL, "Failed to free %s\n", name);
+ /* now free all data allocation - for eth_dev structure,
+ * dummy pci driver and internal (private) data
+ */
+
+ /* find an ethdev entry */
+ eth_dev = rte_eth_dev_allocated(name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ RTE_ASSERT(eth_dev->device == &dev->device);
- return ret;
+ internals = eth_dev->data->dev_private;
+ if (internals->slave_count != 0)
+ return -EBUSY;
+
+ if (eth_dev->data->dev_started == 1) {
+ bond_ethdev_stop(eth_dev);
+ bond_ethdev_close(eth_dev);
+ }
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ internals = eth_dev->data->dev_private;
+ rte_bitmap_free(internals->vlan_filter_bmp);
+ rte_free(internals->vlan_filter_bmpmem);
+ rte_free(eth_dev->data->dev_private);
+ rte_free(eth_dev->data->mac_addrs);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
}
/* this part will resolve the slave portids after all the other pdev and vdev
@@ -2566,12 +2740,12 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
return 0;
}
-static struct rte_vdev_driver bond_drv = {
+struct rte_vdev_driver pmd_bond_drv = {
.probe = bond_probe,
.remove = bond_remove,
};
-RTE_PMD_REGISTER_VDEV(net_bonding, bond_drv);
+RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index d95d440b..c8db0900 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,7 +63,7 @@
extern const char *pmd_bond_init_valid_arguments[];
-extern const char pmd_bond_driver_name[];
+extern struct rte_vdev_driver pmd_bond_drv;
/** Port Queue Mapping Structure */
struct bond_rx_queue {
@@ -144,6 +144,7 @@ struct bond_dev_private {
uint16_t nb_rx_queues; /**< Total number of rx queues */
uint16_t nb_tx_queues; /**< Total number of tx queues*/
+ uint8_t active_slave; /**< Next active_slave to poll */
uint8_t active_slave_count; /**< Number of active slaves */
uint8_t active_slaves[RTE_MAX_ETHPORTS]; /**< Active slave list */
diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile
index bfcc3159..7cef6279 100644
--- a/drivers/net/cxgbe/Makefile
+++ b/drivers/net/cxgbe/Makefile
@@ -81,9 +81,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_main.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += sge.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_net
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index 5e3bd509..26807900 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -37,6 +37,7 @@
#define __T4_ADAPTER_H__
#include <rte_mbuf.h>
+#include <rte_io.h>
#include "cxgbe_compat.h"
#include "t4_regs_values.h"
@@ -324,7 +325,7 @@ struct adapter {
int use_unpacked_mode; /* unpacked rx mode state */
};
-#define CXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define CXGBE_PCI_REG(reg) rte_read32(reg)
static inline uint64_t cxgbe_read_addr64(volatile void *addr)
{
@@ -350,16 +351,21 @@ static inline uint32_t cxgbe_read_addr(volatile void *addr)
#define CXGBE_READ_REG64(adap, reg) \
cxgbe_read_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)))
-#define CXGBE_PCI_REG_WRITE(reg, value) ({ \
- CXGBE_PCI_REG((reg)) = (value); })
+#define CXGBE_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))
+
+#define CXGBE_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((value), (reg))
#define CXGBE_WRITE_REG(adap, reg, value) \
CXGBE_PCI_REG_WRITE(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
+#define CXGBE_WRITE_REG_RELAXED(adap, reg, value) \
+ CXGBE_PCI_REG_WRITE_RELAXED(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
+
static inline uint64_t cxgbe_write_addr64(volatile void *addr, uint64_t val)
{
- CXGBE_PCI_REG(addr) = val;
- CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4)) = (val >> 32);
+ CXGBE_PCI_REG_WRITE(addr, val);
+ CXGBE_PCI_REG_WRITE(((volatile uint8_t *)(addr) + 4), (val >> 32));
return val;
}
@@ -383,7 +389,7 @@ static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
}
/**
- * t4_write_reg - write a HW register
+ * t4_write_reg - write a HW register with barrier
* @adapter: the adapter
* @reg_addr: the register address
* @val: the value to write
@@ -398,6 +404,22 @@ static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
}
/**
+ * t4_write_reg_relaxed - write a HW register with no barrier
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given HW register.
+ */
+static inline void t4_write_reg_relaxed(struct adapter *adapter, u32 reg_addr,
+ u32 val)
+{
+ CXGBE_DEBUG_REG(adapter, "setting register 0x%x to 0x%x\n", reg_addr,
+ val);
+ CXGBE_WRITE_REG_RELAXED(adapter, reg_addr, val);
+}
+
+/**
* t4_read_reg64 - read a 64-bit HW register
* @adapter: the adapter
* @reg_addr: the register address
@@ -456,7 +478,7 @@ static inline void t4_os_pci_write_cfg4(struct adapter *adapter, size_t addr,
{
u32 val32 = val;
- if (rte_eal_pci_write_config(adapter->pdev, &val32, sizeof(val32),
+ if (rte_pci_write_config(adapter->pdev, &val32, sizeof(val32),
addr) < 0)
dev_err(adapter, "Can't write to PCI config space\n");
}
@@ -472,7 +494,7 @@ static inline void t4_os_pci_write_cfg4(struct adapter *adapter, size_t addr,
static inline void t4_os_pci_read_cfg4(struct adapter *adapter, size_t addr,
u32 *val)
{
- if (rte_eal_pci_read_config(adapter->pdev, val, sizeof(*val),
+ if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
addr) < 0)
dev_err(adapter, "Can't read from PCI config space\n");
}
@@ -490,7 +512,7 @@ static inline void t4_os_pci_write_cfg2(struct adapter *adapter, size_t addr,
{
u16 val16 = val;
- if (rte_eal_pci_write_config(adapter->pdev, &val16, sizeof(val16),
+ if (rte_pci_write_config(adapter->pdev, &val16, sizeof(val16),
addr) < 0)
dev_err(adapter, "Can't write to PCI config space\n");
}
@@ -506,7 +528,7 @@ static inline void t4_os_pci_write_cfg2(struct adapter *adapter, size_t addr,
static inline void t4_os_pci_read_cfg2(struct adapter *adapter, size_t addr,
u16 *val)
{
- if (rte_eal_pci_read_config(adapter->pdev, val, sizeof(*val),
+ if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
addr) < 0)
dev_err(adapter, "Can't read from PCI config space\n");
}
@@ -522,7 +544,7 @@ static inline void t4_os_pci_read_cfg2(struct adapter *adapter, size_t addr,
static inline void t4_os_pci_read_cfg(struct adapter *adapter, size_t addr,
u8 *val)
{
- if (rte_eal_pci_read_config(adapter->pdev, val, sizeof(*val),
+ if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
addr) < 0)
dev_err(adapter, "Can't read from PCI config space\n");
}
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index c089b068..9dca8da1 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -359,6 +359,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
struct mbox_entry entry;
u32 pcie_fw = 0;
+ if (!temp)
+ return -ENOMEM;
+
if ((size & 15) || size > MBOX_LEN) {
free(temp);
return -EINVAL;
diff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h
index e68f8f59..1551cbf5 100644
--- a/drivers/net/cxgbe/cxgbe_compat.h
+++ b/drivers/net/cxgbe/cxgbe_compat.h
@@ -45,6 +45,7 @@
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_log.h>
+#include <rte_io.h>
#define dev_printf(level, fmt, args...) \
RTE_LOG(level, PMD, "rte_cxgbe_pmd: " fmt, ## args)
@@ -254,7 +255,7 @@ static inline unsigned long ilog2(unsigned long n)
static inline void writel(unsigned int val, volatile void __iomem *addr)
{
- *(volatile unsigned int *)addr = val;
+ rte_write32(val, addr);
}
static inline void writeq(u64 val, volatile void __iomem *addr)
@@ -263,4 +264,9 @@ static inline void writeq(u64 val, volatile void __iomem *addr)
writel(val >> 32, (void *)((uintptr_t)addr + 4));
}
+static inline void writel_relaxed(unsigned int val, volatile void __iomem *addr)
+{
+ rte_write32_relaxed(val, addr);
+}
+
#endif /* _CXGBE_COMPAT_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index b7f28ebb..34fed84a 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -57,6 +57,7 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
@@ -68,7 +69,7 @@
* Macros needed to support the PCI Device ID Table ...
*/
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
- static struct rte_pci_id cxgb4_pci_tbl[] = {
+ static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4
#define PCI_VENDOR_ID_CHELSIO 0x1425
@@ -147,6 +148,8 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
.nb_align = 1,
};
+ device_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+
device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
device_info->max_rx_queues = max_queues;
@@ -1005,7 +1008,7 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
adapter = rte_zmalloc(name, sizeof(*adapter), 0);
@@ -1037,16 +1040,25 @@ out_free_adapter:
return err;
}
-static struct eth_driver rte_cxgbe_pmd = {
- .pci_drv = {
- .id_table = cxgb4_pci_tbl,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_cxgbe_dev_init,
- .dev_private_size = sizeof(struct port_info),
+static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct port_info), eth_cxgbe_dev_init);
+}
+
+static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_cxgbe_pmd = {
+ .id_table = cxgb4_pci_tbl,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_cxgbe_pci_probe,
+ .remove = eth_cxgbe_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
+RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 345f9b03..1f230cd5 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -57,6 +57,7 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
@@ -1163,16 +1164,13 @@ int cxgbe_probe(struct adapter *adapter)
pi->eth_dev->data = data;
allocate_mac:
- pi->eth_dev->pci_dev = adapter->pdev;
+ pi->eth_dev->device = &adapter->pdev->device;
pi->eth_dev->data->dev_private = pi;
- pi->eth_dev->driver = adapter->eth_dev->driver;
pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
- rte_eth_copy_pci_info(pi->eth_dev, pi->eth_dev->pci_dev);
-
- TAILQ_INIT(&pi->eth_dev->link_intr_cbs);
+ rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
ETHER_ADDR_LEN, 0);
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 736f08ce..2f9e12c9 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -338,12 +338,12 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
* mechanism.
*/
if (unlikely(!q->bar2_addr)) {
- t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
- val | V_QID(q->cntxt_id));
+ t4_write_reg_relaxed(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+ val | V_QID(q->cntxt_id));
} else {
- writel(val | V_QID(q->bar2_qid),
- (void *)((uintptr_t)q->bar2_addr +
- SGE_UDB_KDOORBELL));
+ writel_relaxed(val | V_QID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr +
+ SGE_UDB_KDOORBELL));
/*
* This Write memory Barrier will force the write to
@@ -890,15 +890,11 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
struct sge_txq *q = &txq->q;
unsigned int flits, ndesc;
unsigned char type = 0;
- int credits, hw_cidx = ntohs(q->stat->cidx);
- int in_use = q->pidx - hw_cidx + flits_to_desc(q->coalesce.flits);
+ int credits;
/* use coal WR type 1 when no frags are present */
type = (mbuf->nb_segs == 1) ? 1 : 0;
- if (in_use < 0)
- in_use += q->size;
-
if (unlikely(type != q->coalesce.type && q->coalesce.idx))
ship_tx_pkt_coalesce_wr(adap, txq);
@@ -1645,7 +1641,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->size = cxgbe_roundup(iq->size, 16);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.driver.name,
+ eth_dev->data->drv_name,
fwevtq ? "fwq_ring" : "rx_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -1698,7 +1694,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->size = cxgbe_roundup(fl->size, 8);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.driver.name,
+ eth_dev->data->drv_name,
fwevtq ? "fwq_ring" : "fl_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -1894,7 +1890,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.driver.name, "tx_ring",
+ eth_dev->data->drv_name, "tx_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
diff --git a/config/defconfig_tile-tilegx-linuxapp-gcc b/drivers/net/dpaa2/Makefile
index 5a50793d..ce65542a 100644
--- a/config/defconfig_tile-tilegx-linuxapp-gcc
+++ b/drivers/net/dpaa2/Makefile
@@ -1,6 +1,7 @@
# BSD LICENSE
#
-# Copyright (C) EZchip Semiconductor 2015.
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright (c) 2016 NXP. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -12,7 +13,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of EZchip Semiconductor nor the names of its
+# * Neither the name of Freescale Semiconductor, Inc nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
@@ -28,46 +29,42 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "common_linuxapp"
+include $(RTE_SDK)/mk/rte.vars.mk
-CONFIG_RTE_MACHINE="tilegx"
+#
+# library name
+#
+LIB = librte_pmd_dpaa2.a
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
+CFLAGS += -O0 -g
+CFLAGS += "-Wno-error"
+else
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+endif
-CONFIG_RTE_ARCH="tile"
-CONFIG_RTE_ARCH_TILE=y
-CONFIG_RTE_ARCH_64=y
-CONFIG_RTE_ARCH_STRICT_ALIGN=y
-CONFIG_RTE_FORCE_INTRINSICS=y
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
-CONFIG_RTE_TOOLCHAIN="gcc"
-CONFIG_RTE_TOOLCHAIN_GCC=y
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa2_version.map
-CONFIG_RTE_MEMPOOL_ALIGN=128
+# library version
+LIBABIVER := 1
-CONFIG_RTE_LIBRTE_MPIPE_PMD=y
-CONFIG_RTE_LIBRTE_MPIPE_PMD_DEBUG=n
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpni.c
-# Disable things that we don't support or need
-CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
-CONFIG_RTE_EAL_IGB_UIO=n
-CONFIG_RTE_EAL_VFIO=n
-CONFIG_RTE_LIBRTE_KNI=n
-CONFIG_RTE_KNI_KMOD=n
-CONFIG_RTE_LIBRTE_XEN_DOM0=n
-CONFIG_RTE_LIBRTE_IGB_PMD=n
-CONFIG_RTE_LIBRTE_EM_PMD=n
-CONFIG_RTE_LIBRTE_IXGBE_PMD=n
-CONFIG_RTE_LIBRTE_I40E_PMD=n
-CONFIG_RTE_LIBRTE_FM10K_PMD=n
-CONFIG_RTE_LIBRTE_VIRTIO_PMD=n
-CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
-CONFIG_RTE_LIBRTE_ENIC_PMD=n
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_mempool_dpaa2
-# This following libraries are not available on the tile architecture.
-# So they're turned off.
-CONFIG_RTE_LIBRTE_LPM=n
-CONFIG_RTE_LIBRTE_ACL=n
-CONFIG_RTE_LIBRTE_SCHED=n
-CONFIG_RTE_LIBRTE_PORT=n
-CONFIG_RTE_LIBRTE_TABLE=n
-CONFIG_RTE_LIBRTE_PIPELINE=n
-CONFIG_RTE_LIBRTE_CXGBE_PMD=n
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
new file mode 100644
index 00000000..3dc60ccc
--- /dev/null
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -0,0 +1,344 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_logs.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+
+#include "../dpaa2_ethdev.h"
+
+static void
+dpaa2_distset_to_dpkg_profile_cfg(
+ uint32_t req_dist_set,
+ struct dpkg_profile_cfg *kg_cfg);
+
+int
+dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
+ uint32_t req_dist_set)
+{
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct dpni_rx_tc_dist_cfg tc_cfg;
+ struct dpkg_profile_cfg kg_cfg;
+ void *p_params;
+ int ret, tc_index = 0;
+
+ p_params = rte_malloc(
+ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+ if (!p_params) {
+ RTE_LOG(ERR, PMD, "Memory unavaialble\n");
+ return -ENOMEM;
+ }
+ memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+
+ dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+ tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
+ tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+ ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Unable to prepare extract parameters\n");
+ rte_free(p_params);
+ return ret;
+ }
+
+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
+ &tc_cfg);
+ rte_free(p_params);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Setting distribution for Rx failed with"
+ " err code: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int dpaa2_remove_flow_dist(
+ struct rte_eth_dev *eth_dev,
+ uint8_t tc_index)
+{
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct dpni_rx_tc_dist_cfg tc_cfg;
+ struct dpkg_profile_cfg kg_cfg;
+ void *p_params;
+ int ret;
+
+ p_params = rte_malloc(
+ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+ if (!p_params) {
+ RTE_LOG(ERR, PMD, "Memory unavaialble\n");
+ return -ENOMEM;
+ }
+ memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+
+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+ tc_cfg.dist_size = 0;
+ tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;
+
+ ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Unable to prepare extract parameters\n");
+ rte_free(p_params);
+ return ret;
+ }
+
+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
+ &tc_cfg);
+ rte_free(p_params);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Setting distribution for Rx failed with"
+ " err code: %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+static void
+dpaa2_distset_to_dpkg_profile_cfg(
+ uint32_t req_dist_set,
+ struct dpkg_profile_cfg *kg_cfg)
+{
+ uint32_t loop = 0, i = 0, dist_field = 0;
+ int l2_configured = 0, l3_configured = 0;
+ int l4_configured = 0, sctp_configured = 0;
+
+ memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
+ while (req_dist_set) {
+ if (req_dist_set % 2 != 0) {
+ dist_field = 1U << loop;
+ switch (dist_field) {
+ case ETH_RSS_L2_PAYLOAD:
+
+ if (l2_configured)
+ break;
+ l2_configured = 1;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_ETH;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_ETH_TYPE;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+ break;
+
+ case ETH_RSS_IPV4:
+ case ETH_RSS_FRAG_IPV4:
+ case ETH_RSS_NONFRAG_IPV4_OTHER:
+ case ETH_RSS_IPV6:
+ case ETH_RSS_FRAG_IPV6:
+ case ETH_RSS_NONFRAG_IPV6_OTHER:
+ case ETH_RSS_IPV6_EX:
+
+ if (l3_configured)
+ break;
+ l3_configured = 1;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_IP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_IP_SRC;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_IP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_IP_DST;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_IP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_IP_PROTO;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ kg_cfg->num_extracts++;
+ i++;
+ break;
+
+ case ETH_RSS_NONFRAG_IPV4_TCP:
+ case ETH_RSS_NONFRAG_IPV6_TCP:
+ case ETH_RSS_NONFRAG_IPV4_UDP:
+ case ETH_RSS_NONFRAG_IPV6_UDP:
+ case ETH_RSS_IPV6_TCP_EX:
+ case ETH_RSS_IPV6_UDP_EX:
+
+ if (l4_configured)
+ break;
+ l4_configured = 1;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_TCP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_TCP_PORT_SRC;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_TCP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_TCP_PORT_SRC;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+ break;
+
+ case ETH_RSS_NONFRAG_IPV4_SCTP:
+ case ETH_RSS_NONFRAG_IPV6_SCTP:
+
+ if (sctp_configured)
+ break;
+ sctp_configured = 1;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_SCTP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_SCTP_PORT_SRC;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_SCTP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_SCTP_PORT_DST;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+ break;
+
+ default:
+ PMD_DRV_LOG(WARNING, "Bad flow distribution"
+ " option %x\n", dist_field);
+ }
+ }
+ req_dist_set = req_dist_set >> 1;
+ loop++;
+ }
+ kg_cfg->num_extracts = i;
+}
+
+int
+dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
+ void *blist)
+{
+ /* Function to attach a DPNI with a buffer pool list. Buffer pool list
+ * handle is passed in blist.
+ */
+ int32_t retcode;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct dpni_pools_cfg bpool_cfg;
+ struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
+ struct dpni_buffer_layout layout;
+ int tot_size;
+
+ /* ... rx buffer layout .
+ * Check alignment for buffer layouts first
+ */
+
+ /* ... rx buffer layout ... */
+ tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM;
+ tot_size = RTE_ALIGN_CEIL(tot_size,
+ DPAA2_PACKET_LAYOUT_ALIGN);
+
+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+ layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+
+ layout.data_head_room =
+ tot_size - DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION;
+ retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_RX, &layout);
+ if (retcode) {
+ PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout\n",
+ retcode);
+ return retcode;
+ }
+
+ /*Attach buffer pool to the network interface as described by the user*/
+ bpool_cfg.num_dpbp = 1;
+ bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
+ bpool_cfg.pools[0].backup_pool = 0;
+ bpool_cfg.pools[0].buffer_size =
+ RTE_ALIGN_CEIL(bp_list->buf_pool.size,
+ 256 /*DPAA2_PACKET_LAYOUT_ALIGN*/);
+
+ retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
+ if (retcode != 0) {
+ PMD_INIT_LOG(ERR, "Error in attaching the buffer pool list"
+ " bpid = %d Error code = %d\n",
+ bpool_cfg.pools[0].dpbp_id, retcode);
+ return retcode;
+ }
+
+ priv->bp_list = bp_list;
+ return 0;
+}
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h b/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
new file mode 100644
index 00000000..9324c6a3
--- /dev/null
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
@@ -0,0 +1,257 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * DPNI packet parse results - implementation internal
+ */
+
+#ifndef _DPAA2_HW_DPNI_ANNOT_H_
+#define _DPAA2_HW_DPNI_ANNOT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Annotation valid bits in FD FRC */
+#define DPAA2_FD_FRC_FASV 0x8000
+#define DPAA2_FD_FRC_FAEADV 0x4000
+#define DPAA2_FD_FRC_FAPRV 0x2000
+#define DPAA2_FD_FRC_FAIADV 0x1000
+#define DPAA2_FD_FRC_FASWOV 0x0800
+#define DPAA2_FD_FRC_FAICFDV 0x0400
+
+/* Annotation bits in FD CTRL */
+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
+#define DPAA2_FD_CTRL_PTA 0x00800000
+#define DPAA2_FD_CTRL_PTV1 0x00400000
+
+/* Frame annotation status */
+struct dpaa2_fas {
+ uint8_t reserved;
+ uint8_t ppid;
+ __le16 ifpid;
+ __le32 status;
+} __packed;
+
+/**
+ * HW Packet Annotation Register structures
+ */
+struct dpaa2_annot_hdr {
+ /**< word1: Frame Annotation Status (8 bytes)*/
+ uint64_t word1;
+
+ /**< word2: Time Stamp (8 bytes)*/
+ uint64_t word2;
+
+ /**< word3: Next Hdr + FAF Extension + FAF (2 + 2 + 4 bytes)*/
+ uint64_t word3;
+
+ /**< word4: Frame Annotation Flags-FAF (8 bytes) */
+ uint64_t word4;
+
+ /**< word5:
+ * ShimOffset_1 + ShimOffset_2 + IPPIDOffset + EthOffset +
+ * LLC+SNAPOffset + VLANTCIOffset_1 + VLANTCIOffset_n +
+ * LastETypeOffset (1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes)
+ */
+ uint64_t word5;
+
+ /**< word6:
+ * PPPoEOffset + MPLSOffset_1 + MPLSOffset_n + ARPorIPOffset_1
+ * + IPOffset_norMInEncapO + GREOffset + L4Offset +
+ * GTPorESPorIPSecOffset(1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes)
+ */
+ uint64_t word6;
+
+ /**< word7:
+ * RoutingHdrOfset1 + RoutingHdrOfset2 + NxtHdrOffset
+ * + IPv6FragOffset + GrossRunningSum
+ * + RunningSum(1 + 1 + 1 + 1 + 2 + 2 bytes)
+ */
+ uint64_t word7;
+
+ /**< word8:
+ * ParseErrorcode + Soft Parsing Context (1 + 7 bytes)
+ */
+ uint64_t word8;
+};
+
+/**
+ * Internal Macros to get/set Packet annotation header
+ */
+
+/** General Macro to define a particular bit position*/
+#define BIT_POS(x) ((uint64_t)1 << ((x)))
+/** Set a bit in the variable */
+#define BIT_SET_AT_POS(var, pos) ((var) |= (pos))
+/** Reset the bit in the variable */
+#define BIT_RESET_AT_POS(var, pos) ((var) &= ~(pos))
+/** Check the bit is set in the variable */
+#define BIT_ISSET_AT_POS(var, pos) (((var) & (pos)) ? 1 : 0)
+/**
+ * Macrso to define bit position in word3
+ */
+#define NEXT_HDR(var) ((uint64_t)(var) & 0xFFFF000000000000)
+#define FAF_EXTN_IPV6_ROUTE_HDR_PRESENT(var) BIT_POS(16)
+#define FAF_EXTN_RESERVED(var) ((uint64_t)(var) & 0x00007FFF00000000)
+#define FAF_USER_DEFINED_RESERVED(var) ((uint64_t)(var) & 0x00000000FF000000)
+#define SHIM_SHELL_SOFT_PARSING_ERRROR BIT_POS(23)
+#define PARSING_ERROR BIT_POS(22)
+#define L2_ETH_MAC_PRESENT BIT_POS(21)
+#define L2_ETH_MAC_UNICAST BIT_POS(20)
+#define L2_ETH_MAC_MULTICAST BIT_POS(19)
+#define L2_ETH_MAC_BROADCAST BIT_POS(18)
+#define L2_ETH_FRAME_IS_BPDU BIT_POS(17)
+#define L2_ETH_FCOE_PRESENT BIT_POS(16)
+#define L2_ETH_FIP_PRESENT BIT_POS(15)
+#define L2_ETH_PARSING_ERROR BIT_POS(14)
+#define L2_LLC_SNAP_PRESENT BIT_POS(13)
+#define L2_UNKNOWN_LLC_OUI BIT_POS(12)
+#define L2_LLC_SNAP_ERROR BIT_POS(11)
+#define L2_VLAN_1_PRESENT BIT_POS(10)
+#define L2_VLAN_N_PRESENT BIT_POS(9)
+#define L2_VLAN_CFI_BIT_PRESENT BIT_POS(8)
+#define L2_VLAN_PARSING_ERROR BIT_POS(7)
+#define L2_PPPOE_PPP_PRESENT BIT_POS(6)
+#define L2_PPPOE_PPP_PARSING_ERROR BIT_POS(5)
+#define L2_MPLS_1_PRESENT BIT_POS(4)
+#define L2_MPLS_N_PRESENT BIT_POS(3)
+#define L2_MPLS_PARSING_ERROR BIT_POS(2)
+#define L2_ARP_PRESENT BIT_POS(1)
+#define L2_ARP_PARSING_ERROR BIT_POS(0)
+/**
+ * Macrso to define bit position in word4
+ */
+#define L2_UNKNOWN_PROTOCOL BIT_POS(63)
+#define L2_SOFT_PARSING_ERROR BIT_POS(62)
+#define L3_IPV4_1_PRESENT BIT_POS(61)
+#define L3_IPV4_1_UNICAST BIT_POS(60)
+#define L3_IPV4_1_MULTICAST BIT_POS(59)
+#define L3_IPV4_1_BROADCAST BIT_POS(58)
+#define L3_IPV4_N_PRESENT BIT_POS(57)
+#define L3_IPV4_N_UNICAST BIT_POS(56)
+#define L3_IPV4_N_MULTICAST BIT_POS(55)
+#define L3_IPV4_N_BROADCAST BIT_POS(54)
+#define L3_IPV6_1_PRESENT BIT_POS(53)
+#define L3_IPV6_1_UNICAST BIT_POS(52)
+#define L3_IPV6_1_MULTICAST BIT_POS(51)
+#define L3_IPV6_N_PRESENT BIT_POS(50)
+#define L3_IPV6_N_UNICAST BIT_POS(49)
+#define L3_IPV6_N_MULTICAST BIT_POS(48)
+#define L3_IP_1_OPT_PRESENT BIT_POS(47)
+#define L3_IP_1_UNKNOWN_PROTOCOL BIT_POS(46)
+#define L3_IP_1_MORE_FRAGMENT BIT_POS(45)
+#define L3_IP_1_FIRST_FRAGMENT BIT_POS(44)
+#define L3_IP_1_PARSING_ERROR BIT_POS(43)
+#define L3_IP_N_OPT_PRESENT BIT_POS(42)
+#define L3_IP_N_UNKNOWN_PROTOCOL BIT_POS(41)
+#define L3_IP_N_MORE_FRAGMENT BIT_POS(40)
+#define L3_IP_N_FIRST_FRAGMENT BIT_POS(39)
+#define L3_PROTO_ICMP_PRESENT BIT_POS(38)
+#define L3_PROTO_IGMP_PRESENT BIT_POS(37)
+#define L3_PROTO_ICMPV6_PRESENT BIT_POS(36)
+#define L3_PROTO_UDP_LIGHT_PRESENT BIT_POS(35)
+#define L3_IP_N_PARSING_ERROR BIT_POS(34)
+#define L3_MIN_ENCAP_PRESENT BIT_POS(33)
+#define L3_MIN_ENCAP_SBIT_PRESENT BIT_POS(32)
+#define L3_MIN_ENCAP_PARSING_ERROR BIT_POS(31)
+#define L3_PROTO_GRE_PRESENT BIT_POS(30)
+#define L3_PROTO_GRE_RBIT_PRESENT BIT_POS(29)
+#define L3_PROTO_GRE_PARSING_ERROR BIT_POS(28)
+#define L3_IP_UNKNOWN_PROTOCOL BIT_POS(27)
+#define L3_SOFT_PARSING_ERROR BIT_POS(26)
+#define L3_PROTO_UDP_PRESENT BIT_POS(25)
+#define L3_PROTO_UDP_PARSING_ERROR BIT_POS(24)
+#define L3_PROTO_TCP_PRESENT BIT_POS(23)
+#define L3_PROTO_TCP_OPT_PRESENT BIT_POS(22)
+#define L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT BIT_POS(21)
+#define L3_PROTO_TCP_CTRL_BIT_3_TO_5_PRESENT BIT_POS(20)
+#define L3_PROTO_TCP_PARSING_ERROR BIT_POS(19)
+#define L3_PROTO_IPSEC_PRESENT BIT_POS(18)
+#define L3_PROTO_IPSEC_ESP_PRESENT BIT_POS(17)
+#define L3_PROTO_IPSEC_AH_PRESENT BIT_POS(16)
+#define L3_PROTO_IPSEC_PARSING_ERROR BIT_POS(15)
+#define L3_PROTO_SCTP_PRESENT BIT_POS(14)
+#define L3_PROTO_SCTP_PARSING_ERROR BIT_POS(13)
+#define L3_PROTO_DCCP_PRESENT BIT_POS(12)
+#define L3_PROTO_DCCP_PARSING_ERROR BIT_POS(11)
+#define L4_UNKNOWN_PROTOCOL BIT_POS(10)
+#define L4_SOFT_PARSING_ERROR BIT_POS(9)
+#define L3_PROTO_GTP_PRESENT BIT_POS(8)
+#define L3_PROTO_GTP_PARSING_ERROR BIT_POS(7)
+#define L3_PROTO_ESP_PRESENT BIT_POS(6)
+#define L3_PROTO_ESP_PARSING_ERROR BIT_POS(5)
+#define L3_PROTO_ISCSI_PRESENT BIT_POS(4)
+#define L3_PROTO_CAPWAN__CTRL_PRESENT BIT_POS(3)
+#define L3_PROTO_CAPWAN__DATA_PRESENT BIT_POS(2)
+#define L5_SOFT_PARSING_ERROR BIT_POS(1)
+#define L3_IPV6_ROUTE_HDR_PRESENT BIT_POS(0)
+
+/* Debug frame, otherwise supposed to be discarded */
+#define DPAA2_ETH_FAS_DISC 0x80000000
+/* MACSEC frame */
+#define DPAA2_ETH_FAS_MS 0x40000000
+#define DPAA2_ETH_FAS_PTP 0x08000000
+/* Ethernet multicast frame */
+#define DPAA2_ETH_FAS_MC 0x04000000
+/* Ethernet broadcast frame */
+#define DPAA2_ETH_FAS_BC 0x02000000
+#define DPAA2_ETH_FAS_KSE 0x00040000
+#define DPAA2_ETH_FAS_EOFHE 0x00020000
+#define DPAA2_ETH_FAS_MNLE 0x00010000
+#define DPAA2_ETH_FAS_TIDE 0x00008000
+#define DPAA2_ETH_FAS_PIEE 0x00004000
+/* Frame length error */
+#define DPAA2_ETH_FAS_FLE 0x00002000
+/* Frame physical error; our favourite pastime */
+#define DPAA2_ETH_FAS_FPE 0x00001000
+#define DPAA2_ETH_FAS_PTE 0x00000080
+#define DPAA2_ETH_FAS_ISP 0x00000040
+#define DPAA2_ETH_FAS_PHE 0x00000020
+#define DPAA2_ETH_FAS_BLE 0x00000010
+/* L3 csum validation performed */
+#define DPAA2_ETH_FAS_L3CV 0x00000008
+/* L3 csum error */
+#define DPAA2_ETH_FAS_L3CE 0x00000004
+/* L4 csum validation performed */
+#define DPAA2_ETH_FAS_L4CV 0x00000002
+/* L4 csum error */
+#define DPAA2_ETH_FAS_L4CE 0x00000001
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
new file mode 100644
index 00000000..45764420
--- /dev/null
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -0,0 +1,1035 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <rte_fslmc.h>
+
+#include <fslmc_logs.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+#include <dpaa2_hw_dpio.h>
+
+#include "dpaa2_ethdev.h"
+
+static struct rte_dpaa2_driver rte_dpaa2_pmd;
+
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ * - Pointer to the structure rte_eth_dev to read from.
+ * - Pointer to the buffer to be saved with the link status.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &dev->data->dev_link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ * - Pointer to the structure rte_eth_dev to read from.
+ * - Pointer to the buffer to be saved with the link status.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &dev->data->dev_link;
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev_info->if_index = priv->hw_id;
+
+ dev_info->max_mac_addrs = priv->max_mac_filters;
+ dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
+ dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
+ dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ dev_info->speed_capa = ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G |
+ ETH_LINK_SPEED_10G;
+}
+
+static int
+dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ uint16_t dist_idx;
+ uint32_t vq_id;
+ struct dpaa2_queue *mc_q, *mcq;
+ uint32_t tot_queues;
+ int i;
+ struct dpaa2_queue *dpaa2_q;
+
+ PMD_INIT_FUNC_TRACE();
+
+ tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
+ mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (!mc_q) {
+ PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
+ return -1;
+ }
+
+ for (i = 0; i < priv->nb_rx_queues; i++) {
+ mc_q->dev = dev;
+ priv->rx_vq[i] = mc_q++;
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
+ dpaa2_q->q_storage = rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!dpaa2_q->q_storage)
+ goto fail;
+
+ memset(dpaa2_q->q_storage, 0,
+ sizeof(struct queue_storage_info_t));
+ if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+ goto fail;
+ }
+
+ for (i = 0; i < priv->nb_tx_queues; i++) {
+ mc_q->dev = dev;
+ mc_q->flow_id = DPNI_NEW_FLOW_ID;
+ priv->tx_vq[i] = mc_q++;
+ }
+
+ vq_id = 0;
+ for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[DPAA2_DEF_TC];
+ dist_idx++) {
+ mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
+ mcq->tc_index = DPAA2_DEF_TC;
+ mcq->flow_id = dist_idx;
+ vq_id++;
+ }
+
+ return 0;
+fail:
+ i -= 1;
+ mc_q = priv->rx_vq[0];
+ while (i >= 0) {
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
+ dpaa2_free_dq_storage(dpaa2_q->q_storage);
+ rte_free(dpaa2_q->q_storage);
+ priv->rx_vq[i--] = NULL;
+ }
+ rte_free(mc_q);
+ return -1;
+}
+
+static int
+dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *eth_conf = &data->dev_conf;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Check for correct configuration */
+ if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
+ data->nb_rx_queues > 1) {
+ PMD_INIT_LOG(ERR, "Distribution is not enabled, "
+ "but Rx queues more than 1\n");
+ return -1;
+ }
+
+ if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ /* Return in case number of Rx queues is 1 */
+ if (data->nb_rx_queues == 1)
+ return 0;
+ ret = dpaa2_setup_flow_dist(dev,
+ eth_conf->rx_adv_conf.rss_conf.rss_hf);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "unable to set flow distribution."
+ "please check queue config\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/* Function to setup RX flow information. It contains traffic class ID,
+ * flow ID, destination configuration etc.
+ */
+static int
+dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct dpaa2_queue *dpaa2_q;
+ struct dpni_queue cfg;
+ uint8_t options = 0;
+ uint8_t flow_id;
+ uint32_t bpid;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
+ dev, rx_queue_id, mb_pool, rx_conf);
+
+ if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
+ bpid = mempool_to_bpid(mb_pool);
+ ret = dpaa2_attach_bp_list(priv,
+ rte_dpaa2_bpid_info[bpid].bp_list);
+ if (ret)
+ return ret;
+ }
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
+ dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
+
+ /*Get the tc id and flow id from given VQ id*/
+ flow_id = rx_queue_id % priv->num_dist_per_tc[dpaa2_q->tc_index];
+ memset(&cfg, 0, sizeof(struct dpni_queue));
+
+ options = options | DPNI_QUEUE_OPT_USER_CTX;
+ cfg.user_context = (uint64_t)(dpaa2_q);
+
+ /*if ls2088 or rev2 device, enable the stashing */
+ if ((qbman_get_version() & 0xFFFF0000) > QMAN_REV_4000) {
+ options |= DPNI_QUEUE_OPT_FLC;
+ cfg.flc.stash_control = true;
+ cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
+ /* 00 00 00 - last 6 bit represent annotation, context stashing,
+ * data stashing setting 01 01 00 (0x14) to enable
+ * 1 line annotation, 1 line context
+ */
+ cfg.flc.value |= 0x14;
+ }
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
+ dpaa2_q->tc_index, flow_id, options, &cfg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
+ return -1;
+ }
+
+ dev->data->rx_queues[rx_queue_id] = dpaa2_q;
+ return 0;
+}
+
+static int
+dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
+ priv->tx_vq[tx_queue_id];
+ struct fsl_mc_io *dpni = priv->hw;
+ struct dpni_queue tx_conf_cfg;
+ struct dpni_queue tx_flow_cfg;
+ uint8_t options = 0, flow_id;
+ uint32_t tc_id;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if queue already configured */
+ if (dpaa2_q->flow_id != DPNI_NEW_FLOW_ID)
+ return 0;
+
+ memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
+ memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
+
+ if (priv->num_tc == 1) {
+ tc_id = 0;
+ flow_id = tx_queue_id % priv->num_dist_per_tc[tc_id];
+ } else {
+ tc_id = tx_queue_id;
+ flow_id = 0;
+ }
+
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
+ tc_id, flow_id, options, &tx_flow_cfg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
+ "tc_id=%d, flow =%d ErrorCode = %x\n",
+ tc_id, flow_id, -ret);
+ return -1;
+ }
+
+ dpaa2_q->flow_id = flow_id;
+
+ if (tx_queue_id == 0) {
+ /*Set tx-conf and error configuration*/
+ ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
+ priv->token,
+ DPNI_CONF_DISABLE);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
+ " ErrorCode = %x", ret);
+ return -1;
+ }
+ }
+ dpaa2_q->tc_index = tc_id;
+
+ dev->data->tx_queues[tx_queue_id] = dpaa2_q;
+ return 0;
+}
+
+static void
+dpaa2_dev_rx_queue_release(void *q __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static void
+dpaa2_dev_tx_queue_release(void *q __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static const uint32_t *
+dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+ /*todo -= add more types */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == dpaa2_dev_rx)
+ return ptypes;
+ return NULL;
+}
+
+static int
+dpaa2_dev_start(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct dpaa2_dev_priv *priv = data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct dpni_queue cfg;
+ struct dpni_error_cfg err_cfg;
+ uint16_t qdid;
+ struct dpni_queue_id qid;
+ struct dpaa2_queue *dpaa2_q;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
+ ret, priv->hw_id);
+ return ret;
+ }
+
+ ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX, &qdid);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
+ return ret;
+ }
+ priv->qdid = qdid;
+
+ for (i = 0; i < data->nb_rx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
+ ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_RX, dpaa2_q->tc_index,
+ dpaa2_q->flow_id, &cfg, &qid);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to get flow "
+ "information Error code = %d\n", ret);
+ return ret;
+ }
+ dpaa2_q->fqid = qid.fqid;
+ }
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_RX_L3_CSUM, true);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret);
+ return ret;
+ }
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_RX_L4_CSUM, true);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to get RX l4 csum:Error = %d\n", ret);
+ return ret;
+ }
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_TX_L3_CSUM, true);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret);
+ return ret;
+ }
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_TX_L4_CSUM, true);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to get TX l4 csum:Error = %d\n", ret);
+ return ret;
+ }
+
+ /*checksum errors, send them to normal path and set it in annotation */
+ err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
+
+ err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
+ err_cfg.set_frame_annotation = true;
+
+ ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
+ priv->token, &err_cfg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:"
+ "code = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC.
+ */
+static void
+dpaa2_dev_stop(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int ret;
+ struct rte_eth_link link;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
+ ret, priv->hw_id);
+ return;
+ }
+
+ /* clear the recorded link status */
+ memset(&link, 0, sizeof(link));
+ dpaa2_dev_atomic_write_link_status(dev, &link);
+}
+
+static void
+dpaa2_dev_close(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Clean the device first */
+ ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
+ " error code %d\n", ret);
+ return;
+ }
+}
+
+static void
+dpaa2_dev_promiscuous_enable(
+ struct rte_eth_dev *dev)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL");
+ return;
+ }
+
+ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD, "Unable to enable promiscuous mode %d", ret);
+}
+
+static void
+dpaa2_dev_promiscuous_disable(
+ struct rte_eth_dev *dev)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL");
+ return;
+ }
+
+ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD, "Unable to disable promiscuous mode %d", ret);
+}
+
+static int
+dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL");
+ return -EINVAL;
+ }
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
+ return -EINVAL;
+
+ /* Set the Max Rx frame length as 'mtu' +
+ * Maximum Ethernet header length
+ */
+ ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
+ mtu + ETH_VLAN_HLEN);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "setting the max frame length failed");
+ return -1;
+ }
+ PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu);
+ return 0;
+}
+
+static
+void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int32_t retcode;
+ uint8_t page0 = 0, page1 = 1, page2 = 2;
+ union dpni_statistics value;
+
+ memset(&value, 0, sizeof(union dpni_statistics));
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!dpni) {
+ RTE_LOG(ERR, PMD, "dpni is NULL");
+ return;
+ }
+
+ if (!stats) {
+ RTE_LOG(ERR, PMD, "stats is NULL");
+ return;
+ }
+
+ /*Get Counters from page_0*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page0, &value);
+ if (retcode)
+ goto err;
+
+ stats->ipackets = value.page_0.ingress_all_frames;
+ stats->ibytes = value.page_0.ingress_all_bytes;
+
+ /*Get Counters from page_1*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page1, &value);
+ if (retcode)
+ goto err;
+
+ stats->opackets = value.page_1.egress_all_frames;
+ stats->obytes = value.page_1.egress_all_bytes;
+
+ /*Get Counters from page_2*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page2, &value);
+ if (retcode)
+ goto err;
+
+ stats->ierrors = value.page_2.ingress_discarded_frames;
+ stats->oerrors = value.page_2.egress_discarded_frames;
+ stats->imissed = value.page_2.ingress_nobuffer_discards;
+
+ return;
+
+err:
+ RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
+ return;
+};
+
+static
+void dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int32_t retcode;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL");
+ return;
+ }
+
+ retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
+ if (retcode)
+ goto error;
+
+ return;
+
+error:
+ RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
+ return;
+};
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+dpaa2_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct rte_eth_link link, old;
+ struct dpni_link_state state = {0};
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "error : dpni is NULL");
+ return 0;
+ }
+ memset(&old, 0, sizeof(old));
+ dpaa2_dev_atomic_read_link_status(dev, &old);
+
+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
+ if (ret < 0) {
+ RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d", ret);
+ return -1;
+ }
+
+ if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
+ RTE_LOG(DEBUG, PMD, "No change in status\n");
+ return -1;
+ }
+
+ memset(&link, 0, sizeof(struct rte_eth_link));
+ link.link_status = state.up;
+ link.link_speed = state.rate;
+
+ if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ else
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ dpaa2_dev_atomic_write_link_status(dev, &link);
+
+ if (link.link_status)
+ PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
+ else
+ PMD_DRV_LOG(INFO, "Port %d Link is Down\n", dev->data->port_id);
+ return 0;
+}
+
+static struct eth_dev_ops dpaa2_ethdev_ops = {
+ .dev_configure = dpaa2_eth_dev_configure,
+ .dev_start = dpaa2_dev_start,
+ .dev_stop = dpaa2_dev_stop,
+ .dev_close = dpaa2_dev_close,
+ .promiscuous_enable = dpaa2_dev_promiscuous_enable,
+ .promiscuous_disable = dpaa2_dev_promiscuous_disable,
+ .link_update = dpaa2_dev_link_update,
+ .stats_get = dpaa2_dev_stats_get,
+ .stats_reset = dpaa2_dev_stats_reset,
+ .dev_infos_get = dpaa2_dev_info_get,
+ .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
+ .mtu_set = dpaa2_dev_mtu_set,
+ .rx_queue_setup = dpaa2_dev_rx_queue_setup,
+ .rx_queue_release = dpaa2_dev_rx_queue_release,
+ .tx_queue_setup = dpaa2_dev_tx_queue_setup,
+ .tx_queue_release = dpaa2_dev_tx_queue_release,
+};
+
+static int
+dpaa2_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_device *dev = eth_dev->device;
+ struct rte_dpaa2_device *dpaa2_dev;
+ struct fsl_mc_io *dpni_dev;
+ struct dpni_attr attr;
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct dpni_buffer_layout layout;
+ int i, ret, hw_id;
+ int tot_size;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
+
+ hw_id = dpaa2_dev->object_id;
+
+ dpni_dev = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io));
+ if (!dpni_dev) {
+ PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
+ return -1;
+ }
+
+ dpni_dev->regs = rte_mcp_ptr_list[0];
+ ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure in opening dpni@%d device with"
+ " error code %d\n", hw_id, ret);
+ return -1;
+ }
+
+ /* Clean the device first */
+ ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure cleaning dpni@%d device with"
+ " error code %d\n", hw_id, ret);
+ return -1;
+ }
+
+ ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure in getting dpni@%d attribute, "
+ " error code %d\n", hw_id, ret);
+ return -1;
+ }
+
+ priv->num_tc = attr.num_tcs;
+ for (i = 0; i < attr.num_tcs; i++) {
+ priv->num_dist_per_tc[i] = attr.num_queues;
+ break;
+ }
+
+ /* Distribution is per Tc only,
+ * so choosing RX queues from default TC only
+ */
+ priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];
+
+ if (attr.num_tcs == 1)
+ priv->nb_tx_queues = attr.num_queues;
+ else
+ priv->nb_tx_queues = attr.num_tcs;
+
+ PMD_INIT_LOG(DEBUG, "num_tc %d", priv->num_tc);
+ PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues);
+
+ priv->hw = dpni_dev;
+ priv->hw_id = hw_id;
+ priv->options = attr.options;
+ priv->max_mac_filters = attr.mac_filter_entries;
+ priv->max_vlan_filters = attr.vlan_filter_entries;
+ priv->flags = 0;
+
+ /* Allocate memory for hardware structure for queues */
+ ret = dpaa2_alloc_rx_tx_queues(eth_dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n");
+ return -ret;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("dpni",
+ ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+ "store MAC addresses",
+ ETHER_ADDR_LEN * attr.mac_filter_entries);
+ return -ENOMEM;
+ }
+
+ ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
+ priv->token,
+ (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
+ if (ret) {
+ PMD_INIT_LOG(ERR, "DPNI get mac address failed:"
+ " Error Code = %d\n", ret);
+ return -ret;
+ }
+
+ /* ... rx buffer layout ... */
+ tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM;
+ tot_size = RTE_ALIGN_CEIL(tot_size,
+ DPAA2_PACKET_LAYOUT_ALIGN);
+
+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+
+ layout.pass_frame_status = 1;
+ layout.data_head_room = tot_size
+ - DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION;
+ layout.private_data_size = DPAA2_FD_PTA_SIZE;
+ layout.pass_parser_result = 1;
+ PMD_INIT_LOG(DEBUG, "Tot_size = %d, head room = %d, private = %d",
+ tot_size, layout.data_head_room, layout.private_data_size);
+ ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_RX, &layout);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout", ret);
+ return -1;
+ }
+
+ /* ... tx buffer layout ... */
+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ layout.pass_frame_status = 1;
+ ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX, &layout);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer"
+ " layout", ret);
+ return -1;
+ }
+
+ /* ... tx-conf and error buffer layout ... */
+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ layout.pass_frame_status = 1;
+ ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX_CONFIRM, &layout);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer"
+ " layout", ret);
+ return -1;
+ }
+
+ eth_dev->dev_ops = &dpaa2_ethdev_ops;
+ eth_dev->data->drv_name = rte_dpaa2_pmd.driver.name;
+
+ eth_dev->rx_pkt_burst = dpaa2_dev_rx;
+ eth_dev->tx_pkt_burst = dpaa2_dev_tx;
+ rte_fslmc_vfio_dmamap();
+
+ return 0;
+}
+
+static int
+dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int i, ret;
+ struct dpaa2_queue *dpaa2_q;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (!dpni) {
+ PMD_INIT_LOG(WARNING, "Already closed or not started");
+ return -1;
+ }
+
+ dpaa2_dev_close(eth_dev);
+
+ if (priv->rx_vq[0]) {
+ /* cleaning up queue storage */
+ for (i = 0; i < priv->nb_rx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
+ if (dpaa2_q->q_storage)
+ rte_free(dpaa2_q->q_storage);
+ }
+ /*free the all queue memory */
+ rte_free(priv->rx_vq[0]);
+ priv->rx_vq[0] = NULL;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ if (eth_dev->data->mac_addrs) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ }
+
+ /*Close the device at underlying layer*/
+ ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure closing dpni device with"
+ " error code %d\n", ret);
+ }
+
+ /*Free the allocated memory for ethernet private data and dpni*/
+ priv->hw = NULL;
+ free(dpni);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ return 0;
+}
+
+static int
+rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ char ethdev_name[RTE_ETH_NAME_MAX_LEN];
+
+ int diag;
+
+ sprintf(ethdev_name, "dpni-%d", dpaa2_dev->object_id);
+
+ eth_dev = rte_eth_dev_allocate(ethdev_name);
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev->data->dev_private = rte_zmalloc(
+ "ethdev private structure",
+ sizeof(struct dpaa2_dev_priv),
+ RTE_CACHE_LINE_SIZE);
+ if (eth_dev->data->dev_private == NULL) {
+ PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
+ " private port data\n");
+ rte_eth_dev_release_port(eth_dev);
+ return -ENOMEM;
+ }
+ }
+ eth_dev->device = &dpaa2_dev->device;
+ dpaa2_dev->eth_dev = eth_dev;
+ eth_dev->data->rx_mbuf_alloc_failed = 0;
+
+ /* Invoke PMD device initialization function */
+ diag = dpaa2_dev_init(eth_dev);
+ if (diag == 0)
+ return 0;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+ rte_eth_dev_release_port(eth_dev);
+ return diag;
+}
+
+static int
+rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_eth_dev *eth_dev;
+
+ eth_dev = dpaa2_dev->eth_dev;
+ dpaa2_dev_uninit(eth_dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_dpaa2_driver rte_dpaa2_pmd = {
+ .drv_type = DPAA2_MC_DPNI_DEVID,
+ .probe = rte_dpaa2_probe,
+ .remove = rte_dpaa2_remove,
+};
+
+RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
new file mode 100644
index 00000000..7196398f
--- /dev/null
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -0,0 +1,83 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_ETHDEV_H
+#define _DPAA2_ETHDEV_H
+
+#include <mc/fsl_dpni.h>
+#include <mc/fsl_mc_sys.h>
+
+#define DPAA2_MIN_RX_BUF_SIZE 512
+#define DPAA2_MAX_RX_PKT_LEN 10240 /*WRIOP support*/
+
+#define MAX_TCS DPNI_MAX_TC
+#define MAX_RX_QUEUES 16
+#define MAX_TX_QUEUES 16
+
+/*default tc to be used for ,congestion, distribution etc configuration. */
+#define DPAA2_DEF_TC 0
+
+/* Size of the input SMMU mapped memory required by MC */
+#define DIST_PARAM_IOVA_SIZE 256
+
+struct dpaa2_dev_priv {
+ void *hw;
+ int32_t hw_id;
+ int32_t qdid;
+ uint16_t token;
+ uint8_t nb_tx_queues;
+ uint8_t nb_rx_queues;
+ void *rx_vq[MAX_RX_QUEUES];
+ void *tx_vq[MAX_TX_QUEUES];
+
+ struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */
+ uint32_t options;
+ uint16_t num_dist_per_tc[MAX_TCS];
+ uint8_t max_mac_filters;
+ uint8_t max_vlan_filters;
+ uint8_t num_tc;
+ uint8_t flags; /*dpaa2 config flags */
+};
+
+int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
+ uint32_t req_dist_set);
+
+int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
+ uint8_t tc_index);
+
+int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);
+
+uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+
+#endif /* _DPAA2_ETHDEV_H */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
new file mode 100644
index 00000000..c5d49cbe
--- /dev/null
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -0,0 +1,422 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_logs.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_dpio.h>
+#include <dpaa2_hw_mempool.h>
+
+#include "dpaa2_ethdev.h"
+#include "base/dpaa2_hw_dpni_annot.h"
+
+static inline uint32_t __attribute__((hot))
+dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
+{
+ uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
+ struct dpaa2_annot_hdr *annotation =
+ (struct dpaa2_annot_hdr *)hw_annot_addr;
+
+ PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4);
+
+ if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
+ pkt_type = RTE_PTYPE_L2_ETHER_ARP;
+ goto parse_done;
+ } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
+ pkt_type = RTE_PTYPE_L2_ETHER;
+ } else {
+ goto parse_done;
+ }
+
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
+ L3_IPV4_N_PRESENT)) {
+ pkt_type |= RTE_PTYPE_L3_IPV4;
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
+ L3_IP_N_OPT_PRESENT))
+ pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
+
+ } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
+ L3_IPV6_N_PRESENT)) {
+ pkt_type |= RTE_PTYPE_L3_IPV6;
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
+ L3_IP_N_OPT_PRESENT))
+ pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
+ } else {
+ goto parse_done;
+ }
+
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
+ L3_IP_1_MORE_FRAGMENT |
+ L3_IP_N_FIRST_FRAGMENT |
+ L3_IP_N_MORE_FRAGMENT)) {
+ pkt_type |= RTE_PTYPE_L4_FRAG;
+ goto parse_done;
+ } else {
+ pkt_type |= RTE_PTYPE_L4_NONFRAG;
+ }
+
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
+ pkt_type |= RTE_PTYPE_L4_UDP;
+
+ else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
+ pkt_type |= RTE_PTYPE_L4_TCP;
+
+ else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
+ pkt_type |= RTE_PTYPE_L4_SCTP;
+
+ else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
+ pkt_type |= RTE_PTYPE_L4_ICMP;
+
+ else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
+ pkt_type |= RTE_PTYPE_UNKNOWN;
+
+parse_done:
+ return pkt_type;
+}
+
+static inline void __attribute__((hot))
+dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
+{
+ struct dpaa2_annot_hdr *annotation =
+ (struct dpaa2_annot_hdr *)hw_annot_addr;
+
+ if (BIT_ISSET_AT_POS(annotation->word3,
+ L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
+ mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+
+ if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+}
+
+static inline struct rte_mbuf *__attribute__((hot))
+eth_fd_to_mbuf(const struct qbman_fd *fd)
+{
+ struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+
+ /* need to repopulated some of the fields,
+ * as they may have changed in last transmission
+ */
+ mbuf->nb_segs = 1;
+ mbuf->ol_flags = 0;
+ mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
+ mbuf->data_len = DPAA2_GET_FD_LEN(fd);
+ mbuf->pkt_len = mbuf->data_len;
+
+ /* Parse the packet */
+ /* parse results are after the private - sw annotation area */
+ mbuf->packet_type = dpaa2_dev_rx_parse(
+ (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_FD_PTA_SIZE);
+
+ dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FD_ADDR(fd)) +
+ DPAA2_FD_PTA_SIZE, mbuf);
+
+ mbuf->next = NULL;
+ rte_mbuf_refcnt_set(mbuf, 1);
+
+ PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
+ "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
+ mbuf, mbuf->buf_addr, mbuf->data_off,
+ DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
+
+ return mbuf;
+}
+
+static void __attribute__ ((noinline)) __attribute__((hot))
+eth_mbuf_to_fd(struct rte_mbuf *mbuf,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ /*Resetting the buffer pool id and offset field*/
+ fd->simple.bpid_offset = 0;
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FD_LEN(fd, mbuf->data_len);
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
+ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
+
+ PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
+ "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
+ mbuf, mbuf->buf_addr, mbuf->data_off,
+ DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
+}
+
+
+static inline int __attribute__((hot))
+eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_mbuf *m;
+ void *mb = NULL;
+
+ if (rte_dpaa2_mbuf_alloc_bulk(
+ rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
+ PMD_TX_LOG(WARNING, "Unable to allocated DPAA2 buffer");
+ rte_pktmbuf_free(mbuf);
+ return -1;
+ }
+ m = (struct rte_mbuf *)mb;
+ memcpy((char *)m->buf_addr + mbuf->data_off,
+ (void *)((char *)mbuf->buf_addr + mbuf->data_off),
+ mbuf->pkt_len);
+
+ /* Copy required fields */
+ m->data_off = mbuf->data_off;
+ m->ol_flags = mbuf->ol_flags;
+ m->packet_type = mbuf->packet_type;
+ m->tx_offload = mbuf->tx_offload;
+
+ /*Resetting the buffer pool id and offset field*/
+ fd->simple.bpid_offset = 0;
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m));
+ DPAA2_SET_FD_LEN(fd, mbuf->data_len);
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
+ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
+
+ PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
+ (void *)mbuf, mbuf->buf_addr);
+
+ PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+ /*free the original packet */
+ rte_pktmbuf_free(mbuf);
+
+ return 0;
+}
+
+uint16_t
+dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /* Function is responsible to receive frames for a given device and VQ*/
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+ struct qbman_result *dq_storage;
+ uint32_t fqid = dpaa2_q->fqid;
+ int ret, num_rx = 0;
+ uint8_t is_last = 0, status;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd;
+ struct qbman_pull_desc pulldesc;
+ struct rte_eth_dev *dev = dpaa2_q->dev;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+ dq_storage = dpaa2_q->q_storage->dq_storage[0];
+
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc,
+ (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
+ DPAA2_DQRR_RING_SIZE : nb_pkts);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ /* todo optimization - we can have dq_storage_phys available*/
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+
+ /*Issue a volatile dequeue command. */
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ PMD_RX_LOG(ERR, "VDQ command is not issued."
+ "QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ };
+
+ /* Receive the packets till Last Dequeue entry is found with
+ * respect to the above issues PULL command.
+ */
+ while (!is_last) {
+ struct rte_mbuf *mbuf;
+ /*Check if the previous issued command is completed.
+ * Also seems like the SWP is shared between the
+ * Ethernet Driver and the SEC driver.
+ */
+ while (!qbman_check_command_complete(swp, dq_storage))
+ ;
+ /* Loop until the dq_storage is updated with
+ * new token by QBMAN
+ */
+ while (!qbman_result_has_new_result(swp, dq_storage))
+ ;
+ /* Check whether Last Pull command is Expired and
+ * setting Condition for Loop termination
+ */
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ is_last = 1;
+ /* Check for valid frame. */
+ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
+ continue;
+ }
+
+ fd = qbman_result_DQ_fd(dq_storage);
+ mbuf = (struct rte_mbuf *)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FD_ADDR(fd)
+ - rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+ /* Prefeth mbuf */
+ rte_prefetch0(mbuf);
+ /* Prefetch Annotation address for the parse results */
+ rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd)
+ + DPAA2_FD_PTA_SIZE + 16));
+
+ bufs[num_rx] = eth_fd_to_mbuf(fd);
+ bufs[num_rx]->port = dev->data->port_id;
+
+ num_rx++;
+ dq_storage++;
+ } /* End of Packet Rx loop */
+
+ dpaa2_q->rx_pkts += num_rx;
+
+ /*Return the total number of packets received to DPAA2 app*/
+ return num_rx;
+}
+
+/*
+ * Callback to handle sending packets through WRIOP based interface
+ */
+uint16_t
+dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /* Function to transmit the frames to given device and VQ*/
+ uint32_t loop;
+ int32_t ret;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send;
+ struct rte_mempool *mp;
+ struct qbman_eq_desc eqdesc;
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+ struct qbman_swp *swp;
+ uint16_t num_tx = 0;
+ uint16_t bpid;
+ struct rte_eth_dev *dev = dpaa2_q->dev;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid);
+
+ /*Prepare enqueue descriptor*/
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+ qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
+ dpaa2_q->flow_id, dpaa2_q->tc_index);
+
+ /*Clear the unused FD fields before sending*/
+ while (nb_pkts) {
+ frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+ fd_arr[loop].simple.frc = 0;
+ DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
+ DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
+ mp = (*bufs)->pool;
+ /* Not a hw_pkt pool allocated frame */
+ if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
+ PMD_TX_LOG(ERR, "non hw offload bufffer ");
+ /* alloc should be from the default buffer pool
+ * attached to this interface
+ */
+ if (priv->bp_list) {
+ bpid = priv->bp_list->buf_pool.bpid;
+ } else {
+ PMD_TX_LOG(ERR, "errr: why no bpool"
+ " attached");
+ num_tx = 0;
+ goto skip_tx;
+ }
+ if (eth_copy_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid)) {
+ bufs++;
+ continue;
+ }
+ } else {
+ bpid = mempool_to_bpid(mp);
+ eth_mbuf_to_fd(*bufs, &fd_arr[loop], bpid);
+ }
+ bufs++;
+ }
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qbman_swp_send_multiple(swp, &eqdesc,
+ &fd_arr[loop], frames_to_send - loop);
+ }
+
+ num_tx += frames_to_send;
+ dpaa2_q->tx_pkts += frames_to_send;
+ nb_pkts -= frames_to_send;
+ }
+skip_tx:
+ return num_tx;
+}
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
new file mode 100644
index 00000000..33306140
--- /dev/null
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -0,0 +1,739 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpni.h>
+#include <fsl_dpni_cmd.h>
+
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
+ uint8_t *key_cfg_buf)
+{
+ int i, j;
+ int offset = 0;
+ int param = 1;
+ uint64_t *params = (uint64_t *)key_cfg_buf;
+
+ if (!key_cfg_buf || !cfg)
+ return -EINVAL;
+
+ params[0] |= mc_enc(0, 8, cfg->num_extracts);
+ params[0] = cpu_to_le64(params[0]);
+
+ if (cfg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->num_extracts; i++) {
+ switch (cfg->extracts[i].type) {
+ case DPKG_EXTRACT_FROM_HDR:
+ params[param] |= mc_enc(0, 8,
+ cfg->extracts[i].extract.from_hdr.prot);
+ params[param] |= mc_enc(8, 4,
+ cfg->extracts[i].extract.from_hdr.type);
+ params[param] |= mc_enc(16, 8,
+ cfg->extracts[i].extract.from_hdr.size);
+ params[param] |= mc_enc(24, 8,
+ cfg->extracts[i].extract.
+ from_hdr.offset);
+ params[param] |= mc_enc(32, 32,
+ cfg->extracts[i].extract.
+ from_hdr.field);
+ params[param] = cpu_to_le64(params[param]);
+ param++;
+ params[param] |= mc_enc(0, 8,
+ cfg->extracts[i].extract.
+ from_hdr.hdr_index);
+ break;
+ case DPKG_EXTRACT_FROM_DATA:
+ params[param] |= mc_enc(16, 8,
+ cfg->extracts[i].extract.
+ from_data.size);
+ params[param] |= mc_enc(24, 8,
+ cfg->extracts[i].extract.
+ from_data.offset);
+ params[param] = cpu_to_le64(params[param]);
+ param++;
+ break;
+ case DPKG_EXTRACT_FROM_PARSE:
+ params[param] |= mc_enc(16, 8,
+ cfg->extracts[i].extract.
+ from_parse.size);
+ params[param] |= mc_enc(24, 8,
+ cfg->extracts[i].extract.
+ from_parse.offset);
+ params[param] = cpu_to_le64(params[param]);
+ param++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ params[param] |= mc_enc(
+ 24, 8, cfg->extracts[i].num_of_byte_masks);
+ params[param] |= mc_enc(32, 4, cfg->extracts[i].type);
+ params[param] = cpu_to_le64(params[param]);
+ param++;
+ for (offset = 0, j = 0;
+ j < DPKG_NUM_OF_MASKS;
+ offset += 16, j++) {
+ params[param] |= mc_enc(
+ (offset), 8, cfg->extracts[i].masks[j].mask);
+ params[param] |= mc_enc(
+ (offset + 8), 8,
+ cfg->extracts[i].masks[j].offset);
+ }
+ params[param] = cpu_to_le64(params[param]);
+ param++;
+ }
+ return 0;
+}
+
+int dpni_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpni_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ DPNI_CMD_OPEN(cmd, dpni_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return 0;
+}
+
+int dpni_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpni_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ DPNI_CMD_CREATE(cmd, cfg);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+
+ return 0;
+}
+
+int dpni_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ /* set object id to destroy */
+ CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_pools_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_POOLS(cmd, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_IS_ENABLED(cmd, *en);
+
+ return 0;
+}
+
+int dpni_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_ATTR(cmd, attr);
+
+ return 0;
+}
+
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_error_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_BUFFER_LAYOUT(cmd, qtype);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_BUFFER_LAYOUT(cmd, layout);
+
+ return 0;
+}
+
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_BUFFER_LAYOUT(cmd, qtype, layout);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_offload type,
+ uint32_t config)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_OFFLOAD(cmd, type, config);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_offload type,
+ uint32_t *config)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_OFFLOAD(cmd, type);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_OFFLOAD(cmd, *config);
+
+ return 0;
+}
+
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint16_t *qdid)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_QDID(cmd, qtype);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_QDID(cmd, *qdid);
+
+ return 0;
+}
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_link_state *state)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_LINK_STATE(cmd, state);
+
+ return 0;
+}
+
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t max_frame_length)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t *max_frame_length)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, *max_frame_length);
+
+ return 0;
+}
+
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_UNICAST_PROMISC(cmd, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_UNICAST_PROMISC(cmd, *en);
+
+ return 0;
+}
+
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr);
+
+ return 0;
+}
+
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_confirmation_mode mode)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONFIRMATION_MODE,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_TX_CONFIRMATION_MODE(cmd, mode);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ DPNI_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+
+ return 0;
+}
+
+int dpni_set_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ uint8_t options,
+ const struct dpni_queue *queue)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_QUEUE(cmd, qtype, tc, index, options, queue);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ struct dpni_queue *queue,
+ struct dpni_queue_id *qid)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_QUEUE(cmd, qtype, tc, index);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_QUEUE(cmd, queue, qid);
+
+ return 0;
+}
+
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t page,
+ union dpni_statistics *stat)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_STATISTICS(cmd, page);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_STATISTICS(cmd, stat);
+
+ return 0;
+}
+
+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/dpaa2/mc/fsl_dpkg.h b/drivers/net/dpaa2/mc/fsl_dpkg.h
new file mode 100644
index 00000000..3e0f4b0e
--- /dev/null
+++ b/drivers/net/dpaa2/mc/fsl_dpkg.h
@@ -0,0 +1,184 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPKG_H_
+#define __FSL_DPKG_H_
+
+#include <fsl_net.h>
+
+/* Data Path Key Generator API
+ * Contains initialization APIs and runtime APIs for the Key Generator
+ */
+
+/** Key Generator properties */
+
+/**
+ * Number of masks per key extraction
+ */
+#define DPKG_NUM_OF_MASKS 4
+/**
+ * Number of extractions per key profile
+ */
+#define DPKG_MAX_NUM_OF_EXTRACTS 10
+
+/**
+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
+ * @DPKG_FULL_FIELD: Extract a full field
+ */
+enum dpkg_extract_from_hdr_type {
+ DPKG_FROM_HDR = 0,
+ DPKG_FROM_FIELD = 1,
+ DPKG_FULL_FIELD = 2
+};
+
+/**
+ * enum dpkg_extract_type - Enumeration for selecting extraction type
+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
+ * e.g. can be used to extract header existence;
+ * please refer to 'Parse Result definition' section in the parser BG
+ */
+enum dpkg_extract_type {
+ DPKG_EXTRACT_FROM_HDR = 0,
+ DPKG_EXTRACT_FROM_DATA = 1,
+ DPKG_EXTRACT_FROM_PARSE = 3
+};
+
+/**
+ * struct dpkg_mask - A structure for defining a single extraction mask
+ * @mask: Byte mask for the extracted content
+ * @offset: Offset within the extracted content
+ */
+struct dpkg_mask {
+ uint8_t mask;
+ uint8_t offset;
+};
+
+/**
+ * struct dpkg_extract - A structure for defining a single extraction
+ * @type: Determines how the union below is interpreted:
+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
+ * @extract: Selects extraction method
+ * @num_of_byte_masks: Defines the number of valid entries in the array below;
+ * This is also the number of bytes to be used as masks
+ * @masks: Masks parameters
+ */
+struct dpkg_extract {
+ enum dpkg_extract_type type;
+ /**
+ * union extract - Selects extraction method
+ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ */
+ union {
+ /**
+ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @prot: Any of the supported headers
+ * @type: Defines the type of header extraction:
+ * DPKG_FROM_HDR: use size & offset below;
+ * DPKG_FROM_FIELD: use field, size and offset below;
+ * DPKG_FULL_FIELD: use field below
+ * @field: One of the supported fields (NH_FLD_)
+ *
+ * @size: Size in bytes
+ * @offset: Byte offset
+ * @hdr_index: Clear for cases not listed below;
+ * Used for protocols that may have more than a single
+ * header, 0 indicates an outer header;
+ * Supported protocols (possible values):
+ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
+ * NET_PROT_IP(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
+ */
+
+ struct {
+ enum net_prot prot;
+ enum dpkg_extract_from_hdr_type type;
+ uint32_t field;
+ uint8_t size;
+ uint8_t offset;
+ uint8_t hdr_index;
+ } from_hdr;
+ /**
+ * struct from_data
+ * Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @size: Size in bytes
+ * @offset: Byte offset
+ */
+ struct {
+ uint8_t size;
+ uint8_t offset;
+ } from_data;
+
+ /**
+ * struct from_parse
+ * Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ * @size: Size in bytes
+ * @offset: Byte offset
+ */
+ struct {
+ uint8_t size;
+ uint8_t offset;
+ } from_parse;
+ } extract;
+
+ uint8_t num_of_byte_masks;
+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
+};
+
+/**
+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
+ * profile (rule)
+ * @num_extracts: Defines the number of valid entries in the array below
+ * @extracts: Array of required extractions
+ */
+struct dpkg_profile_cfg {
+ uint8_t num_extracts;
+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
+};
+
+#endif /* __FSL_DPKG_H_ */
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
new file mode 100644
index 00000000..ef14f858
--- /dev/null
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -0,0 +1,1217 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPNI_H
+#define __FSL_DPNI_H
+
+#include <fsl_dpkg.h>
+
+struct fsl_mc_io;
+
+/**
+ * Data Path Network Interface API
+ * Contains initialization APIs and runtime control APIs for DPNI
+ */
+
+/** General DPNI macros */
+
+/**
+ * Maximum number of traffic classes
+ */
+#define DPNI_MAX_TC 8
+/**
+ * Maximum number of buffer pools per DPNI
+ */
+#define DPNI_MAX_DPBP 8
+/**
+ * Maximum number of storage-profiles per DPNI
+ */
+#define DPNI_MAX_SP 2
+
+/**
+ * All traffic classes considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TCS (uint8_t)(-1)
+/**
+ * All flows within traffic class considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TC_FLOWS (uint16_t)(-1)
+/**
+ * Generate new flow ID; see dpni_set_queue()
+ */
+#define DPNI_NEW_FLOW_ID (uint16_t)(-1)
+/**
+ * Tx traffic is always released to a buffer pool on transmit, there are no
+ * resources allocated to have the frames confirmed back to the source after
+ * transmission.
+ */
+#define DPNI_OPT_TX_FRM_RELEASE 0x000001
+/**
+ * Disables support for MAC address filtering for addresses other than primary
+ * MAC address. This affects both unicast and multicast. Promiscuous mode can
+ * still be enabled/disabled for both unicast and multicast. If promiscuous mode
+ * is disabled, only traffic matching the primary MAC address will be accepted.
+ */
+#define DPNI_OPT_NO_MAC_FILTER 0x000002
+/**
+ * Allocate policers for this DPNI. They can be used to rate-limit traffic per
+ * traffic class (TC) basis.
+ */
+#define DPNI_OPT_HAS_POLICING 0x000004
+/**
+ * Congestion can be managed in several ways, allowing the buffer pool to
+ * deplete on ingress, taildrop on each queue or use congestion groups for sets
+ * of queues. If set, it configures a single congestion groups across all TCs.
+ * If reset, a congestion group is allocated for each TC. Only relevant if the
+ * DPNI has multiple traffic classes.
+ */
+#define DPNI_OPT_SHARED_CONGESTION 0x000008
+/**
+ * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
+ * look-ups are exact match. Note that TCAM is not available on LS1088 and its
+ * variants. Setting this bit on these SoCs will trigger an error.
+ */
+#define DPNI_OPT_HAS_KEY_MASKING 0x000010
+/**
+ * Disables the flow steering table.
+ */
+#define DPNI_OPT_NO_FS 0x000020
+
+/**
+ * dpni_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpni_id: DPNI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpni_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpni_id,
+ uint16_t *token);
+
+/**
+ * dpni_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpni_cfg - Structure representing DPNI configuration
+ * @mac_addr: Primary MAC address
+ * @adv: Advanced parameters; default is all zeros;
+ * use this structure to change default settings
+ */
+struct dpni_cfg {
+ /**
+ * @options: Any combination of the following options:
+ * DPNI_OPT_TX_FRM_RELEASE
+ * DPNI_OPT_NO_MAC_FILTER
+ * DPNI_OPT_HAS_POLICING
+ * DPNI_OPT_SHARED_CONGESTION
+ * DPNI_OPT_HAS_KEY_MASKING
+ * DPNI_OPT_NO_FS
+ * @fs_entries: Number of entries in the flow steering table.
+ * This table is used to select the ingress queue for
+ * ingress traffic, targeting a GPP core or another.
+ * In addition it can be used to discard traffic that
+ * matches the set rule. It is either an exact match table
+ * or a TCAM table, depending on DPNI_OPT_ HAS_KEY_MASKING
+ * bit in OPTIONS field. This field is ignored if
+ * DPNI_OPT_NO_FS bit is set in OPTIONS field. Otherwise,
+ * value 0 defaults to 64. Maximum supported value is 1024.
+ * Note that the total number of entries is limited on the
+ * SoC to as low as 512 entries if TCAM is used.
+ * @vlan_filter_entries: Number of entries in the VLAN address filtering
+ * table. This is an exact match table used to filter
+ * ingress traffic based on VLAN IDs. Value 0 disables VLAN
+ * filtering. Maximum supported value is 16.
+ * @mac_filter_entries: Number of entries in the MAC address filtering
+ * table. This is an exact match table and allows both
+ * unicast and multicast entries. The primary MAC address
+ * of the network interface is not part of this table,
+ * this contains only entries in addition to it. This
+ * field is ignored if DPNI_OPT_ NO_MAC_FILTER is set in
+ * OPTIONS field. Otherwise, value 0 defaults to 80.
+ * Maximum supported value is 80.
+ * @num_queues: Number of Tx and Rx queues used for traffic
+ * distribution. This is orthogonal to QoS and is only
+ * used to distribute traffic to multiple GPP cores.
+ * This configuration affects the number of Tx queues
+ * (logical FQs, all associated with a single CEETM queue),
+ * Rx queues and Tx confirmation queues, if applicable.
+ * Value 0 defaults to one queue. Maximum supported value
+ * is 8.
+ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
+ * TCs can have different priority levels for the purpose
+ * of Tx scheduling (see DPNI_SET_TX_SELECTION), different
+ * BPs (DPNI_ SET_POOLS), policers. There are dedicated QM
+ * queues for traffic classes (including class queues on
+ * Tx). Value 0 defaults to one TC. Maximum supported value
+ * is 8.
+ * @qos_entries: Number of entries in the QoS classification table. This
+ * table is used to select the TC for ingress traffic. It
+ * is either an exact match or a TCAM table, depending on
+ * DPNI_OPT_ HAS_KEY_MASKING bit in OPTIONS field. This
+ * field is ignored if the DPNI has a single TC. Otherwise,
+ * a value of 0 defaults to 64. Maximum supported value
+ * is 64.
+ */
+ uint32_t options;
+ uint16_t fs_entries;
+ uint8_t vlan_filter_entries;
+ uint8_t mac_filter_entries;
+ uint8_t num_queues;
+ uint8_t num_tcs;
+ uint8_t qos_entries;
+};
+
+/**
+ * dpni_create() - Create the DPNI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: returned object id
+ *
+ * Create the DPNI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpni_cfg *cfg,
+ uint32_t *obj_id);
+
+/**
+ * dpni_destroy() - Destroy the DPNI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+/**
+ * struct dpni_pools_cfg - Structure representing buffer pools configuration
+ * @num_dpbp: Number of DPBPs
+ * @pools: Array of buffer pools parameters; The number of valid entries
+ * must match 'num_dpbp' value
+ */
+struct dpni_pools_cfg {
+ uint8_t num_dpbp;
+ /**
+ * struct pools - Buffer pools parameters
+ * @dpbp_id: DPBP object ID
+ * @buffer_size: Buffer size
+ * @backup_pool: Backup pool
+ */
+ struct {
+ int dpbp_id;
+ uint16_t buffer_size;
+ int backup_pool;
+ } pools[DPNI_MAX_DPBP];
+};
+
+/**
+ * dpni_set_pools() - Set buffer pools configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Buffer pools configuration
+ *
+ * mandatory for DPNI operation
+ * warning:Allowed only when DPNI is disabled
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_pools_cfg *cfg);
+
+/**
+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpni_is_enabled() - Check if the DPNI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+/**
+ * dpni_reset() - Reset the DPNI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpni_attr - Structure representing DPNI attributes
+ * @options: Any combination of the following options:
+ * DPNI_OPT_TX_FRM_RELEASE
+ * DPNI_OPT_NO_MAC_FILTER
+ * DPNI_OPT_HAS_POLICING
+ * DPNI_OPT_SHARED_CONGESTION
+ * DPNI_OPT_HAS_KEY_MASKING
+ * DPNI_OPT_NO_FS
+ * @num_queues: Number of Tx and Rx queues used for traffic distribution.
+ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
+ * @mac_filter_entries: Number of entries in the MAC address filtering
+ * table.
+ * @vlan_filter_entries: Number of entries in the VLAN address filtering
+ * table.
+ * @qos_entries: Number of entries in the QoS classification table.
+ * @fs_entries: Number of entries in the flow steering table.
+ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
+ * than this when adding QoS entries will result
+ * in an error.
+ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
+ * key larger than this when composing the hash + FS key
+ * will result in an error.
+ * @wriop_version: Version of WRIOP HW block.
+ * The 3 version values are stored on 6, 5, 5 bits
+ * respectively.
+ * Values returned:
+ * - 0x400 - WRIOP version 1.0.0, used on LS2080 and
+ * variants,
+ * - 0x421 - WRIOP version 1.1.1, used on LS2088 and
+ * variants,
+ * - 0x422 - WRIOP version 1.1.2, used on LS1088 and
+ * variants.
+ */
+struct dpni_attr {
+ uint32_t options;
+ uint8_t num_queues;
+ uint8_t num_tcs;
+ uint8_t mac_filter_entries;
+ uint8_t vlan_filter_entries;
+ uint8_t qos_entries;
+ uint16_t fs_entries;
+ uint8_t qos_key_size;
+ uint8_t fs_key_size;
+ uint16_t wriop_version;
+};
+
+/**
+ * dpni_get_attributes() - Retrieve DPNI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_attr *attr);
+
+/**
+ * DPNI errors
+ */
+
+/**
+ * Extract out of frame header error
+ */
+#define DPNI_ERROR_EOFHE 0x00020000
+/**
+ * Frame length error
+ */
+#define DPNI_ERROR_FLE 0x00002000
+/**
+ * Frame physical error
+ */
+#define DPNI_ERROR_FPE 0x00001000
+/**
+ * Parsing header error
+ */
+#define DPNI_ERROR_PHE 0x00000020
+/**
+ * Parser L3 checksum error
+ */
+#define DPNI_ERROR_L3CE 0x00000004
+/**
+ * Parser L3 checksum error
+ */
+#define DPNI_ERROR_L4CE 0x00000001
+
+/**
+ * enum dpni_error_action - Defines DPNI behavior for errors
+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
+ */
+enum dpni_error_action {
+ DPNI_ERROR_ACTION_DISCARD = 0,
+ DPNI_ERROR_ACTION_CONTINUE = 1,
+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
+};
+
+/**
+ * struct dpni_error_cfg - Structure representing DPNI errors treatment
+ * @errors: Errors mask; use 'DPNI_ERROR__<X>
+ * @error_action: The desired action for the errors mask
+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
+ * status (FAS); relevant only for the non-discard action
+ */
+struct dpni_error_cfg {
+ uint32_t errors;
+ enum dpni_error_action error_action;
+ int set_frame_annotation;
+};
+
+/**
+ * dpni_set_errors_behavior() - Set errors behavior
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Errors configuration
+ *
+ * this function may be called numerous times with different
+ * error masks
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_error_cfg *cfg);
+
+/**
+ * DPNI buffer layout modification options
+ */
+
+/**
+ * Select to modify the time-stamp setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
+/**
+ * Select to modify the parser-result setting; not applicable for Tx
+ */
+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
+/**
+ * Select to modify the frame-status setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
+/**
+ * Select to modify the private-data-size setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
+/**
+ * Select to modify the data-alignment setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
+/**
+ * Select to modify the data-head-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
+/**
+ * Select to modify the data-tail-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
+
+/**
+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
+ * @options: Flags representing the suggested modifications to the buffer
+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
+ * @pass_timestamp: Pass timestamp value
+ * @pass_parser_result: Pass parser results
+ * @pass_frame_status: Pass frame status
+ * @private_data_size: Size kept for private data (in bytes)
+ * @data_align: Data alignment
+ * @data_head_room: Data head room
+ * @data_tail_room: Data tail room
+ */
+struct dpni_buffer_layout {
+ uint32_t options;
+ int pass_timestamp;
+ int pass_parser_result;
+ int pass_frame_status;
+ uint16_t private_data_size;
+ uint16_t data_align;
+ uint16_t data_head_room;
+ uint16_t data_tail_room;
+};
+
+/**
+ * enum dpni_queue_type - Identifies a type of queue targeted by the command
+ * @DPNI_QUEUE_RX: Rx queue
+ * @DPNI_QUEUE_TX: Tx queue
+ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
+ * @DPNI_QUEUE_RX_ERR: Rx error queue
+ */enum dpni_queue_type {
+ DPNI_QUEUE_RX,
+ DPNI_QUEUE_TX,
+ DPNI_QUEUE_TX_CONFIRM,
+ DPNI_QUEUE_RX_ERR,
+};
+
+/**
+ * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to get the layout from
+ * @layout: Returns buffer layout attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout);
+
+/**
+ * dpni_set_buffer_layout() - Set buffer layout configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to set layout on
+ * @layout: Buffer layout configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout);
+
+/**
+ * enum dpni_offload - Identifies a type of offload targeted by the command
+ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
+ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
+ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
+ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
+ */
+enum dpni_offload {
+ DPNI_OFF_RX_L3_CSUM,
+ DPNI_OFF_RX_L4_CSUM,
+ DPNI_OFF_TX_L3_CSUM,
+ DPNI_OFF_TX_L4_CSUM,
+};
+
+/**
+ * dpni_set_offload() - Set DPNI offload configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @type: Type of DPNI offload
+ * @config: Offload configuration.
+ * For checksum offloads, non-zero value enables
+ * the offload.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_offload type,
+ uint32_t config);
+
+/**
+ * dpni_get_offload() - Get DPNI offload configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @type: Type of DPNI offload
+ * @config: Offload configuration.
+ * For checksum offloads, a value of 1 indicates that the
+ * offload is enabled.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_offload type,
+ uint32_t *config);
+
+/**
+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
+ * for enqueue operations
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to get QDID for. For applications lookig to
+ * transmit traffic this should be set to DPNI_QUEUE_TX
+ * @qdid: Returned virtual QDID value that should be used as an argument
+ * in all enqueue operations
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint16_t *qdid);
+
+#define DPNI_STATISTICS_CNT 7
+
+union dpni_statistics {
+ /**
+ * struct page_0 - Page_0 statistics structure
+ * @ingress_all_frames: Ingress frame count
+ * @ingress_all_bytes: Ingress byte count
+ * @ingress_multicast_frames: Ingress multicast frame count
+ * @ingress_multicast_bytes: Ingress multicast byte count
+ * @ingress_broadcast_frames: Ingress broadcast frame count
+ * @ingress_broadcast_bytes: Ingress broadcast byte count
+ */
+ struct {
+ uint64_t ingress_all_frames;
+ uint64_t ingress_all_bytes;
+ uint64_t ingress_multicast_frames;
+ uint64_t ingress_multicast_bytes;
+ uint64_t ingress_broadcast_frames;
+ uint64_t ingress_broadcast_bytes;
+ } page_0;
+ /**
+ * struct page_1 - Page_1 statistics structure
+ * @egress_all_frames: Egress frame count
+ * @egress_all_bytes: Egress byte count
+ * @egress_multicast_frames: Egress multicast frame count
+ * @egress_multicast_bytes: Egress multicast byte count
+ * @egress_broadcast_frames: Egress broadcast frame count
+ * @egress_broadcast_bytes: Egress broadcast byte count
+ */
+ struct {
+ uint64_t egress_all_frames;
+ uint64_t egress_all_bytes;
+ uint64_t egress_multicast_frames;
+ uint64_t egress_multicast_bytes;
+ uint64_t egress_broadcast_frames;
+ uint64_t egress_broadcast_bytes;
+ } page_1;
+ /**
+ * struct page_2 - Page_2 statistics structure
+ * @ingress_filtered_frames: Ingress filtered frame count
+ * @ingress_discarded_frames: Ingress discarded frame count
+ * @ingress_nobuffer_discards: Ingress discarded frame count due to
+ * lack of buffers
+ * @egress_discarded_frames: Egress discarded frame count
+ * @egress_confirmed_frames: Egress confirmed frame count
+ */
+ struct {
+ uint64_t ingress_filtered_frames;
+ uint64_t ingress_discarded_frames;
+ uint64_t ingress_nobuffer_discards;
+ uint64_t egress_discarded_frames;
+ uint64_t egress_confirmed_frames;
+ } page_2;
+ /**
+ * struct raw - raw statistics structure, used to index counters
+ */
+ struct {
+ uint64_t counter[DPNI_STATISTICS_CNT];
+ } raw;
+};
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
+/**
+ * Enable half-duplex mode
+ */
+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
+/**
+ * Enable pause frames
+ */
+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
+/**
+ * Enable a-symmetric pause frames
+ */
+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+
+/**
+ * struct dpni_link_state - Structure representing DPNI link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ * @up: Link state; '0' for down, '1' for up
+ */
+struct dpni_link_state {
+ uint32_t rate;
+ uint64_t options;
+ int up;
+};
+
+/**
+ * dpni_get_link_state() - Return the link state (either up or down)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @state: Returned link state;
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_link_state *state);
+
+/**
+ * dpni_set_max_frame_length() - Set the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in
+ * bytes); frame is discarded if its
+ * length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t max_frame_length);
+
+/**
+ * dpni_get_max_frame_length() - Get the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in
+ * bytes); frame is discarded if its
+ * length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t *max_frame_length);
+
+
+/**
+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en);
+
+/**
+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+/**
+ * dpni_set_primary_mac_addr() - Set the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to set as primary address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
+
+/**
+ * dpni_get_primary_mac_addr() - Get the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: Returned MAC address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6]);
+
+
+/**
+ * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
+ * port the DPNI is attached to
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address of the physical port, if any, otherwise 0
+ *
+ * The primary MAC address is not modified by this operation.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6]);
+
+/**
+ * enum dpni_dist_mode - DPNI distribution mode
+ * @DPNI_DIST_MODE_NONE: No distribution
+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
+ */
+enum dpni_dist_mode {
+ DPNI_DIST_MODE_NONE = 0,
+ DPNI_DIST_MODE_HASH = 1,
+ DPNI_DIST_MODE_FS = 2
+};
+
+/**
+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
+ */
+enum dpni_fs_miss_action {
+ DPNI_FS_MISS_DROP = 0,
+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
+ DPNI_FS_MISS_HASH = 2
+};
+
+/**
+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
+ * @miss_action: Miss action selection
+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
+ */
+struct dpni_fs_tbl_cfg {
+ enum dpni_fs_miss_action miss_action;
+ uint16_t default_flow_id;
+};
+
+/**
+ * dpni_prepare_key_cfg() - function prepare extract parameters
+ * @cfg: defining a full Key Generation profile (rule)
+ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA
+ *
+ * This function has to be called before the following functions:
+ * - dpni_set_rx_tc_dist()
+ * - dpni_set_qos_table()
+ */
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
+ uint8_t *key_cfg_buf);
+
+/**
+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
+ * @dist_size: Set the distribution size;
+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
+ * 112,128,192,224,256,384,448,512,768,896,1024
+ * @dist_mode: Distribution mode
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * the extractions to be used for the distribution key by calling
+ * dpni_prepare_key_cfg() relevant only when
+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
+ * @fs_cfg: Flow Steering table configuration; only relevant if
+ * 'dist_mode = DPNI_DIST_MODE_FS'
+ */
+struct dpni_rx_tc_dist_cfg {
+ uint16_t dist_size;
+ enum dpni_dist_mode dist_mode;
+ uint64_t key_cfg_iova;
+ struct dpni_fs_tbl_cfg fs_cfg;
+};
+
+/**
+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Traffic class distribution configuration
+ *
+ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
+ * first to prepare the key_cfg_iova parameter
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg);
+
+/**
+ * enum dpni_dest - DPNI destination types
+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
+ * does not generate FQDAN notifications; user is expected to
+ * dequeue from the queue based on polling or other user-defined
+ * method
+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON
+ * object; user is expected to dequeue from the DPCON channel
+ */
+enum dpni_dest {
+ DPNI_DEST_NONE = 0,
+ DPNI_DEST_DPIO = 1,
+ DPNI_DEST_DPCON = 2
+};
+
+
+/**
+ * struct dpni_queue - Queue structure
+ * @user_context: User data, presented to the user along with any frames
+ * from this queue. Not relevant for Tx queues.
+ */
+struct dpni_queue {
+ /**
+ * struct destination - Destination structure
+ * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
+ * Identifies either a DPIO or a DPCON object.
+ * Not relevant for Tx queues.
+ * @type: May be one of the following:
+ * 0 - No destination, queue can be manually
+ * queried, but will not push traffic or
+ * notifications to a DPIO;
+ * 1 - The destination is a DPIO. When traffic
+ * becomes available in the queue a FQDAN
+ * (FQ data available notification) will be
+ * generated to selected DPIO;
+ * 2 - The destination is a DPCON. The queue is
+ * associated with a DPCON object for the
+ * purpose of scheduling between multiple
+ * queues. The DPCON may be independently
+ * configured to generate notifications.
+ * Not relevant for Tx queues.
+ * @hold_active: Hold active, maintains a queue scheduled for longer
+ * in a DPIO during dequeue to reduce spread of traffic.
+ * Only relevant if queues are
+ * not affined to a single DPIO.
+ */
+ struct {
+ uint16_t id;
+ enum dpni_dest type;
+ char hold_active;
+ uint8_t priority;
+ } destination;
+ uint64_t user_context;
+ /**
+ * struct flc - FD FLow Context structure
+ * @value: FLC value to set
+ * @stash_control: Boolean, indicates whether the 6 lowest
+ * significant bits are used for stash control.
+ */
+ struct {
+ uint64_t value;
+ char stash_control;
+ } flc;
+};
+
+/**
+ * struct dpni_queue_id - Queue identification, used for enqueue commands
+ * or queue control
+ * @fqid: FQID used for enqueueing to and/or configuration of this
+ * specific FQ
+ * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI.
+ * Only relevant for Tx queues.
+ */
+struct dpni_queue_id {
+ uint32_t fqid;
+ uint16_t qdbin;
+};
+
+/**
+ * enum dpni_confirmation_mode - Defines DPNI options supported for Tx
+ * confirmation
+ * @DPNI_CONF_AFFINE: For each Tx queue set associated with a sender there is
+ * an affine Tx Confirmation queue
+ * @DPNI_CONF_SINGLE: All Tx queues are associated with a single Tx
+ * confirmation queue
+ * @DPNI_CONF_DISABLE: Tx frames are not confirmed. This must be associated
+ * with proper FD set-up to have buffers release to a Buffer Pool, otherwise
+ * buffers will be leaked
+ */
+enum dpni_confirmation_mode {
+ DPNI_CONF_AFFINE,
+ DPNI_CONF_SINGLE,
+ DPNI_CONF_DISABLE,
+};
+
+/**
+ * dpni_set_tx_confirmation_mode() - Tx confirmation mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mode: Tx confirmation mode
+ *
+ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not
+ * selected at DPNI creation.
+ * Calling this function with 'mode' set to DPNI_CONF_DISABLE disables all
+ * transmit confirmation (including the private confirmation queues), regardless
+ * of previous settings; Note that in this case, Tx error frames are still
+ * enqueued to the general transmit errors queue.
+ * Calling this function with 'mode' set to DPNI_CONF_SINGLE switches all
+ * Tx confirmations to a shared Tx conf queue. The ID of the queue when
+ * calling dpni_set/get_queue is -1.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_confirmation_mode mode);
+
+/**
+ * dpni_get_api_version() - Get Data Path Network Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path network interface API
+ * @minor_ver: Minor version of data path network interface API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+/**
+ * Set User Context
+ */
+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Set queue destination configuration
+ */
+#define DPNI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Set FD[FLC] configuration for traffic on this queue. Note that FLC values
+ * set with dpni_add_fs_entry, if any, take precedence over values per queue.
+ */
+#define DPNI_QUEUE_OPT_FLC 0x00000004
+
+/**
+ * Set the queue to hold active mode. This prevents the queue from being
+ * rescheduled between DPIOs while it carries traffic and is active on one
+ * DPNI. Can help reduce reordering when servicing one queue on multiple
+ * CPUs, but the queue is also less likely to push data to multiple CPUs
+ * especially when congested.
+ */
+#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
+
+/**
+ * dpni_set_queue() - Set queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported, although
+ * the command is ignored for Tx
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set
+ * allocated for the same TC.Value must be in
+ * range 0 to NUM_QUEUES - 1
+ * @options: A combination of DPNI_QUEUE_OPT_ values that control
+ * what configuration options are set on the queue
+ * @queue: Queue configuration structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ uint8_t options,
+ const struct dpni_queue *queue);
+
+/**
+ * dpni_get_queue() - Get queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated
+ * for the same TC. Value must be in range 0 to
+ * NUM_QUEUES - 1
+ * @queue: Queue configuration structure
+ * @qid: Queue identification
+ *
+ * This function returns current queue configuration which can be changed by
+ * calling dpni_set_queue, and queue identification information.
+ * Returned qid.fqid and/or qid.qdbin values can be used to:
+ * - enqueue traffic for Tx queues,
+ * - perform volatile dequeue for Rx and, if applicable, Tx confirmation
+ * clean-up,
+ * - retrieve queue state.
+ *
+ * All these operations are supported through the DPIO run-time API.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ struct dpni_queue *queue,
+ struct dpni_queue_id *qid);
+
+/**
+ * dpni_get_statistics() - Get DPNI statistics
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @page: Selects the statistics page to retrieve, see
+ * DPNI_GET_STATISTICS output.
+ * Pages are numbered 0 to 2.
+ * @stat: Structure containing the statistics
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t page,
+ union dpni_statistics *stat);
+
+/**
+ * dpni_reset_statistics() - Clears DPNI statistics
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
new file mode 100644
index 00000000..bb92ea89
--- /dev/null
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -0,0 +1,334 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPNI_CMD_H
+#define _FSL_DPNI_CMD_H
+
+/* DPNI Version */
+#define DPNI_VER_MAJOR 7
+#define DPNI_VER_MINOR 0
+
+/* Command IDs */
+#define DPNI_CMDID_OPEN ((0x801 << 4) | (0x1))
+#define DPNI_CMDID_CLOSE ((0x800 << 4) | (0x1))
+#define DPNI_CMDID_CREATE ((0x901 << 4) | (0x1))
+#define DPNI_CMDID_DESTROY ((0x981 << 4) | (0x1))
+#define DPNI_CMDID_GET_API_VERSION ((0xa01 << 4) | (0x1))
+
+#define DPNI_CMDID_ENABLE ((0x002 << 4) | (0x1))
+#define DPNI_CMDID_DISABLE ((0x003 << 4) | (0x1))
+#define DPNI_CMDID_GET_ATTR ((0x004 << 4) | (0x1))
+#define DPNI_CMDID_RESET ((0x005 << 4) | (0x1))
+#define DPNI_CMDID_IS_ENABLED ((0x006 << 4) | (0x1))
+
+#define DPNI_CMDID_SET_POOLS ((0x200 << 4) | (0x1))
+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR ((0x20B << 4) | (0x1))
+
+#define DPNI_CMDID_GET_QDID ((0x210 << 4) | (0x1))
+#define DPNI_CMDID_GET_LINK_STATE ((0x215 << 4) | (0x1))
+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH ((0x216 << 4) | (0x1))
+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH ((0x217 << 4) | (0x1))
+
+#define DPNI_CMDID_SET_UNICAST_PROMISC ((0x222 << 4) | (0x1))
+#define DPNI_CMDID_GET_UNICAST_PROMISC ((0x223 << 4) | (0x1))
+#define DPNI_CMDID_SET_PRIM_MAC ((0x224 << 4) | (0x1))
+#define DPNI_CMDID_GET_PRIM_MAC ((0x225 << 4) | (0x1))
+
+#define DPNI_CMDID_SET_RX_TC_DIST ((0x235 << 4) | (0x1))
+
+#define DPNI_CMDID_GET_STATISTICS ((0x25D << 4) | (0x1))
+#define DPNI_CMDID_RESET_STATISTICS ((0x25E << 4) | (0x1))
+#define DPNI_CMDID_GET_QUEUE ((0x25F << 4) | (0x1))
+#define DPNI_CMDID_SET_QUEUE ((0x260 << 4) | (0x1))
+
+#define DPNI_CMDID_GET_PORT_MAC_ADDR ((0x263 << 4) | (0x1))
+
+#define DPNI_CMDID_GET_BUFFER_LAYOUT ((0x264 << 4) | (0x1))
+#define DPNI_CMDID_SET_BUFFER_LAYOUT ((0x265 << 4) | (0x1))
+
+#define DPNI_CMDID_GET_OFFLOAD ((0x26B << 4) | (0x1))
+#define DPNI_CMDID_SET_OFFLOAD ((0x26C << 4) | (0x1))
+#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE ((0x266 << 4) | (0x1))
+#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE ((0x26D << 4) | (0x1))
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_OPEN(cmd, dpni_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_CREATE(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, (cfg)->options); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, (cfg)->num_queues); \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, (cfg)->num_tcs); \
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, (cfg)->mac_filter_entries); \
+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, (cfg)->vlan_filter_entries); \
+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, (cfg)->qos_entries); \
+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, (cfg)->fs_entries); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_POOLS(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \
+ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \
+ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \
+ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \
+ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \
+ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \
+ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \
+ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \
+ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \
+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \
+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\
+ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \
+ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\
+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \
+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\
+ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \
+ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\
+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \
+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\
+ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \
+ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\
+ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \
+ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\
+ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \
+ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_IS_ENABLED(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* DPNI_CMD_GET_ATTR is not used, no input parameters */
+
+#define DPNI_RSP_GET_ATTR(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, (attr)->options); \
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, (attr)->num_queues); \
+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, (attr)->num_tcs); \
+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, (attr)->mac_filter_entries); \
+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, (attr)->vlan_filter_entries); \
+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, (attr)->qos_entries); \
+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, (attr)->fs_entries); \
+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, (attr)->qos_key_size); \
+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, (attr)->fs_key_size); \
+ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, (attr)->wriop_version); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->errors); \
+ MC_CMD_OP(cmd, 0, 32, 4, enum dpni_error_action, cfg->error_action); \
+ MC_CMD_OP(cmd, 0, 36, 1, int, cfg->set_frame_annotation); \
+} while (0)
+
+#define DPNI_CMD_GET_BUFFER_LAYOUT(cmd, qtype) \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype)
+
+#define DPNI_RSP_GET_BUFFER_LAYOUT(cmd, layout) \
+do { \
+ MC_RSP_OP(cmd, 0, 48, 1, char, (layout)->pass_timestamp); \
+ MC_RSP_OP(cmd, 0, 49, 1, char, (layout)->pass_parser_result); \
+ MC_RSP_OP(cmd, 0, 50, 1, char, (layout)->pass_frame_status); \
+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, (layout)->private_data_size); \
+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, (layout)->data_align); \
+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, (layout)->data_head_room); \
+ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, (layout)->data_tail_room); \
+} while (0)
+
+#define DPNI_CMD_SET_BUFFER_LAYOUT(cmd, qtype, layout) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, (layout)->options); \
+ MC_CMD_OP(cmd, 0, 48, 1, char, (layout)->pass_timestamp); \
+ MC_CMD_OP(cmd, 0, 49, 1, char, (layout)->pass_parser_result); \
+ MC_CMD_OP(cmd, 0, 50, 1, char, (layout)->pass_frame_status); \
+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, (layout)->private_data_size); \
+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, (layout)->data_align); \
+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, (layout)->data_head_room); \
+ MC_CMD_OP(cmd, 1, 48, 16, uint16_t, (layout)->data_tail_room); \
+} while (0)
+
+#define DPNI_CMD_SET_OFFLOAD(cmd, type, config) \
+do { \
+ MC_CMD_OP(cmd, 0, 24, 8, enum dpni_offload, type); \
+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, config); \
+} while (0)
+
+#define DPNI_CMD_GET_OFFLOAD(cmd, type) \
+ MC_CMD_OP(cmd, 0, 24, 8, enum dpni_offload, type)
+
+#define DPNI_RSP_GET_OFFLOAD(cmd, config) \
+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, config)
+
+#define DPNI_CMD_GET_QDID(cmd, qtype) \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_QDID(cmd, qdid) \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid)
+
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_GET_STATISTICS(cmd, page) \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, page)
+
+#define DPNI_RSP_GET_STATISTICS(cmd, stat) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, (stat)->raw.counter[0]); \
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, (stat)->raw.counter[1]); \
+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, (stat)->raw.counter[2]); \
+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, (stat)->raw.counter[3]); \
+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, (stat)->raw.counter[4]); \
+ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, (stat)->raw.counter[5]); \
+ MC_RSP_OP(cmd, 6, 0, 64, uint64_t, (stat)->raw.counter[6]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_LINK_STATE(cmd, state) \
+do { \
+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\
+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \
+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_UNICAST_PROMISC(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->dist_size); \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
+ MC_CMD_OP(cmd, 0, 24, 4, enum dpni_dist_mode, cfg->dist_mode); \
+ MC_CMD_OP(cmd, 0, 28, 4, enum dpni_fs_miss_action, \
+ cfg->fs_cfg.miss_action); \
+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->fs_cfg.default_flow_id); \
+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \
+} while (0)
+
+#define DPNI_CMD_GET_QUEUE(cmd, qtype, tc, index) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc); \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, index); \
+} while (0)
+
+#define DPNI_RSP_GET_QUEUE(cmd, queue, queue_id) \
+do { \
+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, (queue)->destination.id); \
+ MC_RSP_OP(cmd, 1, 48, 8, uint8_t, (queue)->destination.priority); \
+ MC_RSP_OP(cmd, 1, 56, 4, enum dpni_dest, (queue)->destination.type); \
+ MC_RSP_OP(cmd, 1, 62, 1, char, (queue)->flc.stash_control); \
+ MC_RSP_OP(cmd, 1, 63, 1, char, (queue)->destination.hold_active); \
+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, (queue)->flc.value); \
+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, (queue)->user_context); \
+ MC_RSP_OP(cmd, 4, 0, 32, uint32_t, (queue_id)->fqid); \
+ MC_RSP_OP(cmd, 4, 32, 16, uint16_t, (queue_id)->qdbin); \
+} while (0)
+
+#define DPNI_CMD_SET_QUEUE(cmd, qtype, tc, index, options, queue) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc); \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, index); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, options); \
+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, (queue)->destination.id); \
+ MC_CMD_OP(cmd, 1, 48, 8, uint8_t, (queue)->destination.priority); \
+ MC_CMD_OP(cmd, 1, 56, 4, enum dpni_dest, (queue)->destination.type); \
+ MC_CMD_OP(cmd, 1, 62, 1, char, (queue)->flc.stash_control); \
+ MC_CMD_OP(cmd, 1, 63, 1, char, (queue)->destination.hold_active); \
+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, (queue)->flc.value); \
+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, (queue)->user_context); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_API_VERSION(cmd, major, minor) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
+} while (0)
+
+
+#define DPNI_CMD_SET_TX_CONFIRMATION_MODE(cmd, mode) \
+ MC_CMD_OP(cmd, 0, 32, 8, enum dpni_confirmation_mode, mode)
+
+#define DPNI_RSP_GET_TX_CONFIRMATION_MODE(cmd, mode) \
+ MC_RSP_OP(cmd, 0, 32, 8, enum dpni_confirmation_mode, mode)
+
+#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/dpaa2/mc/fsl_net.h b/drivers/net/dpaa2/mc/fsl_net.h
new file mode 100644
index 00000000..ef7e4dac
--- /dev/null
+++ b/drivers/net/dpaa2/mc/fsl_net.h
@@ -0,0 +1,487 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_NET_H
+#define __FSL_NET_H
+
+#define LAST_HDR_INDEX 0xFFFFFFFF
+
+/*****************************************************************************/
+/* Protocol fields */
+/*****************************************************************************/
+
+/************************* Ethernet fields *********************************/
+#define NH_FLD_ETH_DA (1)
+#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
+#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
+#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
+#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
+#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
+#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
+
+#define NH_FLD_ETH_ADDR_SIZE 6
+
+/*************************** VLAN fields ***********************************/
+#define NH_FLD_VLAN_VPRI (1)
+#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
+#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
+#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
+#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
+#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
+
+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
+ NH_FLD_VLAN_CFI | \
+ NH_FLD_VLAN_VID)
+
+/************************ IP (generic) fields ******************************/
+#define NH_FLD_IP_VER (1)
+#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
+#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
+#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
+#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
+#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
+#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
+#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
+#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
+
+#define NH_FLD_IP_PROTO_SIZE 1
+
+/***************************** IPV4 fields *********************************/
+#define NH_FLD_IPV4_VER (1)
+#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
+#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
+#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
+#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
+#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
+#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
+#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
+#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
+#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
+#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
+#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
+#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
+#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
+#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
+#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
+
+#define NH_FLD_IPV4_ADDR_SIZE 4
+#define NH_FLD_IPV4_PROTO_SIZE 1
+
+/***************************** IPV6 fields *********************************/
+#define NH_FLD_IPV6_VER (1)
+#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
+#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
+#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
+#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
+#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
+#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
+#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
+#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
+
+#define NH_FLD_IPV6_ADDR_SIZE 16
+#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
+
+/***************************** ICMP fields *********************************/
+#define NH_FLD_ICMP_TYPE (1)
+#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
+#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
+#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
+#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
+
+#define NH_FLD_ICMP_CODE_SIZE 1
+#define NH_FLD_ICMP_TYPE_SIZE 1
+
+/***************************** IGMP fields *********************************/
+#define NH_FLD_IGMP_VERSION (1)
+#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
+#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
+#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
+#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
+
+/***************************** TCP fields **********************************/
+#define NH_FLD_TCP_PORT_SRC (1)
+#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
+#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
+#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
+#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
+#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
+#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
+#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
+#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
+#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
+#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
+#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
+
+#define NH_FLD_TCP_PORT_SIZE 2
+
+/***************************** UDP fields **********************************/
+#define NH_FLD_UDP_PORT_SRC (1)
+#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
+#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
+#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
+#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
+
+#define NH_FLD_UDP_PORT_SIZE 2
+
+/*************************** UDP-lite fields *******************************/
+#define NH_FLD_UDP_LITE_PORT_SRC (1)
+#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
+#define NH_FLD_UDP_LITE_ALL_FIELDS \
+ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
+
+#define NH_FLD_UDP_LITE_PORT_SIZE 2
+
+/*************************** UDP-encap-ESP fields **************************/
+#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
+#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
+#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
+#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
+#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
+ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
+
+#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
+#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
+
+/***************************** SCTP fields *********************************/
+#define NH_FLD_SCTP_PORT_SRC (1)
+#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
+#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
+#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
+#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
+
+#define NH_FLD_SCTP_PORT_SIZE 2
+
+/***************************** DCCP fields *********************************/
+#define NH_FLD_DCCP_PORT_SRC (1)
+#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
+#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
+
+#define NH_FLD_DCCP_PORT_SIZE 2
+
+/***************************** IPHC fields *********************************/
+#define NH_FLD_IPHC_CID (1)
+#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
+#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
+#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
+#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
+
+/***************************** SCTP fields *********************************/
+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGINNING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
+ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
+
+/*************************** L2TPV2 fields *********************************/
+#define NH_FLD_L2TPV2_TYPE_BIT (1)
+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
+#define NH_FLD_L2TPV2_ALL_FIELDS \
+ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
+
+/*************************** L2TPV3 fields *********************************/
+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
+ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
+
+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
+ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
+
+/**************************** PPP fields ***********************************/
+#define NH_FLD_PPP_PID (1)
+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
+
+/************************** PPPoE fields ***********************************/
+#define NH_FLD_PPPOE_VER (1)
+#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
+
+/************************* PPP-Mux fields **********************************/
+#define NH_FLD_PPPMUX_PID (1)
+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
+
+/*********************** PPP-Mux sub-frame fields **************************/
+#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
+ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
+
+/*************************** LLC fields ************************************/
+#define NH_FLD_LLC_DSAP (1)
+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
+
+/*************************** NLPID fields **********************************/
+#define NH_FLD_NLPID_NLPID (1)
+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
+
+/*************************** SNAP fields ***********************************/
+#define NH_FLD_SNAP_OUI (1)
+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
+
+/*************************** LLC SNAP fields *******************************/
+#define NH_FLD_LLC_SNAP_TYPE (1)
+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
+
+#define NH_FLD_ARP_HTYPE (1)
+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
+#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
+
+/*************************** RFC2684 fields ********************************/
+#define NH_FLD_RFC2684_LLC (1)
+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
+
+/*************************** User defined fields ***************************/
+#define NH_FLD_USER_DEFINED_SRCPORT (1)
+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
+#define NH_FLD_USER_DEFINED_ALL_FIELDS \
+ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
+
+/*************************** Payload fields ********************************/
+#define NH_FLD_PAYLOAD_BUFFER (1)
+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
+#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
+
+/*************************** GRE fields ************************************/
+#define NH_FLD_GRE_TYPE (1)
+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
+
+/*************************** MINENCAP fields *******************************/
+#define NH_FLD_MINENCAP_SRC_IP (1)
+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
+#define NH_FLD_MINENCAP_ALL_FIELDS \
+ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
+
+/*************************** IPSEC AH fields *******************************/
+#define NH_FLD_IPSEC_AH_SPI (1)
+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
+
+/*************************** IPSEC ESP fields ******************************/
+#define NH_FLD_IPSEC_ESP_SPI (1)
+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
+
+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
+
+/*************************** MPLS fields ***********************************/
+#define NH_FLD_MPLS_LABEL_STACK (1)
+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
+ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
+
+/*************************** MACSEC fields *********************************/
+#define NH_FLD_MACSEC_SECTAG (1)
+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
+
+/*************************** GTP fields ************************************/
+#define NH_FLD_GTP_TEID (1)
+
+/* Protocol options */
+
+/* Ethernet options */
+#define NH_OPT_ETH_BROADCAST 1
+#define NH_OPT_ETH_MULTICAST 2
+#define NH_OPT_ETH_UNICAST 3
+#define NH_OPT_ETH_BPDU 4
+
+#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
+/* also applicable for broadcast */
+
+/* VLAN options */
+#define NH_OPT_VLAN_CFI 1
+
+/* IPV4 options */
+#define NH_OPT_IPV4_UNICAST 1
+#define NH_OPT_IPV4_MULTICAST 2
+#define NH_OPT_IPV4_BROADCAST 3
+#define NH_OPT_IPV4_OPTION 4
+#define NH_OPT_IPV4_FRAG 5
+#define NH_OPT_IPV4_INITIAL_FRAG 6
+
+/* IPV6 options */
+#define NH_OPT_IPV6_UNICAST 1
+#define NH_OPT_IPV6_MULTICAST 2
+#define NH_OPT_IPV6_OPTION 3
+#define NH_OPT_IPV6_FRAG 4
+#define NH_OPT_IPV6_INITIAL_FRAG 5
+
+/* General IP options (may be used for any version) */
+#define NH_OPT_IP_FRAG 1
+#define NH_OPT_IP_INITIAL_FRAG 2
+#define NH_OPT_IP_OPTION 3
+
+/* Minenc. options */
+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
+
+/* GRE. options */
+#define NH_OPT_GRE_ROUTING_PRESENT 1
+
+/* TCP options */
+#define NH_OPT_TCP_OPTIONS 1
+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
+#define NH_OPT_TCP_CONTROL_LOW_BITS 3
+
+/* CAPWAP options */
+#define NH_OPT_CAPWAP_DTLS 1
+
+enum net_prot {
+ NET_PROT_NONE = 0,
+ NET_PROT_PAYLOAD,
+ NET_PROT_ETH,
+ NET_PROT_VLAN,
+ NET_PROT_IPV4,
+ NET_PROT_IPV6,
+ NET_PROT_IP,
+ NET_PROT_TCP,
+ NET_PROT_UDP,
+ NET_PROT_UDP_LITE,
+ NET_PROT_IPHC,
+ NET_PROT_SCTP,
+ NET_PROT_SCTP_CHUNK_DATA,
+ NET_PROT_PPPOE,
+ NET_PROT_PPP,
+ NET_PROT_PPPMUX,
+ NET_PROT_PPPMUX_SUBFRM,
+ NET_PROT_L2TPV2,
+ NET_PROT_L2TPV3_CTRL,
+ NET_PROT_L2TPV3_SESS,
+ NET_PROT_LLC,
+ NET_PROT_LLC_SNAP,
+ NET_PROT_NLPID,
+ NET_PROT_SNAP,
+ NET_PROT_MPLS,
+ NET_PROT_IPSEC_AH,
+ NET_PROT_IPSEC_ESP,
+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
+ NET_PROT_MACSEC,
+ NET_PROT_GRE,
+ NET_PROT_MINENCAP,
+ NET_PROT_DCCP,
+ NET_PROT_ICMP,
+ NET_PROT_IGMP,
+ NET_PROT_ARP,
+ NET_PROT_CAPWAP_DATA,
+ NET_PROT_CAPWAP_CTRL,
+ NET_PROT_RFC2684,
+ NET_PROT_ICMPV6,
+ NET_PROT_FCOE,
+ NET_PROT_FIP,
+ NET_PROT_ISCSI,
+ NET_PROT_GTP,
+ NET_PROT_USER_DEFINED_L2,
+ NET_PROT_USER_DEFINED_L3,
+ NET_PROT_USER_DEFINED_L4,
+ NET_PROT_USER_DEFINED_L5,
+ NET_PROT_USER_DEFINED_SHIM1,
+ NET_PROT_USER_DEFINED_SHIM2,
+
+ NET_PROT_DUMMY_LAST
+};
+
+/*! IEEE8021.Q */
+#define NH_IEEE8021Q_ETYPE 0x8100
+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
+ ((((uint32_t)(etype & 0xFFFF)) << 16) | \
+ (((uint32_t)(pcp & 0x07)) << 13) | \
+ (((uint32_t)(dei & 0x01)) << 12) | \
+ (((uint32_t)(vlan_id & 0xFFF))))
+
+#endif /* __FSL_NET_H */
diff --git a/drivers/net/dpaa2/rte_pmd_dpaa2_version.map b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index 57a60f0f..b5592d6b 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -57,6 +57,9 @@ CFLAGS_BASE_DRIVER += -Wno-unused-variable
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1)
CFLAGS_BASE_DRIVER += -Wno-misleading-indentation
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
endif
endif
endif
@@ -96,9 +99,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_rxtx.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_net
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/e1000/base/README b/drivers/net/e1000/base/README
index 8d48135a..de1ae4cf 100644
--- a/drivers/net/e1000/base/README
+++ b/drivers/net/e1000/base/README
@@ -1,7 +1,7 @@
..
BSD LICENSE
- Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -31,9 +31,30 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This directory contains source code of FreeBSD em & igb drivers of version
-cid-shared-code.2015.10.09 released by ND. The sub-directory of base/
+cid-shared-code.2016.11.22 released by ND. The sub-directory of base/
contains the original source package.
+This driver is valid for the product(s) listed below
+* Intel® Ethernet Controller 82540
+* Intel® Ethernet Controller 82545 Series
+* Intel® Ethernet Controller 82546 Series
+* Intel® Ethernet Controller 82571 Series
+* Intel® Ethernet Controller 82572 Series
+* Intel® Ethernet Controller 82573
+* Intel® Ethernet Controller 82574
+* Intel® Ethernet Controller 82583
+* Intel® Ethernet Controller I217 Series
+* Intel® Ethernet Controller I218 Series
+* Intel® Ethernet Controller I219 Series
+* Intel® Ethernet Controller 82576 Series
+* Intel® Ethernet Controller 82575 Series
+* Intel® Ethernet Controller 82580 Series
+* Intel® Ethernet Controller I350 Series
+* Intel® Ethernet Controller I210 Series
+* Intel® Ethernet Controller I211
+* Intel® Ethernet Controller I354 Series
+* Intel® Ethernet Controller DH89XXCC Series
+
Updating the driver
===================
diff --git a/drivers/net/e1000/base/e1000_82575.c b/drivers/net/e1000/base/e1000_82575.c
index 723885d7..c6400bde 100644
--- a/drivers/net/e1000/base/e1000_82575.c
+++ b/drivers/net/e1000/base/e1000_82575.c
@@ -100,7 +100,6 @@ STATIC s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
u16 offset);
STATIC s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
STATIC s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
-STATIC void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
STATIC void e1000_clear_vfta_i350(struct e1000_hw *hw);
STATIC void e1000_i2c_start(struct e1000_hw *hw);
diff --git a/drivers/net/e1000/base/e1000_82575.h b/drivers/net/e1000/base/e1000_82575.h
index c4986841..4133cdd8 100644
--- a/drivers/net/e1000/base/e1000_82575.h
+++ b/drivers/net/e1000/base/e1000_82575.h
@@ -492,6 +492,7 @@ enum e1000_promisc_type {
void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
void e1000_rlpml_set_vf(struct e1000_hw *, u16);
s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type);
+void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
u16 e1000_rxpbs_adjust_82580(u32 data);
s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M);
diff --git a/drivers/net/e1000/base/e1000_api.c b/drivers/net/e1000/base/e1000_api.c
index bbfcae88..f7cf83b6 100644
--- a/drivers/net/e1000/base/e1000_api.c
+++ b/drivers/net/e1000/base/e1000_api.c
@@ -298,6 +298,23 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_PCH_I218_V3:
mac->type = e1000_pch_lpt;
break;
+ case E1000_DEV_ID_PCH_SPT_I219_LM:
+ case E1000_DEV_ID_PCH_SPT_I219_V:
+ case E1000_DEV_ID_PCH_SPT_I219_LM2:
+ case E1000_DEV_ID_PCH_SPT_I219_V2:
+ case E1000_DEV_ID_PCH_LBG_I219_LM3:
+ case E1000_DEV_ID_PCH_SPT_I219_LM4:
+ case E1000_DEV_ID_PCH_SPT_I219_V4:
+ case E1000_DEV_ID_PCH_SPT_I219_LM5:
+ case E1000_DEV_ID_PCH_SPT_I219_V5:
+ mac->type = e1000_pch_spt;
+ break;
+ case E1000_DEV_ID_PCH_CNP_I219_LM6:
+ case E1000_DEV_ID_PCH_CNP_I219_V6:
+ case E1000_DEV_ID_PCH_CNP_I219_LM7:
+ case E1000_DEV_ID_PCH_CNP_I219_V7:
+ mac->type = e1000_pch_cnp;
+ break;
case E1000_DEV_ID_82575EB_COPPER:
case E1000_DEV_ID_82575EB_FIBER_SERDES:
case E1000_DEV_ID_82575GB_QUAD_COPPER:
@@ -448,6 +465,8 @@ s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
e1000_init_function_pointers_ich8lan(hw);
break;
case e1000_82575:
diff --git a/drivers/net/e1000/base/e1000_defines.h b/drivers/net/e1000/base/e1000_defines.h
index 69aa1f23..dbc2bbbe 100644
--- a/drivers/net/e1000/base/e1000_defines.h
+++ b/drivers/net/e1000/base/e1000_defines.h
@@ -198,6 +198,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
#define E1000_RCTL_RDMTS_HEX 0x00010000
+#define E1000_RCTL_RDMTS1_HEX E1000_RCTL_RDMTS_HEX
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
@@ -468,6 +469,8 @@ POSSIBILITY OF SUCH DAMAGE.
#define ETHERNET_FCS_SIZE 4
#define MAX_JUMBO_FRAME_SIZE 0x3F00
+/* The datasheet maximum supported RX size is 9.5KB (9728 bytes) */
+#define MAX_RX_JUMBO_FRAME_SIZE 0x2600
#define E1000_TX_PTR_GAP 0x1F
/* Extended Configuration Control and Size */
@@ -751,6 +754,12 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+/* HH Time Sync */
+#define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
+#define E1000_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */
+#define E1000_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */
+#define E1000_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
+
#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
diff --git a/drivers/net/e1000/base/e1000_hw.h b/drivers/net/e1000/base/e1000_hw.h
index e4e4f764..d9de9fc1 100644
--- a/drivers/net/e1000/base/e1000_hw.h
+++ b/drivers/net/e1000/base/e1000_hw.h
@@ -136,6 +136,19 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_I218_V2 0x15A1
#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */
#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* Sunrise Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* Sunrise Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* Sunrise Point-H PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* Sunrise Point-H PCH */
+#define E1000_DEV_ID_PCH_LBG_I219_LM3 0x15B9 /* LEWISBURG PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM4 0x15D7
+#define E1000_DEV_ID_PCH_SPT_I219_V4 0x15D8
+#define E1000_DEV_ID_PCH_SPT_I219_LM5 0x15E3
+#define E1000_DEV_ID_PCH_SPT_I219_V5 0x15D6
+#define E1000_DEV_ID_PCH_CNP_I219_LM6 0x15BD
+#define E1000_DEV_ID_PCH_CNP_I219_V6 0x15BE
+#define E1000_DEV_ID_PCH_CNP_I219_LM7 0x15BB
+#define E1000_DEV_ID_PCH_CNP_I219_V7 0x15BC
#define E1000_DEV_ID_82576 0x10C9
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
@@ -221,6 +234,8 @@ enum e1000_mac_type {
e1000_pchlan,
e1000_pch2lan,
e1000_pch_lpt,
+ e1000_pch_spt,
+ e1000_pch_cnp,
e1000_82575,
e1000_82576,
e1000_82580,
@@ -950,11 +965,15 @@ struct e1000_dev_spec_ich8lan {
E1000_MUTEX nvm_mutex;
E1000_MUTEX swflag_mutex;
bool nvm_k1_enabled;
+ bool disable_k1_off;
bool eee_disable;
u16 eee_lp_ability;
#ifdef ULP_SUPPORT
enum e1000_ulp_state ulp_state;
-#endif /* NAHUM6LP_HW && ULP_SUPPORT */
+ bool ulp_capability_disabled;
+ bool during_suspend_flow;
+ bool during_dpg_exit;
+#endif /* ULP_SUPPORT */
u16 lat_enc;
u16 max_ltr_enc;
bool smbus_disable;
diff --git a/drivers/net/e1000/base/e1000_ich8lan.c b/drivers/net/e1000/base/e1000_ich8lan.c
index 89d07e90..6dd046d2 100644
--- a/drivers/net/e1000/base/e1000_ich8lan.c
+++ b/drivers/net/e1000/base/e1000_ich8lan.c
@@ -94,10 +94,13 @@ STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
bool active);
STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
+STATIC s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
u16 *data);
STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
@@ -125,6 +128,14 @@ STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
u32 offset, u8 *data);
STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
u8 size, u16 *data);
+STATIC s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data);
+STATIC s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 *data);
+STATIC s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 data);
+STATIC s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 dword);
STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
u32 offset, u16 *data);
STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
@@ -233,7 +244,7 @@ STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
if (ret_val)
return false;
out:
- if (hw->mac.type == e1000_pch_lpt) {
+ if (hw->mac.type >= e1000_pch_lpt) {
/* Only unforce SMBus if ME is not active */
if (!(E1000_READ_REG(hw, E1000_FWSM) &
E1000_ICH_FWSM_FW_VALID)) {
@@ -277,7 +288,7 @@ STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
E1000_WRITE_FLUSH(hw);
- usec_delay(10);
+ msec_delay(1);
mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
E1000_WRITE_FLUSH(hw);
@@ -334,6 +345,8 @@ STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -481,6 +494,8 @@ STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
/* fall-through */
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -623,36 +638,57 @@ STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u32 gfpreg, sector_base_addr, sector_end_addr;
u16 i;
+ u32 nvm_size;
DEBUGFUNC("e1000_init_nvm_params_ich8lan");
- /* Can't read flash registers if the register set isn't mapped. */
nvm->type = e1000_nvm_flash_sw;
- if (!hw->flash_address) {
- DEBUGOUT("ERROR: Flash registers not mapped\n");
- return -E1000_ERR_CONFIG;
- }
- gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
+ if (hw->mac.type >= e1000_pch_spt) {
+ /* in SPT, gfpreg doesn't exist. NVM size is taken from the
+ * STRAP register. This is because in SPT the GbE Flash region
+ * is no longer accessed through the flash registers. Instead,
+ * the mechanism has changed, and the Flash region access
+ * registers are now implemented in GbE memory space.
+ */
+ nvm->flash_base_addr = 0;
+ nvm_size =
+ (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
+ * NVM_SIZE_MULTIPLIER;
+ nvm->flash_bank_size = nvm_size / 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
+ /* Set the base address for flash register access */
+ hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
+ } else {
+ /* Can't read flash registers if register set isn't mapped. */
+ if (!hw->flash_address) {
+ DEBUGOUT("ERROR: Flash registers not mapped\n");
+ return -E1000_ERR_CONFIG;
+ }
- /* sector_X_addr is a "sector"-aligned address (4096 bytes)
- * Add 1 to sector_end_addr since this sector is included in
- * the overall size.
- */
- sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
- sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
+ gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
- /* flash_base_addr is byte-aligned */
- nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
+ /* sector_X_addr is a "sector"-aligned address (4096 bytes)
+ * Add 1 to sector_end_addr since this sector is included in
+ * the overall size.
+ */
+ sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
+ sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
- /* find total size of the NVM, then cut in half since the total
- * size represents two separate NVM banks.
- */
- nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
- << FLASH_SECTOR_ADDR_SHIFT);
- nvm->flash_bank_size /= 2;
- /* Adjust to word count */
- nvm->flash_bank_size /= sizeof(u16);
+ /* flash_base_addr is byte-aligned */
+ nvm->flash_base_addr = sector_base_addr
+ << FLASH_SECTOR_ADDR_SHIFT;
+
+ /* find total size of the NVM, then cut in half since the total
+ * size represents two separate NVM banks.
+ */
+ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+ << FLASH_SECTOR_ADDR_SHIFT);
+ nvm->flash_bank_size /= 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
+ }
nvm->word_size = E1000_SHADOW_RAM_WORDS;
@@ -668,8 +704,13 @@ STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* Function Pointers */
nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
nvm->ops.release = e1000_release_nvm_ich8lan;
- nvm->ops.read = e1000_read_nvm_ich8lan;
- nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
+ if (hw->mac.type >= e1000_pch_spt) {
+ nvm->ops.read = e1000_read_nvm_spt;
+ nvm->ops.update = e1000_update_nvm_checksum_spt;
+ } else {
+ nvm->ops.read = e1000_read_nvm_ich8lan;
+ nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
+ }
nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
nvm->ops.write = e1000_write_nvm_ich8lan;
@@ -758,6 +799,8 @@ STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.rar_set = e1000_rar_set_pch2lan;
/* fall-through */
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
/* multicast address update for pch2 */
mac->ops.update_mc_addr_list =
@@ -768,7 +811,13 @@ STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
/* save PCH revision_id */
e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
- hw->revision_id = (u8)(pci_cfg &= 0x000F);
+ /* SPT uses full byte for revision ID,
+ * as opposed to previous generations
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ hw->revision_id = (u8)(pci_cfg &= 0x00FF);
+ else
+ hw->revision_id = (u8)(pci_cfg &= 0x000F);
#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -786,7 +835,7 @@ STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
break;
}
- if (mac->type == e1000_pch_lpt) {
+ if (mac->type >= e1000_pch_lpt) {
mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
mac->ops.rar_set = e1000_rar_set_pch_lpt;
mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
@@ -1015,8 +1064,9 @@ release:
/* clear FEXTNVM6 bit 8 on link down or 10/100 */
fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
- if (!link || ((status & E1000_STATUS_SPEED_100) &&
- (status & E1000_STATUS_FD)))
+ if ((hw->phy.revision > 5) || !link ||
+ ((status & E1000_STATUS_SPEED_100) &&
+ (status & E1000_STATUS_FD)))
goto update_fextnvm6;
ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
@@ -1068,6 +1118,7 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
u32 mac_reg;
s32 ret_val = E1000_SUCCESS;
u16 phy_reg;
+ u16 oem_reg = 0;
if ((hw->mac.type < e1000_pch_lpt) ||
(hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
@@ -1128,6 +1179,25 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+ /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
+ * LPLU and disable Gig speed when entering ULP
+ */
+ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ &oem_reg);
+ if (ret_val)
+ goto release;
+
+ phy_reg = oem_reg;
+ phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
+
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ phy_reg);
+
+ if (ret_val)
+ goto release;
+ }
+
skip_smbus:
if (!to_sx) {
/* Change the 'Link Status Change' interrupt to trigger
@@ -1184,6 +1254,14 @@ skip_smbus:
E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
}
+ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
+ to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ oem_reg);
+ if (ret_val)
+ goto release;
+ }
+
release:
hw->phy.ops.release(hw);
out:
@@ -1240,10 +1318,10 @@ s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
}
- /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
+ /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
while (E1000_READ_REG(hw, E1000_FWSM) &
E1000_FWSM_ULP_CFG_DONE) {
- if (i++ == 10) {
+ if (i++ == 30) {
ret_val = -E1000_ERR_PHY;
goto out;
}
@@ -1343,6 +1421,8 @@ s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
I218_ULP_CONFIG1_RESET_TO_SMBUS |
I218_ULP_CONFIG1_WOL_HOST |
I218_ULP_CONFIG1_INBAND_EXIT |
+ I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
+ I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
I218_ULP_CONFIG1_DISABLE_SMB_PERST);
e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
@@ -1360,6 +1440,8 @@ s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
if (hw->mac.autoneg)
e1000_phy_setup_autoneg(hw);
+ else
+ e1000_setup_copper_link_generic(hw);
e1000_sw_lcd_config_ich8lan(hw);
@@ -1397,6 +1479,8 @@ out:
}
#endif /* ULP_SUPPORT */
+
+
/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
@@ -1456,8 +1540,7 @@ STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* aggressive resulting in many collisions. To avoid this, increase
* the IPG and reduce Rx latency in the PHY.
*/
- if (((hw->mac.type == e1000_pch2lan) ||
- (hw->mac.type == e1000_pch_lpt)) && link) {
+ if ((hw->mac.type >= e1000_pch2lan) && link) {
u16 speed, duplex;
e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
@@ -1468,6 +1551,10 @@ STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
tipg_reg |= 0xFF;
/* Reduce Rx latency in analog PHY */
emi_val = 0;
+ } else if (hw->mac.type >= e1000_pch_spt &&
+ duplex == FULL_DUPLEX && speed != SPEED_1000) {
+ tipg_reg |= 0xC;
+ emi_val = 1;
} else {
/* Roll back the default values */
tipg_reg |= 0x08;
@@ -1486,10 +1573,78 @@ STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
emi_addr = I217_RX_CONFIG;
ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
+
+ if (hw->mac.type >= e1000_pch_lpt) {
+ u16 phy_reg;
+
+ hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
+ &phy_reg);
+ phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
+ if (speed == SPEED_100 || speed == SPEED_10)
+ phy_reg |= 0x3E8;
+ else
+ phy_reg |= 0xFA;
+ hw->phy.ops.write_reg_locked(hw,
+ I217_PLL_CLOCK_GATE_REG,
+ phy_reg);
+
+ if (speed == SPEED_1000) {
+ hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
+ &phy_reg);
+
+ phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
+
+ hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
+ phy_reg);
+ }
+ }
hw->phy.ops.release(hw);
if (ret_val)
return ret_val;
+
+ if (hw->mac.type >= e1000_pch_spt) {
+ u16 data;
+ u16 ptr_gap;
+
+ if (speed == SPEED_1000) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.read_reg_locked(hw,
+ PHY_REG(776, 20),
+ &data);
+ if (ret_val) {
+ hw->phy.ops.release(hw);
+ return ret_val;
+ }
+
+ ptr_gap = (data & (0x3FF << 2)) >> 2;
+ if (ptr_gap < 0x18) {
+ data &= ~(0x3FF << 2);
+ data |= (0x18 << 2);
+ ret_val =
+ hw->phy.ops.write_reg_locked(hw,
+ PHY_REG(776, 20), data);
+ }
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg_locked(hw,
+ PHY_REG(776, 20),
+ 0xC023);
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return ret_val;
+
+ }
+ }
}
/* I217 Packet Loss issue:
@@ -1497,7 +1652,7 @@ STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* on power up.
* Set the Beacon Duration for I217 to 8 usec
*/
- if (hw->mac.type == e1000_pch_lpt) {
+ if (hw->mac.type >= e1000_pch_lpt) {
u32 mac_reg;
mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
@@ -1519,10 +1674,29 @@ STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
hw->dev_spec.ich8lan.eee_lp_ability = 0;
/* Configure K0s minimum time */
- if (hw->mac.type == e1000_pch_lpt) {
+ if (hw->mac.type >= e1000_pch_lpt) {
e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
}
+ if (hw->mac.type >= e1000_pch_lpt) {
+ u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
+
+ if (hw->mac.type == e1000_pch_spt) {
+ /* FEXTNVM6 K1-off workaround - for SPT only */
+ u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
+
+ if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
+ fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
+ else
+ fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+ }
+
+ if (hw->dev_spec.ich8lan.disable_k1_off == true)
+ fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+
+ E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
+ }
+
if (!link)
return E1000_SUCCESS; /* No link detected */
@@ -1616,6 +1790,8 @@ void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
break;
default:
@@ -2081,6 +2257,8 @@ STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3204,6 +3382,41 @@ STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
switch (hw->mac.type) {
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
+ bank1_offset = nvm->flash_bank_size;
+ act_offset = E1000_ICH_NVM_SIG_WORD;
+
+ /* set bank to 0 in case flash read fails */
+ *bank = 0;
+
+ /* Check bank 0 */
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
+ &nvm_dword);
+ if (ret_val)
+ return ret_val;
+ sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 0;
+ return E1000_SUCCESS;
+ }
+
+ /* Check bank 1 */
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
+ bank1_offset,
+ &nvm_dword);
+ if (ret_val)
+ return ret_val;
+ sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 1;
+ return E1000_SUCCESS;
+ }
+
+ DEBUGOUT("ERROR: No valid NVM bank present\n");
+ return -E1000_ERR_NVM;
case e1000_ich8lan:
case e1000_ich9lan:
eecd = E1000_READ_REG(hw, E1000_EECD);
@@ -3251,6 +3464,99 @@ STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
}
/**
+ * e1000_read_nvm_spt - NVM access for SPT
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to read.
+ * @words: Size of data to read in words.
+ * @data: pointer to the word(s) to read at offset.
+ *
+ * Reads a word(s) from the NVM
+ **/
+STATIC s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 act_offset;
+ s32 ret_val = E1000_SUCCESS;
+ u32 bank = 0;
+ u32 dword = 0;
+ u16 offset_to_read;
+ u16 i;
+
+ DEBUGFUNC("e1000_read_nvm_spt");
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ nvm->ops.acquire(hw);
+
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ act_offset = (bank) ? nvm->flash_bank_size : 0;
+ act_offset += offset;
+
+ ret_val = E1000_SUCCESS;
+
+ for (i = 0; i < words; i += 2) {
+ if (words - i == 1) {
+ if (dev_spec->shadow_ram[offset+i].modified) {
+ data[i] = dev_spec->shadow_ram[offset+i].value;
+ } else {
+ offset_to_read = act_offset + i -
+ ((act_offset + i) % 2);
+ ret_val =
+ e1000_read_flash_dword_ich8lan(hw,
+ offset_to_read,
+ &dword);
+ if (ret_val)
+ break;
+ if ((act_offset + i) % 2 == 0)
+ data[i] = (u16)(dword & 0xFFFF);
+ else
+ data[i] = (u16)((dword >> 16) & 0xFFFF);
+ }
+ } else {
+ offset_to_read = act_offset + i;
+ if (!(dev_spec->shadow_ram[offset+i].modified) ||
+ !(dev_spec->shadow_ram[offset+i+1].modified)) {
+ ret_val =
+ e1000_read_flash_dword_ich8lan(hw,
+ offset_to_read,
+ &dword);
+ if (ret_val)
+ break;
+ }
+ if (dev_spec->shadow_ram[offset+i].modified)
+ data[i] = dev_spec->shadow_ram[offset+i].value;
+ else
+ data[i] = (u16) (dword & 0xFFFF);
+ if (dev_spec->shadow_ram[offset+i].modified)
+ data[i+1] =
+ dev_spec->shadow_ram[offset+i+1].value;
+ else
+ data[i+1] = (u16) (dword >> 16 & 0xFFFF);
+ }
+ }
+
+ nvm->ops.release(hw);
+
+out:
+ if (ret_val)
+ DEBUGOUT1("NVM read error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
* e1000_read_nvm_ich8lan - Read word(s) from the NVM
* @hw: pointer to the HW structure
* @offset: The offset (in bytes) of the word(s) to read.
@@ -3337,7 +3643,11 @@ STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
/* Clear FCERR and DAEL in hw status by writing 1 */
hsfsts.hsf_status.flcerr = 1;
hsfsts.hsf_status.dael = 1;
- E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval & 0xFFFF);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
/* Either we should have a hardware SPI cycle in progress
* bit to check against, in order to start a new cycle or
@@ -3353,7 +3663,12 @@ STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
* Begin by setting Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
- E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval & 0xFFFF);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval);
ret_val = E1000_SUCCESS;
} else {
s32 i;
@@ -3375,8 +3690,12 @@ STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
* now set the Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
- E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
- hsfsts.regval);
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval & 0xFFFF);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval);
} else {
DEBUGOUT("Flash controller busy, cannot get access\n");
}
@@ -3401,10 +3720,17 @@ STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
DEBUGFUNC("e1000_flash_cycle_ich8lan");
/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
- hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ if (hw->mac.type >= e1000_pch_spt)
+ hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
+ else
+ hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcgo = 1;
- E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
/* wait till FDONE bit is set to 1 */
do {
@@ -3421,6 +3747,29 @@ STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
}
/**
+ * e1000_read_flash_dword_ich8lan - Read dword from flash
+ * @hw: pointer to the HW structure
+ * @offset: offset to data location
+ * @data: pointer to the location for storing the data
+ *
+ * Reads the flash dword at offset into data. Offset is converted
+ * to bytes before read.
+ **/
+STATIC s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data)
+{
+ DEBUGFUNC("e1000_read_flash_dword_ich8lan");
+
+ if (!data)
+ return -E1000_ERR_NVM;
+
+ /* Must convert word offset into bytes. */
+ offset <<= 1;
+
+ return e1000_read_flash_data32_ich8lan(hw, offset, data);
+}
+
+/**
* e1000_read_flash_word_ich8lan - Read word from flash
* @hw: pointer to the HW structure
* @offset: offset to data location
@@ -3457,7 +3806,13 @@ STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u16 word = 0;
- ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+ /* In SPT, only 32 bits access is supported,
+ * so this function should not be called.
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ return -E1000_ERR_NVM;
+ else
+ ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
if (ret_val)
return ret_val;
@@ -3543,6 +3898,83 @@ STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
return ret_val;
}
+/**
+ * e1000_read_flash_data32_ich8lan - Read dword from NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the dword to read.
+ * @data: Pointer to the dword to store the value read.
+ *
+ * Reads a byte or word from the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ s32 ret_val = -E1000_ERR_NVM;
+ u8 count = 0;
+
+ DEBUGFUNC("e1000_read_flash_data_ich8lan");
+
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
+ hw->mac.type < e1000_pch_spt)
+ return -E1000_ERR_NVM;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+
+ do {
+ usec_delay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val != E1000_SUCCESS)
+ break;
+ /* In SPT, This register is in Lan memory space, not flash.
+ * Therefore, only 32 bit access is supported
+ */
+ hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
+
+ /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ /* In SPT, This register is in Lan memory space, not flash.
+ * Therefore, only 32 bit access is supported
+ */
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ (u32)hsflctl.regval << 16);
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+ ret_val = e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+ /* Check if FCERR is set to 1, if set to 1, clear it
+ * and try the whole sequence a few more times, else
+ * read in (shift in) the Flash Data0, the order is
+ * least significant byte first msb to lsb
+ */
+ if (ret_val == E1000_SUCCESS) {
+ *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
+ break;
+ } else {
+ /* If we've gotten here, then things are probably
+ * completely hosed, but if the error condition is
+ * detected, it won't hurt to give it another try...
+ * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (!hsfsts.hsf_status.flcdone) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
/**
* e1000_write_nvm_ich8lan - Write word(s) to the NVM
@@ -3581,6 +4013,175 @@ STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
+ * e1000_update_nvm_checksum_spt - Update the checksum for NVM
+ * @hw: pointer to the HW structure
+ *
+ * The NVM checksum is updated by calling the generic update_nvm_checksum,
+ * which writes the checksum to the shadow ram. The changes in the shadow
+ * ram are then committed to the EEPROM by processing each bank at a time
+ * checking for the modified bit and writing only the pending changes.
+ * After a successful commit, the shadow ram is cleared and is ready for
+ * future writes.
+ **/
+STATIC s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+ s32 ret_val;
+ u32 dword = 0;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_spt");
+
+ ret_val = e1000_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ goto out;
+
+ if (nvm->type != e1000_nvm_flash_sw)
+ goto out;
+
+ nvm->ops.acquire(hw);
+
+ /* We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written
+ */
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ if (bank == 0) {
+ new_bank_offset = nvm->flash_bank_size;
+ old_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+ if (ret_val)
+ goto release;
+ } else {
+ old_bank_offset = nvm->flash_bank_size;
+ new_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+ if (ret_val)
+ goto release;
+ }
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
+ /* Determine whether to write the value stored
+ * in the other NVM bank or a modified value stored
+ * in the shadow RAM
+ */
+ ret_val = e1000_read_flash_dword_ich8lan(hw,
+ i + old_bank_offset,
+ &dword);
+
+ if (dev_spec->shadow_ram[i].modified) {
+ dword &= 0xffff0000;
+ dword |= (dev_spec->shadow_ram[i].value & 0xffff);
+ }
+ if (dev_spec->shadow_ram[i + 1].modified) {
+ dword &= 0x0000ffff;
+ dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
+ << 16);
+ }
+ if (ret_val)
+ break;
+
+ /* If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress
+ */
+ if (i == E1000_ICH_NVM_SIG_WORD - 1)
+ dword |= E1000_ICH_NVM_SIG_MASK << 16;
+
+ /* Convert offset to bytes. */
+ act_offset = (i + new_bank_offset) << 1;
+
+ usec_delay(100);
+
+ /* Write the data to the new bank. Offset in words*/
+ act_offset = i + new_bank_offset;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
+ dword);
+ if (ret_val)
+ break;
+ }
+
+ /* Don't bother writing the segment valid bits if sector
+ * programming failed.
+ */
+ if (ret_val) {
+ DEBUGOUT("Flash commit failed.\n");
+ goto release;
+ }
+
+ /* Finally validate the new segment by setting bit 15:14
+ * to 10b in word 0x13 , this can be done without an
+ * erase as well since these bits are 11 to start with
+ * and we need to change bit 14 to 0b
+ */
+ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+
+ /*offset in words but we read dword*/
+ --act_offset;
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+ if (ret_val)
+ goto release;
+
+ dword &= 0xBFFFFFFF;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+ if (ret_val)
+ goto release;
+
+ /* And invalidate the previously valid segment by setting
+ * its signature word (0x13) high_byte to 0b. This can be
+ * done without an erase because flash erase sets all bits
+ * to 1's. We can write 1's to 0's without an erase
+ */
+ act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+
+ /* offset in words but we read dword*/
+ act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+ if (ret_val)
+ goto release;
+
+ dword &= 0x00FFFFFF;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+ if (ret_val)
+ goto release;
+
+ /* Great! Everything worked, we can now clear the cached entries. */
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ /* Reload the EEPROM, or else modifications will not appear
+ * until after the next adapter reset.
+ */
+ if (!ret_val) {
+ nvm->ops.reload(hw);
+ msec_delay(10);
+ }
+
+out:
+ if (ret_val)
+ DEBUGOUT1("NVM update error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
* e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
* @hw: pointer to the HW structure
*
@@ -3757,6 +4358,8 @@ STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
@@ -3804,8 +4407,13 @@ STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
DEBUGFUNC("e1000_write_ich8_data");
- if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
- return -E1000_ERR_NVM;
+ if (hw->mac.type >= e1000_pch_spt) {
+ if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ } else {
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ }
flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
hw->nvm.flash_base_addr);
@@ -3816,12 +4424,29 @@ STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ret_val = e1000_flash_cycle_init_ich8lan(hw);
if (ret_val != E1000_SUCCESS)
break;
- hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ /* In SPT, This register is in Lan memory space, not
+ * flash. Therefore, only 32 bit access is supported
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ hsflctl.regval =
+ E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
+ else
+ hsflctl.regval =
+ E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
hsflctl.hsf_ctrl.fldbcount = size - 1;
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
- E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+ /* In SPT, This register is in Lan memory space,
+ * not flash. Therefore, only 32 bit access is
+ * supported
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
+ hsflctl.regval);
E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
@@ -3859,6 +4484,94 @@ STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
return ret_val;
}
+/**
+* e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
+* @hw: pointer to the HW structure
+* @offset: The offset (in bytes) of the dwords to read.
+* @data: The 4 bytes to write to the NVM.
+*
+* Writes one/two/four bytes to the NVM using the flash access registers.
+**/
+STATIC s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ s32 ret_val;
+ u8 count = 0;
+
+ DEBUGFUNC("e1000_write_flash_data32_ich8lan");
+
+ if (hw->mac.type >= e1000_pch_spt) {
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ }
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+ do {
+ usec_delay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val != E1000_SUCCESS)
+ break;
+
+ /* In SPT, This register is in Lan memory space, not
+ * flash. Therefore, only 32 bit access is supported
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ hsflctl.regval = E1000_READ_FLASH_REG(hw,
+ ICH_FLASH_HSFSTS)
+ >> 16;
+ else
+ hsflctl.regval = E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFCTL);
+
+ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+
+ /* In SPT, This register is in Lan memory space,
+ * not flash. Therefore, only 32 bit access is
+ * supported
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
+ hsflctl.regval);
+
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
+
+ /* check if FCERR is set to 1 , if set to 1, clear it
+ * and try the whole sequence a few more times else done
+ */
+ ret_val = e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+
+ if (ret_val == E1000_SUCCESS)
+ break;
+
+ /* If we're here, then things are most likely
+ * completely hosed, but if the error condition
+ * is detected, it won't hurt to give it another
+ * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+ if (hsfsts.hsf_status.flcerr)
+ /* Repeat for some time before giving up. */
+ continue;
+ if (!hsfsts.hsf_status.flcdone) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
/**
* e1000_write_flash_byte_ich8lan - Write a single byte to NVM
@@ -3878,7 +4591,42 @@ STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
}
+/**
+* e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
+* @hw: pointer to the HW structure
+* @offset: The offset of the word to write.
+* @dword: The dword to write to the NVM.
+*
+* Writes a single dword to the NVM using the flash access registers.
+* Goes through a retry algorithm before giving up.
+**/
+STATIC s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 dword)
+{
+ s32 ret_val;
+ u16 program_retries;
+ DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
+
+ /* Must convert word offset into bytes. */
+ offset <<= 1;
+
+ ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+
+ if (!ret_val)
+ return ret_val;
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ DEBUGOUT2("Retrying Byte %8.8X at offset %u\n", dword, offset);
+ usec_delay(100);
+ ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+ if (ret_val == E1000_SUCCESS)
+ break;
+ }
+ if (program_retries == 100)
+ return -E1000_ERR_NVM;
+
+ return E1000_SUCCESS;
+}
/**
* e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
@@ -3988,12 +4736,22 @@ STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
/* Write a value 11 (block Erase) in Flash
* Cycle field in hw flash control
*/
- hsflctl.regval =
- E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ if (hw->mac.type >= e1000_pch_spt)
+ hsflctl.regval =
+ E1000_READ_FLASH_REG(hw,
+ ICH_FLASH_HSFSTS)>>16;
+ else
+ hsflctl.regval =
+ E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
- E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
- hsflctl.regval);
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
+ hsflctl.regval);
/* Write the last 24 bits of an index within the
* block into Flash Linear address field in Flash
@@ -4426,7 +5184,7 @@ STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_RFCTL, reg);
/* Enable ECC on Lynxpoint */
- if (hw->mac.type == e1000_pch_lpt) {
+ if (hw->mac.type >= e1000_pch_lpt) {
reg = E1000_READ_REG(hw, E1000_PBECCSTS);
reg |= E1000_PBECCSTS_ECC_ENABLE;
E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
@@ -4858,7 +5616,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
(device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
(device_id == E1000_DEV_ID_PCH_I218_LM3) ||
- (device_id == E1000_DEV_ID_PCH_I218_V3)) {
+ (device_id == E1000_DEV_ID_PCH_I218_V3) ||
+ (hw->mac.type >= e1000_pch_spt)) {
u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
E1000_WRITE_REG(hw, E1000_FEXTNVM6,
diff --git a/drivers/net/e1000/base/e1000_ich8lan.h b/drivers/net/e1000/base/e1000_ich8lan.h
index 33e77fb8..bc4ed1dd 100644
--- a/drivers/net/e1000/base/e1000_ich8lan.h
+++ b/drivers/net/e1000/base/e1000_ich8lan.h
@@ -121,6 +121,18 @@ POSSIBILITY OF SUCH DAMAGE.
#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
+#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800
+#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000
+#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200
+#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000
+
+/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
+#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
+
+#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field*/
+#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs*/
+#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */
+#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
@@ -198,6 +210,10 @@ POSSIBILITY OF SUCH DAMAGE.
#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */
#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */
#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */
+/* enable ULP even if when phy powered down via lanphypc */
+#define I218_ULP_CONFIG1_EN_ULP_LANPHYPC 0x0400
+/* disable clear of sticky ULP on PERST */
+#define I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST 0x0800
#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */
#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
@@ -234,9 +250,12 @@ POSSIBILITY OF SUCH DAMAGE.
/* PHY Power Management Control */
#define HV_PM_CTRL PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_CLK_REQ 0x200
#define HV_PM_CTRL_K1_ENABLE 0x4000
+#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
+#define I217_PLL_CLOCK_GATE_MASK 0x07FF
+
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
/* Inband Control */
diff --git a/drivers/net/e1000/base/e1000_mbx.c b/drivers/net/e1000/base/e1000_mbx.c
index 6daf16b0..a92fd22e 100644
--- a/drivers/net/e1000/base/e1000_mbx.c
+++ b/drivers/net/e1000/base/e1000_mbx.c
@@ -430,15 +430,21 @@ STATIC s32 e1000_check_for_rst_vf(struct e1000_hw *hw,
STATIC s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
{
s32 ret_val = -E1000_ERR_MBX;
+ int count = 10;
DEBUGFUNC("e1000_obtain_mbx_lock_vf");
- /* Take ownership of the buffer */
- E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
+ do {
+ /* Take ownership of the buffer */
+ E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
- /* reserve mailbox for vf use */
- if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
- ret_val = E1000_SUCCESS;
+ /* reserve mailbox for vf use */
+ if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(1000);
+ } while (count-- > 0);
return ret_val;
}
@@ -645,18 +651,26 @@ STATIC s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
{
s32 ret_val = -E1000_ERR_MBX;
u32 p2v_mailbox;
+ int count = 10;
DEBUGFUNC("e1000_obtain_mbx_lock_pf");
- /* Take ownership of the buffer */
- E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+ do {
+ /* Take ownership of the buffer */
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number),
+ E1000_P2VMAILBOX_PFU);
- /* reserve mailbox for vf use */
- p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
- if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
- ret_val = E1000_SUCCESS;
+ /* reserve mailbox for pf use */
+ p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
+ if (p2v_mailbox & E1000_P2VMAILBOX_PFU) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(1000);
+ } while (count-- > 0);
return ret_val;
+
}
/**
diff --git a/drivers/net/e1000/base/e1000_nvm.c b/drivers/net/e1000/base/e1000_nvm.c
index 762acd16..75c22827 100644
--- a/drivers/net/e1000/base/e1000_nvm.c
+++ b/drivers/net/e1000/base/e1000_nvm.c
@@ -1295,6 +1295,7 @@ void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
case e1000_82575:
case e1000_82576:
case e1000_82580:
+ case e1000_i354:
hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
/* Use this format, unless EETRACK ID exists,
* then use alternate format
diff --git a/drivers/net/e1000/base/e1000_osdep.h b/drivers/net/e1000/base/e1000_osdep.h
index 47a19481..b8868049 100644
--- a/drivers/net/e1000/base/e1000_osdep.h
+++ b/drivers/net/e1000/base/e1000_osdep.h
@@ -44,6 +44,7 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_byteorder.h>
+#include <rte_io.h>
#include "../e1000_logs.h"
@@ -94,17 +95,18 @@ typedef int bool;
#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
-#define E1000_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define E1000_PCI_REG(reg) rte_read32(reg)
-#define E1000_PCI_REG16(reg) (*((volatile uint16_t *)(reg)))
+#define E1000_PCI_REG16(reg) rte_read16(reg)
-#define E1000_PCI_REG_WRITE(reg, value) do { \
- E1000_PCI_REG((reg)) = (rte_cpu_to_le_32(value)); \
-} while (0)
+#define E1000_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
-#define E1000_PCI_REG_WRITE16(reg, value) do { \
- E1000_PCI_REG16((reg)) = (rte_cpu_to_le_16(value)); \
-} while (0)
+#define E1000_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
+
+#define E1000_PCI_REG_WRITE16(reg, value) \
+ rte_write16((rte_cpu_to_le_16(value)), reg)
#define E1000_PCI_REG_ADDR(hw, reg) \
((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
diff --git a/drivers/net/e1000/base/e1000_regs.h b/drivers/net/e1000/base/e1000_regs.h
index 84531a99..364a7261 100644
--- a/drivers/net/e1000/base/e1000_regs.h
+++ b/drivers/net/e1000/base/e1000_regs.h
@@ -66,6 +66,8 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
@@ -109,6 +111,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
+#define E1000_IOSFPC 0x00F28 /* TX corrupted data */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
#define E1000_EEMNGCTL_I210 0x01010 /* i210 MNG EEprom Mode Control */
#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
@@ -591,6 +594,10 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */
#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */
#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_SYSSTMPL 0x0B648 /* HH Timesync system stamp low register */
+#define E1000_SYSSTMPH 0x0B64C /* HH Timesync system stamp hi register */
+#define E1000_PLTSTMPL 0x0B640 /* HH Timesync platform stamp low register */
+#define E1000_PLTSTMPH 0x0B644 /* HH Timesync platform stamp hi register */
#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
diff --git a/drivers/net/e1000/base/e1000_vf.c b/drivers/net/e1000/base/e1000_vf.c
index 7845b48e..44ab0188 100644
--- a/drivers/net/e1000/base/e1000_vf.c
+++ b/drivers/net/e1000/base/e1000_vf.c
@@ -421,12 +421,13 @@ void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+ msgbuf[0] = E1000_VF_SET_MULTICAST;
+
if (mc_addr_count > 30) {
msgbuf[0] |= E1000_VF_SET_MULTICAST_OVERFLOW;
mc_addr_count = 30;
}
- msgbuf[0] = E1000_VF_SET_MULTICAST;
msgbuf[0] |= mc_addr_count << E1000_VT_MSGINFO_SHIFT;
for (i = 0; i < mc_addr_count; i++) {
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 6c25c8da..8352d0a7 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -138,6 +138,11 @@
#define E1000_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define E1000_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+#define IGB_TX_MAX_SEG UINT8_MAX
+#define IGB_TX_MAX_MTU_SEG UINT8_MAX
+#define EM_TX_MAX_SEG UINT8_MAX
+#define EM_TX_MAX_MTU_SEG UINT8_MAX
+
/* structure for interrupt relative data */
struct e1000_interrupt {
uint32_t flags;
@@ -286,6 +291,8 @@ struct e1000_adapter {
#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
(&((struct e1000_adapter *)adapter)->filter)
+#define E1000_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
/*
* RX/TX IGB function prototypes
*/
@@ -304,10 +311,15 @@ uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
+int eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt);
+
int eth_igb_rx_init(struct rte_eth_dev *dev);
void eth_igb_tx_init(struct rte_eth_dev *dev);
@@ -315,6 +327,9 @@ void eth_igb_tx_init(struct rte_eth_dev *dev);
uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t eth_igb_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
@@ -365,6 +380,9 @@ uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -376,6 +394,9 @@ void eth_em_tx_init(struct rte_eth_dev *dev);
uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t eth_em_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index aee3d340..57eb017c 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
@@ -83,9 +84,9 @@ static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev,
static int eth_em_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_interrupt_get_status(struct rte_eth_dev *dev);
-static int eth_em_interrupt_action(struct rte_eth_dev *dev);
-static void eth_em_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static int eth_em_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *handle);
+static void eth_em_interrupt_handler(void *param);
static int em_hw_init(struct e1000_hw *hw);
static int em_hardware_init(struct e1000_hw *hw);
@@ -119,8 +120,8 @@ static int eth_em_led_on(struct rte_eth_dev *dev);
static int eth_em_led_off(struct rte_eth_dev *dev);
static int em_get_rx_buffer_size(struct e1000_hw *hw);
-static void eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool);
+static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
@@ -168,6 +169,19 @@ static const struct rte_pci_id pci_id_em_map[] = {
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V2) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM3) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V3) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM2) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V2) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LBG_I219_LM3) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM4) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V4) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM5) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V5) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM6) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V6) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM7) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V7) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -191,6 +205,8 @@ static const struct eth_dev_ops eth_em_ops = {
.rx_queue_release = eth_em_rx_queue_release,
.rx_queue_count = eth_em_rx_queue_count,
.rx_descriptor_done = eth_em_rx_descriptor_done,
+ .rx_descriptor_status = eth_em_rx_descriptor_status,
+ .tx_descriptor_status = eth_em_tx_descriptor_status,
.tx_queue_setup = eth_em_tx_queue_setup,
.tx_queue_release = eth_em_tx_queue_release,
.rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
@@ -278,6 +294,19 @@ eth_em_dev_is_ich8(struct e1000_hw *hw)
case E1000_DEV_ID_PCH_I218_LM2:
case E1000_DEV_ID_PCH_I218_V3:
case E1000_DEV_ID_PCH_I218_LM3:
+ case E1000_DEV_ID_PCH_SPT_I219_LM:
+ case E1000_DEV_ID_PCH_SPT_I219_V:
+ case E1000_DEV_ID_PCH_SPT_I219_LM2:
+ case E1000_DEV_ID_PCH_SPT_I219_V2:
+ case E1000_DEV_ID_PCH_LBG_I219_LM3:
+ case E1000_DEV_ID_PCH_SPT_I219_LM4:
+ case E1000_DEV_ID_PCH_SPT_I219_V4:
+ case E1000_DEV_ID_PCH_SPT_I219_LM5:
+ case E1000_DEV_ID_PCH_SPT_I219_V5:
+ case E1000_DEV_ID_PCH_CNP_I219_LM6:
+ case E1000_DEV_ID_PCH_CNP_I219_V6:
+ case E1000_DEV_ID_PCH_CNP_I219_LM7:
+ case E1000_DEV_ID_PCH_CNP_I219_V7:
return 1;
default:
return 0;
@@ -287,7 +316,8 @@ eth_em_dev_is_ich8(struct e1000_hw *hw)
static int
eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
struct e1000_hw *hw =
@@ -295,11 +325,10 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
struct e1000_vfta * shadow_vfta =
E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
- pci_dev = eth_dev->pci_dev;
-
eth_dev->dev_ops = &eth_em_ops;
eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
+ eth_dev->tx_pkt_prepare = (eth_tx_prep_t)&eth_em_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
@@ -312,6 +341,7 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->device_id = pci_dev->id.device_id;
@@ -351,8 +381,8 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
- rte_intr_callback_register(&(pci_dev->intr_handle),
- eth_em_interrupt_handler, (void *)eth_dev);
+ rte_intr_callback_register(intr_handle,
+ eth_em_interrupt_handler, eth_dev);
return 0;
}
@@ -360,17 +390,16 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -EPERM;
- pci_dev = eth_dev->pci_dev;
-
if (adapter->stopped == 0)
eth_em_close(eth_dev);
@@ -382,24 +411,30 @@ eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->data->mac_addrs = NULL;
/* disable uio intr before callback unregister */
- rte_intr_disable(&(pci_dev->intr_handle));
- rte_intr_callback_unregister(&(pci_dev->intr_handle),
- eth_em_interrupt_handler, (void *)eth_dev);
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ eth_em_interrupt_handler, eth_dev);
return 0;
}
-static struct eth_driver rte_em_pmd = {
- .pci_drv = {
- .id_table = pci_id_em_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_em_dev_init,
- .eth_dev_uninit = eth_em_dev_uninit,
- .dev_private_size = sizeof(struct e1000_adapter),
+static int eth_em_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct e1000_adapter), eth_em_dev_init);
+}
+
+static int eth_em_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_em_dev_uninit);
+}
+
+static struct rte_pci_driver rte_em_pmd = {
+ .id_table = pci_id_em_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_em_pci_probe,
+ .remove = eth_em_pci_remove,
};
static int
@@ -540,6 +575,8 @@ em_set_pba(struct e1000_hw *hw)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
pba = E1000_PBA_26K;
break;
default:
@@ -556,7 +593,9 @@ eth_em_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE(dev->data->dev_private);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev =
+ E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret, mask;
uint32_t intr_vector = 0;
uint32_t *speeds;
@@ -609,7 +648,7 @@ eth_em_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
@@ -706,7 +745,7 @@ eth_em_start(struct rte_eth_dev *dev)
(void *)dev);
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ " no intr multiplexn");
}
/* check if rxq interrupt is enabled */
if (dev->data->dev_conf.intr_conf.rxq != 0)
@@ -738,7 +777,8 @@ eth_em_stop(struct rte_eth_dev *dev)
{
struct rte_eth_link link;
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
em_rxq_intr_disable(hw);
em_lsc_intr_disable(hw);
@@ -853,7 +893,9 @@ em_hardware_init(struct e1000_hw *hw)
hw->fc.low_water = 0x5048;
hw->fc.pause_time = 0x0650;
hw->fc.refresh_time = 0x0400;
- } else if (hw->mac.type == e1000_pch_lpt) {
+ } else if (hw->mac.type == e1000_pch_lpt ||
+ hw->mac.type == e1000_pch_spt ||
+ hw->mac.type == e1000_pch_cnp) {
hw->fc.requested_mode = e1000_fc_full;
}
@@ -999,9 +1041,11 @@ static int
eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
em_rxq_intr_enable(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
@@ -1026,6 +1070,8 @@ em_get_max_pktlen(const struct e1000_hw *hw)
case e1000_ich10lan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
case e1000_82574:
case e1000_80003es2lan: /* 9K Jumbo Frame size */
case e1000_82583:
@@ -1045,9 +1091,20 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
/*
* Starting with 631xESB hw supports 2 TX/RX queues per port.
@@ -1079,6 +1136,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.nb_max = E1000_MAX_RING_DESC,
.nb_min = E1000_MIN_RING_DESC,
.nb_align = EM_TXD_ALIGN,
+ .nb_seg_max = EM_TX_MAX_SEG,
+ .nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
};
dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
@@ -1536,8 +1595,10 @@ eth_em_interrupt_get_status(struct rte_eth_dev *dev)
* - On failure, a negative value.
*/
static int
-eth_em_interrupt_action(struct rte_eth_dev *dev)
+eth_em_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
{
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_interrupt *intr =
@@ -1550,7 +1611,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev)
return -1;
intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
/* set get_link_status to check register later */
hw->mac.get_link_status = 1;
@@ -1571,8 +1632,8 @@ eth_em_interrupt_action(struct rte_eth_dev *dev)
PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
}
PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
- dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
- dev->pci_dev->addr.devid, dev->pci_dev->addr.function);
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
tctl = E1000_READ_REG(hw, E1000_TCTL);
rctl = E1000_READ_REG(hw, E1000_RCTL);
@@ -1604,13 +1665,12 @@ eth_em_interrupt_action(struct rte_eth_dev *dev)
* void
*/
static void
-eth_em_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+eth_em_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
eth_em_interrupt_get_status(dev);
- eth_em_interrupt_action(dev);
+ eth_em_interrupt_action(dev, dev->intr_handle);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
@@ -1734,13 +1794,13 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
return -EIO;
}
-static void
+static int
eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, __rte_unused uint32_t pool)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- e1000_rar_set(hw, mac_addr->addr_bytes, index);
+ return e1000_rar_set(hw, mac_addr->addr_bytes, index);
}
static void
@@ -1805,5 +1865,6 @@ eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
return 0;
}
-RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 41f51c0f..31819c5b 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,6 +66,7 @@
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
+#include <rte_net.h>
#include <rte_string_fns.h>
#include "e1000_logs.h"
@@ -77,6 +78,14 @@
#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
+#define E1000_TX_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_VLAN_PKT)
+
+#define E1000_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
+
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
@@ -610,7 +619,7 @@ end_of_tx:
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+ E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
@@ -618,6 +627,43 @@ end_of_tx:
/*********************************************************************
*
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+eth_em_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/*********************************************************************
+ *
* RX functions
*
**********************************************************************/
@@ -1390,11 +1436,6 @@ eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct em_rx_queue *rxq;
uint32_t desc = 0;
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
@@ -1427,6 +1468,57 @@ eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset)
return !!(rxdp->status & E1000_RXD_STAT_DD);
}
+int
+eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct em_rx_queue *rxq = rx_queue;
+ volatile uint8_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].status;
+ if (*status & E1000_RXD_STAT_DD)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct em_tx_queue *txq = tx_queue;
+ volatile uint8_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].upper.fields.status;
+ if (*status & E1000_TXD_STAT_DD)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void
em_dev_clear_queues(struct rte_eth_dev *dev)
{
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 2fddf0cb..e1702d8b 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -45,6 +45,7 @@
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
@@ -115,11 +116,19 @@ static void eth_igb_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
+static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev,
+ const uint64_t *ids,
+ uint64_t *values, unsigned int n);
static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
- unsigned limit);
+ unsigned int size);
+static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+ unsigned int limit);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
+static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
+ char *fw_version, size_t fw_size);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
@@ -132,9 +141,9 @@ static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
-static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
-static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *handle);
+static void eth_igb_interrupt_handler(void *param);
static int igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
@@ -162,9 +171,9 @@ static int eth_igb_led_off(struct rte_eth_dev *dev);
static void igb_intr_disable(struct e1000_hw *hw);
static int igb_get_rx_buffer_size(struct e1000_hw *hw);
-static void eth_igb_rar_set(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool);
+static int eth_igb_rar_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *addr);
@@ -280,8 +289,7 @@ static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
-static void eth_igbvf_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static void eth_igbvf_interrupt_handler(void *param);
static void igbvf_mbx_process(struct rte_eth_dev *dev);
/*
@@ -369,6 +377,8 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
.nb_max = E1000_MAX_RING_DESC,
.nb_min = E1000_MIN_RING_DESC,
.nb_align = IGB_RXD_ALIGN,
+ .nb_seg_max = IGB_TX_MAX_SEG,
+ .nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
};
static const struct eth_dev_ops eth_igb_ops = {
@@ -385,9 +395,12 @@ static const struct eth_dev_ops eth_igb_ops = {
.link_update = eth_igb_link_update,
.stats_get = eth_igb_stats_get,
.xstats_get = eth_igb_xstats_get,
+ .xstats_get_by_id = eth_igb_xstats_get_by_id,
+ .xstats_get_names_by_id = eth_igb_xstats_get_names_by_id,
.xstats_get_names = eth_igb_xstats_get_names,
.stats_reset = eth_igb_stats_reset,
.xstats_reset = eth_igb_xstats_reset,
+ .fw_version_get = eth_igb_fw_version_get,
.dev_infos_get = eth_igb_infos_get,
.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
.mtu_set = eth_igb_mtu_set,
@@ -400,8 +413,11 @@ static const struct eth_dev_ops eth_igb_ops = {
.rx_queue_release = eth_igb_rx_queue_release,
.rx_queue_count = eth_igb_rx_queue_count,
.rx_descriptor_done = eth_igb_rx_descriptor_done,
+ .rx_descriptor_status = eth_igb_rx_descriptor_status,
+ .tx_descriptor_status = eth_igb_tx_descriptor_status,
.tx_queue_setup = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
+ .tx_done_cleanup = eth_igb_tx_done_cleanup,
.dev_led_on = eth_igb_led_on,
.dev_led_off = eth_igb_led_off,
.flow_ctrl_get = eth_igb_flow_ctrl_get,
@@ -668,15 +684,16 @@ igb_pf_reset_hw(struct e1000_hw *hw)
}
static void
-igb_identify_hardware(struct rte_eth_dev *dev)
+igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- hw->vendor_id = dev->pci_dev->id.vendor_id;
- hw->device_id = dev->pci_dev->id.device_id;
- hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
- hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
e1000_set_mac_type(hw);
@@ -743,7 +760,7 @@ static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
int error = 0;
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct e1000_vfta * shadow_vfta =
@@ -755,11 +772,10 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
uint32_t ctrl_ext;
- pci_dev = eth_dev->pci_dev;
-
eth_dev->dev_ops = &eth_igb_ops;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
@@ -771,10 +787,11 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
- igb_identify_hardware(eth_dev);
+ igb_identify_hardware(eth_dev, pci_dev);
if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
error = -EIO;
goto err_late;
@@ -908,6 +925,7 @@ static int
eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct e1000_hw *hw;
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
@@ -918,7 +936,8 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
return -EPERM;
hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- pci_dev = eth_dev->pci_dev;
+ pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ intr_handle = &pci_dev->intr_handle;
if (adapter->stopped == 0)
eth_igb_close(eth_dev);
@@ -937,9 +956,9 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
igb_pf_host_uninit(eth_dev);
/* disable uio intr before callback unregister */
- rte_intr_disable(&(pci_dev->intr_handle));
- rte_intr_callback_unregister(&(pci_dev->intr_handle),
- eth_igb_interrupt_handler, (void *)eth_dev);
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ eth_igb_interrupt_handler, eth_dev);
return 0;
}
@@ -951,6 +970,7 @@ static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
struct e1000_hw *hw =
@@ -963,6 +983,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &igbvf_eth_dev_ops;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
@@ -973,9 +994,9 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- pci_dev = eth_dev->pci_dev;
-
+ pci_dev = E1000_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
@@ -1012,12 +1033,6 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
/* Generate a random MAC address, if none was assigned by PF. */
if (is_zero_ether_addr(perm_addr)) {
eth_random_addr(perm_addr->addr_bytes);
- diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
- if (diag) {
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
- return diag;
- }
PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
"%02x:%02x:%02x:%02x:%02x:%02x",
@@ -1029,6 +1044,12 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
perm_addr->addr_bytes[5]);
}
+ diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
+ if (diag) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ return diag;
+ }
/* Copy the permanent MAC address */
ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
&eth_dev->data->mac_addrs[0]);
@@ -1038,9 +1059,9 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id, "igb_mac_82576_vf");
- rte_intr_callback_register(&pci_dev->intr_handle,
- eth_igbvf_interrupt_handler,
- (void *)eth_dev);
+ intr_handle = &pci_dev->intr_handle;
+ rte_intr_callback_register(intr_handle,
+ eth_igbvf_interrupt_handler, eth_dev);
return 0;
}
@@ -1050,7 +1071,7 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
PMD_INIT_FUNC_TRACE();
@@ -1076,32 +1097,46 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
-static struct eth_driver rte_igb_pmd = {
- .pci_drv = {
- .id_table = pci_id_igb_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_igb_dev_init,
- .eth_dev_uninit = eth_igb_dev_uninit,
- .dev_private_size = sizeof(struct e1000_adapter),
+static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct e1000_adapter), eth_igb_dev_init);
+}
+
+static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit);
+}
+
+static struct rte_pci_driver rte_igb_pmd = {
+ .id_table = pci_id_igb_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_igb_pci_probe,
+ .remove = eth_igb_pci_remove,
};
+
+static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct e1000_adapter), eth_igbvf_dev_init);
+}
+
+static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit);
+}
+
/*
* virtual function driver struct
*/
-static struct eth_driver rte_igbvf_pmd = {
- .pci_drv = {
- .id_table = pci_id_igbvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_igbvf_dev_init,
- .eth_dev_uninit = eth_igbvf_dev_uninit,
- .dev_private_size = sizeof(struct e1000_adapter),
+static struct rte_pci_driver rte_igbvf_pmd = {
+ .id_table = pci_id_igbvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_igbvf_pci_probe,
+ .remove = eth_igbvf_pci_remove,
};
static void
@@ -1217,7 +1252,8 @@ eth_igb_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret, mask;
uint32_t intr_vector = 0;
uint32_t ctrl_ext;
@@ -1281,7 +1317,7 @@ eth_igb_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -1388,7 +1424,7 @@ eth_igb_start(struct rte_eth_dev *dev)
(void *)dev);
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ " no intr multiplex");
}
/* check if rxq interrupt is enabled */
@@ -1425,11 +1461,12 @@ eth_igb_stop(struct rte_eth_dev *dev)
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
struct rte_eth_link link;
struct e1000_flex_filter *p_flex;
struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
igb_intr_disable(hw);
@@ -1529,7 +1566,8 @@ eth_igb_close(struct rte_eth_dev *dev)
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
struct rte_eth_link link;
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
eth_igb_stop(dev);
adapter->stopped = 1;
@@ -1549,10 +1587,9 @@ eth_igb_close(struct rte_eth_dev *dev)
igb_dev_free_queues(dev);
- pci_dev = dev->pci_dev;
- if (pci_dev->intr_handle.intr_vec) {
- rte_free(pci_dev->intr_handle.intr_vec);
- pci_dev->intr_handle.intr_vec = NULL;
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
}
memset(&link, 0, sizeof(link));
@@ -1816,7 +1853,7 @@ eth_igb_xstats_reset(struct rte_eth_dev *dev)
static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
- __rte_unused unsigned limit)
+ __rte_unused unsigned int size)
{
unsigned i;
@@ -1833,6 +1870,41 @@ static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
return IGB_NB_XSTATS;
}
+static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+ unsigned int limit)
+{
+ unsigned int i;
+
+ if (!ids) {
+ if (xstats_names == NULL)
+ return IGB_NB_XSTATS;
+
+ for (i = 0; i < IGB_NB_XSTATS; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s", rte_igb_stats_strings[i].name);
+
+ return IGB_NB_XSTATS;
+
+ } else {
+ struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS];
+
+ eth_igb_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
+ IGB_NB_XSTATS);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= IGB_NB_XSTATS) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name,
+ xstats_names_copy[ids[i]].name);
+ }
+ return limit;
+ }
+}
+
static int
eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
@@ -1863,6 +1935,53 @@ eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
return IGB_NB_XSTATS;
}
+static int
+eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ unsigned int i;
+
+ if (!ids) {
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_hw_stats *hw_stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ if (n < IGB_NB_XSTATS)
+ return IGB_NB_XSTATS;
+
+ igb_read_stats_registers(hw, hw_stats);
+
+ /* If this is a reset xstats is NULL, and we have cleared the
+ * registers by reading them.
+ */
+ if (!values)
+ return 0;
+
+ /* Extended stats */
+ for (i = 0; i < IGB_NB_XSTATS; i++)
+ values[i] = *(uint64_t *)(((char *)hw_stats) +
+ rte_igb_stats_strings[i].offset);
+
+ return IGB_NB_XSTATS;
+
+ } else {
+ uint64_t values_copy[IGB_NB_XSTATS];
+
+ eth_igb_xstats_get_by_id(dev, NULL, values_copy,
+ IGB_NB_XSTATS);
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= IGB_NB_XSTATS) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return n;
+ }
+}
+
static void
igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
{
@@ -1976,11 +2095,64 @@ eth_igbvf_stats_reset(struct rte_eth_dev *dev)
offsetof(struct e1000_vf_stats, gprc));
}
+static int
+eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+ size_t fw_size)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_fw_version fw;
+ int ret;
+
+ e1000_get_fw_version(hw, &fw);
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ if (!(e1000_get_flash_presence_i210(hw))) {
+ ret = snprintf(fw_version, fw_size,
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
+ break;
+ }
+ /* fall through */
+ default:
+ /* if option rom is valid, display its version too */
+ if (fw.or_valid) {
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
+ /* no option rom */
+ } else {
+ if (fw.etrack_id != 0X0000) {
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor,
+ fw.etrack_id);
+ } else {
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor,
+ fw.eep_build);
+ }
+ }
+ break;
+ }
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
static void
eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -2109,6 +2281,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -2633,12 +2806,14 @@ eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
* - On failure, a negative value.
*/
static int
-eth_igb_interrupt_action(struct rte_eth_dev *dev)
+eth_igb_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
uint32_t tctl, rctl;
struct rte_eth_link link;
int ret;
@@ -2649,7 +2824,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
}
igb_intr_enable(dev);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
@@ -2677,10 +2852,10 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
}
PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
- dev->pci_dev->addr.domain,
- dev->pci_dev->addr.bus,
- dev->pci_dev->addr.devid,
- dev->pci_dev->addr.function);
+ pci_dev->addr.domain,
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
tctl = E1000_READ_REG(hw, E1000_TCTL);
rctl = E1000_READ_REG(hw, E1000_RCTL);
if (link.link_status) {
@@ -2713,13 +2888,12 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
* void
*/
static void
-eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+eth_igb_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
eth_igb_interrupt_get_status(dev);
- eth_igb_interrupt_action(dev);
+ eth_igb_interrupt_action(dev, dev->intr_handle);
}
static int
@@ -2759,7 +2933,7 @@ void igbvf_mbx_process(struct rte_eth_dev *dev)
}
static int
-eth_igbvf_interrupt_action(struct rte_eth_dev *dev)
+eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle)
{
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -2770,19 +2944,18 @@ eth_igbvf_interrupt_action(struct rte_eth_dev *dev)
}
igbvf_intr_enable(dev);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
static void
-eth_igbvf_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+eth_igbvf_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
eth_igbvf_interrupt_get_status(dev);
- eth_igbvf_interrupt_action(dev);
+ eth_igbvf_interrupt_action(dev, dev->intr_handle);
}
static int
@@ -2906,7 +3079,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
}
#define E1000_RAH_POOLSEL_SHIFT (18)
-static void
+static int
eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, __rte_unused uint32_t pool)
{
@@ -2917,6 +3090,7 @@ eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
rah = E1000_READ_REG(hw, E1000_RAH(index));
rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
E1000_WRITE_REG(hw, E1000_RAH(index), rah);
+ return 0;
}
static void
@@ -3055,8 +3229,9 @@ igbvf_dev_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
uint32_t intr_vector = 0;
PMD_INIT_FUNC_TRACE();
@@ -3091,7 +3266,7 @@ igbvf_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (!intr_handle->intr_vec) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -3110,7 +3285,8 @@ igbvf_dev_start(struct rte_eth_dev *dev)
static void
igbvf_dev_stop(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -3309,7 +3485,7 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size != ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
@@ -3350,7 +3526,7 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size != ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
@@ -3471,7 +3647,7 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
(struct rte_eth_syn_filter *)arg);
break;
default:
- PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
ret = -EINVAL;
break;
}
@@ -5095,6 +5271,8 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask = 1 << queue_id;
uint32_t regval;
@@ -5102,7 +5280,7 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
E1000_WRITE_FLUSH(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
@@ -5166,8 +5344,8 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
uint32_t vec = E1000_MISC_VEC_ID;
uint32_t base = E1000_MISC_VEC_ID;
uint32_t misc_shift = 0;
-
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
/* won't configure msix register if no mapping is done
* between intr vector and event fd
@@ -5238,7 +5416,9 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
E1000_WRITE_FLUSH(hw);
}
-RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
-RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd.pci_drv);
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio");
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 5845bc22..923c78a1 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -57,7 +57,9 @@
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
- return eth_dev->pci_dev->max_vfs;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+
+ return pci_dev->max_vfs;
}
static inline
@@ -330,12 +332,16 @@ igb_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
*(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
int rar_entry = hw->mac.rar_entry_count - (vf + 1);
uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+ int rah;
if (is_unicast_ether_addr((struct ether_addr *)new_mac)) {
if (!is_zero_ether_addr((struct ether_addr *)new_mac))
rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
sizeof(vfinfo[vf].vf_mac_addresses));
hw->mac.ops.rar_set(hw, new_mac, rar_entry);
+ rah = E1000_READ_REG(hw, E1000_RAH(rar_entry));
+ rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + vf));
+ E1000_WRITE_REG(hw, E1000_RAH(rar_entry), rah);
return 0;
}
return -1;
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index dbd37acc..b3b601b7 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -65,18 +65,28 @@
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
+#include <rte_net.h>
#include <rte_string_fns.h>
#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
+#ifdef RTE_LIBRTE_IEEE1588
+#define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define IGB_TX_IEEE1588_TMST 0
+#endif
/* Bit Mask to indicate what bits required for building TX context */
#define IGB_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
+ PKT_TX_TCP_SEG | \
+ IGB_TX_IEEE1588_TMST)
+
+#define IGB_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
@@ -605,7 +615,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/*
* Set the Transmit Descriptor Tail (TDT).
*/
- E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+ E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
@@ -616,6 +626,52 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/*********************************************************************
*
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ /* Check some limitations for TSO in hardware */
+ if (m->ol_flags & PKT_TX_TCP_SEG)
+ if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
+ (m->l2_len + m->l3_len + m->l4_len >
+ IGB_TSO_MAX_HDRLEN)) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/*********************************************************************
+ *
* RX functions
*
**********************************************************************/
@@ -1227,6 +1283,132 @@ eth_igb_tx_queue_release(void *txq)
igb_tx_queue_release(txq);
}
+static int
+igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
+{
+ struct igb_tx_entry *sw_ring;
+ volatile union e1000_adv_tx_desc *txr;
+ uint16_t tx_first; /* First segment analyzed. */
+ uint16_t tx_id; /* Current segment being processed. */
+ uint16_t tx_last; /* Last segment in the current packet. */
+ uint16_t tx_next; /* First segment of the next packet. */
+ int count;
+
+ if (txq != NULL) {
+ count = 0;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+
+ /*
+ * tx_tail is the last sent packet on the sw_ring. Goto the end
+ * of that packet (the last segment in the packet chain) and
+ * then the next segment will be the start of the oldest segment
+ * in the sw_ring. This is the first packet that will be
+ * attempted to be freed.
+ */
+
+ /* Get last segment in most recently added packet. */
+ tx_first = sw_ring[txq->tx_tail].last_id;
+
+ /* Get the next segment, which is the oldest segment in ring. */
+ tx_first = sw_ring[tx_first].next_id;
+
+ /* Set the current index to the first. */
+ tx_id = tx_first;
+
+ /*
+ * Loop through each packet. For each packet, verify that an
+ * mbuf exists and that the last segment is free. If so, free
+ * it and move on.
+ */
+ while (1) {
+ tx_last = sw_ring[tx_id].last_id;
+
+ if (sw_ring[tx_last].mbuf) {
+ if (txr[tx_last].wb.status &
+ E1000_TXD_STAT_DD) {
+ /*
+ * Increment the number of packets
+ * freed.
+ */
+ count++;
+
+ /* Get the start of the next packet. */
+ tx_next = sw_ring[tx_last].next_id;
+
+ /*
+ * Loop through all segments in a
+ * packet.
+ */
+ do {
+ rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+ sw_ring[tx_id].mbuf = NULL;
+ sw_ring[tx_id].last_id = tx_id;
+
+ /* Move to next segemnt. */
+ tx_id = sw_ring[tx_id].next_id;
+
+ } while (tx_id != tx_next);
+
+ if (unlikely(count == (int)free_cnt))
+ break;
+ } else
+ /*
+ * mbuf still in use, nothing left to
+ * free.
+ */
+ break;
+ } else {
+ /*
+ * There are multiple reasons to be here:
+ * 1) All the packets on the ring have been
+ * freed - tx_id is equal to tx_first
+ * and some packets have been freed.
+ * - Done, exit
+ * 2) Interfaces has not sent a rings worth of
+ * packets yet, so the segment after tail is
+ * still empty. Or a previous call to this
+ * function freed some of the segments but
+ * not all so there is a hole in the list.
+ * Hopefully this is a rare case.
+ * - Walk the list and find the next mbuf. If
+ * there isn't one, then done.
+ */
+ if (likely((tx_id == tx_first) && (count != 0)))
+ break;
+
+ /*
+ * Walk the list and find the next mbuf, if any.
+ */
+ do {
+ /* Move to next segemnt. */
+ tx_id = sw_ring[tx_id].next_id;
+
+ if (sw_ring[tx_id].mbuf)
+ break;
+
+ } while (tx_id != tx_first);
+
+ /*
+ * Determine why previous loop bailed. If there
+ * is not an mbuf, done.
+ */
+ if (sw_ring[tx_id].mbuf == NULL)
+ break;
+ }
+ }
+ } else
+ count = -ENODEV;
+
+ return count;
+}
+
+int
+eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ return igb_tx_done_cleanup(txq, free_cnt);
+}
+
static void
igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
{
@@ -1364,6 +1546,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
igb_reset_tx_queue(txq, dev);
dev->tx_pkt_burst = eth_igb_xmit_pkts;
+ dev->tx_pkt_prepare = &eth_igb_prep_pkts;
dev->data->tx_queues[queue_idx] = txq;
return 0;
@@ -1512,11 +1695,6 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct igb_rx_queue *rxq;
uint32_t desc = 0;
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
@@ -1549,6 +1727,51 @@ eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
}
+int
+eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct igb_rx_queue *rxq = rx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.upper.status_error;
+ if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct igb_tx_queue *txq = tx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+
+ status = &txq->tx_ring[desc].wb.status;
+ if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void
igb_dev_clear_queues(struct rte_eth_dev *dev)
{
diff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile
index a0d3358d..bf1f5da0 100644
--- a/drivers/net/ena/Makefile
+++ b/drivers/net/ena/Makefile
@@ -51,11 +51,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_com.c
SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_eth_com.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_net
-
CFLAGS += $(INCLUDES)
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index bd6f3c6b..38a05877 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -2242,7 +2242,6 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
int ret;
@@ -2269,7 +2268,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
ena_trc_err("memory address set failed\n");
return ret;
}
- cmd.control_buffer.length = sizeof(*hash_ctrl);
+ cmd.control_buffer.length =
+ sizeof(struct ena_admin_feature_rss_hash_control);
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
@@ -2278,7 +2278,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret)) {
ena_trc_err("Failed to set hash input. error: %d\n", ret);
- ret = ENA_COM_INVAL;
+ return ENA_COM_INVAL;
}
return 0;
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index 87c3bf13..7eaebf40 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -44,6 +44,7 @@
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_cycles.h>
+#include <rte_io.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
@@ -224,18 +225,8 @@ typedef uint64_t dma_addr_t;
#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)
#define ENA_MEM_FREE(dmadev, ptr) ({ENA_TOUCH(dmadev); rte_free(ptr); })
-static inline void writel(u32 value, volatile void *addr)
-{
- *(volatile u32 *)addr = value;
-}
-
-static inline u32 readl(const volatile void *addr)
-{
- return *(const volatile u32 *)addr;
-}
-
-#define ENA_REG_WRITE32(value, reg) writel((value), (reg))
-#define ENA_REG_READ32(reg) readl((reg))
+#define ENA_REG_WRITE32(value, reg) rte_write32_relaxed((value), (reg))
+#define ENA_REG_READ32(reg) rte_read32_relaxed((reg))
#define ATOMIC32_INC(i32_ptr) rte_atomic32_inc(i32_ptr)
#define ATOMIC32_DEC(i32_ptr) rte_atomic32_dec(i32_ptr)
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index c1fd7bb3..64fee05d 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -33,12 +33,14 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_eal_memconfig.h>
+#include <rte_net.h>
#include "ena_ethdev.h"
#include "ena_logs.h"
@@ -168,7 +170,15 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define PCI_DEVICE_ID_ENA_VF 0xEC20
#define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21
-static struct rte_pci_id pci_id_ena_map[] = {
+#define ENA_TX_OFFLOAD_MASK (\
+ PKT_TX_L4_MASK | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_SEG)
+
+#define ENA_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
+
+static const struct rte_pci_id pci_id_ena_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
{ .device_id = 0 },
@@ -179,6 +189,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -216,7 +228,7 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
-static struct eth_dev_ops ena_dev_ops = {
+static const struct eth_dev_ops ena_dev_ops = {
.dev_configure = ena_dev_configure,
.dev_infos_get = ena_infos_get,
.rx_queue_setup = ena_rx_queue_setup,
@@ -669,7 +681,7 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring)
ring->rx_buffer_info[ring->next_to_clean & ring_mask];
if (m)
- __rte_mbuf_raw_free(m);
+ rte_mbuf_raw_free(m);
ring->next_to_clean++;
}
@@ -730,7 +742,7 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
if (rc) {
PMD_INIT_LOG(ERR,
- "failed to restart queue %d type(%d)\n",
+ "failed to restart queue %d type(%d)",
i, ring_type);
return -1;
}
@@ -756,7 +768,7 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
uint32_t max_frame_len = ena_get_mtu_conf(adapter);
if (max_frame_len > adapter->max_mtu) {
- PMD_INIT_LOG(ERR, "Unsupported MTU of %d\n", max_frame_len);
+ PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
return -1;
}
@@ -783,7 +795,7 @@ ena_calc_queue_size(struct ena_com_dev *ena_dev,
queue_size = rte_align32pow2(queue_size >> 1);
if (queue_size == 0) {
- PMD_INIT_LOG(ERR, "Invalid queue size\n");
+ PMD_INIT_LOG(ERR, "Invalid queue size");
return -EFAULT;
}
@@ -908,7 +920,7 @@ static int ena_start(struct rte_eth_dev *dev)
static int ena_queue_restart(struct ena_ring *ring)
{
- int rc;
+ int rc, bufs_num;
ena_assert_msg(ring->configured == 1,
"Trying to restart unconfigured queue\n");
@@ -919,9 +931,10 @@ static int ena_queue_restart(struct ena_ring *ring)
if (ring->type == ENA_RING_TYPE_TX)
return 0;
- rc = ena_populate_rx_queue(ring, ring->ring_size);
- if ((unsigned int)rc != ring->ring_size) {
- PMD_INIT_LOG(ERR, "Failed to populate rx ring !\n");
+ bufs_num = ring->ring_size - 1;
+ rc = ena_populate_rx_queue(ring, bufs_num);
+ if (rc != bufs_num) {
+ PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
return (-1);
}
@@ -1132,7 +1145,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
return 0;
in_use = rxq->next_to_use - rxq->next_to_clean;
- ena_assert_msg(((in_use + count) <= ring_size), "bad ring state");
+ ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
count = RTE_MIN(count,
(uint16_t)(ring_size - (next_to_use & ring_mask)));
@@ -1160,6 +1173,8 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
&ebuf, next_to_use_masked);
if (unlikely(rc)) {
+ rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
+ count - i);
RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
break;
}
@@ -1267,16 +1282,17 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &ena_dev_ops;
eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
adapter->rte_eth_dev_data = eth_dev->data;
adapter->rte_dev = eth_dev;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
adapter->pdev = pci_dev;
- PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
+ PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
pci_dev->addr.domain,
pci_dev->addr.bus,
pci_dev->addr.devid,
@@ -1293,7 +1309,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
else if (adapter->regs)
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
else
- PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
+ PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
ENA_REGS_BAR);
ena_dev->reg_bar = adapter->regs;
@@ -1307,7 +1323,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
/* device specific initialization routine */
rc = ena_device_init(ena_dev, &get_feat_ctx);
if (rc) {
- PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
+ PMD_INIT_LOG(CRIT, "Failed to init ENA device");
return -1;
}
@@ -1315,7 +1331,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
if (get_feat_ctx.max_queues.max_llq_num == 0) {
PMD_INIT_LOG(ERR,
"Trying to use LLQ but llq_num is 0.\n"
- "Fall back into regular queues.\n");
+ "Fall back into regular queues.");
ena_dev->tx_mem_queue_type =
ENA_ADMIN_PLACEMENT_POLICY_HOST;
adapter->num_queues =
@@ -1343,6 +1359,10 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
/* Set max MTU for this device */
adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
+ /* set device support for TSO */
+ adapter->tso4_supported = get_feat_ctx.offload.tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+
/* Copy MAC address and point DPDK to it */
eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr;
ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr,
@@ -1369,7 +1389,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
- PMD_INIT_LOG(ERR, "Illegal adapter state: %d\n",
+ PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
adapter->state);
return -1;
}
@@ -1431,6 +1451,8 @@ static void ena_infos_get(struct rte_eth_dev *dev,
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
dev_info->speed_capa =
ETH_LINK_SPEED_1G |
ETH_LINK_SPEED_2_5G |
@@ -1459,7 +1481,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
- if (feat.offload.tx &
+ if (feat.offload.rx_supported &
ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
@@ -1556,15 +1578,86 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
recv_idx++;
}
+ rx_ring->next_to_clean = next_to_clean;
+
+ desc_in_use = desc_in_use - completed + 1;
/* Burst refill to save doorbells, memory barriers, const interval */
if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
- rx_ring->next_to_clean = next_to_clean;
-
return recv_idx;
}
+static uint16_t
+eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int32_t ret;
+ uint32_t i;
+ struct rte_mbuf *m;
+ struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
+ struct ipv4_hdr *ip_hdr;
+ uint64_t ol_flags;
+ uint16_t frag_field;
+
+ for (i = 0; i != nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ if (!(ol_flags & PKT_TX_IPV4))
+ continue;
+
+ /* If there was not L2 header length specified, assume it is
+ * length of the ethernet header.
+ */
+ if (unlikely(m->l2_len == 0))
+ m->l2_len = sizeof(struct ether_hdr);
+
+ ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ m->l2_len);
+ frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
+
+ if ((frag_field & IPV4_HDR_DF_FLAG) != 0) {
+ m->packet_type |= RTE_PTYPE_L4_NONFRAG;
+
+ /* If IPv4 header has DF flag enabled and TSO support is
+ * disabled, partial chcecksum should not be calculated.
+ */
+ if (!tx_ring->adapter->tso4_supported)
+ continue;
+ }
+
+ if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
+ (ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_SCTP_CKSUM) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+
+ /* In case we are supposed to TSO and have DF not set (DF=0)
+ * hardware must be provided with partial checksum, otherwise
+ * it will take care of necessary calculations.
+ */
+
+ ret = rte_net_intel_cksum_flags_prepare(m,
+ ol_flags & ~PKT_TX_TCP_SEG);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -1698,16 +1791,25 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return sent_idx;
}
-static struct eth_driver rte_ena_pmd = {
- .pci_drv = {
- .id_table = pci_id_ena_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_ena_dev_init,
- .dev_private_size = sizeof(struct ena_adapter),
+static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct ena_adapter), eth_ena_dev_init);
+}
+
+static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_ena_pmd = {
+ .id_table = pci_id_ena_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_ena_pci_probe,
+ .remove = eth_ena_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 4c7edbb9..dc3080ff 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -162,6 +162,7 @@ struct ena_adapter {
u16 num_queues;
u16 max_mtu;
+ u8 tso4_supported;
int id_number;
char name[ENA_NAME_MAX_LEN];
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 3926b795..2c7496dc 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -63,10 +63,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_intr.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_rq.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_rss.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_net
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_hash
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index a3d2a0fb..d17a35f4 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -60,6 +60,7 @@
#define ENIC_RQ_MAX 16
#define ENIC_CQ_MAX (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2))
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_MAX_MAC_ADDR 64
#define VLAN_ETH_HLEN 18
@@ -278,8 +279,8 @@ extern void enic_dev_stats_get(struct enic *enic,
struct rte_eth_stats *r_stats);
extern void enic_dev_stats_clear(struct enic *enic);
extern void enic_add_packet_filter(struct enic *enic);
-extern void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
-extern void enic_del_mac_address(struct enic *enic);
+int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
+void enic_del_mac_address(struct enic *enic, int mac_index);
extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
struct rte_mbuf *tx_pkt, unsigned short len,
diff --git a/drivers/net/enic/enic_compat.h b/drivers/net/enic/enic_compat.h
index 5dbd983b..fc58bb41 100644
--- a/drivers/net/enic/enic_compat.h
+++ b/drivers/net/enic/enic_compat.h
@@ -41,6 +41,7 @@
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_log.h>
+#include <rte_io.h>
#define ENIC_PAGE_ALIGN 4096UL
#define ENIC_ALIGN ENIC_PAGE_ALIGN
@@ -95,42 +96,52 @@ typedef unsigned long long dma_addr_t;
static inline uint32_t ioread32(volatile void *addr)
{
- return *(volatile uint32_t *)addr;
+ return rte_read32(addr);
}
static inline uint16_t ioread16(volatile void *addr)
{
- return *(volatile uint16_t *)addr;
+ return rte_read16(addr);
}
static inline uint8_t ioread8(volatile void *addr)
{
- return *(volatile uint8_t *)addr;
+ return rte_read8(addr);
}
static inline void iowrite32(uint32_t val, volatile void *addr)
{
- *(volatile uint32_t *)addr = val;
+ rte_write32(val, addr);
+}
+
+static inline void iowrite32_relaxed(uint32_t val, volatile void *addr)
+{
+ rte_write32_relaxed(val, addr);
}
static inline void iowrite16(uint16_t val, volatile void *addr)
{
- *(volatile uint16_t *)addr = val;
+ rte_write16(val, addr);
}
static inline void iowrite8(uint8_t val, volatile void *addr)
{
- *(volatile uint8_t *)addr = val;
+ rte_write8(val, addr);
}
static inline unsigned int readl(volatile void __iomem *addr)
{
- return *(volatile unsigned int *)addr;
+ return rte_read32(addr);
+}
+
+static inline unsigned int readl_relaxed(volatile void __iomem *addr)
+{
+ return rte_read32_relaxed(addr);
}
static inline void writel(unsigned int val, volatile void __iomem *addr)
{
- *(volatile unsigned int *)addr = val;
+ rte_write32(val, addr);
}
#define min_t(type, x, y) ({ \
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 2b154ec2..372bae73 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -38,6 +38,7 @@
#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>
#include "vnic_intr.h"
@@ -272,11 +273,6 @@ static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t cq_idx;
int rq_num;
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- dev_err(enic, "Invalid RX queue id=%d", rx_queue_id);
- return 0;
- }
-
rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
cq = &enic->cq[enic_cq_rq(enic, rq_num)];
cq_idx = cq->to_clean;
@@ -459,12 +455,13 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
+ device_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
device_info->max_rx_queues = enic->conf_rq_count / 2;
device_info->max_tx_queues = enic->conf_wq_count;
device_info->min_rx_bufsize = ENIC_MIN_MTU;
device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4;
- device_info->max_mac_addrs = 1;
+ device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
device_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
@@ -474,7 +471,8 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
device_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
};
@@ -535,22 +533,22 @@ static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
enic_add_packet_filter(enic);
}
-static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
+static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
struct ether_addr *mac_addr,
__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- enic_set_mac_address(enic, mac_addr->addr_bytes);
+ return enic_set_mac_address(enic, mac_addr->addr_bytes);
}
-static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused uint32_t index)
+static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- enic_del_mac_address(enic);
+ enic_del_mac_address(enic, index);
}
static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
@@ -621,7 +619,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &enic_recv_pkts;
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
- pdev = eth_dev->pci_dev;
+ pdev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pdev);
enic->pdev = pdev;
addr = &pdev->addr;
@@ -632,16 +630,25 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
return enic_probe(enic);
}
-static struct eth_driver rte_enic_pmd = {
- .pci_drv = {
- .id_table = pci_id_enic_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_enicpmd_dev_init,
- .dev_private_size = sizeof(struct enic),
+static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
+ eth_enicpmd_dev_init);
+}
+
+static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_enic_pmd = {
+ .id_table = pci_id_enic_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_enic_pci_probe,
+ .remove = eth_enic_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 1861a32c..d0262418 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -193,35 +193,28 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
}
-void enic_del_mac_address(struct enic *enic)
+void enic_del_mac_address(struct enic *enic, int mac_index)
{
- if (vnic_dev_del_addr(enic->vdev, enic->mac_addr))
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+ uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;
+
+ if (vnic_dev_del_addr(enic->vdev, mac_addr))
dev_err(enic, "del mac addr failed\n");
}
-void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
+int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
int err;
if (!is_eth_addr_valid(mac_addr)) {
dev_err(enic, "invalid mac address\n");
- return;
- }
-
- err = vnic_dev_del_addr(enic->vdev, enic->mac_addr);
- if (err) {
- dev_err(enic, "del mac addr failed\n");
- return;
+ return -EINVAL;
}
- ether_addr_copy((struct ether_addr *)mac_addr,
- (struct ether_addr *)enic->mac_addr);
-
err = vnic_dev_add_addr(enic->vdev, mac_addr);
- if (err) {
+ if (err)
dev_err(enic, "add mac addr failed\n");
- return;
- }
+ return err;
}
static void
@@ -429,8 +422,7 @@ int enic_link_update(struct enic *enic)
}
static void
-enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
- void *arg)
+enic_intr_handler(void *arg)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
struct enic *enic = pmd_priv(dev);
@@ -1311,13 +1303,14 @@ static int enic_dev_init(struct enic *enic)
/* Get the supported filters */
enic_fdir_info(enic);
- eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
+ * ENIC_MAX_MAC_ADDR, 0);
if (!eth_dev->data->mac_addrs) {
dev_err(enic, "mac addr storage alloc failed, aborting.\n");
return -1;
}
ether_addr_copy((struct ether_addr *) enic->mac_addr,
- &eth_dev->data->mac_addrs[0]);
+ eth_dev->data->mac_addrs);
vnic_dev_set_reset_flag(enic->vdev, 0);
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 912ea157..ba0cfd01 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -37,6 +37,9 @@
#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
#define RTE_PMD_USE_PREFETCH
@@ -129,6 +132,60 @@ enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
+/* Find the offset to L5. This is needed by enic TSO implementation.
+ * Return 0 if not a TCP packet or can't figure out the length.
+ */
+static inline uint8_t tso_header_len(struct rte_mbuf *mbuf)
+{
+ struct ether_hdr *eh;
+ struct vlan_hdr *vh;
+ struct ipv4_hdr *ip4;
+ struct ipv6_hdr *ip6;
+ struct tcp_hdr *th;
+ uint8_t hdr_len;
+ uint16_t ether_type;
+
+ /* offset past Ethernet header */
+ eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ ether_type = eh->ether_type;
+ hdr_len = sizeof(struct ether_hdr);
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
+ vh = rte_pktmbuf_mtod_offset(mbuf, struct vlan_hdr *, hdr_len);
+ ether_type = vh->eth_proto;
+ hdr_len += sizeof(struct vlan_hdr);
+ }
+
+ /* offset past IP header */
+ switch (rte_be_to_cpu_16(ether_type)) {
+ case ETHER_TYPE_IPv4:
+ ip4 = rte_pktmbuf_mtod_offset(mbuf, struct ipv4_hdr *, hdr_len);
+ if (ip4->next_proto_id != IPPROTO_TCP)
+ return 0;
+ hdr_len += (ip4->version_ihl & 0xf) * 4;
+ break;
+ case ETHER_TYPE_IPv6:
+ ip6 = rte_pktmbuf_mtod_offset(mbuf, struct ipv6_hdr *, hdr_len);
+ if (ip6->proto != IPPROTO_TCP)
+ return 0;
+ hdr_len += sizeof(struct ipv6_hdr);
+ break;
+ default:
+ return 0;
+ }
+
+ if ((hdr_len + sizeof(struct tcp_hdr)) > mbuf->pkt_len)
+ return 0;
+
+ /* offset past TCP header */
+ th = rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, hdr_len);
+ hdr_len += (th->data_off >> 4) * 4;
+
+ if (hdr_len > mbuf->pkt_len)
+ return 0;
+
+ return hdr_len;
+}
+
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
@@ -203,16 +260,25 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
}
/* checksum flags */
- if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
- (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
- uint32_t l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
-
- if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
- if (l4_flags == RTE_PTYPE_L4_UDP ||
- l4_flags == RTE_PTYPE_L4_TCP) {
- if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
- pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
+ if (enic_cq_rx_desc_csum_not_calc(cqrd))
+ pkt_flags |= (PKT_RX_IP_CKSUM_UNKNOWN &
+ PKT_RX_L4_CKSUM_UNKNOWN);
+ else {
+ uint32_t l4_flags;
+ l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
+
+ if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if (l4_flags & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
+ if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
}
}
@@ -320,7 +386,6 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
if (rq->is_sop) {
first_seg = rxmb;
- first_seg->nb_segs = 1;
first_seg->pkt_len = seg_length;
} else {
first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
@@ -329,7 +394,6 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
last_seg->next = rxmb;
}
- rxmb->next = NULL;
rxmb->port = enic->port_id;
rxmb->data_len = seg_length;
@@ -380,10 +444,11 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rte_mb();
if (data_rq->in_use)
- iowrite32(data_rq->posted_index,
- &data_rq->ctrl->posted_index);
+ iowrite32_relaxed(data_rq->posted_index,
+ &data_rq->ctrl->posted_index);
rte_compiler_barrier();
- iowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index);
+ iowrite32_relaxed(sop_rq->posted_index,
+ &sop_rq->ctrl->posted_index);
}
@@ -406,7 +471,7 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
pool = ((struct rte_mbuf *)buf->mb)->pool;
for (i = 0; i < nb_to_free; i++) {
buf = &wq->bufs[tail_idx];
- m = __rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
+ m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
buf->mb = NULL;
if (unlikely(m == NULL)) {
@@ -466,6 +531,8 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint8_t vlan_tag_insert;
uint8_t eop;
uint64_t bus_addr;
+ uint8_t offload_mode;
+ uint16_t header_len;
enic_cleanup_wq(enic, wq);
wq_desc_avail = vnic_wq_desc_avail(wq);
@@ -504,13 +571,17 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
desc_p = descs + head_idx;
eop = (data_len == pkt_len);
-
- if (ol_flags & ol_flags_mask) {
- if (ol_flags & PKT_TX_VLAN_PKT) {
- vlan_tag_insert = 1;
- vlan_id = tx_pkt->vlan_tci;
+ offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
+ header_len = 0;
+
+ if (tx_pkt->tso_segsz) {
+ header_len = tso_header_len(tx_pkt);
+ if (header_len) {
+ offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
+ mss = tx_pkt->tso_segsz;
}
-
+ }
+ if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
if (ol_flags & PKT_TX_IP_CKSUM)
mss |= ENIC_CALC_IP_CKSUM;
@@ -523,8 +594,14 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
}
- wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
- eop, 0, vlan_tag_insert, vlan_id, 0);
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ vlan_tag_insert = 1;
+ vlan_id = tx_pkt->vlan_tci;
+ }
+
+ wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
+ offload_mode, eop, eop, 0, vlan_tag_insert,
+ vlan_id, 0);
*desc_p = desc_tmp;
buf = &wq->bufs[head_idx];
@@ -544,8 +621,9 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ tx_pkt->data_off);
wq_enet_desc_enc((struct wq_enet_desc *)
&desc_tmp, bus_addr, data_len,
- mss, 0, 0, eop, eop, 0,
- vlan_tag_insert, vlan_id, 0);
+ mss, 0, offload_mode, eop, eop,
+ 0, vlan_tag_insert, vlan_id,
+ 0);
*desc_p = desc_tmp;
buf = &wq->bufs[head_idx];
@@ -557,7 +635,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
post:
rte_wmb();
- iowrite32(head_idx, &wq->ctrl->posted_index);
+ iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
done:
wq->ring.desc_avail = wq_desc_avail;
wq->head_idx = head_idx;
diff --git a/drivers/net/fm10k/Makefile b/drivers/net/fm10k/Makefile
index afcbd1d8..e0024f05 100644
--- a/drivers/net/fm10k/Makefile
+++ b/drivers/net/fm10k/Makefile
@@ -71,6 +71,9 @@ CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers
ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
CFLAGS += -Wno-deprecated
CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
endif
endif
@@ -84,6 +87,7 @@ VPATH += $(SRCDIR)/base
#
# all source are stored in SRCS-y
+# base driver is based on the package of cid-fm10k.2017.01.24.tar.gz
#
SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_rxtx.c
@@ -96,10 +100,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_api.c
SRCS-$(CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR) += fm10k_rxtx_vec.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_net
-DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/fm10k/base/fm10k_common.c b/drivers/net/fm10k/base/fm10k_common.c
index a90d2f0b..29f35d7d 100644
--- a/drivers/net/fm10k/base/fm10k_common.c
+++ b/drivers/net/fm10k/base/fm10k_common.c
@@ -230,6 +230,9 @@ s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt)
/* clear tx_ready to prevent any false hits for reset */
hw->mac.tx_ready = false;
+ if (FM10K_REMOVED(hw->hw_addr))
+ return FM10K_SUCCESS;
+
/* clear the enable bit for all rings */
for (i = 0; i < q_cnt; i++) {
reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i));
@@ -542,7 +545,7 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
goto out;
/* if we somehow dropped the Tx enable we should reset */
- if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
+ if (mac->tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
ret_val = FM10K_ERR_RESET_REQUESTED;
goto out;
}
@@ -558,8 +561,12 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
goto out;
/* interface cannot receive traffic without logical ports */
- if (mac->dglort_map == FM10K_DGLORTMAP_NONE)
+ if (mac->dglort_map == FM10K_DGLORTMAP_NONE) {
+ if (mac->ops.request_lport_map)
+ ret_val = mac->ops.request_lport_map(hw);
+
goto out;
+ }
/* if we passed all the tests above then the switch is ready and we no
* longer need to check for link
diff --git a/drivers/net/fm10k/base/fm10k_mbx.c b/drivers/net/fm10k/base/fm10k_mbx.c
index 2e704340..16ab98d3 100644
--- a/drivers/net/fm10k/base/fm10k_mbx.c
+++ b/drivers/net/fm10k/base/fm10k_mbx.c
@@ -2066,9 +2066,10 @@ STATIC void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw,
* function can also be used to respond to an error as the connection
* resetting would also be a means of dealing with errors.
**/
-STATIC void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
- struct fm10k_mbx_info *mbx)
+STATIC s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
{
+ s32 err = FM10K_SUCCESS;
const enum fm10k_mbx_state state = mbx->state;
switch (state) {
@@ -2081,6 +2082,7 @@ STATIC void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
case FM10K_STATE_OPEN:
/* flush any incomplete work */
fm10k_sm_mbx_connect_reset(mbx);
+ err = FM10K_ERR_RESET_REQUESTED;
break;
case FM10K_STATE_CONNECT:
/* Update remote value to match local value */
@@ -2090,6 +2092,8 @@ STATIC void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
}
fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail);
+
+ return err;
}
/**
@@ -2172,7 +2176,7 @@ STATIC s32 fm10k_sm_mbx_process(struct fm10k_hw *hw,
switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) {
case 0:
- fm10k_sm_mbx_process_reset(hw, mbx);
+ err = fm10k_sm_mbx_process_reset(hw, mbx);
break;
case FM10K_SM_MBX_VERSION:
err = fm10k_sm_mbx_process_version_1(hw, mbx);
diff --git a/drivers/net/fm10k/base/fm10k_mbx.h b/drivers/net/fm10k/base/fm10k_mbx.h
index edc57dff..2fac012c 100644
--- a/drivers/net/fm10k/base/fm10k_mbx.h
+++ b/drivers/net/fm10k/base/fm10k_mbx.h
@@ -54,6 +54,8 @@ struct fm10k_mbx_info;
#define FM10K_MBX_ACK_INTERRUPT 0x00000010
#define FM10K_MBX_INTERRUPT_ENABLE 0x00000020
#define FM10K_MBX_INTERRUPT_DISABLE 0x00000040
+#define FM10K_MBX_GLOBAL_REQ_INTERRUPT 0x00000200
+#define FM10K_MBX_GLOBAL_ACK_INTERRUPT 0x00000400
#define FM10K_MBICR(_n) ((_n) + 0x18840)
#define FM10K_GMBX 0x18842
diff --git a/drivers/net/fm10k/base/fm10k_osdep.h b/drivers/net/fm10k/base/fm10k_osdep.h
index a21daa2a..199ebd8e 100644
--- a/drivers/net/fm10k/base/fm10k_osdep.h
+++ b/drivers/net/fm10k/base/fm10k_osdep.h
@@ -39,6 +39,8 @@ POSSIBILITY OF SUCH DAMAGE.
#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>
+#include <rte_io.h>
+
#include "../fm10k_logs.h"
/* TODO: this does not look like it should be used... */
@@ -88,17 +90,16 @@ typedef int bool;
#endif
/* offsets are WORD offsets, not BYTE offsets */
-#define FM10K_WRITE_REG(hw, reg, val) \
- ((((volatile uint32_t *)(hw)->hw_addr)[(reg)]) = ((uint32_t)(val)))
-#define FM10K_READ_REG(hw, reg) \
- (((volatile uint32_t *)(hw)->hw_addr)[(reg)])
+#define FM10K_WRITE_REG(hw, reg, val) \
+ rte_write32((val), ((hw)->hw_addr + (reg)))
+
+#define FM10K_READ_REG(hw, reg) rte_read32(((hw)->hw_addr + (reg)))
+
#define FM10K_WRITE_FLUSH(a) FM10K_READ_REG(a, FM10K_CTRL)
-#define FM10K_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define FM10K_PCI_REG(reg) rte_read32(reg)
-#define FM10K_PCI_REG_WRITE(reg, value) do { \
- FM10K_PCI_REG((reg)) = (value); \
-} while (0)
+#define FM10K_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))
/* not implemented */
#define FM10K_READ_PCI_WORD(hw, reg) 0
@@ -162,23 +163,6 @@ typedef int bool;
#define FM10K_RXD_PKTTYPE_MASK 0x03F0
#define FM10K_RXD_PKTTYPE_SHIFT 4
-enum fm10k_rdesc_pkt_type {
- /* L3 type */
- FM10K_PKTTYPE_OTHER = 0x00,
- FM10K_PKTTYPE_IPV4 = 0x01,
- FM10K_PKTTYPE_IPV4_EX = 0x02,
- FM10K_PKTTYPE_IPV6 = 0x03,
- FM10K_PKTTYPE_IPV6_EX = 0x04,
-
- /* L4 type */
- FM10K_PKTTYPE_TCP = 0x08,
- FM10K_PKTTYPE_UDP = 0x10,
- FM10K_PKTTYPE_GRE = 0x18,
- FM10K_PKTTYPE_VXLAN = 0x20,
- FM10K_PKTTYPE_NVGRE = 0x28,
- FM10K_PKTTYPE_GENEVE = 0x30
-};
-
#define FM10K_RXD_STATUS_IPCS 0x0008 /* Indicates IPv4 csum */
#define FM10K_RXD_STATUS_HBO 0x0400 /* header buffer overrun */
diff --git a/drivers/net/fm10k/base/fm10k_pf.c b/drivers/net/fm10k/base/fm10k_pf.c
index 105babf4..db5f4912 100644
--- a/drivers/net/fm10k/base/fm10k_pf.c
+++ b/drivers/net/fm10k/base/fm10k_pf.c
@@ -66,21 +66,21 @@ STATIC s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
/* shut down all rings */
err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
- if (err)
+ if (err == FM10K_ERR_REQUESTS_PENDING) {
+ hw->mac.reset_while_pending++;
+ goto force_reset;
+ } else if (err) {
return err;
+ }
/* Verify that DMA is no longer active */
reg = FM10K_READ_REG(hw, FM10K_DMA_CTRL);
if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
return FM10K_ERR_DMA_PENDING;
- /* verify the switch is ready for reset */
- reg = FM10K_READ_REG(hw, FM10K_DMA_CTRL2);
- if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY))
- goto out;
-
+force_reset:
/* Inititate data path reset */
- reg |= FM10K_DMA_CTRL_DATAPATH_RESET;
+ reg = FM10K_DMA_CTRL_DATAPATH_RESET;
FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, reg);
/* Flush write and allow 100us for reset to complete */
@@ -90,10 +90,9 @@ STATIC s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
/* Verify we made it out of reset */
reg = FM10K_READ_REG(hw, FM10K_IP);
if (!(reg & FM10K_IP_NOTINRESET))
- err = FM10K_ERR_RESET_FAILED;
+ return FM10K_ERR_RESET_FAILED;
-out:
- return err;
+ return FM10K_SUCCESS;
}
/**
@@ -255,8 +254,8 @@ STATIC s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
/* VLAN multi-bit write:
* The multi-bit write has several parts to it.
- * 3 2 1 0
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * 24 16 8 0
+ * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | RSVD0 | Length |C|RSVD0| VLAN ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -412,7 +411,7 @@ STATIC s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort,
DEBUGFUNC("fm10k_update_uc_addr_pf");
/* verify MAC address is valid */
- if (!FM10K_IS_VALID_ETHER_ADDR(mac))
+ if (!IS_VALID_ETHER_ADDR(mac))
return FM10K_ERR_PARAM;
return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags);
@@ -435,7 +434,7 @@ STATIC s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort,
DEBUGFUNC("fm10k_update_mc_addr_pf");
/* verify multicast address is valid */
- if (!FM10K_IS_MULTICAST_ETHER_ADDR(mac))
+ if (!IS_MULTICAST_ETHER_ADDR(mac))
return FM10K_ERR_PARAM;
return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0);
@@ -536,6 +535,10 @@ STATIC s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
if (!fm10k_glort_valid_pf(hw, glort))
return FM10K_ERR_PARAM;
+ /* reset multicast mode if deleting lport */
+ if (!enable)
+ fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
+
/* construct the lport message from the 2 pieces of data we have */
lport_msg = ((u32)count << 16) | glort;
@@ -908,13 +911,13 @@ STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
qmap_idx = qmap_stride * vf_idx;
- /* MAP Tx queue back to 0 temporarily, and disable it */
- FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
- FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
-
- /* determine correct default VLAN ID */
+ /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
+ * used here to indicate to the VF that it will not have privilege to
+ * write VLAN_TABLE. All policy is enforced on the PF but this allows
+ * the VF to correctly report errors to userspace rqeuests.
+ */
if (vf_info->pf_vid)
- vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR;
+ vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
else
vf_vid = vf_info->sw_vid;
@@ -923,9 +926,35 @@ STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
vf_info->mac, vf_vid);
- /* load onto outgoing mailbox, ignore any errors on enqueue */
- if (vf_info->mbx.ops.enqueue_tx)
- vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+ /* Configure Queue control register with new VLAN ID. The TXQCTL
+ * register is RO from the VF, so the PF must do this even in the
+ * case of notifying the VF of a new VID via the mailbox.
+ */
+ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
+ FM10K_TXQCTL_VID_MASK;
+ txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
+ FM10K_TXQCTL_VF | vf_idx;
+
+ for (i = 0; i < queues_per_pool; i++)
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
+
+ /* try loading a message onto outgoing mailbox first */
+ if (vf_info->mbx.ops.enqueue_tx) {
+ err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+ if (err != FM10K_MBX_ERR_NO_MBX)
+ return err;
+ err = FM10K_SUCCESS;
+ }
+
+ /* If we aren't connected to a mailbox, this is most likely because
+ * the VF driver is not running. It should thus be safe to re-map
+ * queues and use the registers to pass the MAC address so that the VF
+ * driver gets correct information during its initialization.
+ */
+
+ /* MAP Tx queue back to 0 temporarily, and disable it */
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
/* verify ring has disabled before modifying base address registers */
txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx));
@@ -941,7 +970,7 @@ STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
}
/* Update base address registers to contain MAC address */
- if (FM10K_IS_VALID_ETHER_ADDR(vf_info->mac)) {
+ if (IS_VALID_ETHER_ADDR(vf_info->mac)) {
tdbal = (((u32)vf_info->mac[3]) << 24) |
(((u32)vf_info->mac[4]) << 16) |
(((u32)vf_info->mac[5]) << 8);
@@ -964,16 +993,6 @@ STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
FM10K_TDLEN_ITR_SCALE_SHIFT);
err_out:
- /* configure Queue control register */
- txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
- FM10K_TXQCTL_VID_MASK;
- txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
- FM10K_TXQCTL_VF | vf_idx;
-
- /* assign VLAN ID */
- for (i = 0; i < queues_per_pool; i++)
- FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
-
/* restore the queue back to VF ownership */
FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
return err;
@@ -1081,7 +1100,7 @@ STATIC s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
FM10K_WRITE_REG(hw, FM10K_MRQC(vf_info->vsi), 0);
/* Update base address registers to contain MAC address */
- if (FM10K_IS_VALID_ETHER_ADDR(vf_info->mac)) {
+ if (IS_VALID_ETHER_ADDR(vf_info->mac)) {
tdbal = (((u32)vf_info->mac[3]) << 24) |
(((u32)vf_info->mac[4]) << 16) |
(((u32)vf_info->mac[5]) << 8);
@@ -1267,18 +1286,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
if (err)
return err;
- /* verify upper 16 bits are zero */
- if (vid >> 16)
- return FM10K_ERR_PARAM;
-
set = !(vid & FM10K_VLAN_CLEAR);
vid &= ~FM10K_VLAN_CLEAR;
- err = fm10k_iov_select_vid(vf_info, (u16)vid);
- if (err < 0)
- return err;
+ /* if the length field has been set, this is a multi-bit
+ * update request. For multi-bit requests, simply disallow
+ * them when the pf_vid has been set. In this case, the PF
+ * should have already cleared the VLAN_TABLE, and if we
+ * allowed them, it could allow a rogue VF to receive traffic
+ * on a VLAN it was not assigned. In the single-bit case, we
+ * need to modify requests for VLAN 0 to use the default PF or
+ * SW vid when assigned.
+ */
- vid = err;
+ if (vid >> 16) {
+ /* prevent multi-bit requests when PF has
+ * administratively set the VLAN for this VF
+ */
+ if (vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ } else {
+ err = fm10k_iov_select_vid(vf_info, (u16)vid);
+ if (err < 0)
+ return err;
+
+ vid = err;
+ }
/* update VSI info for VF in regards to VLAN table */
err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
@@ -1293,7 +1326,7 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
return err;
/* block attempts to set MAC for a locked device */
- if (FM10K_IS_VALID_ETHER_ADDR(vf_info->mac) &&
+ if (IS_VALID_ETHER_ADDR(vf_info->mac) &&
memcmp(mac, vf_info->mac, ETH_ALEN))
return FM10K_ERR_PARAM;
@@ -1670,13 +1703,12 @@ STATIC s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
* @hw: pointer to hardware structure
* @switch_ready: pointer to boolean value that will record switch state
*
- * This funciton will check the DMA_CTRL2 register and mailbox in order
+ * This function will check the DMA_CTRL2 register and mailbox in order
* to determine if the switch is ready for the PF to begin requesting
* addresses and mapping traffic to the local interface.
**/
STATIC s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
{
- s32 ret_val = FM10K_SUCCESS;
u32 dma_ctrl2;
DEBUGFUNC("fm10k_get_host_state_pf");
@@ -1684,23 +1716,16 @@ STATIC s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
/* verify the switch is ready for interaction */
dma_ctrl2 = FM10K_READ_REG(hw, FM10K_DMA_CTRL2);
if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
- goto out;
+ return FM10K_SUCCESS;
/* retrieve generic host state info */
- ret_val = fm10k_get_host_state_generic(hw, switch_ready);
- if (ret_val)
- goto out;
-
- /* interface cannot receive traffic without logical ports */
- if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
- ret_val = fm10k_request_lport_map_pf(hw);
-
-out:
- return ret_val;
+ return fm10k_get_host_state_generic(hw, switch_ready);
}
/* This structure defines the attibutes to be parsed below */
const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+ sizeof(struct fm10k_swapi_error)),
FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
FM10K_TLV_ATTR_LAST
};
@@ -2082,6 +2107,7 @@ s32 fm10k_init_ops_pf(struct fm10k_hw *hw)
mac->ops.set_dma_mask = &fm10k_set_dma_mask_pf;
mac->ops.get_fault = &fm10k_get_fault_pf;
mac->ops.get_host_state = &fm10k_get_host_state_pf;
+ mac->ops.request_lport_map = &fm10k_request_lport_map_pf;
mac->ops.adjust_systime = &fm10k_adjust_systime_pf;
mac->ops.notify_offset = &fm10k_notify_offset_pf;
mac->ops.read_systime = &fm10k_read_systime_pf;
diff --git a/drivers/net/fm10k/base/fm10k_pf.h b/drivers/net/fm10k/base/fm10k_pf.h
index c84b1bc5..ca125c27 100644
--- a/drivers/net/fm10k/base/fm10k_pf.h
+++ b/drivers/net/fm10k/base/fm10k_pf.h
@@ -91,6 +91,8 @@ enum fm10k_pf_tlv_attr_id_v1 {
#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16
#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16
+#define FM10K_MSG_ERR_PEP_NOT_SCHEDULED 280
+
/* The following data structures are overlayed directly onto TLV mailbox
* messages, and must not break 4 byte alignment. Ensure the structures line
* up correctly as per their TLV definition.
diff --git a/drivers/net/fm10k/base/fm10k_tlv.c b/drivers/net/fm10k/base/fm10k_tlv.c
index e6150c1d..0328ede2 100644
--- a/drivers/net/fm10k/base/fm10k_tlv.c
+++ b/drivers/net/fm10k/base/fm10k_tlv.c
@@ -520,7 +520,8 @@ STATIC s32 fm10k_tlv_attr_validate(u32 *attr,
* up into an array of pointers stored in results. The function will
* return FM10K_ERR_PARAM on any input or message error,
* FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array
- * and 0 on success.
+ * and 0 on success. Any attributes not found in tlv_attr will be silently
+ * ignored.
**/
static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
const struct fm10k_tlv_attr *tlv_attr)
@@ -559,14 +560,15 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
while (offset < len) {
attr_id = *attr & FM10K_TLV_ID_MASK;
- if (attr_id < FM10K_TLV_RESULTS_MAX)
- err = fm10k_tlv_attr_validate(attr, tlv_attr);
- else
- err = FM10K_NOT_IMPLEMENTED;
+ if (attr_id >= FM10K_TLV_RESULTS_MAX)
+ return FM10K_NOT_IMPLEMENTED;
- if (err < 0)
+ err = fm10k_tlv_attr_validate(attr, tlv_attr);
+ if (err == FM10K_NOT_IMPLEMENTED)
+ ; /* silently ignore non-implemented attributes */
+ else if (err)
return err;
- if (!err)
+ else
results[attr_id] = attr;
/* update offset */
diff --git a/drivers/net/fm10k/base/fm10k_type.h b/drivers/net/fm10k/base/fm10k_type.h
index 3fc8f136..1f38a02c 100644
--- a/drivers/net/fm10k/base/fm10k_type.h
+++ b/drivers/net/fm10k/base/fm10k_type.h
@@ -40,6 +40,7 @@ struct fm10k_hw;
#include "fm10k_osdep.h"
#include "fm10k_mbx.h"
+#define FM10K_INTEL_VENDOR_ID 0x8086
#define FM10K_DEV_ID_PF 0x15A4
#define FM10K_DEV_ID_VF 0x15A5
#ifdef BOULDER_RAPIDS_HW
@@ -125,11 +126,15 @@ struct fm10k_hw;
/* Interrupt control registers */
#define FM10K_EICR 0x0006
+#define FM10K_EICR_PCA_FAULT 0x00000001
+#define FM10K_EICR_THI_FAULT 0x00000004
+#define FM10K_EICR_FUM_FAULT 0x00000020
#define FM10K_EICR_FAULT_MASK 0x0000003F
#define FM10K_EICR_MAILBOX 0x00000040
#define FM10K_EICR_SWITCHREADY 0x00000080
#define FM10K_EICR_SWITCHNOTREADY 0x00000100
#define FM10K_EICR_SWITCHINTERRUPT 0x00000200
+#define FM10K_EICR_SRAMERROR 0x00000400
#define FM10K_EICR_VFLR 0x00000800
#define FM10K_EICR_MAXHOLDTIME 0x00001000
#define FM10K_EIMR 0x0007
@@ -183,6 +188,7 @@ struct fm10k_hw;
#define FM10K_DGLORTDEC_INNERRSS_ENABLE 0x08000000
#define FM10K_TUNNEL_CFG 0x0040
#define FM10K_TUNNEL_CFG_NVGRE_SHIFT 16
+#define FM10K_TUNNEL_CFG_GENEVE 0x0041
#define FM10K_SWPRI_MAP(_n) ((_n) + 0x0050)
#define FM10K_SWPRI_MAX 16
#define FM10K_RSSRK(_n, _m) (((_n) * 0x10) + (_m) + 0x0800)
@@ -211,6 +217,7 @@ struct fm10k_hw;
#define FM10K_DMA_CTRL_RX_ENABLE 0x00000010
#define FM10K_DMA_CTRL_RX_ACTIVE 0x00000080
#define FM10K_DMA_CTRL_RX_DESC_SIZE 0x00000100
+#define FM10K_DMA_CTRL_MINMSS_SHIFT 9
#define FM10K_DMA_CTRL_MINMSS_64 0x00008000
#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3 0x04800000
#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2 0x04000000
@@ -283,6 +290,7 @@ struct fm10k_hw;
#define FM10K_RXDCTL_WRITE_BACK_MIN_DELAY 0x00000001
#define FM10K_RXDCTL_DROP_ON_EMPTY 0x00000200
#define FM10K_RXINT(_n) ((0x40 * (_n)) + 0x4008)
+#define FM10K_RXINT_TIMER_SHIFT 8
#define FM10K_SRRCTL(_n) ((0x40 * (_n)) + 0x4009)
#define FM10K_SRRCTL_BSIZEPKT_SHIFT 8 /* shift _right_ */
#define FM10K_SRRCTL_LOOPBACK_SUPPRESS 0x40000000
@@ -336,6 +344,7 @@ struct fm10k_hw;
#define FM10K_TXQCTL_VID_MASK 0x0FFF0000
#define FM10K_TXQCTL_UNLIMITED_BW 0x10000000
#define FM10K_TXINT(_n) ((0x40 * (_n)) + 0x8008)
+#define FM10K_TXINT_TIMER_SHIFT 8
/* Tx Statistics */
#define FM10K_QPTC(_n) ((0x40 * (_n)) + 0x8009)
@@ -374,6 +383,7 @@ struct fm10k_hw;
/* Switch manager interrupt registers */
#define FM10K_IP 0x13000
#define FM10K_IP_NOTINRESET 0x00000100
+#define FM10K_SRAM_IP 0x13003
/* VLAN registers */
#define FM10K_VLAN_TABLE(_n, _m) ((0x80 * (_n)) + (_m) + 0x14000)
@@ -384,6 +394,7 @@ struct fm10k_hw;
#define FM10K_VLAN_TABLE_VSI_MAX 64
#define FM10K_VLAN_LENGTH_SHIFT 16
#define FM10K_VLAN_CLEAR BIT(15)
+#define FM10K_VLAN_OVERRIDE FM10K_VLAN_CLEAR
#define FM10K_VLAN_ALL \
((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT)
@@ -422,20 +433,20 @@ struct fm10k_hw;
#define ETH_ALEN 6
#endif /* ETH_ALEN */
-#ifndef FM10K_IS_ZERO_ETHER_ADDR
+#ifndef IS_ZERO_ETHER_ADDR
/* make certain address is not 0 */
-#define FM10K_IS_ZERO_ETHER_ADDR(addr) \
+#define IS_ZERO_ETHER_ADDR(addr) \
(!((addr)[0] | (addr)[1] | (addr)[2] | (addr)[3] | (addr)[4] | (addr)[5]))
#endif
-#ifndef FM10K_IS_MULTICAST_ETHER_ADDR
-#define FM10K_IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1)
+#ifndef IS_MULTICAST_ETHER_ADDR
+#define IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1)
#endif
-#ifndef FM10K_IS_VALID_ETHER_ADDR
+#ifndef IS_VALID_ETHER_ADDR
/* make certain address is not multicast or 0 */
-#define FM10K_IS_VALID_ETHER_ADDR(addr) \
-(!FM10K_IS_MULTICAST_ETHER_ADDR(addr) && !FM10K_IS_ZERO_ETHER_ADDR(addr))
+#define IS_VALID_ETHER_ADDR(addr) \
+(!IS_MULTICAST_ETHER_ADDR(addr) && !IS_ZERO_ETHER_ADDR(addr))
#endif
enum fm10k_int_source {
@@ -587,6 +598,7 @@ struct fm10k_mac_ops {
s32 (*stop_hw)(struct fm10k_hw *);
s32 (*get_bus_info)(struct fm10k_hw *);
s32 (*get_host_state)(struct fm10k_hw *, bool *);
+ s32 (*request_lport_map)(struct fm10k_hw *);
#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
bool (*is_slot_appropriate)(struct fm10k_hw *);
#endif
@@ -629,6 +641,7 @@ struct fm10k_mac_info {
bool tx_ready;
u32 dglort_map;
u8 itr_scale;
+ u64 reset_while_pending;
};
struct fm10k_swapi_table_info {
@@ -676,6 +689,9 @@ struct fm10k_vf_info {
u8 vf_flags; /* flags indicating what modes
* are supported for the port
*/
+#ifndef NO_FM10K_VF_TRUSTED_MODE
+ bool trusted; /* VF trust mode */
+#endif
};
#define FM10K_VF_FLAG_ALLMULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_ALLMULTI))
@@ -812,6 +828,24 @@ enum fm10k_rdesc_rss_type {
/* Reserved 0x9 - 0xF */
};
+#define FM10K_RXD_PKTTYPE_MASK 0x03F0
+#define FM10K_RXD_PKTTYPE_SHIFT 4
+enum fm10k_rdesc_pkt_type {
+ /* L3 type */
+ FM10K_PKTTYPE_OTHER = 0x00,
+ FM10K_PKTTYPE_IPV4 = 0x01,
+ FM10K_PKTTYPE_IPV4_EX = 0x02,
+ FM10K_PKTTYPE_IPV6 = 0x03,
+ FM10K_PKTTYPE_IPV6_EX = 0x04,
+
+ /* L4 type */
+ FM10K_PKTTYPE_TCP = 0x08,
+ FM10K_PKTTYPE_UDP = 0x10,
+ FM10K_PKTTYPE_GRE = 0x18,
+ FM10K_PKTTYPE_VXLAN = 0x20,
+ FM10K_PKTTYPE_NVGRE = 0x28,
+ FM10K_PKTTYPE_GENEVE = 0x30
+};
#define FM10K_RXD_HDR_INFO_XC_MASK 0x0006
enum fm10k_rxdesc_xc {
@@ -823,6 +857,7 @@ enum fm10k_rxdesc_xc {
#define FM10K_RXD_STATUS_DD 0x0001 /* Descriptor done */
#define FM10K_RXD_STATUS_EOP 0x0002 /* End of packet */
+#define FM10K_RXD_STATUS_IPCS 0x0008 /* Indicates IPv4 csum */
#define FM10K_RXD_STATUS_L4CS 0x0010 /* Indicates an L4 csum */
#define FM10K_RXD_STATUS_L4CS2 0x0040 /* Inner header L4 csum */
#define FM10K_RXD_STATUS_L4E2 0x0800 /* Inner header L4 csum err */
diff --git a/drivers/net/fm10k/base/fm10k_vf.c b/drivers/net/fm10k/base/fm10k_vf.c
index efbdbd1e..bd449773 100644
--- a/drivers/net/fm10k/base/fm10k_vf.c
+++ b/drivers/net/fm10k/base/fm10k_vf.c
@@ -49,11 +49,11 @@ STATIC s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
/* we need to disable the queues before taking further steps */
err = fm10k_stop_hw_generic(hw);
- if (err)
+ if (err && err != FM10K_ERR_REQUESTS_PENDING)
return err;
/* If permanent address is set then we need to restore it */
- if (FM10K_IS_VALID_ETHER_ADDR(perm_addr)) {
+ if (IS_VALID_ETHER_ADDR(perm_addr)) {
bal = (((u32)perm_addr[3]) << 24) |
(((u32)perm_addr[4]) << 16) |
(((u32)perm_addr[5]) << 8);
@@ -82,7 +82,7 @@ STATIC s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
FM10K_WRITE_REG(hw, FM10K_TDLEN(i), tdlen);
}
- return FM10K_SUCCESS;
+ return err;
}
/**
@@ -100,7 +100,9 @@ STATIC s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
/* shut down queues we own and reset DMA configuration */
err = fm10k_stop_hw_vf(hw);
- if (err)
+ if (err == FM10K_ERR_REQUESTS_PENDING)
+ hw->mac.reset_while_pending++;
+ else if (err)
return err;
/* Inititate VF reset */
@@ -113,9 +115,9 @@ STATIC s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
/* Clear reset bit and verify it was cleared */
FM10K_WRITE_REG(hw, FM10K_VFCTRL, 0);
if (FM10K_READ_REG(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST)
- err = FM10K_ERR_RESET_FAILED;
+ return FM10K_ERR_RESET_FAILED;
- return err;
+ return FM10K_SUCCESS;
}
/**
@@ -225,7 +227,7 @@ STATIC s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
if (vsi)
return FM10K_ERR_PARAM;
- /* verify upper 4 bits of vid and length are 0 */
+ /* clever trick to verify reserved bits in both vid and length */
if ((vid << 16 | vid) >> 28)
return FM10K_ERR_PARAM;
@@ -268,7 +270,7 @@ s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1);
- hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR);
+ hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE);
return FM10K_SUCCESS;
}
@@ -339,11 +341,11 @@ STATIC s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
return FM10K_ERR_PARAM;
/* verify MAC address is valid */
- if (!FM10K_IS_VALID_ETHER_ADDR(mac))
+ if (!IS_VALID_ETHER_ADDR(mac))
return FM10K_ERR_PARAM;
/* verify we are not locked down on the MAC address */
- if (FM10K_IS_VALID_ETHER_ADDR(hw->mac.perm_addr) &&
+ if (IS_VALID_ETHER_ADDR(hw->mac.perm_addr) &&
memcmp(hw->mac.perm_addr, mac, ETH_ALEN))
return FM10K_ERR_PARAM;
@@ -385,7 +387,7 @@ STATIC s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
return FM10K_ERR_PARAM;
/* verify multicast address is valid */
- if (!FM10K_IS_MULTICAST_ETHER_ADDR(mac))
+ if (!IS_MULTICAST_ETHER_ADDR(mac))
return FM10K_ERR_PARAM;
/* add bit to notify us if this is a set or clear operation */
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 05aa1a25..8e1a9506 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -69,6 +69,9 @@
#define FM10K_MAX_RX_DESC (FM10K_MAX_RX_RING_SZ / sizeof(union fm10k_rx_desc))
#define FM10K_MAX_TX_DESC (FM10K_MAX_TX_RING_SZ / sizeof(struct fm10k_tx_desc))
+#define FM10K_TX_MAX_SEG UINT8_MAX
+#define FM10K_TX_MAX_MTU_SEG UINT8_MAX
+
/*
* byte aligment for HW RX data buffer
* Datasheet requires RX buffer addresses shall either be 512-byte aligned or
@@ -356,14 +359,17 @@ fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t fm10k_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
int fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq);
int fm10k_rx_vec_condition_check(struct rte_eth_dev *);
void fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq);
uint16_t fm10k_recv_pkts_vec(void *, struct rte_mbuf **, uint16_t);
uint16_t fm10k_recv_scattered_pkts_vec(void *, struct rte_mbuf **,
uint16_t);
-uint16_t fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+uint16_t fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
void fm10k_txq_vec_setup(struct fm10k_tx_queue *txq);
int fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq);
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7c51d3b5..a742eec1 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -32,6 +32,7 @@
*/
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
@@ -59,7 +60,7 @@
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
/* default 1:1 map from queue ID to interrupt vector ID */
-#define Q2V(dev, queue_id) (dev->pci_dev->intr_handle.intr_vec[queue_id])
+#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
#define MAX_LPORT_NUM 128
@@ -197,9 +198,9 @@ fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
}
uint16_t __attribute__((weak))
-fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
- __rte_unused struct rte_mbuf **tx_pkts,
- __rte_unused uint16_t nb_pkts)
+fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
{
return 0;
}
@@ -677,7 +678,7 @@ fm10k_dev_tx_init(struct rte_eth_dev *dev)
/* Enable use of FTAG bit in TX descriptor, PFVTCTL
* register is read-only for VF.
*/
- if (fm10k_check_ftag(dev->pci_dev->device.devargs)) {
+ if (fm10k_check_ftag(dev->device->devargs)) {
if (hw->mac.type == fm10k_mac_pf) {
FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
FM10K_PFVTCTL_FTAG_DESC_ENABLE);
@@ -711,7 +712,8 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_macvlan_filter_info *macvlan;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int i, ret;
struct fm10k_rx_queue *rxq;
uint64_t base_addr;
@@ -725,13 +727,13 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
i = 0;
if (rte_intr_dp_is_en(intr_handle)) {
for (; i < dev->data->nb_rx_queues; i++) {
- FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(dev, i));
+ FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
if (hw->mac.type == fm10k_mac_pf)
- FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
else
- FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
}
@@ -1171,7 +1173,8 @@ static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int i;
PMD_INIT_FUNC_TRACE();
@@ -1190,10 +1193,10 @@ fm10k_dev_stop(struct rte_eth_dev *dev)
FM10K_WRITE_REG(hw, FM10K_RXINT(i),
3 << FM10K_RXINT_TIMER_SHIFT);
if (hw->mac.type == fm10k_mac_pf)
- FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
FM10K_ITR_MASK_SET);
else
- FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
FM10K_ITR_MASK_SET);
}
}
@@ -1390,16 +1393,18 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
PMD_INIT_FUNC_TRACE();
+ dev_info->pci_dev = pdev;
dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
dev_info->max_rx_queues = hw->mac.max_queues;
dev_info->max_tx_queues = hw->mac.max_queues;
dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
dev_info->max_hash_mac_addrs = 0;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pdev->max_vfs;
dev_info->vmdq_pool_base = 0;
dev_info->vmdq_queue_base = 0;
dev_info->max_vmdq_pools = ETH_32_POOLS;
@@ -1450,6 +1455,8 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
.nb_max = FM10K_MAX_TX_DESC,
.nb_min = FM10K_MIN_TX_DESC,
.nb_align = FM10K_MULT_TX_DESC,
+ .nb_seg_max = FM10K_TX_MAX_SEG,
+ .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
};
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
@@ -1683,7 +1690,7 @@ static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
}
/* Add a MAC address, and update filters */
-static void
+static int
fm10k_macaddr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index,
@@ -1694,6 +1701,7 @@ fm10k_macaddr_add(struct rte_eth_dev *dev,
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
macvlan->mac_vmdq_id[index] = pool;
+ return 0;
}
/* Remove a MAC address, and update filters */
@@ -1797,7 +1805,8 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
struct fm10k_rx_queue *q;
const struct rte_memzone *mz;
@@ -2336,15 +2345,16 @@ static int
fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
/* Enable ITR */
if (hw->mac.type == fm10k_mac_pf)
- FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
else
- FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(&pdev->intr_handle);
return 0;
}
@@ -2352,13 +2362,14 @@ static int
fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
/* Disable ITR */
if (hw->mac.type == fm10k_mac_pf)
- FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
FM10K_ITR_MASK_SET);
else
- FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
FM10K_ITR_MASK_SET);
return 0;
}
@@ -2367,7 +2378,8 @@ static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
uint32_t intr_vector, vec;
uint16_t queue_id;
int result = 0;
@@ -2383,7 +2395,7 @@ fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
intr_vector = dev->data->nb_rx_queues;
/* disable interrupt first */
- rte_intr_disable(&dev->pci_dev->intr_handle);
+ rte_intr_disable(intr_handle);
if (hw->mac.type == fm10k_mac_pf)
fm10k_dev_disable_intr_pf(dev);
else
@@ -2418,7 +2430,7 @@ fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
fm10k_dev_enable_intr_pf(dev);
else
fm10k_dev_enable_intr_vf(dev);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
hw->mac.ops.update_int_moderator(hw);
return result;
}
@@ -2532,9 +2544,7 @@ error:
* void
*/
static void
-fm10k_dev_interrupt_handler_pf(
- __rte_unused struct rte_intr_handle *handle,
- void *param)
+fm10k_dev_interrupt_handler_pf(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2584,7 +2594,7 @@ fm10k_dev_interrupt_handler_pf(
FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
/* Re-enable interrupt from host side */
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(dev->intr_handle);
}
/**
@@ -2599,9 +2609,7 @@ fm10k_dev_interrupt_handler_pf(
* void
*/
static void
-fm10k_dev_interrupt_handler_vf(
- __rte_unused struct rte_intr_handle *handle,
- void *param)
+fm10k_dev_interrupt_handler_vf(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2618,7 +2626,7 @@ fm10k_dev_interrupt_handler_vf(
FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
/* Re-enable interrupt from host side */
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(dev->intr_handle);
}
/* Mailbox message handler in VF */
@@ -2732,6 +2740,28 @@ fm10k_check_ftag(struct rte_devargs *devargs)
return 1;
}
+static uint16_t
+fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+ ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
static void __attribute__((cold))
fm10k_set_tx_function(struct rte_eth_dev *dev)
{
@@ -2740,7 +2770,22 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
int use_sse = 1;
uint16_t tx_ftag_en = 0;
- if (fm10k_check_ftag(dev->pci_dev->device.devargs))
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ /* primary process has set the ftag flag and txq_flags */
+ txq = dev->data->tx_queues[0];
+ if (fm10k_tx_vec_condition_check(txq)) {
+ dev->tx_pkt_burst = fm10k_xmit_pkts;
+ dev->tx_pkt_prepare = fm10k_prep_pkts;
+ PMD_INIT_LOG(DEBUG, "Use regular Tx func");
+ } else {
+ PMD_INIT_LOG(DEBUG, "Use vector Tx func");
+ dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
+ dev->tx_pkt_prepare = NULL;
+ }
+ return;
+ }
+
+ if (fm10k_check_ftag(dev->device->devargs))
tx_ftag_en = 1;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -2758,8 +2803,10 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
fm10k_txq_vec_setup(txq);
}
dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
+ dev->tx_pkt_prepare = NULL;
} else {
dev->tx_pkt_burst = fm10k_xmit_pkts;
+ dev->tx_pkt_prepare = fm10k_prep_pkts;
PMD_INIT_LOG(DEBUG, "Use regular Tx func");
}
}
@@ -2767,11 +2814,12 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
static void __attribute__((cold))
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
- struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
uint16_t i, rx_using_sse;
uint16_t rx_ftag_en = 0;
- if (fm10k_check_ftag(dev->pci_dev->device.devargs))
+ if (fm10k_check_ftag(dev->device->devargs))
rx_ftag_en = 1;
/* In order to allow Vector Rx there are a few configuration
@@ -2797,6 +2845,9 @@ fm10k_set_rx_function(struct rte_eth_dev *dev)
else
PMD_INIT_LOG(DEBUG, "Use regular Rx func");
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
@@ -2809,7 +2860,8 @@ static void
fm10k_params_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+ struct fm10k_dev_info *info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
/* Inialize bus info. Normally we would call fm10k_get_bus_info(), but
* there is no way to get link status without reading BAR4. Until this
@@ -2830,6 +2882,8 @@ static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int diag, i;
struct fm10k_macvlan_filter_info *macvlan;
@@ -2838,23 +2892,31 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
dev->dev_ops = &fm10k_eth_dev_ops;
dev->rx_pkt_burst = &fm10k_recv_pkts;
dev->tx_pkt_burst = &fm10k_xmit_pkts;
+ dev->tx_pkt_prepare = &fm10k_prep_pkts;
- /* only initialize in the primary process */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ /*
+ * Primary process does the whole initialization, for secondary
+ * processes, we just select the same Rx and Tx function as primary.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ fm10k_set_rx_function(dev);
+ fm10k_set_tx_function(dev);
return 0;
+ }
- rte_eth_copy_pci_info(dev, dev->pci_dev);
+ rte_eth_copy_pci_info(dev, pdev);
+ dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
memset(macvlan, 0, sizeof(*macvlan));
/* Vendor and Device ID need to be set before init of shared code */
memset(hw, 0, sizeof(*hw));
- hw->device_id = dev->pci_dev->id.device_id;
- hw->vendor_id = dev->pci_dev->id.vendor_id;
- hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
- hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
+ hw->device_id = pdev->id.device_id;
+ hw->vendor_id = pdev->id.vendor_id;
+ hw->subsystem_device_id = pdev->id.subsystem_device_id;
+ hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
hw->revision_id = 0;
- hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pdev->mem_resource[0].addr;
if (hw->hw_addr == NULL) {
PMD_INIT_LOG(ERR, "Bad mem resource."
" Try to blacklist unused devices.");
@@ -2924,20 +2986,20 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
/*PF/VF has different interrupt handling mechanism */
if (hw->mac.type == fm10k_mac_pf) {
/* register callback func to eal lib */
- rte_intr_callback_register(&(dev->pci_dev->intr_handle),
+ rte_intr_callback_register(intr_handle,
fm10k_dev_interrupt_handler_pf, (void *)dev);
/* enable MISC interrupt */
fm10k_dev_enable_intr_pf(dev);
} else { /* VF */
- rte_intr_callback_register(&(dev->pci_dev->intr_handle),
+ rte_intr_callback_register(intr_handle,
fm10k_dev_interrupt_handler_vf, (void *)dev);
fm10k_dev_enable_intr_vf(dev);
}
/* Enable intr after callback registered */
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
hw->mac.ops.update_int_moderator(hw);
@@ -3007,7 +3069,8 @@ static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
PMD_INIT_FUNC_TRACE();
/* only uninitialize in the primary process */
@@ -3022,7 +3085,7 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
dev->tx_pkt_burst = NULL;
/* disable uio/vfio intr */
- rte_intr_disable(&(dev->pci_dev->intr_handle));
+ rte_intr_disable(intr_handle);
/*PF/VF has different interrupt handling mechanism */
if (hw->mac.type == fm10k_mac_pf) {
@@ -3030,13 +3093,13 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
fm10k_dev_disable_intr_pf(dev);
/* unregister callback func to eal lib */
- rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
+ rte_intr_callback_unregister(intr_handle,
fm10k_dev_interrupt_handler_pf, (void *)dev);
} else {
/* disable interrupt */
fm10k_dev_disable_intr_vf(dev);
- rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
+ rte_intr_callback_unregister(intr_handle,
fm10k_dev_interrupt_handler_vf, (void *)dev);
}
@@ -3051,6 +3114,18 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
return 0;
}
+static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
+}
+
+static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
+}
+
/*
* The set of PCI devices this driver supports. This driver will enable both PF
* and SRIOV-VF devices.
@@ -3062,18 +3137,13 @@ static const struct rte_pci_id pci_id_fm10k_map[] = {
{ .vendor_id = 0, /* sentinel */ },
};
-static struct eth_driver rte_pmd_fm10k = {
- .pci_drv = {
- .id_table = pci_id_fm10k_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_fm10k_dev_init,
- .eth_dev_uninit = eth_fm10k_dev_uninit,
- .dev_private_size = sizeof(struct fm10k_adapter),
+static struct rte_pci_driver rte_pmd_fm10k = {
+ .id_table = pci_id_fm10k_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_fm10k_pci_probe,
+ .remove = eth_fm10k_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k.pci_drv);
+RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index 32cc7ff9..c9bb04a0 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@
#include <rte_ethdev.h>
#include <rte_common.h>
+#include <rte_net.h>
#include "fm10k.h"
#include "base/fm10k_type.h"
@@ -65,6 +66,15 @@ static inline void dump_rxd(union fm10k_rx_desc *rxd)
}
#endif
+#define FM10K_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define FM10K_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)
+
/* @note: When this function is changed, make corresponding change to
* fm10k_dev_supported_ptypes_get()
*/
@@ -424,12 +434,12 @@ static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
if (unlikely(num == 0))
return;
- m = __rte_pktmbuf_prefree_seg(txep[0]);
+ m = rte_pktmbuf_prefree_seg(txep[0]);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < num; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i]);
+ m = rte_pktmbuf_prefree_seg(txep[i]);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool))
free[nb_free++] = m;
@@ -445,7 +455,7 @@ static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
} else {
for (i = 1; i < num; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i]);
+ m = rte_pktmbuf_prefree_seg(txep[i]);
if (m != NULL)
rte_mempool_put(m->pool, m);
txep[i] = NULL;
@@ -597,3 +607,41 @@ fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return count;
}
+
+uint16_t
+fm10k_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ if ((m->ol_flags & PKT_TX_TCP_SEG) &&
+ (m->tso_segsz < FM10K_TSO_MINMSS)) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (m->ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 27f3e43f..411bc445 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -324,9 +324,6 @@ fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
/* Flush mbuf with pkt template.
* Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
*/
p0 = (uintptr_t)&mb0->rearm_data;
*(uint64_t *)p0 = rxq->mbuf_initializer;
@@ -470,9 +467,13 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
__m128i descs0[RTE_FM10K_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ __m128i mbp1;
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);
/* Read desc statuses backwards to avoid race condition */
@@ -480,11 +481,13 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf poitns */
mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
+#endif
descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
@@ -493,8 +496,10 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
/* avoid compiler reorder optimization */
rte_compiler_barrier();
@@ -754,12 +759,12 @@ fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
* next_dd - (rs_thresh-1)
*/
txep = &txq->sw_ring[txq->next_dd - (n - 1)];
- m = __rte_pktmbuf_prefree_seg(txep[0]);
+ m = rte_pktmbuf_prefree_seg(txep[0]);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i]);
+ m = rte_pktmbuf_prefree_seg(txep[i]);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool))
free[nb_free++] = m;
@@ -774,7 +779,7 @@ fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
} else {
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i]);
+ m = rte_pktmbuf_prefree_seg(txep[i]);
if (m != NULL)
rte_mempool_put(m->pool, m);
}
@@ -800,8 +805,8 @@ tx_backlog_entry(struct rte_mbuf **txep,
}
uint16_t
-fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
volatile struct fm10k_tx_desc *txdp;
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 13085fb7..56f210d6 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -38,7 +38,7 @@ LIB = librte_pmd_i40e.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -DPF_DRIVER -DVF_DRIVER -DINTEGRATED_VF
-CFLAGS += -DX722_SUPPORT -DX722_A0_SUPPORT
+CFLAGS += -DX722_A0_SUPPORT
EXPORT_MAP := rte_pmd_i40e_version.map
@@ -99,23 +99,23 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c
+else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_altivec.c
else
SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_sse.c
endif
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c
# vector PMD driver needs SSE4.1 support
ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
CFLAGS_i40e_rxtx_vec_sse.o += -msse4.1
endif
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_kvargs
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_I40E_PMD)-include := rte_pmd_i40e.h
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/i40e/base/README b/drivers/net/i40e/base/README
new file mode 100644
index 00000000..0da9f674
--- /dev/null
+++ b/drivers/net/i40e/base/README
@@ -0,0 +1,59 @@
+..
+ BSD LICENSE
+
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Intel® I40E driver
+==================
+
+This directory contains source code of FreeBSD i40e driver of version
+cid-i40e.2017.03.21.tar.gz released by the team which develops
+basic drivers for any i40e NIC. The directory of base/ contains the
+original source package.
+This driver is valid for the product(s) listed below
+
+* Intel® Ethernet Converged Network Adapters X710
+* Intel® Ethernet Converged Network Adapters XL710
+* Intel® Ethernet Network Adapter XXV710
+* Intel® Ethernet Connection X722 for 10GBASE-T
+* Intel® Ethernet Connection X722 for 10GbE backplane
+* Intel® Ethernet Connection X722 for 10GbE SFP+
+* Intel® Ethernet Connection X722 for 1GbE
+* Intel® Ethernet Controller X710 and XL710 Family
+* Intel® Ethernet Controller XXV710 for 25GbE backplane
+* Intel® Ethernet Controller XXV710 for 25GbE SFP28
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ i40e_osdep.h
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index 0d3a83fa..a60292a3 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -944,8 +944,8 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- i40e_msec_delay(1);
- total_delay++;
+ i40e_usec_delay(50);
+ total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
@@ -1077,11 +1077,11 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
flags = LE16_TO_CPU(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
diff --git a/drivers/net/i40e/base/i40e_adminq.h b/drivers/net/i40e/base/i40e_adminq.h
index 750973c5..182e40b9 100644
--- a/drivers/net/i40e/base/i40e_adminq.h
+++ b/drivers/net/i40e/base/i40e_adminq.h
@@ -158,9 +158,9 @@ STATIC INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
#ifdef I40E_ESS_SUPPORT
-#define I40E_ASQ_CMD_TIMEOUT_ESS 50000 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT_ESS 50000000 /* usecs */
#endif
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h
index 4f067720..09f5bf5c 100644
--- a/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -139,12 +139,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
-#ifdef X722_SUPPORT
/* Proxy commands */
i40e_aqc_opc_set_proxy_config = 0x0104,
i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
-#endif
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -152,12 +150,11 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
-#ifdef X722_SUPPORT
/* WoL commands */
i40e_aqc_opc_set_wol_filter = 0x0120,
i40e_aqc_opc_get_wake_reason = 0x0121,
+ i40e_aqc_opc_clear_all_wol_filters = 0x025E,
-#endif
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -197,10 +194,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
+ i40e_aqc_opc_replace_cloud_filters = 0x025F,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
+ /* Dynamic Device Personalization */
+ i40e_aqc_opc_write_personalization_profile = 0x0270,
+ i40e_aqc_opc_get_personalization_profile_list = 0x0271,
+
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -282,12 +284,10 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
-#ifdef X722_SUPPORT
i40e_aqc_opc_set_rss_key = 0x0B02,
i40e_aqc_opc_set_rss_lut = 0x0B03,
i40e_aqc_opc_get_rss_key = 0x0B04,
i40e_aqc_opc_get_rss_lut = 0x0B05,
-#endif
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -540,7 +540,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+#define I40E_AQC_WOL_PRESERVE_STATUS 0x200
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -561,6 +562,7 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
struct i40e_aqc_mac_address_write {
__le16 command_flags;
#define I40E_AQC_MC_MAG_EN 0x0100
+#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
@@ -584,7 +586,6 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
-#ifdef X722_SUPPORT
/* Set WoL Filter (0x0120) */
struct i40e_aqc_set_wol_filter {
@@ -600,6 +601,7 @@ struct i40e_aqc_set_wol_filter {
__le16 cmd_flags;
#define I40E_AQC_SET_WOL_FILTER 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
__le16 valid_flags;
@@ -635,7 +637,6 @@ struct i40e_aqc_get_wake_reason_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
-#endif /* X722_SUPPORT */
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -774,6 +775,7 @@ struct i40e_aqc_set_switch_config {
/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
+#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004
__le16 valid_flags;
u8 reserved[12];
};
@@ -940,16 +942,12 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
-#ifdef X722_SUPPORT
#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
-#endif
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
-#ifdef X722_SUPPORT
#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
-#endif
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1332,7 +1330,9 @@ struct i40e_aqc_add_remove_cloud_filters {
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 reserved2[4];
+ u8 big_buffer_flag;
+#define I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER 1
+ u8 reserved2[3];
__le32 addr_high;
__le32 addr_low;
};
@@ -1369,6 +1369,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+/* 0x0010 to 0x0017 is for custom filters */
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
@@ -1403,6 +1404,46 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 response_reserved[7];
};
+/* i40e_aqc_add_rm_cloud_filt_elem_ext is used when
+ * I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set. refer to
+ * DCR288
+ */
+struct i40e_aqc_add_rm_cloud_filt_elem_ext {
+ struct i40e_aqc_add_remove_cloud_filters_element_data element;
+ u16 general_fields[32];
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
+};
+
struct i40e_aqc_remove_cloud_filters_completion {
__le16 perfect_ovlan_used;
__le16 perfect_ovlan_free;
@@ -1414,6 +1455,54 @@ struct i40e_aqc_remove_cloud_filters_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+/* Replace filter Command 0x025F
+ * uses the i40e_aqc_replace_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_filter_data {
+ u8 filter_type;
+ u8 input[3];
+};
+
+struct i40e_aqc_replace_cloud_filters_cmd {
+ u8 valid_flags;
+#define I40E_AQC_REPLACE_L1_FILTER 0x0
+#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
+#define I40E_AQC_GET_CLOUD_FILTERS 0x2
+#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
+#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
+ u8 old_filter_type;
+ u8 new_filter_type;
+ u8 tr_bit;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_replace_cloud_filters_cmd_buf {
+ u8 data[32];
+/* Filter type INPUT codes*/
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED (1 << 7UL)
+
+/* Field Vector offsets */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
+ struct i40e_filter_data filters[8];
+};
+
/* Add Mirror Rule (indirect or direct 0x0260)
* Delete Mirror Rule (indirect or direct 0x0261)
* note: some rule types (4,5) do not use an external buffer.
@@ -1449,6 +1538,36 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+/* Dynamic Device Personalization */
+struct i40e_aqc_write_personalization_profile {
+ u8 flags;
+ u8 reserved[3];
+ __le32 profile_track_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
+
+struct i40e_aqc_write_ddp_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_get_applied_profiles {
+ u8 flags;
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
+ u8 rsv[3];
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
+
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1781,11 +1900,20 @@ struct i40e_aq_get_phy_abilities_resp {
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
- u8 mod_type_ext;
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_ENABLE_FEC_KR 0x01
+#define I40E_AQ_ENABLE_FEC_RS 0x02
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+#define I40E_AQ_FEC
+#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
+
u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
@@ -1809,16 +1937,14 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 eeer;
u8 low_power_ctrl;
u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
-#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
-#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
u8 fec_config;
-#define I40E_AQ_SET_FEC_ABILITY_KR (1 << 0)
-#define I40E_AQ_SET_FEC_ABILITY_RS (1 << 1)
-#define I40E_AQ_SET_FEC_REQUEST_KR (1 << 2)
-#define I40E_AQ_SET_FEC_REQUEST_RS (1 << 3)
-#define I40E_AQ_SET_FEC_AUTO (1 << 4)
+#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define I40E_AQ_SET_FEC_AUTO BIT(4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
u8 reserved;
};
@@ -2416,7 +2542,6 @@ struct i40e_aqc_del_udp_tunnel_completion {
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
-#ifdef X722_SUPPORT
struct i40e_aqc_get_set_rss_key {
#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
@@ -2457,7 +2582,6 @@ struct i40e_aqc_get_set_rss_lut {
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
-#endif
/* tunnel key structure 0x0B10 */
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index 9a6b3ed6..03e94bc8 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -71,7 +71,6 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
-#ifdef X722_SUPPORT
#ifdef X722_A0_SUPPORT
case I40E_DEV_ID_X722_A0:
#endif
@@ -83,18 +82,14 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_SFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
-#endif
-#ifdef X722_SUPPORT
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
case I40E_DEV_ID_X722_VF:
- case I40E_DEV_ID_X722_VF_HV:
#ifdef X722_A0_SUPPORT
case I40E_DEV_ID_X722_A0_VF:
#endif
hw->mac.type = I40E_MAC_X722_VF;
break;
#endif /* INTEGRATED_VF || VF_DRIVER */
-#endif /* X722_SUPPORT */
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
@@ -114,7 +109,6 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
return status;
}
-#ifndef I40E_NDIS_SUPPORT
/**
* i40e_aq_str - convert AQ err code to a string
* @hw: pointer to the HW structure
@@ -321,7 +315,6 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
return hw->err_str;
}
-#endif /* I40E_NDIS_SUPPORT */
/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
@@ -447,7 +440,6 @@ enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
-#ifdef X722_SUPPORT
/**
* i40e_aq_get_set_rss_lut
@@ -606,7 +598,6 @@ enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
-#endif /* X722_SUPPORT */
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
@@ -1022,9 +1013,7 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
switch (hw->mac.type) {
case I40E_MAC_XL710:
-#ifdef X722_SUPPORT
case I40E_MAC_X722:
-#endif
break;
default:
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -1044,11 +1033,9 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
else
hw->pf_id = (u8)(func_rid & 0x7);
-#ifdef X722_SUPPORT
if (hw->mac.type == I40E_MAC_X722)
hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
-#endif
status = i40e_init_nvm(hw);
return status;
}
@@ -1126,7 +1113,8 @@ enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (flags & I40E_AQC_LAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+ i40e_memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac),
+ I40E_NONDMA_TO_NONDMA);
return status;
}
@@ -1149,7 +1137,8 @@ enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
return status;
if (flags & I40E_AQC_PORT_ADDR_VALID)
- memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+ i40e_memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac),
+ I40E_NONDMA_TO_NONDMA);
else
status = I40E_ERR_INVALID_MAC_ADDR;
@@ -1207,7 +1196,8 @@ enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw,
return status;
if (flags & I40E_AQC_SAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
+ i40e_memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac),
+ I40E_NONDMA_TO_NONDMA);
else
status = I40E_ERR_INVALID_MAC_ADDR;
@@ -1288,6 +1278,8 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_1000BASE_LX:
case I40E_PHY_TYPE_40GBASE_SR4:
case I40E_PHY_TYPE_40GBASE_LR4:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ case I40E_PHY_TYPE_25GBASE_SR:
media = I40E_MEDIA_TYPE_FIBER;
break;
case I40E_PHY_TYPE_100BASE_TX:
@@ -1302,6 +1294,7 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_CR:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@@ -1309,6 +1302,7 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
+ case I40E_PHY_TYPE_25GBASE_KR:
media = I40E_MEDIA_TYPE_BACKPLANE;
break;
case I40E_PHY_TYPE_SGMII:
@@ -1789,10 +1783,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* Copy over all the old settings */
config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
config.link_speed = abilities.link_speed;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status)
@@ -1952,6 +1949,8 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
+ hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
+ I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
@@ -1974,12 +1973,13 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->crc_enable = false;
- if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE))
+ if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED))
hw_link_info->lse_enable = true;
else
hw_link_info->lse_enable = false;
- if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
@@ -2344,6 +2344,43 @@ enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
}
/**
+* i40e_aq_set_vsi_full_promiscuous
+* @hw: pointer to the hw struct
+* @seid: VSI number
+* @set: set promiscuous enable/disable
+* @cmd_details: pointer to command details structure or NULL
+**/
+enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags = I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_mc_promisc_on_vlan
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2412,6 +2449,40 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_vsi_bc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set broadcast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2745,14 +2816,17 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
- if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+ /* extra checking needed to ensure link info to user is timely */
+ if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
+ !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
status = i40e_aq_get_phy_capabilities(hw, false, false,
&abilities, NULL);
if (status)
return status;
- memcpy(hw->phy.link_info.module_type, &abilities.module_type,
- sizeof(hw->phy.link_info.module_type));
+ i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
}
return status;
}
@@ -3603,6 +3677,14 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
break;
case I40E_AQ_CAP_ID_MNG_MODE:
p->management_mode = number;
+ if (major_rev > 1) {
+ p->mng_protocols_over_mctp = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Protocols over MCTP = %d\n",
+ p->mng_protocols_over_mctp);
+ } else {
+ p->mng_protocols_over_mctp = 0;
+ }
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Management Mode = %d\n",
p->management_mode);
@@ -3822,7 +3904,6 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
p->update_disabled = true;
break;
-#ifdef X722_SUPPORT
case I40E_AQ_CAP_ID_WOL_AND_PROXY:
hw->num_wol_proxy_filters = (u16)number;
hw->wol_proxy_vsi_seid = (u16)logical_id;
@@ -3832,12 +3913,10 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
else
p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
- p->proxy_support = p->proxy_support;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: WOL proxy filters = %d\n",
hw->num_wol_proxy_filters);
break;
-#endif
default:
break;
}
@@ -3874,8 +3953,10 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
/* partition id is 1-based, and functions are evenly spread
* across the ports as partitions
*/
- hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
- hw->num_partitions = num_functions / hw->num_ports;
+ if (hw->num_ports != 0) {
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+ }
/* additional HW specific goodies that might
* someday be HW version specific
@@ -4360,11 +4441,15 @@ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
/**
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
- * @udp_port: the UDP port to add
+ * @udp_port: the UDP port to add in Host byte order
* @header_len: length of the tunneling header length in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
+ *
+ * Note: Firmware expects the udp_port value to be in Little Endian format,
+ * and this function will call CPU_TO_LE16 to convert from Host byte order to
+ * Little Endian order.
**/
enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
@@ -5548,6 +5633,59 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
}
/**
+ * i40e_aq_add_cloud_filters_big_buffer
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_rm_cloud_filt_elem_ext are filled in by the caller of
+ * the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER;
+
+ /* adjust Geneve VNI for HW issue */
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = LE32_TO_CPU(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
* i40e_aq_remove_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to remove cloud filters from
@@ -5560,9 +5698,9 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
*
**/
enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
- u8 filter_count)
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
@@ -5587,6 +5725,103 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
}
/**
+ * i40e_aq_remove_cloud_filters_big_buffer
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_rm_cloud_filt_elem_ext are filled in by the caller of
+ * the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_remove_cloud_filters_big_buffer(
+ struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER;
+
+ /* adjust Geneve VNI for HW issue */
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = LE32_TO_CPU(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_replace_cloud_filters - Replace cloud filter command
+ * @hw: pointer to the hw struct
+ * @filters: pointer to the i40e_aqc_replace_cloud_filter_cmd struct
+ * @cmd_buf: pointer to the i40e_aqc_replace_cloud_filter_cmd_buf struct
+ *
+ **/
+enum
+i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
+ struct i40e_aqc_replace_cloud_filters_cmd *filters,
+ struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_replace_cloud_filters_cmd *cmd =
+ (struct i40e_aqc_replace_cloud_filters_cmd *)&desc.params.raw;
+ enum i40e_status_code status = I40E_SUCCESS;
+ int i = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_replace_cloud_filters);
+
+ desc.datalen = CPU_TO_LE16(32);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->old_filter_type = filters->old_filter_type;
+ cmd->new_filter_type = filters->new_filter_type;
+ cmd->valid_flags = filters->valid_flags;
+ cmd->tr_bit = filters->tr_bit;
+
+ status = i40e_asq_send_command(hw, &desc, cmd_buf,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf), NULL);
+
+ /* for get cloud filters command */
+ for (i = 0; i < 32; i += 4) {
+ cmd_buf->filters[i / 4].filter_type = cmd_buf->data[i];
+ cmd_buf->filters[i / 4].input[0] = cmd_buf->data[i + 1];
+ cmd_buf->filters[i / 4].input[1] = cmd_buf->data[i + 2];
+ cmd_buf->filters[i / 4].input[2] = cmd_buf->data[i + 3];
+ }
+
+ return status;
+}
+
+
+/**
* i40e_aq_alternate_write
* @hw: pointer to the hardware structure
* @reg_addr0: address of first dword to be read
@@ -6007,9 +6242,6 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
- if (bwd_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
desc.datalen = CPU_TO_LE16(bwd_size);
status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
@@ -6018,7 +6250,92 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
}
/**
- * i40e_read_phy_register
+ * i40e_read_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ } else {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_write_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes specified PHY register value
+ **/
+enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@@ -6027,9 +6344,8 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 *value)
+enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u32 command = 0;
@@ -6039,8 +6355,8 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@@ -6062,8 +6378,8 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_READ) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@@ -6093,7 +6409,7 @@ phy_read_end:
}
/**
- * i40e_write_phy_register
+ * i40e_write_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@@ -6102,9 +6418,8 @@ phy_read_end:
*
* Writes value to specified PHY register
**/
-enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 value)
+enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u32 command = 0;
@@ -6114,8 +6429,8 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@@ -6139,8 +6454,8 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_WRITE) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@@ -6161,6 +6476,78 @@ phy_write_end:
}
/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
+{
+ enum i40e_status_code status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_write_phy_register_clause22(hw,
+ reg, phy_addr, value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_write_phy_register_clause45(hw,
+ page, reg, phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_adr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
+{
+ enum i40e_status_code status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
+ value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_read_phy_register_clause45(hw, page, reg,
+ phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
@@ -6202,14 +6589,16 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
led_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ &led_reg);
if (status)
goto phy_blinking_end;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
led_reg);
@@ -6221,20 +6610,18 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
if (time > 0 && interval > 0) {
for (i = 0; i < time * 1000; i += interval) {
- status = i40e_read_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (led_reg & I40E_PHY_LED_MANUAL_ON)
led_reg = 0;
else
led_reg = I40E_PHY_LED_MANUAL_ON;
- status = i40e_write_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
i40e_msec_delay(interval);
@@ -6242,8 +6629,9 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
}
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
phy_blinking_end:
return status;
@@ -6274,8 +6662,10 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
temp_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- temp_addr, phy_addr, &reg_val);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ temp_addr, phy_addr,
+ &reg_val);
if (status)
return status;
*val = reg_val;
@@ -6308,41 +6698,42 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
phy_addr = i40e_get_phy_address(hw, port_num);
-
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
if (status)
return status;
}
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_ctl);
}
return status;
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
return status;
}
#endif /* PF_DRIVER */
@@ -6393,7 +6784,9 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
int retry = 5;
u32 val = 0;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@@ -6452,7 +6845,9 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
bool use_register;
int retry = 5;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
@@ -6568,7 +6963,6 @@ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
I40E_SUCCESS, NULL, 0, NULL);
}
#endif /* VF_DRIVER */
-#ifdef X722_SUPPORT
/**
* i40e_aq_set_arp_proxy_config
@@ -6591,10 +6985,13 @@ enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data));
status = i40e_asq_send_command(hw, &desc, proxy_config,
sizeof(struct i40e_aqc_arp_proxy_data),
@@ -6625,10 +7022,13 @@ enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_ns_proxy_table_entry);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data));
status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
sizeof(struct i40e_aqc_ns_proxy_data),
@@ -6675,9 +7075,11 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
if (set_filter) {
if (!filter)
return I40E_ERR_PARAM;
+
cmd_flags |= I40E_AQC_SET_WOL_FILTER;
- buff_len = sizeof(*filter);
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR;
}
+
if (no_wol_tco)
cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
@@ -6688,6 +7090,12 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
cmd->valid_flags = CPU_TO_LE16(valid_flags);
+ buff_len = sizeof(*filter);
+ desc.datalen = CPU_TO_LE16(buff_len);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));
@@ -6724,4 +7132,236 @@ enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
return status;
}
-#endif /* X722_SUPPORT */
+/**
+* i40e_aq_clear_all_wol_filters
+* @hw: pointer to the hw struct
+* @cmd_details: pointer to command details structure or NULL
+*
+* Get information for the reason of a Wake Up event
+**/
+enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_all_wol_filters);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+
+/**
+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @track_id: package tracking id
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_write_personalization_profile *cmd =
+ (struct i40e_aqc_write_personalization_profile *)
+ &desc.params.raw;
+ struct i40e_aqc_write_ddp_resp *resp;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_write_personalization_profile);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->profile_track_id = CPU_TO_LE32(track_id);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_applied_profiles *cmd =
+ (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_personalization_profile_list);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->flags = flags;
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_find_segment_in_package
+ * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_hdr)
+{
+ struct i40e_generic_seg_header *segment;
+ u32 i;
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < pkg_hdr->segment_count; i++) {
+ segment =
+ (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
+ pkg_hdr->segment_offset[i]);
+
+ if (segment->type == segment_type)
+ return segment;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_write_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be downloaded
+ * @track_id: package tracking id
+ *
+ * Handles the download of a complete package.
+ */
+enum i40e_status_code
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_section_table *sec_tbl;
+ struct i40e_profile_section_header *sec = NULL;
+ u32 dev_cnt;
+ u32 vendor_dev_id;
+ u32 *nvm;
+ u32 section_size = 0;
+ u32 offset = 0, info = 0;
+ u32 i;
+
+ if (!track_id) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == I40E_INTEL_VENDOR_ID)
+ if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (i == dev_cnt) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ nvm = (u32 *)&profile->device_table[dev_cnt];
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec = (struct i40e_profile_section_header *)((u8 *)profile +
+ sec_tbl->section_offset[i]);
+
+ /* Skip 'AQ', 'note' and 'name' sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write profile */
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: offset %d, info %d",
+ offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40e_add_pinfo_to_list
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+enum i40e_status_code
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
diff --git a/drivers/net/i40e/base/i40e_dcb.c b/drivers/net/i40e/base/i40e_dcb.c
index 26c344fd..9b5405db 100644
--- a/drivers/net/i40e/base/i40e_dcb.c
+++ b/drivers/net/i40e/base/i40e_dcb.c
@@ -396,6 +396,8 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
dcbcfg->numapps = length / sizeof(*app);
if (!dcbcfg->numapps)
return;
+ if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
+ dcbcfg->numapps = I40E_DCBX_MAX_APPS;
for (i = 0; i < dcbcfg->numapps; i++) {
u8 up, selector;
diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h
index 8bd5793d..4546689a 100644
--- a/drivers/net/i40e/base/i40e_devids.h
+++ b/drivers/net/i40e/base/i40e_devids.h
@@ -55,7 +55,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#endif /* VF_DRIVER */
-#ifdef X722_SUPPORT
#ifdef X722_A0_SUPPORT
#define I40E_DEV_ID_X722_A0 0x374C
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
@@ -70,9 +69,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_DEV_ID_SFP_I_X722 0x37D3
#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
#define I40E_DEV_ID_X722_VF 0x37CD
-#define I40E_DEV_ID_X722_VF_HV 0x37D9
#endif /* VF_DRIVER */
-#endif /* X722_SUPPORT */
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/drivers/net/i40e/base/i40e_lan_hmc.c b/drivers/net/i40e/base/i40e_lan_hmc.c
index 22606484..f03f3813 100644
--- a/drivers/net/i40e/base/i40e_lan_hmc.c
+++ b/drivers/net/i40e/base/i40e_lan_hmc.c
@@ -1239,11 +1239,6 @@ enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
u64 obj_offset_in_fpm;
u32 sd_idx, sd_lmt;
- if (NULL == hmc_info) {
- ret_code = I40E_ERR_BAD_PTR;
- DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
- goto exit;
- }
if (NULL == hmc_info->hmc_obj) {
ret_code = I40E_ERR_BAD_PTR;
DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c
index 4fa1220b..e8965024 100644
--- a/drivers/net/i40e/base/i40e_nvm.c
+++ b/drivers/net/i40e/base/i40e_nvm.c
@@ -219,19 +219,15 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- i40e_release_nvm(hw);
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
- } else {
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ i40e_release_nvm(hw);
}
-#else
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#endif
return ret_code;
}
@@ -249,14 +245,10 @@ enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
else
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#else
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#endif
return ret_code;
}
@@ -348,14 +340,10 @@ enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
else
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#else
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#endif
return ret_code;
}
@@ -375,7 +363,6 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
@@ -386,9 +373,6 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
} else {
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
-#else
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#endif
return ret_code;
}
@@ -901,9 +885,20 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
}
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
return I40E_SUCCESS;
}
+ /* Clear status even it is not read and log */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -1253,6 +1248,7 @@ retry:
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
{
if (opcode == hw->nvm_wait_opcode) {
+
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
if (hw->nvm_release_on_done) {
@@ -1261,6 +1257,11 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
}
hw->nvm_wait_opcode = 0;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
@@ -1423,7 +1424,8 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
if (hw->nvm_buff.va) {
buff = hw->nvm_buff.va;
- memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
+ I40E_NONDMA_TO_NONDMA);
}
}
@@ -1496,7 +1498,7 @@ STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
__func__, cmd->offset, cmd->offset + len);
buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
- memcpy(bytes, buff, len);
+ i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
bytes += len;
remainder -= len;
@@ -1510,7 +1512,7 @@ STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
__func__, start_byte, start_byte + remainder);
- memcpy(bytes, buff, remainder);
+ i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
}
return I40E_SUCCESS;
diff --git a/drivers/net/i40e/base/i40e_osdep.h b/drivers/net/i40e/base/i40e_osdep.h
index 38e7ba5b..c57ecded 100644
--- a/drivers/net/i40e/base/i40e_osdep.h
+++ b/drivers/net/i40e/base/i40e_osdep.h
@@ -44,6 +44,7 @@
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_log.h>
+#include <rte_io.h>
#include "../i40e_logs.h"
@@ -153,15 +154,18 @@ do { \
* I40E_PRTQF_FD_MSK
*/
-#define I40E_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define I40E_PCI_REG(reg) rte_read32(reg)
#define I40E_PCI_REG_ADDR(a, reg) \
((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
static inline uint32_t i40e_read_addr(volatile void *addr)
{
return rte_le_to_cpu_32(I40E_PCI_REG(addr));
}
-#define I40E_PCI_REG_WRITE(reg, value) \
- do { I40E_PCI_REG((reg)) = rte_cpu_to_le_32(value); } while (0)
+
+#define I40E_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+#define I40E_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
#define I40E_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_GLGEN_STAT)
#define I40EVF_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_VFGEN_RSTAT)
diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h
index 3aab5ca9..4bd589e7 100644
--- a/drivers/net/i40e/base/i40e_prototype.h
+++ b/drivers/net/i40e/base/i40e_prototype.h
@@ -78,7 +78,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-#ifdef X722_SUPPORT
enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
@@ -90,11 +89,8 @@ enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
-#endif
-#ifndef I40E_NDIS_SUPPORT
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
-#endif /* I40E_NDIS_SUPPORT */
#ifdef PF_DRIVER
@@ -172,12 +168,18 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
bool rx_only_promisc);
enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
u16 seid, bool enable,
struct i40e_asq_cmd_details *cmd_details);
@@ -402,11 +404,21 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
u8 filter_count);
-
+enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count);
enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
u8 filter_count);
+enum i40e_status_code i40e_aq_remove_cloud_filters_big_buffer(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count);
+enum i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
+ struct i40e_aqc_replace_cloud_filters_cmd *filters,
+ struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf);
enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1);
@@ -521,7 +533,6 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-#ifdef X722_SUPPORT
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
struct i40e_asq_cmd_details *cmd_details);
@@ -537,12 +548,38 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
u16 *wake_reason,
struct i40e_asq_cmd_details *cmd_details);
-#endif
-enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 *value);
-enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
+enum i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details);
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_header);
+enum i40e_status_code
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index fd0a7230..3a305b67 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -3401,7 +3401,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#ifdef X722_SUPPORT
#ifdef PF_DRIVER
#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
@@ -5366,5 +5365,4 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
-#endif /* X722_SUPPORT */
#endif /* _I40E_REGISTER_H_ */
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index b5f72c32..84d57576 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -133,6 +133,7 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000,
+ I40E_DEBUG_PACKAGE = 0x00002000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -157,15 +158,22 @@ enum i40e_debug_mask {
#define I40E_PCI_LINK_SPEED_5000 0x2
#define I40E_PCI_LINK_SPEED_8000 0x3
-#define I40E_MDIO_STCODE I40E_MASK(0, \
+#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_STCODE_SHIFT)
-#define I40E_MDIO_OPCODE_ADDRESS I40E_MASK(0, \
+#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
+#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
+
+#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_PHY_COM_REG_PAGE 0x1E
@@ -189,9 +197,7 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
-#ifdef X722_SUPPORT
#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#endif
#define I40E_FW_API_VERSION_MINOR_X710 0x0005
@@ -205,13 +211,10 @@ enum i40e_memcpy_type {
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
- I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
-#ifdef X722_SUPPORT
I40E_MAC_X722,
I40E_MAC_X722_VF,
-#endif
I40E_MAC_GENERIC,
};
@@ -266,6 +269,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 fec_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
@@ -332,25 +336,35 @@ struct i40e_phy_info {
#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
-#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_KR + 32)
-#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_CR + 32)
-#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_SR + 32)
-#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_LR + 32)
+/*
+ * Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some
+ * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
+ * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So,
+ * a shift is needed to adjust for this with values larger than 31. The
+ * only affected values are I40E_PHY_TYPE_25GBASE_*.
+ */
+#define I40E_PHY_TYPE_OFFSET 1
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
+ I40E_PHY_TYPE_OFFSET)
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
-#ifdef X722_SUPPORT
enum i40e_acpi_programming_method {
I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
};
-#define I40E_WOL_SUPPORT_MASK 1
-#define I40E_ACPI_PROGRAMMING_METHOD_MASK (1 << 1)
-#define I40E_PROXY_SUPPORT_MASK (1 << 2)
+#define I40E_WOL_SUPPORT_MASK 0x1
+#define I40E_ACPI_PROGRAMMING_METHOD_MASK 0x2
+#define I40E_PROXY_SUPPORT_MASK 0x4
-#endif
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
u32 switch_mode;
@@ -359,6 +373,10 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
u32 management_mode;
+ u32 mng_protocols_over_mctp;
+#define I40E_MNG_PROTOCOL_PLDM 0x2
+#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
+#define I40E_MNG_PROTOCOL_NCSI 0x8
u32 npar_enable;
u32 os2bmc;
u32 valid_functions;
@@ -414,11 +432,9 @@ struct i40e_hw_capabilities {
u32 enabled_tcmap;
u32 maxtc;
u64 wr_csr_prot;
-#ifdef X722_SUPPORT
bool apm_wol_support;
enum i40e_acpi_programming_method acpi_prog_method;
bool proxy_support;
-#endif
};
struct i40e_mac_info {
@@ -476,6 +492,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
+ I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
@@ -554,6 +571,7 @@ struct i40e_bus_info {
u16 func;
u16 device;
u16 lan_id;
+ u16 bus_id;
};
/* Flow control (FC) parameters */
@@ -678,30 +696,22 @@ struct i40e_hw {
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
-#ifdef X722_SUPPORT
/* WoL and proxy support */
u16 num_wol_proxy_filters;
u16 wol_proxy_vsi_seid;
-#endif
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
u64 flags;
/* debug mask */
u32 debug_mask;
-#ifndef I40E_NDIS_SUPPORT
char err_str[16];
-#endif /* I40E_NDIS_SUPPORT */
};
STATIC INLINE bool i40e_is_vf(struct i40e_hw *hw)
{
-#ifdef X722_SUPPORT
return (hw->mac.type == I40E_MAC_VF ||
hw->mac.type == I40E_MAC_X722_VF);
-#else
- return hw->mac.type == I40E_MAC_VF;
-#endif
}
struct i40e_driver_version {
@@ -805,11 +815,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
-#ifdef X722_SUPPORT
I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
-#else
- I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8,
-#endif
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
@@ -817,11 +823,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
-#ifdef X722_SUPPORT
I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
-#else
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
-#endif
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
@@ -1199,10 +1201,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
-#ifdef X722_SUPPORT
#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
-#endif
struct i40e_nop_desc {
__le64 rsvd;
__le64 dtype_cmd;
@@ -1239,38 +1239,24 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
-#ifdef X722_SUPPORT
/* Note: Values 0-28 are reserved for future use.
* Value 29, 30, 32 are not supported on XL710 and X710.
*/
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
-#else
- /* Note: Values 0-30 are reserved for future use */
-#endif
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
-#ifdef X722_SUPPORT
I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
-#else
- /* Note: Value 32 is reserved for future use */
-#endif
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
-#ifdef X722_SUPPORT
/* Note: Values 37-38 are reserved for future use.
* Value 39, 40, 42 are not supported on XL710 and X710.
*/
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
-#else
- /* Note: Values 37-40 are reserved for future use */
-#endif
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
-#ifdef X722_SUPPORT
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
-#endif
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@@ -1325,12 +1311,10 @@ enum i40e_filter_program_desc_pcmd {
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
-#ifdef X722_SUPPORT
#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
-#endif
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
@@ -1502,6 +1486,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define I40E_SR_PHY_ACTIVITY_LIST_PTR 0x3D
#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define I40E_SR_SW_CHECKSUM_WORD 0x3F
#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
@@ -1894,4 +1879,83 @@ struct i40e_lldp_variables {
#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
#define I40E_FLEX_57_SHIFT 6
#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
+
+/* Version format for Dynamic Device Personalization(DDP) */
+struct i40e_ddp_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define I40E_DDP_NAME_SIZE 32
+
+/* Package header */
+struct i40e_package_header {
+ struct i40e_ddp_version version;
+ u32 segment_count;
+ u32 segment_offset[1];
+};
+
+/* Generic segment header */
+struct i40e_generic_seg_header {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_NOTES 0x00000002
+#define SEGMENT_TYPE_I40E 0x00000011
+#define SEGMENT_TYPE_X722 0x00000012
+ u32 type;
+ struct i40e_ddp_version version;
+ u32 size;
+ char name[I40E_DDP_NAME_SIZE];
+};
+
+struct i40e_metadata_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ddp_version version;
+ u32 track_id;
+ char name[I40E_DDP_NAME_SIZE];
+};
+
+struct i40e_device_id_entry {
+ u32 vendor_dev_id;
+ u32 sub_vendor_dev_id;
+};
+
+struct i40e_profile_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
+ u32 device_table_count;
+ struct i40e_device_id_entry device_table[1];
+};
+
+struct i40e_section_table {
+ u32 section_count;
+ u32 section_offset[1];
+};
+
+struct i40e_profile_section_header {
+ u16 tbl_size;
+ u16 data_end;
+ struct {
+#define SECTION_TYPE_INFO 0x00000010
+#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_NOTE 0x80000000
+#define SECTION_TYPE_NAME 0x80000001
+ u32 type;
+ u32 offset;
+ u32 size;
+ } section;
+};
+
+struct i40e_profile_info {
+ u32 track_id;
+ struct i40e_ddp_version version;
+ u8 op;
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
+ u8 reserved[7];
+ u8 name[I40E_DDP_NAME_SIZE];
+};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/i40e/base/i40e_virtchnl.h b/drivers/net/i40e/base/i40e_virtchnl.h
index fd51ec32..7a24c0f1 100644
--- a/drivers/net/i40e/base/i40e_virtchnl.h
+++ b/drivers/net/i40e/base/i40e_virtchnl.h
@@ -170,6 +170,13 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+
+#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index bf7e5a05..4c49673f 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -40,10 +40,12 @@
#include <inttypes.h>
#include <assert.h>
+#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -51,6 +53,7 @@
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
+#include <rte_hash_crc.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -138,60 +141,6 @@
#define I40E_DEFAULT_DCB_APP_NUM 1
#define I40E_DEFAULT_DCB_APP_PRIO 3
-#define I40E_INSET_NONE 0x00000000000000000ULL
-
-/* bit0 ~ bit 7 */
-#define I40E_INSET_DMAC 0x0000000000000001ULL
-#define I40E_INSET_SMAC 0x0000000000000002ULL
-#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
-#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
-#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
-
-/* bit 8 ~ bit 15 */
-#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
-#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
-#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
-#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
-#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
-#define I40E_INSET_DST_PORT 0x0000000000002000ULL
-#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
-
-/* bit 16 ~ bit 31 */
-#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
-#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
-#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
-#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
-#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
-#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
-#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
-#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
-
-/* bit 32 ~ bit 47, tunnel fields */
-#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
-#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
-#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
-#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
-#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
-#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
-
-/* bit 48 ~ bit 55 */
-#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
-
-/* bit 56 ~ bit 63, Flex Payload */
-#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD \
- (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
- I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
- I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
- I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
-
/**
* Below are values for writing un-exposed registers suggested
* by silicon experts
@@ -284,11 +233,6 @@
#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL
-#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
-#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
-#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
- I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
-
/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG 0xA4
/* PCI offset for enabling/disabling Extended Tag */
@@ -324,6 +268,8 @@ static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
uint16_t queue_id,
uint8_t stat_idx,
uint8_t is_rx);
+static int i40e_fw_version_get(struct rte_eth_dev *dev,
+ char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
@@ -345,10 +291,10 @@ static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_pfc_conf *pfc_conf);
-static void i40e_macaddr_add(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
- uint32_t index,
- uint32_t pool);
+static int i40e_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -373,8 +319,7 @@ static void i40e_stat_update_48(struct i40e_hw *hw,
uint64_t *offset,
uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
-static void i40e_dev_interrupt_handler(
- __rte_unused struct rte_intr_handle *handle, void *param);
+static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
@@ -388,10 +333,6 @@ static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
-static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
- struct i40e_macvlan_filter *mv_f,
- int num,
- struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *mv_f,
int num,
@@ -406,9 +347,6 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_ethertype_filter_set(struct i40e_pf *pf,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -461,6 +399,27 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int i40e_ethertype_filter_convert(
+ const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter);
+static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+
+static int i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
+
+static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
+static void i40e_filter_restore(struct i40e_pf *pf);
+static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
+
+int i40e_logtype_init;
+int i40e_logtype_driver;
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -503,6 +462,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.stats_reset = i40e_dev_stats_reset,
.xstats_reset = i40e_dev_stats_reset,
.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
+ .fw_version_get = i40e_fw_version_get,
.dev_infos_get = i40e_dev_info_get,
.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
.vlan_filter_set = i40e_vlan_filter_set,
@@ -520,6 +480,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.rx_queue_release = i40e_dev_rx_queue_release,
.rx_queue_count = i40e_dev_rx_queue_count,
.rx_descriptor_done = i40e_dev_rx_descriptor_done,
+ .rx_descriptor_status = i40e_dev_rx_descriptor_status,
+ .tx_descriptor_status = i40e_dev_tx_descriptor_status,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
.dev_led_on = i40e_dev_led_on,
@@ -668,17 +630,23 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
sizeof(rte_i40e_txq_prio_strings[0]))
-static struct eth_driver rte_i40e_pmd = {
- .pci_drv = {
- .id_table = pci_id_i40e_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_i40e_dev_init,
- .eth_dev_uninit = eth_i40e_dev_uninit,
- .dev_private_size = sizeof(struct i40e_adapter),
+static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct i40e_adapter), eth_i40e_dev_init);
+}
+
+static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
+}
+
+static struct rte_pci_driver rte_i40e_pmd = {
+ .id_table = pci_id_i40e_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_i40e_pci_probe,
+ .remove = eth_i40e_pci_remove,
};
static inline int
@@ -709,8 +677,9 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
return 0;
}
-RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio");
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
@@ -718,6 +687,9 @@ RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4))
#endif
+#ifndef I40E_GLQF_L3_MAP
+#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
+#endif
static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
@@ -763,8 +735,8 @@ i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
pf->main_vsi_seid, 0,
TRUE, NULL, NULL);
if (ret)
- PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control "
- " frames from VSIs.");
+ PMD_INIT_LOG(ERR,
+ "Failed to add filter to drop flow control frames from VSIs.");
}
static int
@@ -907,7 +879,7 @@ is_floating_veb_supported(struct rte_devargs *devargs)
static void
config_floating_veb(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -928,9 +900,144 @@ config_floating_veb(struct rte_eth_dev *dev)
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
static int
+i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ char ethertype_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters ethertype_hash_params = {
+ .name = ethertype_hash_name,
+ .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
+ .key_len = sizeof(struct i40e_ethertype_filter_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ /* Initialize ethertype filter rule list and hash */
+ TAILQ_INIT(&ethertype_rule->ethertype_list);
+ snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
+ "ethertype_%s", dev->data->name);
+ ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
+ if (!ethertype_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
+ return -EINVAL;
+ }
+ ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
+ sizeof(struct i40e_ethertype_filter *) *
+ I40E_MAX_ETHERTYPE_FILTER_NUM,
+ 0);
+ if (!ethertype_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for ethertype hash map!");
+ ret = -ENOMEM;
+ goto err_ethertype_hash_map_alloc;
+ }
+
+ return 0;
+
+err_ethertype_hash_map_alloc:
+ rte_hash_free(ethertype_rule->hash_table);
+
+ return ret;
+}
+
+static int
+i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ char tunnel_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters tunnel_hash_params = {
+ .name = tunnel_hash_name,
+ .entries = I40E_MAX_TUNNEL_FILTER_NUM,
+ .key_len = sizeof(struct i40e_tunnel_filter_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ /* Initialize tunnel filter rule list and hash */
+ TAILQ_INIT(&tunnel_rule->tunnel_list);
+ snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
+ "tunnel_%s", dev->data->name);
+ tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
+ if (!tunnel_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
+ return -EINVAL;
+ }
+ tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
+ sizeof(struct i40e_tunnel_filter *) *
+ I40E_MAX_TUNNEL_FILTER_NUM,
+ 0);
+ if (!tunnel_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for tunnel hash map!");
+ ret = -ENOMEM;
+ goto err_tunnel_hash_map_alloc;
+ }
+
+ return 0;
+
+err_tunnel_hash_map_alloc:
+ rte_hash_free(tunnel_rule->hash_table);
+
+ return ret;
+}
+
+static int
+i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = I40E_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct rte_eth_fdir_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ /* Initialize flow director filter rule list and hash */
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", dev->data->name);
+ fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
+ sizeof(struct i40e_fdir_filter *) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ ret = -ENOMEM;
+ goto err_fdir_hash_map_alloc;
+ }
+ return 0;
+
+err_fdir_hash_map_alloc:
+ rte_hash_free(fdir_info->hash_table);
+
+ return ret;
+}
+
+static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi;
@@ -943,6 +1050,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
+ dev->tx_pkt_prepare = i40e_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
@@ -952,9 +1060,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
i40e_set_tx_function(dev);
return 0;
}
- pci_dev = dev->pci_dev;
+ i40e_set_default_ptype_table(dev);
+ pci_dev = I40E_DEV_TO_PCI(dev);
+ intr_handle = &pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
+ dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
pf->adapter->eth_dev = dev;
@@ -963,8 +1074,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
hw->back = I40E_PF_TO_ADAPTER(pf);
hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
if (!hw->hw_addr) {
- PMD_INIT_LOG(ERR, "Hardware is not available, "
- "as address is NULL");
+ PMD_INIT_LOG(ERR,
+ "Hardware is not available, as address is NULL");
return -ENODEV;
}
@@ -1021,6 +1132,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
((hw->nvm.version >> 4) & 0xff),
(hw->nvm.version & 0xf), hw->nvm.eetrack);
+ /* initialise the L3_MAP register */
+ ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
+ 0x00000028, NULL);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret);
+
/* Need the special FW version to support floating VEB */
config_floating_veb(dev);
/* Clear PXE mode */
@@ -1100,8 +1217,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* Set the global registers with default ether type value */
ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(ERR, "Failed to set the default outer "
- "VLAN ether type");
+ PMD_INIT_LOG(ERR,
+ "Failed to set the default outer VLAN ether type");
goto err_setup_pf_switch;
}
@@ -1137,26 +1254,35 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* Should be after VSI initialized */
dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
if (!dev->data->mac_addrs) {
- PMD_INIT_LOG(ERR, "Failed to allocated memory "
- "for storing mac address");
+ PMD_INIT_LOG(ERR,
+ "Failed to allocated memory for storing mac address");
goto err_mac_alloc;
}
ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
&dev->data->mac_addrs[0]);
+ /* Init dcb to sw mode by default */
+ ret = i40e_dcb_init_configure(dev, TRUE);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(INFO, "Failed to init dcb.");
+ pf->flags &= ~I40E_FLAG_DCB;
+ }
+ /* Update HW struct after DCB configuration */
+ i40e_get_cap(hw);
+
/* initialize pf host driver to setup SRIOV resource if applicable */
i40e_pf_host_init(dev);
/* register callback func to eal lib */
- rte_intr_callback_register(&(pci_dev->intr_handle),
- i40e_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_register(intr_handle,
+ i40e_dev_interrupt_handler, dev);
/* configure and enable device interrupt */
i40e_pf_config_irq0(hw, TRUE);
i40e_pf_enable_irq0(hw);
/* enable uio intr after callback register */
- rte_intr_enable(&(pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
/*
* Add an ethertype filter to drop all flow control frames transmitted
* from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
@@ -1172,15 +1298,26 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialize mirror rule list */
TAILQ_INIT(&pf->mirror_list);
- /* Init dcb to sw mode by default */
- ret = i40e_dcb_init_configure(dev, TRUE);
- if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(INFO, "Failed to init dcb.");
- pf->flags &= ~I40E_FLAG_DCB;
- }
+ ret = i40e_init_ethtype_filter_list(dev);
+ if (ret < 0)
+ goto err_init_ethtype_filter_list;
+ ret = i40e_init_tunnel_filter_list(dev);
+ if (ret < 0)
+ goto err_init_tunnel_filter_list;
+ ret = i40e_init_fdir_filter_list(dev);
+ if (ret < 0)
+ goto err_init_fdir_filter_list;
return 0;
+err_init_fdir_filter_list:
+ rte_free(pf->tunnel.hash_table);
+ rte_free(pf->tunnel.hash_map);
+err_init_tunnel_filter_list:
+ rte_free(pf->ethertype.hash_table);
+ rte_free(pf->ethertype.hash_map);
+err_init_ethtype_filter_list:
+ rte_free(dev->data->mac_addrs);
err_mac_alloc:
i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
@@ -1200,12 +1337,73 @@ err_sync_phy_type:
return ret;
}
+static void
+i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter *p_ethertype;
+ struct i40e_ethertype_rule *ethertype_rule;
+
+ ethertype_rule = &pf->ethertype;
+ /* Remove all ethertype filter rules and hash */
+ if (ethertype_rule->hash_map)
+ rte_free(ethertype_rule->hash_map);
+ if (ethertype_rule->hash_table)
+ rte_hash_free(ethertype_rule->hash_table);
+
+ while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
+ TAILQ_REMOVE(&ethertype_rule->ethertype_list,
+ p_ethertype, rules);
+ rte_free(p_ethertype);
+ }
+}
+
+static void
+i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter *p_tunnel;
+ struct i40e_tunnel_rule *tunnel_rule;
+
+ tunnel_rule = &pf->tunnel;
+ /* Remove all tunnel director rules and hash */
+ if (tunnel_rule->hash_map)
+ rte_free(tunnel_rule->hash_map);
+ if (tunnel_rule->hash_table)
+ rte_hash_free(tunnel_rule->hash_table);
+
+ while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
+ TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
+ rte_free(p_tunnel);
+ }
+}
+
+static void
+i40e_rm_fdir_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_fdir_filter *p_fdir;
+ struct i40e_fdir_info *fdir_info;
+
+ fdir_info = &pf->fdir;
+ /* Remove all flow director rules and hash */
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_table)
+ rte_hash_free(fdir_info->hash_table);
+
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+ rte_free(p_fdir);
+ }
+}
+
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
+ struct i40e_pf *pf;
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
+ struct rte_flow *p_flow;
int ret;
uint8_t aq_fail = 0;
@@ -1214,8 +1412,10 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = dev->pci_dev;
+ pci_dev = I40E_DEV_TO_PCI(dev);
+ intr_handle = &pci_dev->intr_handle;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1245,11 +1445,21 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
dev->data->mac_addrs = NULL;
/* disable uio intr before callback unregister */
- rte_intr_disable(&(pci_dev->intr_handle));
+ rte_intr_disable(intr_handle);
/* register callback func to eal lib */
- rte_intr_callback_unregister(&(pci_dev->intr_handle),
- i40e_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler, dev);
+
+ i40e_rm_ethtype_filter_list(pf);
+ i40e_rm_tunnel_filter_list(pf);
+ i40e_rm_fdir_filter_list(pf);
+
+ /* Remove all flows */
+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+ rte_free(p_flow);
+ }
return 0;
}
@@ -1315,6 +1525,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
}
}
+ TAILQ_INIT(&pf->flow_list);
+
return 0;
err_dcb:
@@ -1335,7 +1547,8 @@ void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t i;
@@ -1448,7 +1661,8 @@ void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
@@ -1519,7 +1733,8 @@ static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t interval = i40e_calc_itr_interval(\
RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -1550,7 +1765,8 @@ static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_intr, i;
@@ -1629,7 +1845,7 @@ i40e_phy_conf_link(struct i40e_hw *hw,
/* use get_phy_abilities_resp value for the rest */
phy_conf.phy_type = phy_ab.phy_type;
phy_conf.phy_type_ext = phy_ab.phy_type_ext;
- phy_conf.fec_config = phy_ab.mod_type_ext;
+ phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
phy_conf.eee_capability = phy_ab.eee_capability;
phy_conf.eeer = phy_ab.eeer_val;
phy_conf.low_power_ctrl = phy_ab.d3_lpan;
@@ -1676,8 +1892,10 @@ i40e_dev_start(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
int ret, i;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
+ struct i40e_vsi *vsi;
hw->adapter_stopped = 0;
@@ -1693,8 +1911,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
!RTE_ETH_DEV_SRIOV(dev).active) &&
dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = dev->data->nb_rx_queues;
- if (rte_intr_efd_enable(intr_handle, intr_vector))
- return -1;
+ ret = rte_intr_efd_enable(intr_handle, intr_vector);
+ if (ret)
+ return ret;
}
if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
@@ -1703,8 +1922,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int),
0);
if (!intr_handle->intr_vec) {
- PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d rx_queues intr_vec",
+ dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -1754,6 +1974,15 @@ i40e_dev_start(struct rte_eth_dev *dev)
PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
}
+ /* Enable the VLAN promiscuous mode. */
+ if (pf->vfs) {
+ for (i = 0; i < pf->vf_num; i++) {
+ vsi = pf->vfs[i].vsi;
+ i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
+ true, NULL);
+ }
+ }
+
/* Apply link configure */
if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
@@ -1777,8 +2006,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
i40e_pf_enable_irq0(hw);
if (dev->data->dev_conf.intr_conf.lsc != 0)
- PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ PMD_INIT_LOG(INFO,
+ "lsc won't enable because of no intr multiplex");
} else if (dev->data->dev_conf.intr_conf.lsc != 0) {
ret = i40e_aq_set_phy_int_mask(hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
@@ -1794,6 +2023,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+ i40e_filter_restore(pf);
+
return I40E_SUCCESS;
err_up:
@@ -1809,7 +2040,8 @@ i40e_dev_stop(struct rte_eth_dev *dev)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
struct i40e_mirror_rule *p_mirror;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int i;
/* Disable all queues */
@@ -1860,6 +2092,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t reg;
int i;
@@ -1871,7 +2105,7 @@ i40e_dev_close(struct rte_eth_dev *dev)
/* Disable interrupt */
i40e_pf_disable_irq0(hw);
- rte_intr_disable(&(dev->pci_dev->intr_handle));
+ rte_intr_disable(intr_handle);
/* shutdown and destroy the HMC */
i40e_shutdown_lan_hmc(hw);
@@ -2069,6 +2303,8 @@ out:
if (link.link_status == old.link_status)
return -1;
+ i40e_notify_all_vfs_link_status(dev);
+
return 0;
}
@@ -2579,19 +2815,49 @@ i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
return -ENOSYS;
}
+static int
+i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 full_ver;
+ u8 ver, patch;
+ u16 build;
+ int ret;
+
+ full_ver = hw->nvm.oem_ver;
+ ver = (u8)(full_ver >> 24);
+ build = (u16)((full_ver >> 8) & 0xffff);
+ patch = (u8)(full_ver & 0xff);
+
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d%d 0x%08x %d.%d.%d",
+ ((hw->nvm.version >> 12) & 0xf),
+ ((hw->nvm.version >> 4) & 0xff),
+ (hw->nvm.version & 0xf), hw->nvm.eetrack,
+ ver, build, patch);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
static void
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = vsi->nb_qps;
dev_info->max_tx_queues = vsi->nb_qps;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2648,6 +2914,8 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.nb_max = I40E_MAX_RING_DESC,
.nb_min = I40E_MIN_RING_DESC,
.nb_align = I40E_ALIGN_RING_DESC,
+ .nb_seg_max = I40E_TX_MAX_SEG,
+ .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
};
if (pf->flags & I40E_FLAG_VMDQ) {
@@ -2708,7 +2976,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
else {
ret = -EINVAL;
PMD_DRV_LOG(ERR,
- "Unsupported vlan type in single vlan.\n");
+ "Unsupported vlan type in single vlan.");
return ret;
}
break;
@@ -2720,13 +2988,15 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
&reg_r, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Fail to debug read from "
- "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
+ PMD_DRV_LOG(ERR,
+ "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
ret = -EIO;
return ret;
}
- PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: "
- "0x%08"PRIx64"", reg_id, reg_r);
+ PMD_DRV_LOG(DEBUG,
+ "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
+ reg_id, reg_r);
reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
@@ -2740,12 +3010,14 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
reg_w, NULL);
if (ret != I40E_SUCCESS) {
ret = -EIO;
- PMD_DRV_LOG(ERR, "Fail to debug write to "
- "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
+ PMD_DRV_LOG(ERR,
+ "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
return ret;
}
- PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
- "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
+ PMD_DRV_LOG(DEBUG,
+ "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_w, reg_id);
return ret;
}
@@ -2889,8 +3161,9 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
- PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB, "
- "High_water must <= %d.", max_high_water);
+ PMD_INIT_LOG(ERR,
+ "Invalid high/low water setup value in KB, High_water must be <= %d.",
+ max_high_water);
return -EINVAL;
}
@@ -2994,7 +3267,7 @@ i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
}
/* Add a MAC address, and update filters */
-static void
+static int
i40e_macaddr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
__rte_unused uint32_t index,
@@ -3011,13 +3284,13 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
pool);
- return;
+ return -ENOTSUP;
}
if (pool > pf->nb_cfg_vmdq_vsi) {
PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
pool, pf->nb_cfg_vmdq_vsi);
- return;
+ return -EINVAL;
}
(void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
@@ -3034,8 +3307,9 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
ret = i40e_vsi_add_mac(vsi, &mac_filter);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
- return;
+ return -ENODEV;
}
+ return 0;
}
/* Remove a MAC address, and update filters */
@@ -3062,8 +3336,8 @@ i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
/* No VMDQ pool enabled or configured */
if (!(pf->flags & I40E_FLAG_VMDQ) ||
(i > pf->nb_cfg_vmdq_vsi)) {
- PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
- "/configured");
+ PMD_DRV_LOG(ERR,
+ "No VMDQ pool enabled/configured");
return;
}
vsi = pf->vmdq[i - 1].vsi;
@@ -3264,9 +3538,9 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size != lut_size ||
reta_size > ETH_RSS_RETA_SIZE_512) {
- PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
- "(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, lut_size);
+ PMD_DRV_LOG(ERR,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
+ reta_size, lut_size);
return -EINVAL;
}
@@ -3305,9 +3579,9 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size != lut_size ||
reta_size > ETH_RSS_RETA_SIZE_512) {
- PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
- "(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, lut_size);
+ PMD_DRV_LOG(ERR,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
+ reta_size, lut_size);
return -EINVAL;
}
@@ -3362,8 +3636,9 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
mem->va = mz->addr;
mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
mem->zone = (const void *)mz;
- PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
- "%"PRIu64, mz->name, mem->pa);
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s allocated with physical address: %"PRIu64,
+ mz->name, mem->pa);
return I40E_SUCCESS;
}
@@ -3380,9 +3655,9 @@ i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
if (!mem)
return I40E_ERR_PARAM;
- PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
- "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
- mem->pa);
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s to be freed with physical address: %"PRIu64,
+ ((const struct rte_memzone *)mem->zone)->name, mem->pa);
rte_memzone_free((const struct rte_memzone *)mem->zone);
mem->zone = NULL;
mem->va = NULL;
@@ -3493,9 +3768,10 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
uint16_t qp_count = 0, vsi_count = 0;
- if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
+ if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
return -EINVAL;
}
@@ -3536,13 +3812,13 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
/* VF queue/VSI allocation */
pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
- if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
+ if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
pf->flags |= I40E_FLAG_SRIOV;
pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
- pf->vf_num = dev->pci_dev->max_vfs;
- PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
- "in total %u queues", pf->vf_num, pf->vf_nb_qps,
- pf->vf_nb_qps * pf->vf_num);
+ pf->vf_num = pci_dev->max_vfs;
+ PMD_DRV_LOG(DEBUG,
+ "%u VF VSIs, %u queues per VF VSI, in total %u queues",
+ pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
} else {
pf->vf_nb_qps = 0;
pf->vf_num = 0;
@@ -3570,14 +3846,13 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
if (pf->max_nb_vmdq_vsi) {
pf->flags |= I40E_FLAG_VMDQ;
pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
- PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
- "per VMDQ VSI, in total %u queues",
- pf->max_nb_vmdq_vsi,
- pf->vmdq_nb_qps, pf->vmdq_nb_qps *
- pf->max_nb_vmdq_vsi);
+ PMD_DRV_LOG(DEBUG,
+ "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
+ pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
+ pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
} else {
- PMD_DRV_LOG(INFO, "No enough queues left for "
- "VMDq");
+ PMD_DRV_LOG(INFO,
+ "No enough queues left for VMDq");
}
} else {
PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
@@ -3590,15 +3865,15 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
pf->flags |= I40E_FLAG_DCB;
if (qp_count > hw->func_caps.num_tx_qp) {
- PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
- "the hardware maximum %u", qp_count,
- hw->func_caps.num_tx_qp);
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate %u queues, which exceeds the hardware maximum %u",
+ qp_count, hw->func_caps.num_tx_qp);
return -EINVAL;
}
if (vsi_count > hw->func_caps.num_vsis) {
- PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
- "the hardware maximum %u", vsi_count,
- hw->func_caps.num_vsis);
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
+ vsi_count, hw->func_caps.num_vsis);
return -EINVAL;
}
@@ -3844,8 +4119,8 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
*/
entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
if (entry == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate memory for "
- "resource pool");
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate memory for resource pool");
return -ENOMEM;
}
entry->base = valid_entry->base;
@@ -3885,9 +4160,9 @@ validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
}
if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
- PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
- "HW support 0x%x", hw->func_caps.enabled_tcmap,
- enabled_tcmap);
+ PMD_DRV_LOG(ERR,
+ "Enabled TC map 0x%x not applicable to HW support 0x%x",
+ hw->func_caps.enabled_tcmap, enabled_tcmap);
return I40E_NOT_SUPPORTED;
}
return I40E_SUCCESS;
@@ -4105,12 +4380,13 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
hw->aq.asq_last_status);
goto fail;
}
+ veb->enabled_tc = I40E_DEFAULT_TCMAP;
/* get statistics index */
ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
&veb->stats_idx, NULL, NULL, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
+ PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
hw->aq.asq_last_status);
goto fail;
}
@@ -4232,8 +4508,8 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
struct i40e_mac_filter *f;
struct ether_addr *mac;
- PMD_DRV_LOG(WARNING, "Cannot remove the default "
- "macvlan filter");
+ PMD_DRV_LOG(DEBUG,
+ "Cannot remove the default macvlan filter");
/* It needs to add the permanent mac into mac list */
f = rte_zmalloc("macv_filter", sizeof(*f), 0);
if (f == NULL) {
@@ -4283,8 +4559,9 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
&ets_sla_config, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
- "configuration %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "VSI failed to get TC bandwdith configuration %u",
+ hw->aq.asq_last_status);
return ret;
}
@@ -4351,7 +4628,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret)
- PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n",
+ PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
hw->aq.asq_last_status);
}
@@ -4372,14 +4649,14 @@ i40e_vsi_setup(struct i40e_pf *pf,
if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
uplink_vsi == NULL) {
- PMD_DRV_LOG(ERR, "VSI setup failed, "
- "VSI link shouldn't be NULL");
+ PMD_DRV_LOG(ERR,
+ "VSI setup failed, VSI link shouldn't be NULL");
return NULL;
}
if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
- PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
- "uplink VSI should be NULL");
+ PMD_DRV_LOG(ERR,
+ "VSI setup failed, MAIN VSI uplink VSI should be NULL");
return NULL;
}
@@ -4423,6 +4700,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
vsi->user_param = user_param;
+ vsi->vlan_anti_spoof_on = 0;
+ vsi->vlan_filter_on = 0;
/* Allocate queues */
switch (vsi->type) {
case I40E_VSI_MAIN :
@@ -4530,8 +4809,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
goto fail_msix_alloc;
}
ctxt.seid = vsi->seid;
@@ -4599,13 +4878,14 @@ i40e_vsi_setup(struct i40e_pf *pf,
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
- I40E_DEFAULT_TCMAP);
+ hw->func_caps.enabled_tcmap);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
goto fail_msix_alloc;
}
- ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+
+ ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
ctxt.info.valid_sections |=
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
/**
@@ -4644,8 +4924,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
goto fail_msix_alloc;
}
ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
@@ -4662,8 +4942,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping.");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping.");
goto fail_msix_alloc;
}
ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
@@ -4926,8 +5206,9 @@ i40e_pf_setup(struct i40e_pf *pf)
/* make queue allocated first, let FDIR use queue pair 0*/
ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
if (ret != I40E_FDIR_QUEUE_ID) {
- PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :"
- " ret =%d", ret);
+ PMD_DRV_LOG(ERR,
+ "queue allocation fails for FDIR: ret =%d",
+ ret);
pf->flags &= ~I40E_FLAG_FDIR;
}
}
@@ -4946,12 +5227,12 @@ i40e_pf_setup(struct i40e_pf *pf)
else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
else {
- PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
- hw->func_caps.rss_table_size);
+ PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
+ hw->func_caps.rss_table_size);
return I40E_ERR_PARAM;
}
- PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
- "size: %u\n", hw->func_caps.rss_table_size);
+ PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
+ hw->func_caps.rss_table_size);
pf->hash_lut_size = hw->func_caps.rss_table_size;
/* Enable ethtype and macvlan filters */
@@ -5201,8 +5482,8 @@ i40e_dev_rx_init(struct i40e_pf *pf)
ret = i40e_rx_queue_init(rxq);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to do RX queue "
- "initialization");
+ PMD_DRV_LOG(ERR,
+ "Failed to do RX queue initialization");
break;
}
}
@@ -5452,18 +5733,10 @@ static void
i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_virtchnl_pf_event event;
int i;
- event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
- event.event_data.link_event.link_status =
- dev->data->dev_link.link_status;
- event.event_data.link_event.link_speed =
- (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
-
for (i = 0; i < pf->vf_num; i++)
- i40e_pf_host_send_msg_to_vf(&pf->vfs[i], I40E_VIRTCHNL_OP_EVENT,
- I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+ i40e_notify_vf_link_status(dev, &pf->vfs[i]);
}
static void
@@ -5486,8 +5759,9 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
ret = i40e_clean_arq_element(hw, &info, &pending);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
- "aq_err: %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(INFO,
+ "Failed to read msg from AdminQ, aq_err: %u",
+ hw->aq.asq_last_status);
break;
}
opcode = rte_le_to_cpu_16(info.desc.opcode);
@@ -5504,14 +5778,12 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
break;
case i40e_aqc_opc_get_link_status:
ret = i40e_dev_link_update(dev, 0);
- if (!ret) {
- i40e_notify_all_vfs_link_status(dev);
+ if (!ret)
_rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC, NULL);
- }
break;
default:
- PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+ PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
opcode);
break;
}
@@ -5532,8 +5804,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
* void
*/
static void
-i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+i40e_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5550,7 +5821,6 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
PMD_DRV_LOG(INFO, "No interrupt event");
goto done;
}
-#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
@@ -5565,7 +5835,6 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
PMD_DRV_LOG(ERR, "ICR0: HMC error");
if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
-#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
@@ -5579,10 +5848,10 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
done:
/* Enable interrupt */
i40e_pf_enable_irq0(hw);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(dev->intr_handle);
}
-static int
+int
i40e_add_macvlan_filters(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *filter,
int total)
@@ -5632,7 +5901,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi,
flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
+ PMD_DRV_LOG(ERR, "Invalid MAC match type");
ret = I40E_ERR_PARAM;
goto DONE;
}
@@ -5656,7 +5925,7 @@ DONE:
return ret;
}
-static int
+int
i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *filter,
int total)
@@ -5707,7 +5976,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
+ PMD_DRV_LOG(ERR, "Invalid MAC filter type");
ret = I40E_ERR_PARAM;
goto DONE;
}
@@ -5762,14 +6031,11 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi,
}
static void
-i40e_set_vlan_filter(struct i40e_vsi *vsi,
- uint16_t vlan_id, bool on)
+i40e_store_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id, bool on)
{
uint32_t vid_idx, vid_bit;
- if (vlan_id > ETH_VLAN_ID_MAX)
- return;
-
vid_idx = I40E_VFTA_IDX(vlan_id);
vid_bit = I40E_VFTA_BIT(vlan_id);
@@ -5779,11 +6045,43 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi,
vsi->vfta[vid_idx] &= ~vid_bit;
}
+void
+i40e_set_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id, bool on)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
+ int ret;
+
+ if (vlan_id > ETH_VLAN_ID_MAX)
+ return;
+
+ i40e_store_vlan_filter(vsi, vlan_id, on);
+
+ if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
+ return;
+
+ vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
+
+ if (on) {
+ ret = i40e_aq_add_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to add vlan filter");
+ } else {
+ ret = i40e_aq_remove_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR,
+ "Failed to remove vlan filter");
+ }
+}
+
/**
* Find all vlan options for specific mac addr,
* return with actual vlan found.
*/
-static inline int
+int
i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *mv_f,
int num, struct ether_addr *addr)
@@ -5804,8 +6102,8 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
if (vsi->vfta[j] & (1 << k)) {
if (i > num - 1) {
- PMD_DRV_LOG(ERR, "vlan number "
- "not match");
+ PMD_DRV_LOG(ERR,
+ "vlan number doesn't match");
return I40E_ERR_PARAM;
}
(void)rte_memcpy(&mv_f[i].macaddr,
@@ -6098,7 +6396,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
filter_type == RTE_MACVLAN_HASH_MATCH) {
if (vlan_num == 0) {
- PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
return I40E_ERR_PARAM;
}
} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
@@ -6207,18 +6505,14 @@ i40e_parse_hena(uint64_t flags)
rss_hf |= ETH_RSS_FRAG_IPV4;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
-#ifdef X722_SUPPORT
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK))
rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
-#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
-#ifdef X722_SUPPORT
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
-#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
@@ -6227,18 +6521,14 @@ i40e_parse_hena(uint64_t flags)
rss_hf |= ETH_RSS_FRAG_IPV6;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
-#ifdef X722_SUPPORT
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
-#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
-#ifdef X722_SUPPORT
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
-#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
@@ -6289,8 +6579,7 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
if (ret)
- PMD_INIT_LOG(ERR, "Failed to configure RSS key "
- "via AQ");
+ PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
} else {
uint32_t *hash_key = (uint32_t *)key;
uint16_t i;
@@ -6436,7 +6725,95 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
return 0;
}
+/* Convert tunnel filter structure */
+static int
+i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
+ (struct ether_addr *)&tunnel_filter->input.outer_mac);
+ ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
+ (struct ether_addr *)&tunnel_filter->input.inner_mac);
+ tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
+ if ((rte_le_to_cpu_16(cld_filter->element.flags) &
+ I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
+ I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
+ tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ else
+ tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ tunnel_filter->input.flags = cld_filter->element.flags;
+ tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
+ tunnel_filter->queue = cld_filter->element.queue_number;
+ rte_memcpy(tunnel_filter->input.general_fields,
+ cld_filter->general_fields,
+ sizeof(cld_filter->general_fields));
+
+ return 0;
+}
+
+/* Check if there exists the tunnel filter */
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return tunnel_rule->hash_map[ret];
+}
+
+/* Add a tunnel filter into the SW list */
static int
+i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = tunnel_filter;
+
+ TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
+
+ return 0;
+}
+
+/* Delete a tunnel filter from the SW list */
+int
+i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel_filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ tunnel_filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
+ rte_free(tunnel_filter);
+
+ return 0;
+}
+
+int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add)
@@ -6449,37 +6826,44 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
int val, ret = 0;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_vsi *vsi = pf->main_vsi;
- struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
- struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
cld_filter = rte_zmalloc("tunnel_filter",
- sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
- 0);
+ sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
+ 0);
if (NULL == cld_filter) {
PMD_DRV_LOG(ERR, "Failed to alloc memory.");
- return -EINVAL;
+ return -ENOMEM;
}
pfilter = cld_filter;
- ether_addr_copy(&tunnel_filter->outer_mac, (struct ether_addr*)&pfilter->outer_mac);
- ether_addr_copy(&tunnel_filter->inner_mac, (struct ether_addr*)&pfilter->inner_mac);
+ ether_addr_copy(&tunnel_filter->outer_mac,
+ (struct ether_addr *)&pfilter->element.outer_mac);
+ ether_addr_copy(&tunnel_filter->inner_mac,
+ (struct ether_addr *)&pfilter->element.inner_mac);
- pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+ pfilter->element.inner_vlan =
+ rte_cpu_to_le_16(tunnel_filter->inner_vlan);
if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
- rte_memcpy(&pfilter->ipaddr.v4.data,
+ rte_memcpy(&pfilter->element.ipaddr.v4.data,
&rte_cpu_to_le_32(ipv4_addr),
- sizeof(pfilter->ipaddr.v4.data));
+ sizeof(pfilter->element.ipaddr.v4.data));
} else {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
for (i = 0; i < 4; i++) {
convert_ipv6[i] =
rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
}
- rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6,
- sizeof(pfilter->ipaddr.v6.data));
+ rte_memcpy(&pfilter->element.ipaddr.v6.data,
+ &convert_ipv6,
+ sizeof(pfilter->element.ipaddr.v6.data));
}
/* check tunneled type */
@@ -6501,23 +6885,369 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
}
val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
- &pfilter->flags);
+ &pfilter->element.flags);
if (val < 0) {
rte_free(cld_filter);
return -EINVAL;
}
- pfilter->flags |= rte_cpu_to_le_16(
+ pfilter->element.flags |= rte_cpu_to_le_16(
I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
- pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
- pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
+ pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->element.queue_number =
+ rte_cpu_to_le_16(tunnel_filter->queue_id);
+
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ return -EINVAL;
+ }
- if (add)
- ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
- else
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ return -EINVAL;
+ }
+
+ if (add) {
+ ret = i40e_aq_add_cloud_filters(hw,
+ vsi->seid, &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ return -ENOTSUP;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- cld_filter, 1);
+ &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ return -ENOTSUP;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
+
+ rte_free(cld_filter);
+ return ret;
+}
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
+#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4
+#define I40E_TR_GENEVE_KEY_MASK 0x8
+#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40
+#define I40E_TR_GRE_KEY_MASK 0x400
+#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800
+#define I40E_TR_GRE_NO_KEY_MASK 0x8000
+
+static enum
+i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace.tr_bit = 0;
+
+ /* Prepare the buffer, 3 entries */
+ filter_replace_buf.data[0] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[2] = 0xFF;
+ filter_replace_buf.data[3] = 0xFF;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[7] = 0xF0;
+ filter_replace_buf.data[8]
+ = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
+ filter_replace_buf.data[8] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
+ I40E_TR_GENEVE_KEY_MASK |
+ I40E_TR_GENERIC_UDP_TUNNEL_MASK;
+ filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
+ I40E_TR_GRE_KEY_WITH_XSUM_MASK |
+ I40E_TR_GRE_NO_KEY_MASK) >> 8;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ return status;
+}
+
+static enum
+i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ /* For MPLSoUDP */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
+ I40E_AQC_MIRROR_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (status < 0)
+ return status;
+
+ /* For MPLSoGRE */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
+ I40E_AQC_MIRROR_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ return status;
+}
+
+int
+i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_conf *tunnel_filter,
+ uint8_t add)
+{
+ uint16_t ip_type;
+ uint32_t ipv4_addr;
+ uint8_t i, tun_type = 0;
+ /* internal variable to convert ipv6 byte order */
+ uint32_t convert_ipv6[4];
+ int val, ret = 0;
+ struct i40e_pf_vf *vf = NULL;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
+ uint32_t teid_le;
+ bool big_buffer = 0;
+
+ cld_filter = rte_zmalloc("tunnel_filter",
+ sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
+ 0);
+
+ if (cld_filter == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -ENOMEM;
+ }
+ pfilter = cld_filter;
+
+ ether_addr_copy(&tunnel_filter->outer_mac,
+ (struct ether_addr *)&pfilter->element.outer_mac);
+ ether_addr_copy(&tunnel_filter->inner_mac,
+ (struct ether_addr *)&pfilter->element.inner_mac);
+
+ pfilter->element.inner_vlan =
+ rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+ if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+ ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+ rte_memcpy(&pfilter->element.ipaddr.v4.data,
+ &rte_cpu_to_le_32(ipv4_addr),
+ sizeof(pfilter->element.ipaddr.v4.data));
+ } else {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+ for (i = 0; i < 4; i++) {
+ convert_ipv6[i] =
+ rte_cpu_to_le_32(rte_be_to_cpu_32(
+ tunnel_filter->ip_addr.ipv6_addr[i]));
+ }
+ rte_memcpy(&pfilter->element.ipaddr.v6.data,
+ &convert_ipv6,
+ sizeof(pfilter->element.ipaddr.v6.data));
+ }
+
+ /* check tunneled type */
+ switch (tunnel_filter->tunnel_type) {
+ case I40E_TUNNEL_TYPE_VXLAN:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
+ break;
+ case I40E_TUNNEL_TYPE_NVGRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
+ break;
+ case I40E_TUNNEL_TYPE_IP_IN_GRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
+ break;
+ case I40E_TUNNEL_TYPE_MPLSoUDP:
+ if (!pf->mpls_replace_flag) {
+ i40e_replace_mpls_l1_filter(pf);
+ i40e_replace_mpls_cloud_filter(pf);
+ pf->mpls_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
+ teid_le >> 4;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ (teid_le & 0xF) << 12;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
+ 0x40;
+ big_buffer = 1;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP;
+ break;
+ case I40E_TUNNEL_TYPE_MPLSoGRE:
+ if (!pf->mpls_replace_flag) {
+ i40e_replace_mpls_l1_filter(pf);
+ i40e_replace_mpls_cloud_filter(pf);
+ pf->mpls_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
+ teid_le >> 4;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ (teid_le & 0xF) << 12;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
+ 0x0;
+ big_buffer = 1;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
+ break;
+ case I40E_TUNNEL_TYPE_QINQ:
+ if (!pf->qinq_replace_flag) {
+ ret = i40e_cloud_filter_qinq_create(pf);
+ if (ret < 0)
+ PMD_DRV_LOG(DEBUG,
+ "QinQ tunnel filter already created.");
+ pf->qinq_replace_flag = 1;
+ }
+ /* Add in the General fields the values of
+ * the Outer and Inner VLAN
+ * Big Buffer should be set, see changes in
+ * i40e_aq_add_cloud_filters
+ */
+ pfilter->general_fields[0] = tunnel_filter->inner_vlan;
+ pfilter->general_fields[1] = tunnel_filter->outer_vlan;
+ big_buffer = 1;
+ break;
+ default:
+ /* Other tunnel types is not supported. */
+ PMD_DRV_LOG(ERR, "tunnel type is not supported.");
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
+ pfilter->element.flags |=
+ I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ else {
+ val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
+ &pfilter->element.flags);
+ if (val < 0) {
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+ }
+
+ pfilter->element.flags |= rte_cpu_to_le_16(
+ I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
+ ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
+ pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->element.queue_number =
+ rte_cpu_to_le_16(tunnel_filter->queue_id);
+
+ if (!tunnel_filter->is_to_vf)
+ vsi = pf->main_vsi;
+ else {
+ if (tunnel_filter->vf_id >= pf->vf_num) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+ vf = &pf->vfs[tunnel_filter->vf_id];
+ vsi = vf->vsi;
+ }
+
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ check_filter.is_to_vf = tunnel_filter->is_to_vf;
+ check_filter.vf_id = tunnel_filter->vf_id;
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ return -EINVAL;
+ }
+
+ if (add) {
+ if (big_buffer)
+ ret = i40e_aq_add_cloud_filters_big_buffer(hw,
+ vsi->seid, cld_filter, 1);
+ else
+ ret = i40e_aq_add_cloud_filters(hw,
+ vsi->seid, &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ return -ENOTSUP;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
+ if (big_buffer)
+ ret = i40e_aq_remove_cloud_filters_big_buffer(
+ hw, vsi->seid, cld_filter, 1);
+ else
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ return -ENOTSUP;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
rte_free(cld_filter);
return ret;
@@ -6554,8 +7284,9 @@ i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
/* Now check if there is space to add the new port */
idx = i40e_get_vxlan_port_idx(pf, 0);
if (idx < 0) {
- PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
- "not adding port %d", port);
+ PMD_DRV_LOG(ERR,
+ "Maximum number of UDP ports reached, not adding port %d",
+ port);
return -ENOSPC;
}
@@ -6794,7 +7525,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
int ret = -EINVAL;
val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
- PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
+ PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
if (len == 3) {
reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
@@ -6813,7 +7544,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
} else {
ret = 0;
}
- PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n",
+ PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
return ret;
@@ -6926,15 +7657,15 @@ i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
if (enable > 0) {
if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
- PMD_DRV_LOG(INFO, "Symmetric hash has already "
- "been enabled");
+ PMD_DRV_LOG(INFO,
+ "Symmetric hash has already been enabled");
return;
}
reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
} else {
if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
- PMD_DRV_LOG(INFO, "Symmetric hash has already "
- "been disabled");
+ PMD_DRV_LOG(INFO,
+ "Symmetric hash has already been disabled");
return;
}
reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
@@ -7051,23 +7782,60 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
pctype = i40e_flowtype_to_pctype(i);
reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP),
+ reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP),
+ reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK),
+ reg);
+ } else {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
+ reg);
+ }
+ } else {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ }
}
reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
/* Toeplitz */
if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
- PMD_DRV_LOG(DEBUG, "Hash function already set to "
- "Toeplitz");
+ PMD_DRV_LOG(DEBUG,
+ "Hash function already set to Toeplitz");
goto out;
}
reg |= I40E_GLQF_CTL_HTOEP_MASK;
} else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
/* Simple XOR */
if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
- PMD_DRV_LOG(DEBUG, "Hash function already set to "
- "Simple XOR");
+ PMD_DRV_LOG(DEBUG,
+ "Hash function already set to Simple XOR");
goto out;
}
reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
@@ -7110,7 +7878,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_FLEX_PAYLOAD,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7129,7 +7896,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_FLEX_PAYLOAD,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7139,7 +7905,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7149,7 +7914,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7183,7 +7947,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7202,7 +7965,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
I40E_INSET_FLEX_PAYLOAD,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7212,7 +7974,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
I40E_INSET_FLEX_PAYLOAD,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7222,7 +7983,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
I40E_INSET_FLEX_PAYLOAD,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7262,7 +8022,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
@@ -7273,19 +8032,16 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
@@ -7307,7 +8063,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
@@ -7318,19 +8073,16 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
@@ -7374,7 +8126,7 @@ i40e_validate_input_set(enum i40e_filter_pctype pctype,
}
/* default input set fields combination per pctype */
-static uint64_t
+uint64_t
i40e_get_default_input_set(uint16_t pctype)
{
static const uint64_t default_inset_table[] = {
@@ -7383,22 +8135,18 @@ i40e_get_default_input_set(uint16_t pctype)
[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
@@ -7410,22 +8158,18 @@ i40e_get_default_input_set(uint16_t pctype)
[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
@@ -7681,10 +8425,10 @@ i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
{
uint32_t reg = i40e_read_rx_ctl(hw, addr);
- PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg);
+ PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
if (reg != val)
i40e_write_rx_ctl(hw, addr, val);
- PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr,
+ PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
(uint32_t)i40e_read_rx_ctl(hw, addr));
}
@@ -7995,16 +8739,95 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
return ret;
}
+/* Convert ethertype filter structure */
+static int
+i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter)
+{
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ filter->input.ether_type = input->ether_type;
+ filter->flags = input->flags;
+ filter->queue = input->queue;
+
+ return 0;
+}
+
+/* Check if there exists the ehtertype filter */
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return ethertype_rule->hash_map[ret];
+}
+
+/* Add ethertype filter in SW list */
+static int
+i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete ethertype filter in SW list */
+int
+i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ struct i40e_ethertype_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* Configure ethertype filter, which can director packet by filtering
* with mac address and ether_type or only ether_type
*/
-static int
+int
i40e_ethertype_filter_set(struct i40e_pf *pf,
struct rte_eth_ethertype_filter *filter,
bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *ethertype_filter, *node;
+ struct i40e_ethertype_filter check_filter;
struct i40e_control_filter_stats stats;
uint16_t flags = 0;
int ret;
@@ -8015,13 +8838,29 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
}
if (filter->ether_type == ETHER_TYPE_IPv4 ||
filter->ether_type == ETHER_TYPE_IPv6) {
- PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
- " control packet filter.", filter->ether_type);
+ PMD_DRV_LOG(ERR,
+ "unsupported ether_type(0x%04x) in control packet filter.",
+ filter->ether_type);
return -EINVAL;
}
if (filter->ether_type == ETHER_TYPE_VLAN)
- PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
- " not supported.");
+ PMD_DRV_LOG(WARNING,
+ "filter vlan ether_type in first tag is not supported.");
+
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_ethertype_filter_convert(filter, &check_filter);
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
+ &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
+ return -EINVAL;
+ }
if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
@@ -8036,14 +8875,25 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
pf->main_vsi->seid,
filter->queue, add, &stats, NULL);
- PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
- " mac_etype_used = %u, etype_used = %u,"
- " mac_etype_free = %u, etype_free = %u\n",
- ret, stats.mac_etype_used, stats.etype_used,
- stats.mac_etype_free, stats.etype_free);
+ PMD_DRV_LOG(INFO,
+ "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
+ ret, stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
if (ret < 0)
return -ENOSYS;
- return 0;
+
+ /* Add or delete a filter in SW list */
+ if (add) {
+ ethertype_filter = rte_zmalloc("ethertype_filter",
+ sizeof(*ethertype_filter), 0);
+ rte_memcpy(ethertype_filter, &check_filter,
+ sizeof(check_filter));
+ ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ } else {
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+ }
+
+ return ret;
}
/*
@@ -8078,7 +8928,7 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
FALSE);
break;
default:
- PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
ret = -ENOSYS;
break;
}
@@ -8116,6 +8966,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &i40e_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -8133,10 +8988,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
static void
i40e_enable_extended_tag(struct rte_eth_dev *dev)
{
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
uint32_t buf = 0;
int ret;
- ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CAP_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
@@ -8149,7 +9005,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev)
}
buf = 0;
- ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CTRL_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
@@ -8161,7 +9017,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev)
return;
}
buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
- ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CTRL_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
@@ -8224,18 +9080,14 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
[I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
@@ -8243,18 +9095,14 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
[I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
@@ -8369,9 +9217,9 @@ i40e_configure_registers(struct i40e_hw *hw)
ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
reg_table[i].val, NULL);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
- "address of 0x%"PRIx32, reg_table[i].val,
- reg_table[i].addr);
+ PMD_DRV_LOG(ERR,
+ "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
+ reg_table[i].val, reg_table[i].addr);
break;
}
PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
@@ -8416,8 +9264,9 @@ i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
I40E_VSI_L2TAGSTXVALID(
vsi->vsi_id), reg, NULL);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to update "
- "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
+ PMD_DRV_LOG(ERR,
+ "Failed to update VSI_L2TAGSTXVALID[%d]",
+ vsi->vsi_id);
return I40E_ERR_CONFIG;
}
}
@@ -8468,11 +9317,10 @@ i40e_aq_add_mirror_rule(struct i40e_hw *hw,
rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
- PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
- "rule_id = %u"
- " mirror_rules_used = %u, mirror_rules_free = %u,",
- hw->aq.asq_last_status, resp->rule_id,
- resp->mirror_rules_used, resp->mirror_rules_free);
+ PMD_DRV_LOG(INFO,
+ "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
+ hw->aq.asq_last_status, resp->rule_id,
+ resp->mirror_rules_used, resp->mirror_rules_free);
*rule_id = rte_le_to_cpu_16(resp->rule_id);
return status;
@@ -8550,8 +9398,8 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
- PMD_DRV_LOG(ERR, "mirror rule can not be configured"
- " without veb or vfs.");
+ PMD_DRV_LOG(ERR,
+ "mirror rule can not be configured without veb or vfs.");
return -ENOSYS;
}
if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
@@ -8583,9 +9431,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
mirr_rule->entries,
mirr_rule->num_entries, mirr_rule->id);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
- " ret = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "failed to remove mirror rule: ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOSYS;
}
TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
@@ -8674,9 +9522,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
mirr_rule->rule_type, mirr_rule->entries,
j, &rule_id);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "failed to add mirror rule:"
- " ret = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "failed to add mirror rule: ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
rte_free(mirr_rule);
return -ENOSYS;
}
@@ -8728,9 +9576,9 @@ i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
mirr_rule->entries,
mirr_rule->num_entries, mirr_rule->id);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
- " status = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "failed to remove mirror rule: status = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOSYS;
}
TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
@@ -9162,9 +10010,9 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
&veb_bw, NULL);
if (ret) {
- PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation"
- " per TC failed = %d",
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "AQ command Config switch_comp BW allocation per TC failed = %d",
+ hw->aq.asq_last_status);
return ret;
}
@@ -9172,16 +10020,18 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
&ets_query, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS"
- " configuration %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "Failed to get switch_comp ETS configuration %u",
+ hw->aq.asq_last_status);
return ret;
}
memset(&bw_query, 0, sizeof(bw_query));
ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
&bw_query, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth"
- " configuration %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "Failed to get switch_comp bandwidth configuration %u",
+ hw->aq.asq_last_status);
return ret;
}
@@ -9246,8 +10096,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
}
ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
if (ret) {
- PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation"
- " per TC failed = %d",
+ PMD_INIT_LOG(ERR,
+ "AQ command Config VSI BW allocation per TC failed = %d",
hw->aq.asq_last_status);
goto out;
}
@@ -9268,9 +10118,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
/* Update the VSI after updating the VSI queue-mapping information */
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to configure "
- "TC queue mapping = %d",
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
+ hw->aq.asq_last_status);
goto out;
}
/* update the local VSI info with updated queue map */
@@ -9322,8 +10171,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
/* Use the FW API if FW > v4.4*/
if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
(hw->aq.fw_maj_ver >= 5))) {
- PMD_INIT_LOG(ERR, "FW < v4.4, can not use FW LLDP API"
- " to configure DCB");
+ PMD_INIT_LOG(ERR,
+ "FW < v4.4, can not use FW LLDP API to configure DCB");
return I40E_ERR_FIRMWARE_API_VERSION;
}
@@ -9338,8 +10187,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
old_cfg->etsrec = old_cfg->etscfg;
ret = i40e_set_dcb_config(hw);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Set DCB Config failed, err %s aq_err %s\n",
+ PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
i40e_stat_str(hw, ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
@@ -9371,7 +10219,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
if (ret)
PMD_INIT_LOG(WARNING,
- "Failed configuring TC for VEB seid=%d\n",
+ "Failed configuring TC for VEB seid=%d",
main_vsi->veb->seid);
}
/* Update each VSI */
@@ -9389,8 +10237,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
I40E_DEFAULT_TCMAP);
if (ret)
PMD_INIT_LOG(WARNING,
- "Failed configuring TC for VSI seid=%d\n",
- vsi_list->vsi->seid);
+ "Failed configuring TC for VSI seid=%d",
+ vsi_list->vsi->seid);
/* continue */
}
}
@@ -9409,7 +10257,7 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int ret = 0;
+ int i, ret = 0;
if ((pf->flags & I40E_FLAG_DCB) == 0) {
PMD_INIT_LOG(ERR, "HW doesn't support DCB");
@@ -9436,6 +10284,9 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
hw->local_dcbx_config.etscfg.tsatable[0] =
I40E_IEEE_TSA_ETS;
+ /* all UPs mapping to TC0 */
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
hw->local_dcbx_config.etsrec =
hw->local_dcbx_config.etscfg;
hw->local_dcbx_config.pfc.willing = 0;
@@ -9450,15 +10301,15 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
I40E_APP_PROTOID_FCOE;
ret = i40e_set_dcb_config(hw);
if (ret) {
- PMD_INIT_LOG(ERR, "default dcb config fails."
- " err = %d, aq_err = %d.", ret,
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "default dcb config fails. err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOSYS;
}
} else {
- PMD_INIT_LOG(ERR, "DCB initialization in FW fails,"
- " err = %d, aq_err = %d.", ret,
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "DCB initialization in FW fails, err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOTSUP;
}
} else {
@@ -9469,14 +10320,14 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
ret = i40e_init_dcb(hw);
if (!ret) {
if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
- PMD_INIT_LOG(ERR, "HW doesn't support"
- " DCBX offload.");
+ PMD_INIT_LOG(ERR,
+ "HW doesn't support DCBX offload.");
return -ENOTSUP;
}
} else {
- PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
- " aq_err = %d.", ret,
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "DCBX configuration failed, err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOTSUP;
}
}
@@ -9585,7 +10436,8 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
static int
i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -9610,7 +10462,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
I40E_WRITE_FLUSH(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(&pci_dev->intr_handle);
return 0;
}
@@ -9618,7 +10470,8 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -9740,8 +10593,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct rte_eth_dev_data *dev_data = pf->dev_data;
- uint32_t frame_size = mtu + ETHER_HDR_LEN
- + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE;
+ uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
int ret = 0;
/* check if mtu is within the allowed range */
@@ -9750,8 +10602,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* mtu setting is forbidden if port is start */
if (dev_data->dev_started) {
- PMD_DRV_LOG(ERR,
- "port %d must be stopped before configuration\n",
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
dev_data->port_id);
return -EBUSY;
}
@@ -9765,3 +10616,230 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
}
+
+/* Restore ethertype filter */
+static void
+i40e_ethertype_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags;
+
+ TAILQ_FOREACH(f, ethertype_list, rules) {
+ flags = 0;
+ if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ i40e_aq_add_rem_control_packet_filter(hw,
+ f->input.mac_addr.addr_bytes,
+ f->input.ether_type,
+ flags, pf->main_vsi->seid,
+ f->queue, 1, &stats, NULL);
+ }
+ PMD_DRV_LOG(INFO, "Ethertype filter:"
+ " mac_etype_used = %u, etype_used = %u,"
+ " mac_etype_free = %u, etype_free = %u",
+ stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
+}
+
+/* Restore tunnel filter */
+static void
+i40e_tunnel_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_pf_vf *vf;
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+ bool big_buffer = 0;
+
+ TAILQ_FOREACH(f, tunnel_list, rules) {
+ if (!f->is_to_vf)
+ vsi = pf->main_vsi;
+ else {
+ vf = &pf->vfs[f->vf_id];
+ vsi = vf->vsi;
+ }
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
+ (struct ether_addr *)&cld_filter.element.outer_mac);
+ ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
+ (struct ether_addr *)&cld_filter.element.inner_mac);
+ cld_filter.element.inner_vlan = f->input.inner_vlan;
+ cld_filter.element.flags = f->input.flags;
+ cld_filter.element.tenant_id = f->input.tenant_id;
+ cld_filter.element.queue_number = f->queue;
+ rte_memcpy(cld_filter.general_fields,
+ f->input.general_fields,
+ sizeof(f->input.general_fields));
+
+ if (((f->input.flags &
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+ ((f->input.flags &
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+ ((f->input.flags &
+ I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
+ I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+ big_buffer = 1;
+
+ if (big_buffer)
+ i40e_aq_add_cloud_filters_big_buffer(hw,
+ vsi->seid, &cld_filter, 1);
+ else
+ i40e_aq_add_cloud_filters(hw, vsi->seid,
+ &cld_filter.element, 1);
+ }
+}
+
+static void
+i40e_filter_restore(struct i40e_pf *pf)
+{
+ i40e_ethertype_filter_restore(pf);
+ i40e_tunnel_filter_restore(pf);
+ i40e_fdir_filter_restore(pf);
+}
+
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
+{
+ if (strcmp(dev->data->drv_name,
+ drv->driver.name))
+ return false;
+
+ return true;
+}
+
+bool
+is_i40e_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_i40e_pmd);
+}
+
+/* Create a QinQ cloud filter
+ *
+ * The Fortville NIC has limited resources for tunnel filters,
+ * so we can only reuse existing filters.
+ *
+ * In step 1 we define which Field Vector fields can be used for
+ * filter types.
+ * As we do not have the inner tag defined as a field,
+ * we have to define it first, by reusing one of L1 entries.
+ *
+ * In step 2 we are replacing one of existing filter types with
+ * a new one for QinQ.
+ * As we reusing L1 and replacing L2, some of the default filter
+ * types will disappear,which depends on L1 and L2 entries we reuse.
+ *
+ * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
+ *
+ * 1. Create L1 filter of outer vlan (12b) which will be in use
+ * later when we define the cloud filter.
+ * a. Valid_flags.replace_cloud = 0
+ * b. Old_filter = 10 (Stag_Inner_Vlan)
+ * c. New_filter = 0x10
+ * d. TR bit = 0xff (optional, not used here)
+ * e. Buffer – 2 entries:
+ * i. Byte 0 = 8 (outer vlan FV index).
+ * Byte 1 = 0 (rsv)
+ * Byte 2-3 = 0x0fff
+ * ii. Byte 0 = 37 (inner vlan FV index).
+ * Byte 1 =0 (rsv)
+ * Byte 2-3 = 0x0fff
+ *
+ * Step 2:
+ * 2. Create cloud filter using two L1 filters entries: stag and
+ * new filter(outer vlan+ inner vlan)
+ * a. Valid_flags.replace_cloud = 1
+ * b. Old_filter = 1 (instead of outer IP)
+ * c. New_filter = 0x10
+ * d. Buffer – 2 entries:
+ * i. Byte 0 = 0x80 | 7 (valid | Stag).
+ * Byte 1-3 = 0 (rsv)
+ * ii. Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
+ * Byte 9-11 = 0 (rsv)
+ */
+static int
+i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
+{
+ int ret = -ENOTSUP;
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ /* Init */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace.tr_bit = 0;
+
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Field Vector 12b mask */
+ filter_replace_buf.data[2] = 0xff;
+ filter_replace_buf.data[3] = 0x0f;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Field Vector 12b mask */
+ filter_replace_buf.data[6] = 0xff;
+ filter_replace_buf.data[7] = 0x0f;
+ ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ /* Apply the second L2 cloud filter */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L2 filter, input for L2 filter will be L1 filter */
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ return ret;
+}
+
+RTE_INIT(i40e_init_log);
+static void
+i40e_init_log(void)
+{
+ i40e_logtype_init = rte_log_register("pmd.i40e.init");
+ if (i40e_logtype_init >= 0)
+ rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
+ i40e_logtype_driver = rte_log_register("pmd.i40e.driver");
+ if (i40e_logtype_driver >= 0)
+ rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 5f3ecd9a..2ff8282f 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,6 +37,8 @@
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
+#include <rte_hash.h>
+#include <rte_flow_driver.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -126,6 +128,7 @@ enum i40e_flxpld_layer_idx {
#define I40E_FLAG_FDIR (1ULL << 6)
#define I40E_FLAG_VXLAN (1ULL << 7)
#define I40E_FLAG_RSS_AQ_CAPABLE (1ULL << 8)
+#define I40E_FLAG_VF_MAC_BY_PF (1ULL << 9)
#define I40E_FLAG_ALL (I40E_FLAG_RSS | \
I40E_FLAG_DCB | \
I40E_FLAG_VMDQ | \
@@ -134,7 +137,8 @@ enum i40e_flxpld_layer_idx {
I40E_FLAG_HEADER_SPLIT_ENABLED | \
I40E_FLAG_FDIR | \
I40E_FLAG_VXLAN | \
- I40E_FLAG_RSS_AQ_CAPABLE)
+ I40E_FLAG_RSS_AQ_CAPABLE | \
+ I40E_FLAG_VF_MAC_BY_PF)
#define I40E_RSS_OFFLOAD_ALL ( \
ETH_RSS_FRAG_IPV4 | \
@@ -188,6 +192,72 @@ enum i40e_flxpld_layer_idx {
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
+#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
+ I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
+
+#define I40E_INSET_NONE 0x00000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define I40E_INSET_DMAC 0x0000000000000001ULL
+#define I40E_INSET_SMAC 0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
+#define I40E_INSET_DST_PORT 0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+ (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+ I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+ I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+
+/**
+ * The overhead from MTU to max frame size.
+ * Considering QinQ packet, the VLAN tag needs to be counted twice.
+ */
+#define I40E_ETH_OVERHEAD \
+ (ETHER_HDR_LEN + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2)
+
struct i40e_adapter;
/**
@@ -242,6 +312,7 @@ struct i40e_veb {
uint16_t stats_idx;
struct i40e_eth_stats stats;
uint8_t enabled_tc; /* The traffic class enabled */
+ uint8_t strict_prio_tc; /* bit map of TCs set to strict priority mode */
struct i40e_bw_info bw_info; /* VEB bandwidth information */
};
@@ -300,6 +371,8 @@ struct i40e_vsi {
uint16_t msix_intr; /* The MSIX interrupt binds to VSI */
uint16_t nb_msix; /* The max number of msix vector */
uint8_t enabled_tc; /* The traffic class enabled */
+ uint8_t vlan_anti_spoof_on; /* The VLAN anti-spoofing enabled */
+ uint8_t vlan_filter_on; /* The VLAN filter enabled */
struct i40e_bw_info bw_info; /* VSI bandwidth information */
};
@@ -376,6 +449,14 @@ struct i40e_fdir_flex_mask {
};
#define I40E_FILTER_PCTYPE_MAX 64
+#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
+
+struct i40e_fdir_filter {
+ TAILQ_ENTRY(i40e_fdir_filter) rules;
+ struct rte_eth_fdir_filter fdir;
+};
+
+TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
/*
* A structure used to define fields of a FDIR related info.
*/
@@ -394,6 +475,122 @@ struct i40e_fdir_info {
*/
struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+
+ struct i40e_fdir_filter_list fdir_list;
+ struct i40e_fdir_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
+/* Ethertype filter number HW supports */
+#define I40E_MAX_ETHERTYPE_FILTER_NUM 768
+
+/* Ethertype filter struct */
+struct i40e_ethertype_filter_input {
+ struct ether_addr mac_addr; /* Mac address to match */
+ uint16_t ether_type; /* Ether type to match */
+};
+
+struct i40e_ethertype_filter {
+ TAILQ_ENTRY(i40e_ethertype_filter) rules;
+ struct i40e_ethertype_filter_input input;
+ uint16_t flags; /* Flags from RTE_ETHTYPE_FLAGS_* */
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_ethertype_filter_list, i40e_ethertype_filter);
+
+struct i40e_ethertype_rule {
+ struct i40e_ethertype_filter_list ethertype_list;
+ struct i40e_ethertype_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
+/* Tunnel filter number HW supports */
+#define I40E_MAX_TUNNEL_FILTER_NUM 400
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 8
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 9
+#define I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ 0x10
+#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12
+#define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11
+
+enum i40e_tunnel_iptype {
+ I40E_TUNNEL_IPTYPE_IPV4,
+ I40E_TUNNEL_IPTYPE_IPV6,
+};
+
+/* Tunnel filter struct */
+struct i40e_tunnel_filter_input {
+ uint8_t outer_mac[6]; /* Outer mac address to match */
+ uint8_t inner_mac[6]; /* Inner mac address to match */
+ uint16_t inner_vlan; /* Inner vlan address to match */
+ enum i40e_tunnel_iptype ip_type;
+ uint16_t flags; /* Filter type flag */
+ uint32_t tenant_id; /* Tenant id to match */
+ uint16_t general_fields[32]; /* Big buffer */
+};
+
+struct i40e_tunnel_filter {
+ TAILQ_ENTRY(i40e_tunnel_filter) rules;
+ struct i40e_tunnel_filter_input input;
+ uint8_t is_to_vf; /* 0 - to PF, 1 - to VF */
+ uint16_t vf_id; /* VF id, avaiblable when is_to_vf is 1. */
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter);
+
+struct i40e_tunnel_rule {
+ struct i40e_tunnel_filter_list tunnel_list;
+ struct i40e_tunnel_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
+/**
+ * Tunnel type.
+ */
+enum i40e_tunnel_type {
+ I40E_TUNNEL_TYPE_NONE = 0,
+ I40E_TUNNEL_TYPE_VXLAN,
+ I40E_TUNNEL_TYPE_GENEVE,
+ I40E_TUNNEL_TYPE_TEREDO,
+ I40E_TUNNEL_TYPE_NVGRE,
+ I40E_TUNNEL_TYPE_IP_IN_GRE,
+ I40E_L2_TUNNEL_TYPE_E_TAG,
+ I40E_TUNNEL_TYPE_MPLSoUDP,
+ I40E_TUNNEL_TYPE_MPLSoGRE,
+ I40E_TUNNEL_TYPE_QINQ,
+ I40E_TUNNEL_TYPE_MAX,
+};
+
+/**
+ * Tunneling Packet filter configuration.
+ */
+struct i40e_tunnel_filter_conf {
+ struct ether_addr outer_mac; /**< Outer MAC address to match. */
+ struct ether_addr inner_mac; /**< Inner MAC address to match. */
+ uint16_t inner_vlan; /**< Inner VLAN to match. */
+ uint32_t outer_vlan; /**< Outer VLAN to match */
+ enum i40e_tunnel_iptype ip_type; /**< IP address type. */
+ /**
+ * Outer destination IP address to match if ETH_TUNNEL_FILTER_OIP
+ * is set in filter_type, or inner destination IP address to match
+ * if ETH_TUNNEL_FILTER_IIP is set in filter_type.
+ */
+ union {
+ uint32_t ipv4_addr; /**< IPv4 address in big endian. */
+ uint32_t ipv6_addr[4]; /**< IPv6 address in big endian. */
+ } ip_addr;
+ /** Flags from ETH_TUNNEL_FILTER_XX - see above. */
+ uint16_t filter_type;
+ enum i40e_tunnel_type tunnel_type; /**< Tunnel Type. */
+ uint32_t tenant_id; /**< Tenant ID to match. VNI, GRE key... */
+ uint16_t queue_id; /**< Queue assigned to if match. */
+ uint8_t is_to_vf; /**< 0 - to PF, 1 - to VF */
+ uint16_t vf_id; /**< VF id, avaiblable when is_to_vf is 1. */
};
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
@@ -418,6 +615,17 @@ struct i40e_mirror_rule {
TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
/*
+ * Struct to store flow created.
+ */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) node;
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+TAILQ_HEAD(i40e_flow_list, rte_flow);
+
+/*
* Structure to store private data specific for PF instance.
*/
struct i40e_pf {
@@ -466,12 +674,17 @@ struct i40e_pf {
struct i40e_vmdq_info *vmdq;
struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
+ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
bool floating_veb; /* The flag to use the floating VEB */
/* The floating enable flag for the specific VF */
bool floating_veb_list[I40E_MAX_VF];
+ struct i40e_flow_list flow_list;
+ bool mpls_replace_flag; /* 1 - MPLS filter replace is done */
+ bool qinq_replace_flag; /* QINQ filter replace is done */
};
enum pending_msg {
@@ -538,6 +751,8 @@ struct i40e_vf {
uint64_t flags;
};
+#define I40E_MAX_PKT_TYPE 256
+
/*
* Structure to store private data for each PF/VF instance.
*/
@@ -562,6 +777,29 @@ struct i40e_adapter {
struct rte_timecounter systime_tc;
struct rte_timecounter rx_tstamp_tc;
struct rte_timecounter tx_tstamp_tc;
+
+ /* ptype mapping table */
+ uint32_t ptype_tbl[I40E_MAX_PKT_TYPE] __rte_cache_min_aligned;
+};
+
+extern const struct rte_flow_ops i40e_flow_ops;
+
+union i40e_filter_t {
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_fdir_filter fdir_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
+ struct i40e_tunnel_filter_conf consistent_tunnel_filter;
+};
+
+typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+struct i40e_valid_pattern {
+ enum rte_flow_item_type *items;
+ parse_filter_t parse_filter;
};
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
@@ -605,6 +843,7 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
+void i40e_fdir_filter_restore(struct i40e_pf *pf);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
@@ -616,6 +855,46 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input);
+int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input);
+int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
+ struct rte_eth_fdir_input *input);
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input);
+int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input);
+uint64_t i40e_get_default_input_set(uint16_t pctype);
+int i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
+int i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
+int i40e_fdir_flush(struct rte_eth_dev *dev);
+int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num, struct ether_addr *addr);
+int i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total);
+void i40e_set_vlan_filter(struct i40e_vsi *vsi, uint16_t vlan_id, bool on);
+int i40e_add_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total);
+bool is_i40e_supported(struct rte_eth_dev *dev);
+
+#define I40E_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 640d316a..859b5e8f 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -55,6 +55,7 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
@@ -135,10 +136,10 @@ static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
uint16_t tx_queue_id);
static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
uint16_t tx_queue_id);
-static void i40evf_add_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *addr,
- uint32_t index,
- uint32_t pool);
+static int i40evf_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ uint32_t index,
+ uint32_t pool);
static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -151,6 +152,9 @@ static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
+static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
@@ -214,6 +218,8 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
.rx_descriptor_done = i40e_dev_rx_descriptor_done,
+ .rx_descriptor_status = i40e_dev_rx_descriptor_status,
+ .tx_descriptor_status = i40e_dev_tx_descriptor_status,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
.rx_queue_count = i40e_dev_rx_queue_count,
@@ -225,6 +231,8 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.reta_query = i40evf_dev_rss_reta_query,
.rss_hash_update = i40evf_dev_rss_hash_update,
.rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
+ .mtu_set = i40evf_dev_mtu_set,
+ .mac_addr_set = i40evf_set_default_mac_addr,
};
/*
@@ -640,7 +648,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES");
return ret;
}
@@ -693,7 +701,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
return ret;
}
@@ -719,7 +727,8 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
sizeof(struct i40e_virtchnl_vector_map)];
struct i40e_virtchnl_irq_map_info *map_info;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t vector_id;
int i, err;
@@ -846,7 +855,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
return 0;
}
-static void
+static int
i40evf_add_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *addr,
__rte_unused uint32_t index,
@@ -864,7 +873,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
addr->addr_bytes[0], addr->addr_bytes[1],
addr->addr_bytes[2], addr->addr_bytes[3],
addr->addr_bytes[4], addr->addr_bytes[5]);
- return;
+ return I40E_ERR_INVALID_MAC_ADDR;
}
list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
@@ -883,23 +892,20 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "fail to execute command "
"OP_ADD_ETHER_ADDRESS");
- return;
+ return err;
}
static void
-i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
{
struct i40e_virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct rte_eth_dev_data *data = dev->data;
- struct ether_addr *addr;
uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
sizeof(struct i40e_virtchnl_ether_addr)];
int err;
struct vf_cmd_info args;
- addr = &(data->mac_addrs[index]);
-
if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
addr->addr_bytes[0], addr->addr_bytes[1],
@@ -926,6 +932,17 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
return;
}
+static void
+i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct ether_addr *addr;
+
+ addr = &data->mac_addrs[index];
+
+ i40evf_del_mac_addr_by_addr(dev, addr);
+}
+
static int
i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
{
@@ -953,7 +970,7 @@ i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
}
static int
-i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+i40evf_get_statistics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
int ret;
struct i40e_eth_stats *pstats = NULL;
@@ -1088,7 +1105,6 @@ static const struct rte_pci_id pci_id_i40evf_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
- { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF_HV) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -1182,7 +1198,6 @@ i40evf_init_vf(struct rte_eth_dev *dev)
int i, err, bufsz;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct ether_addr *p_mac_addr;
uint16_t interval =
i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);
@@ -1259,9 +1274,8 @@ i40evf_init_vf(struct rte_eth_dev *dev)
vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Store the MAC address configured by host, or generate random one */
- p_mac_addr = (struct ether_addr *)(vf->vsi_res->default_mac_addr);
- if (is_valid_assigned_ether_addr(p_mac_addr)) /* Configured by host */
- ether_addr_copy(p_mac_addr, (struct ether_addr *)hw->mac.addr);
+ if (is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
+ vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
else
eth_random_addr(hw->mac.addr); /* Generate a random one */
@@ -1314,16 +1328,16 @@ i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
switch (pf_msg->event) {
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
break;
case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
vf->link_up = pf_msg->event_data.link_event.link_status;
vf->link_speed = pf_msg->event_data.link_event.link_speed;
break;
case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
break;
default:
PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
@@ -1385,7 +1399,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
"expect %u, get %u",
vf->pend_cmd, msg_opc);
PMD_DRV_LOG(DEBUG, "adminq response is received,"
- " opcode = %d\n", msg_opc);
+ " opcode = %d", msg_opc);
}
break;
default:
@@ -1409,8 +1423,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
* void
*/
static void
-i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+i40evf_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1423,31 +1436,31 @@ i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
/* No interrupt event indicated */
if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
- PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do\n");
+ PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
goto done;
}
if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
- PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported\n");
+ PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
i40evf_handle_aq_msg(dev);
}
/* Link Status Change interrupt */
if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"
- " do nothing\n");
+ " do nothing");
done:
i40evf_enable_irq0(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(dev->intr_handle);
}
static int
i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
- eth_dev->data->dev_private);
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+ struct i40e_hw *hw
+ = I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(eth_dev);
PMD_INIT_FUNC_TRACE();
@@ -1465,16 +1478,17 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
i40e_set_tx_function(eth_dev);
return 0;
}
-
- rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
-
- hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
- hw->device_id = eth_dev->pci_dev->id.device_id;
- hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
- hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
- hw->bus.device = eth_dev->pci_dev->addr.devid;
- hw->bus.func = eth_dev->pci_dev->addr.function;
- hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
+ i40e_set_default_ptype_table(eth_dev);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->bus.device = pci_dev->addr.devid;
+ hw->bus.func = pci_dev->addr.function;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->adapter_stopped = 0;
if(i40evf_init_vf(eth_dev) != 0) {
@@ -1530,23 +1544,32 @@ i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
+
+static int eth_i40evf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct i40e_adapter), i40evf_dev_init);
+}
+
+static int eth_i40evf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, i40evf_dev_uninit);
+}
+
/*
* virtual function driver struct
*/
-static struct eth_driver rte_i40evf_pmd = {
- .pci_drv = {
- .id_table = pci_id_i40evf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = i40evf_dev_init,
- .eth_dev_uninit = i40evf_dev_uninit,
- .dev_private_size = sizeof(struct i40e_adapter),
+static struct rte_pci_driver rte_i40evf_pmd = {
+ .id_table = pci_id_i40evf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_i40evf_pci_probe,
+ .remove = eth_i40evf_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio");
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
@@ -1861,7 +1884,8 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
I40E_WRITE_REG(hw,
@@ -1893,7 +1917,8 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
@@ -1919,7 +1944,8 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -1945,7 +1971,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
I40EVF_WRITE_FLUSH(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(&pci_dev->intr_handle);
return 0;
}
@@ -1953,7 +1979,8 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -1997,6 +2024,10 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
}
list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
+ if (!list) {
+ PMD_DRV_LOG(ERR, "fail to allocate memory");
+ return;
+ }
for (i = begin; i < next_begin; i++) {
addr = &dev->data->mac_addrs[i];
@@ -2033,7 +2064,8 @@ i40evf_dev_start(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
PMD_INIT_FUNC_TRACE();
@@ -2057,7 +2089,7 @@ i40evf_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (!intr_handle->intr_vec) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -2098,7 +2130,8 @@ err_queue:
static void
i40evf_dev_stop(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -2142,6 +2175,9 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
case I40E_LINK_SPEED_20GB:
new_link.link_speed = ETH_SPEED_NUM_20G;
break;
+ case I40E_LINK_SPEED_25GB:
+ new_link.link_speed = ETH_SPEED_NUM_25G;
+ break;
case I40E_LINK_SPEED_40GB:
new_link.link_speed = ETH_SPEED_NUM_40G;
break;
@@ -2225,6 +2261,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
memset(dev_info, 0, sizeof(*dev_info));
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
@@ -2285,15 +2322,16 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static void
i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- if (i40evf_get_statics(dev, stats))
- PMD_DRV_LOG(ERR, "Get statics failed");
+ if (i40evf_get_statistics(dev, stats))
+ PMD_DRV_LOG(ERR, "Get statistics failed");
}
static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
i40evf_dev_stop(dev);
hw->adapter_stopped = 1;
@@ -2301,11 +2339,11 @@ i40evf_dev_close(struct rte_eth_dev *dev)
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
/* disable uio intr before callback unregister */
- rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_disable(intr_handle);
/* unregister callback func from eal lib */
- rte_intr_callback_unregister(&pci_dev->intr_handle,
- i40evf_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_unregister(intr_handle,
+ i40evf_dev_interrupt_handler, dev);
i40evf_disable_irq0(hw);
}
@@ -2382,7 +2420,7 @@ i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size != ETH_RSS_RETA_SIZE_64) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number of hardware can "
- "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
return -EINVAL;
}
@@ -2421,7 +2459,7 @@ i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size != ETH_RSS_RETA_SIZE_64) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number of hardware can "
- "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
return -EINVAL;
}
@@ -2566,7 +2604,7 @@ i40evf_config_rss(struct i40e_vf *vf)
if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
i40evf_disable_rss(vf);
- PMD_DRV_LOG(DEBUG, "RSS not configured\n");
+ PMD_DRV_LOG(DEBUG, "RSS not configured");
return 0;
}
@@ -2583,7 +2621,7 @@ i40evf_config_rss(struct i40e_vf *vf)
rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
i40evf_disable_rss(vf);
- PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
+ PMD_DRV_LOG(DEBUG, "No hash flag is set");
return 0;
}
@@ -2643,3 +2681,54 @@ i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
+
+static int
+i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = vf->dev_data;
+ uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
+ int ret = 0;
+
+ /* check if mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ return -EINVAL;
+
+ /* mtu setting is forbidden if port is start */
+ if (dev_data->dev_started) {
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+ dev_data->port_id);
+ return -EBUSY;
+ }
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev_data->dev_conf.rxmode.jumbo_frame = 0;
+
+ dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return ret;
+}
+
+static void
+i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ if (!is_valid_assigned_ether_addr(mac_addr)) {
+ PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
+ return;
+ }
+
+ if (is_same_ether_addr(mac_addr, dev->data->mac_addrs))
+ return;
+
+ if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
+ return;
+
+ i40evf_del_mac_addr_by_addr(dev, dev->data->mac_addrs);
+
+ i40evf_add_mac_addr(dev, mac_addr, 0, 0);
+}
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 335bf15c..28cc554f 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -119,7 +119,13 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
-static int i40e_fdir_flush(struct rte_eth_dev *dev);
+static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter);
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input);
+static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -251,7 +257,7 @@ i40e_fdir_setup(struct i40e_pf *pf)
/* reserve memory for the fdir programming packet */
snprintf(z_name, sizeof(z_name), "%s_%s_%d",
- eth_dev->driver->pci_drv.driver.name,
+ eth_dev->data->drv_name,
I40E_FDIR_MZ_NAME,
eth_dev->data->port_id);
mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
@@ -1017,13 +1023,81 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
return ret;
}
+static int
+i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter)
+{
+ rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+ return 0;
+}
+
+/* Check if there exists the flow director filter */
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+/* Add a flow director filter into the SW list */
+static int
+i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ fdir_info->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete a flow director filter from the SW list */
+int
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(fdir_info->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* i40e_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
* @filter: fdir filter entry
* @add: 0 - delete, 1 - add
*/
-static int
+int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add)
@@ -1032,6 +1106,9 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter, *node;
+ struct i40e_fdir_filter check_filter; /* Check if the filter exists */
int ret = 0;
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1054,6 +1131,22 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
+ /* Check if there is the filter in SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_fdir_filter_convert(filter, &check_filter);
+ node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR,
+ "Conflict with existing flow director rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR,
+ "There's no corresponding flow firector filter!");
+ return -EINVAL;
+ }
+
memset(pkt, 0, I40E_FDIR_PKT_LEN);
ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
@@ -1077,6 +1170,16 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
pctype);
return ret;
}
+
+ if (add) {
+ fdir_filter = rte_zmalloc("fdir_filter",
+ sizeof(*fdir_filter), 0);
+ rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+ } else {
+ ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
+ }
+
return ret;
}
@@ -1220,7 +1323,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
* i40e_fdir_flush - clear all filters of Flow Director table
* @pf: board private structure
*/
-static int
+int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -1481,3 +1584,30 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* Restore flow director filter */
+void
+i40e_fdir_filter_restore(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
+ struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
+ struct i40e_fdir_filter *f;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t fdstat;
+ uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
+ uint32_t best_cnt; /**< Number of filters in best effort spaces. */
+
+ TAILQ_FOREACH(f, fdir_list, rules)
+ i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+
+ fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+ guarant_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+
+ PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
+ guarant_cnt, best_cnt);
+}
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
new file mode 100644
index 00000000..24e1c658
--- /dev/null
+++ b/drivers/net/i40e/i40e_flow.c
@@ -0,0 +1,2258 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_type.h"
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+
+#define I40E_IPV4_TC_SHIFT 4
+#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_FRAG_HEADER 44
+#define I40E_TENANT_ARRAY_NUM 3
+#define I40E_TCI_MASK 0xFFFF
+
+static int i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static int i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+static int i40e_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter);
+static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter);
+static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
+static int
+i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int
+i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter);
+
+const struct rte_flow_ops i40e_flow_ops = {
+ .validate = i40e_flow_validate,
+ .create = i40e_flow_create,
+ .destroy = i40e_flow_destroy,
+ .flush = i40e_flow_flush,
+};
+
+union i40e_filter_t cons_filter;
+enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
+
+/* Pattern matched ethertype filter */
+static enum rte_flow_item_type pattern_ethertype[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched flow director filter */
+static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched tunnel filter */
+static enum rte_flow_item_type pattern_vxlan_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched MPLS */
+static enum rte_flow_item_type pattern_mpls_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_GRE,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_GRE,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched QINQ */
+static enum rte_flow_item_type pattern_qinq_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct i40e_valid_pattern i40e_supported_patterns[] = {
+ /* Ethertype */
+ { pattern_ethertype, i40e_flow_parse_ethertype_filter },
+ /* FDIR */
+ { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
+ /* VXLAN */
+ { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
+ { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
+ { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
+ { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
+ /* MPLSoUDP & MPLSoGRE */
+ { pattern_mpls_1, i40e_flow_parse_mpls_filter },
+ { pattern_mpls_2, i40e_flow_parse_mpls_filter },
+ { pattern_mpls_3, i40e_flow_parse_mpls_filter },
+ { pattern_mpls_4, i40e_flow_parse_mpls_filter },
+ /* QINQ */
+ { pattern_qinq_1, i40e_flow_parse_qinq_filter },
+};
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+i40e_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = i40e_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = i40e_find_first_item(pb + 1, true);
+
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+ pb = pe;
+ break;
+ }
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+i40e_match_pattern(enum rte_flow_item_type *item_array,
+ struct rte_flow_item *pattern)
+{
+ struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+/* Find if there's parse filter function matched */
+static parse_filter_t
+i40e_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+ parse_filter_t parse_filter = NULL;
+ uint8_t i = 0;
+
+ for (; i < RTE_DIM(i40e_supported_patterns); i++) {
+ if (i40e_match_pattern(i40e_supported_patterns[i].items,
+ pattern)) {
+ parse_filter = i40e_supported_patterns[i].parse_filter;
+ break;
+ }
+ }
+
+ return parse_filter;
+}
+
+/* Parse attributes */
+static int
+i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static uint16_t
+i40e_get_outer_vlan(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ uint64_t reg_r = 0;
+ uint16_t reg_id;
+ uint16_t tpid;
+
+ if (qinq)
+ reg_id = 2;
+ else
+ reg_id = 3;
+
+ i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ &reg_r, NULL);
+
+ tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
+
+ return tpid;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
+ * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
+ * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
+ * FF:FF:FF:FF:FF:FF
+ * 5. Ether_type mask should be 0xFFFF.
+ */
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ enum rte_flow_item_type item_type;
+ uint16_t outer_tpid;
+
+ outer_tpid = i40e_get_outer_vlan(dev);
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ /* Get the MAC info. */
+ if (!eth_spec || !eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL ETH spec/mask");
+ return -rte_errno;
+ }
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6 ||
+ filter->ether_type == ETHER_TYPE_LLDP ||
+ filter->ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type in"
+ " control packet filter.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* Ethertype action only supports QUEUE or DROP. */
+static int
+i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for"
+ " ethertype_filter.");
+ return -rte_errno;
+ }
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_ethertype_filter *ethertype_filter =
+ &filter->ethertype_filter;
+ int ret;
+
+ ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_ethertype_action(dev, actions, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported flow type and input set: refer to array
+ * default_inset_table in i40e_ethdev.c.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_vf *vf_spec;
+ uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set = I40E_INSET_NONE;
+ uint16_t flag_offset;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ uint32_t j;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if (eth_spec || eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH spec/mask");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec =
+ (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_mask =
+ (const struct rte_flow_item_ipv4 *)item->mask;
+ if (!ipv4_spec || !ipv4_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv4 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_PROTO;
+
+ /* Get filter info */
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+ /* Check if it is fragment. */
+ flag_offset =
+ rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
+ if (flag_offset & IPV4_HDR_OFFSET_MASK ||
+ flag_offset & IPV4_HDR_MF_FLAG)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+ /* Get the filter info */
+ filter->input.flow.ip4_flow.proto =
+ ipv4_spec->hdr.next_proto_id;
+ filter->input.flow.ip4_flow.tos =
+ ipv4_spec->hdr.type_of_service;
+ filter->input.flow.ip4_flow.ttl =
+ ipv4_spec->hdr.time_to_live;
+ filter->input.flow.ip4_flow.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.ip4_flow.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+ if (!ipv6_spec || !ipv6_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv6 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+
+ /* SCR and DST address of IPv6 shouldn't be masked */
+ for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
+ if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
+ ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+ }
+
+ input_set |= I40E_INSET_IPV6_SRC;
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+ /* Get filter info */
+ filter->input.flow.ipv6_flow.tc =
+ (uint8_t)(ipv6_spec->hdr.vtc_flow <<
+ I40E_IPV4_TC_SHIFT);
+ filter->input.flow.ipv6_flow.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.flow.ipv6_flow.hop_limits =
+ ipv6_spec->hdr.hop_limits;
+
+ rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /* Check if it is fragment. */
+ if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV6;
+ else
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!tcp_spec || !tcp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL TCP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (tcp_mask->hdr.src_port != UINT16_MAX ||
+ tcp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.tcp4_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp4_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.tcp6_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp6_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (!udp_spec || !udp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL UDP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check UDP mask and update input set*/
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (udp_mask->hdr.src_port != UINT16_MAX ||
+ udp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.udp4_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp4_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.udp6_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp6_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec =
+ (const struct rte_flow_item_sctp *)item->spec;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (!sctp_spec || !sctp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL SCTP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port != UINT16_MAX ||
+ sctp_mask->hdr.dst_port != UINT16_MAX ||
+ sctp_mask->hdr.tag != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+ input_set |= I40E_INSET_SCTP_VT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.sctp4_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp4_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp4_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.sctp6_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp6_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp6_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VF:
+ vf_spec = (const struct rte_flow_item_vf *)item->spec;
+ filter->input.flow_ext.is_vf = 1;
+ filter->input.flow_ext.dst_id = vf_spec->id;
+ if (filter->input.flow_ext.is_vf &&
+ filter->input.flow_ext.dst_id >= pf->vf_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VF ID for FDIR.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ pctype = i40e_flowtype_to_pctype(flow_type);
+ if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported flow type");
+ return -rte_errno;
+ }
+
+ if (input_set != i40e_get_default_input_set(pctype)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid input set.");
+ return -rte_errno;
+ }
+ filter->input.flow_type = flow_type;
+
+ return 0;
+}
+
+/* Parse to get the action info of a FDIR filter.
+ * FDIR action supports QUEUE or (QUEUE + MARK).
+ */
+static int
+i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark_spec;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->action.flex_off = 0;
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE)
+ filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+ else
+ filter->action.behavior = RTE_ETH_FDIR_REJECT;
+
+ filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+ filter->action.rx_queue = act_q->index;
+
+ if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue ID for FDIR.");
+ return -rte_errno;
+ }
+
+ /* Check if the next non-void item is MARK or END. */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+ act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ filter->soft_id = mark_spec->id;
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_fdir_filter *fdir_filter =
+ &filter->fdir_filter;
+ int ret;
+
+ ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_FDIR;
+
+ if (dev->data->dev_conf.fdir_conf.mode !=
+ RTE_FDIR_MODE_PERFECT) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Check the mode in fdir_conf.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/* Parse to get the action info of a tunnel filter
+ * Tunnel action only supports PF, VF and QUEUE.
+ */
+static int
+i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_vf *act_vf;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is PF or VF. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
+ act->type != RTE_FLOW_ACTION_TYPE_VF) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
+ act_vf = (const struct rte_flow_action_vf *)act->conf;
+ filter->vf_id = act_vf->id;
+ filter->is_to_vf = 1;
+ if (filter->vf_id >= pf->vf_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid VF ID for tunnel filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Check if the next non-void item is QUEUE */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue_id = act_q->index;
+ if ((!filter->is_to_vf) &&
+ (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for tunnel filter");
+ return -rte_errno;
+ } else if (filter->is_to_vf &&
+ (filter->queue_id >= pf->vf_nb_qps)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for tunnel filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_check_tenant_id_mask(const uint8_t *mask)
+{
+ uint32_t j;
+ int is_masked = 0;
+
+ for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
+ if (*(mask + j) == UINT8_MAX) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 0;
+ } else if (*(mask + j) == 0) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 1;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return is_masked;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_eth *o_eth_spec = NULL;
+ const struct rte_flow_item_eth *o_eth_mask = NULL;
+ const struct rte_flow_item_vxlan *vxlan_spec = NULL;
+ const struct rte_flow_item_vxlan *vxlan_mask = NULL;
+ const struct rte_flow_item_eth *i_eth_spec = NULL;
+ const struct rte_flow_item_eth *i_eth_mask = NULL;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ bool is_vni_masked = 0;
+ enum rte_flow_item_type item_type;
+ bool vxlan_flag = 0;
+ uint32_t tenant_id_be = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ /* DST address of inner MAC shouldn't be masked.
+ * SRC address of Inner MAC should be masked.
+ */
+ if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+ !is_zero_ether_addr(&eth_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!vxlan_flag)
+ rte_memcpy(&filter->outer_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ else
+ rte_memcpy(&filter->inner_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ }
+
+ if (!vxlan_flag) {
+ o_eth_spec = eth_spec;
+ o_eth_mask = eth_mask;
+ } else {
+ i_eth_spec = eth_spec;
+ i_eth_mask = eth_mask;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vxlan_flag) {
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ } else {
+ if (vlan_spec || vlan_mask)
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* UDP is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec =
+ (const struct rte_flow_item_vxlan *)item->spec;
+ vxlan_mask =
+ (const struct rte_flow_item_vxlan *)item->mask;
+ /* Check if VXLAN item is used to describe protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, either spec or mask shouldn't be NULL.
+ */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_mask) {
+ is_vni_masked =
+ i40e_check_tenant_id_mask(vxlan_mask->vni);
+ if (is_vni_masked < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VNI mask");
+ return -rte_errno;
+ }
+ }
+ vxlan_flag = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Check specification and mask to get the filter type */
+ if (vlan_spec && vlan_mask &&
+ (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ /* If there's inner vlan */
+ filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
+ & I40E_TCI_MASK;
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if ((!vlan_spec && !vlan_mask) ||
+ (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+ /* If there's no inner vlan */
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_TENID;
+ else if (o_eth_spec && o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask) {
+ filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Not supported by tunnel filter.");
+ return -rte_errno;
+ }
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: MPLS label.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_mpls *mpls_spec;
+ const struct rte_flow_item_mpls *mpls_mask;
+ enum rte_flow_item_type item_type;
+ bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
+ const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
+ uint32_t label_be = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* UDP is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP item");
+ return -rte_errno;
+ }
+ is_mplsoudp = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ /* GRE is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GRE item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ mpls_spec =
+ (const struct rte_flow_item_mpls *)item->spec;
+ mpls_mask =
+ (const struct rte_flow_item_mpls *)item->mask;
+
+ if (!mpls_spec || !mpls_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MPLS item");
+ return -rte_errno;
+ }
+
+ if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MPLS label mask");
+ return -rte_errno;
+ }
+ rte_memcpy(((uint8_t *)&label_be + 1),
+ mpls_spec->label_tc_s, 3);
+ filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (is_mplsoudp)
+ filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
+ else
+ filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_mpls_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: QINQ.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which needn't to be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ const struct rte_flow_item_vlan *i_vlan_spec = NULL;
+ const struct rte_flow_item_vlan *i_vlan_mask = NULL;
+ const struct rte_flow_item_vlan *o_vlan_spec = NULL;
+ const struct rte_flow_item_vlan *o_vlan_mask = NULL;
+
+ enum rte_flow_item_type item_type;
+ bool vlan_flag = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+
+ if (!vlan_flag) {
+ o_vlan_spec = vlan_spec;
+ o_vlan_mask = vlan_mask;
+ vlan_flag = 1;
+ } else {
+ i_vlan_spec = vlan_spec;
+ i_vlan_mask = vlan_mask;
+ vlan_flag = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /* Get filter specification */
+ if ((o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
+ (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
+ & I40E_TCI_MASK;
+ filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
+ & I40E_TCI_MASK;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
+ return 0;
+}
+
+static int
+i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_qinq_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+static int
+i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow_item *items; /* internal pattern w/o VOID items */
+ parse_filter_t parse_filter;
+ uint32_t item_num = 0; /* non-void item number of pattern*/
+ uint32_t i = 0;
+ int ret;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ memset(&cons_filter, 0, sizeof(cons_filter));
+
+ /* Get the non-void item number of pattern */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
+ item_num++;
+
+ items = rte_zmalloc("i40e_pattern",
+ item_num * sizeof(struct rte_flow_item), 0);
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for PMD internal items.");
+ return -ENOMEM;
+ }
+
+ i40e_pattern_skip_void_item(items, pattern);
+
+ /* Find if there's matched parse filter function */
+ parse_filter = i40e_find_parse_filter_func(items);
+ if (!parse_filter) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ return -rte_errno;
+ }
+
+ ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
+
+ rte_free(items);
+
+ return ret;
+}
+
+static struct rte_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_flow *flow;
+ int ret;
+
+ flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return flow;
+ }
+
+ ret = i40e_flow_validate(dev, attr, pattern, actions, error);
+ if (ret < 0)
+ return NULL;
+
+ switch (cons_filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_set(pf,
+ &cons_filter.ethertype_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+ i40e_ethertype_filter_list);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &cons_filter.fdir_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+ i40e_fdir_filter_list);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_consistent_tunnel_filter_set(pf,
+ &cons_filter.consistent_tunnel_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+ i40e_tunnel_filter_list);
+ break;
+ default:
+ goto free_flow;
+ }
+
+ flow->filter_type = cons_filter_type;
+ TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+ return flow;
+
+free_flow:
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ return NULL;
+}
+
+static int
+i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum rte_filter_type filter_type = flow->filter_type;
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_flow_destroy_ethertype_filter(pf,
+ (struct i40e_ethertype_filter *)flow->rule);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_flow_destroy_tunnel_filter(pf,
+ (struct i40e_tunnel_filter *)flow->rule);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ } else
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+
+ return ret;
+}
+
+static int
+i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *node;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret = 0;
+
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->input.mac_addr.addr_bytes,
+ filter->input.ether_type,
+ flags, pf->main_vsi->seid,
+ filter->queue, 0, &stats, NULL);
+ if (ret < 0)
+ return ret;
+
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+
+ return ret;
+}
+
+static int
+i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_pf_vf *vf;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *node;
+ bool big_buffer = 0;
+ int ret = 0;
+
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
+ (struct ether_addr *)&cld_filter.element.outer_mac);
+ ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
+ (struct ether_addr *)&cld_filter.element.inner_mac);
+ cld_filter.element.inner_vlan = filter->input.inner_vlan;
+ cld_filter.element.flags = filter->input.flags;
+ cld_filter.element.tenant_id = filter->input.tenant_id;
+ cld_filter.element.queue_number = filter->queue;
+ rte_memcpy(cld_filter.general_fields,
+ filter->input.general_fields,
+ sizeof(cld_filter.general_fields));
+
+ if (!filter->is_to_vf)
+ vsi = pf->main_vsi;
+ else {
+ vf = &pf->vfs[filter->vf_id];
+ vsi = vf->vsi;
+ }
+
+ if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+ ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+ ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
+ I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+ big_buffer = 1;
+
+ if (big_buffer)
+ ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
+ &cld_filter, 1);
+ else
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter.element, 1);
+ if (ret < 0)
+ return -ENOTSUP;
+
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+
+ return ret;
+}
+
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret;
+
+ ret = i40e_flow_flush_fdir_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush FDIR flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_ethertype_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to ethertype flush flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_tunnel_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush tunnel flows.");
+ return -rte_errno;
+ }
+
+ return ret;
+}
+
+static int
+i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret;
+
+ ret = i40e_fdir_flush(dev);
+ if (!ret) {
+ /* Delete FDIR filters in FDIR list. */
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ ret = i40e_sw_fdir_filter_del(pf,
+ &fdir_filter->fdir.input);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Delete FDIR flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+ }
+
+ return ret;
+}
+
+/* Flush all ethertype filters */
+static int
+i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(ethertype_list))) {
+ ret = i40e_flow_destroy_ethertype_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete ethertype flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
+
+/* Flush all tunnel filters */
+static int
+i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(tunnel_list))) {
+ ret = i40e_flow_destroy_tunnel_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete tunnel flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/net/i40e/i40e_logs.h b/drivers/net/i40e/i40e_logs.h
index e042e242..8e99cd52 100644
--- a/drivers/net/i40e/i40e_logs.h
+++ b/drivers/net/i40e/i40e_logs.h
@@ -34,14 +34,11 @@
#ifndef _I40E_LOGS_H_
#define _I40E_LOGS_H_
+extern int i40e_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_I40E_DEBUG_INIT
+ rte_log(RTE_LOG_ ## level, i40e_logtype_init, "%s(): " fmt "\n", \
+ __func__, ##args)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while(0)
-#endif
#ifdef RTE_LIBRTE_I40E_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
@@ -64,12 +61,10 @@
#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
#endif
-#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+extern int i40e_logtype_driver;
#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
+ rte_log(RTE_LOG_ ## level, i40e_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
#define PMD_DRV_LOG(level, fmt, args...) \
PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 97b8eccc..0758503e 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,6 +55,7 @@
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
+#include "rte_pmd_i40e.h"
#define I40E_CFG_CRCSTRIP_DEFAULT 1
@@ -274,14 +275,30 @@ i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
}
static void
-i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf)
+i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
{
struct i40e_virtchnl_version_info info;
- info.major = I40E_DPDK_VERSION_MAJOR;
- info.minor = I40E_DPDK_VERSION_MINOR;
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
- I40E_SUCCESS, (uint8_t *)&info, sizeof(info));
+ /* Respond like a Linux PF host in order to support both DPDK VF and
+ * Linux VF driver. The expense is original DPDK host specific feature
+ * like CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT will not available.
+ *
+ * DPDK VF also can't identify host driver by version number returned.
+ * It always assume talking with Linux PF.
+ */
+ info.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+
+ if (b_op)
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_SUCCESS,
+ (uint8_t *)&info,
+ sizeof(info));
+ else
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_NOT_SUPPORTED,
+ (uint8_t *)&info,
+ sizeof(info));
}
static int
@@ -294,13 +311,20 @@ i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
}
static int
-i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
+i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
struct i40e_virtchnl_vf_resource *vf_res = NULL;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
uint32_t len = 0;
int ret = I40E_SUCCESS;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(vf,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
/* only have 1 VSI by default */
len = sizeof(struct i40e_virtchnl_vf_resource) +
I40E_DEFAULT_VF_VSI_NUM *
@@ -323,8 +347,7 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
/* Change below setting if PF host can support more VSIs for VF */
vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
- /* As assume Vf only has single VSI now, always return 0 */
- vf_res->vsi_res[0].vsi_id = 0;
+ vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
ether_addr_copy(&vf->mac_addr,
(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
@@ -382,6 +405,29 @@ i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
return err;
}
+static inline uint8_t
+i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi,
+ uint16_t queue_id)
+{
+ struct i40e_aqc_vsi_properties_data *info = &vsi->info;
+ uint16_t bsf, qp_idx;
+ uint8_t i;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & (1 << i)) {
+ qp_idx = rte_le_to_cpu_16((info->tc_mapping[i] &
+ I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT);
+ bsf = rte_le_to_cpu_16((info->tc_mapping[i] &
+ I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+ if (queue_id >= qp_idx && queue_id < qp_idx + (1 << bsf))
+ return i;
+ }
+ }
+ return 0;
+}
+
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
struct i40e_pf_vf *vf,
@@ -389,16 +435,20 @@ i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
{
int err = I40E_SUCCESS;
struct i40e_hmc_obj_txq tx_ctx;
+ struct i40e_vsi *vsi = vf->vsi;
uint32_t qtx_ctl;
- uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id;
-
+ uint16_t abs_queue_id = vsi->base_queue + txq->queue_id;
+ uint8_t dcb_tc;
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(tx_ctx));
- tx_ctx.new_context = 1;
tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->ring_len;
- tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
+ dcb_tc = i40e_vsi_get_tc_of_queue(vsi, txq->queue_id);
+ tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[dcb_tc]);
+ tx_ctx.head_wb_ena = txq->headwb_enabled;
+ tx_ctx.head_wb_addr = txq->dma_headwb_addr;
+
err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
if (err != I40E_SUCCESS)
return err;
@@ -425,7 +475,8 @@ i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
@@ -434,11 +485,18 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
struct i40e_virtchnl_queue_pair_info *vc_qpi;
int i, ret = I40E_SUCCESS;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
vc_vqci->num_queue_pairs)) {
- PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong\n");
+ PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
ret = I40E_ERR_PARAM;
goto send_msg;
}
@@ -484,7 +542,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
@@ -493,11 +552,19 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
int i, ret = I40E_SUCCESS;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
vc_vqcei->num_queue_pairs)) {
- PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong\n");
+ PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
ret = I40E_ERR_PARAM;
goto send_msg;
}
@@ -539,13 +606,125 @@ send_msg:
return ret;
}
+static void
+i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
+ struct i40e_virtchnl_vector_map *vvm)
+{
+#define BITS_PER_CHAR 8
+ uint64_t linklistmap = 0, tempmap;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t qid;
+ bool b_first_q = true;
+ enum i40e_queue_type qtype;
+ uint16_t vector_id;
+ uint32_t reg, reg_idx;
+ uint16_t itr_idx = 0, i;
+
+ vector_id = vvm->vector_id;
+ /* setup the head */
+ if (!vector_id)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_idx);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(
+ ((hw->func_caps.num_msix_vectors_vf - 1) * vf->vf_idx)
+ + (vector_id - 1));
+
+ if (vvm->rxq_map == 0 && vvm->txq_map == 0) {
+ I40E_WRITE_REG(hw, reg_idx,
+ I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
+ goto cfg_irq_done;
+ }
+
+ /* sort all rx and tx queues */
+ tempmap = vvm->rxq_map;
+ for (i = 0; i < sizeof(vvm->rxq_map) * BITS_PER_CHAR; i++) {
+ if (tempmap & 0x1)
+ linklistmap |= (1 << (2 * i));
+ tempmap >>= 1;
+ }
+
+ tempmap = vvm->txq_map;
+ for (i = 0; i < sizeof(vvm->txq_map) * BITS_PER_CHAR; i++) {
+ if (tempmap & 0x1)
+ linklistmap |= (1 << (2 * i + 1));
+ tempmap >>= 1;
+ }
+
+ /* Link all rx and tx queues into a chained list */
+ tempmap = linklistmap;
+ i = 0;
+ b_first_q = true;
+ do {
+ if (tempmap & 0x1) {
+ qtype = (enum i40e_queue_type)(i % 2);
+ qid = vf->vsi->base_queue + i / 2;
+ if (b_first_q) {
+ /* This is header */
+ b_first_q = false;
+ reg = ((qtype <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+ | qid);
+ } else {
+ /* element in the link list */
+ reg = (vector_id) |
+ (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (qid << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ }
+ I40E_WRITE_REG(hw, reg_idx, reg);
+ /* find next register to program */
+ switch (qtype) {
+ case I40E_QUEUE_TYPE_RX:
+ reg_idx = I40E_QINT_RQCTL(qid);
+ itr_idx = vvm->rxitr_idx;
+ break;
+ case I40E_QUEUE_TYPE_TX:
+ reg_idx = I40E_QINT_TQCTL(qid);
+ itr_idx = vvm->txitr_idx;
+ break;
+ default:
+ break;
+ }
+ }
+ i++;
+ tempmap >>= 1;
+ } while (tempmap);
+
+ /* Terminate the link list */
+ reg = (vector_id) |
+ (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ I40E_WRITE_REG(hw, reg_idx, reg);
+
+cfg_irq_done:
+ I40E_WRITE_FLUSH(hw);
+}
+
static int
i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
- uint8_t *msg, uint16_t msglen)
+ uint8_t *msg, uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_virtchnl_irq_map_info *irqmap =
(struct i40e_virtchnl_irq_map_info *)msg;
+ struct i40e_virtchnl_vector_map *map;
+ int i;
+ uint16_t vector_id;
+ unsigned long qbit_max;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
PMD_DRV_LOG(ERR, "buffer too short");
@@ -553,23 +732,46 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
goto send_msg;
}
- /* Assume VF only have 1 vector to bind all queues */
- if (irqmap->num_vectors != 1) {
- PMD_DRV_LOG(ERR, "DKDK host only support 1 vector");
- ret = I40E_ERR_PARAM;
+ /* PF host will support both DPDK VF or Linux VF driver, identify by
+ * number of vectors requested.
+ */
+
+ /* DPDK VF only requires single vector */
+ if (irqmap->num_vectors == 1) {
+ /* This MSIX intr store the intr in VF range */
+ vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
+ vf->vsi->nb_msix = irqmap->num_vectors;
+ vf->vsi->nb_used_qps = vf->vsi->nb_qps;
+
+ /* Don't care how the TX/RX queue mapping with this vector.
+ * Link all VF RX queues together. Only did mapping work.
+ * VF can disable/enable the intr by itself.
+ */
+ i40e_vsi_queues_bind_intr(vf->vsi);
goto send_msg;
}
- /* This MSIX intr store the intr in VF range */
- vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
- vf->vsi->nb_msix = irqmap->num_vectors;
- vf->vsi->nb_used_qps = vf->vsi->nb_qps;
+ /* Then, it's Linux VF driver */
+ qbit_max = 1 << pf->vf_nb_qp_max;
+ for (i = 0; i < irqmap->num_vectors; i++) {
+ map = &irqmap->vecmap[i];
+
+ vector_id = map->vector_id;
+ /* validate msg params */
+ if (vector_id >= hw->func_caps.num_msix_vectors_vf) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {
+ i40e_pf_config_irq_link_list(vf, map);
+ } else {
+ /* configured queue size excceed limit */
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ }
- /* Don't care how the TX/RX queue mapping with this vector.
- * Link all VF RX queues together. Only did mapping work.
- * VF can disable/enable the intr by itself.
- */
- i40e_vsi_queues_bind_intr(vf->vsi);
send_msg:
i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
ret, NULL, 0);
@@ -648,12 +850,21 @@ send_msg:
static int
i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_queue_select *q_sel =
(struct i40e_virtchnl_queue_select *)msg;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*q_sel)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -671,7 +882,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_ether_addr_list *addr_list =
@@ -680,6 +892,14 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
int i;
struct ether_addr *mac;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
memset(&filter, 0 , sizeof(struct i40e_mac_filter_info));
if (msg == NULL || msglen <= sizeof(*addr_list)) {
@@ -692,8 +912,8 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
mac = (struct ether_addr *)(addr_list->list[i].addr);
(void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
- if(!is_valid_assigned_ether_addr(mac) ||
- i40e_vsi_add_mac(vf->vsi, &filter)) {
+ if (is_zero_ether_addr(mac) ||
+ i40e_vsi_add_mac(vf->vsi, &filter)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
goto send_msg;
}
@@ -709,7 +929,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_ether_addr_list *addr_list =
@@ -717,6 +938,14 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
int i;
struct ether_addr *mac;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen <= sizeof(*addr_list)) {
PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
ret = I40E_ERR_PARAM;
@@ -725,7 +954,7 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
for (i = 0; i < addr_list->num_elements; i++) {
mac = (struct ether_addr *)(addr_list->list[i].addr);
- if(!is_valid_assigned_ether_addr(mac) ||
+ if(is_zero_ether_addr(mac) ||
i40e_vsi_delete_mac(vf->vsi, mac)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
goto send_msg;
@@ -741,7 +970,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
- uint8_t *msg, uint16_t msglen)
+ uint8_t *msg, uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
@@ -749,6 +979,14 @@ i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
int i;
uint16_t *vid;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
PMD_DRV_LOG(ERR, "add_vlan argument too short");
ret = I40E_ERR_PARAM;
@@ -773,7 +1011,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
@@ -781,6 +1020,14 @@ i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
int i;
uint16_t *vid;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
PMD_DRV_LOG(ERR, "delete_vlan argument too short");
ret = I40E_ERR_PARAM;
@@ -805,7 +1052,8 @@ static int
i40e_pf_host_process_cmd_config_promisc_mode(
struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_promisc_info *promisc =
@@ -813,6 +1061,14 @@ i40e_pf_host_process_cmd_config_promisc_mode(
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
bool unicast = FALSE, multicast = FALSE;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*promisc)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -838,13 +1094,20 @@ send_msg:
}
static int
-i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf)
+i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
{
i40e_update_vsi_stats(vf->vsi);
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
- I40E_SUCCESS, (uint8_t *)&vf->vsi->eth_stats,
- sizeof(vf->vsi->eth_stats));
+ if (b_op)
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_SUCCESS,
+ (uint8_t *)&vf->vsi->eth_stats,
+ sizeof(vf->vsi->eth_stats));
+ else
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_NOT_SUPPORTED,
+ (uint8_t *)&vf->vsi->eth_stats,
+ sizeof(vf->vsi->eth_stats));
return I40E_SUCCESS;
}
@@ -853,12 +1116,21 @@ static int
i40e_pf_host_process_cmd_cfg_vlan_offload(
struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_vlan_offload_info *offload =
(struct i40e_virtchnl_vlan_offload_info *)msg;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*offload)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -879,12 +1151,21 @@ send_msg:
static int
i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_pvid_info *tpid_info =
(struct i40e_virtchnl_pvid_info *)msg;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*tpid_info)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -899,7 +1180,7 @@ send_msg:
return ret;
}
-static void
+void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
struct i40e_virtchnl_pf_event event;
@@ -907,8 +1188,33 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
event.event_data.link_event.link_status =
dev->data->dev_link.link_status;
- event.event_data.link_event.link_speed =
- (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
+
+ /* need to convert the ETH_SPEED_xxx into I40E_LINK_SPEED_xxx */
+ switch (dev->data->dev_link.link_speed) {
+ case ETH_SPEED_NUM_100M:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_100MB;
+ break;
+ case ETH_SPEED_NUM_1G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_1GB;
+ break;
+ case ETH_SPEED_NUM_10G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_10GB;
+ break;
+ case ETH_SPEED_NUM_20G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_20GB;
+ break;
+ case ETH_SPEED_NUM_25G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_25GB;
+ break;
+ case ETH_SPEED_NUM_40G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ break;
+ default:
+ event.event_data.link_event.link_speed =
+ I40E_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
@@ -925,6 +1231,8 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
struct i40e_pf_vf *vf;
/* AdminQ will pass absolute VF id, transfer to internal vf id */
uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
+ struct rte_pmd_i40e_mb_event_param cb_param;
+ bool b_op = TRUE;
if (vf_id > pf->vf_num - 1 || !pf->vfs) {
PMD_DRV_LOG(ERR, "invalid argument");
@@ -939,10 +1247,35 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
return;
}
+ /**
+ * initialise structure to send to user application
+ * will return response from user in retval field
+ */
+ cb_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
+ cb_param.vfid = vf_id;
+ cb_param.msg_type = opcode;
+ cb_param.msg = (void *)msg;
+ cb_param.msglen = msglen;
+
+ /**
+ * Ask user application if we're allowed to perform those functions.
+ * If we get cb_param.retval == RTE_PMD_I40E_MB_EVENT_PROCEED,
+ * then business as usual.
+ * If RTE_PMD_I40E_MB_EVENT_NOOP_ACK or RTE_PMD_I40E_MB_EVENT_NOOP_NACK,
+ * do nothing and send not_supported to VF. As PF must send a response
+ * to VF and ACK/NACK is not defined.
+ */
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
+ if (cb_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
+ PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
+ opcode);
+ b_op = FALSE;
+ }
+
switch (opcode) {
case I40E_VIRTCHNL_OP_VERSION :
PMD_DRV_LOG(INFO, "OP_VERSION received");
- i40e_pf_host_process_cmd_version(vf);
+ i40e_pf_host_process_cmd_version(vf, b_op);
break;
case I40E_VIRTCHNL_OP_RESET_VF :
PMD_DRV_LOG(INFO, "OP_RESET_VF received");
@@ -950,61 +1283,72 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
- i40e_pf_host_process_cmd_get_vf_resource(vf);
+ i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
- i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, msglen);
+ i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
- msglen);
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
- i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen);
+ i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
- i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
- i40e_notify_vf_link_status(dev, vf);
+ if (b_op) {
+ i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
+ i40e_notify_vf_link_status(dev, vf);
+ } else {
+ i40e_pf_host_send_msg_to_vf(
+ vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ }
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
- i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen);
+ i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
- i40e_pf_host_process_cmd_add_ether_address(vf, msg, msglen);
+ i40e_pf_host_process_cmd_add_ether_address(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
- i40e_pf_host_process_cmd_del_ether_address(vf, msg, msglen);
+ i40e_pf_host_process_cmd_del_ether_address(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
- i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen);
+ i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_DEL_VLAN:
PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
- i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen);
+ i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
- i40e_pf_host_process_cmd_config_promisc_mode(vf, msg, msglen);
+ i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_GET_STATS:
PMD_DRV_LOG(INFO, "OP_GET_STATS received");
- i40e_pf_host_process_cmd_get_stats(vf);
+ i40e_pf_host_process_cmd_get_stats(vf, b_op);
break;
case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
- i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg, msglen);
+ i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
- i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen);
+ i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen, b_op);
break;
/* Don't add command supported below, which will
* return an error code.
@@ -1055,9 +1399,9 @@ i40e_pf_host_init(struct rte_eth_dev *dev)
ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
if (ret != I40E_SUCCESS)
goto fail;
- eth_random_addr(pf->vfs[i].mac_addr.addr_bytes);
}
+ RTE_ETH_DEV_SRIOV(dev).active = pf->vf_num;
/* restore irq0 */
i40e_pf_enable_irq0(hw);
diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h
index 244bac37..b4c22876 100644
--- a/drivers/net/i40e/i40e_pf.h
+++ b/drivers/net/i40e/i40e_pf.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -118,5 +118,7 @@ void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
uint8_t *msg, uint16_t msglen);
int i40e_pf_host_init(struct rte_eth_dev *dev);
int i40e_pf_host_uninit(struct rte_eth_dev *dev);
+void i40e_notify_vf_link_status(struct rte_eth_dev *dev,
+ struct i40e_pf_vf *vf);
#endif /* _I40E_PF_H_ */
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1b25b2f2..351cb94d 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,6 +50,8 @@
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_udp.h>
+#include <rte_ip.h>
+#include <rte_net.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -59,7 +61,6 @@
#define DEFAULT_TX_RS_THRESH 32
#define DEFAULT_TX_FREE_THRESH 32
-#define I40E_MAX_PKT_TYPE 256
#define I40E_TX_MAX_BURST 32
@@ -73,12 +74,31 @@
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+#ifdef RTE_LIBRTE_IEEE1588
+#define I40E_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define I40E_TX_IEEE1588_TMST 0
+#endif
+
#define I40E_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
PKT_TX_OUTER_IP_CKSUM)
+#define I40E_TX_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG | \
+ PKT_TX_QINQ_PKT | \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_TUNNEL_MASK | \
+ I40E_TX_IEEE1588_TMST)
+
+#define I40E_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
+
static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -256,7 +276,7 @@ i40e_parse_tunneling_params(uint64_t ol_flags,
*cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
break;
default:
- PMD_TX_LOG(ERR, "Tunnel type not supported\n");
+ PMD_TX_LOG(ERR, "Tunnel type not supported");
return;
}
@@ -412,15 +432,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
"rxq->rx_free_thresh=%d",
rxq->nb_rx_desc, rxq->rx_free_thresh);
ret = -EINVAL;
- } else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
- RTE_PMD_I40E_RX_MAX_BURST))) {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
- "rxq->nb_rx_desc=%d, "
- "I40E_MAX_RING_DESC=%d, "
- "RTE_PMD_I40E_RX_MAX_BURST=%d",
- rxq->nb_rx_desc, I40E_MAX_RING_DESC,
- RTE_PMD_I40E_RX_MAX_BURST);
- ret = -EINVAL;
}
#else
ret = -EINVAL;
@@ -446,6 +457,7 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
int32_t s[I40E_LOOK_AHEAD], nb_dd;
int32_t i, j, nb_rx = 0;
uint64_t pkt_flags;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -494,9 +506,9 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
mb->packet_type =
- i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT));
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
mb->hash.rss = rte_le_to_cpu_32(\
rxdp[j].wb.qword0.hi_dword.rss);
@@ -584,7 +596,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
/* Update rx tail regsiter */
rte_wmb();
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
+ I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
rxq->rx_free_trigger =
(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
@@ -598,6 +610,7 @@ static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
+ struct rte_eth_dev *dev;
uint16_t nb_rx = 0;
if (!nb_pkts)
@@ -615,9 +628,10 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (i40e_rx_alloc_bufs(rxq) != 0) {
uint16_t i, j;
- PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
- "port_id=%u, queue_id=%u",
- rxq->port_id, rxq->queue_id);
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+
rxq->rx_nb_avail = 0;
rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
@@ -679,6 +693,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
union i40e_rx_desc rxd;
struct i40e_rx_entry *sw_ring;
struct i40e_rx_entry *rxe;
+ struct rte_eth_dev *dev;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
uint16_t nb_rx;
@@ -688,6 +703,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint16_t rx_id, nb_hold;
uint64_t dma_addr;
uint64_t pkt_flags;
+ uint32_t *ptype_tbl;
nb_rx = 0;
nb_hold = 0;
@@ -695,6 +711,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
+ ptype_tbl = rxq->vsi->adapter->ptype_tbl;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
@@ -707,10 +724,13 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
break;
nmb = rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(!nmb))
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
break;
- rxd = *rxdp;
+ }
+ rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
rx_id++;
@@ -751,8 +771,8 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
rxm->packet_type =
- i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
@@ -802,10 +822,12 @@ i40e_recv_scattered_pkts(void *rx_queue,
struct rte_mbuf *nmb, *rxm;
uint16_t rx_id = rxq->rx_tail;
uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ struct rte_eth_dev *dev;
uint32_t rx_status;
uint64_t qword1;
uint64_t dma_addr;
uint64_t pkt_flags;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
@@ -818,8 +840,12 @@ i40e_recv_scattered_pkts(void *rx_queue,
break;
nmb = rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(!nmb))
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
break;
+ }
+
rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
@@ -913,8 +939,8 @@ i40e_recv_scattered_pkts(void *rx_queue,
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
first_seg->packet_type =
- i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
first_seg->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
@@ -1022,7 +1048,6 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
uint16_t nb_tx;
uint32_t td_cmd;
uint32_t td_offset;
- uint32_t tx_flags;
uint32_t td_tag;
uint64_t ol_flags;
uint16_t nb_used;
@@ -1046,7 +1071,6 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
td_cmd = 0;
td_tag = 0;
td_offset = 0;
- tx_flags = 0;
tx_pkt = *tx_pkts++;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
@@ -1093,12 +1117,8 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Descriptor based VLAN insertion */
if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
- tx_flags |= tx_pkt->vlan_tci <<
- I40E_TX_FLAG_L2TAG1_SHIFT;
- tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
- td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
- I40E_TX_FLAG_L2TAG1_SHIFT;
+ td_tag = tx_pkt->vlan_tci;
}
/* Always enable CRC offload insertion */
@@ -1231,7 +1251,7 @@ end_of_tx:
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+ I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
@@ -1383,7 +1403,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
/* Update the tx tail register */
rte_wmb();
- I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}
@@ -1414,6 +1434,85 @@ i40e_xmit_pkts_simple(void *tx_queue,
return nb_tx;
}
+static uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = i40e_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /**
+ * m->nb_segs is uint8_t, so nb_segs is always less than
+ * I40E_TX_MAX_SEG.
+ * We check only a condition for nb_segs > I40E_TX_MAX_MTU_SEG.
+ */
+ if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+ } else if ((m->tso_segsz < I40E_MIN_TSO_MSS) ||
+ (m->tso_segsz > I40E_MAX_TSO_MSS)) {
+ /* MSS outside the range (256B - 9674B) are considered
+ * malicious
+ */
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+ return i;
+}
+
/*
* Find the VSI the queue belongs to. 'queue_idx' is the queue index
* application used, which assume having sequential ones. But from driver's
@@ -1701,8 +1800,17 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
/* Allocate the maximun number of RX ring hardware descriptor. */
- ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
- ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+ len = I40E_MAX_RING_DESC;
+
+ /**
+ * Allocating a little more memory because vectorized/bulk_alloc Rx
+ * functions doesn't check boundaries each time.
+ */
+ len += RTE_PMD_I40E_RX_MAX_BURST;
+
+ ring_size = RTE_ALIGN(len * sizeof(union i40e_rx_desc),
+ I40E_DMA_MEM_ALIGN);
+
rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!rz) {
@@ -1717,11 +1825,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
-#else
- len = nb_desc;
-#endif
/* Allocate the software ring. */
rxq->sw_ring =
@@ -1796,11 +1900,6 @@ i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct i40e_rx_queue *rxq;
uint16_t desc = 0;
- if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) {
- PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
@@ -1831,7 +1930,7 @@ i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
int ret;
if (unlikely(offset >= rxq->nb_rx_desc)) {
- PMD_DRV_LOG(ERR, "Invalid RX queue id %u", offset);
+ PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset);
return 0;
}
@@ -1849,6 +1948,64 @@ i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
}
int
+i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ volatile uint64_t *status;
+ uint64_t mask;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+ mask = rte_le_to_cpu_64((1ULL << I40E_RX_DESC_STATUS_DD_SHIFT)
+ << I40E_RXD_QW1_STATUS_SHIFT);
+ if (*status & mask)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct i40e_tx_queue *txq = tx_queue;
+ volatile uint64_t *status;
+ uint64_t mask, expect;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ mask = rte_le_to_cpu_64(I40E_TXD_QW1_DTYPE_MASK);
+ expect = rte_cpu_to_le_64(
+ I40E_TX_DESC_DTYPE_DESC_DONE << I40E_TXD_QW1_DTYPE_SHIFT);
+ if ((*status & mask) == expect)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
@@ -2129,11 +2286,11 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
for (i = 0; i < len * sizeof(union i40e_rx_desc); i++)
((volatile char *)rxq->rx_ring)[i] = 0;
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i)
rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
@@ -2765,12 +2922,25 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts_simple;
}
+ dev->tx_pkt_prepare = NULL;
} else {
PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts;
+ dev->tx_pkt_prepare = i40e_prep_pkts;
}
}
+void __attribute__((cold))
+i40e_set_default_ptype_table(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
+ ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
+}
+
/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
int __attribute__((weak))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
@@ -2815,9 +2985,9 @@ i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
}
uint16_t __attribute__((weak))
-i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
- struct rte_mbuf __rte_unused **tx_pkts,
- uint16_t __rte_unused nb_pkts)
+i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
{
return 0;
}
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index ecdb13cb..20084d64 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -34,16 +34,6 @@
#ifndef _I40E_RXTX_H_
#define _I40E_RXTX_H_
-/**
- * 32 bits tx flags, high 16 bits for L2TAG1 (VLAN),
- * low 16 bits for others.
- */
-#define I40E_TX_FLAG_L2TAG1_SHIFT 16
-#define I40E_TX_FLAG_L2TAG1_MASK 0xffff0000
-#define I40E_TX_FLAG_CSUM ((uint32_t)(1 << 0))
-#define I40E_TX_FLAG_INSERT_VLAN ((uint32_t)(1 << 1))
-#define I40E_TX_FLAG_TSYN ((uint32_t)(1 << 2))
-
#define RTE_PMD_I40E_RX_MAX_BURST 32
#define RTE_PMD_I40E_TX_MAX_BURST 32
@@ -63,6 +53,12 @@
#define I40E_MIN_RING_DESC 64
#define I40E_MAX_RING_DESC 4096
+#define I40E_MIN_TSO_MSS 256
+#define I40E_MAX_TSO_MSS 9674
+
+#define I40E_TX_MAX_SEG UINT8_MAX
+#define I40E_TX_MAX_MTU_SEG 8
+
#undef container_of
#define container_of(ptr, type, member) ({ \
typeof(((type *)0)->member)(*__mptr) = (ptr); \
@@ -113,11 +109,11 @@ struct i40e_rx_queue {
uint16_t nb_rx_hold; /**< number of held free RX desc */
struct rte_mbuf *pkt_first_seg; /**< first segment of current packet */
struct rte_mbuf *pkt_last_seg; /**< last segment of current packet */
+ struct rte_mbuf fake_mbuf; /**< dummy mbuf */
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
uint16_t rx_nb_avail; /**< number of staged packets ready */
uint16_t rx_next_avail; /**< index of next staged packets */
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
- struct rte_mbuf fake_mbuf; /**< dummy mbuf */
struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2];
#endif
@@ -223,6 +219,8 @@ uint16_t i40e_recv_scattered_pkts(void *rx_queue,
uint16_t i40e_xmit_pkts(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t i40e_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
int i40e_tx_queue_init(struct i40e_tx_queue *txq);
int i40e_rx_queue_init(struct i40e_rx_queue *rxq);
void i40e_free_tx_resources(struct i40e_tx_queue *txq);
@@ -238,6 +236,8 @@ void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
@@ -248,19 +248,20 @@ int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
-uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+uint16_t i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
void i40e_set_rx_function(struct rte_eth_dev *dev);
void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
struct i40e_tx_queue *txq);
void i40e_set_tx_function(struct rte_eth_dev *dev);
+void i40e_set_default_ptype_table(struct rte_eth_dev *dev);
/* For each value it means, datasheet of hardware can tell more details
*
* @note: fix i40e_dev_supported_ptypes_get() if any change here.
*/
static inline uint32_t
-i40e_rxd_pkt_type_mapping(uint8_t ptype)
+i40e_get_default_pkt_type(uint8_t ptype)
{
static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
/* L2 types */
diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
new file mode 100644
index 00000000..f4036ea2
--- /dev/null
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -0,0 +1,645 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 IBM Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <altivec.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+
+ vector unsigned long hdr_room = (vector unsigned long){
+ RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM};
+ vector unsigned long dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ dma_addr0 = (vector unsigned long){};
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ vec_st(dma_addr0, 0,
+ (vector unsigned long *)&rxdp[i].read);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ vector unsigned long vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ * Though, RX will overwrite ol_flags that are coming next
+ * anyway. So overwrite whole 8 bytes with one load:
+ * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+ */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
+ vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = vec_mergel(vaddr0, vaddr0);
+ dma_addr1 = vec_mergel(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = vec_add(dma_addr0, hdr_room);
+ dma_addr1 = vec_add(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read);
+ vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+static inline void
+desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+{
+ vector unsigned int vlan0, vlan1, rss, l3_l4e;
+
+ /* mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication.
+ */
+ const vector unsigned int rss_vlan_msk = (vector unsigned int){
+ (int32_t)0x1c03804, (int32_t)0x1c03804,
+ (int32_t)0x1c03804, (int32_t)0x1c03804};
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const vector unsigned char vlan_flags = (vector unsigned char){
+ 0, 0, 0, 0,
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const vector unsigned char rss_flags = (vector unsigned char){
+ 0, PKT_RX_FDIR, 0, 0,
+ 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const vector unsigned char l3_l4e_flags = (vector unsigned char){
+ 0,
+ PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+ | PKT_RX_IP_CKSUM_BAD,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);
+ vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]);
+ vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1);
+
+ vlan1 = vec_and(vlan0, rss_vlan_msk);
+ vlan0 = (vector unsigned int)vec_perm(vlan_flags,
+ (vector unsigned char){},
+ *(vector unsigned char *)&vlan1);
+
+ rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11});
+ rss = (vector unsigned int)vec_perm(rss_flags, (vector unsigned char){},
+ *(vector unsigned char *)&rss);
+
+ l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22});
+ l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags,
+ (vector unsigned char){},
+ *(vector unsigned char *)&l3_l4e);
+
+ vlan0 = vec_or(vlan0, rss);
+ vlan0 = vec_or(vlan0, l3_l4e);
+
+ rx_pkts[0]->ol_flags = (uint64_t)vlan0[2];
+ rx_pkts[1]->ol_flags = (uint64_t)vlan0[3];
+ rx_pkts[2]->ol_flags = (uint64_t)vlan0[0];
+ rx_pkts[3]->ol_flags = (uint64_t)vlan0[1];
+}
+
+#define PKTLEN_SHIFT 10
+
+static inline void
+desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts,
+ uint32_t *ptype_tbl)
+{
+ vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
+ vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
+
+ ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
+ ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});
+
+ rx_pkts[0]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype0)[0]];
+ rx_pkts[1]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype0)[8]];
+ rx_pkts[2]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype1)[0]];
+ rx_pkts[3]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype1)[8]];
+}
+
+ /* Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ vector unsigned char shuf_msk;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ vector unsigned short crc_adjust = (vector unsigned short){
+ 0, 0, /* ignore pkt_type field */
+ rxq->crc_len, /* sub crc on pkt_len */
+ 0, /* ignore high-16bits of pkt_len */
+ rxq->crc_len, /* sub crc on data_len */
+ 0, 0, 0 /* ignore non-length fields */
+ };
+ vector unsigned long dd_check, eop_check;
+
+ /* nb_pkts shall be less equal than RTE_I40E_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch0(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = (vector unsigned long){0x0000000100000001ULL,
+ 0x0000000100000001ULL};
+
+ /* 4 packets EOP mask */
+ eop_check = (vector unsigned long){0x0000000200000002ULL,
+ 0x0000000200000002ULL};
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = (vector unsigned char){
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 14, 15, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 14, 15, /* octet 15~14, 16 bits data_len */
+ 2, 3, /* octet 2~3, low 16 bits vlan_macip */
+ 4, 5, 6, 7 /* octet 4~7, 32bits rss */
+ };
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
+ vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
+ vector unsigned long mbp1, mbp2; /* two mbuf pointer
+ * in one XMM reg.
+ */
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = *(vector unsigned long *)&sw_ring[pos];
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = *(vector unsigned long *)(rxdp + 3);
+ rte_compiler_barrier();
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ *(vector unsigned long *)&rx_pkts[pos] = mbp1;
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = *(vector unsigned long *)&sw_ring[pos + 2];
+
+ descs[2] = *(vector unsigned long *)(rxdp + 2);
+ rte_compiler_barrier();
+ /* B.1 load 2 mbuf point */
+ descs[1] = *(vector unsigned long *)(rxdp + 1);
+ rte_compiler_barrier();
+ descs[0] = *(vector unsigned long *)(rxdp);
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ *(vector unsigned long *)&rx_pkts[pos + 2] = mbp2;
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+ const vector unsigned int len3 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[3]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ const vector unsigned int len2 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[2]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ /* merge the now-aligned packet length fields back in */
+ descs[3] = (vector unsigned long)len3;
+ descs[2] = (vector unsigned long)len2;
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = vec_perm((vector unsigned char)descs[3],
+ (vector unsigned char){}, shuf_msk);
+ pkt_mb3 = vec_perm((vector unsigned char)descs[2],
+ (vector unsigned char){}, shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = vec_mergel((vector unsigned short)descs[3],
+ (vector unsigned short)descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = vec_mergel((vector unsigned short)descs[1],
+ (vector unsigned short)descs[0]);
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb4, crc_adjust);
+ pkt_mb3 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb3, crc_adjust);
+
+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
+ const vector unsigned int len1 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[1]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+ const vector unsigned int len0 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[0]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ /* merge the now-aligned packet length fields back in */
+ descs[1] = (vector unsigned long)len1;
+ descs[0] = (vector unsigned long)len0;
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = vec_perm((vector unsigned char)descs[1],
+ (vector unsigned char){}, shuf_msk);
+ pkt_mb1 = vec_perm((vector unsigned char)descs[0],
+ (vector unsigned char){}, shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ staterr = (vector unsigned short)vec_mergeh(
+ sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ vec_st(pkt_mb4, 0,
+ (vector unsigned char *)&rx_pkts[pos + 3]
+ ->rx_descriptor_fields1
+ );
+ vec_st(pkt_mb3, 0,
+ (vector unsigned char *)&rx_pkts[pos + 2]
+ ->rx_descriptor_fields1
+ );
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb2 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb2, crc_adjust);
+ pkt_mb1 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ vector unsigned char eop_shuf_mask =
+ (vector unsigned char){
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ };
+
+ /* and with mask to extract bits, flipping 1-0 */
+ vector unsigned char eop_bits = vec_and(
+ (vector unsigned char)vec_nor(staterr, staterr),
+ (vector unsigned char)eop_check);
+ /* the staterr values are not in order, as the count
+ * count of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = vec_perm(eop_bits, (vector unsigned char){},
+ eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *split_packet = (vec_ld(0,
+ (vector unsigned int *)&eop_bits))[0];
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = vec_and(staterr, (vector unsigned short)dd_check);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ vec_st(pkt_mb2, 0,
+ (vector unsigned char *)&rx_pkts[pos + 1]
+ ->rx_descriptor_fields1
+ );
+ vec_st(pkt_mb1, 0,
+ (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
+ );
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+ desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+ /* C.4 calc avaialbe number of desc */
+ var = __builtin_popcountll((vec_ld(0,
+ (vector unsigned long *)&staterr)[0]));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /* Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ unsigned int i = 0;
+
+ if (!rxq->pkt_first_seg) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ vector unsigned long descriptor = (vector unsigned long){
+ pkt->buf_physaddr + pkt->data_off, high_qw};
+ *(vector unsigned long *)txdp = descriptor;
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+ /* cross rx_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ nb_commit = nb_pkts;
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reach the end of ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ _i40e_rx_queue_release_mbufs_vec(rxq);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ return i40e_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused * txq)
+{
+ return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+ return i40e_rx_vec_dev_conf_condition_check_default(dev);
+}
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 990520f3..69209668 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -65,9 +65,9 @@ reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
start->ol_flags = end->ol_flags;
/* we need to strip crc for the whole packet */
start->pkt_len -= rxq->crc_len;
- if (end->data_len > rxq->crc_len) {
+ if (end->data_len > rxq->crc_len)
end->data_len -= rxq->crc_len;
- } else {
+ else {
/* free up last mbuf */
struct rte_mbuf *secondlast = start;
@@ -78,7 +78,6 @@ reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
end->data_len);
secondlast->next = NULL;
rte_pktmbuf_free_seg(end);
- end = secondlast;
}
pkts[pkt_idx++] = start;
start = end = NULL;
@@ -124,12 +123,12 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
* tx_next_dd - (tx_rs_thresh-1)
*/
txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
- m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool)) {
free[nb_free++] = m;
@@ -145,7 +144,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
} else {
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (m != NULL)
rte_mempool_put(m->pool, m);
}
@@ -225,14 +224,6 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
-#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
- /* whithout rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_strip != 0 ||
- rxmode->hw_vlan_extend != 0 ||
- rxmode->hw_ip_checksum != 0)
- return -1;
-#endif
-
/* no fdir support */
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
@@ -243,6 +234,10 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
if (rxmode->header_split == 1)
return -1;
+ /* no QinQ support */
+ if (rxmode->hw_vlan_extend == 1)
+ return -1;
+
return 0;
#else
RTE_SET_USED(dev);
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index 011c54e0..694e91f3 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -57,7 +57,6 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
uint64x2_t dma_addr0, dma_addr1;
uint64x2_t zero = vdupq_n_u64(0);
uint64_t paddr;
- uint8x8_t p;
rxdp = rxq->rx_ring + rxq->rxrearm_start;
@@ -77,27 +76,17 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
return;
}
- p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);
-
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* Flush mbuf with pkt template.
- * Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
- */
- vst1_u8((uint8_t *)&mb0->rearm_data, p);
paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
dma_addr0 = vdupq_n_u64(paddr);
/* flush desc with pa dma_addr */
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
- vst1_u8((uint8_t *)&mb1->rearm_data, p);
paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
dma_addr1 = vdupq_n_u64(paddr);
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
@@ -116,18 +105,13 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
-
static inline void
-desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4],
+ struct rte_mbuf **rx_pkts)
{
uint32x4_t vlan0, vlan1, rss, l3_l4e;
+ const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0};
+ uint64x2_t rearm0, rearm1, rearm2, rearm3;
/* mask everything except RSS, flow director and VLAN flags
* bit2 is for VLAN tag, bit11 for flow director indication
@@ -136,6 +120,20 @@ desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
const uint32x4_t rss_vlan_msk = {
0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};
+ const uint32x4_t cksum_mask = {
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD};
+
/* map rss and vlan type to rss hash and vlan flag */
const uint8x16_t vlan_flags = {
0, 0, 0, 0,
@@ -150,14 +148,16 @@ desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
0, 0, 0, 0};
const uint8x16_t l3_l4e_flags = {
- 0,
- PKT_RX_IP_CKSUM_BAD,
- PKT_RX_L4_CKSUM_BAD,
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
0, 0, 0, 0, 0, 0, 0, 0};
vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
@@ -177,26 +177,32 @@ desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
l3_l4e = vshrq_n_u32(vlan1, 22);
l3_l4e = vreinterpretq_u32_u8(vqtbl1q_u8(l3_l4e_flags,
vreinterpretq_u8_u32(l3_l4e)));
-
+ /* then we shift left 1 bit */
+ l3_l4e = vshlq_n_u32(l3_l4e, 1);
+ /* we need to mask out the reduntant bits */
+ l3_l4e = vandq_u32(l3_l4e, cksum_mask);
vlan0 = vorrq_u32(vlan0, rss);
vlan0 = vorrq_u32(vlan0, l3_l4e);
- rx_pkts[0]->ol_flags = vgetq_lane_u32(vlan0, 0);
- rx_pkts[1]->ol_flags = vgetq_lane_u32(vlan0, 1);
- rx_pkts[2]->ol_flags = vgetq_lane_u32(vlan0, 2);
- rx_pkts[3]->ol_flags = vgetq_lane_u32(vlan0, 3);
+ rearm0 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 0), mbuf_init, 1);
+ rearm1 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 1), mbuf_init, 1);
+ rearm2 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 2), mbuf_init, 1);
+ rearm3 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 3), mbuf_init, 1);
+
+ vst1q_u64((uint64_t *)&rx_pkts[0]->rearm_data, rearm0);
+ vst1q_u64((uint64_t *)&rx_pkts[1]->rearm_data, rearm1);
+ vst1q_u64((uint64_t *)&rx_pkts[2]->rearm_data, rearm2);
+ vst1q_u64((uint64_t *)&rx_pkts[3]->rearm_data, rearm3);
}
-#else
-#define desc_to_olflags_v(descs, rx_pkts) do {} while (0)
-#endif
#define PKTLEN_SHIFT 10
#define I40E_VPMD_DESC_DD_MASK 0x0001000100010001ULL
static inline void
-desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts,
+ uint32_t *ptype_tbl)
{
int i;
uint8_t ptype;
@@ -205,7 +211,7 @@ desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
for (i = 0; i < 4; i++) {
tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
ptype = vgetq_lane_u8(tmp, 8);
- rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
+ rx_pkts[i]->packet_type = ptype_tbl[ptype];
}
}
@@ -225,6 +231,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts_recd;
int pos;
uint64_t var;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
/* mask to shuffle from desc. to mbuf */
uint8x16_t shuf_msk = {
@@ -359,7 +366,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
sterr_tmp2.val[1]).val[0];
stat = vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
- desc_to_olflags_v(descs, &rx_pkts[pos]);
+ desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
@@ -429,7 +436,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
pkt_mb2);
vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
- desc_to_ptype_v(descs, &rx_pkts[pos]);
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
/* C.4 calc avaialbe number of desc */
var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK);
nb_pkts_recd += var;
@@ -523,8 +530,8 @@ vtx(volatile struct i40e_tx_desc *txdp,
}
uint16_t
-i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
volatile struct i40e_tx_desc *txdp;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index b95cc8e1..3b4a352e 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -82,22 +82,10 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
__m128i vaddr0, vaddr1;
- uintptr_t p0, p1;
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* Flush mbuf with pkt template.
- * Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
- */
- p0 = (uintptr_t)&mb0->rearm_data;
- *(uint64_t *)p0 = rxq->mbuf_initializer;
- p1 = (uintptr_t)&mb1->rearm_data;
- *(uint64_t *)p1 = rxq->mbuf_initializer;
-
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
@@ -128,17 +116,13 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
-
static inline void
-desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4] __rte_unused,
+ struct rte_mbuf **rx_pkts)
{
+ const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+ __m128i rearm0, rearm1, rearm2, rearm3;
+
__m128i vlan0, vlan1, rss, l3_l4e;
/* mask everything except RSS, flow director and VLAN flags
@@ -206,19 +190,30 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
vlan0 = _mm_or_si128(vlan0, rss);
vlan0 = _mm_or_si128(vlan0, l3_l4e);
- rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0);
- rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2);
- rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4);
- rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6);
+ /*
+ * At this point, we have the 4 sets of flags in the low 16-bits
+ * of each 32-bit value in vlan0.
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single 16-byte write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write.
+ */
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
+ rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);
+ _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}
-#else
-#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
-#endif
#define PKTLEN_SHIFT 10
static inline void
-desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
+ uint32_t *ptype_tbl)
{
__m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
__m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
@@ -226,10 +221,10 @@ desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
ptype0 = _mm_srli_epi64(ptype0, 30);
ptype1 = _mm_srli_epi64(ptype1, 30);
- rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0));
- rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8));
- rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0));
- rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8));
+ rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 0)];
+ rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 8)];
+ rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 0)];
+ rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 8)];
}
/*
@@ -248,6 +243,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
int pos;
uint64_t var;
__m128i shuf_msk;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
__m128i crc_adjust = _mm_set_epi16(
0, 0, 0, /* ignore non-length fields */
@@ -320,20 +316,26 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
__m128i descs[RTE_I40E_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
@@ -342,8 +344,10 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
if (split_packet) {
rte_mbuf_prefetch_part2(rx_pkts[pos]);
@@ -372,7 +376,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* C.1 4=>2 filter staterr info only */
sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
- desc_to_olflags_v(descs, &rx_pkts[pos]);
+ desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
@@ -424,12 +428,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* store the resulting 32-bit value */
*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
split_packet += RTE_I40E_DESCS_PER_LOOP;
-
- /* zero-out next pointers */
- rx_pkts[pos]->next = NULL;
- rx_pkts[pos + 1]->next = NULL;
- rx_pkts[pos + 2]->next = NULL;
- rx_pkts[pos + 3]->next = NULL;
}
/* C.3 calc available number of desc */
@@ -441,7 +439,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
pkt_mb2);
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
- desc_to_ptype_v(descs, &rx_pkts[pos]);
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
/* C.4 calc avaialbe number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
@@ -536,8 +534,8 @@ vtx(volatile struct i40e_tx_desc *txdp,
}
uint16_t
-i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
volatile struct i40e_tx_desc *txdp;
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
new file mode 100644
index 00000000..f7ce62bb
--- /dev/null
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -0,0 +1,1937 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+#include "i40e_pf.h"
+#include "i40e_rxtx.h"
+#include "rte_pmd_i40e.h"
+
+/* The max bandwidth of i40e is 40Gbps. */
+#define I40E_QOS_BW_MAX 40000
+/* The bandwidth should be the multiple of 50Mbps. */
+#define I40E_QOS_BW_GRANULARITY 50
+/* The min bandwidth weight is 1. */
+#define I40E_QOS_BW_WEIGHT_MIN 1
+/* The max bandwidth weight is 127. */
+#define I40E_QOS_BW_WEIGHT_MAX 127
+
+int
+rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
+ if (on) {
+ if ((vsi->info.sec_flags &
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.sec_flags &
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
+ return 0; /* already off */
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (on)
+ vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+ else
+ vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+static int
+i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
+{
+ uint32_t j, k;
+ uint16_t vlan_id;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
+ int ret;
+
+ for (j = 0; j < I40E_VFTA_SIZE; j++) {
+ if (!vsi->vfta[j])
+ continue;
+
+ for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+ if (!(vsi->vfta[j] & (1 << k)))
+ continue;
+
+ vlan_id = j * I40E_UINT32_BIT_SIZE + k;
+ if (!vlan_id)
+ continue;
+
+ vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
+ if (add)
+ ret = i40e_aq_add_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ else
+ ret = i40e_aq_remove_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to add/rm vlan filter");
+ return ret;
+ }
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
+int
+rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->vlan_anti_spoof_on == on)
+ return 0; /* already on or off */
+
+ vsi->vlan_anti_spoof_on = on;
+ if (!vsi->vlan_filter_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, on);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
+ return -ENOTSUP;
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (on)
+ vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
+ else
+ vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num;
+ enum rte_mac_filter_type filter_type;
+ int ret = I40E_SUCCESS;
+ void *temp;
+
+ /* remove all the MACs */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+ vlan_num = vsi->vlan_num;
+ filter_type = f->mac_info.filter_type;
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ if (vlan_num == 0) {
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
+ return I40E_ERR_PARAM;
+ }
+ } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
+ filter_type == RTE_MAC_HASH_MATCH)
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (!mv_f) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = filter_type;
+ (void)rte_memcpy(&mv_f[i].macaddr,
+ &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ }
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+ &f->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+ }
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+
+ rte_free(mv_f);
+ ret = I40E_SUCCESS;
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num = 0;
+ int ret = I40E_SUCCESS;
+ void *temp;
+
+ /* restore all the MACs */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+ if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
+ (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
+ /**
+ * If vlan_num is 0, that's the first time to add mac,
+ * set mask for vlan_id 0.
+ */
+ if (vsi->vlan_num == 0) {
+ i40e_set_vlan_filter(vsi, 0, 1);
+ vsi->vlan_num = 1;
+ }
+ vlan_num = vsi->vlan_num;
+ } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
+ (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (!mv_f) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = f->mac_info.filter_type;
+ (void)rte_memcpy(&mv_f[i].macaddr,
+ &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ }
+
+ if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+ &f->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+ }
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+
+ rte_free(mv_f);
+ ret = I40E_SUCCESS;
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
+{
+ struct i40e_vsi_context ctxt;
+ struct i40e_hw *hw;
+ int ret;
+
+ if (!vsi)
+ return -EINVAL;
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* Use the FW API if FW >= v5.0 */
+ if (hw->aq.fw_maj_ver < 5) {
+ PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
+ return -ENOTSUP;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
+ if (on) {
+ if ((vsi->info.switch_id &
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.switch_id &
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
+ return 0; /* already off */
+ }
+ }
+
+ /* remove all the MAC and VLAN first */
+ ret = i40e_vsi_rm_mac_filter(vsi);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
+ return ret;
+ }
+ if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, 0);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
+ return ret;
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ if (on)
+ vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
+ else
+ vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ return ret;
+ }
+
+ /* add all the MAC and VLAN back */
+ ret = i40e_vsi_restore_mac_filter(vsi);
+ if (ret)
+ return ret;
+ if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, 1);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ uint16_t vf_id;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ /* setup PF TX loopback */
+ vsi = pf->main_vsi;
+ ret = i40e_vsi_set_tx_loopback(vsi, on);
+ if (ret)
+ return -ENOTSUP;
+
+ /* setup TX loopback for all the VFs */
+ if (!pf->vfs) {
+ /* if no VF, do nothing. */
+ return 0;
+ }
+
+ for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+
+ ret = i40e_vsi_set_tx_loopback(vsi, on);
+ if (ret)
+ return -ENOTSUP;
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ on, NULL, true);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ on, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_mac_filter *f;
+ struct rte_eth_dev *dev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+ void *temp;
+
+ if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs)
+ return -EINVAL;
+
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ ether_addr_copy(mac_addr, &vf->mac_addr);
+
+ /* Remove all existing mac */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
+ i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
+
+ return 0;
+}
+
+/* Set vlan strip on/off for specific VF from host */
+int
+rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+
+ if (!vsi)
+ return -EINVAL;
+
+ ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+ uint16_t vlan_id)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (vlan_id > ETHER_MAX_VLAN_ID) {
+ PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0)
+ return -ENODEV;
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.pvid = vlan_id;
+ if (vlan_id > 0)
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
+ else
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+ uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_mac_filter_info filter;
+ struct ether_addr broadcast = {
+ .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (on) {
+ (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+ filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ ret = i40e_vsi_add_mac(vsi, &filter);
+ } else {
+ ret = i40e_vsi_delete_mac(vsi, &broadcast);
+ }
+
+ if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ if (on) {
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ } else {
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+ }
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+static int
+i40e_vlan_filter_count(struct i40e_vsi *vsi)
+{
+ uint32_t j, k;
+ uint16_t vlan_id;
+ int count = 0;
+
+ for (j = 0; j < I40E_VFTA_SIZE; j++) {
+ if (!vsi->vfta[j])
+ continue;
+
+ for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+ if (!(vsi->vfta[j] & (1 << k)))
+ continue;
+
+ vlan_id = j * I40E_UINT32_BIT_SIZE + k;
+ if (!vlan_id)
+ continue;
+
+ count++;
+ }
+ }
+
+ return count;
+}
+
+int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+ uint64_t vf_mask, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ uint16_t vf_idx;
+ int ret = I40E_SUCCESS;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
+ PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+ return -EINVAL;
+ }
+
+ if (vf_mask == 0) {
+ PMD_DRV_LOG(ERR, "No VF.");
+ return -EINVAL;
+ }
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
+ if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+ vsi = pf->vfs[vf_idx].vsi;
+ if (on) {
+ if (!vsi->vlan_filter_on) {
+ vsi->vlan_filter_on = true;
+ i40e_aq_set_vsi_vlan_promisc(hw,
+ vsi->seid,
+ false,
+ NULL);
+ if (!vsi->vlan_anti_spoof_on)
+ i40e_add_rm_all_vlan_filter(
+ vsi, true);
+ }
+ ret = i40e_vsi_add_vlan(vsi, vlan_id);
+ } else {
+ ret = i40e_vsi_delete_vlan(vsi, vlan_id);
+
+ if (!i40e_vlan_filter_count(vsi)) {
+ vsi->vlan_filter_on = false;
+ i40e_aq_set_vsi_vlan_promisc(hw,
+ vsi->seid,
+ true,
+ NULL);
+ }
+ }
+ }
+ }
+
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_get_vf_stats(uint8_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ i40e_update_vsi_stats(vsi);
+
+ stats->ipackets = vsi->eth_stats.rx_unicast +
+ vsi->eth_stats.rx_multicast +
+ vsi->eth_stats.rx_broadcast;
+ stats->opackets = vsi->eth_stats.tx_unicast +
+ vsi->eth_stats.tx_multicast +
+ vsi->eth_stats.tx_broadcast;
+ stats->ibytes = vsi->eth_stats.rx_bytes;
+ stats->obytes = vsi->eth_stats.tx_bytes;
+ stats->ierrors = vsi->eth_stats.rx_discards;
+ stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_reset_vf_stats(uint8_t port,
+ uint16_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->offset_loaded = false;
+ i40e_update_vsi_stats(vsi);
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret = 0;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (bw > I40E_QOS_BW_MAX) {
+ PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
+ I40E_QOS_BW_MAX);
+ return -EINVAL;
+ }
+
+ if (bw % I40E_QOS_BW_GRANULARITY) {
+ PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
+ I40E_QOS_BW_GRANULARITY);
+ return -EINVAL;
+ }
+
+ bw /= I40E_QOS_BW_GRANULARITY;
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* No change. */
+ if (bw == vsi->bw_info.bw_limit) {
+ PMD_DRV_LOG(INFO,
+ "No change for VF max bandwidth. Nothing to do.");
+ return 0;
+ }
+
+ /**
+ * VF bandwidth limitation and TC bandwidth limitation cannot be
+ * enabled in parallel, quit if TC bandwidth limitation is enabled.
+ *
+ * If bw is 0, means disable bandwidth limitation. Then no need to
+ * check TC bandwidth limitation.
+ */
+ if (bw) {
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if ((vsi->enabled_tc & BIT_ULL(i)) &&
+ vsi->bw_info.bw_ets_credits[i])
+ break;
+ }
+ if (i != I40E_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR,
+ "TC max bandwidth has been set on this VF,"
+ " please disable it first.");
+ return -EINVAL;
+ }
+ }
+
+ ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set VF %d bandwidth, err(%d).",
+ vf_id, ret);
+ return -EINVAL;
+ }
+
+ /* Store the configuration. */
+ vsi->bw_info.bw_limit = (uint16_t)bw;
+ vsi->bw_info.bw_max = 0;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
+ uint8_t tc_num, uint8_t *bw_weight)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
+ int ret = 0;
+ int i, j;
+ uint16_t sum;
+ bool b_change = false;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
+ I40E_MAX_TRAFFIC_CLASS);
+ return -EINVAL;
+ }
+
+ sum = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i))
+ sum++;
+ }
+ if (sum != tc_num) {
+ PMD_DRV_LOG(ERR,
+ "Weight should be set for all %d enabled TCs.",
+ sum);
+ return -EINVAL;
+ }
+
+ sum = 0;
+ for (i = 0; i < tc_num; i++) {
+ if (!bw_weight[i]) {
+ PMD_DRV_LOG(ERR,
+ "The weight should be 1 at least.");
+ return -EINVAL;
+ }
+ sum += bw_weight[i];
+ }
+ if (sum != 100) {
+ PMD_DRV_LOG(ERR,
+ "The summary of the TC weight should be 100.");
+ return -EINVAL;
+ }
+
+ /**
+ * Create the configuration for all the TCs.
+ */
+ memset(&tc_bw, 0, sizeof(tc_bw));
+ tc_bw.tc_valid_bits = vsi->enabled_tc;
+ j = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i)) {
+ if (bw_weight[j] !=
+ vsi->bw_info.bw_ets_share_credits[i])
+ b_change = true;
+
+ tc_bw.tc_bw_credits[i] = bw_weight[j];
+ j++;
+ }
+ }
+
+ /* No change. */
+ if (!b_change) {
+ PMD_DRV_LOG(INFO,
+ "No change for TC allocated bandwidth."
+ " Nothing to do.");
+ return 0;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set VF %d TC bandwidth weight, err(%d).",
+ vf_id, ret);
+ return -EINVAL;
+ }
+
+ /* Store the configuration. */
+ j = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i)) {
+ vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
+ j++;
+ }
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
+ uint8_t tc_no, uint32_t bw)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
+ int ret = 0;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (bw > I40E_QOS_BW_MAX) {
+ PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
+ I40E_QOS_BW_MAX);
+ return -EINVAL;
+ }
+
+ if (bw % I40E_QOS_BW_GRANULARITY) {
+ PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
+ I40E_QOS_BW_GRANULARITY);
+ return -EINVAL;
+ }
+
+ bw /= I40E_QOS_BW_GRANULARITY;
+
+ if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
+ I40E_MAX_TRAFFIC_CLASS);
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
+ PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
+ vf_id, tc_no);
+ return -EINVAL;
+ }
+
+ /* No change. */
+ if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
+ PMD_DRV_LOG(INFO,
+ "No change for TC max bandwidth. Nothing to do.");
+ return 0;
+ }
+
+ /**
+ * VF bandwidth limitation and TC bandwidth limitation cannot be
+ * enabled in parallel, disable VF bandwidth limitation if it's
+ * enabled.
+ * If bw is 0, means disable bandwidth limitation. Then no need to
+ * care about VF bandwidth limitation configuration.
+ */
+ if (bw && vsi->bw_info.bw_limit) {
+ ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to disable VF(%d)"
+ " bandwidth limitation, err(%d).",
+ vf_id, ret);
+ return -EINVAL;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "VF max bandwidth is disabled according"
+ " to TC max bandwidth setting.");
+ }
+
+ /**
+ * Get all the TCs' info to create a whole picture.
+ * Because the incremental change isn't permitted.
+ */
+ memset(&tc_bw, 0, sizeof(tc_bw));
+ tc_bw.tc_valid_bits = vsi->enabled_tc;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i)) {
+ tc_bw.tc_bw_credits[i] =
+ rte_cpu_to_le_16(
+ vsi->bw_info.bw_ets_credits[i]);
+ }
+ }
+ tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
+
+ ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set VF %d TC %d max bandwidth, err(%d).",
+ vf_id, tc_no, ret);
+ return -EINVAL;
+ }
+
+ /* Store the configuration. */
+ vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_switching_comp_ets_data ets_data;
+ int i;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ vsi = pf->main_vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ veb = vsi->veb;
+ if (!veb) {
+ PMD_DRV_LOG(ERR, "Invalid VEB.");
+ return -EINVAL;
+ }
+
+ if ((tc_map & veb->enabled_tc) != tc_map) {
+ PMD_DRV_LOG(ERR,
+ "TC bitmap isn't the subset of enabled TCs 0x%x.",
+ veb->enabled_tc);
+ return -EINVAL;
+ }
+
+ if (tc_map == veb->strict_prio_tc) {
+ PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
+ return 0;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* Disable DCBx if it's the first time to set strict priority. */
+ if (!veb->strict_prio_tc) {
+ ret = i40e_aq_stop_lldp(hw, true, NULL);
+ if (ret)
+ PMD_DRV_LOG(INFO,
+ "Failed to disable DCBx as it's already"
+ " disabled.");
+ else
+ PMD_DRV_LOG(INFO,
+ "DCBx is disabled according to strict"
+ " priority setting.");
+ }
+
+ memset(&ets_data, 0, sizeof(ets_data));
+ ets_data.tc_valid_bits = veb->enabled_tc;
+ ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
+ ets_data.tc_strict_priority_flags = tc_map;
+ /* Get all TCs' bandwidth. */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (veb->enabled_tc & BIT_ULL(i)) {
+ /* For rubust, if bandwidth is 0, use 1 instead. */
+ if (veb->bw_info.bw_ets_share_credits[i])
+ ets_data.tc_bw_share_credits[i] =
+ veb->bw_info.bw_ets_share_credits[i];
+ else
+ ets_data.tc_bw_share_credits[i] =
+ I40E_QOS_BW_WEIGHT_MIN;
+ }
+ }
+
+ if (!veb->strict_prio_tc)
+ ret = i40e_aq_config_switch_comp_ets(
+ hw, veb->uplink_seid,
+ &ets_data, i40e_aqc_opc_enable_switching_comp_ets,
+ NULL);
+ else if (tc_map)
+ ret = i40e_aq_config_switch_comp_ets(
+ hw, veb->uplink_seid,
+ &ets_data, i40e_aqc_opc_modify_switching_comp_ets,
+ NULL);
+ else
+ ret = i40e_aq_config_switch_comp_ets(
+ hw, veb->uplink_seid,
+ &ets_data, i40e_aqc_opc_disable_switching_comp_ets,
+ NULL);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set TCs' strict priority mode."
+ " err (%d)", ret);
+ return -EINVAL;
+ }
+
+ veb->strict_prio_tc = tc_map;
+
+ /* Enable DCBx again, if all the TCs' strict priority disabled. */
+ if (!tc_map) {
+ ret = i40e_aq_start_lldp(hw, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to enable DCBx, err(%d).", ret);
+ return -EINVAL;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "DCBx is enabled again according to strict"
+ " priority setting.");
+ }
+
+ return ret;
+}
+
+#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_MAX_PROFILE_NUM 16
+
+static void
+i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
+ uint32_t track_id, uint8_t *profile_info_sec,
+ bool add)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
+ memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
+ if (add)
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ else
+ pinfo->op = I40E_DDP_REMOVE_TRACKID;
+}
+
+static enum i40e_status_code
+i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_profile_section_header *sec;
+ uint32_t track_id;
+ uint32_t offset = 0;
+ uint32_t info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ track_id = ((struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset))->track_id;
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ if (status)
+ PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
+ "offset %d, info %d",
+ offset, info);
+
+ return status;
+}
+
+#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_MAX_PROFILE_NUM 16
+
+/* Check if the profile info exists */
+static int
+i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port];
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint8_t *buff;
+ struct rte_pmd_i40e_profile_list *p_list;
+ struct rte_pmd_i40e_profile_info *pinfo, *p;
+ uint32_t i;
+ int ret;
+
+ buff = rte_zmalloc("pinfo_list",
+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
+ 0);
+ if (!buff) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return -1;
+ }
+
+ ret = i40e_aq_get_ddp_list(
+ hw, (void *)buff,
+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
+ 0, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get profile info list.");
+ rte_free(buff);
+ return -1;
+ }
+ p_list = (struct rte_pmd_i40e_profile_list *)buff;
+ pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
+ sizeof(struct i40e_profile_section_header));
+ for (i = 0; i < p_list->p_count; i++) {
+ p = &p_list->p_info[i];
+ if ((pinfo->track_id == p->track_id) &&
+ !memcmp(&pinfo->version, &p->version,
+ sizeof(struct i40e_ddp_version)) &&
+ !memcmp(&pinfo->name, &p->name,
+ I40E_DDP_NAME_SIZE)) {
+ PMD_DRV_LOG(INFO, "Profile exists.");
+ rte_free(buff);
+ return 1;
+ }
+ }
+
+ rte_free(buff);
+ return 0;
+}
+
+int
+rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
+ uint32_t size,
+ enum rte_pmd_i40e_package_op op)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_hw *hw;
+ struct i40e_package_header *pkg_hdr;
+ struct i40e_generic_seg_header *profile_seg_hdr;
+ struct i40e_generic_seg_header *metadata_seg_hdr;
+ uint32_t track_id;
+ uint8_t *profile_info_sec;
+ int is_exist;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) +
+ sizeof(uint32_t) * 2)) {
+ PMD_DRV_LOG(ERR, "Buff is invalid.");
+ return -EINVAL;
+ }
+
+ pkg_hdr = (struct i40e_package_header *)buff;
+
+ if (!pkg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to fill the package structure");
+ return -EINVAL;
+ }
+
+ if (pkg_hdr->segment_count < 2) {
+ PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
+ return -EINVAL;
+ }
+
+ /* Find metadata segment */
+ metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
+ pkg_hdr);
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+ track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+ /* Find profile segment */
+ profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
+ pkg_hdr);
+ if (!profile_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find profile segment header");
+ return -EINVAL;
+ }
+
+ profile_info_sec = rte_zmalloc(
+ "i40e_profile_info",
+ sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info),
+ 0);
+ if (!profile_info_sec) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return -EINVAL;
+ }
+
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
+ /* Check if the profile exists */
+ i40e_generate_profile_info_sec(
+ ((struct i40e_profile_segment *)profile_seg_hdr)->name,
+ &((struct i40e_profile_segment *)profile_seg_hdr)->version,
+ track_id, profile_info_sec, 1);
+ is_exist = i40e_check_profile_info(port, profile_info_sec);
+ if (is_exist > 0) {
+ PMD_DRV_LOG(ERR, "Profile already exists.");
+ rte_free(profile_info_sec);
+ return 1;
+ } else if (is_exist < 0) {
+ PMD_DRV_LOG(ERR, "Failed to check profile.");
+ rte_free(profile_info_sec);
+ return -EINVAL;
+ }
+
+ /* Write profile to HW */
+ status = i40e_write_profile(
+ hw,
+ (struct i40e_profile_segment *)profile_seg_hdr,
+ track_id);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to write profile.");
+ rte_free(profile_info_sec);
+ return status;
+ }
+
+ /* Add profile info to info list */
+ status = i40e_add_rm_profile_info(hw, profile_info_sec);
+ if (status)
+ PMD_DRV_LOG(ERR, "Failed to add profile info.");
+ } else {
+ PMD_DRV_LOG(ERR, "Operation not supported.");
+ }
+
+ rte_free(profile_info_sec);
+ return status;
+}
+
+int
+rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_hw *hw;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
+ return -EINVAL;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ status = i40e_aq_get_ddp_list(hw, (void *)buff,
+ size, 0, NULL);
+
+ return status;
+}
+
+static int check_invalid_pkt_type(uint32_t pkt_type)
+{
+ uint32_t l2, l3, l4, tnl, il2, il3, il4;
+
+ l2 = pkt_type & RTE_PTYPE_L2_MASK;
+ l3 = pkt_type & RTE_PTYPE_L3_MASK;
+ l4 = pkt_type & RTE_PTYPE_L4_MASK;
+ tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
+ il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
+ il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
+ il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
+
+ if (l2 &&
+ l2 != RTE_PTYPE_L2_ETHER &&
+ l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
+ l2 != RTE_PTYPE_L2_ETHER_ARP &&
+ l2 != RTE_PTYPE_L2_ETHER_LLDP &&
+ l2 != RTE_PTYPE_L2_ETHER_NSH &&
+ l2 != RTE_PTYPE_L2_ETHER_VLAN &&
+ l2 != RTE_PTYPE_L2_ETHER_QINQ)
+ return -1;
+
+ if (l3 &&
+ l3 != RTE_PTYPE_L3_IPV4 &&
+ l3 != RTE_PTYPE_L3_IPV4_EXT &&
+ l3 != RTE_PTYPE_L3_IPV6 &&
+ l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
+ l3 != RTE_PTYPE_L3_IPV6_EXT &&
+ l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
+ return -1;
+
+ if (l4 &&
+ l4 != RTE_PTYPE_L4_TCP &&
+ l4 != RTE_PTYPE_L4_UDP &&
+ l4 != RTE_PTYPE_L4_FRAG &&
+ l4 != RTE_PTYPE_L4_SCTP &&
+ l4 != RTE_PTYPE_L4_ICMP &&
+ l4 != RTE_PTYPE_L4_NONFRAG)
+ return -1;
+
+ if (tnl &&
+ tnl != RTE_PTYPE_TUNNEL_IP &&
+ tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+ tnl != RTE_PTYPE_TUNNEL_VXLAN &&
+ tnl != RTE_PTYPE_TUNNEL_NVGRE &&
+ tnl != RTE_PTYPE_TUNNEL_GENEVE &&
+ tnl != RTE_PTYPE_TUNNEL_GRENAT)
+ return -1;
+
+ if (il2 &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
+ return -1;
+
+ if (il3 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
+ return -1;
+
+ if (il4 &&
+ il4 != RTE_PTYPE_INNER_L4_TCP &&
+ il4 != RTE_PTYPE_INNER_L4_UDP &&
+ il4 != RTE_PTYPE_INNER_L4_FRAG &&
+ il4 != RTE_PTYPE_INNER_L4_SCTP &&
+ il4 != RTE_PTYPE_INNER_L4_ICMP &&
+ il4 != RTE_PTYPE_INNER_L4_NONFRAG)
+ return -1;
+
+ return 0;
+}
+
+static int check_invalid_ptype_mapping(
+ struct rte_pmd_i40e_ptype_mapping *mapping_table,
+ uint16_t count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ uint16_t ptype = mapping_table[i].hw_ptype;
+ uint32_t pkt_type = mapping_table[i].sw_ptype;
+
+ if (ptype >= I40E_MAX_PKT_TYPE)
+ return -1;
+
+ if (pkt_type == RTE_PTYPE_UNKNOWN)
+ continue;
+
+ if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
+ continue;
+
+ if (check_invalid_pkt_type(pkt_type))
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_ptype_mapping_update(
+ uint8_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (count > I40E_MAX_PKT_TYPE)
+ return -EINVAL;
+
+ if (check_invalid_ptype_mapping(mapping_items, count))
+ return -EINVAL;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (exclusive) {
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
+ ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
+ }
+
+ for (i = 0; i < count; i++)
+ ad->ptype_tbl[mapping_items[i].hw_ptype]
+ = mapping_items[i].sw_ptype;
+
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_reset(uint8_t port)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ i40e_set_default_ptype_table(dev);
+
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_get(
+ uint8_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t size,
+ uint16_t *count,
+ uint8_t valid_only)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ int n = 0;
+ uint16_t i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
+ if (n >= size)
+ break;
+ if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
+ continue;
+ mapping_items[n].hw_ptype = i;
+ mapping_items[n].sw_ptype = ad->ptype_tbl[i];
+ n++;
+ }
+
+ *count = n;
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
+ uint32_t target,
+ uint8_t mask,
+ uint32_t pkt_type)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ uint16_t i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (!mask && check_invalid_pkt_type(target))
+ return -EINVAL;
+
+ if (check_invalid_pkt_type(pkt_type))
+ return -EINVAL;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
+ if (mask) {
+ if ((target | ad->ptype_tbl[i]) == target &&
+ (target & ad->ptype_tbl[i]))
+ ad->ptype_tbl[i] = pkt_type;
+ } else {
+ if (ad->ptype_tbl[i] == target)
+ ad->ptype_tbl[i] = pkt_type;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
new file mode 100644
index 00000000..1efb2c4b
--- /dev/null
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -0,0 +1,590 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PMD_I40E_H_
+#define _PMD_I40E_H_
+
+/**
+ * @file rte_pmd_i40e.h
+ *
+ * i40e PMD specific functions.
+ *
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ */
+
+#include <rte_ethdev.h>
+
+/**
+ * Response sent back to i40e driver from user app after callback
+ */
+enum rte_pmd_i40e_mb_event_rsp {
+ RTE_PMD_I40E_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */
+ RTE_PMD_I40E_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */
+ RTE_PMD_I40E_MB_EVENT_PROCEED, /**< proceed with mbox request */
+ RTE_PMD_I40E_MB_EVENT_MAX /**< max value of this enum */
+};
+
+/**
+ * Data sent to the user application when the callback is executed.
+ */
+struct rte_pmd_i40e_mb_event_param {
+ uint16_t vfid; /**< Virtual Function number */
+ uint16_t msg_type; /**< VF to PF message type, see i40e_virtchnl_ops */
+ uint16_t retval; /**< return value */
+ void *msg; /**< pointer to message */
+ uint16_t msglen; /**< length of the message */
+};
+
+/**
+ * Option of package processing.
+ */
+enum rte_pmd_i40e_package_op {
+ RTE_PMD_I40E_PKG_OP_UNDEFINED = 0,
+ RTE_PMD_I40E_PKG_OP_WR_ADD, /**< load package and add to info list */
+ RTE_PMD_I40E_PKG_OP_MAX = 32
+};
+
+#define RTE_PMD_I40E_DDP_NAME_SIZE 32
+
+/**
+ * Version for dynamic device personalization.
+ * Version in "major.minor.update.draft" format.
+ */
+struct rte_pmd_i40e_ddp_version {
+ uint8_t major;
+ uint8_t minor;
+ uint8_t update;
+ uint8_t draft;
+};
+
+/**
+ * Profile information in profile info list.
+ */
+struct rte_pmd_i40e_profile_info {
+ uint32_t track_id;
+ struct rte_pmd_i40e_ddp_version version;
+ uint8_t owner;
+ uint8_t reserved[7];
+ uint8_t name[RTE_PMD_I40E_DDP_NAME_SIZE];
+};
+
+/**
+ * Profile information list returned from HW.
+ */
+struct rte_pmd_i40e_profile_list {
+ uint32_t p_count;
+ struct rte_pmd_i40e_profile_info p_info[1];
+};
+
+/**
+ * ptype mapping table only accept RTE_PTYPE_XXX or "user defined" ptype.
+ * A ptype with MSB set will be regarded as a user defined ptype.
+ * Below macro help to create a user defined ptype.
+ */
+#define RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK 0x80000000
+
+struct rte_pmd_i40e_ptype_mapping {
+ uint16_t hw_ptype; /**< hardware defined packet type*/
+ uint32_t sw_ptype; /**< software defined packet type */
+};
+
+/**
+ * Notify VF when PF link status changes.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* invalid.
+ */
+int rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf);
+
+/**
+ * Enable/Disable VF MAC anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set MAC anti spoofing.
+ * @param on
+ * 1 - Enable VFs MAC anti spoofing.
+ * 0 - Disable VFs MAC anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set VLAN anti spoofing.
+ * @param on
+ * 1 - Enable VFs VLAN anti spoofing.
+ * 0 - Disable VFs VLAN anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable TX loopback on all the PF and VFs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Enable TX loopback.
+ * 0 - Disable TX loopback.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_tx_loopback(uint8_t port,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF unicast promiscuous mode.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set.
+ * @param on
+ * 1 - Enable.
+ * 0 - Disable.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF multicast promiscuous mode.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set.
+ * @param on
+ * 1 - Enable.
+ * 0 - Disable.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Set the VF MAC address.
+ *
+ * PF should set MAC address before VF initialized, if PF sets the MAC
+ * address after VF initialized, new MAC address won't be effective until
+ * VF reinitialize.
+ *
+ * This will remove all existing MAC filters.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr);
+
+/**
+ * Enable/Disable vf vlan strip for all queues in a pool
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - Enable VF's vlan strip on RX queues.
+ * 0 - Disable VF's vlan strip on RX queues.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable vf vlan insert
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param vlan_id
+ * 0 - Disable VF's vlan insert.
+ * n - Enable; n is inserted as the vlan id.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+ uint16_t vlan_id);
+
+/**
+ * Enable/Disable vf broadcast mode
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param on
+ * 0 - Disable broadcast.
+ * 1 - Enable broadcast.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable vf vlan tag
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param on
+ * 0 - Disable VF's vlan tag.
+ * n - Enable VF's vlan tag.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN filter
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vlan_id
+ * ID specifying VLAN
+ * @param vf_mask
+ * Mask to filter VF's
+ * @param on
+ * 0 - Disable VF's VLAN filter.
+ * 1 - Enable VF's VLAN filter.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+ uint64_t vf_mask, uint8_t on);
+
+/**
+ * Get VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to get.
+ * @param stats
+ * A pointer to a structure of type *rte_eth_stats* to be filled with
+ * the values of device counters for the following set of statistics:
+ * - *ipackets* with the total of successfully received packets.
+ * - *opackets* with the total of successfully transmitted packets.
+ * - *ibytes* with the total of successfully received bytes.
+ * - *obytes* with the total of successfully transmitted bytes.
+ * - *ierrors* with the total of erroneous received packets.
+ * - *oerrors* with the total of failed transmitted packets.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+
+int rte_pmd_i40e_get_vf_stats(uint8_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats);
+
+/**
+ * Clear VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to get.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_reset_vf_stats(uint8_t port,
+ uint16_t vf_id);
+
+/**
+ * Set VF's max bandwidth.
+ *
+ * Per VF bandwidth limitation and per TC bandwidth limitation cannot
+ * be enabled in parallel. If per TC bandwidth is enabled, this function
+ * will disable it.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param bw
+ * Bandwidth for this VF.
+ * The value should be an absolute bandwidth in Mbps.
+ * The bandwidth is a L2 bandwidth counting the bytes of ethernet packets.
+ * Not count the bytes added by physical layer.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_max_bw(uint8_t port,
+ uint16_t vf_id,
+ uint32_t bw);
+
+/**
+ * Set all the TCs' bandwidth weight on a specific VF.
+ *
+ * The bw_weight means the percentage occupied by the TC.
+ * It can be taken as the relative min bandwidth setting.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param tc_num
+ * Number of TCs.
+ * @param bw_weight
+ * An array of relative bandwidth weight for all the TCs.
+ * The summary of the bw_weight should be 100.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port,
+ uint16_t vf_id,
+ uint8_t tc_num,
+ uint8_t *bw_weight);
+
+/**
+ * Set a specific TC's max bandwidth on a specific VF.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param tc_no
+ * Number specifying TC.
+ * @param bw
+ * Max bandwidth for this TC.
+ * The value should be an absolute bandwidth in Mbps.
+ * The bandwidth is a L2 bandwidth counting the bytes of ethernet packets.
+ * Not count the bytes added by physical layer.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port,
+ uint16_t vf_id,
+ uint8_t tc_no,
+ uint32_t bw);
+
+/**
+ * Set some TCs to strict priority mode on a physical port.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param tc_map
+ * A bit map for the TCs.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map);
+
+/**
+ * Load/Unload a ddp package
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param buff
+ * buffer of package.
+ * @param size
+ * size of buffer.
+ * @param op
+ * Operation of package processing
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (1) if profile exists.
+ */
+int rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
+ uint32_t size,
+ enum rte_pmd_i40e_package_op op);
+
+/**
+ * rte_pmd_i40e_get_ddp_list - Get loaded profile list
+ * @param port
+ * port id
+ * @param buff
+ * buffer for response
+ * @param size
+ * buffer size
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size);
+
+/**
+ * Update hardware defined ptype to software defined packet type
+ * mapping table.
+ *
+ * @param port
+ * pointer to port identifier of the device.
+ * @param mapping_items
+ * the base address of the mapping items array.
+ * @param count
+ * number of mapping items.
+ * @param exclusive
+ * the flag indicate different ptype mapping update method.
+ * -(0) only overwrite referred PTYPE mapping,
+ * keep other PTYPEs mapping unchanged.
+ * -(!0) overwrite referred PTYPE mapping,
+ * set other PTYPEs maps to PTYPE_UNKNOWN.
+ */
+int rte_pmd_i40e_ptype_mapping_update(
+ uint8_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive);
+
+/**
+ * Reset hardware defined ptype to software defined ptype
+ * mapping table to default.
+ *
+ * @param port
+ * pointer to port identifier of the device
+ */
+int rte_pmd_i40e_ptype_mapping_reset(uint8_t port);
+
+/**
+ * Get hardware defined ptype to software defined ptype
+ * mapping items.
+ *
+ * @param port
+ * pointer to port identifier of the device.
+ * @param mapping_items
+ * the base address of the array to store returned items.
+ * @param size
+ * the size of the input array.
+ * @param count
+ * the place to store the number of returned items.
+ * @param valid_only
+ * -(0) return full mapping table.
+ * -(!0) only return mapping items which packet_type != RTE_PTYPE_UNKNOWN.
+ */
+int rte_pmd_i40e_ptype_mapping_get(
+ uint8_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t size,
+ uint16_t *count,
+ uint8_t valid_only);
+
+/**
+ * Replace a specific or a group of software defined ptypes
+ * with a new one
+ *
+ * @param port
+ * pointer to port identifier of the device
+ * @param target
+ * the packet type to be replaced
+ * @param mask
+ * -(0) target represent a specific software defined ptype.
+ * -(!0) target is a mask to represent a group of software defined ptypes.
+ * @param pkt_type
+ * the new packet type to overwrite
+ */
+int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
+ uint32_t target,
+ uint8_t mask,
+ uint32_t pkt_type);
+
+#endif /* _PMD_I40E_H_ */
diff --git a/drivers/net/i40e/rte_pmd_i40e_version.map b/drivers/net/i40e/rte_pmd_i40e_version.map
index ef353984..3b0e805d 100644
--- a/drivers/net/i40e/rte_pmd_i40e_version.map
+++ b/drivers/net/i40e/rte_pmd_i40e_version.map
@@ -2,3 +2,39 @@ DPDK_2.0 {
local: *;
};
+
+DPDK_17.02 {
+ global:
+
+ rte_pmd_i40e_get_vf_stats;
+ rte_pmd_i40e_ping_vfs;
+ rte_pmd_i40e_ptype_mapping_get;
+ rte_pmd_i40e_ptype_mapping_replace;
+ rte_pmd_i40e_ptype_mapping_reset;
+ rte_pmd_i40e_ptype_mapping_update;
+ rte_pmd_i40e_reset_vf_stats;
+ rte_pmd_i40e_set_tx_loopback;
+ rte_pmd_i40e_set_vf_broadcast;
+ rte_pmd_i40e_set_vf_mac_addr;
+ rte_pmd_i40e_set_vf_mac_anti_spoof;
+ rte_pmd_i40e_set_vf_multicast_promisc;
+ rte_pmd_i40e_set_vf_unicast_promisc;
+ rte_pmd_i40e_set_vf_vlan_anti_spoof;
+ rte_pmd_i40e_set_vf_vlan_filter;
+ rte_pmd_i40e_set_vf_vlan_insert;
+ rte_pmd_i40e_set_vf_vlan_stripq;
+ rte_pmd_i40e_set_vf_vlan_tag;
+
+} DPDK_2.0;
+
+DPDK_17.05 {
+ global:
+
+ rte_pmd_i40e_set_tc_strict_prio;
+ rte_pmd_i40e_set_vf_max_bw;
+ rte_pmd_i40e_set_vf_tc_bw_alloc;
+ rte_pmd_i40e_set_vf_tc_max_bw;
+ rte_pmd_i40e_process_ddp_package;
+ rte_pmd_i40e_get_ddp_list;
+
+} DPDK_17.02;
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 94ddc7b8..5529d81c 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -76,6 +76,9 @@ endif
ifeq ($(shell test $(GCC_VERSION) -ge 50 && echo 1), 1)
CFLAGS_ixgbe_common.o += -Wno-logical-not-parentheses
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
endif
endif
@@ -100,6 +103,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x550.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_phy.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_api.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_hv_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82599.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82598.c
@@ -108,6 +112,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_flow.c
ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_neon.c
else
@@ -118,13 +123,9 @@ ifeq ($(CONFIG_RTE_NIC_BYPASS),y)
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
endif
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
index 6b54c31e..a61617be 100644
--- a/drivers/net/ixgbe/base/README
+++ b/drivers/net/ixgbe/base/README
@@ -1,7 +1,7 @@
..
BSD LICENSE
- Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -34,7 +34,8 @@ Intel® IXGBE driver
===================
This directory contains source code of FreeBSD ixgbe driver of version
-cid-10g-shared-code.2016.08.15 released by ND. The sub-directory of base/
+cid-10g-shared-code.2017.03.29 released by the team which develop
+basic drivers for any ixgbe NIC. The sub-directory of base/
contains the original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.c b/drivers/net/ixgbe/base/ixgbe_82598.c
index 724dcbbc..d64abb2e 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.c
+++ b/drivers/net/ixgbe/base/ixgbe_82598.c
@@ -1222,9 +1222,9 @@ STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.h b/drivers/net/ixgbe/base/ixgbe_82598.h
index 0326e70b..20aab9fc 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.h
+++ b/drivers/net/ixgbe/base/ixgbe_82598.h
@@ -45,7 +45,7 @@ s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
-u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
index 832242ee..d9d11a8e 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -2169,9 +2169,9 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.h b/drivers/net/ixgbe/base/ixgbe_82599.h
index c034d3d9..d555dbce 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.h
+++ b/drivers/net/ixgbe/base/ixgbe_82599.h
@@ -57,7 +57,7 @@ s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val);
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked);
diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c
index 094ee526..4117fb01 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.c
+++ b/drivers/net/ixgbe/base/ixgbe_api.c
@@ -205,6 +205,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_SFP:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
hw->mac.type = ixgbe_mac_X550EM_x;
hw->mvals = ixgbe_mvals_X550EM_x;
break;
@@ -1147,12 +1148,15 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* @min: driver minor number to be sent to firmware
* @build: driver build number to be sent to firmware
* @ver: driver version number to be sent to firmware
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
**/
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
- u8 ver)
+ u8 ver, u16 len, char *driver_ver)
{
return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min,
- build, ver), IXGBE_NOT_IMPLEMENTED);
+ build, ver, len, driver_ver),
+ IXGBE_NOT_IMPLEMENTED);
}
@@ -1575,7 +1579,7 @@ s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
(hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
diff --git a/drivers/net/ixgbe/base/ixgbe_api.h b/drivers/net/ixgbe/base/ixgbe_api.h
index 24c4ae8d..2f532aa8 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.h
+++ b/drivers/net/ixgbe/base/ixgbe_api.h
@@ -133,7 +133,7 @@ s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
- u8 ver);
+ u8 ver, u16 len, char *driver_ver);
s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw);
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
@@ -143,7 +143,7 @@ s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
-u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index cca19efc..4dabb434 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -113,6 +113,7 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
mac->ops.led_off = ixgbe_led_off_generic;
mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
+ mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;
/* RAR, Multicast, VLAN */
mac->ops.set_rar = ixgbe_set_rar_generic;
@@ -188,7 +189,10 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
break;
case ixgbe_media_type_backplane:
- supported = true;
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
+ supported = false;
+ else
+ supported = true;
break;
case ixgbe_media_type_copper:
/* only some copper devices support flow control autoneg */
@@ -409,8 +413,10 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
/* Setup flow control */
ret_val = ixgbe_setup_fc(hw);
- if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED)
+ if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
+ DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
return ret_val;
+ }
/* Cache bit indicating need for crosstalk fix */
switch (hw->mac.type) {
@@ -492,11 +498,17 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
/* Reset the hardware */
status = hw->mac.ops.reset_hw(hw);
- if (status == IXGBE_SUCCESS) {
+ if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
/* Start the HW */
status = hw->mac.ops.start_hw(hw);
}
+ /* Initialize the LED link active for LED blink support */
+ hw->mac.ops.init_led_link_act(hw);
+
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
+
return status;
}
@@ -1136,6 +1148,47 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
+ * @hw: pointer to hardware structure
+ *
+ * Store the index for the link active LED. This will be used to support
+ * blinking the LED.
+ **/
+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 led_reg, led_mode;
+ u8 i;
+
+ led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ /* Get LED link active from the LEDCTL register */
+ for (i = 0; i < 4; i++) {
+ led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
+
+ if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
+ IXGBE_LED_LINK_ACTIVE) {
+ mac->led_link_act = i;
+ return IXGBE_SUCCESS;
+ }
+ }
+
+ /*
+ * If LEDCTL register does not have the LED link active set, then use
+ * known MAC defaults.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_X550EM_x:
+ mac->led_link_act = 1;
+ break;
+ default:
+ mac->led_link_act = 2;
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_led_on_generic - Turns on the software controllable LEDs.
* @hw: pointer to hardware structure
* @index: led number to turn on
@@ -3764,7 +3817,8 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ if (mpsar_lo == 0 && mpsar_hi == 0 &&
+ rar != 0 && rar != hw->mac.san_mac_rar_index)
hw->mac.ops.clear_rar(hw, rar);
done:
return IXGBE_SUCCESS;
@@ -4184,7 +4238,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type >= ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
@@ -4595,13 +4649,15 @@ rel_out:
* semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
**/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 sub)
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver)
{
struct ixgbe_hic_drv_info fw_cmd;
int i;
s32 ret_val = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
+ UNREFERENCED_2PARAMETER(len, driver_ver);
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
diff --git a/drivers/net/ixgbe/base/ixgbe_common.h b/drivers/net/ixgbe/base/ixgbe_common.h
index 66dd5659..903f34d5 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.h
+++ b/drivers/net/ixgbe/base/ixgbe_common.h
@@ -72,6 +72,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
@@ -155,12 +156,14 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
int strategy);
void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 ver);
+ u8 build, u8 ver, u16 len, const char *str);
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length, u32 timeout, bool return_data);
s32 ixgbe_hic_unlocked(struct ixgbe_hw *, u32 *buffer, u32 length, u32 timeout);
-
+s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *);
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *, u16 activity,
+ u32 (*data)[FW_PHY_ACT_DATA_COUNT]);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/base/ixgbe_hv_vf.c b/drivers/net/ixgbe/base/ixgbe_hv_vf.c
new file mode 100644
index 00000000..47143a26
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_hv_vf.c
@@ -0,0 +1,240 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_vf.h"
+#include "ixgbe_hv_vf.h"
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr next,
+ bool clear)
+{
+ UNREFERENCED_5PARAMETER(hw, mc_addr_list, mc_addr_count, next, clear);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+ UNREFERENCED_2PARAMETER(hw, xcast_mode);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass)
+{
+ UNREFERENCED_5PARAMETER(hw, vlan, vind, vlan_on, vlvf_bypass);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ UNREFERENCED_3PARAMETER(hw, index, addr);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ UNREFERENCED_PARAMETER(hw);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vlan, u32 vind)
+{
+ UNREFERENCED_5PARAMETER(hw, index, addr, vlan, vind);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 links_reg;
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ DELAY(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* Reserved for pre-x550 devices */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ * Hyper-V variant.
+ **/
+static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+ u32 reg;
+
+ /* If we are on Hyper-V, we implement this functionality
+ * differently.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
+ /* CRC == 4 */
+ reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ * Hyper-V version - only ixgbe_mbox_api_10 supported.
+ **/
+static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
+{
+ UNREFERENCED_1PARAMETER(hw);
+
+ /* Hyper-V only supports api version ixgbe_mbox_api_10 */
+ if (api != ixgbe_mbox_api_10)
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_hv_init_ops_vf - Initialize the pointers for vf
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers, adapter-specific functions can
+ * override the assignment of generic function pointers by assigning
+ * their own adapter-specific function pointers.
+ * Does not touch the hardware.
+ **/
+s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw)
+{
+ /* Set defaults for VF then override applicable Hyper-V
+ * specific functions
+ */
+ ixgbe_init_ops_vf(hw);
+
+ hw->mac.ops.reset_hw = ixgbevf_hv_reset_hw_vf;
+ hw->mac.ops.check_link = ixgbevf_hv_check_mac_link_vf;
+ hw->mac.ops.negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf;
+ hw->mac.ops.set_rar = ixgbevf_hv_set_rar_vf;
+ hw->mac.ops.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf;
+ hw->mac.ops.update_xcast_mode = ixgbevf_hv_update_xcast_mode;
+ hw->mac.ops.set_uc_addr = ixgbevf_hv_set_uc_addr_vf;
+ hw->mac.ops.set_vfta = ixgbevf_hv_set_vfta_vf;
+ hw->mac.ops.set_rlpml = ixgbevf_hv_set_rlpml_vf;
+
+ return IXGBE_SUCCESS;
+}
diff --git a/drivers/net/ixgbe/base/ixgbe_hv_vf.h b/drivers/net/ixgbe/base/ixgbe_hv_vf.h
new file mode 100644
index 00000000..9119f29f
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_hv_vf.h
@@ -0,0 +1,41 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2016, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_HV_VF_H_
+#define _IXGBE_HV_VF_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_HV_VF_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.h b/drivers/net/ixgbe/base/ixgbe_mbx.h
index 7556a818..bde50a51 100644
--- a/drivers/net/ixgbe/base/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.h
@@ -114,6 +114,14 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
+/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */
+enum ixgbevf_xcast_modes {
+ IXGBEVF_XCAST_MODE_NONE = 0,
+ IXGBEVF_XCAST_MODE_MULTI,
+ IXGBEVF_XCAST_MODE_ALLMULTI,
+ IXGBEVF_XCAST_MODE_PROMISC,
+};
+
/* GET_QUEUES return data indices within the mailbox */
#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
index 77f0af51..4aab278d 100644
--- a/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -44,6 +44,7 @@
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_byteorder.h>
+#include <rte_io.h>
#include "../ixgbe_logs.h"
#include "../ixgbe_bypass_defines.h"
@@ -81,6 +82,7 @@
#define UNREFERENCED_2PARAMETER(_p, _q)
#define UNREFERENCED_3PARAMETER(_p, _q, _r)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t)
/* Shared code error reporting */
enum {
@@ -95,8 +97,9 @@ enum {
#define STATIC static
#define IXGBE_NTOHL(_i) rte_be_to_cpu_32(_i)
#define IXGBE_NTOHS(_i) rte_be_to_cpu_16(_i)
+#define IXGBE_CPU_TO_LE16(_i) rte_cpu_to_le_16(_i)
#define IXGBE_CPU_TO_LE32(_i) rte_cpu_to_le_32(_i)
-#define IXGBE_LE32_TO_CPU(_i) rte_le_to_cpu_32(_i)
+#define IXGBE_LE32_TO_CPU(_i) rte_le_to_cpu_32(_i)
#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i)
#define IXGBE_CPU_TO_BE16(_i) rte_cpu_to_be_16(_i)
#define IXGBE_CPU_TO_BE32(_i) rte_cpu_to_be_32(_i)
@@ -121,16 +124,18 @@ typedef int bool;
#define prefetch(x) rte_prefetch0(x)
-#define IXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define IXGBE_PCI_REG(reg) rte_read32(reg)
static inline uint32_t ixgbe_read_addr(volatile void* addr)
{
return rte_le_to_cpu_32(IXGBE_PCI_REG(addr));
}
-#define IXGBE_PCI_REG_WRITE(reg, value) do { \
- IXGBE_PCI_REG((reg)) = (rte_cpu_to_le_32(value)); \
-} while(0)
+#define IXGBE_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+
+#define IXGBE_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
#define IXGBE_PCI_REG_ADDR(hw, reg) \
((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c
index 43c55d74..62c3080a 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.c
+++ b/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -113,7 +113,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg,
u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- int max_retry = 10;
+ int max_retry = 3;
int retry = 0;
u8 csum_byte;
u8 high_bits;
@@ -121,8 +121,6 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg,
u8 reg_high;
u8 csum;
- if (hw->mac.type >= ixgbe_mac_X550)
- max_retry = 3;
reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */
csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
csum = ~csum;
@@ -293,8 +291,11 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
{
u16 ext_ability = 0;
- if (!ixgbe_validate_phy_addr(hw, phy_addr))
+ if (!ixgbe_validate_phy_addr(hw, phy_addr)) {
+ DEBUGOUT1("Unable to validate PHY address 0x%04X\n",
+ phy_addr);
return false;
+ }
if (ixgbe_get_phy_id(hw))
return false;
@@ -413,6 +414,8 @@ bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
if (phy_id != 0xFFFF && phy_id != 0x0)
valid = true;
+ DEBUGOUT1("PHY ID HIGH is 0x%04X\n", phy_id);
+
return valid;
}
@@ -441,6 +444,9 @@ s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
}
+ DEBUGOUT2("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n",
+ phy_id_high, phy_id_low);
+
return status;
}
@@ -459,7 +465,6 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
- case X550_PHY_ID1:
case X550_PHY_ID2:
case X550_PHY_ID3:
case X540_PHY_ID:
@@ -477,7 +482,7 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
break;
case IXGBE_M88E1500_E_PHY_ID:
case IXGBE_M88E1543_E_PHY_ID:
- phy_type = ixgbe_phy_m88;
+ phy_type = ixgbe_phy_ext_1g_t;
break;
default:
phy_type = ixgbe_phy_unknown;
@@ -528,11 +533,30 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
*/
for (i = 0; i < 30; i++) {
msec_delay(100);
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
- if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
- usec_delay(2);
- break;
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &ctrl);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+ usec_delay(2);
+ break;
+ }
+ } else {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ &ctrl);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
+ usec_delay(2);
+ break;
+ }
}
}
@@ -554,7 +578,7 @@ out:
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 *phy_data)
+ u16 *phy_data)
{
u32 i, data, command;
@@ -576,12 +600,13 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
command = IXGBE_READ_REG(hw, IXGBE_MSCA);
if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
+ break;
}
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n");
+ DEBUGOUT("PHY address command did not complete, returning IXGBE_ERR_PHY\n");
return IXGBE_ERR_PHY;
}
@@ -611,6 +636,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n");
+ DEBUGOUT("PHY read command didn't complete, returning IXGBE_ERR_PHY\n");
return IXGBE_ERR_PHY;
}
@@ -768,91 +794,63 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
- /* Set or unset auto-negotiation 10G advertisement */
- hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
- autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_10GB_FULL))
+ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
- hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
- if (hw->mac.type == ixgbe_mac_X550) {
- if (speed & IXGBE_LINK_SPEED_5GB_FULL) {
- /* Set or unset auto-negotiation 5G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_5GB_FULL)
- autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
- if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) {
- /* Set or unset auto-negotiation 2.5G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_2_5GB_FULL)
- autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ if (hw->mac.type == ixgbe_mac_X550) {
+ /* Set or unset auto-negotiation 5G advertisement */
+ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_5GB_FULL))
+ autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
+
+ /* Set or unset auto-negotiation 2.5G advertisement */
+ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_2_5GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
+ autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
}
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
- /* Set or unset auto-negotiation 1G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ /* Set or unset auto-negotiation 1G advertisement */
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_1GB_FULL))
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
- autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
- if (speed & IXGBE_LINK_SPEED_100_FULL) {
- /* Set or unset auto-negotiation 100M advertisement */
- hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
+ IXGBE_MII_100BASE_T_ADVERTISE_HALF);
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
+ (speed & IXGBE_LINK_SPEED_100_FULL))
+ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
- autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
- IXGBE_MII_100BASE_T_ADVERTISE_HALF);
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
/* Blocked by MNG FW so don't reset PHY */
if (ixgbe_check_reset_blocked(hw))
@@ -1542,16 +1540,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = IXGBE_SUCCESS;
} else {
if (hw->allow_unsupported_sfp == true) {
- EWARN(hw, "WARNING: Intel (R) Network "
- "Connections are quality tested "
- "using Intel (R) Ethernet Optics."
- " Using untested modules is not "
- "supported and may cause unstable"
- " operation or damage to the "
- "module or the adapter. Intel "
- "Corporation is not responsible "
- "for any harm caused by using "
- "untested modules.\n", status);
+ EWARN(hw,
+ "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
+ "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+ "Intel Corporation is not responsible for any harm caused by using untested modules.\n");
status = IXGBE_SUCCESS;
} else {
DEBUGOUT("SFP+ module not supported\n");
@@ -1583,9 +1575,9 @@ err_read_i2c_eeprom:
*
* Determines physical layer capabilities of the current SFP.
*/
-s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u8 comp_codes_10g = 0;
u8 comp_codes_1g = 0;
@@ -1804,16 +1796,10 @@ s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
status = IXGBE_SUCCESS;
} else {
if (hw->allow_unsupported_sfp == true) {
- EWARN(hw, "WARNING: Intel (R) Network "
- "Connections are quality tested "
- "using Intel (R) Ethernet Optics."
- " Using untested modules is not "
- "supported and may cause unstable"
- " operation or damage to the "
- "module or the adapter. Intel "
- "Corporation is not responsible "
- "for any harm caused by using "
- "untested modules.\n", status);
+ EWARN(hw,
+ "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
+ "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+ "Intel Corporation is not responsible for any harm caused by using untested modules.\n");
status = IXGBE_SUCCESS;
} else {
DEBUGOUT("QSFP module not supported\n");
@@ -1838,7 +1824,6 @@ err_read_i2c_eeprom:
return IXGBE_ERR_SFP_NOT_PRESENT;
}
-
/**
* ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
* @hw: pointer to hardware structure
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.h b/drivers/net/ixgbe/base/ixgbe_phy.h
index da14abcd..cf8cadd9 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.h
+++ b/drivers/net/ixgbe/base/ixgbe_phy.h
@@ -92,8 +92,9 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_CS4227_GLOBAL_ID_MSB 1
#define IXGBE_CS4227_SCRATCH 2
#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5
-#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */
-#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */
+#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F
+#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */
+#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */
#define IXGBE_CS4227_RESET_PENDING 0x1357
#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
#define IXGBE_CS4227_RETRIES 15
@@ -154,73 +155,6 @@ POSSIBILITY OF SUCH DAMAGE.
/* SFP+ SFF-8472 Compliance */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
-/* More phy definitions */
-#define IXGBE_M88E1500_COPPER_CTRL 0 /* Page 0 reg */
-#define IXGBE_M88E1500_COPPER_CTRL_RESET (1u << 15)
-#define IXGBE_M88E1500_COPPER_CTRL_AN_EN (1u << 12)
-#define IXGBE_M88E1500_COPPER_CTRL_POWER_DOWN (1u << 11)
-#define IXGBE_M88E1500_COPPER_CTRL_RESTART_AN (1u << 9)
-#define IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX (1u << 8)
-#define IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB (1u << 6)
-#define IXGBE_M88E1500_COPPER_STATUS 1 /* Page 0 reg */
-#define IXGBE_M88E1500_COPPER_STATUS_AN_DONE (1u << 5)
-#define IXGBE_M88E1500_COPPER_AN 4 /* Page 0 reg */
-#define IXGBE_M88E1500_COPPER_AN_AS_PAUSE (1u << 11)
-#define IXGBE_M88E1500_COPPER_AN_PAUSE (1u << 10)
-#define IXGBE_M88E1500_COPPER_AN_T4 (1u << 9)
-#define IXGBE_M88E1500_COPPER_AN_100TX_FD (1u << 8)
-#define IXGBE_M88E1500_COPPER_AN_100TX_HD (1u << 7)
-#define IXGBE_M88E1500_COPPER_AN_10TX_FD (1u << 6)
-#define IXGBE_M88E1500_COPPER_AN_10TX_HD (1u << 5)
-#define IXGBE_M88E1500_COPPER_AN_LP_ABILITY 5 /* Page 0 reg */
-#define IXGBE_M88E1500_COPPER_AN_LP_AS_PAUSE (1u << 11)
-#define IXGBE_M88E1500_COPPER_AN_LP_PAUSE (1u << 10)
-#define IXGBE_M88E1500_1000T_CTRL 9 /* Page 0 reg */
-/* 1=Configure PHY as Master 0=Configure PHY as Slave */
-#define IXGBE_M88E1500_1000T_CTRL_MS_VALUE (1u << 11)
-#define IXGBE_M88E1500_1000T_CTRL_1G_FD (1u << 9)
-/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
-#define IXGBE_M88E1500_1000T_CTRL_MS_ENABLE (1u << 12)
-#define IXGBE_M88E1500_1000T_CTRL_FULL_DUPLEX (1u << 9)
-#define IXGBE_M88E1500_1000T_CTRL_HALF_DUPLEX (1u << 8)
-#define IXGBE_M88E1500_1000T_STATUS 10 /* Page 0 reg */
-#define IXGBE_M88E1500_AUTO_COPPER_SGMII 0x2
-#define IXGBE_M88E1500_AUTO_COPPER_BASEX 0x3
-#define IXGBE_M88E1500_STATUS_LINK (1u << 2) /* Interface Link Bit */
-#define IXGBE_M88E1500_MAC_CTRL_1 16 /* Page 0 reg */
-#define IXGBE_M88E1500_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
-#define IXGBE_M88E1500_MAC_CTRL_1_DWN_SHIFT 12
-#define IXGBE_M88E1500_MAC_CTRL_1_DWN_4X 3u
-#define IXGBE_M88E1500_MAC_CTRL_1_ED_SHIFT 8
-#define IXGBE_M88E1500_MAC_CTRL_1_ED_TM 3u
-#define IXGBE_M88E1500_MAC_CTRL_1_MDIX_SHIFT 5
-#define IXGBE_M88E1500_MAC_CTRL_1_MDIX_AUTO 3u
-#define IXGBE_M88E1500_MAC_CTRL_1_POWER_DOWN (1u << 2)
-#define IXGBE_M88E1500_PHY_SPEC_STATUS 17 /* Page 0 reg */
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_SHIFT 14
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_MASK 3u
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_10 0u
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_100 1u
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_1000 2u
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_DUPLEX (1u << 13)
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_RESOLVED (1u << 11)
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_LINK (1u << 10)
-#define IXGBE_M88E1500_PAGE_ADDR 22 /* All pages reg */
-#define IXGBE_M88E1500_FIBER_CTRL 0 /* Page 1 reg */
-#define IXGBE_M88E1500_FIBER_CTRL_RESET (1u << 15)
-#define IXGBE_M88E1500_FIBER_CTRL_SPEED_LSB (1u << 13)
-#define IXGBE_M88E1500_FIBER_CTRL_AN_EN (1u << 12)
-#define IXGBE_M88E1500_FIBER_CTRL_POWER_DOWN (1u << 11)
-#define IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL (1u << 8)
-#define IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB (1u << 6)
-#define IXGBE_M88E1500_MAC_SPEC_CTRL 16 /* Page 2 reg */
-#define IXGBE_M88E1500_MAC_SPEC_CTRL_POWER_DOWN (1u << 3)
-#define IXGBE_M88E1500_EEE_CTRL_1 0 /* Page 18 reg */
-#define IXGBE_M88E1500_EEE_CTRL_1_MS (1u << 0) /* EEE Master/Slave */
-#define IXGBE_M88E1500_GEN_CTRL 20 /* Page 18 reg */
-#define IXGBE_M88E1500_GEN_CTRL_RESET (1u << 15)
-#define IXGBE_M88E1500_GEN_CTRL_MODE_SGMII_COPPER 1u /* Mode bits 0-2 */
-
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
@@ -258,7 +192,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
diff --git a/drivers/net/ixgbe/base/ixgbe_type.h b/drivers/net/ixgbe/base/ixgbe_type.h
index 4982e035..bda85589 100644
--- a/drivers/net/ixgbe/base/ixgbe_type.h
+++ b/drivers/net/ixgbe/base/ixgbe_type.h
@@ -146,6 +146,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
+#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
@@ -1045,7 +1046,7 @@ struct ixgbe_dmac_config {
#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */
#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_LSWFW 0x15014
+#define IXGBE_LSWFW 0x15F14
#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */
#define IXGBE_BMCIPVAL 0x05060
#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001
@@ -1647,7 +1648,6 @@ struct ixgbe_dmac_config {
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define X540_PHY_ID 0x01540200
-#define X550_PHY_ID1 0x01540220
#define X550_PHY_ID2 0x01540223
#define X550_PHY_ID3 0x01540221
#define X557_PHY_ID 0x01540240
@@ -2622,6 +2622,7 @@ enum {
#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */
#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */
#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */
+#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Enable L3/L4 Tx switch */
#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000
#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000
@@ -3037,6 +3038,7 @@ enum ixgbe_fdir_pballoc_type {
#define FW_CEM_UNUSED_VER 0x0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */
#define FW_READ_SHADOW_RAM_CMD 0x31
#define FW_READ_SHADOW_RAM_LEN 0x6
#define FW_WRITE_SHADOW_RAM_CMD 0x33
@@ -3062,6 +3064,59 @@ enum ixgbe_fdir_pballoc_type {
#define FW_INT_PHY_REQ_LEN 10
#define FW_INT_PHY_REQ_READ 0
#define FW_INT_PHY_REQ_WRITE 1
+#define FW_PHY_ACT_REQ_CMD 5
+#define FW_PHY_ACT_DATA_COUNT 4
+#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT)
+#define FW_PHY_ACT_INIT_PHY 1
+#define FW_PHY_ACT_SETUP_LINK 2
+#define FW_PHY_ACT_LINK_SPEED_10 (1u << 0)
+#define FW_PHY_ACT_LINK_SPEED_100 (1u << 1)
+#define FW_PHY_ACT_LINK_SPEED_1G (1u << 2)
+#define FW_PHY_ACT_LINK_SPEED_2_5G (1u << 3)
+#define FW_PHY_ACT_LINK_SPEED_5G (1u << 4)
+#define FW_PHY_ACT_LINK_SPEED_10G (1u << 5)
+#define FW_PHY_ACT_LINK_SPEED_20G (1u << 6)
+#define FW_PHY_ACT_LINK_SPEED_25G (1u << 7)
+#define FW_PHY_ACT_LINK_SPEED_40G (1u << 8)
+#define FW_PHY_ACT_LINK_SPEED_50G (1u << 9)
+#define FW_PHY_ACT_LINK_SPEED_100G (1u << 10)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3u << \
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u
+#define FW_PHY_ACT_SETUP_LINK_LP (1u << 18)
+#define FW_PHY_ACT_SETUP_LINK_HP (1u << 19)
+#define FW_PHY_ACT_SETUP_LINK_EEE (1u << 20)
+#define FW_PHY_ACT_SETUP_LINK_AN (1u << 22)
+#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN (1u << 0)
+#define FW_PHY_ACT_GET_LINK_INFO 3
+#define FW_PHY_ACT_GET_LINK_INFO_EEE (1u << 19)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_TX (1u << 20)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_RX (1u << 21)
+#define FW_PHY_ACT_GET_LINK_INFO_POWER (1u << 22)
+#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE (1u << 24)
+#define FW_PHY_ACT_GET_LINK_INFO_TEMP (1u << 25)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX (1u << 28)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX (1u << 29)
+#define FW_PHY_ACT_FORCE_LINK_DOWN 4
+#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF (1u << 0)
+#define FW_PHY_ACT_PHY_SW_RESET 5
+#define FW_PHY_ACT_PHY_HW_RESET 6
+#define FW_PHY_ACT_GET_PHY_INFO 7
+#define FW_PHY_ACT_UD_2 0x1002
+#define FW_PHY_ACT_UD_2_10G_KR_EEE (1u << 6)
+#define FW_PHY_ACT_UD_2_10G_KX4_EEE (1u << 5)
+#define FW_PHY_ACT_UD_2_1G_KX_EEE (1u << 4)
+#define FW_PHY_ACT_UD_2_10G_T_EEE (1u << 3)
+#define FW_PHY_ACT_UD_2_1G_T_EEE (1u << 2)
+#define FW_PHY_ACT_UD_2_100M_TX_EEE (1u << 1)
+#define FW_PHY_ACT_RETRIES 50
+#define FW_PHY_INFO_SPEED_MASK 0xFFFu
+#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u
+#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu
/* Host Interface Command Structures */
@@ -3111,6 +3166,16 @@ struct ixgbe_hic_drv_info {
u16 pad2; /* end spacing to ensure length is mult. of dword2 */
};
+struct ixgbe_hic_drv_info2 {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ char driver_string[FW_CEM_DRIVER_VERSION_SIZE];
+};
+
/* These need to be dword aligned */
struct ixgbe_hic_read_shadow_ram {
union ixgbe_hic_hdr2 hdr;
@@ -3159,6 +3224,19 @@ struct ixgbe_hic_internal_phy_resp {
__be32 read_data;
};
+struct ixgbe_hic_phy_activity_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 pad;
+ __le16 activity_id;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
+struct ixgbe_hic_phy_activity_resp {
+ struct ixgbe_hic_hdr hdr;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
#ifdef C99
#pragma pack(pop)
#else
@@ -3332,23 +3410,25 @@ typedef u32 ixgbe_link_speed;
IXGBE_LINK_SPEED_10GB_FULL)
/* Physical layer type */
-typedef u32 ixgbe_physical_layer;
+typedef u64 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
-#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
-#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
-#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004
-#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
-#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
-#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
-#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
-#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
-#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
-#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
-#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
+#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
@@ -3567,7 +3647,9 @@ enum ixgbe_phy_type {
ixgbe_phy_aq,
ixgbe_phy_x550em_kr,
ixgbe_phy_x550em_kx4,
+ ixgbe_phy_x550em_xfi,
ixgbe_phy_x550em_ext_t,
+ ixgbe_phy_ext_1g_t,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
ixgbe_phy_xaui,
@@ -3586,7 +3668,7 @@ enum ixgbe_phy_type {
ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
ixgbe_phy_sgmii,
- ixgbe_phy_m88,
+ ixgbe_phy_fw,
ixgbe_phy_generic
};
@@ -3643,14 +3725,6 @@ enum ixgbe_fc_mode {
ixgbe_fc_default
};
-/* Master/slave control */
-enum ixgbe_ms_type {
- ixgbe_ms_hw_default = 0,
- ixgbe_ms_force_master,
- ixgbe_ms_force_slave,
- ixgbe_ms_auto
-};
-
/* Smart Speed Settings */
#define IXGBE_SMARTSPEED_MAX_RETRIES 3
enum ixgbe_smart_speed {
@@ -3833,7 +3907,7 @@ struct ixgbe_mac_operations {
s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
void (*enable_relaxed_ordering)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
- u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ u64 (*get_supported_physical_layer)(struct ixgbe_hw *);
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
@@ -3875,6 +3949,7 @@ struct ixgbe_mac_operations {
s32 (*led_off)(struct ixgbe_hw *, u32);
s32 (*blink_led_start)(struct ixgbe_hw *, u32);
s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
+ s32 (*init_led_link_act)(struct ixgbe_hw *);
/* RAR, Multicast, VLAN */
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
@@ -3907,7 +3982,8 @@ struct ixgbe_mac_operations {
void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
+ const char *);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map);
@@ -4017,6 +4093,7 @@ struct ixgbe_mac_info {
struct ixgbe_dmac_config dmac_config;
bool set_lben;
u32 max_link_up_time;
+ u8 led_link_act;
};
struct ixgbe_phy_info {
@@ -4032,8 +4109,8 @@ struct ixgbe_phy_info {
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
ixgbe_link_speed speeds_supported;
- enum ixgbe_ms_type ms_type;
- enum ixgbe_ms_type original_ms_type;
+ ixgbe_link_speed eee_speeds_supported;
+ ixgbe_link_speed eee_speeds_advertised;
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
@@ -4257,8 +4334,8 @@ struct ixgbe_hw {
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G (1u << 19)
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G (1u << 20)
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G (1u << 21)
-#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M (1 << 23)
-#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24)
+#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE (1u << 25)
+#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24) /* X552 reg field only */
#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
(0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.c b/drivers/net/ixgbe/base/ixgbe_vf.c
index e9c13f23..b513190a 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.c
+++ b/drivers/net/ixgbe/base/ixgbe_vf.c
@@ -432,6 +432,10 @@ s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
switch (hw->api_version) {
case ixgbe_mbox_api_12:
+ /* New modes were introduced in 1.3 version */
+ if (xcast_mode > IXGBEVF_XCAST_MODE_ALLMULTI)
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ /* Fall through */
case ixgbe_mbox_api_13:
break;
default:
@@ -613,13 +617,29 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
break;
case IXGBE_LINKS_SPEED_1G_82599:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* Since Reserved in older MAC's */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
}
/* if the read failed it could just be a mailbox collision, best wait
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.h b/drivers/net/ixgbe/base/ixgbe_vf.h
index d288f31a..3efffe82 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.h
+++ b/drivers/net/ixgbe/base/ixgbe_vf.h
@@ -34,6 +34,8 @@ POSSIBILITY OF SUCH DAMAGE.
#ifndef _IXGBE_VF_H_
#define _IXGBE_VF_H_
+#include "ixgbe_type.h"
+
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
@@ -114,6 +116,7 @@ struct ixgbevf_hw_stats {
u64 saved_reset_vfmprc;
};
+s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.c b/drivers/net/ixgbe/base/ixgbe_x540.c
index 6e778bc9..0e51813b 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.c
+++ b/drivers/net/ixgbe/base/ixgbe_x540.c
@@ -208,6 +208,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
s32 status;
u32 ctrl, i;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
DEBUGFUNC("ixgbe_reset_hw_X540");
@@ -220,10 +221,17 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
ixgbe_clear_tx_pending(hw);
mac_reset_top:
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
ctrl = IXGBE_CTRL_RST;
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
/* Poll for reset bit to self-clear indicating reset is complete */
for (i = 0; i < 10; i++) {
@@ -321,9 +329,9 @@ out:
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u16 ext_ability = 0;
DEBUGFUNC("ixgbe_get_supported_physical_layer_X540");
@@ -491,7 +499,6 @@ s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
u16 length = 0;
u16 pointer = 0;
u16 word = 0;
- u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
/* Do not use hw->eeprom.ops.read because we do not want to take
@@ -501,14 +508,15 @@ s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
- /* Include 0x0-0x3F in the checksum */
- for (i = 0; i <= checksum_last_word; i++) {
+ /* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the
+ * checksum itself
+ */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
if (ixgbe_read_eerd_generic(hw, i, &word)) {
DEBUGOUT("EEPROM read failed\n");
return IXGBE_ERR_EEPROM;
}
- if (i != IXGBE_EEPROM_CHECKSUM)
- checksum += word;
+ checksum += word;
}
/* Include all data from pointers 0x3, 0x6-0xE. This excludes the
@@ -775,8 +783,10 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
/* SW NVM semaphore bit is used for access to all
* SW_FW_SYNC bits (not just NVM)
*/
- if (ixgbe_get_swfw_sync_semaphore(hw))
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ DEBUGOUT("Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
+ }
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
@@ -798,6 +808,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
if (swmask == IXGBE_GSSR_SW_MNG_SM) {
ERROR_REPORT1(IXGBE_ERROR_POLLING,
"Failed to get SW only semaphore");
+ DEBUGOUT("Failed to get SW only semaphore, returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
}
@@ -806,8 +817,10 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
* of the requested resource(s) while ignoring the corresponding FW/HW
* bits in the SW_FW_SYNC register.
*/
- if (ixgbe_get_swfw_sync_semaphore(hw))
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ DEBUGOUT("Failed to get NVM sempahore and register semaphore while forcefully ignoring FW sempahore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
+ }
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
if (swfw_sync & (fwmask | hwmask)) {
swfw_sync |= swmask;
@@ -829,9 +842,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
rmask |= IXGBE_GSSR_I2C_MASK;
ixgbe_release_swfw_sync_X540(hw, rmask);
ixgbe_release_swfw_sync_semaphore(hw);
+ DEBUGOUT("Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
}
ixgbe_release_swfw_sync_semaphore(hw);
+ DEBUGOUT("Returning error IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
}
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.h b/drivers/net/ixgbe/base/ixgbe_x540.h
index e4baf6ff..8a19ae2e 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.h
+++ b/drivers/net/ixgbe/base/ixgbe_x540.h
@@ -43,7 +43,7 @@ s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool link_up_wait_to_complete);
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c
index acb8140c..674dc144 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.c
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -62,7 +62,7 @@ s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
mac->ops.dmac_config = ixgbe_dmac_config_X550;
mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
- mac->ops.setup_eee = ixgbe_setup_eee_X550;
+ mac->ops.setup_eee = NULL;
mac->ops.set_source_address_pruning =
ixgbe_set_source_address_pruning_X550;
mac->ops.set_ethertype_anti_spoofing =
@@ -83,6 +83,8 @@ s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
mac->ops.mdd_event = ixgbe_mdd_event_X550;
mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
mac->ops.disable_rx = ixgbe_disable_rx_x550;
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_10G_T:
@@ -448,23 +450,161 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
break;
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->phy.type = ixgbe_phy_x550em_xfi;
+ break;
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
hw->phy.type = ixgbe_phy_x550em_kr;
break;
case IXGBE_DEV_ID_X550EM_A_10G_T:
- case IXGBE_DEV_ID_X550EM_A_1G_T:
- case IXGBE_DEV_ID_X550EM_A_1G_T_L:
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ hw->phy.type = ixgbe_phy_fw;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+ break;
default:
break;
}
return IXGBE_SUCCESS;
}
+/**
+ * ixgbe_fw_phy_activity - Perform an activity on a PHY
+ * @hw: pointer to hardware structure
+ * @activity: activity to perform
+ * @data: Pointer to 4 32-bit words of data
+ */
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+ u32 (*data)[FW_PHY_ACT_DATA_COUNT])
+{
+ union {
+ struct ixgbe_hic_phy_activity_req cmd;
+ struct ixgbe_hic_phy_activity_resp rsp;
+ } hic;
+ u16 retries = FW_PHY_ACT_RETRIES;
+ s32 rc;
+ u16 i;
+
+ do {
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
+ hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
+
+ rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
+ sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (rc != IXGBE_SUCCESS)
+ return rc;
+ if (hic.rsp.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS) {
+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
+ (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
+ return IXGBE_SUCCESS;
+ }
+ usec_delay(20);
+ --retries;
+ } while (retries > 0);
+
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+}
+
+static const struct {
+ u16 fw_speed;
+ ixgbe_link_speed phy_speed;
+} ixgbe_fw_map[] = {
+ { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
+ { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
+ { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
+};
+
+/**
+ * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+{
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ u16 phy_speeds;
+ u16 phy_id_lo;
+ s32 rc;
+ u16 i;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
+ if (rc)
+ return rc;
+
+ hw->phy.speeds_supported = 0;
+ phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
+ for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
+ if (phy_speeds & ixgbe_fw_map[i].fw_speed)
+ hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
+ }
+ if (!hw->phy.autoneg_advertised)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
+ phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
+ hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
+ hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
+ if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_phy_fw - Get PHY type based on firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
+{
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+
+ hw->phy.type = ixgbe_phy_fw;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ return ixgbe_get_phy_id_fw(hw);
+}
+
+/**
+ * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
+{
+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+
+ setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
+ return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
+}
+
STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
@@ -601,18 +741,20 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
else
mac->ops.setup_fc = ixgbe_setup_fc_X550em;
+ /* PHY */
+ phy->ops.init = ixgbe_init_phy_ops_X550em;
switch (hw->device_id) {
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_A_KR:
- case IXGBE_DEV_ID_X550EM_A_KR_L:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ mac->ops.setup_fc = NULL;
+ phy->ops.identify = ixgbe_identify_phy_fw;
+ phy->ops.set_phy_power = NULL;
+ phy->ops.get_firmware_version = NULL;
break;
default:
- mac->ops.setup_eee = NULL;
+ phy->ops.identify = ixgbe_identify_phy_x550em;
}
- /* PHY */
- phy->ops.init = ixgbe_init_phy_ops_X550em;
- phy->ops.identify = ixgbe_identify_phy_x550em;
if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
phy->ops.set_phy_power = NULL;
@@ -631,6 +773,92 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+{
+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ s32 rc;
+ u16 i;
+
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return 0;
+
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_rx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_tx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
+ if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
+ setup[0] |= ixgbe_fw_map[i].fw_speed;
+ }
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
+
+ if (hw->phy.eee_speeds_advertised)
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
+ if (rc)
+ return rc;
+ if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
+ return IXGBE_ERR_OVERTEMP;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_fc_autoneg_fw _ Set up flow control for FW-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ */
+static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+{
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ return ixgbe_setup_fw_link(hw);
+}
+
+/**
+ * ixgbe_setup_eee_fw - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enable/disable EEE based on enable_eee flag.
+ * This function controls EEE for firmware-based PHY implementations.
+ */
+static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
+{
+ if (!!hw->phy.eee_speeds_advertised == enable_eee)
+ return IXGBE_SUCCESS;
+ if (enable_eee)
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+ else
+ hw->phy.eee_speeds_advertised = 0;
+ return hw->phy.ops.setup_link(hw);
+}
+
+/**
* ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
* @hw: pointer to hardware structure
*
@@ -671,10 +899,18 @@ s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
break;
}
- if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
- (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
- mac->ops.setup_fc = ixgbe_setup_fc_sgmii_x550em_a;
+ mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
+ mac->ops.setup_eee = ixgbe_setup_eee_fw;
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+ break;
+ default:
+ break;
}
return ret_val;
@@ -709,6 +945,7 @@ s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
ixgbe_write_i2c_combined_generic_unlocked;
link->addr = IXGBE_CS4227;
+
return ret_val;
}
@@ -876,159 +1113,6 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
}
/**
- * ixgbe_enable_eee_x550 - Enable EEE support
- * @hw: pointer to hardware structure
- */
-STATIC s32 ixgbe_enable_eee_x550(struct ixgbe_hw *hw)
-{
- u16 autoneg_eee_reg;
- u32 link_reg;
- s32 status;
-
- if (hw->mac.type == ixgbe_mac_X550) {
- /* Advertise EEE capability */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_eee_reg);
-
- autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
- IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
- IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_eee_reg);
- return IXGBE_SUCCESS;
- }
-
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_A_KR:
- case IXGBE_DEV_ID_X550EM_A_KR_L:
- status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
-
- /* Don't advertise FEC capability when EEE enabled. */
- link_reg &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
-
- status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
- break;
- default:
- break;
- }
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_disable_eee_x550 - Disable EEE support
- * @hw: pointer to hardware structure
- */
-STATIC s32 ixgbe_disable_eee_x550(struct ixgbe_hw *hw)
-{
- u16 autoneg_eee_reg;
- u32 link_reg;
- s32 status;
-
- if (hw->mac.type == ixgbe_mac_X550) {
- /* Disable advertised EEE capability */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_eee_reg);
-
- autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
- IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
- IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_eee_reg);
- return IXGBE_SUCCESS;
- }
-
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_A_KR:
- case IXGBE_DEV_ID_X550EM_A_KR_L:
- status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
-
- /* Advertise FEC capability when EEE is disabled. */
- link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
-
- status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
- break;
- default:
- break;
- }
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_setup_eee_X550 - Enable/disable EEE support
- * @hw: pointer to the HW structure
- * @enable_eee: boolean flag to enable EEE
- *
- * Enable/disable EEE based on enable_eee flag.
- * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
- * are modified.
- *
- **/
-s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
-{
- s32 status;
- u32 eeer;
-
- DEBUGFUNC("ixgbe_setup_eee_X550");
-
- eeer = IXGBE_READ_REG(hw, IXGBE_EEER);
- /* Enable or disable EEE per flag */
- if (enable_eee) {
- eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
-
- /* Not supported on first revision of X550EM_x. */
- if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
- !(IXGBE_FUSES0_REV_MASK &
- IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
- return IXGBE_SUCCESS;
-
- status = ixgbe_enable_eee_x550(hw);
- if (status)
- return status;
- } else {
- eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
-
- status = ixgbe_disable_eee_x550(hw);
- if (status)
- return status;
- }
- IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer);
-
- return IXGBE_SUCCESS;
-}
-
-/**
* ixgbe_set_source_address_pruning_X550 - Enable/Disbale source address pruning
* @hw: pointer to hardware structure
* @enable: enable or disable source address pruning
@@ -1227,13 +1311,20 @@ s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
sizeof(token_cmd),
IXGBE_HI_COMMAND_TIMEOUT,
true);
- if (status)
+ if (status) {
+ DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
+ status);
return status;
+ }
if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
return IXGBE_SUCCESS;
- if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
+ if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
+ DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
+ token_cmd.hdr.cmd_or_resp.ret_status);
return IXGBE_ERR_FW_RESP_INVALID;
+ }
+ DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
return IXGBE_ERR_TOKEN_RETRY;
}
@@ -1492,6 +1583,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
media_type = ixgbe_media_type_backplane;
@@ -1722,11 +1814,11 @@ STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
}
/**
- * ixgbe_setup_sgmii_m88 - Set up link for sgmii with Marvell PHYs
+ * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-STATIC s32 ixgbe_setup_sgmii_m88(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg_wait)
+STATIC s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val;
@@ -1776,7 +1868,7 @@ STATIC s32 ixgbe_setup_sgmii_m88(struct ixgbe_hw *hw, ixgbe_link_speed speed,
return rc;
flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
- flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
@@ -1826,7 +1918,9 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
if (hw->mac.type == ixgbe_mac_X550EM_a) {
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
- mac->ops.setup_link = ixgbe_setup_sgmii_m88;
+ mac->ops.setup_link = ixgbe_setup_sgmii_fw;
+ mac->ops.check_link =
+ ixgbe_check_mac_link_generic;
} else {
mac->ops.setup_link =
ixgbe_setup_mac_link_t_X550em;
@@ -1859,6 +1953,12 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
+ if (hw->phy.type == ixgbe_phy_fw) {
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+ return 0;
+ }
+
/* SFP */
if (hw->phy.media_type == ixgbe_media_type_fiber) {
@@ -1882,11 +1982,7 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
*speed = IXGBE_LINK_SPEED_10GB_FULL;
} else {
switch (hw->phy.type) {
- case ixgbe_phy_m88:
- *speed = IXGBE_LINK_SPEED_1GB_FULL |
- IXGBE_LINK_SPEED_100_FULL |
- IXGBE_LINK_SPEED_10_FULL;
- break;
+ case ixgbe_phy_ext_1g_t:
case ixgbe_phy_sgmii:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
@@ -2024,19 +2120,32 @@ STATIC s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
/* Enable link status change alarm */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
- if (status != IXGBE_SUCCESS)
- return status;
+ /* Enable the LASI interrupts on X552 devices to receive notifications
+ * of the link configurations of the external PHY and correspondingly
+ * support the configuration of the internal iXFI link, since iXFI does
+ * not support auto-negotiation. This is not required for X553 devices
+ * having KR support, which performs auto-negotiations and which is used
+ * as the internal link to the external PHY. Hence adding a check here
+ * to avoid enabling LASI interrupts for X553 devices.
+ */
+ if (hw->mac.type != ixgbe_mac_X550EM_a) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
- reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
+ if (status != IXGBE_SUCCESS)
+ return status;
- status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
- if (status != IXGBE_SUCCESS)
- return status;
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
/* Enable high temperature failure and global fault alarms */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
@@ -2150,262 +2259,47 @@ STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
}
/**
- * ixgbe_setup_m88 - setup m88 PHY
+ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-STATIC s32 ixgbe_setup_m88(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
{
- u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- u16 reg;
+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
s32 rc;
if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
return IXGBE_SUCCESS;
- rc = hw->mac.ops.acquire_swfw_sync(hw, mask);
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
if (rc)
return rc;
+ memset(store, 0, sizeof(store));
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
- if (rc)
- goto out;
- if (reg & IXGBE_M88E1500_COPPER_CTRL_POWER_DOWN) {
- reg &= ~IXGBE_M88E1500_COPPER_CTRL_POWER_DOWN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
- reg);
- }
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_MAC_CTRL_1, 0, &reg);
- if (rc)
- goto out;
- if (reg & IXGBE_M88E1500_MAC_CTRL_1_POWER_DOWN) {
- reg &= ~IXGBE_M88E1500_MAC_CTRL_1_POWER_DOWN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_MAC_CTRL_1, 0,
- reg);
- }
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 2);
- if (rc)
- goto out;
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_MAC_SPEC_CTRL, 0,
- &reg);
- if (rc)
- goto out;
- if (reg & IXGBE_M88E1500_MAC_SPEC_CTRL_POWER_DOWN) {
- reg &= ~IXGBE_M88E1500_MAC_SPEC_CTRL_POWER_DOWN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_MAC_SPEC_CTRL, 0,
- reg);
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0,
- 0);
- if (rc)
- goto out;
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
- &reg);
- if (rc)
- goto out;
- reg |= IXGBE_M88E1500_COPPER_CTRL_RESET;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
- reg);
- usec_delay(50);
- } else {
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0,
- 0);
- if (rc)
- goto out;
- }
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
- if (rc)
- goto out;
-
- if (!(reg & IXGBE_M88E1500_COPPER_CTRL_AN_EN)) {
- reg |= IXGBE_M88E1500_COPPER_CTRL_AN_EN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
- reg);
- }
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_1000T_CTRL, 0, &reg);
- if (rc)
- goto out;
- reg &= ~IXGBE_M88E1500_1000T_CTRL_HALF_DUPLEX;
- reg &= ~IXGBE_M88E1500_1000T_CTRL_FULL_DUPLEX;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- reg |= IXGBE_M88E1500_1000T_CTRL_FULL_DUPLEX;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_1000T_CTRL, 0, reg);
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_AN, 0, &reg);
- if (rc)
- goto out;
- reg &= ~IXGBE_M88E1500_COPPER_AN_T4;
- reg &= ~IXGBE_M88E1500_COPPER_AN_100TX_FD;
- reg &= ~IXGBE_M88E1500_COPPER_AN_100TX_HD;
- reg &= ~IXGBE_M88E1500_COPPER_AN_10TX_FD;
- reg &= ~IXGBE_M88E1500_COPPER_AN_10TX_HD;
-
- /* Flow control auto negotiation configuration was moved from here to
- * the function ixgbe_setup_fc_sgmii_x550em_a()
- */
-
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- reg |= IXGBE_M88E1500_COPPER_AN_100TX_FD;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
- reg |= IXGBE_M88E1500_COPPER_AN_10TX_FD;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_AN, 0, reg);
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
- if (rc)
- goto out;
- reg |= IXGBE_M88E1500_COPPER_CTRL_RESTART_AN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, reg);
-
-
- hw->mac.ops.release_swfw_sync(hw, mask);
- return rc;
-
-out:
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
- hw->mac.ops.release_swfw_sync(hw, mask);
- return rc;
-}
-
-/**
- * ixgbe_reset_phy_m88e1500 - Reset m88e1500 PHY
- * @hw: pointer to hardware structure
- *
- * The PHY token must be held when calling this function.
- */
-static s32 ixgbe_reset_phy_m88e1500(struct ixgbe_hw *hw)
-{
- u16 reg;
- s32 rc;
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
if (rc)
return rc;
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
- if (rc)
- return rc;
-
- reg |= IXGBE_M88E1500_COPPER_CTRL_RESET;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, reg);
-
- usec_delay(10);
-
- return rc;
+ return ixgbe_setup_fw_link(hw);
}
/**
- * ixgbe_reset_phy_m88e1543 - Reset m88e1543 PHY
+ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
* @hw: pointer to hardware structure
- *
- * The PHY token must be held when calling this function.
*/
-static s32 ixgbe_reset_phy_m88e1543(struct ixgbe_hw *hw)
+static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
{
- return hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
-}
-
-/**
- * ixgbe_reset_phy_m88 - Reset m88 PHY
- * @hw: pointer to hardware structure
- */
-STATIC s32 ixgbe_reset_phy_m88(struct ixgbe_hw *hw)
-{
- u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- u16 reg;
+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
s32 rc;
- if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
- return IXGBE_SUCCESS;
-
- rc = hw->mac.ops.acquire_swfw_sync(hw, mask);
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
if (rc)
return rc;
- switch (hw->phy.id) {
- case IXGBE_M88E1500_E_PHY_ID:
- rc = ixgbe_reset_phy_m88e1500(hw);
- break;
- case IXGBE_M88E1543_E_PHY_ID:
- rc = ixgbe_reset_phy_m88e1543(hw);
- break;
- default:
- rc = IXGBE_ERR_PHY;
- break;
+ if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
+ ixgbe_shutdown_fw_phy(hw);
+ return IXGBE_ERR_OVERTEMP;
}
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 1);
- if (rc)
- goto out;
-
- reg = IXGBE_M88E1500_FIBER_CTRL_RESET |
- IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL |
- IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_FIBER_CTRL, 0, reg);
- if (rc)
- goto out;
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 18);
- if (rc)
- goto out;
-
- reg = IXGBE_M88E1500_GEN_CTRL_RESET |
- IXGBE_M88E1500_GEN_CTRL_MODE_SGMII_COPPER;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_GEN_CTRL, 0, reg);
- if (rc)
- goto out;
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 1);
- if (rc)
- goto out;
-
- reg = IXGBE_M88E1500_FIBER_CTRL_RESET |
- IXGBE_M88E1500_FIBER_CTRL_AN_EN |
- IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL |
- IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_FIBER_CTRL, 0, reg);
- if (rc)
- goto out;
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
- if (rc)
- goto out;
-
- reg = (IXGBE_M88E1500_MAC_CTRL_1_DWN_4X <<
- IXGBE_M88E1500_MAC_CTRL_1_DWN_SHIFT) |
- (IXGBE_M88E1500_MAC_CTRL_1_ED_TM <<
- IXGBE_M88E1500_MAC_CTRL_1_ED_SHIFT) |
- (IXGBE_M88E1500_MAC_CTRL_1_MDIX_AUTO <<
- IXGBE_M88E1500_MAC_CTRL_1_MDIX_SHIFT);
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_MAC_CTRL_1, 0, reg);
- if (rc)
- goto out;
-
- reg = IXGBE_M88E1500_COPPER_CTRL_RESET |
- IXGBE_M88E1500_COPPER_CTRL_AN_EN |
- IXGBE_M88E1500_COPPER_CTRL_RESTART_AN |
- IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX |
- IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, reg);
- if (rc)
- goto out;
-
- hw->mac.ops.release_swfw_sync(hw, mask);
-
- /* In case of first reset set advertised speeds to default value */
- if (!hw->phy.autoneg_advertised)
- hw->phy.autoneg_advertised = IXGBE_LINK_SPEED_1GB_FULL |
- IXGBE_LINK_SPEED_100_FULL |
- IXGBE_LINK_SPEED_10_FULL;
-
- return ixgbe_setup_m88(hw);
-
-out:
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
- hw->mac.ops.release_swfw_sync(hw, mask);
- return rc;
+ return IXGBE_SUCCESS;
}
/**
@@ -2450,6 +2344,9 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_phy_ops_X550em");
+ hw->mac.ops.set_lan_id(hw);
+ ixgbe_read_mng_if_sel_x550em(hw);
+
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
@@ -2463,6 +2360,7 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
+ phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
else
@@ -2482,13 +2380,18 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
+ phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
+ break;
default:
break;
}
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
- if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+ ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
return ret_val;
/* Setup function pointers based on detected hardware */
@@ -2508,6 +2411,16 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
+ case ixgbe_phy_ext_1g_t:
+ /* link is managed by FW */
+ phy->ops.setup_link = NULL;
+ break;
+ case ixgbe_phy_x550em_xfi:
+ /* link is managed by HW */
+ phy->ops.setup_link = NULL;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
case ixgbe_phy_x550em_ext_t:
/* If internal link mode is XFI, then setup iXFI internal link,
* else setup KR now.
@@ -2527,9 +2440,9 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
case ixgbe_phy_sgmii:
phy->ops.setup_link = NULL;
break;
- case ixgbe_phy_m88:
- phy->ops.setup_link = ixgbe_setup_m88;
- phy->ops.reset = ixgbe_reset_phy_m88;
+ case ixgbe_phy_fw:
+ phy->ops.setup_link = ixgbe_setup_fw_link;
+ phy->ops.reset = ixgbe_reset_phy_fw;
break;
default:
break;
@@ -2549,8 +2462,6 @@ STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_SGMII:
case IXGBE_DEV_ID_X550EM_A_SGMII_L:
- case IXGBE_DEV_ID_X550EM_A_1G_T:
- case IXGBE_DEV_ID_X550EM_A_1G_T_L:
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_SFP:
case IXGBE_DEV_ID_X550EM_A_QSFP:
@@ -2559,6 +2470,13 @@ STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ /* Select fast MDIO clock speed for these devices */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 |= IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ break;
default:
break;
}
@@ -2579,14 +2497,16 @@ s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
u32 ctrl = 0;
u32 i;
bool link_up = false;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
DEBUGFUNC("ixgbe_reset_hw_X550em");
/* Call adapter stop to disable Tx/Rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
- if (status != IXGBE_SUCCESS)
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
return status;
-
+ }
/* flush pending Tx transactions */
ixgbe_clear_tx_pending(hw);
@@ -2595,14 +2515,23 @@ s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
/* PHY ops must be identified and initialized prior to reset */
status = hw->phy.ops.init(hw);
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (status)
+ DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
+ status);
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ DEBUGOUT("Returning from reset HW due to PHY init failure\n");
return status;
+ }
/* start the external PHY */
if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
status = ixgbe_init_ext_t_x550em(hw);
- if (status)
+ if (status) {
+ DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
+ status);
return status;
+ }
}
/* Setup SFP module if there is one present. */
@@ -2615,8 +2544,10 @@ s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
return status;
/* Reset PHY */
- if (!hw->phy.reset_disable && hw->phy.ops.reset)
- hw->phy.ops.reset(hw);
+ if (!hw->phy.reset_disable && hw->phy.ops.reset) {
+ if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
+ return IXGBE_ERR_OVERTEMP;
+ }
mac_reset_top:
/* Issue global reset to the MAC. Needs to be SW reset if link is up.
@@ -2631,9 +2562,17 @@ mac_reset_top:
ctrl = IXGBE_CTRL_RST;
}
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
/* Poll for reset bit to self-clear meaning reset is complete */
for (i = 0; i < 10; i++) {
@@ -2674,6 +2613,9 @@ mac_reset_top:
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
ixgbe_setup_mux_ctl(hw);
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
+
return status;
}
@@ -2723,14 +2665,16 @@ s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
/**
* ixgbe_setup_kr_x550em - Configure the KR PHY.
* @hw: pointer to hardware structure
- *
- * Configures the integrated KR PHY for X550EM_x.
**/
s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
- if (hw->mac.type != ixgbe_mac_X550EM_x)
+ /* leave link alone for 2.5G */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
return IXGBE_SUCCESS;
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
@@ -2762,53 +2706,18 @@ s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
if (ret_val != IXGBE_SUCCESS)
return ret_val;
- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
- /* Configure CS4227 LINE side to 10G SR. */
- reg_slice = IXGBE_CS4227_LINE_SPARE22_MSB +
- (hw->bus.lan_id << 12);
- reg_val = IXGBE_CS4227_SPEED_10G;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
- reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
- (hw->bus.lan_id << 12);
+ /* Configure CS4227 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
+ (hw->bus.lan_id << 12);
+ if (setup_linear)
+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
-
- /* Configure CS4227 for HOST connection rate then type. */
- reg_slice = IXGBE_CS4227_HOST_SPARE22_MSB +
- (hw->bus.lan_id << 12);
- reg_val = (speed & IXGBE_LINK_SPEED_10GB_FULL) ?
- IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
-
- reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB +
- (hw->bus.lan_id << 12);
- if (setup_linear)
- reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
- reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
-
- /* Setup XFI internal link. */
- ret_val = ixgbe_setup_ixfi_x550em(hw, &speed);
- } else {
- /* Configure internal PHY for KR/KX. */
- ixgbe_setup_kr_speed_x550em(hw, speed);
-
- /* Configure CS4227 LINE side to proper mode. */
- reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
- (hw->bus.lan_id << 12);
- if (setup_linear)
- reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
- reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
- }
+ ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
+ reg_val);
return ret_val;
}
@@ -2922,8 +2831,8 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
return IXGBE_ERR_PHY_ADDR_INVALID;
}
- /* Get external PHY device id */
- ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
+ /* Get external PHY SKU id */
+ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
if (ret_val != IXGBE_SUCCESS)
@@ -2932,7 +2841,7 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
/* When configuring quad port CS4223, the MAC instance is part
* of the slice offset.
*/
- if (reg_phy_ext == IXGBE_CS4223_PHY_ID)
+ if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
slice_offset = (hw->bus.lan_id +
(hw->bus.instance_id << 1)) << 12;
else
@@ -2940,12 +2849,26 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
/* Configure CS4227/CS4223 LINE side to proper mode. */
reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+
+ ret_val = hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
+ (IXGBE_CS4227_EDC_MODE_SR << 1));
+
if (setup_linear)
reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
else
reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
ret_val = hw->phy.ops.write_reg(hw, reg_slice,
IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
+
+ /* Flush previous write with a read */
+ ret_val = hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
}
return ret_val;
}
@@ -3033,6 +2956,10 @@ STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
s32 status;
u32 reg_val;
+ /* iXFI is only supported with X552 */
+ if (mac->type != ixgbe_mac_X550EM_x)
+ return IXGBE_ERR_LINK_SETUP;
+
/* Disable AN and force speed to 10G Serial. */
status = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -3129,7 +3056,8 @@ s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
return IXGBE_ERR_CONFIG;
- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ if (hw->mac.type == ixgbe_mac_X550EM_x &&
+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
/* If link is down, there is no setup necessary so return */
status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
if (status != IXGBE_SUCCESS)
@@ -3745,9 +3673,9 @@ s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u16 ext_ability = 0;
DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
@@ -3756,6 +3684,21 @@ u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
switch (hw->phy.type) {
case ixgbe_phy_x550em_kr:
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ if (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
+ physical_layer =
+ IXGBE_PHYSICAL_LAYER_2500BASE_KX;
+ break;
+ } else if (hw->device_id ==
+ IXGBE_DEV_ID_X550EM_A_KR_L) {
+ physical_layer =
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ }
+ }
+ /* fall through */
+ case ixgbe_phy_x550em_xfi:
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
IXGBE_PHYSICAL_LAYER_1000BASE_KX;
break;
@@ -3772,6 +3715,20 @@ u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
break;
+ case ixgbe_phy_fw:
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
+ break;
+ case ixgbe_phy_sgmii:
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ case ixgbe_phy_ext_1g_t:
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ break;
default:
break;
}
@@ -4071,6 +4028,9 @@ s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
/* This device does not fully support AN. */
hw->fc.disable_fc_autoneg = true;
break;
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->fc.disable_fc_autoneg = true;
+ break;
default:
break;
}
@@ -4177,7 +4137,7 @@ void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
- u16 reg, pcs_an_lp, pcs_an;
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
ixgbe_link_speed speed;
bool link_up;
@@ -4199,34 +4159,20 @@ void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
}
/* Check if auto-negotiation has completed */
- status = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_STATUS,
- IXGBE_MDIO_ZERO_DEV_TYPE, &reg);
+ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
if (status != IXGBE_SUCCESS ||
- (reg & IXGBE_M88E1500_COPPER_STATUS_AN_DONE) == 0) {
+ !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
DEBUGOUT("Auto-Negotiation did not complete\n");
status = IXGBE_ERR_FC_NOT_NEGOTIATED;
goto out;
}
- /* Get the advertized flow control */
- status = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_AN,
- IXGBE_MDIO_ZERO_DEV_TYPE, &pcs_an);
- if (status != IXGBE_SUCCESS)
- goto out;
-
- /* Get link partner's flow control */
- status = hw->phy.ops.read_reg(hw,
- IXGBE_M88E1500_COPPER_AN_LP_ABILITY,
- IXGBE_MDIO_ZERO_DEV_TYPE, &pcs_an_lp);
- if (status != IXGBE_SUCCESS)
- goto out;
-
/* Negotiate the flow control */
- status = ixgbe_negotiate_fc(hw, (u32)pcs_an, (u32)pcs_an_lp,
- IXGBE_M88E1500_COPPER_AN_PAUSE,
- IXGBE_M88E1500_COPPER_AN_AS_PAUSE,
- IXGBE_M88E1500_COPPER_AN_LP_PAUSE,
- IXGBE_M88E1500_COPPER_AN_LP_AS_PAUSE);
+ status = ixgbe_negotiate_fc(hw, info[0], info[0],
+ FW_PHY_ACT_GET_LINK_INFO_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_FC_TX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
out:
if (status == IXGBE_SUCCESS) {
@@ -4238,83 +4184,6 @@ out:
}
/**
- * ixgbe_setup_fc_sgmii_x550em_a - Set up flow control
- * @hw: pointer to hardware structure
- *
- * Called at init time to set up flow control.
- **/
-s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw)
-{
- u16 reg;
- s32 rc;
-
- /* Validate the requested mode */
- if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
- ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
- "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
- }
-
- if (hw->fc.requested_mode == ixgbe_fc_default)
- hw->fc.requested_mode = ixgbe_fc_full;
-
- /* Read contents of the Auto-Negotiation register, page 0 reg 4 */
- rc = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_AN,
- IXGBE_MDIO_ZERO_DEV_TYPE, &reg);
- if (rc)
- goto out;
-
- /* Disable all the settings related to Flow control Auto-negotiation */
- reg &= ~IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
- reg &= ~IXGBE_M88E1500_COPPER_AN_PAUSE;
-
- /* Configure the Asymmetric and symmetric pause according to the user
- * requested mode.
- */
- switch (hw->fc.requested_mode) {
- case ixgbe_fc_full:
- reg |= IXGBE_M88E1500_COPPER_AN_PAUSE;
- reg |= IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
- break;
- case ixgbe_fc_rx_pause:
- reg |= IXGBE_M88E1500_COPPER_AN_PAUSE;
- reg |= IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
- break;
- case ixgbe_fc_tx_pause:
- reg |= IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
- break;
- default:
- break;
- }
-
- /* Write back to the Auto-Negotiation register with newly configured
- * fields
- */
- hw->phy.ops.write_reg(hw, IXGBE_M88E1500_COPPER_AN,
- IXGBE_MDIO_ZERO_DEV_TYPE, reg);
-
- /* In this section of the code we restart Auto-negotiation */
-
- /* Read the CONTROL register, Page 0 reg 0 */
- rc = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_CTRL,
- IXGBE_MDIO_ZERO_DEV_TYPE, &reg);
- if (rc)
- goto out;
-
- /* Set the bit to restart Auto-Neg. The bit to enable Auto-neg is ON
- * by default
- */
- reg |= IXGBE_M88E1500_COPPER_CTRL_RESTART_AN;
-
- /* write the new values to the register to restart Auto-Negotiation */
- hw->phy.ops.write_reg(hw, IXGBE_M88E1500_COPPER_CTRL,
- IXGBE_MDIO_ZERO_DEV_TYPE, reg);
-
-out:
- return rc;
-}
-
-/**
* ixgbe_setup_fc_backplane_x550em_a - Set up flow control
* @hw: pointer to hardware structure
*
@@ -4481,21 +4350,34 @@ STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
status = IXGBE_SUCCESS;
if (hmask)
status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
- if (status)
+ if (status) {
+ DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
+ status);
return status;
+ }
if (!(mask & IXGBE_GSSR_TOKEN_SM))
return IXGBE_SUCCESS;
status = ixgbe_get_phy_token(hw);
+ if (status == IXGBE_ERR_TOKEN_RETRY)
+ DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
+ status);
+
if (status == IXGBE_SUCCESS)
return IXGBE_SUCCESS;
if (hmask)
ixgbe_release_swfw_sync_X540(hw, hmask);
- if (status != IXGBE_ERR_TOKEN_RETRY)
+
+ if (status != IXGBE_ERR_TOKEN_RETRY) {
+ DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
+ status);
return status;
+ }
}
+ DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
+ hw->phy.id);
return status;
}
@@ -4631,8 +4513,10 @@ s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
else
force_speed = IXGBE_LINK_SPEED_1GB_FULL;
- /* If internal link mode is XFI, then setup XFI internal link. */
- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ /* If X552 and internal link mode is XFI, then setup XFI internal link.
+ */
+ if (hw->mac.type == ixgbe_mac_X550EM_x &&
+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
if (status != IXGBE_SUCCESS)
@@ -4655,7 +4539,7 @@ s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
{
u32 status;
- u16 autoneg_status;
+ u16 i, autoneg_status = 0;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
return IXGBE_ERR_CONFIG;
@@ -4668,21 +4552,18 @@ s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
return status;
/* MAC link is up, so check external PHY link.
- * Read this twice back to back to indicate current status.
+ * X557 PHY. Link status is latching low, and can only be used to detect
+ * link drop, and not the current status of the link without performing
+ * back-to-back reads.
*/
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
-
- if (status != IXGBE_SUCCESS)
- return status;
-
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
+ for (i = 0; i < 2; i++) {
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
- if (status != IXGBE_SUCCESS)
- return status;
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
/* If external PHY link is not up, then indicate link not up */
if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
@@ -4729,7 +4610,8 @@ s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
- return IXGBE_SUCCESS;
+ /* Some designs have the LEDs wired to the MAC */
+ return ixgbe_led_on_generic(hw, led_idx);
}
/**
@@ -4753,5 +4635,67 @@ s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
- return IXGBE_SUCCESS;
+ /* Some designs have the LEDs wired to the MAC */
+ return ixgbe_led_off_generic(hw, led_idx);
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ *
+ * Sends driver version number to firmware through the manageability
+ * block. On success return IXGBE_SUCCESS
+ * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len, const char *driver_ver)
+{
+ struct ixgbe_hic_drv_info2 fw_cmd;
+ s32 ret_val = IXGBE_SUCCESS;
+ int i;
+
+ DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
+
+ if ((len == 0) || (driver_ver == NULL) ||
+ (len > sizeof(fw_cmd.driver_string)))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ memcpy(fw_cmd.driver_string, driver_ver, len);
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(fw_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (ret_val != IXGBE_SUCCESS)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = IXGBE_SUCCESS;
+ else
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ return ret_val;
}
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.h b/drivers/net/ixgbe/base/ixgbe_x550.h
index cd4db29c..6d188741 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.h
+++ b/drivers/net/ixgbe/base/ixgbe_x550.h
@@ -57,8 +57,6 @@ s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
u16 data);
-s32 ixgbe_set_eee_X550(struct ixgbe_hw *hw, bool enable_eee);
-s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee);
void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
unsigned int pool);
void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
@@ -67,6 +65,8 @@ s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 data);
s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data);
+s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 ver, u16 len, const char *str);
s32 ixgbe_get_phy_token(struct ixgbe_hw *);
s32 ixgbe_put_phy_token(struct ixgbe_hw *);
s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
@@ -88,7 +88,7 @@ s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw);
s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
void ixgbe_disable_rx_x550(struct ixgbe_hw *hw);
s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed);
s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index bac36e0d..2083cded 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -56,10 +56,12 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
+#include <rte_hash_crc.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
@@ -72,8 +74,6 @@
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"
-#include "rte_pmd_ixgbe.h"
-
/*
* High threshold controlling when to start sending XOFF frames. Must be at
* least 8 bytes less than receive packet buffer size. This value is in units
@@ -154,17 +154,16 @@
#define IXGBE_QDE_STRIP_TAG 0x00000004
#define IXGBE_VTEICR_MASK 0x07
-enum ixgbevf_xcast_modes {
- IXGBEVF_XCAST_MODE_NONE = 0,
- IXGBEVF_XCAST_MODE_MULTI,
- IXGBEVF_XCAST_MODE_ALLMULTI,
-};
-
#define IXGBE_EXVET_VET_EXT_SHIFT 16
#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
@@ -183,16 +182,27 @@ static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
+static int
+ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int size);
static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+static int ixgbe_dev_xstats_get_names_by_id(
+ __rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
uint16_t queue_id,
uint8_t stat_idx,
uint8_t is_rx);
+static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+ size_t fw_size);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
@@ -231,18 +241,21 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
-static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *handle);
+static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
-static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool);
+static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
+static bool is_device_supported(struct rte_eth_dev *dev,
+ struct rte_pci_driver *drv);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
@@ -276,12 +289,6 @@ static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
ether_addr * mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
-static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
- uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
-static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
- uint64_t pool_mask, uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on);
@@ -297,18 +304,13 @@ static void ixgbe_configure_msix(struct rte_eth_dev *dev);
static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate);
-static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk);
-static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool);
+static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
-static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter,
- bool add);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
@@ -318,17 +320,11 @@ static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
-static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *filter,
- bool add);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *filter);
-static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -371,8 +367,7 @@ static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
const struct timespec *timestamp);
-static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static void ixgbevf_dev_interrupt_handler(void *param);
static int ixgbe_dev_l2_tunnel_eth_type_conf
(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
@@ -389,6 +384,8 @@ static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
+static int ixgbe_filter_restore(struct rte_eth_dev *dev);
+static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
/*
* Define VF Stats MACRO for Non "cleared on read" register
@@ -517,6 +514,8 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
.nb_max = IXGBE_MAX_RING_DESC,
.nb_min = IXGBE_MIN_RING_DESC,
.nb_align = IXGBE_TXD_ALIGN,
+ .nb_seg_max = IXGBE_TX_MAX_SEG,
+ .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};
static const struct eth_dev_ops ixgbe_eth_dev_ops = {
@@ -533,10 +532,13 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.link_update = ixgbe_dev_link_update,
.stats_get = ixgbe_dev_stats_get,
.xstats_get = ixgbe_dev_xstats_get,
+ .xstats_get_by_id = ixgbe_dev_xstats_get_by_id,
.stats_reset = ixgbe_dev_stats_reset,
.xstats_reset = ixgbe_dev_xstats_reset,
.xstats_get_names = ixgbe_dev_xstats_get_names,
+ .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
+ .fw_version_get = ixgbe_fw_version_get,
.dev_infos_get = ixgbe_dev_info_get,
.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
.mtu_set = ixgbe_dev_mtu_set,
@@ -554,6 +556,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.rx_queue_release = ixgbe_dev_rx_queue_release,
.rx_queue_count = ixgbe_dev_rx_queue_count,
.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
+ .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
+ .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.dev_led_on = ixgbe_dev_led_on,
@@ -568,12 +572,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
.mirror_rule_set = ixgbe_mirror_rule_set,
.mirror_rule_reset = ixgbe_mirror_rule_reset,
- .set_vf_rx_mode = ixgbe_set_pool_rx_mode,
- .set_vf_rx = ixgbe_set_pool_rx,
- .set_vf_tx = ixgbe_set_pool_tx,
- .set_vf_vlan_filter = ixgbe_set_pool_vlan_filter,
.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
- .set_vf_rate_limit = ixgbe_set_vf_rate_limit,
.reta_update = ixgbe_dev_rss_reta_update,
.reta_query = ixgbe_dev_rss_reta_query,
#ifdef RTE_NIC_BYPASS
@@ -637,6 +636,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
+ .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
+ .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
@@ -744,6 +745,51 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
sizeof(rte_ixgbe_stats_strings[0]))
+/* MACsec statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
+ {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+ out_pkts_untagged)},
+ {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
+ out_pkts_encrypted)},
+ {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
+ out_pkts_protected)},
+ {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
+ out_octets_encrypted)},
+ {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
+ out_octets_protected)},
+ {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_untagged)},
+ {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_badtag)},
+ {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_nosci)},
+ {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_unknownsci)},
+ {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
+ in_octets_decrypted)},
+ {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
+ in_octets_validated)},
+ {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_unchecked)},
+ {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_delayed)},
+ {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_late)},
+ {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_ok)},
+ {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_invalid)},
+ {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_notvalid)},
+ {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_unusedsa)},
+ {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_notusingsa)},
+};
+
+#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
+ sizeof(rte_ixgbe_macsec_strings[0]))
+
/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
@@ -859,6 +905,8 @@ ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
IXGBE_WRITE_FLUSH(hw);
+ if (status == IXGBE_ERR_SFP_NOT_PRESENT)
+ status = IXGBE_SUCCESS;
return status;
}
@@ -1083,7 +1131,8 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct ixgbe_vfta *shadow_vfta =
@@ -1094,6 +1143,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct ixgbe_bw_conf *bw_conf =
+ IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
uint32_t ctrl_ext;
uint16_t csum;
int diag, i;
@@ -1103,6 +1154,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &ixgbe_eth_dev_ops;
eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
/*
* For secondary processes, we don't initialise any further as primary
@@ -1127,9 +1179,9 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- pci_dev = eth_dev->pci_dev;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
@@ -1196,6 +1248,9 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
diag = ixgbe_init_hw(hw);
}
+ if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
+ diag = IXGBE_SUCCESS;
+
if (diag == IXGBE_ERR_EEPROM_VERSION) {
PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
"LOM. Please be aware there may be issues associated "
@@ -1272,20 +1327,37 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
- rte_intr_callback_register(&pci_dev->intr_handle,
- ixgbe_dev_interrupt_handler,
- (void *)eth_dev);
+ rte_intr_callback_register(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
/* enable uio/vfio intr/eventfd mapping */
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
/* enable support intr */
ixgbe_enable_intr(eth_dev);
+ /* initialize filter info */
+ memset(filter_info, 0,
+ sizeof(struct ixgbe_filter_info));
+
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
- memset(filter_info->fivetuple_mask, 0,
- sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+ /* initialize flow director filter list & hash */
+ ixgbe_fdir_filter_init(eth_dev);
+
+ /* initialize l2 tunnel filter list & hash */
+ ixgbe_l2_tn_filter_init(eth_dev);
+
+ TAILQ_INIT(&filter_ntuple_list);
+ TAILQ_INIT(&filter_ethertype_list);
+ TAILQ_INIT(&filter_syn_list);
+ TAILQ_INIT(&filter_fdir_list);
+ TAILQ_INIT(&filter_l2_tunnel_list);
+ TAILQ_INIT(&ixgbe_flow_list);
+
+ /* initialize bandwidth configuration info */
+ memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
return 0;
}
@@ -1293,7 +1365,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
PMD_INIT_FUNC_TRACE();
@@ -1302,7 +1375,6 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
return -EPERM;
hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- pci_dev = eth_dev->pci_dev;
if (hw->adapter_stopped == 0)
ixgbe_dev_close(eth_dev);
@@ -1315,9 +1387,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
ixgbe_swfw_lock_reset(hw);
/* disable uio intr before callback unregister */
- rte_intr_disable(&(pci_dev->intr_handle));
- rte_intr_callback_unregister(&(pci_dev->intr_handle),
- ixgbe_dev_interrupt_handler, (void *)eth_dev);
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
/* uninitialize PF if max_vfs not zero */
ixgbe_pf_host_uninit(eth_dev);
@@ -1328,9 +1400,154 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->data->hash_mac_addrs);
eth_dev->data->hash_mac_addrs = NULL;
+ /* remove all the fdir filters & hash */
+ ixgbe_fdir_filter_uninit(eth_dev);
+
+ /* remove all the L2 tunnel filters & hash */
+ ixgbe_l2_tn_filter_uninit(eth_dev);
+
+ /* Remove all ntuple filters of the device */
+ ixgbe_ntuple_filter_uninit(eth_dev);
+
+ /* clear all the filters list */
+ ixgbe_filterlist_flush();
+
return 0;
}
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct ixgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+ TAILQ_REMOVE(&filter_info->fivetuple_list,
+ p_5tuple,
+ entries);
+ rte_free(p_5tuple);
+ }
+ memset(filter_info->fivetuple_mask, 0,
+ sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+ return 0;
+}
+
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+ struct ixgbe_fdir_filter *fdir_filter;
+
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_handle)
+ rte_hash_free(fdir_info->hash_handle);
+
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ return 0;
+}
+
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+ if (l2_tn_info->hash_map)
+ rte_free(l2_tn_info->hash_map);
+ if (l2_tn_info->hash_handle)
+ rte_hash_free(l2_tn_info->hash_handle);
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
+ l2_tn_filter,
+ entries);
+ rte_free(l2_tn_filter);
+ }
+
+ return 0;
+}
+
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = IXGBE_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(union ixgbe_atr_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", eth_dev->data->name);
+ fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("ixgbe",
+ sizeof(struct ixgbe_fdir_filter *) *
+ IXGBE_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ return -ENOMEM;
+ }
+ fdir_info->mask_added = FALSE;
+
+ return 0;
+}
+
+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+ char l2_tn_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters l2_tn_hash_params = {
+ .name = l2_tn_hash_name,
+ .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
+ .key_len = sizeof(struct ixgbe_l2_tn_key),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&l2_tn_info->l2_tn_list);
+ snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
+ "l2_tn_%s", eth_dev->data->name);
+ l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
+ if (!l2_tn_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
+ return -EINVAL;
+ }
+ l2_tn_info->hash_map = rte_zmalloc("ixgbe",
+ sizeof(struct ixgbe_l2_tn_filter *) *
+ IXGBE_MAX_L2_TN_FILTER_NUM,
+ 0);
+ if (!l2_tn_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for L2 TN hash map!");
+ return -ENOMEM;
+ }
+ l2_tn_info->e_tag_en = FALSE;
+ l2_tn_info->e_tag_fwd_en = FALSE;
+ l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
+
+ return 0;
+}
/*
* Negotiate mailbox API version with the PF.
* After reset API version is always set to the basic one (ixgbe_mbox_api_10).
@@ -1381,7 +1598,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
int diag;
uint32_t tc, tcs;
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct ixgbe_vfta *shadow_vfta =
@@ -1419,9 +1637,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- pci_dev = eth_dev->pci_dev;
-
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
@@ -1513,10 +1730,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
return -EIO;
}
- rte_intr_callback_register(&pci_dev->intr_handle,
- ixgbevf_dev_interrupt_handler,
- (void *)eth_dev);
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_intr_callback_register(intr_handle,
+ ixgbevf_dev_interrupt_handler, eth_dev);
+ rte_intr_enable(intr_handle);
ixgbevf_intr_enable(hw);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
@@ -1531,8 +1747,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
PMD_INIT_FUNC_TRACE();
@@ -1554,40 +1771,52 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
- rte_intr_disable(&pci_dev->intr_handle);
- rte_intr_callback_unregister(&pci_dev->intr_handle,
- ixgbevf_dev_interrupt_handler,
- (void *)eth_dev);
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ ixgbevf_dev_interrupt_handler, eth_dev);
return 0;
}
-static struct eth_driver rte_ixgbe_pmd = {
- .pci_drv = {
- .id_table = pci_id_ixgbe_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_ixgbe_dev_init,
- .eth_dev_uninit = eth_ixgbe_dev_uninit,
- .dev_private_size = sizeof(struct ixgbe_adapter),
+static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
+}
+
+static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
+}
+
+static struct rte_pci_driver rte_ixgbe_pmd = {
+ .id_table = pci_id_ixgbe_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_ixgbe_pci_probe,
+ .remove = eth_ixgbe_pci_remove,
};
+static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
+}
+
+static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
+}
+
/*
* virtual function driver struct
*/
-static struct eth_driver rte_ixgbevf_pmd = {
- .pci_drv = {
- .id_table = pci_id_ixgbevf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_ixgbevf_dev_init,
- .eth_dev_uninit = eth_ixgbevf_dev_uninit,
- .dev_private_size = sizeof(struct ixgbe_adapter),
+static struct rte_pci_driver rte_ixgbevf_pmd = {
+ .id_table = pci_id_ixgbevf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_ixgbevf_pci_probe,
+ .remove = eth_ixgbevf_pci_remove,
};
static int
@@ -1947,6 +2176,8 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
static int
ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+
switch (nb_rx_q) {
case 1:
case 2:
@@ -1960,7 +2191,7 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
}
RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
- RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
return 0;
}
@@ -2180,6 +2411,80 @@ ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
}
}
+int
+ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ struct rte_eth_link link;
+ uint8_t nb_q_per_pool;
+ uint32_t queue_stride;
+ uint32_t queue_idx, idx = 0, vf_idx;
+ uint32_t queue_end;
+ uint16_t total_rate = 0;
+ struct rte_pci_device *pci_dev;
+
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+ rte_eth_link_get_nowait(dev->data->port_id, &link);
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (tx_rate > link.link_speed)
+ return -EINVAL;
+
+ if (q_msk == 0)
+ return 0;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ queue_idx = vf * queue_stride;
+ queue_end = queue_idx + nb_q_per_pool - 1;
+ if (queue_end >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ if (vfinfo) {
+ for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
+ if (vf_idx == vf)
+ continue;
+ for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+ idx++)
+ total_rate += vfinfo[vf_idx].tx_rate[idx];
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ /* Store tx_rate for this vf. */
+ for (idx = 0; idx < nb_q_per_pool; idx++) {
+ if (((uint64_t)0x1 << idx) & q_msk) {
+ if (vfinfo[vf].tx_rate[idx] != tx_rate)
+ vfinfo[vf].tx_rate[idx] = tx_rate;
+ total_rate += tx_rate;
+ }
+ }
+
+ if (total_rate > dev->data->dev_link.link_speed) {
+ /* Reset stored TX rate of the VF if it causes exceed
+ * link speed.
+ */
+ memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+ return -EINVAL;
+ }
+
+ /* Set RTTBCNRC of each queue/pool for vf X */
+ for (; queue_idx <= queue_end; queue_idx++) {
+ if (0x1 & q_msk)
+ ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+ q_msk = q_msk >> 1;
+ }
+
+ return 0;
+}
+
/*
* Configure device link speed and setup link.
* It returns 0 on success.
@@ -2191,7 +2496,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
@@ -2253,7 +2559,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -2291,10 +2597,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
/* Restore vf rate limit */
if (vfinfo != NULL) {
- for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
+ for (vf = 0; vf < pci_dev->max_vfs; vf++)
for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
if (vfinfo[vf].tx_rate[idx] != 0)
- ixgbe_set_vf_rate_limit(dev, vf,
+ ixgbe_set_vf_rate_limit(
+ dev, vf,
vfinfo[vf].tx_rate[idx],
1 << idx);
}
@@ -2366,13 +2673,13 @@ skip_link_setup:
/* check if lsc interrupt is enabled */
if (dev->data->dev_conf.intr_conf.lsc != 0)
ixgbe_dev_lsc_interrupt_setup(dev);
+ ixgbe_dev_macsec_interrupt_setup(dev);
} else {
rte_intr_callback_unregister(intr_handle,
- ixgbe_dev_interrupt_handler,
- (void *)dev);
+ ixgbe_dev_interrupt_handler, dev);
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ " no intr multiplex");
}
/* check if rxq interrupt is enabled */
@@ -2385,6 +2692,8 @@ skip_link_setup:
/* resume enabled intr since hw reset */
ixgbe_enable_intr(dev);
+ ixgbe_l2_tunnel_conf(dev);
+ ixgbe_filter_restore(dev);
return 0;
@@ -2405,10 +2714,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
- struct ixgbe_filter_info *filter_info =
- IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int vf;
PMD_INIT_FUNC_TRACE();
@@ -2423,8 +2730,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
/* stop adapter */
ixgbe_stop_adapter(hw);
- for (vf = 0; vfinfo != NULL &&
- vf < dev->pci_dev->max_vfs; vf++)
+ for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
vfinfo[vf].clear_to_send = false;
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
@@ -2445,17 +2751,6 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
memset(&link, 0, sizeof(link));
rte_ixgbe_dev_atomic_write_link_status(dev, &link);
- /* Remove all ntuple filters of the device */
- for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
- p_5tuple != NULL; p_5tuple = p_5tuple_next) {
- p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
- TAILQ_REMOVE(&filter_info->fivetuple_list,
- p_5tuple, entries);
- rte_free(p_5tuple);
- }
- memset(filter_info->fivetuple_mask, 0,
- sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
-
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
rte_intr_callback_register(intr_handle,
@@ -2557,6 +2852,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
static void
ixgbe_read_stats_registers(struct ixgbe_hw *hw,
struct ixgbe_hw_stats *hw_stats,
+ struct ixgbe_macsec_stats *macsec_stats,
uint64_t *total_missed_rx, uint64_t *total_qbrc,
uint64_t *total_qprc, uint64_t *total_qprdc)
{
@@ -2564,9 +2860,9 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
uint32_t delta_gprc = 0;
unsigned i;
/* Workaround for RX byte count not including CRC bytes when CRC
-+ * strip is enabled. CRC bytes are removed from counters when crc_strip
+ * strip is enabled. CRC bytes are removed from counters when crc_strip
* is disabled.
-+ */
+ */
int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
IXGBE_HLREG0_RXCRCSTRP);
@@ -2726,6 +3022,40 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
/* Flow Director Stats registers */
hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+
+ /* MACsec Stats registers */
+ macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
+ macsec_stats->out_pkts_encrypted +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
+ macsec_stats->out_pkts_protected +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
+ macsec_stats->out_octets_encrypted +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
+ macsec_stats->out_octets_protected +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
+ macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
+ macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
+ macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
+ macsec_stats->in_pkts_unknownsci +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
+ macsec_stats->in_octets_decrypted +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
+ macsec_stats->in_octets_validated +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
+ macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
+ macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
+ macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
+ for (i = 0; i < 2; i++) {
+ macsec_stats->in_pkts_ok +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
+ macsec_stats->in_pkts_invalid +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
+ macsec_stats->in_pkts_notvalid +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
+ }
+ macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
+ macsec_stats->in_pkts_notusingsa +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
}
/*
@@ -2738,6 +3068,9 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_stats *hw_stats =
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
unsigned i;
@@ -2746,8 +3079,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
total_qprc = 0;
total_qprdc = 0;
- ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
- &total_qprc, &total_qprdc);
+ ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+ &total_qbrc, &total_qprc, &total_qprdc);
if (stats == NULL)
return;
@@ -2799,13 +3132,13 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
/* This function calculates the number of xstats based on the current config */
static unsigned
ixgbe_xstats_calc_num(void) {
- return IXGBE_NB_HW_STATS +
+ return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
}
static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
{
const unsigned cnt_stats = ixgbe_xstats_calc_num();
unsigned stat, i, count;
@@ -2826,6 +3159,15 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
count++;
}
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_macsec_strings[i].name);
+ count++;
+ }
+
/* RX Priority Stats */
for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2851,6 +3193,84 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
return cnt_stats;
}
+static int ixgbe_dev_xstats_get_names_by_id(
+ __rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit)
+{
+ if (!ids) {
+ const unsigned int cnt_stats = ixgbe_xstats_calc_num();
+ unsigned int stat, i, count;
+
+ if (xstats_names != NULL) {
+ count = 0;
+
+ /* Note: limit >= cnt_stats checked upstream
+ * in rte_eth_xstats_names()
+ */
+
+ /* Extended stats from ixgbe_hw_stats */
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_stats_strings[i].name);
+ count++;
+ }
+
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_macsec_strings[i].name);
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_priority%u_%s", i,
+ rte_ixgbe_rxq_strings[stat].name);
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_priority%u_%s", i,
+ rte_ixgbe_txq_strings[stat].name);
+ count++;
+ }
+ }
+ }
+ return cnt_stats;
+ }
+
+ uint16_t i;
+ uint16_t size = ixgbe_xstats_calc_num();
+ struct rte_eth_xstat_name xstats_names_copy[size];
+
+ ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
+ size);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= size) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name,
+ xstats_names_copy[ids[i]].name);
+ }
+ return limit;
+}
+
static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names, unsigned limit)
{
@@ -2875,6 +3295,9 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_stats *hw_stats =
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
unsigned i, stat, count = 0;
@@ -2888,8 +3311,8 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
total_qprc = 0;
total_qprdc = 0;
- ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
- &total_qprc, &total_qprdc);
+ ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+ &total_qbrc, &total_qprc, &total_qprdc);
/* If this is a reset xstats is NULL, and we have cleared the
* registers by reading them.
@@ -2906,6 +3329,14 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
count++;
}
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
+ rte_ixgbe_macsec_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
/* RX Priority Stats */
for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2930,11 +3361,105 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
return count;
}
+static int
+ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ if (!ids) {
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_stats *hw_stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(
+ dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
+ uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
+ unsigned int i, stat, count = 0;
+
+ count = ixgbe_xstats_calc_num();
+
+ if (!ids && n < count)
+ return count;
+
+ total_missed_rx = 0;
+ total_qbrc = 0;
+ total_qprc = 0;
+ total_qprdc = 0;
+
+ ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
+ &total_missed_rx, &total_qbrc, &total_qprc,
+ &total_qprdc);
+
+ /* If this is a reset xstats is NULL, and we have cleared the
+ * registers by reading them.
+ */
+ if (!ids && !values)
+ return 0;
+
+ /* Extended stats from ixgbe_hw_stats */
+ count = 0;
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ values[count] = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_stats_strings[i].offset);
+ count++;
+ }
+
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ values[count] = *(uint64_t *)(((char *)macsec_stats) +
+ rte_ixgbe_macsec_strings[i].offset);
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+ values[count] =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_rxq_strings[stat].offset +
+ (sizeof(uint64_t) * i));
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+ values[count] =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_txq_strings[stat].offset +
+ (sizeof(uint64_t) * i));
+ count++;
+ }
+ }
+ return count;
+ }
+
+ uint16_t i;
+ uint16_t size = ixgbe_xstats_calc_num();
+ uint64_t values_copy[size];
+
+ ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= size) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return n;
+}
+
static void
ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
struct ixgbe_hw_stats *stats =
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
unsigned count = ixgbe_xstats_calc_num();
@@ -2943,6 +3468,7 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
/* Reset software totals */
memset(stats, 0, sizeof(*stats));
+ memset(macsec_stats, 0, sizeof(*macsec_stats));
}
static void
@@ -2991,6 +3517,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
/* Extended stats */
for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
+ xstats[i].id = i;
xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbevf_stats_strings[i].offset);
}
@@ -3031,12 +3558,35 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
hw_stats->vfgotc = 0;
}
+static int
+ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u16 eeprom_verh, eeprom_verl;
+ u32 etrack_id;
+ int ret;
+
+ ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
+ ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
+
+ etrack_id = (eeprom_verh << 16) | eeprom_verl;
+ ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
static void
ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
@@ -3052,7 +3602,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
if (hw->mac.type == ixgbe_mac_82598EB)
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
@@ -3073,6 +3623,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
!RTE_ETH_DEV_SRIOV(dev).active)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
if (hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3086,6 +3640,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_SCTP_CKSUM |
DEV_TX_OFFLOAD_TCP_TSO;
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
if (hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3166,15 +3724,17 @@ static void
ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
if (hw->mac.type == ixgbe_mac_82598EB)
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
@@ -3223,8 +3783,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_link link, old;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int link_up;
int diag;
+ u32 speed = 0;
+ bool autoneg = false;
link.link_status = ETH_LINK_DOWN;
link.link_speed = 0;
@@ -3234,6 +3798,14 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
hw->mac.get_link_status = true;
+ if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
+ ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
+ speed = hw->phy.autoneg_advertised;
+ if (!speed)
+ ixgbe_get_link_capabilities(hw, &speed, &autoneg);
+ ixgbe_setup_link(hw, speed, true);
+ }
+
/* check if it needs to wait to complete, if lsc interrupt is enabled */
if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
@@ -3251,10 +3823,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
if (link_up == 0) {
rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+ intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
if (link.link_status == old.link_status)
return -1;
return 0;
}
+ intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -3381,6 +3955,28 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
return 0;
}
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It will be called once only during nic initialized.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ intr->mask |= IXGBE_EICR_LINKSEC;
+
+ return 0;
+}
+
/*
* It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
*
@@ -3415,6 +4011,9 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
if (eicr & IXGBE_EICR_MAILBOX)
intr->flags |= IXGBE_FLAG_MAILBOX;
+ if (eicr & IXGBE_EICR_LINKSEC)
+ intr->flags |= IXGBE_FLAG_MACSEC;
+
if (hw->mac.type == ixgbe_mac_X550EM_x &&
hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
@@ -3436,6 +4035,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
static void
ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
struct rte_eth_link link;
memset(&link, 0, sizeof(link));
@@ -3451,10 +4051,10 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
(int)(dev->data->port_id));
}
PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
- dev->pci_dev->addr.domain,
- dev->pci_dev->addr.bus,
- dev->pci_dev->addr.devid,
- dev->pci_dev->addr.function);
+ pci_dev->addr.domain,
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
}
/*
@@ -3468,7 +4068,8 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
* - On failure, a negative value.
*/
static int
-ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
+ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -3506,19 +4107,20 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
ixgbe_dev_link_status_print(dev);
- intr->mask_original = intr->mask;
- /* only disable lsc interrupt */
- intr->mask &= ~IXGBE_EIMS_LSC;
if (rte_eal_alarm_set(timeout * 1000,
ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
PMD_DRV_LOG(ERR, "Error setting alarm");
- else
- intr->mask = intr->mask_original;
+ else {
+ /* remember original mask */
+ intr->mask_original = intr->mask;
+ /* only disable lsc interrupt */
+ intr->mask &= ~IXGBE_EIMS_LSC;
+ }
}
PMD_DRV_LOG(DEBUG, "enable intr immediately");
ixgbe_enable_intr(dev);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
@@ -3541,12 +4143,16 @@ static void
ixgbe_dev_interrupt_delayed_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t eicr;
+ ixgbe_disable_intr(hw);
+
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_pf_mbx_process(dev);
@@ -3563,9 +4169,19 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
+ if (intr->flags & IXGBE_FLAG_MACSEC) {
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
+ NULL);
+ intr->flags &= ~IXGBE_FLAG_MACSEC;
+ }
+
+ /* restore original mask */
+ intr->mask = intr->mask_original;
+ intr->mask_original = 0;
+
PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
ixgbe_enable_intr(dev);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
}
/**
@@ -3581,13 +4197,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
* void
*/
static void
-ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+ixgbe_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
ixgbe_dev_interrupt_get_status(dev);
- ixgbe_dev_interrupt_action(dev);
+ ixgbe_dev_interrupt_action(dev, dev->intr_handle);
}
static int
@@ -3951,7 +4566,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, sp_reta_size);
+ "(%d)", reta_size, sp_reta_size);
return -EINVAL;
}
@@ -3998,7 +4613,7 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, sp_reta_size);
+ "(%d)", reta_size, sp_reta_size);
return -EINVAL;
}
@@ -4023,14 +4638,15 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
return 0;
}
-static void
+static int
ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t enable_addr = 1;
- ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
+ return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
+ pool, enable_addr);
}
static void
@@ -4044,41 +4660,26 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
static void
ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+
ixgbe_remove_rar(dev, 0);
- ixgbe_add_rar(dev, addr, 0, 0);
+ ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
}
-int
-rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
- struct ether_addr *mac_addr)
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
- struct ixgbe_hw *hw;
- struct ixgbe_vf_info *vfinfo;
- int rar_entry;
- uint8_t *new_mac = (uint8_t *)(mac_addr);
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
+ if (strcmp(dev->data->drv_name, drv->driver.name))
+ return false;
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
- rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ return true;
+}
- if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
- rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
- ETHER_ADDR_LEN);
- return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
- IXGBE_RAH_AV);
- }
- return -EINVAL;
+bool
+is_ixgbe_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_ixgbe_pmd);
}
static int
@@ -4089,6 +4690,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
struct ixgbe_hw *hw;
struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
ixgbe_dev_info_get(dev, &dev_info);
@@ -4099,7 +4701,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (!rx_conf->enable_scatter &&
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -4197,7 +4799,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t intr_vector = 0;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int err, mask = 0;
@@ -4242,7 +4845,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -4260,7 +4863,8 @@ static void
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -4401,15 +5005,15 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
}
-static int
-ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
+int
+ixgbe_vt_check(struct ixgbe_hw *hw)
{
uint32_t reg_val;
- /* we only need to do this if VMDq is enabled */
+ /* if Virtualization Technology is enabled */
reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
- PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
+ PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
return -1;
}
@@ -4547,343 +5151,6 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
return new_val;
}
-static int
-ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
- uint16_t rx_mask, uint8_t on)
-{
- int val = 0;
-
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
- " on 82599 hardware and newer");
- return -ENOTSUP;
- }
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
-
- val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
-
- if (on)
- vmolr |= val;
- else
- vmolr &= ~val;
-
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-
- return 0;
-}
-
-static int
-ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
-{
- uint32_t reg, addr;
- uint32_t val;
- const uint8_t bit1 = 0x1;
-
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
-
- if (pool >= ETH_64_POOLS)
- return -EINVAL;
-
- /* for pool >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
- if (pool >= 32) {
- addr = IXGBE_VFRE(1);
- val = bit1 << (pool - 32);
- } else {
- addr = IXGBE_VFRE(0);
- val = bit1 << pool;
- }
-
- reg = IXGBE_READ_REG(hw, addr);
-
- if (on)
- reg |= val;
- else
- reg &= ~val;
-
- IXGBE_WRITE_REG(hw, addr, reg);
-
- return 0;
-}
-
-static int
-ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
-{
- uint32_t reg, addr;
- uint32_t val;
- const uint8_t bit1 = 0x1;
-
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
-
- if (pool >= ETH_64_POOLS)
- return -EINVAL;
-
- /* for pool >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
- if (pool >= 32) {
- addr = IXGBE_VFTE(1);
- val = bit1 << (pool - 32);
- } else {
- addr = IXGBE_VFTE(0);
- val = bit1 << pool;
- }
-
- reg = IXGBE_READ_REG(hw, addr);
-
- if (on)
- reg |= val;
- else
- reg &= ~val;
-
- IXGBE_WRITE_REG(hw, addr, reg);
-
- return 0;
-}
-
-static int
-ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
- uint64_t pool_mask, uint8_t vlan_on)
-{
- int ret = 0;
- uint16_t pool_idx;
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
- for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
- if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
- ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx,
- vlan_on, false);
- if (ret < 0)
- return ret;
- }
- }
-
- return ret;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct ixgbe_hw *hw;
- struct ixgbe_mac_info *mac;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- mac = &hw->mac;
-
- mac->ops.set_vlan_anti_spoofing(hw, on, vf);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct ixgbe_hw *hw;
- struct ixgbe_mac_info *mac;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- mac = &hw->mac;
- mac->ops.set_mac_anti_spoofing(hw, on, vf);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
-{
- struct ixgbe_hw *hw;
- uint32_t ctrl;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
-
- if (vlan_id > 4095)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
- if (vlan_id) {
- ctrl = vlan_id;
- ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
- } else {
- ctrl = 0;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
-{
- struct ixgbe_hw *hw;
- uint32_t ctrl;
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
- /* enable or disable VMDQ loopback */
- if (on)
- ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
- else
- ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
-
- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
-{
- struct ixgbe_hw *hw;
- uint32_t reg_value;
- int i;
- int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- for (i = 0; i <= num_queues; i++) {
- reg_value = IXGBE_QDE_WRITE |
- (i << IXGBE_QDE_IDX_SHIFT) |
- (on & IXGBE_QDE_ENABLE);
- IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
- }
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct ixgbe_hw *hw;
- uint32_t reg_value;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- /* only support VF's 0 to 63 */
- if ((vf >= dev_info.max_vfs) || (vf > 63))
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
- if (on)
- reg_value |= IXGBE_SRRCTL_DROP_EN;
- else
- reg_value &= ~IXGBE_SRRCTL_DROP_EN;
-
- IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
- uint16_t queues_per_pool;
- uint32_t q;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
-
- /* The PF has 128 queue pairs and in SRIOV configuration
- * those queues will be assigned to VF's, so RXDCTL
- * registers will be dealing with queues which will be
- * assigned to VF's.
- * Let's say we have SRIOV configured with 31 VF's then the
- * first 124 queues 0-123 will be allocated to VF's and only
- * the last 4 queues 123-127 will be assigned to the PF.
- */
-
- queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
-
- for (q = 0; q < queues_per_pool; q++)
- (*dev->dev_ops->vlan_strip_queue_set)(dev,
- q + vf * queues_per_pool, on);
- return 0;
-}
-
#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */
#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */
#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */
@@ -4894,8 +5161,8 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
static int
ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
- struct rte_eth_mirror_conf *mirror_conf,
- uint8_t rule_id, uint8_t on)
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on)
{
uint32_t mr_ctl, vlvf;
uint32_t mp_lsb = 0;
@@ -4918,7 +5185,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint8_t mirror_type = 0;
- if (ixgbe_vmdq_mode_check(hw) < 0)
+ if (ixgbe_vt_check(hw) < 0)
return -ENOTSUP;
if (rule_id >= IXGBE_MAX_MIRROR_RULES)
@@ -4926,22 +5193,28 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
- mirror_conf->rule_type);
+ mirror_conf->rule_type);
return -EINVAL;
}
if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
mirror_type |= IXGBE_MRCTL_VLME;
- /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */
+ /* Check if vlan id is valid and find conresponding VLAN ID
+ * index in VLVF
+ */
for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
- /* search vlan id related pool vlan filter index */
- reg_index = ixgbe_find_vlvf_slot(hw,
- mirror_conf->vlan.vlan_id[i],
- false);
+ /* search vlan id related pool vlan filter
+ * index
+ */
+ reg_index = ixgbe_find_vlvf_slot(
+ hw,
+ mirror_conf->vlan.vlan_id[i],
+ false);
if (reg_index < 0)
return -EINVAL;
- vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
+ vlvf = IXGBE_READ_REG(hw,
+ IXGBE_VLVF(reg_index));
if ((vlvf & IXGBE_VLVF_VIEN) &&
((vlvf & IXGBE_VLVF_VLANID_MASK) ==
mirror_conf->vlan.vlan_id[i]))
@@ -4971,7 +5244,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
}
}
- /*
+ /**
* if enable pool mirror, write related pool mask register,if disable
* pool mirror, clear PFMRVM register
*/
@@ -5001,8 +5274,9 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
mr_ctl |= mirror_type;
mr_ctl &= mirror_rule_mask;
mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
- } else
+ } else {
mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
+ }
mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
@@ -5039,11 +5313,11 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
struct ixgbe_mirror_info *mr_info =
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
- if (ixgbe_vmdq_mode_check(hw) < 0)
+ if (ixgbe_vt_check(hw) < 0)
return -ENOTSUP;
memset(&mr_info->mr_conf[rule_id], 0,
- sizeof(struct rte_eth_mirror_conf));
+ sizeof(struct rte_eth_mirror_conf));
/* clear PFVMCTL register */
IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
@@ -5062,6 +5336,8 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
static int
ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5071,7 +5347,7 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
RTE_SET_USED(queue_id);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
@@ -5094,6 +5370,8 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5113,7 +5391,7 @@ ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
mask &= (1 << (queue_id - 32));
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
}
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
@@ -5217,7 +5495,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
static void
ixgbevf_configure_msix(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t q_idx;
@@ -5250,7 +5529,8 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
static void
ixgbe_configure_msix(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
@@ -5365,62 +5645,7 @@ static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
return 0;
}
-static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vf_info *vfinfo =
- *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
- uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
- uint32_t queue_stride =
- IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
- uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
- uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
- uint16_t total_rate = 0;
-
- if (queue_end >= hw->mac.max_tx_queues)
- return -EINVAL;
-
- if (vfinfo != NULL) {
- for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
- if (vf_idx == vf)
- continue;
- for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
- idx++)
- total_rate += vfinfo[vf_idx].tx_rate[idx];
- }
- } else
- return -EINVAL;
-
- /* Store tx_rate for this vf. */
- for (idx = 0; idx < nb_q_per_pool; idx++) {
- if (((uint64_t)0x1 << idx) & q_msk) {
- if (vfinfo[vf].tx_rate[idx] != tx_rate)
- vfinfo[vf].tx_rate[idx] = tx_rate;
- total_rate += tx_rate;
- }
- }
-
- if (total_rate > dev->data->dev_link.link_speed) {
- /*
- * Reset stored TX rate of the VF if it causes exceed
- * link speed.
- */
- memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
- return -EINVAL;
- }
-
- /* Set RTTBCNRC of each queue/pool for vf X */
- for (; queue_idx <= queue_end; queue_idx++) {
- if (0x1 & q_msk)
- ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
- q_msk = q_msk >> 1;
- }
-
- return 0;
-}
-
-static void
+static int
ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
__attribute__((unused)) uint32_t index,
__attribute__((unused)) uint32_t pool)
@@ -5434,11 +5659,19 @@ ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
* set of PF resources used to store VF MAC addresses.
*/
if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
- return;
+ return -1;
diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
- if (diag == 0)
- return;
- PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
+ if (diag != 0)
+ PMD_DRV_LOG(ERR, "Unable to add MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5],
+ diag);
+ return diag;
}
static void
@@ -5497,28 +5730,24 @@ ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
}
-#define MAC_TYPE_FILTER_SUP(type) do {\
- if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
- (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
- (type) != ixgbe_mac_X550EM_a)\
- return -ENOTSUP;\
-} while (0)
-
-static int
+int
ixgbe_syn_filter_set(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter,
bool add)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t syn_info;
uint32_t synqf;
if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
return -EINVAL;
- synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+ syn_info = filter_info->syn_info;
if (add) {
- if (synqf & IXGBE_SYN_FILTER_ENABLE)
+ if (syn_info & IXGBE_SYN_FILTER_ENABLE)
return -EINVAL;
synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
@@ -5528,10 +5757,13 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev,
else
synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
} else {
- if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
+ synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+ if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
return -ENOENT;
synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
}
+
+ filter_info->syn_info = synqf;
IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
IXGBE_WRITE_FLUSH(hw);
return 0;
@@ -5587,7 +5819,7 @@ ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
(struct rte_eth_syn_filter *)arg);
break;
default:
- PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
ret = -EINVAL;
break;
}
@@ -5609,6 +5841,52 @@ convert_protocol_type(uint8_t protocol_value)
return IXGBE_FILTER_PROTOCOL_NONE;
}
+/* inject a 5-tuple filter to HW */
+static inline void
+ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+ uint32_t ftqf, sdpqf;
+ uint32_t l34timir = 0;
+ uint8_t mask = 0xff;
+
+ i = filter->index;
+
+ sdpqf = (uint32_t)(filter->filter_info.dst_port <<
+ IXGBE_SDPQF_DSTPORT_SHIFT);
+ sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
+
+ ftqf = (uint32_t)(filter->filter_info.proto &
+ IXGBE_FTQF_PROTOCOL_MASK);
+ ftqf |= (uint32_t)((filter->filter_info.priority &
+ IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
+ if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+ mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+ if (filter->filter_info.dst_ip_mask == 0)
+ mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+ if (filter->filter_info.src_port_mask == 0)
+ mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+ if (filter->filter_info.dst_port_mask == 0)
+ mask &= IXGBE_FTQF_DEST_PORT_MASK;
+ if (filter->filter_info.proto_mask == 0)
+ mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+ ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+ ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+ ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+
+ l34timir |= IXGBE_L34T_IMIR_RESERVE;
+ l34timir |= (uint32_t)(filter->queue <<
+ IXGBE_L34T_IMIR_QUEUE_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
+}
+
/*
* add a 5tuple filter
*
@@ -5626,13 +5904,9 @@ static int
ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter)
{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
int i, idx, shift;
- uint32_t ftqf, sdpqf;
- uint32_t l34timir = 0;
- uint8_t mask = 0xff;
/*
* look for an unused 5tuple filter index,
@@ -5655,37 +5929,8 @@ ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
return -ENOSYS;
}
- sdpqf = (uint32_t)(filter->filter_info.dst_port <<
- IXGBE_SDPQF_DSTPORT_SHIFT);
- sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
-
- ftqf = (uint32_t)(filter->filter_info.proto &
- IXGBE_FTQF_PROTOCOL_MASK);
- ftqf |= (uint32_t)((filter->filter_info.priority &
- IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
- if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
- mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
- if (filter->filter_info.dst_ip_mask == 0)
- mask &= IXGBE_FTQF_DEST_ADDR_MASK;
- if (filter->filter_info.src_port_mask == 0)
- mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
- if (filter->filter_info.dst_port_mask == 0)
- mask &= IXGBE_FTQF_DEST_PORT_MASK;
- if (filter->filter_info.proto_mask == 0)
- mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
- ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
- ftqf |= IXGBE_FTQF_POOL_MASK_EN;
- ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
-
- IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
- IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
- IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
- IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+ ixgbe_inject_5tuple_filter(dev, filter);
- l34timir |= IXGBE_L34T_IMIR_RESERVE;
- l34timir |= (uint32_t)(filter->queue <<
- IXGBE_L34T_IMIR_QUEUE_SHIFT);
- IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
return 0;
}
@@ -5722,6 +5967,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct ixgbe_hw *hw;
uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5731,7 +5977,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (!rx_conf->enable_scatter &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -5752,11 +5998,6 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
-#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
- if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
- return -ENOTSUP;\
-} while (0)
-
static inline struct ixgbe_5tuple_filter *
ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
struct ixgbe_5tuple_filter_info *key)
@@ -5864,7 +6105,7 @@ ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
* - On success, zero.
* - On failure, a negative value.
*/
-static int
+int
ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter,
bool add)
@@ -6009,48 +6250,7 @@ ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
return ret;
}
-static inline int
-ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
- uint16_t ethertype)
-{
- int i;
-
- for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
- if (filter_info->ethertype_filters[i] == ethertype &&
- (filter_info->ethertype_mask & (1 << i)))
- return i;
- }
- return -1;
-}
-
-static inline int
-ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
- uint16_t ethertype)
-{
- int i;
-
- for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
- if (!(filter_info->ethertype_mask & (1 << i))) {
- filter_info->ethertype_mask |= 1 << i;
- filter_info->ethertype_filters[i] = ethertype;
- return i;
- }
- }
- return -1;
-}
-
-static inline int
-ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
- uint8_t idx)
-{
- if (idx >= IXGBE_MAX_ETQF_FILTERS)
- return -1;
- filter_info->ethertype_mask &= ~(1 << idx);
- filter_info->ethertype_filters[idx] = 0;
- return idx;
-}
-
-static int
+int
ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter *filter,
bool add)
@@ -6061,6 +6261,7 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
uint32_t etqf = 0;
uint32_t etqs = 0;
int ret;
+ struct ixgbe_ethertype_filter ethertype_filter;
if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
return -EINVAL;
@@ -6094,18 +6295,23 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
}
if (add) {
- ret = ixgbe_ethertype_filter_insert(filter_info,
- filter->ether_type);
- if (ret < 0) {
- PMD_DRV_LOG(ERR, "ethertype filters are full.");
- return -ENOSYS;
- }
etqf = IXGBE_ETQF_FILTER_EN;
etqf |= (uint32_t)filter->ether_type;
etqs |= (uint32_t)((filter->queue <<
IXGBE_ETQS_RX_QUEUE_SHIFT) &
IXGBE_ETQS_RX_QUEUE);
etqs |= IXGBE_ETQS_QUEUE_EN;
+
+ ethertype_filter.ethertype = filter->ether_type;
+ ethertype_filter.etqf = etqf;
+ ethertype_filter.etqs = etqs;
+ ethertype_filter.conf = FALSE;
+ ret = ixgbe_ethertype_filter_insert(filter_info,
+ &ethertype_filter);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype filters are full.");
+ return -ENOSPC;
+ }
} else {
ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
if (ret < 0)
@@ -6201,7 +6407,7 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = -EINVAL;
+ int ret = 0;
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
@@ -6219,9 +6425,15 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_L2_TUNNEL:
ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &ixgbe_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
+ ret = -EINVAL;
break;
}
@@ -6869,12 +7081,15 @@ ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
if (l2_tunnel == NULL)
return -EINVAL;
switch (l2_tunnel->l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
break;
default:
@@ -6913,9 +7128,12 @@ ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = TRUE;
ret = ixgbe_e_tag_enable(hw);
break;
default:
@@ -6954,9 +7172,12 @@ ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = FALSE;
ret = ixgbe_e_tag_disable(hw);
break;
default:
@@ -7045,12 +7266,108 @@ ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
return -EINVAL;
}
+static inline struct ixgbe_l2_tn_filter *
+ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_key *key)
+{
+ int ret;
+
+ ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+ if (ret < 0)
+ return NULL;
+
+ return l2_tn_info->hash_map[ret];
+}
+
+static inline int
+ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_filter *l2_tn_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(l2_tn_info->hash_handle,
+ &l2_tn_filter->key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert L2 tunnel filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+ TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+ return 0;
+}
+
+static inline int
+ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_key *key)
+{
+ int ret;
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+ ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "No such L2 tunnel filter to delete %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_filter = l2_tn_info->hash_map[ret];
+ l2_tn_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+ rte_free(l2_tn_filter);
+
+ return 0;
+}
+
/* Add l2 tunnel filter */
-static int
+int
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel)
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool restore)
{
- int ret = 0;
+ int ret;
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_key key;
+ struct ixgbe_l2_tn_filter *node;
+
+ if (!restore) {
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+
+ node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+
+ if (node) {
+ PMD_DRV_LOG(ERR,
+ "The L2 tunnel filter already exists!");
+ return -EINVAL;
+ }
+
+ node = rte_zmalloc("ixgbe_l2_tn",
+ sizeof(struct ixgbe_l2_tn_filter),
+ 0);
+ if (!node)
+ return -ENOMEM;
+
+ (void)rte_memcpy(&node->key,
+ &key,
+ sizeof(struct ixgbe_l2_tn_key));
+ node->pool = l2_tunnel->pool;
+ ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
+ if (ret < 0) {
+ rte_free(node);
+ return ret;
+ }
+ }
switch (l2_tunnel->l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
@@ -7062,15 +7379,27 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
break;
}
+ if ((!restore) && (ret < 0))
+ (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+
return ret;
}
/* Delete l2 tunnel filter */
-static int
+int
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
- int ret = 0;
+ int ret;
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_key key;
+
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+ ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+ if (ret < 0)
+ return ret;
switch (l2_tunnel->l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
@@ -7096,7 +7425,7 @@ ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = 0;
+ int ret;
if (filter_op == RTE_ETH_FILTER_NOP)
return 0;
@@ -7111,7 +7440,8 @@ ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_ADD:
ret = ixgbe_dev_l2_tunnel_filter_add
(dev,
- (struct rte_eth_l2_tunnel_conf *)arg);
+ (struct rte_eth_l2_tunnel_conf *)arg,
+ FALSE);
break;
case RTE_ETH_FILTER_DELETE:
ret = ixgbe_dev_l2_tunnel_filter_del
@@ -7154,10 +7484,13 @@ ixgbe_dev_l2_tunnel_forwarding_enable
(struct rte_eth_dev *dev,
enum rte_eth_tunnel_type l2_tunnel_type)
{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
int ret = 0;
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = TRUE;
ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
break;
default:
@@ -7175,10 +7508,13 @@ ixgbe_dev_l2_tunnel_forwarding_disable
(struct rte_eth_dev *dev,
enum rte_eth_tunnel_type l2_tunnel_type)
{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
int ret = 0;
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = FALSE;
ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
break;
default:
@@ -7195,15 +7531,16 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf *l2_tunnel,
bool en)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
int ret = 0;
uint32_t vmtir, vmvir;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) {
+ if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
PMD_DRV_LOG(ERR,
"VF id %u should be less than %u",
l2_tunnel->vf_id,
- dev->pci_dev->max_vfs);
+ pci_dev->max_vfs);
return -EINVAL;
}
@@ -7529,7 +7866,7 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+ hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
}
static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
@@ -7584,8 +7921,7 @@ ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
}
static void
-ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+ixgbevf_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
@@ -7593,7 +7929,226 @@ ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
ixgbevf_dev_interrupt_action(dev);
}
-RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
+/**
+ * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the transmit data path and waits for the HW to internally empty
+ * the Tx security block
+ **/
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECTX_POLL 40
+
+ int i;
+ int sectxreg;
+
+ sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+ for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
+ sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+ if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
+ break;
+ /* Use interrupt-safe sleep just in case */
+ usec_delay(1000);
+ }
+
+ /* For informational purposes only */
+ if (i >= IXGBE_MAX_SECTX_POLL)
+ PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
+ "path fully disabled. Continuing with init.");
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the transmit data path.
+ **/
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+ uint32_t sectxreg;
+
+ sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
+
+/* restore n-tuple filter */
+static inline void
+ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter *node;
+
+ TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+ ixgbe_inject_5tuple_filter(dev, node);
+ }
+}
+
+/* restore ethernet type filter */
+static inline void
+ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
+ filter_info->ethertype_filters[i].etqf);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
+ filter_info->ethertype_filters[i].etqs);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* restore SYN filter */
+static inline void
+ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t synqf;
+
+ synqf = filter_info->syn_info;
+
+ if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
+
+/* restore L2 tunnel filter */
+static inline void
+ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *node;
+ struct rte_eth_l2_tunnel_conf l2_tn_conf;
+
+ TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+ l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = node->key.tn_id;
+ l2_tn_conf.pool = node->pool;
+ (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
+ }
+}
+
+static int
+ixgbe_filter_restore(struct rte_eth_dev *dev)
+{
+ ixgbe_ntuple_filter_restore(dev);
+ ixgbe_ethertype_filter_restore(dev);
+ ixgbe_syn_filter_restore(dev);
+ ixgbe_fdir_filter_restore(dev);
+ ixgbe_l2_tn_filter_restore(dev);
+
+ return 0;
+}
+
+static void
+ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (l2_tn_info->e_tag_en)
+ (void)ixgbe_e_tag_enable(hw);
+
+ if (l2_tn_info->e_tag_fwd_en)
+ (void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
+
+ (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
+/* remove all the n-tuple filters */
+void
+ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+ ixgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* remove all the ether type filters */
+void
+ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i) &&
+ !filter_info->ethertype_filters[i].conf) {
+ (void)ixgbe_ethertype_filter_remove(filter_info,
+ (uint8_t)i);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* remove the SYN filter */
+void
+ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
+ filter_info->syn_info = 0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
+
+/* remove all the L2 tunnel filters */
+int
+ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+ struct rte_eth_l2_tunnel_conf l2_tn_conf;
+ int ret = 0;
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
+ l2_tn_conf.pool = l2_tn_filter->pool;
+ ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
-RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio");
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index a4e2996a..b576a6f4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,11 +38,14 @@
#include "base/ixgbe_dcb_82598.h"
#include "ixgbe_bypass.h"
#include <rte_time.h>
+#include <rte_hash.h>
/* need update link, bit flag */
#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define IXGBE_FLAG_MAILBOX (uint32_t)(1 << 1)
#define IXGBE_FLAG_PHY_INTERRUPT (uint32_t)(1 << 2)
+#define IXGBE_FLAG_MACSEC (uint32_t)(1 << 3)
+#define IXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4)
/*
* Defines that were not part of ixgbe_type.h as they are not used by the
@@ -130,10 +133,28 @@
#define IXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define IXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+#define IXGBE_SECTX_MINSECIFG_MASK 0x0000000F
+
+#define IXGBE_MACSEC_PNTHRSH 0xFFFFFE00
+
+#define IXGBE_MAX_FDIR_FILTER_NUM (1024 * 32)
+#define IXGBE_MAX_L2_TN_FILTER_NUM 128
+
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
+ return -ENOTSUP;\
+} while (0)
+
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
+ (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
+ (type) != ixgbe_mac_X550EM_a)\
+ return -ENOTSUP;\
+} while (0)
+
/*
* Information about the fdir mode.
*/
-
struct ixgbe_hw_fdir_mask {
uint16_t vlan_tci_mask;
uint32_t src_ipv4_mask;
@@ -148,6 +169,28 @@ struct ixgbe_hw_fdir_mask {
uint8_t tunnel_type_mask;
};
+struct ixgbe_fdir_filter {
+ TAILQ_ENTRY(ixgbe_fdir_filter) entries;
+ union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t fdirhash; /* hash value for fdir */
+ uint8_t queue; /* assigned rx queue */
+};
+
+/* list of fdir filters */
+TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter);
+
+struct ixgbe_fdir_rule {
+ struct ixgbe_hw_fdir_mask mask;
+ union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/
+ bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */
+ bool b_mask; /* If TRUE, mask has meaning. */
+ enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t soft_id; /* an unique value for this rule */
+ uint8_t queue; /* assigned rx queue */
+};
+
struct ixgbe_hw_fdir_info {
struct ixgbe_hw_fdir_mask mask;
uint8_t flex_bytes_offset;
@@ -159,6 +202,11 @@ struct ixgbe_hw_fdir_info {
uint64_t remove;
uint64_t f_add;
uint64_t f_remove;
+ struct ixgbe_fdir_filter_list fdir_list; /* filter list*/
+ /* store the pointers of the filters, index is the hash value. */
+ struct ixgbe_fdir_filter **hash_map;
+ struct rte_hash *hash_handle; /* cuckoo hash handler */
+ bool mask_added; /* If already got mask from consistent filter */
};
/* structure for interrupt relative data */
@@ -254,16 +302,136 @@ struct ixgbe_5tuple_filter {
(RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
(sizeof(uint32_t) * NBBY))
+struct ixgbe_ethertype_filter {
+ uint16_t ethertype;
+ uint32_t etqf;
+ uint32_t etqs;
+ /**
+ * If this filter is added by configuration,
+ * it should not be removed.
+ */
+ bool conf;
+};
+
/*
* Structure to store filters' info.
*/
struct ixgbe_filter_info {
uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
/* store used ethertype filters*/
- uint16_t ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
+ struct ixgbe_ethertype_filter ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
/* Bit mask for every used 5tuple filter */
uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
struct ixgbe_5tuple_filter_list fivetuple_list;
+ /* store the SYN filter info */
+ uint32_t syn_info;
+};
+
+struct ixgbe_l2_tn_key {
+ enum rte_eth_tunnel_type l2_tn_type;
+ uint32_t tn_id;
+};
+
+struct ixgbe_l2_tn_filter {
+ TAILQ_ENTRY(ixgbe_l2_tn_filter) entries;
+ struct ixgbe_l2_tn_key key;
+ uint32_t pool;
+};
+
+TAILQ_HEAD(ixgbe_l2_tn_filter_list, ixgbe_l2_tn_filter);
+
+struct ixgbe_l2_tn_info {
+ struct ixgbe_l2_tn_filter_list l2_tn_list;
+ struct ixgbe_l2_tn_filter **hash_map;
+ struct rte_hash *hash_handle;
+ bool e_tag_en; /* e-tag enabled */
+ bool e_tag_fwd_en; /* e-tag based forwarding enabled */
+ bool e_tag_ether_type; /* ether type for e-tag */
+};
+
+struct rte_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+/* ntuple filter list structure */
+struct ixgbe_ntuple_filter_ele {
+ TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct ixgbe_ethertype_filter_ele {
+ TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct ixgbe_eth_syn_filter_ele {
+ TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
+ struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct ixgbe_fdir_rule_ele {
+ TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
+ struct ixgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct ixgbe_eth_l2_tunnel_conf_ele {
+ TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
+ struct rte_eth_l2_tunnel_conf filter_info;
+};
+/* ixgbe_flow memory list structure */
+struct ixgbe_flow_mem {
+ TAILQ_ENTRY(ixgbe_flow_mem) entries;
+ struct rte_flow *flow;
+};
+
+TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
+struct ixgbe_ntuple_filter_list filter_ntuple_list;
+TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
+struct ixgbe_ethertype_filter_list filter_ethertype_list;
+TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
+struct ixgbe_syn_filter_list filter_syn_list;
+TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
+struct ixgbe_fdir_rule_filter_list filter_fdir_list;
+TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
+struct ixgbe_flow_mem_list ixgbe_flow_list;
+
+/*
+ * Statistics counters collected by the MACsec
+ */
+struct ixgbe_macsec_stats {
+ /* TX port statistics */
+ uint64_t out_pkts_untagged;
+ uint64_t out_pkts_encrypted;
+ uint64_t out_pkts_protected;
+ uint64_t out_octets_encrypted;
+ uint64_t out_octets_protected;
+
+ /* RX port statistics */
+ uint64_t in_pkts_untagged;
+ uint64_t in_pkts_badtag;
+ uint64_t in_pkts_nosci;
+ uint64_t in_pkts_unknownsci;
+ uint64_t in_octets_decrypted;
+ uint64_t in_octets_validated;
+
+ /* RX SC statistics */
+ uint64_t in_pkts_unchecked;
+ uint64_t in_pkts_delayed;
+ uint64_t in_pkts_late;
+
+ /* RX SA statistics */
+ uint64_t in_pkts_ok;
+ uint64_t in_pkts_invalid;
+ uint64_t in_pkts_notvalid;
+ uint64_t in_pkts_unusedsa;
+ uint64_t in_pkts_notusingsa;
+};
+
+/* The configuration of bandwidth */
+struct ixgbe_bw_conf {
+ uint8_t tc_num; /* Number of TCs. */
};
/*
@@ -272,6 +440,7 @@ struct ixgbe_filter_info {
struct ixgbe_adapter {
struct ixgbe_hw hw;
struct ixgbe_hw_stats stats;
+ struct ixgbe_macsec_stats macsec_stats;
struct ixgbe_hw_fdir_info fdir;
struct ixgbe_interrupt intr;
struct ixgbe_stat_mapping_registers stat_mappings;
@@ -285,6 +454,8 @@ struct ixgbe_adapter {
struct ixgbe_bypass_info bps;
#endif /* RTE_NIC_BYPASS */
struct ixgbe_filter_info filter;
+ struct ixgbe_l2_tn_info l2_tn;
+ struct ixgbe_bw_conf bw_conf;
bool rx_bulk_alloc_allowed;
bool rx_vec_allowed;
@@ -293,12 +464,18 @@ struct ixgbe_adapter {
struct rte_timecounter tx_tstamp_tc;
};
+#define IXGBE_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
+
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
(&((struct ixgbe_adapter *)adapter)->hw)
#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
(&((struct ixgbe_adapter *)adapter)->stats)
+#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->macsec_stats)
+
#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
(&((struct ixgbe_adapter *)adapter)->intr)
@@ -329,6 +506,12 @@ struct ixgbe_adapter {
#define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
(&((struct ixgbe_adapter *)adapter)->filter)
+#define IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->l2_tn)
+
+#define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->bw_conf)
+
/*
* RX/TX function prototypes
*/
@@ -353,7 +536,9 @@ uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
-int ixgbevf_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
@@ -398,6 +583,9 @@ uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
@@ -414,10 +602,31 @@ uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i);
bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);
+int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *filter,
+ bool add);
+int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add);
+int
+ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool restore);
+int
+ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel);
+void ixgbe_filterlist_flush(void);
/*
* Flow director function prototypes
*/
int ixgbe_fdir_configure(struct rte_eth_dev *dev);
+int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct ixgbe_fdir_rule *rule,
+ bool del, bool update);
void ixgbe_configure_dcb(struct rte_eth_dev *dev);
@@ -444,4 +653,74 @@ uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
enum rte_filter_op filter_op, void *arg);
+void ixgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+int ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);
+
+extern const struct rte_flow_ops ixgbe_flow_ops;
+
+void ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
+void ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
+void ixgbe_clear_syn_filter(struct rte_eth_dev *dev);
+int ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
+
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_vt_check(struct ixgbe_hw *hw);
+int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
+bool is_ixgbe_supported(struct rte_eth_dev *dev);
+
+static inline int
+ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
+ uint16_t ethertype)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_filters[i].ethertype == ethertype &&
+ (filter_info->ethertype_mask & (1 << i)))
+ return i;
+ }
+ return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
+ struct ixgbe_ethertype_filter *ethertype_filter)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (!(filter_info->ethertype_mask & (1 << i))) {
+ filter_info->ethertype_mask |= 1 << i;
+ filter_info->ethertype_filters[i].ethertype =
+ ethertype_filter->ethertype;
+ filter_info->ethertype_filters[i].etqf =
+ ethertype_filter->etqf;
+ filter_info->ethertype_filters[i].etqs =
+ ethertype_filter->etqs;
+ filter_info->ethertype_filters[i].conf =
+ ethertype_filter->conf;
+ return i;
+ }
+ }
+ return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
+ uint8_t idx)
+{
+ if (idx >= IXGBE_MAX_ETQF_FILTERS)
+ return -1;
+ filter_info->ethertype_mask &= ~(1 << idx);
+ filter_info->ethertype_filters[idx].ethertype = 0;
+ filter_info->ethertype_filters[idx].etqf = 0;
+ filter_info->ethertype_filters[idx].etqs = 0;
+ filter_info->ethertype_filters[idx].etqs = FALSE;
+ return idx;
+}
+
#endif /* _IXGBE_ETHDEV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 4b81ee37..7f6c7b58 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -43,6 +43,7 @@
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_malloc.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
@@ -111,10 +112,8 @@
static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
static int fdir_set_input_mask(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask);
-static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask);
-static int fdir_set_input_mask_x550(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask);
+static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
+static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
@@ -294,8 +293,7 @@ reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
* but makes use of the rte_fdir_masks structure to see which bits to set.
*/
static int
-fdir_set_input_mask_82599(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+fdir_set_input_mask_82599(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
@@ -307,8 +305,6 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
uint32_t fdirtcpm; /* TCP source and destination port masks. */
uint32_t fdiripv6m; /* IPv6 source and destination masks. */
- uint16_t dst_ipv6m = 0;
- uint16_t src_ipv6m = 0;
volatile uint32_t *reg;
PMD_INIT_FUNC_TRACE();
@@ -319,31 +315,30 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
* a VLAN of 0 is unspecified, so mask that out as well. L4type
* cannot be masked out in this implementation.
*/
- if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0)
+ if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
fdirm |= IXGBE_FDIRM_L4P;
- if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+ if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
/* mask VLAN Priority */
fdirm |= IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+ else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
- else if (input_mask->vlan_tci_mask == 0)
+ else if (info->mask.vlan_tci_mask == 0)
/* mask VLAN ID and Priority */
fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+ else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
return -EINVAL;
}
- info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
/* store the TCP/UDP port masks, bit reversed from port layout */
fdirtcpm = reverse_fdir_bitmasks(
- rte_be_to_cpu_16(input_mask->dst_port_mask),
- rte_be_to_cpu_16(input_mask->src_port_mask));
+ rte_be_to_cpu_16(info->mask.dst_port_mask),
+ rte_be_to_cpu_16(info->mask.src_port_mask));
/* write all the same so that UDP, TCP and SCTP use the same mask
* (little-endian)
@@ -351,30 +346,23 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
- info->mask.src_port_mask = input_mask->src_port_mask;
- info->mask.dst_port_mask = input_mask->dst_port_mask;
/* Store source and destination IPv4 masks (big-endian),
* can not use IXGBE_WRITE_REG.
*/
reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
- *reg = ~(input_mask->ipv4_mask.src_ip);
+ *reg = ~(info->mask.src_ipv4_mask);
reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
- *reg = ~(input_mask->ipv4_mask.dst_ip);
- info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
- info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+ *reg = ~(info->mask.dst_ipv4_mask);
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
/*
* Store source and destination IPv6 masks (bit reversed)
*/
- IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
- IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
- fdiripv6m = (dst_ipv6m << 16) | src_ipv6m;
+ fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
+ info->mask.src_ipv6_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
- info->mask.src_ipv6_mask = src_ipv6m;
- info->mask.dst_ipv6_mask = dst_ipv6m;
}
return IXGBE_SUCCESS;
@@ -385,8 +373,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
* but makes use of the rte_fdir_masks structure to see which bits to set.
*/
static int
-fdir_set_input_mask_x550(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+fdir_set_input_mask_x550(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
@@ -409,20 +396,19 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
/* some bits must be set for mac vlan or tunnel mode */
fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
- if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+ if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
/* mask VLAN Priority */
fdirm |= IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+ else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
- else if (input_mask->vlan_tci_mask == 0)
+ else if (info->mask.vlan_tci_mask == 0)
/* mask VLAN ID and Priority */
fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+ else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
return -EINVAL;
}
- info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
@@ -433,12 +419,11 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
IXGBE_FDIRIP6M_TNI_VNI;
if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
- mac_mask = input_mask->mac_addr_byte_mask;
+ mac_mask = info->mask.mac_addr_byte_mask;
fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
& IXGBE_FDIRIP6M_INNER_MAC;
- info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
- switch (input_mask->tunnel_type_mask) {
+ switch (info->mask.tunnel_type_mask) {
case 0:
/* Mask turnnel type */
fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
@@ -449,10 +434,8 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
return -EINVAL;
}
- info->mask.tunnel_type_mask =
- input_mask->tunnel_type_mask;
- switch (rte_be_to_cpu_32(input_mask->tunnel_id_mask)) {
+ switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
case 0x0:
/* Mask vxlan id */
fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
@@ -466,8 +449,6 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
return -EINVAL;
}
- info->mask.tunnel_id_mask =
- input_mask->tunnel_id_mask;
}
IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
@@ -481,22 +462,90 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
}
static int
-fdir_set_input_mask(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ uint16_t dst_ipv6m = 0;
+ uint16_t src_ipv6m = 0;
+
+ memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+ info->mask.src_port_mask = input_mask->src_port_mask;
+ info->mask.dst_port_mask = input_mask->dst_port_mask;
+ info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
+ info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
+ info->mask.src_ipv6_mask = src_ipv6m;
+ info->mask.dst_ipv6_mask = dst_ipv6m;
+
+ return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+
+ memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+ info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
+ info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
+ info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;
+
+ return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (mode >= RTE_FDIR_MODE_SIGNATURE &&
+ mode <= RTE_FDIR_MODE_PERFECT)
+ return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
+ else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ return ixgbe_fdir_store_input_mask_x550(dev, input_mask);
+
+ PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+ return -ENOTSUP;
+}
+
+int
+ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
{
enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
if (mode >= RTE_FDIR_MODE_SIGNATURE &&
mode <= RTE_FDIR_MODE_PERFECT)
- return fdir_set_input_mask_82599(dev, input_mask);
+ return fdir_set_input_mask_82599(dev);
else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
- return fdir_set_input_mask_x550(dev, input_mask);
+ return fdir_set_input_mask_x550(dev);
PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
return -ENOTSUP;
}
+static int
+fdir_set_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ int ret;
+
+ ret = ixgbe_fdir_store_input_mask(dev, input_mask);
+ if (ret)
+ return ret;
+
+ return ixgbe_fdir_set_input_mask(dev);
+}
+
/*
* ixgbe_check_fdir_flex_conf -check if the flex payload and mask configuration
* arguments are valid
@@ -681,6 +730,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
fdir_filter->input.flow.udp4_flow.src_port;
input->formatted.dst_port =
fdir_filter->input.flow.udp4_flow.dst_port;
+ /* fall-through */
/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
@@ -696,6 +746,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
fdir_filter->input.flow.udp6_flow.src_port;
input->formatted.dst_port =
fdir_filter->input.flow.udp6_flow.dst_port;
+ /* fall-through */
/*for SCTP flow type, port and verify_tag are meaningless in ixgbe.*/
case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
@@ -1075,36 +1126,115 @@ fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
}
-/*
- * ixgbe_add_del_fdir_filter - add or remove a flow diretor filter.
- * @dev: pointer to the structure rte_eth_dev
- * @fdir_filter: fdir filter entry
- * @del: 1 - delete, 0 - add
- * @update: 1 - update
- */
+static inline struct ixgbe_fdir_filter *
+ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
+ union ixgbe_atr_input *key)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+static inline int
+ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
+ struct ixgbe_fdir_filter *fdir_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_handle,
+ &fdir_filter->ixgbe_fdir);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ fdir_info->hash_map[ret] = fdir_filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
+
+ return 0;
+}
+
+static inline int
+ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
+ union ixgbe_atr_input *key)
+{
+ int ret;
+ struct ixgbe_fdir_filter *fdir_filter;
+
+ ret = rte_hash_del_key(fdir_info->hash_handle, key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret);
+ return ret;
+ }
+
+ fdir_filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
+ rte_free(fdir_filter);
+
+ return 0;
+}
+
static int
-ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *fdir_filter,
+ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ struct ixgbe_fdir_rule *rule)
+{
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ int err;
+
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+
+ err = ixgbe_fdir_filter_to_atr_input(fdir_filter,
+ &rule->ixgbe_fdir,
+ fdir_mode);
+ if (err)
+ return err;
+
+ rule->mode = fdir_mode;
+ if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
+ rule->fdirflags = IXGBE_FDIRCMD_DROP;
+ rule->queue = fdir_filter->action.rx_queue;
+ rule->soft_id = fdir_filter->soft_id;
+
+ return 0;
+}
+
+int
+ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct ixgbe_fdir_rule *rule,
bool del,
bool update)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t fdircmd_flags;
uint32_t fdirhash;
- union ixgbe_atr_input input;
uint8_t queue;
bool is_perfect = FALSE;
int err;
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ struct ixgbe_fdir_filter *node;
+ bool add_node = FALSE;
- if (fdir_mode == RTE_FDIR_MODE_NONE)
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
return -ENOTSUP;
/*
* Sanity check for x550.
- * When adding a new filter with flow type set to IPv4-other,
+ * When adding a new filter with flow type set to IPv4,
* the flow director mask should be configed before,
* and the L4 protocol and ports are masked.
*/
@@ -1112,12 +1242,12 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
(hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a) &&
- (fdir_filter->input.flow_type ==
- RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
+ (rule->ixgbe_fdir.formatted.flow_type ==
+ IXGBE_ATR_FLOW_TYPE_IPV4) &&
(info->mask.src_port_mask != 0 ||
info->mask.dst_port_mask != 0)) {
PMD_DRV_LOG(ERR, "By this device,"
- " IPv4-other is not supported without"
+ " IPv4 is not supported without"
" L4 protocol and ports masked!");
return -ENOTSUP;
}
@@ -1126,28 +1256,26 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
is_perfect = TRUE;
- memset(&input, 0, sizeof(input));
-
- err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
- fdir_mode);
- if (err)
- return err;
-
if (is_perfect) {
- if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
+ if (rule->ixgbe_fdir.formatted.flow_type &
+ IXGBE_ATR_L4TYPE_IPV6_MASK) {
PMD_DRV_LOG(ERR, "IPv6 is not supported in"
" perfect mode!");
return -ENOTSUP;
}
- fdirhash = atr_compute_perfect_hash_82599(&input,
+ fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
dev->data->dev_conf.fdir_conf.pballoc);
- fdirhash |= fdir_filter->soft_id <<
+ fdirhash |= rule->soft_id <<
IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
} else
- fdirhash = atr_compute_sig_hash_82599(&input,
+ fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
dev->data->dev_conf.fdir_conf.pballoc);
if (del) {
+ err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
+ if (err < 0)
+ return err;
+
err = fdir_erase_filter_82599(hw, fdirhash);
if (err < 0)
PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
@@ -1157,7 +1285,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
}
/* add or update an fdir filter*/
fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
- if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
+ if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
if (is_perfect) {
queue = dev->data->dev_conf.fdir_conf.drop_queue;
fdircmd_flags |= IXGBE_FDIRCMD_DROP;
@@ -1166,28 +1294,86 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
" signature mode.");
return -EINVAL;
}
- } else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
- fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
- queue = (uint8_t)fdir_filter->action.rx_queue;
+ } else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
+ queue = (uint8_t)rule->queue;
else
return -EINVAL;
+ node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
+ if (node) {
+ if (update) {
+ node->fdirflags = fdircmd_flags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+ } else {
+ PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
+ return -EINVAL;
+ }
+ } else {
+ add_node = TRUE;
+ node = rte_zmalloc("ixgbe_fdir",
+ sizeof(struct ixgbe_fdir_filter),
+ 0);
+ if (!node)
+ return -ENOMEM;
+ (void)rte_memcpy(&node->ixgbe_fdir,
+ &rule->ixgbe_fdir,
+ sizeof(union ixgbe_atr_input));
+ node->fdirflags = fdircmd_flags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+
+ err = ixgbe_insert_fdir_filter(info, node);
+ if (err < 0) {
+ rte_free(node);
+ return err;
+ }
+ }
+
if (is_perfect) {
- err = fdir_write_perfect_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash,
- fdir_mode);
+ err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
+ queue, fdircmd_flags,
+ fdirhash, fdir_mode);
} else {
- err = fdir_add_signature_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash);
+ err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
+ queue, fdircmd_flags,
+ fdirhash);
}
- if (err < 0)
+ if (err < 0) {
PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
- else
+
+ if (add_node)
+ (void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
+ } else {
PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
+ }
return err;
}
+/* ixgbe_add_del_fdir_filter - add or remove a flow diretor filter.
+ * @dev: pointer to the structure rte_eth_dev
+ * @fdir_filter: fdir filter entry
+ * @del: 1 - delete, 0 - add
+ * @update: 1 - update
+ */
+static int
+ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ bool del,
+ bool update)
+{
+ struct ixgbe_fdir_rule rule;
+ int err;
+
+ err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule);
+
+ if (err)
+ return err;
+
+ return ixgbe_fdir_filter_program(dev, &rule, del, update);
+}
+
static int
ixgbe_fdir_flush(struct rte_eth_dev *dev)
{
@@ -1378,3 +1564,66 @@ ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* restore flow director filter */
+void
+ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_fdir_filter *node;
+ bool is_perfect = FALSE;
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ is_perfect = TRUE;
+
+ if (is_perfect) {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_write_perfect_filter_82599(hw,
+ &node->ixgbe_fdir,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash,
+ fdir_mode);
+ }
+ } else {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_add_signature_filter_82599(hw,
+ &node->ixgbe_fdir,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash);
+ }
+ }
+}
+
+/* remove all the flow director filters */
+int
+ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_fdir_filter *fdir_filter;
+ struct ixgbe_fdir_filter *filter_flag;
+ int ret = 0;
+
+ /* flush flow director */
+ rte_hash_reset(fdir_info->hash_handle);
+ memset(fdir_info->hash_map, 0,
+ sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
+ filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ if (filter_flag != NULL)
+ ret = ixgbe_fdir_flush(dev);
+
+ return ret;
+}
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
new file mode 100644
index 00000000..da7b1cc8
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -0,0 +1,2778 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+#include <rte_hash_crc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "ixgbe_logs.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_vf.h"
+#include "base/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_bypass.h"
+#include "ixgbe_rxtx.h"
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_phy.h"
+#include "rte_pmd_ixgbe.h"
+
+
+#define IXGBE_MIN_N_TUPLE_PRIO 1
+#define IXGBE_MAX_N_TUPLE_PRIO 7
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
+ do { \
+ item = pattern + index;\
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
+ index++; \
+ item = pattern + index; \
+ } \
+ } while (0)
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index)\
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
+
+/**
+ * Please aware there's an asumption for all the parsers.
+ * rte_flow_item is using big endian, rte_flow_attr and
+ * rte_flow_action are using CPU order.
+ * Because the pattern is used to describe the packets,
+ * normally the packets should use network order.
+ */
+
+/**
+ * Parse the rule to see if it is a n-tuple rule.
+ * And get the n-tuple filter info BTW.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * next_proto_id 17 0xFF
+ * UDP/TCP/ src_port 80 0xFFFF
+ * SCTP dst_port 80 0xFFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item can be MAC or IPv4 */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+
+ }
+ /* if the first item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ /* get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+
+ }
+
+ ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ /**
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+
+ /* check if the next not void item is TCP or UDP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* get the TCP/UDP info */
+ if (!item->spec || !item->mask) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+ /**
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ if (tcp_mask->hdr.tcp_flags == 0xFF) {
+ filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else if (!tcp_mask->hdr.tcp_flags) {
+ filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ filter->src_port_mask = udp_mask->hdr.src_port;
+
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ filter->src_port = udp_spec->hdr.src_port;
+ } else {
+ sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /**
+ * n-tuple only supports forwarding,
+ * check if the first not void action is QUEUE.
+ */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ item, "Not supported action.");
+ return -rte_errno;
+ }
+ filter->queue =
+ ((const struct rte_flow_action_queue *)act->conf)->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+ filter->priority = (uint16_t)attr->priority;
+ if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+ attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
+ filter->priority = 1;
+
+ return 0;
+}
+
+/* a specific function for ixgbe because the flags is specific */
+static int
+ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ /* Ixgbe doesn't support tcp flags. */
+ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* Ixgbe doesn't support many priorities. */
+ if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+ filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Priority not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+ filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+ filter->priority < IXGBE_5TUPLE_MIN_PRI)
+ return -rte_errno;
+
+ /* fixed value for ixgbe */
+ filter->flags = RTE_5TUPLE_FLAGS;
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a ethertype rule.
+ * And get the ethertype filter info BTW.
+ * pattern:
+ * The first not void item can be ETH.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH type 0x0807 0xFFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* Parse pattern */
+ index = 0;
+
+ /* The first non-void item should be MAC. */
+ item = pattern + index;
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
+ index++;
+ item = pattern + index;
+ }
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ether address mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ /* Check if the next non-void item is END. */
+ index++;
+ item = pattern + index;
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
+ index++;
+ item = pattern + index;
+ }
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter.");
+ return -rte_errno;
+ }
+
+ /* Parse action */
+
+ index = 0;
+ /* Check if the first non-void action is QUEUE or DROP. */
+ act = actions + index;
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ index++;
+ act = actions + index;
+ }
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ act = actions + index;
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ index++;
+ act = actions + index;
+ }
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* Parse attr */
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_ethertype_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ /* Ixgbe doesn't support MAC address. */
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue index much too big");
+ return -rte_errno;
+ }
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "IPv4/IPv6 not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "mac compare is unsupported");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "drop option is unsupported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a TCP SYN rule.
+ * And get the TCP SYN filter info BTW.
+ * pattern:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4 or IPV6.
+ * The third not void item must be TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * TCP tcp_flags 0x02 0xFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* if the item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN address mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is IPv4 or IPv6 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* if the item is IP, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is TCP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. Only support SYN. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+ tcp_mask->hdr.src_port ||
+ tcp_mask->hdr.dst_port ||
+ tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Support 2 priorities, the lowest or highest. */
+ if (!attr->priority) {
+ filter->hig_pri = 0;
+ } else if (attr->priority == (uint32_t)~0U) {
+ filter->hig_pri = 1;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_syn_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a L2 tunnel rule.
+ * And get the L2 tunnel filter info BTW.
+ * Only support E-tag now.
+ * pattern:
+ * The first not void item can be E_TAG.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * E_TAG grp 0x1 0x3
+ e_cid_base 0x309 0xFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_e_tag *e_tag_spec;
+ const struct rte_flow_item_e_tag *e_tag_mask;
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+ /* parse pattern */
+ index = 0;
+
+ /* The first not void item should be e-tag. */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ if (!item->spec || !item->mask) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
+ e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
+
+ /* Only care about GRP and E cid base. */
+ if (e_tag_mask->epcp_edei_in_ecid_b ||
+ e_tag_mask->in_ecid_e ||
+ e_tag_mask->ecid_e ||
+ e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+ /**
+ * grp and e_cid_base are bit fields and only use 14 bits.
+ * e-tag id is taken as little endian by HW.
+ */
+ filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->priority) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->pool = act_q->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *l2_tn_filter,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ret = cons_parse_l2_tn_filter(attr, pattern,
+ actions, l2_tn_filter, error);
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ return ret;
+}
+
+/* Parse to get the attr and action info of flow director rule. */
+static int
+ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark;
+ uint32_t index;
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->priority) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ rule->queue = act_q->index;
+ } else { /* drop */
+ rule->fdirflags = IXGBE_FDIRCMD_DROP;
+ }
+
+ /* check if the next not void item is MARK */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
+ (act->type != RTE_FLOW_ACTION_TYPE_END)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rule->soft_id = 0;
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark = (const struct rte_flow_action_mark *)act->conf;
+ rule->soft_id = mark->id;
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ }
+
+ /* check if the next not void item is END */
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
+ * And get the flow director filter info BTW.
+ * UDP/TCP/SCTP PATTERN:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP or SCTP.
+ * The next not void item must be END.
+ * MAC VLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be MAC VLAN.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * UDP/TCP/SCTP pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * UDP/TCP/SCTP src_port 80 0xFFFF
+ * dst_port 80 0xFFFF
+ * END
+ * MAC VLAN pattern example:
+ * ITEM Spec Mask
+ * ETH dst_addr
+ {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
+ 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
+ * END
+ * Other members in mask and spec should set to 0x00.
+ * Item->last should be NULL.
+ */
+static int
+ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+
+ uint32_t index, j;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /**
+ * Some fields may not be provided. Set spec to 0 and mask to default
+ * value. So, we need not do anything for the not provided fields later.
+ */
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
+ rule->mask.vlan_tci_mask = 0;
+
+ /* parse pattern */
+ index = 0;
+
+ /**
+ * The first not void item should be
+ * MAC or IPv4 or TCP or UDP or SCTP.
+ */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mode = RTE_FDIR_MODE_PERFECT;
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /**
+ * Only support vlan and dst MAC address,
+ * others should be masked.
+ */
+ if (item->spec && !item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+
+ /* Get the dst MAC. */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ rule->ixgbe_fdir.formatted.inner_mac[j] =
+ eth_spec->dst.addr_bytes[j];
+ }
+ }
+
+
+ if (item->mask) {
+ /* If ethernet has meaning, it means MAC VLAN mode. */
+ rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
+ rule->b_mask = TRUE;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Ether type should be masked. */
+ if (eth_mask->type) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /**
+ * src MAC address must be masked,
+ * and don't support dst MAC address mask.
+ */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j] ||
+ eth_mask->dst.addr_bytes[j] != 0xFF) {
+ memset(rule, 0,
+ sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* When no VLAN, considered as full mask. */
+ rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+ }
+ /*** If both spec and mask are item,
+ * it means don't care about ETH.
+ * Do nothing.
+ */
+
+ /**
+ * Check if the next not void item is vlan or ipv4.
+ * IPv6 is not supported.
+ */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ } else {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ if (!(item->spec && item->mask)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+ rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+
+ rule->mask.vlan_tci_mask = vlan_mask->tci;
+ rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
+ /* More than one tags are not supported. */
+
+ /**
+ * Check if the next not void item is not vlan.
+ */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the IP info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_IPV4;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst addresses,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ ipv4_mask =
+ (const struct rte_flow_item_ipv4 *)item->mask;
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.next_proto_id ||
+ ipv4_mask->hdr.hdr_checksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
+ rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv4_spec =
+ (const struct rte_flow_item_ipv4 *)item->spec;
+ rule->ixgbe_fdir.formatted.dst_ip[0] =
+ ipv4_spec->hdr.dst_addr;
+ rule->ixgbe_fdir.formatted.src_ip[0] =
+ ipv4_spec->hdr.src_addr;
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or END.
+ */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_TCPV4;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = tcp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ tcp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ }
+
+ /* Get the UDP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_UDPV4;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = udp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ udp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ }
+
+ /* Get the SCTP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = sctp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ sctp_spec =
+ (const struct rte_flow_item_sctp *)item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ sctp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ sctp_spec->hdr.dst_port;
+ }
+ }
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
+
+#define NVGRE_PROTOCOL 0x6558
+
+/**
+ * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
+ * And get the flow director filter info BTW.
+ * VxLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/ IPV6.
+ * The third not void item must be NVGRE.
+ * The next not void item must be END.
+ * NVGRE PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/ IPV6.
+ * The third not void item must be NVGRE.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * VxLAN pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * UDP NULL NULL
+ * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
+ * END
+ * NEGRV pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * NVGRE protocol 0x6558 0xFFFF
+ * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ uint32_t index, j;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /**
+ * Some fields may not be provided. Set spec to 0 and mask to default
+ * value. So, we need not do anything for the not provided fields later.
+ */
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
+ rule->mask.vlan_tci_mask = 0;
+
+ /* parse pattern */
+ index = 0;
+
+ /**
+ * The first not void item should be
+ * MAC or IPv4 or IPv6 or UDP or VxLAN.
+ */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
+ item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
+
+ /* Skip MAC. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is IPv4 or IPv6. */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is UDP or NVGRE. */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip UDP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is VxLAN. */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the VxLAN info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+ rule->ixgbe_fdir.formatted.tunnel_type =
+ RTE_FDIR_TUNNEL_TYPE_VXLAN;
+
+ /* Only care about VNI, others should be masked. */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+
+ /* Tunnel type is always meaningful. */
+ rule->mask.tunnel_type_mask = 1;
+
+ vxlan_mask =
+ (const struct rte_flow_item_vxlan *)item->mask;
+ if (vxlan_mask->flags) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* VNI must be totally masked or not. */
+ if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
+ vxlan_mask->vni[2]) &&
+ ((vxlan_mask->vni[0] != 0xFF) ||
+ (vxlan_mask->vni[1] != 0xFF) ||
+ (vxlan_mask->vni[2] != 0xFF))) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
+ RTE_DIM(vxlan_mask->vni));
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ vxlan_spec = (const struct rte_flow_item_vxlan *)
+ item->spec;
+ rte_memcpy(((uint8_t *)
+ &rule->ixgbe_fdir.formatted.tni_vni + 1),
+ vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
+ rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
+ rule->ixgbe_fdir.formatted.tni_vni);
+ }
+ }
+
+ /* Get the NVGRE info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
+ rule->ixgbe_fdir.formatted.tunnel_type =
+ RTE_FDIR_TUNNEL_TYPE_NVGRE;
+
+ /**
+ * Only care about flags0, flags1, protocol and TNI,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+
+ /* Tunnel type is always meaningful. */
+ rule->mask.tunnel_type_mask = 1;
+
+ nvgre_mask =
+ (const struct rte_flow_item_nvgre *)item->mask;
+ if (nvgre_mask->flow_id) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ if (nvgre_mask->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0x3000) ||
+ nvgre_mask->protocol != 0xFFFF) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* TNI must be totally masked or not. */
+ if (nvgre_mask->tni[0] &&
+ ((nvgre_mask->tni[0] != 0xFF) ||
+ (nvgre_mask->tni[1] != 0xFF) ||
+ (nvgre_mask->tni[2] != 0xFF))) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* tni is a 24-bits bit field */
+ rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
+ RTE_DIM(nvgre_mask->tni));
+ rule->mask.tunnel_id_mask <<= 8;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ nvgre_spec =
+ (const struct rte_flow_item_nvgre *)item->spec;
+ if (nvgre_spec->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0x2000) ||
+ nvgre_spec->protocol !=
+ rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* tni is a 24-bits bit field */
+ rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
+ nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
+ rule->ixgbe_fdir.formatted.tni_vni <<= 8;
+ }
+ }
+
+ /* check if the next not void item is MAC */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /**
+ * Only support vlan and dst MAC address,
+ * others should be masked.
+ */
+
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Ether type should be masked. */
+ if (eth_mask->type) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* src MAC address should be masked. */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j]) {
+ memset(rule, 0,
+ sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+ rule->mask.mac_addr_byte_mask = 0;
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ /* It's a per byte mask. */
+ if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+ rule->mask.mac_addr_byte_mask |= 0x1 << j;
+ } else if (eth_mask->dst.addr_bytes[j]) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* When no vlan, considered as full mask. */
+ rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+
+ /* Get the dst MAC. */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ rule->ixgbe_fdir.formatted.inner_mac[j] =
+ eth_spec->dst.addr_bytes[j];
+ }
+ }
+
+ /**
+ * Check if the next not void item is vlan or ipv4.
+ * IPv6 is not supported.
+ */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
+ (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ if (!(item->spec && item->mask)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+ rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+
+ rule->mask.vlan_tci_mask = vlan_mask->tci;
+ rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
+ /* More than one tags are not supported. */
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /**
+ * If the tags is 0, it means don't care about the VLAN.
+ * Do nothing.
+ */
+
+ return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
+
+static int
+ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a)
+ return -ENOTSUP;
+
+ ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
+ actions, rule, error);
+
+ if (!ret)
+ goto step_next;
+
+ ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
+ actions, rule, error);
+
+step_next:
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
+ return -ENOTSUP;
+ return ret;
+}
+
+void
+ixgbe_filterlist_flush(void)
+{
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+
+ while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr,
+ entries);
+ rte_free(ntuple_filter_ptr);
+ }
+
+ while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr,
+ entries);
+ rte_free(ethertype_filter_ptr);
+ }
+
+ while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ rte_free(syn_filter_ptr);
+ }
+
+ while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr,
+ entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+
+ while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr,
+ entries);
+ rte_free(fdir_rule_ptr);
+ }
+
+ while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
+ TAILQ_REMOVE(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr,
+ entries);
+ rte_free(ixgbe_flow_mem_ptr->flow);
+ rte_free(ixgbe_flow_mem_ptr);
+ }
+}
+
+/**
+ * Create or destroy a flow rule.
+ * Theorically one rule can match more than one filters.
+ * We will let it use the filter which it hitt first.
+ * So, the sequence matters.
+ */
+static struct rte_flow *
+ixgbe_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct rte_flow *flow = NULL;
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+
+ flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return (struct rte_flow *)flow;
+ }
+ ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
+ sizeof(struct ixgbe_flow_mem), 0);
+ if (!ixgbe_flow_mem_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ rte_free(flow);
+ return NULL;
+ }
+ ixgbe_flow_mem_ptr->flow = flow;
+ TAILQ_INSERT_TAIL(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr, entries);
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret) {
+ ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+ if (!ret) {
+ ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
+ sizeof(struct ixgbe_ntuple_filter_ele), 0);
+ (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
+ TAILQ_INSERT_TAIL(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ flow->rule = ntuple_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret) {
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ &ethertype_filter, TRUE);
+ if (!ret) {
+ ethertype_filter_ptr = rte_zmalloc(
+ "ixgbe_ethertype_filter",
+ sizeof(struct ixgbe_ethertype_filter_ele), 0);
+ (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+ &ethertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
+ TAILQ_INSERT_TAIL(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ flow->rule = ethertype_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret) {
+ ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
+ if (!ret) {
+ syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
+ sizeof(struct ixgbe_eth_syn_filter_ele), 0);
+ (void)rte_memcpy(&syn_filter_ptr->filter_info,
+ &syn_filter,
+ sizeof(struct rte_eth_syn_filter));
+ TAILQ_INSERT_TAIL(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ flow->rule = syn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_SYN;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+ ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+ if (!ret) {
+ /* A mask cannot be deleted. */
+ if (fdir_rule.b_mask) {
+ if (!fdir_info->mask_added) {
+ /* It's the first time the mask is set. */
+ rte_memcpy(&fdir_info->mask,
+ &fdir_rule.mask,
+ sizeof(struct ixgbe_hw_fdir_mask));
+ ret = ixgbe_fdir_set_input_mask(dev);
+ if (ret)
+ goto out;
+
+ fdir_info->mask_added = TRUE;
+ } else {
+ /**
+ * Only support one global mask,
+ * all the masks should be the same.
+ */
+ ret = memcmp(&fdir_info->mask,
+ &fdir_rule.mask,
+ sizeof(struct ixgbe_hw_fdir_mask));
+ if (ret)
+ goto out;
+ }
+ }
+
+ if (fdir_rule.b_spec) {
+ ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
+ FALSE, FALSE);
+ if (!ret) {
+ fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
+ sizeof(struct ixgbe_fdir_rule_ele), 0);
+ (void)rte_memcpy(&fdir_rule_ptr->filter_info,
+ &fdir_rule,
+ sizeof(struct ixgbe_fdir_rule));
+ TAILQ_INSERT_TAIL(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ flow->rule = fdir_rule_ptr;
+ flow->filter_type = RTE_ETH_FILTER_FDIR;
+
+ return flow;
+ }
+
+ if (ret)
+ goto out;
+ }
+
+ goto out;
+ }
+
+ memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
+ actions, &l2_tn_filter, error);
+ if (!ret) {
+ ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
+ if (!ret) {
+ l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
+ sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
+ (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
+ &l2_tn_filter,
+ sizeof(struct rte_eth_l2_tunnel_conf));
+ TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr, entries);
+ flow->rule = l2_tn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
+ return flow;
+ }
+ }
+
+out:
+ TAILQ_REMOVE(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr, entries);
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(ixgbe_flow_mem_ptr);
+ rte_free(flow);
+ return NULL;
+}
+
+/**
+ * Check if the flow rule is supported by ixgbe.
+ * It only checkes the format. Don't guarantee the rule can be programmed into
+ * the HW. Because there can be no enough room for the rule.
+ */
+static int
+ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ int ret;
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+ ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+ if (!ret)
+ return 0;
+
+ memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
+ actions, &l2_tn_filter, error);
+
+ return ret;
+}
+
+/* Destroy a flow rule on ixgbe. */
+static int
+ixgbe_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *pmd_flow = flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&ntuple_filter,
+ &ntuple_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&ethertype_filter,
+ &ethertype_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ &ethertype_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&syn_filter,
+ &syn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
+ (void)rte_memcpy(&fdir_rule,
+ &fdir_rule_ptr->filter_info,
+ sizeof(struct ixgbe_fdir_rule));
+ ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ rte_free(fdir_rule_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr, entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to destroy flow");
+ return ret;
+ }
+
+ TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
+ if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
+ TAILQ_REMOVE(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr, entries);
+ rte_free(ixgbe_flow_mem_ptr);
+ }
+ }
+ rte_free(flow);
+
+ return ret;
+}
+
+/* Destroy all flow rules associated with a port on ixgbe. */
+static int
+ixgbe_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ ixgbe_clear_all_ntuple_filter(dev);
+ ixgbe_clear_all_ethertype_filter(dev);
+ ixgbe_clear_syn_filter(dev);
+
+ ret = ixgbe_clear_all_fdir_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ ret = ixgbe_clear_all_l2_tn_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ ixgbe_filterlist_flush();
+
+ return 0;
+}
+
+const struct rte_flow_ops ixgbe_flow_ops = {
+ ixgbe_flow_validate,
+ ixgbe_flow_create,
+ ixgbe_flow_destroy,
+ ixgbe_flow_flush,
+ NULL,
+};
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 26395e41..d88832e5 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -61,7 +61,9 @@
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
- return eth_dev->pci_dev->max_vfs;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+
+ return pci_dev->max_vfs;
}
static inline
@@ -176,6 +178,7 @@ ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
uint16_t vf_num;
int i;
+ struct ixgbe_ethertype_filter ethertype_filter;
if (!hw->mac.ops.set_ethertype_anti_spoofing) {
RTE_LOG(INFO, PMD, "ether type anti-spoofing is not"
@@ -183,16 +186,23 @@ ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
return;
}
- /* occupy an entity of ether type filter */
- for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
- if (!(filter_info->ethertype_mask & (1 << i))) {
- filter_info->ethertype_mask |= 1 << i;
- filter_info->ethertype_filters[i] =
- IXGBE_ETHERTYPE_FLOW_CTRL;
- break;
- }
+ i = ixgbe_ethertype_filter_lookup(filter_info,
+ IXGBE_ETHERTYPE_FLOW_CTRL);
+ if (i >= 0) {
+ RTE_LOG(ERR, PMD, "A ether type filter"
+ " entity for flow control already exists!\n");
+ return;
}
- if (i == IXGBE_MAX_ETQF_FILTERS) {
+
+ ethertype_filter.ethertype = IXGBE_ETHERTYPE_FLOW_CTRL;
+ ethertype_filter.etqf = IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ IXGBE_ETHERTYPE_FLOW_CTRL;
+ ethertype_filter.etqs = 0;
+ ethertype_filter.conf = TRUE;
+ i = ixgbe_ethertype_filter_insert(filter_info,
+ &ethertype_filter);
+ if (i < 0) {
RTE_LOG(ERR, PMD, "Cannot find an unused ether type filter"
" entity for flow control.\n");
return;
@@ -387,15 +397,27 @@ ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
uint32_t reg_offset, vf_shift;
const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+ uint8_t nb_q_per_pool;
+ int i;
vf_shift = vf & VFRE_MASK;
reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
- /* enable transmit and receive for vf */
+ /* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
reg |= (reg | (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+ /* enable all queue drop for IOV */
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
+ IXGBE_WRITE_FLUSH(hw);
+ reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
+ reg |= i << IXGBE_QDE_IDX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+ }
+
+ /* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
reg |= (reg | (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
diff --git a/drivers/net/ixgbe/ixgbe_regs.h b/drivers/net/ixgbe/ixgbe_regs.h
index 773e1693..2aa48201 100644
--- a/drivers/net/ixgbe/ixgbe_regs.h
+++ b/drivers/net/ixgbe/ixgbe_regs.h
@@ -41,7 +41,7 @@ struct reg_info {
uint32_t count;
uint32_t stride;
const char *name;
-} reg_info;
+};
static const struct reg_info ixgbe_regs_general[] = {
{IXGBE_CTRL, 1, 1, "IXGBE_CTRL"},
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index c61ce470..1e078959 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -70,6 +70,7 @@
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
+#include <rte_net.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
@@ -79,13 +80,23 @@
#include "base/ixgbe_common.h"
#include "ixgbe_rxtx.h"
+#ifdef RTE_LIBRTE_IEEE1588
+#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define IXGBE_TX_IEEE1588_TMST 0
+#endif
/* Bit Mask to indicate what bits required for building TX context */
#define IXGBE_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
- PKT_TX_OUTER_IP_CKSUM)
+ PKT_TX_MACSEC | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ IXGBE_TX_IEEE1588_TMST)
+
+#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
#if 1
#define RTE_PMD_USE_PREFETCH
@@ -100,6 +111,11 @@
#define rte_ixgbe_prefetch(p) do {} while (0)
#endif
+#ifdef RTE_IXGBE_INC_VECTOR
+uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+#endif
+
/*********************************************************************
*
* TX functions
@@ -131,7 +147,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
/* free buffers one at a time */
- m = __rte_pktmbuf_prefree_seg(txep->mbuf);
+ m = rte_pktmbuf_prefree_seg(txep->mbuf);
txep->mbuf = NULL;
if (unlikely(m == NULL))
@@ -321,7 +337,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+ IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
return nb_pkts;
}
@@ -352,6 +368,30 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
+#ifdef RTE_IXGBE_INC_VECTOR
+static uint16_t
+ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+#endif
+
static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
@@ -519,6 +559,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+ if (ol_flags & PKT_TX_MACSEC)
+ cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
return cmdtype;
}
@@ -897,7 +939,7 @@ end_of_tx:
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+ IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
@@ -905,6 +947,57 @@ end_of_tx:
/*********************************************************************
*
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /**
+ * Check if packet meets requirements for number of segments
+ *
+ * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
+ * non-TSO
+ */
+
+ if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/*********************************************************************
+ *
* RX functions
*
**********************************************************************/
@@ -1492,8 +1585,6 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
/* populate the static rte mbuf fields */
mb = rxep[i].mbuf;
if (reset_mbuf) {
- mb->next = NULL;
- mb->nb_segs = 1;
mb->port = rxq->port_id;
}
@@ -1583,7 +1674,8 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+ cur_free_trigger);
}
if (rxq->rx_tail >= rxq->nb_rx_desc)
@@ -1987,8 +2079,8 @@ next_desc:
if (!ixgbe_rx_alloc_bufs(rxq, false)) {
rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
- next_rdt);
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+ next_rdt);
nb_hold -= rxq->rx_free_thresh;
} else {
PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
@@ -2100,12 +2192,6 @@ next_desc:
goto next_desc;
}
- /*
- * This is the last buffer of the received packet - return
- * the current cluster to the user.
- */
- rxm->next = NULL;
-
/* Initialize the first mbuf of the returned packet */
ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
@@ -2159,7 +2245,7 @@ next_desc:
rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
nb_hold = 0;
}
@@ -2284,6 +2370,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
+ dev->tx_pkt_prepare = NULL;
#ifdef RTE_IXGBE_INC_VECTOR
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
@@ -2304,6 +2391,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
(unsigned long)txq->tx_rs_thresh,
(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
dev->tx_pkt_burst = ixgbe_xmit_pkts;
+ dev->tx_pkt_prepare = ixgbe_prep_pkts;
}
}
@@ -2587,7 +2675,6 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
* rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
* rxq->rx_free_thresh < rxq->nb_rx_desc
* (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
- * rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
* Scattered packets are not supported. This should be checked
* outside of this function.
*/
@@ -2609,15 +2696,6 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
"rxq->rx_free_thresh=%d",
rxq->nb_rx_desc, rxq->rx_free_thresh);
ret = -EINVAL;
- } else if (!(rxq->nb_rx_desc <
- (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
- "rxq->nb_rx_desc=%d, "
- "IXGBE_MAX_RING_DESC=%d, "
- "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
- rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
- RTE_PMD_IXGBE_RX_MAX_BURST);
- ret = -EINVAL;
}
return ret;
@@ -2634,12 +2712,7 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
/*
* By default, the Rx queue setup function allocates enough memory for
* IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
- * extra memory at the end of the descriptor ring to be zero'd out. A
- * pre-condition for using the Rx burst bulk alloc function is that the
- * number of descriptors is less than or equal to
- * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
- * constraints here to see if we need to zero out memory after the end
- * of the H/W descriptor ring.
+ * extra memory at the end of the descriptor ring to be zero'd out.
*/
if (adapter->rx_bulk_alloc_allowed)
/* zero out extra memory */
@@ -2859,11 +2932,6 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct ixgbe_rx_queue *rxq;
uint32_t desc = 0;
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
@@ -2898,6 +2966,63 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
}
+int
+ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ volatile uint32_t *status;
+ uint32_t nb_hold, desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+ if (rxq->rx_using_sse)
+ nb_hold = rxq->rxrearm_nb;
+ else
+#endif
+ nb_hold = rxq->nb_rx_hold;
+ if (offset >= rxq->nb_rx_desc - nb_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.upper.status_error;
+ if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct ixgbe_tx_queue *txq = tx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].wb.status;
+ if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void __attribute__((cold))
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
@@ -3323,7 +3448,6 @@ ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
- uint32_t q;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -3343,18 +3467,6 @@ ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
reg |= IXGBE_MTQC_VT_ENA;
IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
- if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
- /* Disable drop for all queues in VMDQ mode*/
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
- } else {
- /* Enable drop for all queues in SRIOV mode */
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT) | IXGBE_QDE_ENABLE));
- }
-
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
reg &= ~IXGBE_RTTDCS_ARBDIS;
@@ -3488,16 +3600,18 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
/**
* ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
- * @hw: pointer to hardware structure
+ * @dev: pointer to eth_dev structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*/
static void
-ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t vlanctrl;
uint8_t i;
+ uint32_t q;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
/*
@@ -3535,6 +3649,21 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
}
IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /* Disable drop for all queues in VMDQ mode*/
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT)));
+ } else {
+ /* Enable drop for all queues in SRIOV mode */
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT) |
+ IXGBE_QDE_ENABLE));
+ }
}
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
@@ -3625,6 +3754,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_bw_conf *bw_conf =
+ IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_DCB:
@@ -3647,7 +3778,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Get dcb TX configuration parameters from rte_eth_conf */
ixgbe_dcb_rx_config(dev, dcb_config);
/*Configure general DCB RX parameters*/
- ixgbe_dcb_rx_hw_config(hw, dcb_config);
+ ixgbe_dcb_rx_hw_config(dev, dcb_config);
break;
default:
PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
@@ -3696,8 +3827,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Re-configure 4 TCs BW */
for (i = 0; i < nb_tcs; i++) {
tc = &dcb_config->tc_config[i];
- tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
- (uint8_t)(100 / nb_tcs);
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
(uint8_t)(100 / nb_tcs);
}
@@ -3706,6 +3838,16 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
}
+ } else {
+ /* Re-configure 8 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ }
}
switch (hw->mac.type) {
@@ -4083,9 +4225,8 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
break;
}
} else {
- /*
- * SRIOV active scheme
- * Support RSS together with VMDq & SRIOV
+ /* SRIOV active scheme
+ * Support RSS together with SRIOV.
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_RSS:
@@ -4093,10 +4234,13 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
ixgbe_config_vf_rss(dev);
break;
case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_DCB:
+ /* In SRIOV, the configuration is the same as VMDq case */
ixgbe_vmdq_dcb_configure(dev);
break;
- /* FIXME if support DCB/RSS together with VMDq & SRIOV */
+ /* DCB/RSS together with SRIOV is not supported */
case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case ETH_MQ_RX_DCB_RSS:
PMD_INIT_LOG(ERR,
"Could not support DCB/RSS with VMDq & SRIOV");
return -1;
@@ -4378,6 +4522,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
bool rsc_capable = false;
uint16_t i;
uint32_t rdrxctl;
+ uint32_t rfctl;
/* Sanity check */
dev->dev_ops->dev_infos_get(dev, &dev_info);
@@ -4405,22 +4550,18 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
}
/* RFCTL configuration */
- if (rsc_capable) {
- uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-
- if (rx_conf->enable_lro)
- /*
- * Since NFS packets coalescing is not supported - clear
- * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
- * enabled.
- */
- rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
- IXGBE_RFCTL_NFSR_DIS);
- else
- rfctl |= IXGBE_RFCTL_RSC_DIS;
-
- IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
- }
+ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+ if ((rsc_capable) && (rx_conf->enable_lro))
+ /*
+ * Since NFS packets coalescing is not supported - clear
+ * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
+ * enabled.
+ */
+ rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
+ IXGBE_RFCTL_NFSR_DIS);
+ else
+ rfctl |= IXGBE_RFCTL_RSC_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* If LRO hasn't been requested - we are done here. */
if (!rx_conf->enable_lro)
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 2608b364..1ffab4cc 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -67,7 +67,7 @@
#define RTE_IXGBE_MAX_RX_BURST RTE_IXGBE_RXQ_REARM_THRESH
#endif
-#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_IXGBE_DESCS_PER_LOOP - 1) * \
+#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_PMD_IXGBE_RX_MAX_BURST) * \
sizeof(union ixgbe_adv_rx_desc))
#ifdef RTE_PMD_PACKET_PREFETCH
@@ -80,6 +80,8 @@
#define RTE_IXGBE_WAIT_100_US 100
#define RTE_IXGBE_VMTXSW_REGISTER_COUNT 2
+#define IXGBE_TX_MAX_SEG 40
+
#define IXGBE_PACKET_TYPE_MASK_82599 0X7F
#define IXGBE_PACKET_TYPE_MASK_X550 0X10FF
#define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF
@@ -309,8 +311,8 @@ void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
#ifdef RTE_IXGBE_INC_VECTOR
-uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
#endif /* RTE_IXGBE_INC_VECTOR */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index a3473b98..1c34bb5f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -123,12 +123,12 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
* tx_next_dd - (tx_rs_thresh-1)
*/
txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
- m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool))
free[nb_free++] = m;
@@ -143,7 +143,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
} else {
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (m != NULL)
rte_mempool_put(m->pool, m);
}
@@ -310,13 +310,6 @@ ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
-#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
- /* whithout rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_strip != 0 ||
- rxmode->hw_vlan_extend != 0)
- return -1;
-#endif
-
/* no fdir support */
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e2715cb9..44de1caa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -85,9 +85,6 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
/*
* Flush mbuf with pkt template.
* Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
*/
vst1_u8((uint8_t *)&mb0->rearm_data, p);
paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
@@ -114,14 +111,6 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
-
#define VTAG_SHIFT (3)
static inline void
@@ -170,9 +159,6 @@ desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
rx_pkts[2]->ol_flags = vol.e[2];
rx_pkts[3]->ol_flags = vol.e[3];
}
-#else
-#define desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, rx_pkts)
-#endif
/*
* vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
@@ -330,12 +316,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
*(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK;
split_packet += RTE_IXGBE_DESCS_PER_LOOP;
-
- /* zero-out next pointers */
- rx_pkts[pos]->next = NULL;
- rx_pkts[pos + 1]->next = NULL;
- rx_pkts[pos + 2]->next = NULL;
- rx_pkts[pos + 3]->next = NULL;
}
rte_prefetch_non_temporal(rxdp + RTE_IXGBE_DESCS_PER_LOOP);
@@ -451,8 +431,8 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
}
uint16_t
-ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *txdp;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index abbf2841..a7bc199f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -82,23 +82,10 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
__m128i vaddr0, vaddr1;
- uintptr_t p0, p1;
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /*
- * Flush mbuf with pkt template.
- * Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
- */
- p0 = (uintptr_t)&mb0->rearm_data;
- *(uint64_t *)p0 = rxq->mbuf_initializer;
- p1 = (uintptr_t)&mb1->rearm_data;
- *(uint64_t *)p1 = rxq->mbuf_initializer;
-
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
@@ -133,23 +120,12 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
-
static inline void
-desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
+desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
struct rte_mbuf **rx_pkts)
{
__m128i ptype0, ptype1, vtag0, vtag1, csum;
- union {
- uint16_t e[4];
- uint64_t dword;
- } vol;
+ __m128i rearm0, rearm1, rearm2, rearm3;
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
@@ -228,18 +204,41 @@ desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
vtag1 = _mm_or_si128(vtag0, vtag1);
vtag1 = _mm_or_si128(ptype0, vtag1);
- vol.dword = _mm_cvtsi128_si64(vtag1);
- rx_pkts[0]->ol_flags = vol.e[0];
- rx_pkts[1]->ol_flags = vol.e[1];
- rx_pkts[2]->ol_flags = vol.e[2];
- rx_pkts[3]->ol_flags = vol.e[3];
-}
+ /*
+ * At this point, we have the 4 sets of flags in the low 64-bits
+ * of vtag1 (4x16).
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single 16-byte write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write.
+ */
+#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10);
+ rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);
+
#else
-#define desc_to_olflags_v(desc, vlan_flags, rx_pkts) do { \
- RTE_SET_USED(vlan_flags); \
- } while (0)
-#endif
+ rearm0 = _mm_slli_si128(vtag1, 14);
+ rearm1 = _mm_slli_si128(vtag1, 12);
+ rearm2 = _mm_slli_si128(vtag1, 10);
+ rearm3 = _mm_slli_si128(vtag1, 8);
+
+ rearm0 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm0, 48));
+ rearm1 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm1, 48));
+ rearm2 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm2, 48));
+ rearm3 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm3, 48));
+
+#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+
+ _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+}
/*
* vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
@@ -268,6 +267,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
0, 0 /* ignore pkt_type field */
);
__m128i dd_check, eop_check;
+ __m128i mbuf_init;
uint8_t vlan_flags;
/* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
@@ -313,6 +313,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
0xFF, 0xFF
);
+ mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache
*/
@@ -335,9 +337,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
@@ -345,11 +351,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
@@ -358,8 +366,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
if (split_packet) {
rte_mbuf_prefetch_part2(rx_pkts[pos]);
@@ -385,7 +395,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
/* set ol_flags with vlan packet type */
- desc_to_olflags_v(descs, vlan_flags, &rx_pkts[pos]);
+ desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
@@ -425,12 +435,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* store the resulting 32-bit value */
*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
split_packet += RTE_IXGBE_DESCS_PER_LOOP;
-
- /* zero-out next pointers */
- rx_pkts[pos]->next = NULL;
- rx_pkts[pos + 1]->next = NULL;
- rx_pkts[pos + 2]->next = NULL;
- rx_pkts[pos + 3]->next = NULL;
}
/* C.3 calc available number of desc */
@@ -537,8 +541,8 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
}
uint16_t
-ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *txdp;
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
new file mode 100644
index 00000000..e8fc9a64
--- /dev/null
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -0,0 +1,910 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+
+#include "base/ixgbe_api.h"
+#include "ixgbe_ethdev.h"
+#include "rte_pmd_ixgbe.h"
+
+int
+rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
+ struct ether_addr *mac_addr)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ int rar_entry;
+ uint8_t *new_mac = (uint8_t *)(mac_addr);
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+ if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
+ rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
+ ETHER_ADDR_LEN);
+ return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
+ IXGBE_RAH_AV);
+ }
+ return -EINVAL;
+}
+
+int
+rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+
+ ctrl = IXGBE_PF_CONTROL_MSG;
+ if (vfinfo[vf].clear_to_send)
+ ctrl |= IXGBE_VT_MSGTYPE_CTS;
+
+ ixgbe_write_mbx(hw, &ctrl, 1, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mac = &hw->mac;
+
+ mac->ops.set_vlan_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mac = &hw->mac;
+ mac->ops.set_mac_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
+{
+ struct ixgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (vlan_id > ETHER_MAX_VLAN_ID)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
+ if (vlan_id) {
+ ctrl = vlan_id;
+ ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
+ } else {
+ ctrl = 0;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ /* enable or disable VMDQ loopback */
+ if (on)
+ ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
+ else
+ ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reg_value;
+ int i;
+ int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ for (i = 0; i <= num_queues; i++) {
+ reg_value = IXGBE_QDE_WRITE |
+ (i << IXGBE_QDE_IDX_SHIFT) |
+ (on & IXGBE_QDE_ENABLE);
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reg_value;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ /* only support VF's 0 to 63 */
+ if ((vf >= pci_dev->max_vfs) || (vf > 63))
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
+ if (on)
+ reg_value |= IXGBE_SRRCTL_DROP_EN;
+ else
+ reg_value &= ~IXGBE_SRRCTL_DROP_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ struct ixgbe_hw *hw;
+ uint16_t queues_per_pool;
+ uint32_t q;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+
+ /* The PF has 128 queue pairs and in SRIOV configuration
+ * those queues will be assigned to VF's, so RXDCTL
+ * registers will be dealing with queues which will be
+ * assigned to VF's.
+ * Let's say we have SRIOV configured with 31 VF's then the
+ * first 124 queues 0-123 will be allocated to VF's and only
+ * the last 4 queues 123-127 will be assigned to the PF.
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
+ ETH_16_POOLS;
+ else
+ queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
+ ETH_64_POOLS;
+
+ for (q = 0; q < queues_per_pool; q++)
+ (*dev->dev_ops->vlan_strip_queue_set)(dev,
+ q + vf * queues_per_pool, on);
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf,
+ uint16_t rx_mask, uint8_t on)
+{
+ int val = 0;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ struct ixgbe_hw *hw;
+ uint32_t vmolr;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
+ " on 82599 hardware and newer");
+ return -ENOTSUP;
+ }
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
+
+ if (on)
+ vmolr |= val;
+ else
+ vmolr &= ~val;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
+ if (vf >= 32) {
+ addr = IXGBE_VFRE(1);
+ val = bit1 << (vf - 32);
+ } else {
+ addr = IXGBE_VFRE(0);
+ val = bit1 << vf;
+ }
+
+ reg = IXGBE_READ_REG(hw, addr);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
+ if (vf >= 32) {
+ addr = IXGBE_VFTE(1);
+ val = bit1 << (vf - 32);
+ } else {
+ addr = IXGBE_VFTE(0);
+ val = bit1 << vf;
+ }
+
+ reg = IXGBE_READ_REG(hw, addr);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on)
+{
+ struct rte_eth_dev *dev;
+ int ret = 0;
+ uint16_t vf_idx;
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ for (vf_idx = 0; vf_idx < 64; vf_idx++) {
+ if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+ ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
+ vlan_on, false);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
+}
+
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Stop the data paths */
+ if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+ return -ENOTSUP;
+ /**
+ * Workaround:
+ * As no ixgbe_disable_sec_rx_path equivalent is
+ * implemented for tx in the base code, and we are
+ * not allowed to modify the base code in DPDK, so
+ * just call the hand-written one directly for now.
+ * The hardware support has been checked by
+ * ixgbe_disable_sec_rx_path().
+ */
+ ixgbe_disable_sec_tx_path_generic(hw);
+
+ /* Enable Ethernet CRC (required by MACsec offload) */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+ /* Enable the TX and RX crypto engines */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+ ctrl |= 0x3;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+ /* Enable SA lookup */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+ ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+ ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+ IXGBE_LSECTXCTRL_AUTH;
+ ctrl |= IXGBE_LSECTXCTRL_AISCI;
+ ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+ ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+ ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+ ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+ ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+ if (rp)
+ ctrl |= IXGBE_LSECRXCTRL_RP;
+ else
+ ctrl &= ~IXGBE_LSECRXCTRL_RP;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+ /* Start the data paths */
+ ixgbe_enable_sec_rx_path(hw);
+ /**
+ * Workaround:
+ * As no ixgbe_enable_sec_rx_path equivalent is
+ * implemented for tx in the base code, and we are
+ * not allowed to modify the base code in DPDK, so
+ * just call the hand-written one directly for now.
+ */
+ ixgbe_enable_sec_tx_path_generic(hw);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Stop the data paths */
+ if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+ return -ENOTSUP;
+ /**
+ * Workaround:
+ * As no ixgbe_disable_sec_rx_path equivalent is
+ * implemented for tx in the base code, and we are
+ * not allowed to modify the base code in DPDK, so
+ * just call the hand-written one directly for now.
+ * The hardware support has been checked by
+ * ixgbe_disable_sec_rx_path().
+ */
+ ixgbe_disable_sec_tx_path_generic(hw);
+
+ /* Disable the TX and RX crypto engines */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+ /* Disable SA lookup */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+ ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+ ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+ ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+ ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+ /* Start the data paths */
+ ixgbe_enable_sec_rx_path(hw);
+ /**
+ * Workaround:
+ * As no ixgbe_enable_sec_rx_path equivalent is
+ * implemented for tx in the base code, and we are
+ * not allowed to modify the base code in DPDK, so
+ * just call the hand-written one directly for now.
+ */
+ ixgbe_enable_sec_tx_path_generic(hw);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+ ctrl = mac[4] | (mac[5] << 8);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+ pi = rte_cpu_to_be_16(pi);
+ ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl, i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (idx != 0 && idx != 1)
+ return -EINVAL;
+
+ if (an >= 4)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Set the PN and key */
+ pn = rte_cpu_to_be_32(pn);
+ if (idx == 0) {
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+ }
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+ }
+ }
+
+ /* Set AN and select the SA */
+ ctrl = (an << idx * 2) | (idx << 4);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl, i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (idx != 0 && idx != 1)
+ return -EINVAL;
+
+ if (an >= 4)
+ return -EINVAL;
+
+ /* Set the PN */
+ pn = rte_cpu_to_be_32(pn);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+ /* Set the key */
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+ }
+
+ /* Set the AN and validate the SA */
+ ctrl = an | (1 << 2);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+ uint8_t tc_num,
+ uint8_t *bw_weight)
+{
+ struct rte_eth_dev *dev;
+ struct ixgbe_dcb_config *dcb_config;
+ struct ixgbe_dcb_tc_config *tc;
+ struct rte_eth_conf *eth_conf;
+ struct ixgbe_bw_conf *bw_conf;
+ uint8_t i;
+ uint8_t nb_tcs;
+ uint16_t sum;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
+ IXGBE_DCB_MAX_TRAFFIC_CLASS);
+ return -EINVAL;
+ }
+
+ dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
+ eth_conf = &dev->data->dev_conf;
+
+ if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+ } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+ ETH_32_POOLS)
+ nb_tcs = ETH_4_TCS;
+ else
+ nb_tcs = ETH_8_TCS;
+ } else {
+ nb_tcs = 1;
+ }
+
+ if (nb_tcs != tc_num) {
+ PMD_DRV_LOG(ERR,
+ "Weight should be set for all %d enabled TCs.",
+ nb_tcs);
+ return -EINVAL;
+ }
+
+ sum = 0;
+ for (i = 0; i < nb_tcs; i++)
+ sum += bw_weight[i];
+ if (sum != 100) {
+ PMD_DRV_LOG(ERR,
+ "The summary of the TC weight should be 100.");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
+ }
+ for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+ }
+
+ bw_conf->tc_num = nb_tcs;
+
+ return 0;
+}
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index c2fb8261..1f2b1bd7 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -42,6 +42,20 @@
#include <rte_ethdev.h>
/**
+ * Notify VF when PF link status changes.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* invalid.
+ */
+int rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf);
+
+/**
* Set the VF MAC address.
*
* @param port
@@ -183,6 +197,237 @@ int
rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
/**
+ * Enable MACsec offload.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param en
+ * 1 - Enable encryption (encrypt and add integrity signature).
+ * 0 - Disable encryption (only add integrity signature).
+ * @param rp
+ * 1 - Enable replay protection.
+ * 0 - Disable replay protection.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp);
+
+/**
+ * Disable MACsec offload.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_disable(uint8_t port);
+
+/**
+ * Configure Tx SC (Secure Connection).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac
+ * The MAC address on the local side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac);
+
+/**
+ * Configure Rx SC (Secure Connection).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac
+ * The MAC address on the remote side.
+ * @param pi
+ * The PI (port identifier) on the remote side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi);
+
+/**
+ * Enable Tx SA (Secure Association).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param idx
+ * The SA to be enabled (0 or 1).
+ * @param an
+ * The association number on the local side.
+ * @param pn
+ * The packet number on the local side.
+ * @param key
+ * The key on the local side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key);
+
+/**
+ * Enable Rx SA (Secure Association).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param idx
+ * The SA to be enabled (0 or 1)
+ * @param an
+ * The association number on the remote side.
+ * @param pn
+ * The packet number on the remote side.
+ * @param key
+ * The key on the remote side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key);
+
+/**
+* Set RX L2 Filtering mode of a VF of an Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param rx_mask
+* The RX mode mask, which is one or more of accepting Untagged Packets,
+* packets that match the PFUTA table, Broadcast and Multicast Promiscuous.
+* ETH_VMDQ_ACCEPT_UNTAG,ETH_VMDQ_ACCEPT_HASH_UC,
+* ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST will be used
+* in rx_mode.
+* @param on
+* 1 - Enable a VF RX mode.
+* 0 - Disable a VF RX mode.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t on);
+
+/**
+* Enable or disable a VF traffic receive of an Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param on
+* 1 - Enable a VF traffic receive.
+* 0 - Disable a VF traffic receive.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+* Enable or disable a VF traffic transmit of the Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param on
+* 1 - Enable a VF traffic transmit.
+* 0 - Disable a VF traffic transmit.
+* @return
+* - (0) if successful.
+* - (-ENODEV) if *port_id* invalid.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+* Enable/Disable hardware VF VLAN filtering by an Ethernet device of
+* received VLAN packets tagged with a given VLAN Tag Identifier.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vlan
+* The VLAN Tag Identifier whose filtering must be enabled or disabled.
+* @param vf_mask
+* Bitmap listing which VFs participate in the VLAN filtering.
+* @param vlan_on
+* 1 - Enable VFs VLAN filtering.
+* 0 - Disable VFs VLAN filtering.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, uint64_t vf_mask, uint8_t vlan_on);
+
+/**
+ * Set the rate limitation for a vf on an Ethernet device.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param tx_rate
+ * The tx rate allocated from the total link speed for this VF id.
+ * @param q_msk
+ * The queue mask which need to set the rate.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, uint16_t tx_rate, uint64_t q_msk);
+
+/**
+ * Set all the TCs' bandwidth weight.
+ *
+ * The bw_weight means the percentage occupied by the TC.
+ * It can be taken as the relative min bandwidth setting.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param tc_num
+ * Number of TCs.
+ * @param bw_weight
+ * An array of relative bandwidth weight for all the TCs.
+ * The summary of the bw_weight should be 100.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+ uint8_t tc_num,
+ uint8_t *bw_weight);
+
+/**
* Response sent back to ixgbe driver from user app after callback
*/
enum rte_pmd_ixgbe_mb_event_rsp {
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index 92434f3f..45a57e33 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -15,3 +15,26 @@ DPDK_16.11 {
rte_pmd_ixgbe_set_vf_vlan_insert;
rte_pmd_ixgbe_set_vf_vlan_stripq;
} DPDK_2.0;
+
+DPDK_17.02 {
+ global:
+
+ rte_pmd_ixgbe_macsec_config_rxsc;
+ rte_pmd_ixgbe_macsec_config_txsc;
+ rte_pmd_ixgbe_macsec_disable;
+ rte_pmd_ixgbe_macsec_enable;
+ rte_pmd_ixgbe_macsec_select_rxsa;
+ rte_pmd_ixgbe_macsec_select_txsa;
+ rte_pmd_ixgbe_set_vf_rate_limit;
+ rte_pmd_ixgbe_set_vf_rx;
+ rte_pmd_ixgbe_set_vf_rxmode;
+ rte_pmd_ixgbe_set_vf_tx;
+ rte_pmd_ixgbe_set_vf_vlan_filter;
+} DPDK_16.11;
+
+DPDK_17.05 {
+ global:
+
+ rte_pmd_ixgbe_ping_vf;
+ rte_pmd_ixgbe_set_tc_bw_alloc;
+} DPDK_17.02;
diff --git a/drivers/net/kni/Makefile b/drivers/net/kni/Makefile
new file mode 100644
index 00000000..46a1ad08
--- /dev/null
+++ b/drivers/net/kni/Makefile
@@ -0,0 +1,56 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_kni.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lpthread
+
+EXPORT_MAP := rte_pmd_kni_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KNI) += rte_eth_kni.c
+
+#
+# Export include files
+#
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
new file mode 100644
index 00000000..f688d919
--- /dev/null
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -0,0 +1,510 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <fcntl.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_kni.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_vdev.h>
+
+/* Only single queue supported */
+#define KNI_MAX_QUEUE_PER_PORT 1
+
+#define MAX_PACKET_SZ 2048
+#define MAX_KNI_PORTS 8
+
+#define ETH_KNI_NO_REQUEST_THREAD_ARG "no_request_thread"
+static const char * const valid_arguments[] = {
+ ETH_KNI_NO_REQUEST_THREAD_ARG,
+ NULL
+};
+
+struct eth_kni_args {
+ int no_request_thread;
+};
+
+struct pmd_queue_stats {
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t err_pkts;
+};
+
+struct pmd_queue {
+ struct pmd_internals *internals;
+ struct rte_mempool *mb_pool;
+
+ struct pmd_queue_stats rx;
+ struct pmd_queue_stats tx;
+};
+
+struct pmd_internals {
+ struct rte_kni *kni;
+ int is_kni_started;
+
+ pthread_t thread;
+ int stop_thread;
+ int no_request_thread;
+
+ struct ether_addr eth_addr;
+
+ struct pmd_queue rx_queues[KNI_MAX_QUEUE_PER_PORT];
+ struct pmd_queue tx_queues[KNI_MAX_QUEUE_PER_PORT];
+};
+
+static const struct rte_eth_link pmd_link = {
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_SPEED_AUTONEG,
+};
+static int is_kni_initialized;
+
+static uint16_t
+eth_kni_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ struct pmd_queue *kni_q = q;
+ struct rte_kni *kni = kni_q->internals->kni;
+ uint16_t nb_pkts;
+
+ nb_pkts = rte_kni_rx_burst(kni, bufs, nb_bufs);
+
+ kni_q->rx.pkts += nb_pkts;
+ kni_q->rx.err_pkts += nb_bufs - nb_pkts;
+
+ return nb_pkts;
+}
+
+static uint16_t
+eth_kni_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ struct pmd_queue *kni_q = q;
+ struct rte_kni *kni = kni_q->internals->kni;
+ uint16_t nb_pkts;
+
+ nb_pkts = rte_kni_tx_burst(kni, bufs, nb_bufs);
+
+ kni_q->tx.pkts += nb_pkts;
+ kni_q->tx.err_pkts += nb_bufs - nb_pkts;
+
+ return nb_pkts;
+}
+
+static void *
+kni_handle_request(void *param)
+{
+ struct pmd_internals *internals = param;
+#define MS 1000
+
+ while (!internals->stop_thread) {
+ rte_kni_handle_request(internals->kni);
+ usleep(500 * MS);
+ }
+
+ return param;
+}
+
+static int
+eth_kni_start(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ uint16_t port_id = dev->data->port_id;
+ struct rte_mempool *mb_pool;
+ struct rte_kni_conf conf;
+ const char *name = dev->data->name + 4; /* remove net_ */
+
+ snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", name);
+ conf.force_bind = 0;
+ conf.group_id = port_id;
+ conf.mbuf_size = MAX_PACKET_SZ;
+ mb_pool = internals->rx_queues[0].mb_pool;
+
+ internals->kni = rte_kni_alloc(mb_pool, &conf, NULL);
+ if (internals->kni == NULL) {
+ RTE_LOG(ERR, PMD,
+ "Fail to create kni interface for port: %d\n",
+ port_id);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+eth_kni_dev_start(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ int ret;
+
+ if (internals->is_kni_started == 0) {
+ ret = eth_kni_start(dev);
+ if (ret)
+ return -1;
+ internals->is_kni_started = 1;
+ }
+
+ if (internals->no_request_thread == 0) {
+ ret = pthread_create(&internals->thread, NULL,
+ kni_handle_request, internals);
+ if (ret) {
+ RTE_LOG(ERR, PMD,
+ "Fail to create kni request thread\n");
+ return -1;
+ }
+ }
+
+ dev->data->dev_link.link_status = 1;
+
+ return 0;
+}
+
+static void
+eth_kni_dev_stop(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ int ret;
+
+ if (internals->no_request_thread == 0) {
+ internals->stop_thread = 1;
+
+ ret = pthread_cancel(internals->thread);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Can't cancel the thread\n");
+
+ ret = pthread_join(internals->thread, NULL);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Can't join the thread\n");
+
+ internals->stop_thread = 0;
+ }
+
+ dev->data->dev_link.link_status = 0;
+}
+
+static int
+eth_kni_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_dev_info *dev_info)
+{
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = UINT32_MAX;
+ dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT;
+ dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->pci_dev = NULL;
+}
+
+static int
+eth_kni_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_queue *q;
+
+ q = &internals->rx_queues[rx_queue_id];
+ q->internals = internals;
+ q->mb_pool = mb_pool;
+
+ dev->data->rx_queues[rx_queue_id] = q;
+
+ return 0;
+}
+
+static int
+eth_kni_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_queue *q;
+
+ q = &internals->tx_queues[tx_queue_id];
+ q->internals = internals;
+
+ dev->data->tx_queues[tx_queue_id] = q;
+
+ return 0;
+}
+
+static void
+eth_kni_queue_release(void *q __rte_unused)
+{
+}
+
+static int
+eth_kni_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
+{
+ return 0;
+}
+
+static void
+eth_kni_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ unsigned long rx_packets_total = 0, rx_bytes_total = 0;
+ unsigned long tx_packets_total = 0, tx_bytes_total = 0;
+ struct rte_eth_dev_data *data = dev->data;
+ unsigned long tx_packets_err_total = 0;
+ unsigned int i, num_stats;
+ struct pmd_queue *q;
+
+ num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+ data->nb_rx_queues);
+ for (i = 0; i < num_stats; i++) {
+ q = data->rx_queues[i];
+ stats->q_ipackets[i] = q->rx.pkts;
+ stats->q_ibytes[i] = q->rx.bytes;
+ rx_packets_total += stats->q_ipackets[i];
+ rx_bytes_total += stats->q_ibytes[i];
+ }
+
+ num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+ data->nb_tx_queues);
+ for (i = 0; i < num_stats; i++) {
+ q = data->tx_queues[i];
+ stats->q_opackets[i] = q->tx.pkts;
+ stats->q_obytes[i] = q->tx.bytes;
+ stats->q_errors[i] = q->tx.err_pkts;
+ tx_packets_total += stats->q_opackets[i];
+ tx_bytes_total += stats->q_obytes[i];
+ tx_packets_err_total += stats->q_errors[i];
+ }
+
+ stats->ipackets = rx_packets_total;
+ stats->ibytes = rx_bytes_total;
+ stats->opackets = tx_packets_total;
+ stats->obytes = tx_bytes_total;
+ stats->oerrors = tx_packets_err_total;
+}
+
+static void
+eth_kni_stats_reset(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct pmd_queue *q;
+ unsigned int i;
+
+ for (i = 0; i < data->nb_rx_queues; i++) {
+ q = data->rx_queues[i];
+ q->rx.pkts = 0;
+ q->rx.bytes = 0;
+ }
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ q = data->tx_queues[i];
+ q->tx.pkts = 0;
+ q->tx.bytes = 0;
+ q->tx.err_pkts = 0;
+ }
+}
+
+static const struct eth_dev_ops eth_kni_ops = {
+ .dev_start = eth_kni_dev_start,
+ .dev_stop = eth_kni_dev_stop,
+ .dev_configure = eth_kni_dev_configure,
+ .dev_infos_get = eth_kni_dev_info,
+ .rx_queue_setup = eth_kni_rx_queue_setup,
+ .tx_queue_setup = eth_kni_tx_queue_setup,
+ .rx_queue_release = eth_kni_queue_release,
+ .tx_queue_release = eth_kni_queue_release,
+ .link_update = eth_kni_link_update,
+ .stats_get = eth_kni_stats_get,
+ .stats_reset = eth_kni_stats_reset,
+};
+
+static struct rte_vdev_driver eth_kni_drv;
+
+static struct rte_eth_dev *
+eth_kni_create(struct rte_vdev_device *vdev,
+ struct eth_kni_args *args,
+ unsigned int numa_node)
+{
+ struct pmd_internals *internals;
+ struct rte_eth_dev_data *data;
+ struct rte_eth_dev *eth_dev;
+ const char *name;
+
+ RTE_LOG(INFO, PMD, "Creating kni ethdev on numa socket %u\n",
+ numa_node);
+
+ name = rte_vdev_device_name(vdev);
+ data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+ if (data == NULL)
+ return NULL;
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*internals));
+ if (eth_dev == NULL) {
+ rte_free(data);
+ return NULL;
+ }
+
+ internals = eth_dev->data->dev_private;
+ rte_memcpy(data, eth_dev->data, sizeof(*data));
+ data->nb_rx_queues = 1;
+ data->nb_tx_queues = 1;
+ data->dev_link = pmd_link;
+ data->mac_addrs = &internals->eth_addr;
+
+ eth_random_addr(internals->eth_addr.addr_bytes);
+
+ eth_dev->data = data;
+ eth_dev->dev_ops = &eth_kni_ops;
+
+ data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+
+ internals->no_request_thread = args->no_request_thread;
+
+ return eth_dev;
+}
+
+static int
+kni_init(void)
+{
+ if (is_kni_initialized == 0)
+ rte_kni_init(MAX_KNI_PORTS);
+
+ is_kni_initialized++;
+
+ return 0;
+}
+
+static int
+eth_kni_kvargs_process(struct eth_kni_args *args, const char *params)
+{
+ struct rte_kvargs *kvlist;
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist == NULL)
+ return -1;
+
+ memset(args, 0, sizeof(struct eth_kni_args));
+
+ if (rte_kvargs_count(kvlist, ETH_KNI_NO_REQUEST_THREAD_ARG) == 1)
+ args->no_request_thread = 1;
+
+ rte_kvargs_free(kvlist);
+
+ return 0;
+}
+
+static int
+eth_kni_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_eth_dev *eth_dev;
+ struct eth_kni_args args;
+ const char *name;
+ const char *params;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+ params = rte_vdev_device_args(vdev);
+ RTE_LOG(INFO, PMD, "Initializing eth_kni for %s\n", name);
+
+ ret = eth_kni_kvargs_process(&args, params);
+ if (ret < 0)
+ return ret;
+
+ ret = kni_init();
+ if (ret < 0)
+ return ret;
+
+ eth_dev = eth_kni_create(vdev, &args, rte_socket_id());
+ if (eth_dev == NULL)
+ goto kni_uninit;
+
+ eth_dev->rx_pkt_burst = eth_kni_rx;
+ eth_dev->tx_pkt_burst = eth_kni_tx;
+
+ return 0;
+
+kni_uninit:
+ is_kni_initialized--;
+ if (is_kni_initialized == 0)
+ rte_kni_close();
+ return -1;
+}
+
+static int
+eth_kni_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_eth_dev *eth_dev;
+ struct pmd_internals *internals;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ RTE_LOG(INFO, PMD, "Un-Initializing eth_kni for %s\n", name);
+
+ /* find the ethdev entry */
+ eth_dev = rte_eth_dev_allocated(name);
+ if (eth_dev == NULL)
+ return -1;
+
+ eth_kni_dev_stop(eth_dev);
+
+ internals = eth_dev->data->dev_private;
+ rte_kni_release(internals->kni);
+
+ rte_free(internals);
+ rte_free(eth_dev->data);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ is_kni_initialized--;
+ if (is_kni_initialized == 0)
+ rte_kni_close();
+
+ return 0;
+}
+
+static struct rte_vdev_driver eth_kni_drv = {
+ .probe = eth_kni_probe,
+ .remove = eth_kni_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_kni, eth_kni_drv);
+RTE_PMD_REGISTER_PARAM_STRING(net_kni, ETH_KNI_NO_REQUEST_THREAD_ARG "=<int>");
diff --git a/drivers/net/kni/rte_pmd_kni_version.map b/drivers/net/kni/rte_pmd_kni_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/drivers/net/kni/rte_pmd_kni_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/drivers/net/liquidio/Makefile b/drivers/net/liquidio/Makefile
new file mode 100644
index 00000000..32c06f5b
--- /dev/null
+++ b/drivers/net/liquidio/Makefile
@@ -0,0 +1,58 @@
+#
+# BSD LICENSE
+#
+# Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium, Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_lio.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)/base -I$(SRCDIR)
+
+EXPORT_MAP := rte_pmd_lio_version.map
+
+LIBABIVER := 1
+
+VPATH += $(RTE_SDK)/drivers/net/liquidio/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_23xx_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_mbox.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/liquidio/base/lio_23xx_reg.h b/drivers/net/liquidio/base/lio_23xx_reg.h
new file mode 100644
index 00000000..794bc2ca
--- /dev/null
+++ b/drivers/net/liquidio/base/lio_23xx_reg.h
@@ -0,0 +1,194 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_23XX_REG_H_
+#define _LIO_23XX_REG_H_
+
+/* ###################### REQUEST QUEUE ######################### */
+
+/* 64 registers for Input Queues Start Addr - SLI_PKT(0..63)_INSTR_BADDR */
+#define CN23XX_SLI_PKT_INSTR_BADDR_START64 0x10010
+
+/* 64 registers for Input Doorbell - SLI_PKT(0..63)_INSTR_BAOFF_DBELL */
+#define CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START 0x10020
+
+/* 64 registers for Input Queue size - SLI_PKT(0..63)_INSTR_FIFO_RSIZE */
+#define CN23XX_SLI_PKT_INSTR_FIFO_RSIZE_START 0x10030
+
+/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE(0..63)_CNTS */
+#define CN23XX_SLI_PKT_IN_DONE_CNTS_START64 0x10040
+
+/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL.
+ */
+#define CN23XX_SLI_PKT_INPUT_CONTROL_START64 0x10000
+
+/* ------- Request Queue Macros --------- */
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_IQ_OFFSET 0x20000
+
+#define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \
+ (CN23XX_SLI_PKT_INPUT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_BASE_ADDR64(iq) \
+ (CN23XX_SLI_PKT_INSTR_BADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_SIZE(iq) \
+ (CN23XX_SLI_PKT_INSTR_FIFO_RSIZE_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_DOORBELL(iq) \
+ (CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \
+ (CN23XX_SLI_PKT_IN_DONE_CNTS_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+/* Number of instructions to be read in one MAC read request.
+ * setting to Max value(4)
+ */
+#define CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25)
+#define CN23XX_PKT_INPUT_CTL_IS_64B (1 << 24)
+#define CN23XX_PKT_INPUT_CTL_RST (1 << 23)
+#define CN23XX_PKT_INPUT_CTL_QUIET (1 << 28)
+#define CN23XX_PKT_INPUT_CTL_RING_ENB (1 << 22)
+#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP (1 << 6)
+#define CN23XX_PKT_INPUT_CTL_USE_CSR (1 << 4)
+#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2)
+
+/* These bits[47:44] select the Physical function number within the MAC */
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS 45
+/* These bits[43:32] select the function number within the PF */
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS 32
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR)
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR | \
+ CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP)
+#endif
+
+/* ############################ OUTPUT QUEUE ######################### */
+
+/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */
+#define CN23XX_SLI_PKT_OUTPUT_CONTROL_START 0x10050
+
+/* 64 registers for Output queue buffer and info size
+ * SLI_PKT(0..63)_OUT_SIZE
+ */
+#define CN23XX_SLI_PKT_OUT_SIZE 0x10060
+
+/* 64 registers for Output Queue Start Addr - SLI_PKT(0..63)_SLIST_BADDR */
+#define CN23XX_SLI_SLIST_BADDR_START64 0x10070
+
+/* 64 registers for Output Queue Packet Credits
+ * SLI_PKT(0..63)_SLIST_BAOFF_DBELL
+ */
+#define CN23XX_SLI_PKT_SLIST_BAOFF_DBELL_START 0x10080
+
+/* 64 registers for Output Queue size - SLI_PKT(0..63)_SLIST_FIFO_RSIZE */
+#define CN23XX_SLI_PKT_SLIST_FIFO_RSIZE_START 0x10090
+
+/* 64 registers for Output Queue Packet Count - SLI_PKT(0..63)_CNTS */
+#define CN23XX_SLI_PKT_CNTS_START 0x100B0
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_OQ_OFFSET 0x20000
+
+/* ------- Output Queue Macros --------- */
+
+#define CN23XX_SLI_OQ_PKT_CONTROL(oq) \
+ (CN23XX_SLI_PKT_OUTPUT_CONTROL_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BASE_ADDR64(oq) \
+ (CN23XX_SLI_SLIST_BADDR_START64 + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_SIZE(oq) \
+ (CN23XX_SLI_PKT_SLIST_FIFO_RSIZE_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq) \
+ (CN23XX_SLI_PKT_OUT_SIZE + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_SENT(oq) \
+ (CN23XX_SLI_PKT_CNTS_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_CREDIT(oq) \
+ (CN23XX_SLI_PKT_SLIST_BAOFF_DBELL_START + ((oq) * CN23XX_OQ_OFFSET))
+
+/* ------------------ Masks ---------------- */
+#define CN23XX_PKT_OUTPUT_CTL_IPTR (1 << 11)
+#define CN23XX_PKT_OUTPUT_CTL_ES (1 << 9)
+#define CN23XX_PKT_OUTPUT_CTL_NSR (1 << 8)
+#define CN23XX_PKT_OUTPUT_CTL_ROR (1 << 7)
+#define CN23XX_PKT_OUTPUT_CTL_DPTR (1 << 6)
+#define CN23XX_PKT_OUTPUT_CTL_BMODE (1 << 5)
+#define CN23XX_PKT_OUTPUT_CTL_ES_P (1 << 3)
+#define CN23XX_PKT_OUTPUT_CTL_NSR_P (1 << 2)
+#define CN23XX_PKT_OUTPUT_CTL_ROR_P (1 << 1)
+#define CN23XX_PKT_OUTPUT_CTL_RING_ENB (1 << 0)
+
+/* Rings per Virtual Function [RO] */
+#define CN23XX_PKT_INPUT_CTL_RPVF_MASK 0x3F
+#define CN23XX_PKT_INPUT_CTL_RPVF_POS 48
+
+/* These bits[47:44][RO] give the Physical function
+ * number info within the MAC
+ */
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK 0x7
+
+/* These bits[43:32][RO] give the virtual function
+ * number info within the PF
+ */
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK 0x1FFF
+
+/* ######################### Mailbox Reg Macros ######################## */
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START 0x10200
+#define CN23XX_VF_SLI_PKT_MBOX_INT_START 0x10210
+
+#define CN23XX_SLI_MBOX_OFFSET 0x20000
+#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET 0x8
+
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \
+ (CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START + \
+ ((q) * CN23XX_SLI_MBOX_OFFSET + \
+ (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET))
+
+#define CN23XX_VF_SLI_PKT_MBOX_INT(q) \
+ (CN23XX_VF_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
+
+#endif /* _LIO_23XX_REG_H_ */
diff --git a/drivers/net/liquidio/base/lio_23xx_vf.c b/drivers/net/liquidio/base/lio_23xx_vf.c
new file mode 100644
index 00000000..e30c20dc
--- /dev/null
+++ b/drivers/net/liquidio/base/lio_23xx_vf.c
@@ -0,0 +1,588 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+
+#include "lio_logs.h"
+#include "lio_23xx_vf.h"
+#include "lio_23xx_reg.h"
+#include "lio_mbox.h"
+
+static int
+cn23xx_vf_reset_io_queues(struct lio_device *lio_dev, uint32_t num_queues)
+{
+ uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
+ uint64_t d64, q_no;
+ int ret_val = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (q_no = 0; q_no < num_queues; q_no++) {
+ /* set RST bit to 1. This bit applies to both IQ and OQ */
+ d64 = lio_read_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ d64);
+ }
+
+ /* wait until the RST bit is clear or the RST and QUIET bits are set */
+ for (q_no = 0; q_no < num_queues; q_no++) {
+ volatile uint64_t reg_val;
+
+ reg_val = lio_read_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop) {
+ reg_val = lio_read_csr64(
+ lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ loop = loop - 1;
+ }
+
+ if (loop == 0) {
+ lio_dev_err(lio_dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %lu\n",
+ (unsigned long)q_no);
+ return -1;
+ }
+
+ reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ reg_val = lio_read_csr64(
+ lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ lio_dev_err(lio_dev,
+ "clearing the reset failed for qno: %lu\n",
+ (unsigned long)q_no);
+ ret_val = -1;
+ }
+ }
+
+ return ret_val;
+}
+
+static int
+cn23xx_vf_setup_global_input_regs(struct lio_device *lio_dev)
+{
+ uint64_t q_no;
+ uint64_t d64;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (cn23xx_vf_reset_io_queues(lio_dev,
+ lio_dev->sriov_info.rings_per_vf))
+ return -1;
+
+ for (q_no = 0; q_no < (lio_dev->sriov_info.rings_per_vf); q_no++) {
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_DOORBELL(q_no),
+ 0xFFFFFFFF);
+
+ d64 = lio_read_csr64(lio_dev,
+ CN23XX_SLI_IQ_INSTR_COUNT64(q_no));
+
+ d64 &= 0xEFFFFFFFFFFFFFFFL;
+
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_INSTR_COUNT64(q_no),
+ d64);
+
+ /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for
+ * the Input Queues
+ */
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ CN23XX_PKT_INPUT_CTL_MASK);
+ }
+
+ return 0;
+}
+
+static void
+cn23xx_vf_setup_global_output_regs(struct lio_device *lio_dev)
+{
+ uint32_t reg_val;
+ uint32_t q_no;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
+ 0xFFFFFFFF);
+
+ reg_val =
+ lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no));
+
+ reg_val &= 0xEFFFFFFFFFFFFFFFL;
+
+ reg_val =
+ lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+
+ /* set IPTR & DPTR */
+ reg_val |=
+ (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+
+ /* reset BMODE */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
+
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue Scatter List
+ * reset ROR_P, NSR_P
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
+#endif
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue Data
+ * reset ROR, NSR
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
+ /* set the ES bit */
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
+
+ /* write all the selected settings */
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
+ reg_val);
+ }
+}
+
+static int
+cn23xx_vf_setup_device_regs(struct lio_device *lio_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (cn23xx_vf_setup_global_input_regs(lio_dev))
+ return -1;
+
+ cn23xx_vf_setup_global_output_regs(lio_dev);
+
+ return 0;
+}
+
+static void
+cn23xx_vf_setup_iq_regs(struct lio_device *lio_dev, uint32_t iq_no)
+{
+ struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
+ uint64_t pkt_in_done = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Write the start of the input queue's ring and its size */
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
+ iq->base_addr_dma);
+ lio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_IQ_DOORBELL(iq_no);
+ iq->inst_cnt_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
+ lio_dev_dbg(lio_dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter (used in flush_iq
+ * calculation)
+ */
+ pkt_in_done = rte_read64(iq->inst_cnt_reg);
+
+ /* Clear the count by writing back what we read, but don't
+ * enable data traffic here
+ */
+ rte_write64(pkt_in_done, iq->inst_cnt_reg);
+}
+
+static void
+cn23xx_vf_setup_oq_regs(struct lio_device *lio_dev, uint32_t oq_no)
+{
+ struct lio_droq *droq = lio_dev->droq[oq_no];
+
+ PMD_INIT_FUNC_TRACE();
+
+ lio_write_csr64(lio_dev, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ (droq->buffer_size | (OCTEON_RH_SIZE << 16)));
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);
+}
+
+static void
+cn23xx_vf_free_mbox(struct lio_device *lio_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_free(lio_dev->mbox[0]);
+ lio_dev->mbox[0] = NULL;
+
+ rte_free(lio_dev->mbox);
+ lio_dev->mbox = NULL;
+}
+
+static int
+cn23xx_vf_setup_mbox(struct lio_device *lio_dev)
+{
+ struct lio_mbox *mbox;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (lio_dev->mbox == NULL) {
+ lio_dev->mbox = rte_zmalloc(NULL, sizeof(void *), 0);
+ if (lio_dev->mbox == NULL)
+ return -ENOMEM;
+ }
+
+ mbox = rte_zmalloc(NULL, sizeof(struct lio_mbox), 0);
+ if (mbox == NULL) {
+ rte_free(lio_dev->mbox);
+ lio_dev->mbox = NULL;
+ return -ENOMEM;
+ }
+
+ rte_spinlock_init(&mbox->lock);
+
+ mbox->lio_dev = lio_dev;
+
+ mbox->q_no = 0;
+
+ mbox->state = LIO_MBOX_STATE_IDLE;
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_VF_SLI_PKT_MBOX_INT(0);
+ /* VF reads from SIG0 reg */
+ mbox->mbox_read_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
+ /* VF writes into SIG1 reg */
+ mbox->mbox_write_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);
+
+ lio_dev->mbox[0] = mbox;
+
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+
+ return 0;
+}
+
+static int
+cn23xx_vf_enable_io_queues(struct lio_device *lio_dev)
+{
+ uint32_t q_no;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (q_no = 0; q_no < lio_dev->num_iqs; q_no++) {
+ uint64_t reg_val;
+
+ /* set the corresponding IQ IS_64B bit */
+ if (lio_dev->io_qmask.iq64B & (1ULL << q_no)) {
+ reg_val = lio_read_csr64(
+ lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
+ lio_write_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+ }
+
+ /* set the corresponding IQ ENB bit */
+ if (lio_dev->io_qmask.iq & (1ULL << q_no)) {
+ reg_val = lio_read_csr64(
+ lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
+ lio_write_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+ }
+ }
+ for (q_no = 0; q_no < lio_dev->num_oqs; q_no++) {
+ uint32_t reg_val;
+
+ /* set the corresponding OQ ENB bit */
+ if (lio_dev->io_qmask.oq & (1ULL << q_no)) {
+ reg_val = lio_read_csr(
+ lio_dev,
+ CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+ reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
+ lio_write_csr(lio_dev,
+ CN23XX_SLI_OQ_PKT_CONTROL(q_no),
+ reg_val);
+ }
+ }
+
+ return 0;
+}
+
+static void
+cn23xx_vf_disable_io_queues(struct lio_device *lio_dev)
+{
+ uint32_t num_queues;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* per HRM, rings can only be disabled via reset operation,
+ * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB]
+ */
+ num_queues = lio_dev->num_iqs;
+ if (num_queues < lio_dev->num_oqs)
+ num_queues = lio_dev->num_oqs;
+
+ cn23xx_vf_reset_io_queues(lio_dev, num_queues);
+}
+
+void
+cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev)
+{
+ struct lio_mbox_cmd mbox_cmd;
+
+ memset(&mbox_cmd, 0, sizeof(struct lio_mbox_cmd));
+ mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 0;
+ mbox_cmd.msg.s.cmd = LIO_VF_FLR_REQUEST;
+ mbox_cmd.msg.s.len = 1;
+ mbox_cmd.q_no = 0;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = NULL;
+ mbox_cmd.fn_arg = 0;
+
+ lio_mbox_write(lio_dev, &mbox_cmd);
+}
+
+static void
+cn23xx_pfvf_hs_callback(struct lio_device *lio_dev,
+ struct lio_mbox_cmd *cmd, void *arg)
+{
+ uint32_t major = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_memcpy((uint8_t *)&lio_dev->pfvf_hsword, cmd->msg.s.params, 6);
+ if (cmd->recv_len > 1) {
+ struct lio_version *lio_ver = (struct lio_version *)cmd->data;
+
+ major = lio_ver->major;
+ major = major << 16;
+ }
+
+ rte_atomic64_set((rte_atomic64_t *)arg, major | 1);
+}
+
+int
+cn23xx_pfvf_handshake(struct lio_device *lio_dev)
+{
+ struct lio_mbox_cmd mbox_cmd;
+ struct lio_version *lio_ver = (struct lio_version *)&mbox_cmd.data[0];
+ uint32_t q_no, count = 0;
+ rte_atomic64_t status;
+ uint32_t pfmajor;
+ uint32_t vfmajor;
+ uint32_t ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Sending VF_ACTIVE indication to the PF driver */
+ lio_dev_dbg(lio_dev, "requesting info from PF\n");
+
+ mbox_cmd.msg.mbox_msg64 = 0;
+ mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 1;
+ mbox_cmd.msg.s.cmd = LIO_VF_ACTIVE;
+ mbox_cmd.msg.s.len = 2;
+ mbox_cmd.data[0] = 0;
+ lio_ver->major = LIO_BASE_MAJOR_VERSION;
+ lio_ver->minor = LIO_BASE_MINOR_VERSION;
+ lio_ver->micro = LIO_BASE_MICRO_VERSION;
+ mbox_cmd.q_no = 0;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = (lio_mbox_callback)cn23xx_pfvf_hs_callback;
+ mbox_cmd.fn_arg = (void *)&status;
+
+ if (lio_mbox_write(lio_dev, &mbox_cmd)) {
+ lio_dev_err(lio_dev, "Write to mailbox failed\n");
+ return -1;
+ }
+
+ rte_atomic64_set(&status, 0);
+
+ do {
+ rte_delay_ms(1);
+ } while ((rte_atomic64_read(&status) == 0) && (count++ < 10000));
+
+ ret = rte_atomic64_read(&status);
+ if (ret == 0) {
+ lio_dev_err(lio_dev, "cn23xx_pfvf_handshake timeout\n");
+ return -1;
+ }
+
+ for (q_no = 0; q_no < lio_dev->num_iqs; q_no++)
+ lio_dev->instr_queue[q_no]->txpciq.s.pkind =
+ lio_dev->pfvf_hsword.pkind;
+
+ vfmajor = LIO_BASE_MAJOR_VERSION;
+ pfmajor = ret >> 16;
+ if (pfmajor != vfmajor) {
+ lio_dev_err(lio_dev,
+ "VF LiquidIO driver (major version %d) is not compatible with LiquidIO PF driver (major version %d)\n",
+ vfmajor, pfmajor);
+ ret = -EPERM;
+ } else {
+ lio_dev_dbg(lio_dev,
+ "VF LiquidIO driver (major version %d), LiquidIO PF driver (major version %d)\n",
+ vfmajor, pfmajor);
+ ret = 0;
+ }
+
+ lio_dev_dbg(lio_dev, "got data from PF pkind is %d\n",
+ lio_dev->pfvf_hsword.pkind);
+
+ return ret;
+}
+
+void
+cn23xx_vf_handle_mbox(struct lio_device *lio_dev)
+{
+ uint64_t mbox_int_val;
+
+ /* read and clear by writing 1 */
+ mbox_int_val = rte_read64(lio_dev->mbox[0]->mbox_int_reg);
+ rte_write64(mbox_int_val, lio_dev->mbox[0]->mbox_int_reg);
+ if (lio_mbox_read(lio_dev->mbox[0]))
+ lio_mbox_process_message(lio_dev->mbox[0]);
+}
+
+int
+cn23xx_vf_setup_device(struct lio_device *lio_dev)
+{
+ uint64_t reg_val;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* INPUT_CONTROL[RPVF] gives the VF IOq count */
+ reg_val = lio_read_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(0));
+
+ lio_dev->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
+ CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
+ lio_dev->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
+ CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;
+
+ reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
+
+ lio_dev->sriov_info.rings_per_vf =
+ reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
+
+ lio_dev->default_config = lio_get_conf(lio_dev);
+ if (lio_dev->default_config == NULL)
+ return -1;
+
+ lio_dev->fn_list.setup_iq_regs = cn23xx_vf_setup_iq_regs;
+ lio_dev->fn_list.setup_oq_regs = cn23xx_vf_setup_oq_regs;
+ lio_dev->fn_list.setup_mbox = cn23xx_vf_setup_mbox;
+ lio_dev->fn_list.free_mbox = cn23xx_vf_free_mbox;
+
+ lio_dev->fn_list.setup_device_regs = cn23xx_vf_setup_device_regs;
+
+ lio_dev->fn_list.enable_io_queues = cn23xx_vf_enable_io_queues;
+ lio_dev->fn_list.disable_io_queues = cn23xx_vf_disable_io_queues;
+
+ return 0;
+}
+
+int
+cn23xx_vf_set_io_queues_off(struct lio_device *lio_dev)
+{
+ uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
+ uint64_t q_no;
+
+ /* Disable the i/p and o/p queues for this Octeon.
+ * IOQs will already be in reset.
+ * If RST bit is set, wait for Quiet bit to be set
+ * Once Quiet bit is set, clear the RST bit
+ */
+ PMD_INIT_FUNC_TRACE();
+
+ for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {
+ volatile uint64_t reg_val;
+
+ reg_val = lio_read_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) && !(reg_val &
+ CN23XX_PKT_INPUT_CTL_QUIET) && loop) {
+ reg_val = lio_read_csr64(
+ lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ loop = loop - 1;
+ }
+
+ if (loop == 0) {
+ lio_dev_err(lio_dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno %lu\n",
+ (unsigned long)q_no);
+ return -1;
+ }
+
+ reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ reg_val = lio_read_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ lio_dev_err(lio_dev, "unable to reset qno %lu\n",
+ (unsigned long)q_no);
+ return -1;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/liquidio/base/lio_23xx_vf.h b/drivers/net/liquidio/base/lio_23xx_vf.h
new file mode 100644
index 00000000..ad8db0df
--- /dev/null
+++ b/drivers/net/liquidio/base/lio_23xx_vf.h
@@ -0,0 +1,97 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_23XX_VF_H_
+#define _LIO_23XX_VF_H_
+
+#include <stdio.h>
+
+#include "lio_struct.h"
+
+static const struct lio_config default_cn23xx_conf = {
+ .card_type = LIO_23XX,
+ .card_name = LIO_23XX_NAME,
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN23XX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN23XX_MAX_IQ_DESCRIPTORS * CN23XX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ },
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN23XX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .refill_threshold = CN23XX_OQ_REFIL_THRESHOLD,
+ },
+
+ .num_nic_ports = CN23XX_DEFAULT_NUM_PORTS,
+ .num_def_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN23XX_OQ_BUF_SIZE,
+};
+
+static inline const struct lio_config *
+lio_get_conf(struct lio_device *lio_dev)
+{
+ const struct lio_config *default_lio_conf = NULL;
+
+ /* check the LIO Device model & return the corresponding lio
+ * configuration
+ */
+ default_lio_conf = &default_cn23xx_conf;
+
+ if (default_lio_conf == NULL) {
+ lio_dev_err(lio_dev, "Configuration verification failed\n");
+ return NULL;
+ }
+
+ return default_lio_conf;
+}
+
+/** Turns off the input and output queues for the device
+ * @param lio_dev which device io queues to disable
+ */
+int cn23xx_vf_set_io_queues_off(struct lio_device *lio_dev);
+
+#define CN23XX_VF_BUSY_READING_REG_LOOP_COUNT 100000
+
+void cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev);
+
+int cn23xx_pfvf_handshake(struct lio_device *lio_dev);
+
+int cn23xx_vf_setup_device(struct lio_device *lio_dev);
+
+void cn23xx_vf_handle_mbox(struct lio_device *lio_dev);
+#endif /* _LIO_23XX_VF_H_ */
diff --git a/drivers/net/liquidio/base/lio_hw_defs.h b/drivers/net/liquidio/base/lio_hw_defs.h
new file mode 100644
index 00000000..67eaa452
--- /dev/null
+++ b/drivers/net/liquidio/base/lio_hw_defs.h
@@ -0,0 +1,249 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_HW_DEFS_H_
+#define _LIO_HW_DEFS_H_
+
+#include <rte_io.h>
+
+#ifndef PCI_VENDOR_ID_CAVIUM
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#endif
+
+#define LIO_CN23XX_VF_VID 0x9712
+
+/* --------------------------CONFIG VALUES------------------------ */
+
+/* CN23xx IQ configuration macros */
+#define CN23XX_MAX_RINGS_PER_PF 64
+#define CN23XX_MAX_RINGS_PER_VF 8
+
+#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_IQ_DESCRIPTORS 512
+#define CN23XX_MIN_IQ_DESCRIPTORS 128
+
+#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_OQ_DESCRIPTORS 512
+#define CN23XX_MIN_OQ_DESCRIPTORS 128
+#define CN23XX_OQ_BUF_SIZE 1536
+
+#define CN23XX_OQ_REFIL_THRESHOLD 16
+
+#define CN23XX_DEFAULT_NUM_PORTS 1
+
+#define CN23XX_CFG_IO_QUEUES CN23XX_MAX_RINGS_PER_PF
+
+/* common OCTEON configuration macros */
+#define OCTEON_64BYTE_INSTR 64
+#define OCTEON_OQ_INFOPTR_MODE 1
+
+/* Max IOQs per LIO Link */
+#define LIO_MAX_IOQS_PER_IF 64
+
+enum lio_card_type {
+ LIO_23XX /* 23xx */
+};
+
+#define LIO_23XX_NAME "23xx"
+
+#define LIO_DEV_RUNNING 0xc
+
+#define LIO_OQ_REFILL_THRESHOLD_CFG(cfg) \
+ ((cfg)->default_config->oq.refill_threshold)
+#define LIO_NUM_DEF_TX_DESCS_CFG(cfg) \
+ ((cfg)->default_config->num_def_tx_descs)
+
+#define LIO_IQ_INSTR_TYPE(cfg) ((cfg)->default_config->iq.instr_type)
+
+/* The following config values are fixed and should not be modified. */
+
+/* Maximum number of Instruction queues */
+#define LIO_MAX_INSTR_QUEUES(lio_dev) CN23XX_MAX_RINGS_PER_VF
+
+#define LIO_MAX_POSSIBLE_INSTR_QUEUES CN23XX_MAX_INPUT_QUEUES
+#define LIO_MAX_POSSIBLE_OUTPUT_QUEUES CN23XX_MAX_OUTPUT_QUEUES
+
+#define LIO_DEVICE_NAME_LEN 32
+#define LIO_BASE_MAJOR_VERSION 1
+#define LIO_BASE_MINOR_VERSION 5
+#define LIO_BASE_MICRO_VERSION 1
+
+#define LIO_FW_VERSION_LENGTH 32
+
+/** Tag types used by Octeon cores in its work. */
+enum octeon_tag_type {
+ OCTEON_ORDERED_TAG = 0,
+ OCTEON_ATOMIC_TAG = 1,
+};
+
+/* pre-defined host->NIC tag values */
+#define LIO_CONTROL (0x11111110)
+#define LIO_DATA(i) (0x11111111 + (i))
+
+/* used for NIC operations */
+#define LIO_OPCODE 1
+
+/* Subcodes are used by host driver/apps to identify the sub-operation
+ * for the core. They only need to by unique for a given subsystem.
+ */
+#define LIO_OPCODE_SUBCODE(op, sub) \
+ ((((op) & 0x0f) << 8) | ((sub) & 0x7f))
+
+/** LIO_OPCODE subcodes */
+/* This subcode is sent by core PCI driver to indicate cores are ready. */
+#define LIO_OPCODE_NW_DATA 0x02 /* network packet data */
+#define LIO_OPCODE_CMD 0x03
+#define LIO_OPCODE_INFO 0x04
+#define LIO_OPCODE_PORT_STATS 0x05
+#define LIO_OPCODE_IF_CFG 0x09
+
+#define LIO_MIN_RX_BUF_SIZE 64
+#define LIO_MAX_RX_PKTLEN (64 * 1024)
+
+/* NIC Command types */
+#define LIO_CMD_CHANGE_DEVFLAGS 0x3
+#define LIO_CMD_RX_CTL 0x4
+#define LIO_CMD_CLEAR_STATS 0x6
+#define LIO_CMD_SET_RSS 0xD
+#define LIO_CMD_TNL_RX_CSUM_CTL 0x10
+#define LIO_CMD_TNL_TX_CSUM_CTL 0x11
+#define LIO_CMD_ADD_VLAN_FILTER 0x17
+#define LIO_CMD_DEL_VLAN_FILTER 0x18
+#define LIO_CMD_VXLAN_PORT_CONFIG 0x19
+
+#define LIO_CMD_VXLAN_PORT_ADD 0x0
+#define LIO_CMD_VXLAN_PORT_DEL 0x1
+#define LIO_CMD_RXCSUM_ENABLE 0x0
+#define LIO_CMD_TXCSUM_ENABLE 0x0
+
+/* RX(packets coming from wire) Checksum verification flags */
+/* TCP/UDP csum */
+#define LIO_L4_CSUM_VERIFIED 0x1
+#define LIO_IP_CSUM_VERIFIED 0x2
+
+/* RSS */
+#define LIO_RSS_PARAM_DISABLE_RSS 0x10
+#define LIO_RSS_PARAM_HASH_KEY_UNCHANGED 0x08
+#define LIO_RSS_PARAM_ITABLE_UNCHANGED 0x04
+#define LIO_RSS_PARAM_HASH_INFO_UNCHANGED 0x02
+
+#define LIO_RSS_HASH_IPV4 0x100
+#define LIO_RSS_HASH_TCP_IPV4 0x200
+#define LIO_RSS_HASH_IPV6 0x400
+#define LIO_RSS_HASH_TCP_IPV6 0x1000
+#define LIO_RSS_HASH_IPV6_EX 0x800
+#define LIO_RSS_HASH_TCP_IPV6_EX 0x2000
+
+#define LIO_RSS_OFFLOAD_ALL ( \
+ LIO_RSS_HASH_IPV4 | \
+ LIO_RSS_HASH_TCP_IPV4 | \
+ LIO_RSS_HASH_IPV6 | \
+ LIO_RSS_HASH_TCP_IPV6 | \
+ LIO_RSS_HASH_IPV6_EX | \
+ LIO_RSS_HASH_TCP_IPV6_EX)
+
+#define LIO_RSS_MAX_TABLE_SZ 128
+#define LIO_RSS_MAX_KEY_SZ 40
+#define LIO_RSS_PARAM_SIZE 16
+
+/* Interface flags communicated between host driver and core app. */
+enum lio_ifflags {
+ LIO_IFFLAG_ALLMULTI = 0x02,
+ LIO_IFFLAG_UNICAST = 0x10
+};
+
+/* Routines for reading and writing CSRs */
+#ifdef RTE_LIBRTE_LIO_DEBUG_REGS
+#define lio_write_csr(lio_dev, reg_off, value) \
+ do { \
+ typeof(lio_dev) _dev = lio_dev; \
+ typeof(reg_off) _reg_off = reg_off; \
+ typeof(value) _value = value; \
+ PMD_REGS_LOG(_dev, \
+ "Write32: Reg: 0x%08lx Val: 0x%08lx\n", \
+ (unsigned long)_reg_off, \
+ (unsigned long)_value); \
+ rte_write32(_value, _dev->hw_addr + _reg_off); \
+ } while (0)
+
+#define lio_write_csr64(lio_dev, reg_off, val64) \
+ do { \
+ typeof(lio_dev) _dev = lio_dev; \
+ typeof(reg_off) _reg_off = reg_off; \
+ typeof(val64) _val64 = val64; \
+ PMD_REGS_LOG( \
+ _dev, \
+ "Write64: Reg: 0x%08lx Val: 0x%016llx\n", \
+ (unsigned long)_reg_off, \
+ (unsigned long long)_val64); \
+ rte_write64(_val64, _dev->hw_addr + _reg_off); \
+ } while (0)
+
+#define lio_read_csr(lio_dev, reg_off) \
+ ({ \
+ typeof(lio_dev) _dev = lio_dev; \
+ typeof(reg_off) _reg_off = reg_off; \
+ uint32_t val = rte_read32(_dev->hw_addr + _reg_off); \
+ PMD_REGS_LOG(_dev, \
+ "Read32: Reg: 0x%08lx Val: 0x%08lx\n", \
+ (unsigned long)_reg_off, \
+ (unsigned long)val); \
+ val; \
+ })
+
+#define lio_read_csr64(lio_dev, reg_off) \
+ ({ \
+ typeof(lio_dev) _dev = lio_dev; \
+ typeof(reg_off) _reg_off = reg_off; \
+ uint64_t val64 = rte_read64(_dev->hw_addr + _reg_off); \
+ PMD_REGS_LOG( \
+ _dev, \
+ "Read64: Reg: 0x%08lx Val: 0x%016llx\n", \
+ (unsigned long)_reg_off, \
+ (unsigned long long)val64); \
+ val64; \
+ })
+#else
+#define lio_write_csr(lio_dev, reg_off, value) \
+ rte_write32(value, (lio_dev)->hw_addr + (reg_off))
+
+#define lio_write_csr64(lio_dev, reg_off, val64) \
+ rte_write64(val64, (lio_dev)->hw_addr + (reg_off))
+
+#define lio_read_csr(lio_dev, reg_off) \
+ rte_read32((lio_dev)->hw_addr + (reg_off))
+
+#define lio_read_csr64(lio_dev, reg_off) \
+ rte_read64((lio_dev)->hw_addr + (reg_off))
+#endif
+#endif /* _LIO_HW_DEFS_H_ */
diff --git a/drivers/net/liquidio/base/lio_mbox.c b/drivers/net/liquidio/base/lio_mbox.c
new file mode 100644
index 00000000..b4abc623
--- /dev/null
+++ b/drivers/net/liquidio/base/lio_mbox.c
@@ -0,0 +1,275 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+
+#include "lio_logs.h"
+#include "lio_struct.h"
+#include "lio_mbox.h"
+
+/**
+ * lio_mbox_read:
+ * @mbox: Pointer mailbox
+ *
+ * Reads the 8-bytes of data from the mbox register
+ * Writes back the acknowledgment indicating completion of read
+ */
+int
+lio_mbox_read(struct lio_mbox *mbox)
+{
+ union lio_mbox_message msg;
+ int ret = 0;
+
+ msg.mbox_msg64 = rte_read64(mbox->mbox_read_reg);
+
+ if ((msg.mbox_msg64 == LIO_PFVFACK) || (msg.mbox_msg64 == LIO_PFVFSIG))
+ return 0;
+
+ if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVING) {
+ mbox->mbox_req.data[mbox->mbox_req.recv_len - 1] =
+ msg.mbox_msg64;
+ mbox->mbox_req.recv_len++;
+ } else {
+ if (mbox->state & LIO_MBOX_STATE_RES_RECEIVING) {
+ mbox->mbox_resp.data[mbox->mbox_resp.recv_len - 1] =
+ msg.mbox_msg64;
+ mbox->mbox_resp.recv_len++;
+ } else {
+ if ((mbox->state & LIO_MBOX_STATE_IDLE) &&
+ (msg.s.type == LIO_MBOX_REQUEST)) {
+ mbox->state &= ~LIO_MBOX_STATE_IDLE;
+ mbox->state |= LIO_MBOX_STATE_REQ_RECEIVING;
+ mbox->mbox_req.msg.mbox_msg64 = msg.mbox_msg64;
+ mbox->mbox_req.q_no = mbox->q_no;
+ mbox->mbox_req.recv_len = 1;
+ } else {
+ if ((mbox->state &
+ LIO_MBOX_STATE_RES_PENDING) &&
+ (msg.s.type == LIO_MBOX_RESPONSE)) {
+ mbox->state &=
+ ~LIO_MBOX_STATE_RES_PENDING;
+ mbox->state |=
+ LIO_MBOX_STATE_RES_RECEIVING;
+ mbox->mbox_resp.msg.mbox_msg64 =
+ msg.mbox_msg64;
+ mbox->mbox_resp.q_no = mbox->q_no;
+ mbox->mbox_resp.recv_len = 1;
+ } else {
+ rte_write64(LIO_PFVFERR,
+ mbox->mbox_read_reg);
+ mbox->state |= LIO_MBOX_STATE_ERROR;
+ return -1;
+ }
+ }
+ }
+ }
+
+ if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVING) {
+ if (mbox->mbox_req.recv_len < msg.s.len) {
+ ret = 0;
+ } else {
+ mbox->state &= ~LIO_MBOX_STATE_REQ_RECEIVING;
+ mbox->state |= LIO_MBOX_STATE_REQ_RECEIVED;
+ ret = 1;
+ }
+ } else {
+ if (mbox->state & LIO_MBOX_STATE_RES_RECEIVING) {
+ if (mbox->mbox_resp.recv_len < msg.s.len) {
+ ret = 0;
+ } else {
+ mbox->state &= ~LIO_MBOX_STATE_RES_RECEIVING;
+ mbox->state |= LIO_MBOX_STATE_RES_RECEIVED;
+ ret = 1;
+ }
+ } else {
+ RTE_ASSERT(0);
+ }
+ }
+
+ rte_write64(LIO_PFVFACK, mbox->mbox_read_reg);
+
+ return ret;
+}
+
+/**
+ * lio_mbox_write:
+ * @lio_dev: Pointer lio device
+ * @mbox_cmd: Cmd to send to mailbox.
+ *
+ * Populates the queue specific mbox structure
+ * with cmd information.
+ * Write the cmd to mbox register
+ */
+int
+lio_mbox_write(struct lio_device *lio_dev,
+ struct lio_mbox_cmd *mbox_cmd)
+{
+ struct lio_mbox *mbox = lio_dev->mbox[mbox_cmd->q_no];
+ uint32_t count, i, ret = LIO_MBOX_STATUS_SUCCESS;
+
+ if ((mbox_cmd->msg.s.type == LIO_MBOX_RESPONSE) &&
+ !(mbox->state & LIO_MBOX_STATE_REQ_RECEIVED))
+ return LIO_MBOX_STATUS_FAILED;
+
+ if ((mbox_cmd->msg.s.type == LIO_MBOX_REQUEST) &&
+ !(mbox->state & LIO_MBOX_STATE_IDLE))
+ return LIO_MBOX_STATUS_BUSY;
+
+ if (mbox_cmd->msg.s.type == LIO_MBOX_REQUEST) {
+ rte_memcpy(&mbox->mbox_resp, mbox_cmd,
+ sizeof(struct lio_mbox_cmd));
+ mbox->state = LIO_MBOX_STATE_RES_PENDING;
+ }
+
+ count = 0;
+
+ while (rte_read64(mbox->mbox_write_reg) != LIO_PFVFSIG) {
+ rte_delay_ms(1);
+ if (count++ == 1000) {
+ ret = LIO_MBOX_STATUS_FAILED;
+ break;
+ }
+ }
+
+ if (ret == LIO_MBOX_STATUS_SUCCESS) {
+ rte_write64(mbox_cmd->msg.mbox_msg64, mbox->mbox_write_reg);
+ for (i = 0; i < (uint32_t)(mbox_cmd->msg.s.len - 1); i++) {
+ count = 0;
+ while (rte_read64(mbox->mbox_write_reg) !=
+ LIO_PFVFACK) {
+ rte_delay_ms(1);
+ if (count++ == 1000) {
+ ret = LIO_MBOX_STATUS_FAILED;
+ break;
+ }
+ }
+ rte_write64(mbox_cmd->data[i], mbox->mbox_write_reg);
+ }
+ }
+
+ if (mbox_cmd->msg.s.type == LIO_MBOX_RESPONSE) {
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+ } else {
+ if ((!mbox_cmd->msg.s.resp_needed) ||
+ (ret == LIO_MBOX_STATUS_FAILED)) {
+ mbox->state &= ~LIO_MBOX_STATE_RES_PENDING;
+ if (!(mbox->state & (LIO_MBOX_STATE_REQ_RECEIVING |
+ LIO_MBOX_STATE_REQ_RECEIVED)))
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * lio_mbox_process_cmd:
+ * @mbox: Pointer mailbox
+ * @mbox_cmd: Pointer to command received
+ *
+ * Process the cmd received in mbox
+ */
+static int
+lio_mbox_process_cmd(struct lio_mbox *mbox,
+ struct lio_mbox_cmd *mbox_cmd)
+{
+ struct lio_device *lio_dev = mbox->lio_dev;
+
+ if (mbox_cmd->msg.s.cmd == LIO_CORES_CRASHED)
+ lio_dev_err(lio_dev, "Octeon core(s) crashed or got stuck!\n");
+
+ return 0;
+}
+
+/**
+ * Process the received mbox message.
+ */
+int
+lio_mbox_process_message(struct lio_mbox *mbox)
+{
+ struct lio_mbox_cmd mbox_cmd;
+
+ if (mbox->state & LIO_MBOX_STATE_ERROR) {
+ if (mbox->state & (LIO_MBOX_STATE_RES_PENDING |
+ LIO_MBOX_STATE_RES_RECEIVING)) {
+ rte_memcpy(&mbox_cmd, &mbox->mbox_resp,
+ sizeof(struct lio_mbox_cmd));
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+ mbox_cmd.recv_status = 1;
+ if (mbox_cmd.fn)
+ mbox_cmd.fn(mbox->lio_dev, &mbox_cmd,
+ mbox_cmd.fn_arg);
+
+ return 0;
+ }
+
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+
+ return 0;
+ }
+
+ if (mbox->state & LIO_MBOX_STATE_RES_RECEIVED) {
+ rte_memcpy(&mbox_cmd, &mbox->mbox_resp,
+ sizeof(struct lio_mbox_cmd));
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+ mbox_cmd.recv_status = 0;
+ if (mbox_cmd.fn)
+ mbox_cmd.fn(mbox->lio_dev, &mbox_cmd, mbox_cmd.fn_arg);
+
+ return 0;
+ }
+
+ if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVED) {
+ rte_memcpy(&mbox_cmd, &mbox->mbox_req,
+ sizeof(struct lio_mbox_cmd));
+ if (!mbox_cmd.msg.s.resp_needed) {
+ mbox->state &= ~LIO_MBOX_STATE_REQ_RECEIVED;
+ if (!(mbox->state & LIO_MBOX_STATE_RES_PENDING))
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+ }
+
+ lio_mbox_process_cmd(mbox, &mbox_cmd);
+
+ return 0;
+ }
+
+ RTE_ASSERT(0);
+
+ return 0;
+}
diff --git a/drivers/net/liquidio/base/lio_mbox.h b/drivers/net/liquidio/base/lio_mbox.h
new file mode 100644
index 00000000..b0875d64
--- /dev/null
+++ b/drivers/net/liquidio/base/lio_mbox.h
@@ -0,0 +1,131 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_MBOX_H_
+#define _LIO_MBOX_H_
+
+#include <stdint.h>
+
+#include <rte_spinlock.h>
+
+/* Macros for Mail Box Communication */
+
+#define LIO_MBOX_DATA_MAX 32
+
+#define LIO_VF_ACTIVE 0x1
+#define LIO_VF_FLR_REQUEST 0x2
+#define LIO_CORES_CRASHED 0x3
+
+/* Macro for Read acknowledgment */
+#define LIO_PFVFACK 0xffffffffffffffff
+#define LIO_PFVFSIG 0x1122334455667788
+#define LIO_PFVFERR 0xDEADDEADDEADDEAD
+
+enum lio_mbox_cmd_status {
+ LIO_MBOX_STATUS_SUCCESS = 0,
+ LIO_MBOX_STATUS_FAILED = 1,
+ LIO_MBOX_STATUS_BUSY = 2
+};
+
+enum lio_mbox_message_type {
+ LIO_MBOX_REQUEST = 0,
+ LIO_MBOX_RESPONSE = 1
+};
+
+union lio_mbox_message {
+ uint64_t mbox_msg64;
+ struct {
+ uint16_t type : 1;
+ uint16_t resp_needed : 1;
+ uint16_t cmd : 6;
+ uint16_t len : 8;
+ uint8_t params[6];
+ } s;
+};
+
+typedef void (*lio_mbox_callback)(void *, void *, void *);
+
+struct lio_mbox_cmd {
+ union lio_mbox_message msg;
+ uint64_t data[LIO_MBOX_DATA_MAX];
+ uint32_t q_no;
+ uint32_t recv_len;
+ uint32_t recv_status;
+ lio_mbox_callback fn;
+ void *fn_arg;
+};
+
+enum lio_mbox_state {
+ LIO_MBOX_STATE_IDLE = 1,
+ LIO_MBOX_STATE_REQ_RECEIVING = 2,
+ LIO_MBOX_STATE_REQ_RECEIVED = 4,
+ LIO_MBOX_STATE_RES_PENDING = 8,
+ LIO_MBOX_STATE_RES_RECEIVING = 16,
+ LIO_MBOX_STATE_RES_RECEIVED = 16,
+ LIO_MBOX_STATE_ERROR = 32
+};
+
+struct lio_mbox {
+ /* A spinlock to protect access to this q_mbox. */
+ rte_spinlock_t lock;
+
+ struct lio_device *lio_dev;
+
+ uint32_t q_no;
+
+ enum lio_mbox_state state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ void *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ void *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ void *mbox_read_reg;
+
+ struct lio_mbox_cmd mbox_req;
+
+ struct lio_mbox_cmd mbox_resp;
+
+};
+
+int lio_mbox_read(struct lio_mbox *mbox);
+int lio_mbox_write(struct lio_device *lio_dev,
+ struct lio_mbox_cmd *mbox_cmd);
+int lio_mbox_process_message(struct lio_mbox *mbox);
+#endif /* _LIO_MBOX_H_ */
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
new file mode 100644
index 00000000..436d25b0
--- /dev/null
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -0,0 +1,2058 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_alarm.h>
+
+#include "lio_logs.h"
+#include "lio_23xx_vf.h"
+#include "lio_ethdev.h"
+#include "lio_rxtx.h"
+
+/* Default RSS key in use */
+static uint8_t lio_rss_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+static const struct rte_eth_desc_lim lio_rx_desc_lim = {
+ .nb_max = CN23XX_MAX_OQ_DESCRIPTORS,
+ .nb_min = CN23XX_MIN_OQ_DESCRIPTORS,
+ .nb_align = 1,
+};
+
+static const struct rte_eth_desc_lim lio_tx_desc_lim = {
+ .nb_max = CN23XX_MAX_IQ_DESCRIPTORS,
+ .nb_min = CN23XX_MIN_IQ_DESCRIPTORS,
+ .nb_align = 1,
+};
+
+/* Wait for control command to reach nic. */
+static uint16_t
+lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
+ struct lio_dev_ctrl_cmd *ctrl_cmd)
+{
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+
+ while ((ctrl_cmd->cond == 0) && --timeout) {
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+ rte_delay_ms(1);
+ }
+
+ return !timeout;
+}
+
+/**
+ * \brief Send Rx control command
+ * @param eth_dev Pointer to the structure rte_eth_dev
+ * @param start_stop whether to start or stop
+ */
+static int
+lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
+ ctrl_pkt.ncmd.s.param1 = start_stop;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send RX Control message\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "RX Control command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* store statistics names and its offset in stats structure */
+struct rte_lio_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
+static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
+ {"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
+ {"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
+ {"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
+ {"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
+ {"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
+ {"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
+ {"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
+ {"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
+ {"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
+ {"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
+ {"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
+ {"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
+ {"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
+ {"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_broadcast_pkts",
+ (offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_multicast_pkts",
+ (offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_total_collisions", (offsetof(struct octeon_tx_stats,
+ total_collisions)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
+ sizeof(struct octeon_rx_stats)},
+};
+
+#define LIO_NB_XSTATS RTE_DIM(rte_lio_stats_strings)
+
+/* Get hw stats of the port */
+static int
+lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+ struct octeon_link_stats *hw_stats;
+ struct lio_link_stats_resp *resp;
+ struct lio_soft_command *sc;
+ uint32_t resp_size;
+ unsigned int i;
+ int retval;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ if (n < LIO_NB_XSTATS)
+ return LIO_NB_XSTATS;
+
+ resp_size = sizeof(struct lio_link_stats_resp);
+ sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
+ if (sc == NULL)
+ return -ENOMEM;
+
+ resp = (struct lio_link_stats_resp *)sc->virtrptr;
+ lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
+ LIO_OPCODE_PORT_STATS, 0, 0, 0);
+
+ /* Setting wait time in seconds */
+ sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
+
+ retval = lio_send_soft_command(lio_dev, sc);
+ if (retval == LIO_IQ_SEND_FAILED) {
+ lio_dev_err(lio_dev, "failed to get port stats from firmware. status: %x\n",
+ retval);
+ goto get_stats_fail;
+ }
+
+ while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
+ lio_process_ordered_list(lio_dev);
+ rte_delay_ms(1);
+ }
+
+ retval = resp->status;
+ if (retval) {
+ lio_dev_err(lio_dev, "failed to get port stats from firmware\n");
+ goto get_stats_fail;
+ }
+
+ lio_swap_8B_data((uint64_t *)(&resp->link_stats),
+ sizeof(struct octeon_link_stats) >> 3);
+
+ hw_stats = &resp->link_stats;
+
+ for (i = 0; i < LIO_NB_XSTATS; i++) {
+ xstats[i].id = i;
+ xstats[i].value =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_lio_stats_strings[i].offset);
+ }
+
+ lio_free_soft_command(sc);
+
+ return LIO_NB_XSTATS;
+
+get_stats_fail:
+ lio_free_soft_command(sc);
+
+ return -1;
+}
+
+static int
+lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit __rte_unused)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ unsigned int i;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ if (xstats_names == NULL)
+ return LIO_NB_XSTATS;
+
+ /* Note: limit checked in rte_eth_xstats_names() */
+
+ for (i = 0; i < LIO_NB_XSTATS; i++) {
+ snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
+ "%s", rte_lio_stats_strings[i].name);
+ }
+
+ return LIO_NB_XSTATS;
+}
+
+/* Reset hw stats for the port */
+static void
+lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send clear stats command\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Clear stats command timed out\n");
+ return;
+ }
+
+ /* clear stored per queue stats */
+ RTE_FUNC_PTR_OR_RET(*eth_dev->dev_ops->stats_reset);
+ (*eth_dev->dev_ops->stats_reset)(eth_dev);
+}
+
+/* Retrieve the device statistics (# packets in/out, # bytes in/out, etc */
+static void
+lio_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *stats)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_droq_stats *oq_stats;
+ struct lio_iq_stats *iq_stats;
+ struct lio_instr_queue *txq;
+ struct lio_droq *droq;
+ int i, iq_no, oq_no;
+ uint64_t bytes = 0;
+ uint64_t pkts = 0;
+ uint64_t drop = 0;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ iq_no = lio_dev->linfo.txpciq[i].s.q_no;
+ txq = lio_dev->instr_queue[iq_no];
+ if (txq != NULL) {
+ iq_stats = &txq->stats;
+ pkts += iq_stats->tx_done;
+ drop += iq_stats->tx_dropped;
+ bytes += iq_stats->tx_tot_bytes;
+ }
+ }
+
+ stats->opackets = pkts;
+ stats->obytes = bytes;
+ stats->oerrors = drop;
+
+ pkts = 0;
+ drop = 0;
+ bytes = 0;
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
+ droq = lio_dev->droq[oq_no];
+ if (droq != NULL) {
+ oq_stats = &droq->stats;
+ pkts += oq_stats->rx_pkts_received;
+ drop += (oq_stats->rx_dropped +
+ oq_stats->dropped_toomany +
+ oq_stats->dropped_nomem);
+ bytes += oq_stats->rx_bytes_received;
+ }
+ }
+ stats->ibytes = bytes;
+ stats->ipackets = pkts;
+ stats->ierrors = drop;
+}
+
+static void
+lio_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_droq_stats *oq_stats;
+ struct lio_iq_stats *iq_stats;
+ struct lio_instr_queue *txq;
+ struct lio_droq *droq;
+ int i, iq_no, oq_no;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ iq_no = lio_dev->linfo.txpciq[i].s.q_no;
+ txq = lio_dev->instr_queue[iq_no];
+ if (txq != NULL) {
+ iq_stats = &txq->stats;
+ memset(iq_stats, 0, sizeof(struct lio_iq_stats));
+ }
+ }
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
+ droq = lio_dev->droq[oq_no];
+ if (droq != NULL) {
+ oq_stats = &droq->stats;
+ memset(oq_stats, 0, sizeof(struct lio_droq_stats));
+ }
+ }
+}
+
+static void
+lio_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *devinfo)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ devinfo->max_rx_queues = lio_dev->max_rx_queues;
+ devinfo->max_tx_queues = lio_dev->max_tx_queues;
+
+ devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;
+ devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;
+
+ devinfo->max_mac_addrs = 1;
+
+ devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_VLAN_STRIP);
+ devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
+
+ devinfo->rx_desc_lim = lio_rx_desc_lim;
+ devinfo->tx_desc_lim = lio_tx_desc_lim;
+
+ devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
+ devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
+ devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_IPV6_EX |
+ ETH_RSS_IPV6_TCP_EX);
+}
+
+static int
+lio_dev_validate_vf_mtu(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't check MTU\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ /* Limit the MTU to make sure the ethernet packets are between
+ * ETHER_MIN_MTU bytes and PF's MTU
+ */
+ if ((new_mtu < ETHER_MIN_MTU) ||
+ (new_mtu > lio_dev->linfo.link.s.mtu)) {
+ lio_dev_err(lio_dev, "Invalid MTU: %d\n", new_mtu);
+ lio_dev_err(lio_dev, "Valid range %d and %d\n",
+ ETHER_MIN_MTU, lio_dev->linfo.link.s.mtu);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct lio_rss_set *rss_param;
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+ int i, j, index;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't update reta\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
+ lio_dev_err(lio_dev,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
+ reta_size, LIO_RSS_MAX_TABLE_SZ);
+ return -EINVAL;
+ }
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
+ ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ rss_param->param.flags = 0xF;
+ rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
+ rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
+
+ for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
+ index = (i * RTE_RETA_GROUP_SIZE) + j;
+ rss_state->itable[index] = reta_conf[i].reta[j];
+ }
+ }
+ }
+
+ rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
+ memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);
+
+ lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to set rss hash\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Set rss hash timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ int i, num;
+
+ if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
+ lio_dev_err(lio_dev,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
+ reta_size, LIO_RSS_MAX_TABLE_SZ);
+ return -EINVAL;
+ }
+
+ num = reta_size / RTE_RETA_GROUP_SIZE;
+
+ for (i = 0; i < num; i++) {
+ memcpy(reta_conf->reta,
+ &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
+ RTE_RETA_GROUP_SIZE);
+ reta_conf++;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ uint8_t *hash_key = NULL;
+ uint64_t rss_hf = 0;
+
+ if (rss_state->hash_disable) {
+ lio_dev_info(lio_dev, "RSS disabled in nic\n");
+ rss_conf->rss_hf = 0;
+ return 0;
+ }
+
+ /* Get key value */
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL)
+ memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
+
+ if (rss_state->ip)
+ rss_hf |= ETH_RSS_IPV4;
+ if (rss_state->tcp_hash)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (rss_state->ipv6)
+ rss_hf |= ETH_RSS_IPV6;
+ if (rss_state->ipv6_tcp_hash)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ if (rss_state->ipv6_ex)
+ rss_hf |= ETH_RSS_IPV6_EX;
+ if (rss_state->ipv6_tcp_ex_hash)
+ rss_hf |= ETH_RSS_IPV6_TCP_EX;
+
+ rss_conf->rss_hf = rss_hf;
+
+ return 0;
+}
+
+static int
+lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct lio_rss_set *rss_param;
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't update hash\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
+ ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ rss_param->param.flags = 0xF;
+
+ if (rss_conf->rss_key) {
+ rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
+ rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
+ rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
+ memcpy(rss_state->hash_key, rss_conf->rss_key,
+ rss_state->hash_key_size);
+ memcpy(rss_param->key, rss_state->hash_key,
+ rss_state->hash_key_size);
+ }
+
+ if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
+ /* Can't disable rss through hash flags,
+ * if it is enabled by default during init
+ */
+ if (!rss_state->hash_disable)
+ return -EINVAL;
+
+ /* This is for --disable-rss during testpmd launch */
+ rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
+ } else {
+ uint32_t hashinfo = 0;
+
+ /* Can't enable rss if disabled by default during init */
+ if (rss_state->hash_disable)
+ return -EINVAL;
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+ hashinfo |= LIO_RSS_HASH_IPV4;
+ rss_state->ip = 1;
+ } else {
+ rss_state->ip = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ hashinfo |= LIO_RSS_HASH_TCP_IPV4;
+ rss_state->tcp_hash = 1;
+ } else {
+ rss_state->tcp_hash = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV6) {
+ hashinfo |= LIO_RSS_HASH_IPV6;
+ rss_state->ipv6 = 1;
+ } else {
+ rss_state->ipv6 = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ hashinfo |= LIO_RSS_HASH_TCP_IPV6;
+ rss_state->ipv6_tcp_hash = 1;
+ } else {
+ rss_state->ipv6_tcp_hash = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
+ hashinfo |= LIO_RSS_HASH_IPV6_EX;
+ rss_state->ipv6_ex = 1;
+ } else {
+ rss_state->ipv6_ex = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
+ hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
+ rss_state->ipv6_tcp_ex_hash = 1;
+ } else {
+ rss_state->ipv6_tcp_ex_hash = 0;
+ }
+
+ rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
+ rss_param->param.hashinfo = hashinfo;
+ }
+
+ lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to set rss hash\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Set rss hash timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Add vxlan dest udp port for an interface.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ * @param udp_tnl
+ * udp tunnel conf
+ *
+ * @return
+ * On success return 0
+ * On failure return -1
+ */
+static int
+lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tnl)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (udp_tnl == NULL)
+ return -EINVAL;
+
+ if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+ lio_dev_err(lio_dev, "Unsupported tunnel type\n");
+ return -1;
+ }
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
+ ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
+ ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Remove vxlan dest udp port for an interface.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ * @param udp_tnl
+ * udp tunnel conf
+ *
+ * @return
+ * On success return 0
+ * On failure return -1
+ */
+static int
+lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tnl)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (udp_tnl == NULL)
+ return -EINVAL;
+
+ if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+ lio_dev_err(lio_dev, "Unsupported tunnel type\n");
+ return -1;
+ }
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
+ ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
+ ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (lio_dev->linfo.vlan_is_admin_assigned)
+ return -EPERM;
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = on ?
+ LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
+ ctrl_pkt.ncmd.s.param1 = vlan_id;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
+ on ? "add" : "remove");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
+ on ? "add" : "remove");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param eth_dev
+ * - Pointer to the structure rte_eth_dev to read from.
+ * - Pointer to the buffer to be saved with the link status.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+lio_dev_atomic_write_link_status(struct rte_eth_dev *eth_dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &eth_dev->data->dev_link;
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static uint64_t
+lio_hweight64(uint64_t w)
+{
+ uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);
+
+ res =
+ (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+ res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+ res = res + (res >> 8);
+ res = res + (res >> 16);
+
+ return (res + (res >> 32)) & 0x00000000000000FFul;
+}
+
+static int
+lio_dev_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete __rte_unused)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct rte_eth_link link, old;
+
+ /* Initialize */
+ link.link_status = ETH_LINK_DOWN;
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ memset(&old, 0, sizeof(old));
+
+ /* Return what we found */
+ if (lio_dev->linfo.link.s.link_up == 0) {
+ /* Interface is down */
+ if (lio_dev_atomic_write_link_status(eth_dev, &link))
+ return -1;
+ if (link.link_status == old.link_status)
+ return -1;
+ return 0;
+ }
+
+ link.link_status = ETH_LINK_UP; /* Interface is up */
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ switch (lio_dev->linfo.link.s.speed) {
+ case LIO_LINK_SPEED_10000:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case LIO_LINK_SPEED_25000:
+ link.link_speed = ETH_SPEED_NUM_25G;
+ break;
+ default:
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ }
+
+ if (lio_dev_atomic_write_link_status(eth_dev, &link))
+ return -1;
+
+ if (link.link_status == old.link_status)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * \brief Net device enable, disable allmulticast
+ * @param eth_dev Pointer to the structure rte_eth_dev
+ */
+static void
+lio_change_dev_flag(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ /* Create a ctrl pkt command to be sent to core app. */
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
+ ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send change flag message\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ lio_dev_err(lio_dev, "Change dev flag command timed out\n");
+}
+
+static void
+lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct rte_eth_rss_reta_entry64 reta_conf[8];
+ struct rte_eth_rss_conf rss_conf;
+ uint16_t i;
+
+ /* Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
+ rss_state->hash_disable = 1;
+ lio_dev_rss_hash_update(eth_dev, &rss_conf);
+ return;
+ }
+
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = lio_rss_key; /* Default hash key */
+
+ lio_dev_rss_hash_update(eth_dev, &rss_conf);
+
+ memset(reta_conf, 0, sizeof(reta_conf));
+ for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
+ uint8_t q_idx, conf_idx, reta_idx;
+
+ q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
+ i % eth_dev->data->nb_rx_queues : 0);
+ conf_idx = i / RTE_RETA_GROUP_SIZE;
+ reta_idx = i % RTE_RETA_GROUP_SIZE;
+ reta_conf[conf_idx].reta[reta_idx] = q_idx;
+ reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
+ }
+
+ lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);
+}
+
+static void
+lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct rte_eth_rss_conf rss_conf;
+
+ switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ lio_dev_rss_configure(eth_dev);
+ break;
+ case ETH_MQ_RX_NONE:
+ /* if mq_mode is none, disable rss mode. */
+ default:
+ memset(&rss_conf, 0, sizeof(rss_conf));
+ rss_state->hash_disable = 1;
+ lio_dev_rss_hash_update(eth_dev, &rss_conf);
+ }
+}
+
+/**
+ * Setup our receive queue/ringbuffer. This is the
+ * queue the Octeon uses to send us packets and
+ * responses. We are given a memory pool for our
+ * packet buffers that are used to populate the receive
+ * queue.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ * @param q_no
+ * Queue number
+ * @param num_rx_descs
+ * Number of entries in the queue
+ * @param socket_id
+ * Where to allocate memory
+ * @param rx_conf
+ * Pointer to the struction rte_eth_rxconf
+ * @param mp
+ * Pointer to the packet pool
+ *
+ * @return
+ * - On success, return 0
+ * - On failure, return -1
+ */
+static int
+lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
+ uint16_t num_rx_descs, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint32_t fw_mapped_oq;
+ uint16_t buf_size;
+
+ if (q_no >= lio_dev->nb_rx_queues) {
+ lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
+ return -EINVAL;
+ }
+
+ lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);
+
+ fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;
+
+ if ((lio_dev->droq[fw_mapped_oq]) &&
+ (num_rx_descs != lio_dev->droq[fw_mapped_oq]->max_count)) {
+ lio_dev_err(lio_dev,
+ "Reconfiguring Rx descs not supported. Configure descs to same value %u or restart application\n",
+ lio_dev->droq[fw_mapped_oq]->max_count);
+ return -ENOTSUP;
+ }
+
+ mbp_priv = rte_mempool_get_priv(mp);
+ buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+ if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
+ socket_id)) {
+ lio_dev_err(lio_dev, "droq allocation failed\n");
+ return -1;
+ }
+
+ eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];
+
+ return 0;
+}
+
+/**
+ * Release the receive queue/ringbuffer. Called by
+ * the upper layers.
+ *
+ * @param rxq
+ * Opaque pointer to the receive queue to release
+ *
+ * @return
+ * - nothing
+ */
+void
+lio_dev_rx_queue_release(void *rxq)
+{
+ struct lio_droq *droq = rxq;
+ int oq_no;
+
+ if (droq) {
+ /* Run time queue deletion not supported */
+ if (droq->lio_dev->port_configured)
+ return;
+
+ oq_no = droq->q_no;
+ lio_delete_droq_queue(droq->lio_dev, oq_no);
+ }
+}
+
+/**
+ * Allocate and initialize SW ring. Initialize associated HW registers.
+ *
+ * @param eth_dev
+ * Pointer to structure rte_eth_dev
+ *
+ * @param q_no
+ * Queue number
+ *
+ * @param num_tx_descs
+ * Number of ringbuffer descriptors
+ *
+ * @param socket_id
+ * NUMA socket id, used for memory allocations
+ *
+ * @param tx_conf
+ * Pointer to the structure rte_eth_txconf
+ *
+ * @return
+ * - On success, return 0
+ * - On failure, return -errno value
+ */
+static int
+lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
+ uint16_t num_tx_descs, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
+ int retval;
+
+ if (q_no >= lio_dev->nb_tx_queues) {
+ lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
+ return -EINVAL;
+ }
+
+ lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);
+
+ if ((lio_dev->instr_queue[fw_mapped_iq] != NULL) &&
+ (num_tx_descs != lio_dev->instr_queue[fw_mapped_iq]->max_count)) {
+ lio_dev_err(lio_dev,
+ "Reconfiguring Tx descs not supported. Configure descs to same value %u or restart application\n",
+ lio_dev->instr_queue[fw_mapped_iq]->max_count);
+ return -ENOTSUP;
+ }
+
+ retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
+ num_tx_descs, lio_dev, socket_id);
+
+ if (retval) {
+ lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
+ return retval;
+ }
+
+ retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
+ lio_dev->instr_queue[fw_mapped_iq]->max_count,
+ socket_id);
+
+ if (retval) {
+ lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
+ return retval;
+ }
+
+ eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];
+
+ return 0;
+}
+
+/**
+ * Release the transmit queue/ringbuffer. Called by
+ * the upper layers.
+ *
+ * @param txq
+ * Opaque pointer to the transmit queue to release
+ *
+ * @return
+ * - nothing
+ */
+void
+lio_dev_tx_queue_release(void *txq)
+{
+ struct lio_instr_queue *tq = txq;
+ uint32_t fw_mapped_iq_no;
+
+
+ if (tq) {
+ /* Run time queue deletion not supported */
+ if (tq->lio_dev->port_configured)
+ return;
+
+ /* Free sg_list */
+ lio_delete_sglist(tq);
+
+ fw_mapped_iq_no = tq->txpciq.s.q_no;
+ lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
+ }
+}
+
+/**
+ * Api to check link state.
+ */
+static void
+lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+ struct lio_link_status_resp *resp;
+ union octeon_link_status *ls;
+ struct lio_soft_command *sc;
+ uint32_t resp_size;
+
+ if (!lio_dev->intf_open)
+ return;
+
+ resp_size = sizeof(struct lio_link_status_resp);
+ sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
+ if (sc == NULL)
+ return;
+
+ resp = (struct lio_link_status_resp *)sc->virtrptr;
+ lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
+ LIO_OPCODE_INFO, 0, 0, 0);
+
+ /* Setting wait time in seconds */
+ sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
+
+ if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
+ goto get_status_fail;
+
+ while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
+ rte_delay_ms(1);
+ }
+
+ if (resp->status)
+ goto get_status_fail;
+
+ ls = &resp->link_info.link;
+
+ lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);
+
+ if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
+ lio_dev->linfo.link.link_status64 = ls->link_status64;
+ lio_dev_link_update(eth_dev, 0);
+ }
+
+ lio_free_soft_command(sc);
+
+ return;
+
+get_status_fail:
+ lio_free_soft_command(sc);
+}
+
+/* This function will be invoked every LSC_TIMEOUT ns (100ms)
+ * and will update link state if it changes.
+ */
+static void
+lio_sync_link_state_check(void *eth_dev)
+{
+ struct lio_device *lio_dev =
+ (((struct rte_eth_dev *)eth_dev)->data->dev_private);
+
+ if (lio_dev->port_configured)
+ lio_dev_get_link_status(eth_dev);
+
+ /* Schedule periodic link status check.
+ * Stop check if interface is close and start again while opening.
+ */
+ if (lio_dev->intf_open)
+ rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
+ eth_dev);
+}
+
+static int
+lio_dev_start(struct rte_eth_dev *eth_dev)
+{
+ uint16_t mtu = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+ int ret = 0;
+
+ lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);
+
+ if (lio_dev->fn_list.enable_io_queues(lio_dev))
+ return -1;
+
+ if (lio_send_rx_ctrl_cmd(eth_dev, 1))
+ return -1;
+
+ /* Ready for link status updates */
+ lio_dev->intf_open = 1;
+ rte_mb();
+
+ /* Configure RSS if device configured with multiple RX queues. */
+ lio_dev_mq_rx_configure(eth_dev);
+
+ /* start polling for lsc */
+ ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
+ lio_sync_link_state_check,
+ eth_dev);
+ if (ret) {
+ lio_dev_err(lio_dev,
+ "link state check handler creation failed\n");
+ goto dev_lsc_handle_error;
+ }
+
+ while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))
+ rte_delay_ms(1);
+
+ if (lio_dev->linfo.link.link_status64 == 0) {
+ ret = -1;
+ goto dev_mtu_check_error;
+ }
+
+ if (lio_dev->linfo.link.s.mtu != mtu) {
+ ret = lio_dev_validate_vf_mtu(eth_dev, mtu);
+ if (ret)
+ goto dev_mtu_check_error;
+ }
+
+ return 0;
+
+dev_mtu_check_error:
+ rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
+
+dev_lsc_handle_error:
+ lio_dev->intf_open = 0;
+ lio_send_rx_ctrl_cmd(eth_dev, 0);
+
+ return ret;
+}
+
+/* Stop device and disable input/output functions */
+static void
+lio_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id);
+ lio_dev->intf_open = 0;
+ rte_mb();
+
+ /* Cancel callback if still running. */
+ rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
+
+ lio_send_rx_ctrl_cmd(eth_dev, 0);
+
+ /* Clear recorded link status */
+ lio_dev->linfo.link.link_status64 = 0;
+}
+
+static int
+lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
+ return 0;
+ }
+
+ if (lio_dev->linfo.link.s.link_up) {
+ lio_dev_info(lio_dev, "Link is already UP\n");
+ return 0;
+ }
+
+ if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
+ lio_dev_err(lio_dev, "Unable to set Link UP\n");
+ return -1;
+ }
+
+ lio_dev->linfo.link.s.link_up = 1;
+ eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+
+ return 0;
+}
+
+static int
+lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
+ return 0;
+ }
+
+ if (!lio_dev->linfo.link.s.link_up) {
+ lio_dev_info(lio_dev, "Link is already DOWN\n");
+ return 0;
+ }
+
+ lio_dev->linfo.link.s.link_up = 0;
+ eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
+ if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
+ lio_dev->linfo.link.s.link_up = 1;
+ eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ lio_dev_err(lio_dev, "Unable to set Link Down\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Reset and stop the device. This occurs on the first
+ * call to this routine. Subsequent calls will simply
+ * return. NB: This will require the NIC to be rebooted.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ *
+ * @return
+ * - nothing
+ */
+static void
+lio_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint32_t i;
+
+ lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);
+
+ if (lio_dev->intf_open)
+ lio_dev_stop(eth_dev);
+
+ lio_wait_for_instr_fetch(lio_dev);
+
+ lio_dev->fn_list.disable_io_queues(lio_dev);
+
+ cn23xx_vf_set_io_queues_off(lio_dev);
+
+ /* Reset iq regs (IQ_DBELL).
+ * Clear sli_pktx_cnts (OQ_PKTS_SENT).
+ */
+ for (i = 0; i < lio_dev->nb_rx_queues; i++) {
+ struct lio_droq *droq = lio_dev->droq[i];
+
+ if (droq == NULL)
+ break;
+
+ uint32_t pkt_count = rte_read32(droq->pkts_sent_reg);
+
+ lio_dev_dbg(lio_dev,
+ "pending oq count %u\n", pkt_count);
+ rte_write32(pkt_count, droq->pkts_sent_reg);
+ }
+
+ /* Do FLR for the VF */
+ cn23xx_vf_ask_pf_to_do_flr(lio_dev);
+
+ /* lio_free_mbox */
+ lio_dev->fn_list.free_mbox(lio_dev);
+
+ /* Free glist resources */
+ rte_free(lio_dev->glist_head);
+ rte_free(lio_dev->glist_lock);
+ lio_dev->glist_head = NULL;
+ lio_dev->glist_lock = NULL;
+
+ lio_dev->port_configured = 0;
+
+ /* Delete all queues */
+ lio_dev_clear_queues(eth_dev);
+}
+
+/**
+ * Enable tunnel rx checksum verification from firmware.
+ */
+static void
+lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;
+ ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n");
+}
+
+/**
+ * Enable checksum calculation for inner packet in a tunnel.
+ */
+static void
+lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ /* flush added to prevent cmd failure
+ * incase the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;
+ ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
+}
+
+static int lio_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+ int retval, num_iqueues, num_oqueues;
+ uint8_t mac[ETHER_ADDR_LEN], i;
+ struct lio_if_cfg_resp *resp;
+ struct lio_soft_command *sc;
+ union lio_if_cfg if_cfg;
+ uint32_t resp_size;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Re-configuring firmware not supported.
+ * Can't change tx/rx queues per port from initial value.
+ */
+ if (lio_dev->port_configured) {
+ if ((lio_dev->nb_rx_queues != eth_dev->data->nb_rx_queues) ||
+ (lio_dev->nb_tx_queues != eth_dev->data->nb_tx_queues)) {
+ lio_dev_err(lio_dev,
+ "rxq/txq re-conf not supported. Restart application with new value.\n");
+ return -ENOTSUP;
+ }
+ return 0;
+ }
+
+ lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
+ lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;
+
+ resp_size = sizeof(struct lio_if_cfg_resp);
+ sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
+ if (sc == NULL)
+ return -ENOMEM;
+
+ resp = (struct lio_if_cfg_resp *)sc->virtrptr;
+
+ /* Firmware doesn't have capability to reconfigure the queues,
+ * Claim all queues, and use as many required
+ */
+ if_cfg.if_cfg64 = 0;
+ if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
+ if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
+ if_cfg.s.base_queue = 0;
+
+ if_cfg.s.gmx_port_id = lio_dev->pf_num;
+
+ lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
+ LIO_OPCODE_IF_CFG, 0,
+ if_cfg.if_cfg64, 0);
+
+ /* Setting wait time in seconds */
+ sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
+
+ retval = lio_send_soft_command(lio_dev, sc);
+ if (retval == LIO_IQ_SEND_FAILED) {
+ lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
+ retval);
+ /* Soft instr is freed by driver in case of failure. */
+ goto nic_config_fail;
+ }
+
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived or timed-out.
+ */
+ while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
+ lio_process_ordered_list(lio_dev);
+ rte_delay_ms(1);
+ }
+
+ retval = resp->status;
+ if (retval) {
+ lio_dev_err(lio_dev, "iq/oq config failed\n");
+ goto nic_config_fail;
+ }
+
+ lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
+ sizeof(struct octeon_if_cfg_info) >> 3);
+
+ num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
+ num_oqueues = lio_hweight64(resp->cfg_info.oqmask);
+
+ if (!(num_iqueues) || !(num_oqueues)) {
+ lio_dev_err(lio_dev,
+ "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
+ (unsigned long)resp->cfg_info.iqmask,
+ (unsigned long)resp->cfg_info.oqmask);
+ goto nic_config_fail;
+ }
+
+ lio_dev_dbg(lio_dev,
+ "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
+ eth_dev->data->port_id,
+ (unsigned long)resp->cfg_info.iqmask,
+ (unsigned long)resp->cfg_info.oqmask,
+ num_iqueues, num_oqueues);
+
+ lio_dev->linfo.num_rxpciq = num_oqueues;
+ lio_dev->linfo.num_txpciq = num_iqueues;
+
+ for (i = 0; i < num_oqueues; i++) {
+ lio_dev->linfo.rxpciq[i].rxpciq64 =
+ resp->cfg_info.linfo.rxpciq[i].rxpciq64;
+ lio_dev_dbg(lio_dev, "index %d OQ %d\n",
+ i, lio_dev->linfo.rxpciq[i].s.q_no);
+ }
+
+ for (i = 0; i < num_iqueues; i++) {
+ lio_dev->linfo.txpciq[i].txpciq64 =
+ resp->cfg_info.linfo.txpciq[i].txpciq64;
+ lio_dev_dbg(lio_dev, "index %d IQ %d\n",
+ i, lio_dev->linfo.txpciq[i].s.q_no);
+ }
+
+ lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+ lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+ lio_dev->linfo.link.link_status64 =
+ resp->cfg_info.linfo.link.link_status64;
+
+ /* 64-bit swap required on LE machines */
+ lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
+ 2 + i));
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *)mac, &eth_dev->data->mac_addrs[0]);
+
+ /* enable firmware checksum support for tunnel packets */
+ lio_enable_hw_tunnel_rx_checksum(eth_dev);
+ lio_enable_hw_tunnel_tx_checksum(eth_dev);
+
+ lio_dev->glist_lock =
+ rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
+ if (lio_dev->glist_lock == NULL)
+ return -ENOMEM;
+
+ lio_dev->glist_head =
+ rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
+ 0);
+ if (lio_dev->glist_head == NULL) {
+ rte_free(lio_dev->glist_lock);
+ lio_dev->glist_lock = NULL;
+ return -ENOMEM;
+ }
+
+ lio_dev_link_update(eth_dev, 0);
+
+ lio_dev->port_configured = 1;
+
+ lio_free_soft_command(sc);
+
+ /* Disable iq_0 for reconf */
+ lio_dev->fn_list.disable_io_queues(lio_dev);
+
+ /* Reset ioq regs */
+ lio_dev->fn_list.setup_device_regs(lio_dev);
+
+ /* Free iq_0 used during init */
+ lio_free_instr_queue0(lio_dev);
+
+ return 0;
+
+nic_config_fail:
+ lio_dev_err(lio_dev, "Failed retval %d\n", retval);
+ lio_free_soft_command(sc);
+ lio_free_instr_queue0(lio_dev);
+
+ return -ENODEV;
+}
+
+/* Define our ethernet definitions */
+static const struct eth_dev_ops liovf_eth_dev_ops = {
+ .dev_configure = lio_dev_configure,
+ .dev_start = lio_dev_start,
+ .dev_stop = lio_dev_stop,
+ .dev_set_link_up = lio_dev_set_link_up,
+ .dev_set_link_down = lio_dev_set_link_down,
+ .dev_close = lio_dev_close,
+ .allmulticast_enable = lio_dev_allmulticast_enable,
+ .allmulticast_disable = lio_dev_allmulticast_disable,
+ .link_update = lio_dev_link_update,
+ .stats_get = lio_dev_stats_get,
+ .xstats_get = lio_dev_xstats_get,
+ .xstats_get_names = lio_dev_xstats_get_names,
+ .stats_reset = lio_dev_stats_reset,
+ .xstats_reset = lio_dev_xstats_reset,
+ .dev_infos_get = lio_dev_info_get,
+ .vlan_filter_set = lio_dev_vlan_filter_set,
+ .rx_queue_setup = lio_dev_rx_queue_setup,
+ .rx_queue_release = lio_dev_rx_queue_release,
+ .tx_queue_setup = lio_dev_tx_queue_setup,
+ .tx_queue_release = lio_dev_tx_queue_release,
+ .reta_update = lio_dev_rss_reta_update,
+ .reta_query = lio_dev_rss_reta_query,
+ .rss_hash_conf_get = lio_dev_rss_hash_conf_get,
+ .rss_hash_update = lio_dev_rss_hash_update,
+ .udp_tunnel_port_add = lio_dev_udp_tunnel_add,
+ .udp_tunnel_port_del = lio_dev_udp_tunnel_del,
+};
+
+static void
+lio_check_pf_hs_response(void *lio_dev)
+{
+ struct lio_device *dev = lio_dev;
+
+ /* check till response arrives */
+ if (dev->pfvf_hsword.coproc_tics_per_us)
+ return;
+
+ cn23xx_vf_handle_mbox(dev);
+
+ rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
+}
+
+/**
+ * \brief Identify the LIO device and to map the BAR address space
+ * @param lio_dev lio device
+ */
+static int
+lio_chip_specific_setup(struct lio_device *lio_dev)
+{
+ struct rte_pci_device *pdev = lio_dev->pci_dev;
+ uint32_t dev_id = pdev->id.device_id;
+ const char *s;
+ int ret = 1;
+
+ switch (dev_id) {
+ case LIO_CN23XX_VF_VID:
+ lio_dev->chip_id = LIO_CN23XX_VF_VID;
+ ret = cn23xx_vf_setup_device(lio_dev);
+ s = "CN23XX VF";
+ break;
+ default:
+ s = "?";
+ lio_dev_err(lio_dev, "Unsupported Chip\n");
+ }
+
+ if (!ret)
+ lio_dev_info(lio_dev, "DEVICE : %s\n", s);
+
+ return ret;
+}
+
+static int
+lio_first_time_init(struct lio_device *lio_dev,
+ struct rte_pci_device *pdev)
+{
+ int dpdk_queues;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* set dpdk specific pci device pointer */
+ lio_dev->pci_dev = pdev;
+
+ /* Identify the LIO type and set device ops */
+ if (lio_chip_specific_setup(lio_dev)) {
+ lio_dev_err(lio_dev, "Chip specific setup failed\n");
+ return -1;
+ }
+
+ /* Initialize soft command buffer pool */
+ if (lio_setup_sc_buffer_pool(lio_dev)) {
+ lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");
+ return -1;
+ }
+
+ /* Initialize lists to manage the requests of different types that
+ * arrive from applications for this lio device.
+ */
+ lio_setup_response_list(lio_dev);
+
+ if (lio_dev->fn_list.setup_mbox(lio_dev)) {
+ lio_dev_err(lio_dev, "Mailbox setup failed\n");
+ goto error;
+ }
+
+ /* Check PF response */
+ lio_check_pf_hs_response((void *)lio_dev);
+
+ /* Do handshake and exit if incompatible PF driver */
+ if (cn23xx_pfvf_handshake(lio_dev))
+ goto error;
+
+ /* Initial reset */
+ cn23xx_vf_ask_pf_to_do_flr(lio_dev);
+ /* Wait for FLR for 100ms per SRIOV specification */
+ rte_delay_ms(100);
+
+ if (cn23xx_vf_set_io_queues_off(lio_dev)) {
+ lio_dev_err(lio_dev, "Setting io queues off failed\n");
+ goto error;
+ }
+
+ if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
+ lio_dev_err(lio_dev, "Failed to configure device registers\n");
+ goto error;
+ }
+
+ if (lio_setup_instr_queue0(lio_dev)) {
+ lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
+ goto error;
+ }
+
+ dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;
+
+ lio_dev->max_tx_queues = dpdk_queues;
+ lio_dev->max_rx_queues = dpdk_queues;
+
+ /* Enable input and output queues for this device */
+ if (lio_dev->fn_list.enable_io_queues(lio_dev))
+ goto error;
+
+ return 0;
+
+error:
+ lio_free_sc_buffer_pool(lio_dev);
+ if (lio_dev->mbox[0])
+ lio_dev->fn_list.free_mbox(lio_dev);
+ if (lio_dev->instr_queue[0])
+ lio_free_instr_queue0(lio_dev);
+
+ return -1;
+}
+
+static int
+lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ /* lio_free_sc_buffer_pool */
+ lio_free_sc_buffer_pool(lio_dev);
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ return 0;
+}
+
+static int
+lio_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
+ eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;
+
+ /* Primary does the initialization. */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ rte_eth_copy_pci_info(eth_dev, pdev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
+ if (pdev->mem_resource[0].addr) {
+ lio_dev->hw_addr = pdev->mem_resource[0].addr;
+ } else {
+ PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
+ return -ENODEV;
+ }
+
+ lio_dev->eth_dev = eth_dev;
+ /* set lio device print string */
+ snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
+ "%s[%02x:%02x.%x]", pdev->driver->driver.name,
+ pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+
+ lio_dev->port_id = eth_dev->data->port_id;
+
+ if (lio_first_time_init(lio_dev, pdev)) {
+ lio_dev_err(lio_dev, "Device init failed\n");
+ return -EINVAL;
+ }
+
+ eth_dev->dev_ops = &liovf_eth_dev_ops;
+ eth_dev->data->mac_addrs = rte_zmalloc("lio", ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ lio_dev_err(lio_dev,
+ "MAC addresses memory allocation failed\n");
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ return -ENOMEM;
+ }
+
+ rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);
+ rte_wmb();
+
+ lio_dev->port_configured = 0;
+ /* Always allow unicast packets */
+ lio_dev->ifflags |= LIO_IFFLAG_UNICAST;
+
+ return 0;
+}
+
+static int
+lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ eth_dev = rte_eth_dev_pci_allocate(pci_dev,
+ sizeof(struct lio_device));
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ ret = lio_eth_dev_init(eth_dev);
+ if (ret)
+ rte_eth_dev_pci_release(eth_dev);
+
+ return ret;
+}
+
+static int
+lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ lio_eth_dev_uninit);
+}
+
+/* Set of PCI devices this driver supports */
+static const struct rte_pci_id pci_id_liovf_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
+ { .vendor_id = 0, /* sentinel */ }
+};
+
+static struct rte_pci_driver rte_liovf_pmd = {
+ .id_table = pci_id_liovf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = lio_eth_dev_pci_probe,
+ .remove = lio_eth_dev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio");
diff --git a/drivers/net/liquidio/lio_ethdev.h b/drivers/net/liquidio/lio_ethdev.h
new file mode 100644
index 00000000..655c2011
--- /dev/null
+++ b/drivers/net/liquidio/lio_ethdev.h
@@ -0,0 +1,205 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_ETHDEV_H_
+#define _LIO_ETHDEV_H_
+
+#include <stdint.h>
+
+#include "lio_struct.h"
+
+/* timeout to check link state updates from firmware in us */
+#define LIO_LSC_TIMEOUT 100000 /* 100000us (100ms) */
+#define LIO_MAX_CMD_TIMEOUT 10000 /* 10000ms (10s) */
+
+#define LIO_DEV(_eth_dev) ((_eth_dev)->data->dev_private)
+
+/* LIO Response condition variable */
+struct lio_dev_ctrl_cmd {
+ struct rte_eth_dev *eth_dev;
+ uint64_t cond;
+};
+
+enum lio_bus_speed {
+ LIO_LINK_SPEED_UNKNOWN = 0,
+ LIO_LINK_SPEED_10000 = 10000,
+ LIO_LINK_SPEED_25000 = 25000
+};
+
+struct octeon_if_cfg_info {
+ uint64_t iqmask; /** mask for IQs enabled for the port */
+ uint64_t oqmask; /** mask for OQs enabled for the port */
+ struct octeon_link_info linfo; /** initial link information */
+ char lio_firmware_version[LIO_FW_VERSION_LENGTH];
+};
+
+/** Stats for each NIC port in RX direction. */
+struct octeon_rx_stats {
+ /* link-level stats */
+ uint64_t total_rcvd;
+ uint64_t bytes_rcvd;
+ uint64_t total_bcst;
+ uint64_t total_mcst;
+ uint64_t runts;
+ uint64_t ctl_rcvd;
+ uint64_t fifo_err; /* Accounts for over/under-run of buffers */
+ uint64_t dmac_drop;
+ uint64_t fcs_err;
+ uint64_t jabber_err;
+ uint64_t l2_err;
+ uint64_t frame_err;
+
+ /* firmware stats */
+ uint64_t fw_total_rcvd;
+ uint64_t fw_total_fwd;
+ uint64_t fw_total_fwd_bytes;
+ uint64_t fw_err_pko;
+ uint64_t fw_err_link;
+ uint64_t fw_err_drop;
+ uint64_t fw_rx_vxlan;
+ uint64_t fw_rx_vxlan_err;
+
+ /* LRO */
+ uint64_t fw_lro_pkts; /* Number of packets that are LROed */
+ uint64_t fw_lro_octs; /* Number of octets that are LROed */
+ uint64_t fw_total_lro; /* Number of LRO packets formed */
+ uint64_t fw_lro_aborts; /* Number of times lRO of packet aborted */
+ uint64_t fw_lro_aborts_port;
+ uint64_t fw_lro_aborts_seq;
+ uint64_t fw_lro_aborts_tsval;
+ uint64_t fw_lro_aborts_timer;
+ /* intrmod: packet forward rate */
+ uint64_t fwd_rate;
+};
+
+/** Stats for each NIC port in RX direction. */
+struct octeon_tx_stats {
+ /* link-level stats */
+ uint64_t total_pkts_sent;
+ uint64_t total_bytes_sent;
+ uint64_t mcast_pkts_sent;
+ uint64_t bcast_pkts_sent;
+ uint64_t ctl_sent;
+ uint64_t one_collision_sent; /* Packets sent after one collision */
+ /* Packets sent after multiple collision */
+ uint64_t multi_collision_sent;
+ /* Packets not sent due to max collisions */
+ uint64_t max_collision_fail;
+ /* Packets not sent due to max deferrals */
+ uint64_t max_deferral_fail;
+ /* Accounts for over/under-run of buffers */
+ uint64_t fifo_err;
+ uint64_t runts;
+ uint64_t total_collisions; /* Total number of collisions detected */
+
+ /* firmware stats */
+ uint64_t fw_total_sent;
+ uint64_t fw_total_fwd;
+ uint64_t fw_total_fwd_bytes;
+ uint64_t fw_err_pko;
+ uint64_t fw_err_link;
+ uint64_t fw_err_drop;
+ uint64_t fw_err_tso;
+ uint64_t fw_tso; /* number of tso requests */
+ uint64_t fw_tso_fwd; /* number of packets segmented in tso */
+ uint64_t fw_tx_vxlan;
+};
+
+struct octeon_link_stats {
+ struct octeon_rx_stats fromwire;
+ struct octeon_tx_stats fromhost;
+};
+
+union lio_if_cfg {
+ uint64_t if_cfg64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t base_queue : 16;
+ uint64_t num_iqueues : 16;
+ uint64_t num_oqueues : 16;
+ uint64_t gmx_port_id : 8;
+ uint64_t vf_id : 8;
+#else
+ uint64_t vf_id : 8;
+ uint64_t gmx_port_id : 8;
+ uint64_t num_oqueues : 16;
+ uint64_t num_iqueues : 16;
+ uint64_t base_queue : 16;
+#endif
+ } s;
+};
+
+struct lio_if_cfg_resp {
+ uint64_t rh;
+ struct octeon_if_cfg_info cfg_info;
+ uint64_t status;
+};
+
+struct lio_link_stats_resp {
+ uint64_t rh;
+ struct octeon_link_stats link_stats;
+ uint64_t status;
+};
+
+struct lio_link_status_resp {
+ uint64_t rh;
+ struct octeon_link_info link_info;
+ uint64_t status;
+};
+
+struct lio_rss_set {
+ struct param {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ uint64_t flags : 16;
+ uint64_t hashinfo : 32;
+ uint64_t itablesize : 16;
+ uint64_t hashkeysize : 16;
+ uint64_t reserved : 48;
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t itablesize : 16;
+ uint64_t hashinfo : 32;
+ uint64_t flags : 16;
+ uint64_t reserved : 48;
+ uint64_t hashkeysize : 16;
+#endif
+ } param;
+
+ uint8_t itable[LIO_RSS_MAX_TABLE_SZ];
+ uint8_t key[LIO_RSS_MAX_KEY_SZ];
+};
+
+void lio_dev_rx_queue_release(void *rxq);
+
+void lio_dev_tx_queue_release(void *txq);
+
+#endif /* _LIO_ETHDEV_H_ */
diff --git a/drivers/net/liquidio/lio_logs.h b/drivers/net/liquidio/lio_logs.h
new file mode 100644
index 00000000..a4c9ca4d
--- /dev/null
+++ b/drivers/net/liquidio/lio_logs.h
@@ -0,0 +1,91 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_LOGS_H_
+#define _LIO_LOGS_H_
+
+#define lio_dev_printf(lio_dev, level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s" fmt, (lio_dev)->dev_string, ##args)
+
+#define lio_dev_info(lio_dev, fmt, args...) \
+ lio_dev_printf(lio_dev, INFO, "INFO: " fmt, ##args)
+
+#define lio_dev_err(lio_dev, fmt, args...) \
+ lio_dev_printf(lio_dev, ERR, "ERROR: %s() " fmt, __func__, ##args)
+
+#define PMD_INIT_LOG(level, fmt, args...) RTE_LOG(level, PMD, fmt, ## args)
+
+/* Enable these through config options */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, "%s() >>\n", __func__)
+#else /* !RTE_LIBRTE_LIO_DEBUG_INIT */
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_INIT */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_DRIVER
+#define lio_dev_dbg(lio_dev, fmt, args...) \
+ lio_dev_printf(lio_dev, DEBUG, "DEBUG: %s() " fmt, __func__, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_DRIVER */
+#define lio_dev_dbg(lio_dev, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_DRIVER */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_RX
+#define PMD_RX_LOG(lio_dev, level, fmt, args...) \
+ lio_dev_printf(lio_dev, level, "RX: %s() " fmt, __func__, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_RX */
+#define PMD_RX_LOG(lio_dev, level, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_RX */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_TX
+#define PMD_TX_LOG(lio_dev, level, fmt, args...) \
+ lio_dev_printf(lio_dev, level, "TX: %s() " fmt, __func__, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_TX */
+#define PMD_TX_LOG(lio_dev, level, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_TX */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_MBOX
+#define PMD_MBOX_LOG(lio_dev, level, fmt, args...) \
+ lio_dev_printf(lio_dev, level, "MBOX: %s() " fmt, __func__, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_MBOX */
+#define PMD_MBOX_LOG(level, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_MBOX */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_REGS
+#define PMD_REGS_LOG(lio_dev, fmt, args...) \
+ lio_dev_printf(lio_dev, DEBUG, "REGS: " fmt, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_REGS */
+#define PMD_REGS_LOG(level, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_REGS */
+
+#endif /* _LIO_LOGS_H_ */
diff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c
new file mode 100644
index 00000000..9533015c
--- /dev/null
+++ b/drivers/net/liquidio/lio_rxtx.c
@@ -0,0 +1,1885 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+
+#include "lio_logs.h"
+#include "lio_struct.h"
+#include "lio_ethdev.h"
+#include "lio_rxtx.h"
+
+#define LIO_MAX_SG 12
+/* Flush iq if available tx_desc fall below LIO_FLUSH_WM */
+#define LIO_FLUSH_WM(_iq) ((_iq)->max_count / 2)
+#define LIO_PKT_IN_DONE_CNT_MASK 0x00000000FFFFFFFFULL
+
+static void
+lio_droq_compute_max_packet_bufs(struct lio_droq *droq)
+{
+ uint32_t count = 0;
+
+ do {
+ count += droq->buffer_size;
+ } while (count < LIO_MAX_RX_PKTLEN);
+}
+
+static void
+lio_droq_reset_indices(struct lio_droq *droq)
+{
+ droq->read_idx = 0;
+ droq->write_idx = 0;
+ droq->refill_idx = 0;
+ droq->refill_count = 0;
+ rte_atomic64_set(&droq->pkts_pending, 0);
+}
+
+static void
+lio_droq_destroy_ring_buffers(struct lio_droq *droq)
+{
+ uint32_t i;
+
+ for (i = 0; i < droq->max_count; i++) {
+ if (droq->recv_buf_list[i].buffer) {
+ rte_pktmbuf_free((struct rte_mbuf *)
+ droq->recv_buf_list[i].buffer);
+ droq->recv_buf_list[i].buffer = NULL;
+ }
+ }
+
+ lio_droq_reset_indices(droq);
+}
+
+static void *
+lio_recv_buffer_alloc(struct lio_device *lio_dev, int q_no)
+{
+ struct lio_droq *droq = lio_dev->droq[q_no];
+ struct rte_mempool *mpool = droq->mpool;
+ struct rte_mbuf *m;
+
+ m = rte_pktmbuf_alloc(mpool);
+ if (m == NULL) {
+ lio_dev_err(lio_dev, "Cannot allocate\n");
+ return NULL;
+ }
+
+ rte_mbuf_refcnt_set(m, 1);
+ m->next = NULL;
+ m->data_off = RTE_PKTMBUF_HEADROOM;
+ m->nb_segs = 1;
+ m->pool = mpool;
+
+ return m;
+}
+
+static int
+lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
+ struct lio_droq *droq)
+{
+ struct lio_droq_desc *desc_ring = droq->desc_ring;
+ uint32_t i;
+ void *buf;
+
+ for (i = 0; i < droq->max_count; i++) {
+ buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+ if (buf == NULL) {
+ lio_dev_err(lio_dev, "buffer alloc failed\n");
+ droq->stats.rx_alloc_failure++;
+ lio_droq_destroy_ring_buffers(droq);
+ return -ENOMEM;
+ }
+
+ droq->recv_buf_list[i].buffer = buf;
+ droq->info_list[i].length = 0;
+
+ /* map ring buffers into memory */
+ desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
+ desc_ring[i].buffer_ptr =
+ lio_map_ring(droq->recv_buf_list[i].buffer);
+ }
+
+ lio_droq_reset_indices(droq);
+
+ lio_droq_compute_max_packet_bufs(droq);
+
+ return 0;
+}
+
+static void
+lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz)
+{
+ const struct rte_memzone *mz_tmp;
+ int ret = 0;
+
+ if (mz == NULL) {
+ lio_dev_err(lio_dev, "Memzone NULL\n");
+ return;
+ }
+
+ mz_tmp = rte_memzone_lookup(mz->name);
+ if (mz_tmp == NULL) {
+ lio_dev_err(lio_dev, "Memzone %s Not Found\n", mz->name);
+ return;
+ }
+
+ ret = rte_memzone_free(mz);
+ if (ret)
+ lio_dev_err(lio_dev, "Memzone free Failed ret %d\n", ret);
+}
+
+/**
+ * Frees the space for descriptor ring for the droq.
+ *
+ * @param lio_dev - pointer to the lio device structure
+ * @param q_no - droq no.
+ */
+static void
+lio_delete_droq(struct lio_device *lio_dev, uint32_t q_no)
+{
+ struct lio_droq *droq = lio_dev->droq[q_no];
+
+ lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);
+
+ lio_droq_destroy_ring_buffers(droq);
+ rte_free(droq->recv_buf_list);
+ droq->recv_buf_list = NULL;
+ lio_dma_zone_free(lio_dev, droq->info_mz);
+ lio_dma_zone_free(lio_dev, droq->desc_ring_mz);
+
+ memset(droq, 0, LIO_DROQ_SIZE);
+}
+
+static void *
+lio_alloc_info_buffer(struct lio_device *lio_dev,
+ struct lio_droq *droq, unsigned int socket_id)
+{
+ droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
+ "info_list", droq->q_no,
+ (droq->max_count *
+ LIO_DROQ_INFO_SIZE),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ if (droq->info_mz == NULL)
+ return NULL;
+
+ droq->info_list_dma = droq->info_mz->phys_addr;
+ droq->info_alloc_size = droq->info_mz->len;
+ droq->info_base_addr = (size_t)droq->info_mz->addr;
+
+ return droq->info_mz->addr;
+}
+
+/**
+ * Allocates space for the descriptor ring for the droq and
+ * sets the base addr, num desc etc in Octeon registers.
+ *
+ * @param lio_dev - pointer to the lio device structure
+ * @param q_no - droq no.
+ * @param app_ctx - pointer to application context
+ * @return Success: 0 Failure: -1
+ */
+static int
+lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
+ uint32_t num_descs, uint32_t desc_size,
+ struct rte_mempool *mpool, unsigned int socket_id)
+{
+ uint32_t c_refill_threshold;
+ uint32_t desc_ring_size;
+ struct lio_droq *droq;
+
+ lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);
+
+ droq = lio_dev->droq[q_no];
+ droq->lio_dev = lio_dev;
+ droq->q_no = q_no;
+ droq->mpool = mpool;
+
+ c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);
+
+ droq->max_count = num_descs;
+ droq->buffer_size = desc_size;
+
+ desc_ring_size = droq->max_count * LIO_DROQ_DESC_SIZE;
+ droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
+ "droq", q_no,
+ desc_ring_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ if (droq->desc_ring_mz == NULL) {
+ lio_dev_err(lio_dev,
+ "Output queue %d ring alloc failed\n", q_no);
+ return -1;
+ }
+
+ droq->desc_ring_dma = droq->desc_ring_mz->phys_addr;
+ droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;
+
+ lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
+ q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
+ lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no,
+ droq->max_count);
+
+ droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);
+ if (droq->info_list == NULL) {
+ lio_dev_err(lio_dev, "Cannot allocate memory for info list.\n");
+ goto init_droq_fail;
+ }
+
+ droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
+ (droq->max_count *
+ LIO_DROQ_RECVBUF_SIZE),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (droq->recv_buf_list == NULL) {
+ lio_dev_err(lio_dev,
+ "Output queue recv buf list alloc failed\n");
+ goto init_droq_fail;
+ }
+
+ if (lio_droq_setup_ring_buffers(lio_dev, droq))
+ goto init_droq_fail;
+
+ droq->refill_threshold = c_refill_threshold;
+
+ rte_spinlock_init(&droq->lock);
+
+ lio_dev->fn_list.setup_oq_regs(lio_dev, q_no);
+
+ lio_dev->io_qmask.oq |= (1ULL << q_no);
+
+ return 0;
+
+init_droq_fail:
+ lio_delete_droq(lio_dev, q_no);
+
+ return -1;
+}
+
+int
+lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
+ int desc_size, struct rte_mempool *mpool, unsigned int socket_id)
+{
+ struct lio_droq *droq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (lio_dev->droq[oq_no]) {
+ lio_dev_dbg(lio_dev, "Droq %d in use\n", oq_no);
+ return 0;
+ }
+
+ /* Allocate the DS for the new droq. */
+ droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (droq == NULL)
+ return -ENOMEM;
+
+ lio_dev->droq[oq_no] = droq;
+
+ /* Initialize the Droq */
+ if (lio_init_droq(lio_dev, oq_no, num_descs, desc_size, mpool,
+ socket_id)) {
+ lio_dev_err(lio_dev, "Droq[%u] Initialization Failed\n", oq_no);
+ rte_free(lio_dev->droq[oq_no]);
+ lio_dev->droq[oq_no] = NULL;
+ return -ENOMEM;
+ }
+
+ lio_dev->num_oqs++;
+
+ lio_dev_dbg(lio_dev, "Total number of OQ: %d\n", lio_dev->num_oqs);
+
+ /* Send credit for octeon output queues. credits are always
+ * sent after the output queue is enabled.
+ */
+ rte_write32(lio_dev->droq[oq_no]->max_count,
+ lio_dev->droq[oq_no]->pkts_credit_reg);
+ rte_wmb();
+
+ return 0;
+}
+
+static inline uint32_t
+lio_droq_get_bufcount(uint32_t buf_size, uint32_t total_len)
+{
+ uint32_t buf_cnt = 0;
+
+ while (total_len > (buf_size * buf_cnt))
+ buf_cnt++;
+
+ return buf_cnt;
+}
+
+/* If we were not able to refill all buffers, try to move around
+ * the buffers that were not dispatched.
+ */
+static inline uint32_t
+lio_droq_refill_pullup_descs(struct lio_droq *droq,
+ struct lio_droq_desc *desc_ring)
+{
+ uint32_t refill_index = droq->refill_idx;
+ uint32_t desc_refilled = 0;
+
+ while (refill_index != droq->read_idx) {
+ if (droq->recv_buf_list[refill_index].buffer) {
+ droq->recv_buf_list[droq->refill_idx].buffer =
+ droq->recv_buf_list[refill_index].buffer;
+ desc_ring[droq->refill_idx].buffer_ptr =
+ desc_ring[refill_index].buffer_ptr;
+ droq->recv_buf_list[refill_index].buffer = NULL;
+ desc_ring[refill_index].buffer_ptr = 0;
+ do {
+ droq->refill_idx = lio_incr_index(
+ droq->refill_idx, 1,
+ droq->max_count);
+ desc_refilled++;
+ droq->refill_count--;
+ } while (droq->recv_buf_list[droq->refill_idx].buffer);
+ }
+ refill_index = lio_incr_index(refill_index, 1,
+ droq->max_count);
+ } /* while */
+
+ return desc_refilled;
+}
+
+/* lio_droq_refill
+ *
+ * @param lio_dev - pointer to the lio device structure
+ * @param droq - droq in which descriptors require new buffers.
+ *
+ * Description:
+ * Called during normal DROQ processing in interrupt mode or by the poll
+ * thread to refill the descriptors from which buffers were dispatched
+ * to upper layers. Attempts to allocate new buffers. If that fails, moves
+ * up buffers (that were not dispatched) to form a contiguous ring.
+ *
+ * Returns:
+ * No of descriptors refilled.
+ *
+ * Locks:
+ * This routine is called with droq->lock held.
+ */
+static uint32_t
+lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
+{
+ struct lio_droq_desc *desc_ring;
+ uint32_t desc_refilled = 0;
+ void *buf = NULL;
+
+ desc_ring = droq->desc_ring;
+
+ while (droq->refill_count && (desc_refilled < droq->max_count)) {
+ /* If a valid buffer exists (happens if there is no dispatch),
+ * reuse the buffer, else allocate.
+ */
+ if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
+ buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+ /* If a buffer could not be allocated, no point in
+ * continuing
+ */
+ if (buf == NULL) {
+ droq->stats.rx_alloc_failure++;
+ break;
+ }
+
+ droq->recv_buf_list[droq->refill_idx].buffer = buf;
+ }
+
+ desc_ring[droq->refill_idx].buffer_ptr =
+ lio_map_ring(droq->recv_buf_list[droq->refill_idx].buffer);
+ /* Reset any previous values in the length field. */
+ droq->info_list[droq->refill_idx].length = 0;
+
+ droq->refill_idx = lio_incr_index(droq->refill_idx, 1,
+ droq->max_count);
+ desc_refilled++;
+ droq->refill_count--;
+ }
+
+ if (droq->refill_count)
+ desc_refilled += lio_droq_refill_pullup_descs(droq, desc_ring);
+
+ /* if droq->refill_count
+ * The refill count would not change in pass two. We only moved buffers
+ * to close the gap in the ring, but we would still have the same no. of
+ * buffers to refill.
+ */
+ return desc_refilled;
+}
+
+static int
+lio_droq_fast_process_packet(struct lio_device *lio_dev,
+ struct lio_droq *droq,
+ struct rte_mbuf **rx_pkts)
+{
+ struct rte_mbuf *nicbuf = NULL;
+ struct lio_droq_info *info;
+ uint32_t total_len = 0;
+ int data_total_len = 0;
+ uint32_t pkt_len = 0;
+ union octeon_rh *rh;
+ int data_pkts = 0;
+
+ info = &droq->info_list[droq->read_idx];
+ lio_swap_8B_data((uint64_t *)info, 2);
+
+ if (!info->length)
+ return -1;
+
+ /* Len of resp hdr in included in the received data len. */
+ info->length -= OCTEON_RH_SIZE;
+ rh = &info->rh;
+
+ total_len += (uint32_t)info->length;
+
+ if (lio_opcode_slow_path(rh)) {
+ uint32_t buf_cnt;
+
+ buf_cnt = lio_droq_get_bufcount(droq->buffer_size,
+ (uint32_t)info->length);
+ droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt,
+ droq->max_count);
+ droq->refill_count += buf_cnt;
+ } else {
+ if (info->length <= droq->buffer_size) {
+ if (rh->r_dh.has_hash)
+ pkt_len = (uint32_t)(info->length - 8);
+ else
+ pkt_len = (uint32_t)info->length;
+
+ nicbuf = droq->recv_buf_list[droq->read_idx].buffer;
+ droq->recv_buf_list[droq->read_idx].buffer = NULL;
+ droq->read_idx = lio_incr_index(
+ droq->read_idx, 1,
+ droq->max_count);
+ droq->refill_count++;
+
+ if (likely(nicbuf != NULL)) {
+ nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ nicbuf->nb_segs = 1;
+ nicbuf->next = NULL;
+ /* We don't have a way to pass flags yet */
+ nicbuf->ol_flags = 0;
+ if (rh->r_dh.has_hash) {
+ uint64_t *hash_ptr;
+
+ nicbuf->ol_flags |= PKT_RX_RSS_HASH;
+ hash_ptr = rte_pktmbuf_mtod(nicbuf,
+ uint64_t *);
+ lio_swap_8B_data(hash_ptr, 1);
+ nicbuf->hash.rss = (uint32_t)*hash_ptr;
+ nicbuf->data_off += 8;
+ }
+
+ nicbuf->pkt_len = pkt_len;
+ nicbuf->data_len = pkt_len;
+ nicbuf->port = lio_dev->port_id;
+ /* Store the mbuf */
+ rx_pkts[data_pkts++] = nicbuf;
+ data_total_len += pkt_len;
+ }
+
+ /* Prefetch buffer pointers when on a cache line
+ * boundary
+ */
+ if ((droq->read_idx & 3) == 0) {
+ rte_prefetch0(
+ &droq->recv_buf_list[droq->read_idx]);
+ rte_prefetch0(
+ &droq->info_list[droq->read_idx]);
+ }
+ } else {
+ struct rte_mbuf *first_buf = NULL;
+ struct rte_mbuf *last_buf = NULL;
+
+ while (pkt_len < info->length) {
+ int cpy_len = 0;
+
+ cpy_len = ((pkt_len + droq->buffer_size) >
+ info->length)
+ ? ((uint32_t)info->length -
+ pkt_len)
+ : droq->buffer_size;
+
+ nicbuf =
+ droq->recv_buf_list[droq->read_idx].buffer;
+ droq->recv_buf_list[droq->read_idx].buffer =
+ NULL;
+
+ if (likely(nicbuf != NULL)) {
+ /* Note the first seg */
+ if (!pkt_len)
+ first_buf = nicbuf;
+
+ nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ nicbuf->nb_segs = 1;
+ nicbuf->next = NULL;
+ nicbuf->port = lio_dev->port_id;
+ /* We don't have a way to pass
+ * flags yet
+ */
+ nicbuf->ol_flags = 0;
+ if ((!pkt_len) && (rh->r_dh.has_hash)) {
+ uint64_t *hash_ptr;
+
+ nicbuf->ol_flags |=
+ PKT_RX_RSS_HASH;
+ hash_ptr = rte_pktmbuf_mtod(
+ nicbuf, uint64_t *);
+ lio_swap_8B_data(hash_ptr, 1);
+ nicbuf->hash.rss =
+ (uint32_t)*hash_ptr;
+ nicbuf->data_off += 8;
+ nicbuf->pkt_len = cpy_len - 8;
+ nicbuf->data_len = cpy_len - 8;
+ } else {
+ nicbuf->pkt_len = cpy_len;
+ nicbuf->data_len = cpy_len;
+ }
+
+ if (pkt_len)
+ first_buf->nb_segs++;
+
+ if (last_buf)
+ last_buf->next = nicbuf;
+
+ last_buf = nicbuf;
+ } else {
+ PMD_RX_LOG(lio_dev, ERR, "no buf\n");
+ }
+
+ pkt_len += cpy_len;
+ droq->read_idx = lio_incr_index(
+ droq->read_idx,
+ 1, droq->max_count);
+ droq->refill_count++;
+
+ /* Prefetch buffer pointers when on a
+ * cache line boundary
+ */
+ if ((droq->read_idx & 3) == 0) {
+ rte_prefetch0(&droq->recv_buf_list
+ [droq->read_idx]);
+
+ rte_prefetch0(
+ &droq->info_list[droq->read_idx]);
+ }
+ }
+ rx_pkts[data_pkts++] = first_buf;
+ if (rh->r_dh.has_hash)
+ data_total_len += (pkt_len - 8);
+ else
+ data_total_len += pkt_len;
+ }
+
+ /* Inform upper layer about packet checksum verification */
+ struct rte_mbuf *m = rx_pkts[data_pkts - 1];
+
+ if (rh->r_dh.csum_verified & LIO_IP_CSUM_VERIFIED)
+ m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (rh->r_dh.csum_verified & LIO_L4_CSUM_VERIFIED)
+ m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+
+ if (droq->refill_count >= droq->refill_threshold) {
+ int desc_refilled = lio_droq_refill(lio_dev, droq);
+
+ /* Flush the droq descriptor data to memory to be sure
+ * that when we update the credits the data in memory is
+ * accurate.
+ */
+ rte_wmb();
+ rte_write32(desc_refilled, droq->pkts_credit_reg);
+ /* make sure mmio write completes */
+ rte_wmb();
+ }
+
+ info->length = 0;
+ info->rh.rh64 = 0;
+
+ droq->stats.pkts_received++;
+ droq->stats.rx_pkts_received += data_pkts;
+ droq->stats.rx_bytes_received += data_total_len;
+ droq->stats.bytes_received += total_len;
+
+ return data_pkts;
+}
+
+static uint32_t
+lio_droq_fast_process_packets(struct lio_device *lio_dev,
+ struct lio_droq *droq,
+ struct rte_mbuf **rx_pkts,
+ uint32_t pkts_to_process)
+{
+ int ret, data_pkts = 0;
+ uint32_t pkt;
+
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ ret = lio_droq_fast_process_packet(lio_dev, droq,
+ &rx_pkts[data_pkts]);
+ if (ret < 0) {
+ lio_dev_err(lio_dev, "Port[%d] DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
+ lio_dev->port_id, droq->q_no,
+ droq->read_idx, pkts_to_process);
+ break;
+ }
+ data_pkts += ret;
+ }
+
+ rte_atomic64_sub(&droq->pkts_pending, pkt);
+
+ return data_pkts;
+}
+
+static inline uint32_t
+lio_droq_check_hw_for_pkts(struct lio_droq *droq)
+{
+ uint32_t last_count;
+ uint32_t pkt_count;
+
+ pkt_count = rte_read32(droq->pkts_sent_reg);
+
+ last_count = pkt_count - droq->pkt_count;
+ droq->pkt_count = pkt_count;
+
+ if (last_count)
+ rte_atomic64_add(&droq->pkts_pending, last_count);
+
+ return last_count;
+}
+
+uint16_t
+lio_dev_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t budget)
+{
+ struct lio_droq *droq = rx_queue;
+ struct lio_device *lio_dev = droq->lio_dev;
+ uint32_t pkts_processed = 0;
+ uint32_t pkt_count = 0;
+
+ lio_droq_check_hw_for_pkts(droq);
+
+ pkt_count = rte_atomic64_read(&droq->pkts_pending);
+ if (!pkt_count)
+ return 0;
+
+ if (pkt_count > budget)
+ pkt_count = budget;
+
+ /* Grab the lock */
+ rte_spinlock_lock(&droq->lock);
+ pkts_processed = lio_droq_fast_process_packets(lio_dev,
+ droq, rx_pkts,
+ pkt_count);
+
+ if (droq->pkt_count) {
+ rte_write32(droq->pkt_count, droq->pkts_sent_reg);
+ droq->pkt_count = 0;
+ }
+
+ /* Release the spin lock */
+ rte_spinlock_unlock(&droq->lock);
+
+ return pkts_processed;
+}
+
+void
+lio_delete_droq_queue(struct lio_device *lio_dev,
+ int oq_no)
+{
+ lio_delete_droq(lio_dev, oq_no);
+ lio_dev->num_oqs--;
+ rte_free(lio_dev->droq[oq_no]);
+ lio_dev->droq[oq_no] = NULL;
+}
+
+/**
+ * lio_init_instr_queue()
+ * @param lio_dev - pointer to the lio device structure.
+ * @param txpciq - queue to be initialized.
+ *
+ * Called at driver init time for each input queue. iq_conf has the
+ * configuration parameters for the queue.
+ *
+ * @return Success: 0 Failure: -1
+ */
+static int
+lio_init_instr_queue(struct lio_device *lio_dev,
+ union octeon_txpciq txpciq,
+ uint32_t num_descs, unsigned int socket_id)
+{
+ uint32_t iq_no = (uint32_t)txpciq.s.q_no;
+ struct lio_instr_queue *iq;
+ uint32_t instr_type;
+ uint32_t q_size;
+
+ instr_type = LIO_IQ_INSTR_TYPE(lio_dev);
+
+ q_size = instr_type * num_descs;
+ iq = lio_dev->instr_queue[iq_no];
+ iq->iq_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
+ "instr_queue", iq_no, q_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (iq->iq_mz == NULL) {
+ lio_dev_err(lio_dev, "Cannot allocate memory for instr queue %d\n",
+ iq_no);
+ return -1;
+ }
+
+ iq->base_addr_dma = iq->iq_mz->phys_addr;
+ iq->base_addr = (uint8_t *)iq->iq_mz->addr;
+
+ iq->max_count = num_descs;
+
+ /* Initialize a list to holds requests that have been posted to Octeon
+ * but has yet to be fetched by octeon
+ */
+ iq->request_list = rte_zmalloc_socket("request_list",
+ sizeof(*iq->request_list) *
+ num_descs,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (iq->request_list == NULL) {
+ lio_dev_err(lio_dev, "Alloc failed for IQ[%d] nr free list\n",
+ iq_no);
+ lio_dma_zone_free(lio_dev, iq->iq_mz);
+ return -1;
+ }
+
+ lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
+ iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
+ iq->max_count);
+
+ iq->lio_dev = lio_dev;
+ iq->txpciq.txpciq64 = txpciq.txpciq64;
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->lio_read_index = 0;
+ iq->flush_index = 0;
+
+ rte_atomic64_set(&iq->instr_pending, 0);
+
+ /* Initialize the spinlock for this instruction queue */
+ rte_spinlock_init(&iq->lock);
+ rte_spinlock_init(&iq->post_lock);
+
+ rte_atomic64_clear(&iq->iq_flush_running);
+
+ lio_dev->io_qmask.iq |= (1ULL << iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+ lio_dev->io_qmask.iq64B |= ((instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (instr_type == 64);
+
+ lio_dev->fn_list.setup_iq_regs(lio_dev, iq_no);
+
+ return 0;
+}
+
+int
+lio_setup_instr_queue0(struct lio_device *lio_dev)
+{
+ union octeon_txpciq txpciq;
+ uint32_t num_descs = 0;
+ uint32_t iq_no = 0;
+
+ num_descs = LIO_NUM_DEF_TX_DESCS_CFG(lio_dev);
+
+ lio_dev->num_iqs = 0;
+
+ lio_dev->instr_queue[0] = rte_zmalloc(NULL,
+ sizeof(struct lio_instr_queue), 0);
+ if (lio_dev->instr_queue[0] == NULL)
+ return -ENOMEM;
+
+ lio_dev->instr_queue[0]->q_index = 0;
+ lio_dev->instr_queue[0]->app_ctx = (void *)(size_t)0;
+ txpciq.txpciq64 = 0;
+ txpciq.s.q_no = iq_no;
+ txpciq.s.pkind = lio_dev->pfvf_hsword.pkind;
+ txpciq.s.use_qpg = 0;
+ txpciq.s.qpg = 0;
+ if (lio_init_instr_queue(lio_dev, txpciq, num_descs, SOCKET_ID_ANY)) {
+ rte_free(lio_dev->instr_queue[0]);
+ lio_dev->instr_queue[0] = NULL;
+ return -1;
+ }
+
+ lio_dev->num_iqs++;
+
+ return 0;
+}
+
+/**
+ * lio_delete_instr_queue()
+ * @param lio_dev - pointer to the lio device structure.
+ * @param iq_no - queue to be deleted.
+ *
+ * Called at driver unload time for each input queue. Deletes all
+ * allocated resources for the input queue.
+ */
+static void
+lio_delete_instr_queue(struct lio_device *lio_dev, uint32_t iq_no)
+{
+ struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
+
+ rte_free(iq->request_list);
+ iq->request_list = NULL;
+ lio_dma_zone_free(lio_dev, iq->iq_mz);
+}
+
+void
+lio_free_instr_queue0(struct lio_device *lio_dev)
+{
+ lio_delete_instr_queue(lio_dev, 0);
+ rte_free(lio_dev->instr_queue[0]);
+ lio_dev->instr_queue[0] = NULL;
+ lio_dev->num_iqs--;
+}
+
+/* Return 0 on success, -1 on failure */
+int
+lio_setup_iq(struct lio_device *lio_dev, int q_index,
+ union octeon_txpciq txpciq, uint32_t num_descs, void *app_ctx,
+ unsigned int socket_id)
+{
+ uint32_t iq_no = (uint32_t)txpciq.s.q_no;
+
+ if (lio_dev->instr_queue[iq_no]) {
+ lio_dev_dbg(lio_dev, "IQ is in use. Cannot create the IQ: %d again\n",
+ iq_no);
+ lio_dev->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
+ lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
+ return 0;
+ }
+
+ lio_dev->instr_queue[iq_no] = rte_zmalloc_socket("ethdev TX queue",
+ sizeof(struct lio_instr_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (lio_dev->instr_queue[iq_no] == NULL)
+ return -1;
+
+ lio_dev->instr_queue[iq_no]->q_index = q_index;
+ lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
+
+ if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id))
+ goto release_lio_iq;
+
+ lio_dev->num_iqs++;
+ if (lio_dev->fn_list.enable_io_queues(lio_dev))
+ goto delete_lio_iq;
+
+ return 0;
+
+delete_lio_iq:
+ lio_delete_instr_queue(lio_dev, iq_no);
+ lio_dev->num_iqs--;
+release_lio_iq:
+ rte_free(lio_dev->instr_queue[iq_no]);
+ lio_dev->instr_queue[iq_no] = NULL;
+
+ return -1;
+}
+
+int
+lio_wait_for_instr_fetch(struct lio_device *lio_dev)
+{
+ int pending, instr_cnt;
+ int i, retry = 1000;
+
+ do {
+ instr_cnt = 0;
+
+ for (i = 0; i < LIO_MAX_INSTR_QUEUES(lio_dev); i++) {
+ if (!(lio_dev->io_qmask.iq & (1ULL << i)))
+ continue;
+
+ if (lio_dev->instr_queue[i] == NULL)
+ break;
+
+ pending = rte_atomic64_read(
+ &lio_dev->instr_queue[i]->instr_pending);
+ if (pending)
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[i]);
+
+ instr_cnt += pending;
+ }
+
+ if (instr_cnt == 0)
+ break;
+
+ rte_delay_ms(1);
+
+ } while (retry-- && instr_cnt);
+
+ return instr_cnt;
+}
+
+static inline void
+lio_ring_doorbell(struct lio_device *lio_dev,
+ struct lio_instr_queue *iq)
+{
+ if (rte_atomic64_read(&lio_dev->status) == LIO_DEV_RUNNING) {
+ rte_write32(iq->fill_cnt, iq->doorbell_reg);
+ /* make sure doorbell write goes through */
+ rte_wmb();
+ iq->fill_cnt = 0;
+ }
+}
+
+static inline void
+copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
+{
+ uint8_t *iqptr, cmdsize;
+
+ cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
+ iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
+
+ rte_memcpy(iqptr, cmd, cmdsize);
+}
+
+static inline struct lio_iq_post_status
+post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
+{
+ struct lio_iq_post_status st;
+
+ st.status = LIO_IQ_SEND_OK;
+
+ /* This ensures that the read index does not wrap around to the same
+ * position if queue gets full before Octeon could fetch any instr.
+ */
+ if (rte_atomic64_read(&iq->instr_pending) >=
+ (int32_t)(iq->max_count - 1)) {
+ st.status = LIO_IQ_SEND_FAILED;
+ st.index = -1;
+ return st;
+ }
+
+ if (rte_atomic64_read(&iq->instr_pending) >=
+ (int32_t)(iq->max_count - 2))
+ st.status = LIO_IQ_SEND_STOP;
+
+ copy_cmd_into_iq(iq, cmd);
+
+ /* "index" is returned, host_write_index is modified. */
+ st.index = iq->host_write_index;
+ iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
+ iq->max_count);
+ iq->fill_cnt++;
+
+ /* Flush the command into memory. We need to be sure the data is in
+ * memory before indicating that the instruction is pending.
+ */
+ rte_wmb();
+
+ rte_atomic64_inc(&iq->instr_pending);
+
+ return st;
+}
+
+static inline void
+lio_add_to_request_list(struct lio_instr_queue *iq,
+ int idx, void *buf, int reqtype)
+{
+ iq->request_list[idx].buf = buf;
+ iq->request_list[idx].reqtype = reqtype;
+}
+
+static inline void
+lio_free_netsgbuf(void *buf)
+{
+ struct lio_buf_free_info *finfo = buf;
+ struct lio_device *lio_dev = finfo->lio_dev;
+ struct rte_mbuf *m = finfo->mbuf;
+ struct lio_gather *g = finfo->g;
+ uint8_t iq = finfo->iq_no;
+
+ /* This will take care of multiple segments also */
+ rte_pktmbuf_free(m);
+
+ rte_spinlock_lock(&lio_dev->glist_lock[iq]);
+ STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq], &g->list, entries);
+ rte_spinlock_unlock(&lio_dev->glist_lock[iq]);
+ rte_free(finfo);
+}
+
+/* Can only run in process context */
+static int
+lio_process_iq_request_list(struct lio_device *lio_dev,
+ struct lio_instr_queue *iq)
+{
+ struct octeon_instr_irh *irh = NULL;
+ uint32_t old = iq->flush_index;
+ struct lio_soft_command *sc;
+ uint32_t inst_count = 0;
+ int reqtype;
+ void *buf;
+
+ while (old != iq->lio_read_index) {
+ reqtype = iq->request_list[old].reqtype;
+ buf = iq->request_list[old].buf;
+
+ if (reqtype == LIO_REQTYPE_NONE)
+ goto skip_this;
+
+ switch (reqtype) {
+ case LIO_REQTYPE_NORESP_NET:
+ rte_pktmbuf_free((struct rte_mbuf *)buf);
+ break;
+ case LIO_REQTYPE_NORESP_NET_SG:
+ lio_free_netsgbuf(buf);
+ break;
+ case LIO_REQTYPE_SOFT_COMMAND:
+ sc = buf;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ if (irh->rflag) {
+ /* We're expecting a response from Octeon.
+ * It's up to lio_process_ordered_list() to
+ * process sc. Add sc to the ordered soft
+ * command response list because we expect
+ * a response from Octeon.
+ */
+ rte_spinlock_lock(&lio_dev->response_list.lock);
+ rte_atomic64_inc(
+ &lio_dev->response_list.pending_req_count);
+ STAILQ_INSERT_TAIL(
+ &lio_dev->response_list.head,
+ &sc->node, entries);
+ rte_spinlock_unlock(
+ &lio_dev->response_list.lock);
+ } else {
+ if (sc->callback) {
+ /* This callback must not sleep */
+ sc->callback(LIO_REQUEST_DONE,
+ sc->callback_arg);
+ }
+ }
+ break;
+ default:
+ lio_dev_err(lio_dev,
+ "Unknown reqtype: %d buf: %p at idx %d\n",
+ reqtype, buf, old);
+ }
+
+ iq->request_list[old].buf = NULL;
+ iq->request_list[old].reqtype = 0;
+
+skip_this:
+ inst_count++;
+ old = lio_incr_index(old, 1, iq->max_count);
+ }
+
+ iq->flush_index = old;
+
+ return inst_count;
+}
+
+static void
+lio_update_read_index(struct lio_instr_queue *iq)
+{
+ uint32_t pkt_in_done = rte_read32(iq->inst_cnt_reg);
+ uint32_t last_done;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ /* Add last_done and modulo with the IQ size to get new index */
+ iq->lio_read_index = (iq->lio_read_index +
+ (uint32_t)(last_done & LIO_PKT_IN_DONE_CNT_MASK)) %
+ iq->max_count;
+}
+
+int
+lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq)
+{
+ uint32_t tot_inst_processed = 0;
+ uint32_t inst_processed = 0;
+ int tx_done = 1;
+
+ if (rte_atomic64_test_and_set(&iq->iq_flush_running) == 0)
+ return tx_done;
+
+ rte_spinlock_lock(&iq->lock);
+
+ lio_update_read_index(iq);
+
+ do {
+ /* Process any outstanding IQ packets. */
+ if (iq->flush_index == iq->lio_read_index)
+ break;
+
+ inst_processed = lio_process_iq_request_list(lio_dev, iq);
+
+ if (inst_processed) {
+ rte_atomic64_sub(&iq->instr_pending, inst_processed);
+ iq->stats.instr_processed += inst_processed;
+ }
+
+ tot_inst_processed += inst_processed;
+ inst_processed = 0;
+
+ } while (1);
+
+ rte_spinlock_unlock(&iq->lock);
+
+ rte_atomic64_clear(&iq->iq_flush_running);
+
+ return tx_done;
+}
+
+static int
+lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
+ void *buf, uint32_t datasize, uint32_t reqtype)
+{
+ struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
+ struct lio_iq_post_status st;
+
+ rte_spinlock_lock(&iq->post_lock);
+
+ st = post_command2(iq, cmd);
+
+ if (st.status != LIO_IQ_SEND_FAILED) {
+ lio_add_to_request_list(iq, st.index, buf, reqtype);
+ LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, bytes_sent,
+ datasize);
+ LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_posted, 1);
+
+ lio_ring_doorbell(lio_dev, iq);
+ } else {
+ LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_dropped, 1);
+ }
+
+ rte_spinlock_unlock(&iq->post_lock);
+
+ return st.status;
+}
+
+void
+lio_prepare_soft_command(struct lio_device *lio_dev,
+ struct lio_soft_command *sc, uint8_t opcode,
+ uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0,
+ uint64_t ossp1)
+{
+ struct octeon_instr_pki_ih3 *pki_ih3;
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ RTE_ASSERT(opcode <= 15);
+ RTE_ASSERT(subcode <= 127);
+
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+
+ ih3->pkind = lio_dev->instr_queue[sc->iq_no]->txpciq.s.pkind;
+
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
+
+ pki_ih3->w = 1;
+ pki_ih3->raw = 1;
+ pki_ih3->utag = 1;
+ pki_ih3->uqpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
+ pki_ih3->utt = 1;
+
+ pki_ih3->tag = LIO_CONTROL;
+ pki_ih3->tagtype = OCTEON_ATOMIC_TAG;
+ pki_ih3->qpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.qpg;
+ pki_ih3->pm = 0x7;
+ pki_ih3->sl = 8;
+
+ if (sc->datasize)
+ ih3->dlengsz = sc->datasize;
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.cmd3.ossp[0] = ossp0;
+ sc->cmd.cmd3.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ rdp->pcie_port = lio_dev->pcie_port;
+ rdp->rlen = sc->rdatasize;
+ irh->rflag = 1;
+ /* PKI IH3 */
+ ih3->fsz = OCTEON_SOFT_CMD_RESP_IH3;
+ } else {
+ irh->rflag = 0;
+ /* PKI IH3 */
+ ih3->fsz = OCTEON_PCI_CMD_O3;
+ }
+}
+
+int
+lio_send_soft_command(struct lio_device *lio_dev,
+ struct lio_soft_command *sc)
+{
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_irh *irh;
+ uint32_t len = 0;
+
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+ if (ih3->dlengsz) {
+ RTE_ASSERT(sc->dmadptr);
+ sc->cmd.cmd3.dptr = sc->dmadptr;
+ }
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ if (irh->rflag) {
+ RTE_ASSERT(sc->dmarptr);
+ RTE_ASSERT(sc->status_word != NULL);
+ *sc->status_word = LIO_COMPLETION_WORD_INIT;
+ sc->cmd.cmd3.rptr = sc->dmarptr;
+ }
+
+ len = (uint32_t)ih3->dlengsz;
+
+ if (sc->wait_time)
+ sc->timeout = lio_uptime + sc->wait_time;
+
+ return lio_send_command(lio_dev, sc->iq_no, &sc->cmd, sc, len,
+ LIO_REQTYPE_SOFT_COMMAND);
+}
+
+int
+lio_setup_sc_buffer_pool(struct lio_device *lio_dev)
+{
+ char sc_pool_name[RTE_MEMPOOL_NAMESIZE];
+ uint16_t buf_size;
+
+ buf_size = LIO_SOFT_COMMAND_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM;
+ snprintf(sc_pool_name, sizeof(sc_pool_name),
+ "lio_sc_pool_%u", lio_dev->port_id);
+ lio_dev->sc_buf_pool = rte_pktmbuf_pool_create(sc_pool_name,
+ LIO_MAX_SOFT_COMMAND_BUFFERS,
+ 0, 0, buf_size, SOCKET_ID_ANY);
+ return 0;
+}
+
+void
+lio_free_sc_buffer_pool(struct lio_device *lio_dev)
+{
+ rte_mempool_free(lio_dev->sc_buf_pool);
+}
+
+struct lio_soft_command *
+lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
+ uint32_t rdatasize, uint32_t ctxsize)
+{
+ uint32_t offset = sizeof(struct lio_soft_command);
+ struct lio_soft_command *sc;
+ struct rte_mbuf *m;
+ uint64_t dma_addr;
+
+ RTE_ASSERT((offset + datasize + rdatasize + ctxsize) <=
+ LIO_SOFT_COMMAND_BUFFER_SIZE);
+
+ m = rte_pktmbuf_alloc(lio_dev->sc_buf_pool);
+ if (m == NULL) {
+ lio_dev_err(lio_dev, "Cannot allocate mbuf for sc\n");
+ return NULL;
+ }
+
+ /* set rte_mbuf data size and there is only 1 segment */
+ m->pkt_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
+ m->data_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
+
+ /* use rte_mbuf buffer for soft command */
+ sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
+ memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
+ sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
+ sc->dma_addr = rte_mbuf_data_dma_addr(m);
+ sc->mbuf = m;
+
+ dma_addr = sc->dma_addr;
+
+ if (ctxsize) {
+ sc->ctxptr = (uint8_t *)sc + offset;
+ sc->ctxsize = ctxsize;
+ }
+
+ /* Start data at 128 byte boundary */
+ offset = (offset + ctxsize + 127) & 0xffffff80;
+
+ if (datasize) {
+ sc->virtdptr = (uint8_t *)sc + offset;
+ sc->dmadptr = dma_addr + offset;
+ sc->datasize = datasize;
+ }
+
+ /* Start rdata at 128 byte boundary */
+ offset = (offset + datasize + 127) & 0xffffff80;
+
+ if (rdatasize) {
+ RTE_ASSERT(rdatasize >= 16);
+ sc->virtrptr = (uint8_t *)sc + offset;
+ sc->dmarptr = dma_addr + offset;
+ sc->rdatasize = rdatasize;
+ sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
+ rdatasize - 8);
+ }
+
+ return sc;
+}
+
+void
+lio_free_soft_command(struct lio_soft_command *sc)
+{
+ rte_pktmbuf_free(sc->mbuf);
+}
+
+void
+lio_setup_response_list(struct lio_device *lio_dev)
+{
+ STAILQ_INIT(&lio_dev->response_list.head);
+ rte_spinlock_init(&lio_dev->response_list.lock);
+ rte_atomic64_set(&lio_dev->response_list.pending_req_count, 0);
+}
+
+int
+lio_process_ordered_list(struct lio_device *lio_dev)
+{
+ int resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;
+ struct lio_response_list *ordered_sc_list;
+ struct lio_soft_command *sc;
+ int request_complete = 0;
+ uint64_t status64;
+ uint32_t status;
+
+ ordered_sc_list = &lio_dev->response_list;
+
+ do {
+ rte_spinlock_lock(&ordered_sc_list->lock);
+
+ if (STAILQ_EMPTY(&ordered_sc_list->head)) {
+ /* ordered_sc_list is empty; there is
+ * nothing to process
+ */
+ rte_spinlock_unlock(&ordered_sc_list->lock);
+ return -1;
+ }
+
+ sc = LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head,
+ struct lio_soft_command, node);
+
+ status = LIO_REQUEST_PENDING;
+
+ /* check if octeon has finished DMA'ing a response
+ * to where rptr is pointing to
+ */
+ status64 = *sc->status_word;
+
+ if (status64 != LIO_COMPLETION_WORD_INIT) {
+ /* This logic ensures that all 64b have been written.
+ * 1. check byte 0 for non-FF
+ * 2. if non-FF, then swap result from BE to host order
+ * 3. check byte 7 (swapped to 0) for non-FF
+ * 4. if non-FF, use the low 32-bit status code
+ * 5. if either byte 0 or byte 7 is FF, don't use status
+ */
+ if ((status64 & 0xff) != 0xff) {
+ lio_swap_8B_data(&status64, 1);
+ if (((status64 & 0xff) != 0xff)) {
+ /* retrieve 16-bit firmware status */
+ status = (uint32_t)(status64 &
+ 0xffffULL);
+ if (status) {
+ status =
+ LIO_FIRMWARE_STATUS_CODE(
+ status);
+ } else {
+ /* i.e. no error */
+ status = LIO_REQUEST_DONE;
+ }
+ }
+ }
+ } else if ((sc->timeout && lio_check_timeout(lio_uptime,
+ sc->timeout))) {
+ lio_dev_err(lio_dev,
+ "cmd failed, timeout (%ld, %ld)\n",
+ (long)lio_uptime, (long)sc->timeout);
+ status = LIO_REQUEST_TIMEOUT;
+ }
+
+ if (status != LIO_REQUEST_PENDING) {
+ /* we have received a response or we have timed out.
+ * remove node from linked list
+ */
+ STAILQ_REMOVE(&ordered_sc_list->head,
+ &sc->node, lio_stailq_node, entries);
+ rte_atomic64_dec(
+ &lio_dev->response_list.pending_req_count);
+ rte_spinlock_unlock(&ordered_sc_list->lock);
+
+ if (sc->callback)
+ sc->callback(status, sc->callback_arg);
+
+ request_complete++;
+ } else {
+ /* no response yet */
+ request_complete = 0;
+ rte_spinlock_unlock(&ordered_sc_list->lock);
+ }
+
+ /* If we hit the Max Ordered requests to process every loop,
+ * we quit and let this function be invoked the next time
+ * the poll thread runs to process the remaining requests.
+ * This function can take up the entire CPU if there is
+ * no upper limit to the requests processed.
+ */
+ if (request_complete >= resp_to_process)
+ break;
+ } while (request_complete);
+
+ return 0;
+}
+
+static inline struct lio_stailq_node *
+list_delete_first_node(struct lio_stailq_head *head)
+{
+ struct lio_stailq_node *node;
+
+ if (STAILQ_EMPTY(head))
+ node = NULL;
+ else
+ node = STAILQ_FIRST(head);
+
+ if (node)
+ STAILQ_REMOVE(head, node, lio_stailq_node, entries);
+
+ return node;
+}
+
+void
+lio_delete_sglist(struct lio_instr_queue *txq)
+{
+ struct lio_device *lio_dev = txq->lio_dev;
+ int iq_no = txq->q_index;
+ struct lio_gather *g;
+
+ if (lio_dev->glist_head == NULL)
+ return;
+
+ do {
+ g = (struct lio_gather *)list_delete_first_node(
+ &lio_dev->glist_head[iq_no]);
+ if (g) {
+ if (g->sg)
+ rte_free(
+ (void *)((unsigned long)g->sg - g->adjust));
+ rte_free(g);
+ }
+ } while (g);
+}
+
+/**
+ * \brief Setup gather lists
+ * @param lio per-network private data
+ */
+int
+lio_setup_sglists(struct lio_device *lio_dev, int iq_no,
+ int fw_mapped_iq, int num_descs, unsigned int socket_id)
+{
+ struct lio_gather *g;
+ int i;
+
+ rte_spinlock_init(&lio_dev->glist_lock[iq_no]);
+
+ STAILQ_INIT(&lio_dev->glist_head[iq_no]);
+
+ for (i = 0; i < num_descs; i++) {
+ g = rte_zmalloc_socket(NULL, sizeof(*g), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (g == NULL) {
+ lio_dev_err(lio_dev,
+ "lio_gather memory allocation failed for qno %d\n",
+ iq_no);
+ break;
+ }
+
+ g->sg_size =
+ ((ROUNDUP4(LIO_MAX_SG) >> 2) * LIO_SG_ENTRY_SIZE);
+
+ g->sg = rte_zmalloc_socket(NULL, g->sg_size + 8,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (g->sg == NULL) {
+ lio_dev_err(lio_dev,
+ "sg list memory allocation failed for qno %d\n",
+ iq_no);
+ rte_free(g);
+ break;
+ }
+
+ /* The gather component should be aligned on 64-bit boundary */
+ if (((unsigned long)g->sg) & 7) {
+ g->adjust = 8 - (((unsigned long)g->sg) & 7);
+ g->sg =
+ (struct lio_sg_entry *)((unsigned long)g->sg +
+ g->adjust);
+ }
+
+ STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq_no], &g->list,
+ entries);
+ }
+
+ if (i != num_descs) {
+ lio_delete_sglist(lio_dev->instr_queue[fw_mapped_iq]);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no)
+{
+ lio_delete_instr_queue(lio_dev, iq_no);
+ rte_free(lio_dev->instr_queue[iq_no]);
+ lio_dev->instr_queue[iq_no] = NULL;
+ lio_dev->num_iqs--;
+}
+
+static inline uint32_t
+lio_iq_get_available(struct lio_device *lio_dev, uint32_t q_no)
+{
+ return ((lio_dev->instr_queue[q_no]->max_count - 1) -
+ (uint32_t)rte_atomic64_read(
+ &lio_dev->instr_queue[q_no]->instr_pending));
+}
+
+static inline int
+lio_iq_is_full(struct lio_device *lio_dev, uint32_t q_no)
+{
+ return ((uint32_t)rte_atomic64_read(
+ &lio_dev->instr_queue[q_no]->instr_pending) >=
+ (lio_dev->instr_queue[q_no]->max_count - 2));
+}
+
+static int
+lio_dev_cleanup_iq(struct lio_device *lio_dev, int iq_no)
+{
+ struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
+ uint32_t count = 10000;
+
+ while ((lio_iq_get_available(lio_dev, iq_no) < LIO_FLUSH_WM(iq)) &&
+ --count)
+ lio_flush_iq(lio_dev, iq);
+
+ return count ? 0 : 1;
+}
+
+static void
+lio_ctrl_cmd_callback(uint32_t status __rte_unused, void *sc_ptr)
+{
+ struct lio_soft_command *sc = sc_ptr;
+ struct lio_dev_ctrl_cmd *ctrl_cmd;
+ struct lio_ctrl_pkt *ctrl_pkt;
+
+ ctrl_pkt = (struct lio_ctrl_pkt *)sc->ctxptr;
+ ctrl_cmd = ctrl_pkt->ctrl_cmd;
+ ctrl_cmd->cond = 1;
+
+ lio_free_soft_command(sc);
+}
+
+static inline struct lio_soft_command *
+lio_alloc_ctrl_pkt_sc(struct lio_device *lio_dev,
+ struct lio_ctrl_pkt *ctrl_pkt)
+{
+ struct lio_soft_command *sc = NULL;
+ uint32_t uddsize, datasize;
+ uint32_t rdatasize;
+ uint8_t *data;
+
+ uddsize = (uint32_t)(ctrl_pkt->ncmd.s.more * 8);
+
+ datasize = OCTEON_CMD_SIZE + uddsize;
+ rdatasize = (ctrl_pkt->wait_time) ? 16 : 0;
+
+ sc = lio_alloc_soft_command(lio_dev, datasize,
+ rdatasize, sizeof(struct lio_ctrl_pkt));
+ if (sc == NULL)
+ return NULL;
+
+ rte_memcpy(sc->ctxptr, ctrl_pkt, sizeof(struct lio_ctrl_pkt));
+
+ data = (uint8_t *)sc->virtdptr;
+
+ rte_memcpy(data, &ctrl_pkt->ncmd, OCTEON_CMD_SIZE);
+
+ lio_swap_8B_data((uint64_t *)data, OCTEON_CMD_SIZE >> 3);
+
+ if (uddsize) {
+ /* Endian-Swap for UDD should have been done by caller. */
+ rte_memcpy(data + OCTEON_CMD_SIZE, ctrl_pkt->udd, uddsize);
+ }
+
+ sc->iq_no = (uint32_t)ctrl_pkt->iq_no;
+
+ lio_prepare_soft_command(lio_dev, sc,
+ LIO_OPCODE, LIO_OPCODE_CMD,
+ 0, 0, 0);
+
+ sc->callback = lio_ctrl_cmd_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = ctrl_pkt->wait_time;
+
+ return sc;
+}
+
+int
+lio_send_ctrl_pkt(struct lio_device *lio_dev, struct lio_ctrl_pkt *ctrl_pkt)
+{
+ struct lio_soft_command *sc = NULL;
+ int retval;
+
+ sc = lio_alloc_ctrl_pkt_sc(lio_dev, ctrl_pkt);
+ if (sc == NULL) {
+ lio_dev_err(lio_dev, "soft command allocation failed\n");
+ return -1;
+ }
+
+ retval = lio_send_soft_command(lio_dev, sc);
+ if (retval == LIO_IQ_SEND_FAILED) {
+ lio_free_soft_command(sc);
+ lio_dev_err(lio_dev, "Port: %d soft command: %d send failed status: %x\n",
+ lio_dev->port_id, ctrl_pkt->ncmd.s.cmd, retval);
+ return -1;
+ }
+
+ return retval;
+}
+
+/** Send data packet to the device
+ * @param lio_dev - lio device pointer
+ * @param ndata - control structure with queueing, and buffer information
+ *
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * queue should be stopped, and LIO_IQ_SEND_OK if it sent okay.
+ */
+static inline int
+lio_send_data_pkt(struct lio_device *lio_dev, struct lio_data_pkt *ndata)
+{
+ return lio_send_command(lio_dev, ndata->q_no, &ndata->cmd,
+ ndata->buf, ndata->datasize, ndata->reqtype);
+}
+
+uint16_t
+lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
+{
+ struct lio_instr_queue *txq = tx_queue;
+ union lio_cmd_setup cmdsetup;
+ struct lio_device *lio_dev;
+ struct lio_iq_stats *stats;
+ struct lio_data_pkt ndata;
+ int i, processed = 0;
+ struct rte_mbuf *m;
+ uint32_t tag = 0;
+ int status = 0;
+ int iq_no;
+
+ lio_dev = txq->lio_dev;
+ iq_no = txq->txpciq.s.q_no;
+ stats = &lio_dev->instr_queue[iq_no]->stats;
+
+ if (!lio_dev->intf_open || !lio_dev->linfo.link.s.link_up) {
+ PMD_TX_LOG(lio_dev, ERR, "Transmit failed link_status : %d\n",
+ lio_dev->linfo.link.s.link_up);
+ goto xmit_failed;
+ }
+
+ lio_dev_cleanup_iq(lio_dev, iq_no);
+
+ for (i = 0; i < nb_pkts; i++) {
+ uint32_t pkt_len = 0;
+
+ m = pkts[i];
+
+ /* Prepare the attributes for the data to be passed to BASE. */
+ memset(&ndata, 0, sizeof(struct lio_data_pkt));
+
+ ndata.buf = m;
+
+ ndata.q_no = iq_no;
+ if (lio_iq_is_full(lio_dev, ndata.q_no)) {
+ stats->tx_iq_busy++;
+ if (lio_dev_cleanup_iq(lio_dev, iq_no)) {
+ PMD_TX_LOG(lio_dev, ERR,
+ "Transmit failed iq:%d full\n",
+ ndata.q_no);
+ break;
+ }
+ }
+
+ cmdsetup.cmd_setup64 = 0;
+ cmdsetup.s.iq_no = iq_no;
+
+ /* check checksum offload flags to form cmd */
+ if (m->ol_flags & PKT_TX_IP_CKSUM)
+ cmdsetup.s.ip_csum = 1;
+
+ if (m->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ cmdsetup.s.tnl_csum = 1;
+ else if ((m->ol_flags & PKT_TX_TCP_CKSUM) ||
+ (m->ol_flags & PKT_TX_UDP_CKSUM))
+ cmdsetup.s.transport_csum = 1;
+
+ if (m->nb_segs == 1) {
+ pkt_len = rte_pktmbuf_data_len(m);
+ cmdsetup.s.u.datasize = pkt_len;
+ lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
+ &cmdsetup, tag);
+ ndata.cmd.cmd3.dptr = rte_mbuf_data_dma_addr(m);
+ ndata.reqtype = LIO_REQTYPE_NORESP_NET;
+ } else {
+ struct lio_buf_free_info *finfo;
+ struct lio_gather *g;
+ phys_addr_t phyaddr;
+ int i, frags;
+
+ finfo = (struct lio_buf_free_info *)rte_malloc(NULL,
+ sizeof(*finfo), 0);
+ if (finfo == NULL) {
+ PMD_TX_LOG(lio_dev, ERR,
+ "free buffer alloc failed\n");
+ goto xmit_failed;
+ }
+
+ rte_spinlock_lock(&lio_dev->glist_lock[iq_no]);
+ g = (struct lio_gather *)list_delete_first_node(
+ &lio_dev->glist_head[iq_no]);
+ rte_spinlock_unlock(&lio_dev->glist_lock[iq_no]);
+ if (g == NULL) {
+ PMD_TX_LOG(lio_dev, ERR,
+ "Transmit scatter gather: glist null!\n");
+ goto xmit_failed;
+ }
+
+ cmdsetup.s.gather = 1;
+ cmdsetup.s.u.gatherptrs = m->nb_segs;
+ lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
+ &cmdsetup, tag);
+
+ memset(g->sg, 0, g->sg_size);
+ g->sg[0].ptr[0] = rte_mbuf_data_dma_addr(m);
+ lio_add_sg_size(&g->sg[0], m->data_len, 0);
+ pkt_len = m->data_len;
+ finfo->mbuf = m;
+
+ /* First seg taken care above */
+ frags = m->nb_segs - 1;
+ i = 1;
+ m = m->next;
+ while (frags--) {
+ g->sg[(i >> 2)].ptr[(i & 3)] =
+ rte_mbuf_data_dma_addr(m);
+ lio_add_sg_size(&g->sg[(i >> 2)],
+ m->data_len, (i & 3));
+ pkt_len += m->data_len;
+ i++;
+ m = m->next;
+ }
+
+ phyaddr = rte_mem_virt2phy(g->sg);
+ if (phyaddr == RTE_BAD_PHYS_ADDR) {
+ PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
+ goto xmit_failed;
+ }
+
+ ndata.cmd.cmd3.dptr = phyaddr;
+ ndata.reqtype = LIO_REQTYPE_NORESP_NET_SG;
+
+ finfo->g = g;
+ finfo->lio_dev = lio_dev;
+ finfo->iq_no = (uint64_t)iq_no;
+ ndata.buf = finfo;
+ }
+
+ ndata.datasize = pkt_len;
+
+ status = lio_send_data_pkt(lio_dev, &ndata);
+
+ if (unlikely(status == LIO_IQ_SEND_FAILED)) {
+ PMD_TX_LOG(lio_dev, ERR, "send failed\n");
+ break;
+ }
+
+ if (unlikely(status == LIO_IQ_SEND_STOP)) {
+ PMD_TX_LOG(lio_dev, DEBUG, "iq full\n");
+ /* create space as iq is full */
+ lio_dev_cleanup_iq(lio_dev, iq_no);
+ }
+
+ stats->tx_done++;
+ stats->tx_tot_bytes += pkt_len;
+ processed++;
+ }
+
+xmit_failed:
+ stats->tx_dropped += (nb_pkts - processed);
+
+ return processed;
+}
+
+void
+lio_dev_clear_queues(struct rte_eth_dev *eth_dev)
+{
+ struct lio_instr_queue *txq;
+ struct lio_droq *rxq;
+ uint16_t i;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ txq = eth_dev->data->tx_queues[i];
+ if (txq != NULL) {
+ lio_dev_tx_queue_release(txq);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rxq = eth_dev->data->rx_queues[i];
+ if (rxq != NULL) {
+ lio_dev_rx_queue_release(rxq);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+ }
+}
diff --git a/drivers/net/liquidio/lio_rxtx.h b/drivers/net/liquidio/lio_rxtx.h
new file mode 100644
index 00000000..85685dc7
--- /dev/null
+++ b/drivers/net/liquidio/lio_rxtx.h
@@ -0,0 +1,769 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_RXTX_H_
+#define _LIO_RXTX_H_
+
+#include <stdio.h>
+#include <stdint.h>
+
+#include <rte_spinlock.h>
+#include <rte_memory.h>
+
+#include "lio_struct.h"
+
+#ifndef ROUNDUP4
+#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
+#endif
+
+#define LIO_STQUEUE_FIRST_ENTRY(ptr, type, elem) \
+ (type *)((char *)((ptr)->stqh_first) - offsetof(type, elem))
+
+#define lio_check_timeout(cur_time, chk_time) ((cur_time) > (chk_time))
+
+#define lio_uptime \
+ (size_t)(rte_get_timer_cycles() / rte_get_timer_hz())
+
+/** Descriptor format.
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ * -# Physical (bus) address of the data buffer.
+ * -# Physical (bus) address of a lio_droq_info structure.
+ * The device DMA's incoming packets and its information at the address
+ * given by these descriptor fields.
+ */
+struct lio_droq_desc {
+ /** The buffer pointer */
+ uint64_t buffer_ptr;
+
+ /** The Info pointer */
+ uint64_t info_ptr;
+};
+
+#define LIO_DROQ_DESC_SIZE (sizeof(struct lio_droq_desc))
+
+/** Information about packet DMA'ed by Octeon.
+ * The format of the information available at Info Pointer after Octeon
+ * has posted a packet. Not all descriptors have valid information. Only
+ * the Info field of the first descriptor for a packet has information
+ * about the packet.
+ */
+struct lio_droq_info {
+ /** The Output Receive Header. */
+ union octeon_rh rh;
+
+ /** The Length of the packet. */
+ uint64_t length;
+};
+
+#define LIO_DROQ_INFO_SIZE (sizeof(struct lio_droq_info))
+
+/** Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers.
+ */
+struct lio_recv_buffer {
+ /** Packet buffer, including meta data. */
+ void *buffer;
+
+ /** Data in the packet buffer. */
+ uint8_t *data;
+
+};
+
+#define LIO_DROQ_RECVBUF_SIZE (sizeof(struct lio_recv_buffer))
+
+#define LIO_DROQ_SIZE (sizeof(struct lio_droq))
+
+#define LIO_IQ_SEND_OK 0
+#define LIO_IQ_SEND_STOP 1
+#define LIO_IQ_SEND_FAILED -1
+
+/* conditions */
+#define LIO_REQTYPE_NONE 0
+#define LIO_REQTYPE_NORESP_NET 1
+#define LIO_REQTYPE_NORESP_NET_SG 2
+#define LIO_REQTYPE_SOFT_COMMAND 3
+
+struct lio_request_list {
+ uint32_t reqtype;
+ void *buf;
+};
+
+/*---------------------- INSTRUCTION FORMAT ----------------------------*/
+
+struct lio_instr3_64B {
+ /** Pointer where the input data is available. */
+ uint64_t dptr;
+
+ /** Instruction Header. */
+ uint64_t ih3;
+
+ /** Instruction Header. */
+ uint64_t pki_ih3;
+
+ /** Input Request Header. */
+ uint64_t irh;
+
+ /** opcode/subcode specific parameters */
+ uint64_t ossp[2];
+
+ /** Return Data Parameters */
+ uint64_t rdp;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ uint64_t rptr;
+
+};
+
+union lio_instr_64B {
+ struct lio_instr3_64B cmd3;
+};
+
+/** The size of each buffer in soft command buffer pool */
+#define LIO_SOFT_COMMAND_BUFFER_SIZE 1536
+
+/** Maximum number of buffers to allocate into soft command buffer pool */
+#define LIO_MAX_SOFT_COMMAND_BUFFERS 255
+
+struct lio_soft_command {
+ /** Soft command buffer info. */
+ struct lio_stailq_node node;
+ uint64_t dma_addr;
+ uint32_t size;
+
+ /** Command and return status */
+ union lio_instr_64B cmd;
+
+#define LIO_COMPLETION_WORD_INIT 0xffffffffffffffffULL
+ uint64_t *status_word;
+
+ /** Data buffer info */
+ void *virtdptr;
+ uint64_t dmadptr;
+ uint32_t datasize;
+
+ /** Return buffer info */
+ void *virtrptr;
+ uint64_t dmarptr;
+ uint32_t rdatasize;
+
+ /** Context buffer info */
+ void *ctxptr;
+ uint32_t ctxsize;
+
+ /** Time out and callback */
+ size_t wait_time;
+ size_t timeout;
+ uint32_t iq_no;
+ void (*callback)(uint32_t, void *);
+ void *callback_arg;
+ struct rte_mbuf *mbuf;
+};
+
+struct lio_iq_post_status {
+ int status;
+ int index;
+};
+
+/* wqe
+ * --------------- 0
+ * | wqe word0-3 |
+ * --------------- 32
+ * | PCI IH |
+ * --------------- 40
+ * | RPTR |
+ * --------------- 48
+ * | PCI IRH |
+ * --------------- 56
+ * | OCTEON_CMD |
+ * --------------- 64
+ * | Addtl 8-BData |
+ * | |
+ * ---------------
+ */
+
+union octeon_cmd {
+ uint64_t cmd64;
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t cmd : 5;
+
+ uint64_t more : 6; /* How many udd words follow the command */
+
+ uint64_t reserved : 29;
+
+ uint64_t param1 : 16;
+
+ uint64_t param2 : 8;
+
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+ uint64_t param2 : 8;
+
+ uint64_t param1 : 16;
+
+ uint64_t reserved : 29;
+
+ uint64_t more : 6;
+
+ uint64_t cmd : 5;
+
+#endif
+ } s;
+};
+
+#define OCTEON_CMD_SIZE (sizeof(union octeon_cmd))
+
+/* Maximum number of 8-byte words can be
+ * sent in a NIC control message.
+ */
+#define LIO_MAX_NCTRL_UDD 32
+
+/* Structure of control information passed by driver to the BASE
+ * layer when sending control commands to Octeon device software.
+ */
+struct lio_ctrl_pkt {
+ /** Command to be passed to the Octeon device software. */
+ union octeon_cmd ncmd;
+
+ /** Send buffer */
+ void *data;
+ uint64_t dmadata;
+
+ /** Response buffer */
+ void *rdata;
+ uint64_t dmardata;
+
+ /** Additional data that may be needed by some commands. */
+ uint64_t udd[LIO_MAX_NCTRL_UDD];
+
+ /** Input queue to use to send this command. */
+ uint64_t iq_no;
+
+ /** Time to wait for Octeon software to respond to this control command.
+ * If wait_time is 0, BASE assumes no response is expected.
+ */
+ size_t wait_time;
+
+ struct lio_dev_ctrl_cmd *ctrl_cmd;
+};
+
+/** Structure of data information passed by driver to the BASE
+ * layer when forwarding data to Octeon device software.
+ */
+struct lio_data_pkt {
+ /** Pointer to information maintained by NIC module for this packet. The
+ * BASE layer passes this as-is to the driver.
+ */
+ void *buf;
+
+ /** Type of buffer passed in "buf" above. */
+ uint32_t reqtype;
+
+ /** Total data bytes to be transferred in this command. */
+ uint32_t datasize;
+
+ /** Command to be passed to the Octeon device software. */
+ union lio_instr_64B cmd;
+
+ /** Input queue to use to send this command. */
+ uint32_t q_no;
+};
+
+/** Structure passed by driver to BASE layer to prepare a command to send
+ * network data to Octeon.
+ */
+union lio_cmd_setup {
+ struct {
+ uint32_t iq_no : 8;
+ uint32_t gather : 1;
+ uint32_t timestamp : 1;
+ uint32_t ip_csum : 1;
+ uint32_t transport_csum : 1;
+ uint32_t tnl_csum : 1;
+ uint32_t rsvd : 19;
+
+ union {
+ uint32_t datasize;
+ uint32_t gatherptrs;
+ } u;
+ } s;
+
+ uint64_t cmd_setup64;
+};
+
+/* Instruction Header */
+struct octeon_instr_ih3 {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+
+ /** Reserved3 */
+ uint64_t reserved3 : 1;
+
+ /** Gather indicator 1=gather*/
+ uint64_t gather : 1;
+
+ /** Data length OR no. of entries in gather list */
+ uint64_t dlengsz : 14;
+
+ /** Front Data size */
+ uint64_t fsz : 6;
+
+ /** Reserved2 */
+ uint64_t reserved2 : 4;
+
+ /** PKI port kind - PKIND */
+ uint64_t pkind : 6;
+
+ /** Reserved1 */
+ uint64_t reserved1 : 32;
+
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ /** Reserved1 */
+ uint64_t reserved1 : 32;
+
+ /** PKI port kind - PKIND */
+ uint64_t pkind : 6;
+
+ /** Reserved2 */
+ uint64_t reserved2 : 4;
+
+ /** Front Data size */
+ uint64_t fsz : 6;
+
+ /** Data length OR no. of entries in gather list */
+ uint64_t dlengsz : 14;
+
+ /** Gather indicator 1=gather*/
+ uint64_t gather : 1;
+
+ /** Reserved3 */
+ uint64_t reserved3 : 1;
+
+#endif
+};
+
+/* PKI Instruction Header(PKI IH) */
+struct octeon_instr_pki_ih3 {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+
+ /** Wider bit */
+ uint64_t w : 1;
+
+ /** Raw mode indicator 1 = RAW */
+ uint64_t raw : 1;
+
+ /** Use Tag */
+ uint64_t utag : 1;
+
+ /** Use QPG */
+ uint64_t uqpg : 1;
+
+ /** Reserved2 */
+ uint64_t reserved2 : 1;
+
+ /** Parse Mode */
+ uint64_t pm : 3;
+
+ /** Skip Length */
+ uint64_t sl : 8;
+
+ /** Use Tag Type */
+ uint64_t utt : 1;
+
+ /** Tag type */
+ uint64_t tagtype : 2;
+
+ /** Reserved1 */
+ uint64_t reserved1 : 2;
+
+ /** QPG Value */
+ uint64_t qpg : 11;
+
+ /** Tag Value */
+ uint64_t tag : 32;
+
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+ /** Tag Value */
+ uint64_t tag : 32;
+
+ /** QPG Value */
+ uint64_t qpg : 11;
+
+ /** Reserved1 */
+ uint64_t reserved1 : 2;
+
+ /** Tag type */
+ uint64_t tagtype : 2;
+
+ /** Use Tag Type */
+ uint64_t utt : 1;
+
+ /** Skip Length */
+ uint64_t sl : 8;
+
+ /** Parse Mode */
+ uint64_t pm : 3;
+
+ /** Reserved2 */
+ uint64_t reserved2 : 1;
+
+ /** Use QPG */
+ uint64_t uqpg : 1;
+
+ /** Use Tag */
+ uint64_t utag : 1;
+
+ /** Raw mode indicator 1 = RAW */
+ uint64_t raw : 1;
+
+ /** Wider bit */
+ uint64_t w : 1;
+#endif
+};
+
+/** Input Request Header */
+struct octeon_instr_irh {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t opcode : 4;
+ uint64_t rflag : 1;
+ uint64_t subcode : 7;
+ uint64_t vlan : 12;
+ uint64_t priority : 3;
+ uint64_t reserved : 5;
+ uint64_t ossp : 32; /* opcode/subcode specific parameters */
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ uint64_t ossp : 32; /* opcode/subcode specific parameters */
+ uint64_t reserved : 5;
+ uint64_t priority : 3;
+ uint64_t vlan : 12;
+ uint64_t subcode : 7;
+ uint64_t rflag : 1;
+ uint64_t opcode : 4;
+#endif
+};
+
+/* pkiih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+#define OCTEON_SOFT_CMD_RESP_IH3 (40 + 8)
+/* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
+#define OCTEON_PCI_CMD_O3 (24 + 8)
+
+/** Return Data Parameters */
+struct octeon_instr_rdp {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t reserved : 49;
+ uint64_t pcie_port : 3;
+ uint64_t rlen : 12;
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ uint64_t rlen : 12;
+ uint64_t pcie_port : 3;
+ uint64_t reserved : 49;
+#endif
+};
+
+union octeon_packet_params {
+ uint32_t pkt_params32;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint32_t reserved : 24;
+ uint32_t ip_csum : 1; /* Perform IP header checksum(s) */
+ /* Perform Outer transport header checksum */
+ uint32_t transport_csum : 1;
+ /* Find tunnel, and perform transport csum. */
+ uint32_t tnl_csum : 1;
+ uint32_t tsflag : 1; /* Timestamp this packet */
+ uint32_t ipsec_ops : 4; /* IPsec operation */
+#else
+ uint32_t ipsec_ops : 4;
+ uint32_t tsflag : 1;
+ uint32_t tnl_csum : 1;
+ uint32_t transport_csum : 1;
+ uint32_t ip_csum : 1;
+ uint32_t reserved : 7;
+#endif
+ } s;
+};
+
+/** Utility function to prepare a 64B NIC instruction based on a setup command
+ * @param cmd - pointer to instruction to be filled in.
+ * @param setup - pointer to the setup structure
+ * @param q_no - which queue for back pressure
+ *
+ * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
+ */
+static inline void
+lio_prepare_pci_cmd(struct lio_device *lio_dev,
+ union lio_instr_64B *cmd,
+ union lio_cmd_setup *setup,
+ uint32_t tag)
+{
+ union octeon_packet_params packet_params;
+ struct octeon_instr_pki_ih3 *pki_ih3;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_ih3 *ih3;
+ int port;
+
+ memset(cmd, 0, sizeof(union lio_instr_64B));
+
+ ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;
+
+ /* assume that rflag is cleared so therefore front data will only have
+ * irh and ossp[1] and ossp[2] for a total of 24 bytes
+ */
+ ih3->pkind = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
+ /* PKI IH */
+ ih3->fsz = OCTEON_PCI_CMD_O3;
+
+ if (!setup->s.gather) {
+ ih3->dlengsz = setup->s.u.datasize;
+ } else {
+ ih3->gather = 1;
+ ih3->dlengsz = setup->s.u.gatherptrs;
+ }
+
+ pki_ih3->w = 1;
+ pki_ih3->raw = 0;
+ pki_ih3->utag = 0;
+ pki_ih3->utt = 1;
+ pki_ih3->uqpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;
+
+ port = (int)lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.port;
+
+ if (tag)
+ pki_ih3->tag = tag;
+ else
+ pki_ih3->tag = LIO_DATA(port);
+
+ pki_ih3->tagtype = OCTEON_ORDERED_TAG;
+ pki_ih3->qpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.qpg;
+ pki_ih3->pm = 0x0; /* parse from L2 */
+ pki_ih3->sl = 32; /* sl will be sizeof(pki_ih3) + irh + ossp0 + ossp1*/
+
+ irh = (struct octeon_instr_irh *)&cmd->cmd3.irh;
+
+ irh->opcode = LIO_OPCODE;
+ irh->subcode = LIO_OPCODE_NW_DATA;
+
+ packet_params.pkt_params32 = 0;
+ packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.transport_csum = setup->s.transport_csum;
+ packet_params.s.tnl_csum = setup->s.tnl_csum;
+ packet_params.s.tsflag = setup->s.timestamp;
+
+ irh->ossp = packet_params.pkt_params32;
+}
+
+int lio_setup_sc_buffer_pool(struct lio_device *lio_dev);
+void lio_free_sc_buffer_pool(struct lio_device *lio_dev);
+
+struct lio_soft_command *
+lio_alloc_soft_command(struct lio_device *lio_dev,
+ uint32_t datasize, uint32_t rdatasize,
+ uint32_t ctxsize);
+void lio_prepare_soft_command(struct lio_device *lio_dev,
+ struct lio_soft_command *sc,
+ uint8_t opcode, uint8_t subcode,
+ uint32_t irh_ossp, uint64_t ossp0,
+ uint64_t ossp1);
+int lio_send_soft_command(struct lio_device *lio_dev,
+ struct lio_soft_command *sc);
+void lio_free_soft_command(struct lio_soft_command *sc);
+
+/** Send control packet to the device
+ * @param lio_dev - lio device pointer
+ * @param nctrl - control structure with command, timeout, and callback info
+ *
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the
+ * queue should be stopped, and LIO_IQ_SEND_OK if it sent okay.
+ */
+int lio_send_ctrl_pkt(struct lio_device *lio_dev,
+ struct lio_ctrl_pkt *ctrl_pkt);
+
+/** Maximum ordered requests to process in every invocation of
+ * lio_process_ordered_list(). The function will continue to process requests
+ * as long as it can find one that has finished processing. If it keeps
+ * finding requests that have completed, the function can run for ever. The
+ * value defined here sets an upper limit on the number of requests it can
+ * process before it returns control to the poll thread.
+ */
+#define LIO_MAX_ORD_REQS_TO_PROCESS 4096
+
+/** Error codes used in Octeon Host-Core communication.
+ *
+ * 31 16 15 0
+ * ----------------------------
+ * | | |
+ * ----------------------------
+ * Error codes are 32-bit wide. The upper 16-bits, called Major Error Number,
+ * are reserved to identify the group to which the error code belongs. The
+ * lower 16-bits, called Minor Error Number, carry the actual code.
+ *
+ * So error codes are (MAJOR NUMBER << 16)| MINOR_NUMBER.
+ */
+/** Status for a request.
+ * If the request is successfully queued, the driver will return
+ * a LIO_REQUEST_PENDING status. LIO_REQUEST_TIMEOUT is only returned by
+ * the driver if the response for request failed to arrive before a
+ * time-out period or if the request processing * got interrupted due to
+ * a signal respectively.
+ */
+enum {
+ /** A value of 0x00000000 indicates no error i.e. success */
+ LIO_REQUEST_DONE = 0x00000000,
+ /** (Major number: 0x0000; Minor Number: 0x0001) */
+ LIO_REQUEST_PENDING = 0x00000001,
+ LIO_REQUEST_TIMEOUT = 0x00000003,
+
+};
+
+/*------ Error codes used by firmware (bits 15..0 set by firmware */
+#define LIO_FIRMWARE_MAJOR_ERROR_CODE 0x0001
+#define LIO_FIRMWARE_STATUS_CODE(status) \
+ ((LIO_FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))
+
+/** Initialize the response lists. The number of response lists to create is
+ * given by count.
+ * @param lio_dev - the lio device structure.
+ */
+void lio_setup_response_list(struct lio_device *lio_dev);
+
+/** Check the status of first entry in the ordered list. If the instruction at
+ * that entry finished processing or has timed-out, the entry is cleaned.
+ * @param lio_dev - the lio device structure.
+ * @return 1 if the ordered list is empty, 0 otherwise.
+ */
+int lio_process_ordered_list(struct lio_device *lio_dev);
+
+#define LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, field, count) \
+ (((lio_dev)->instr_queue[iq_no]->stats.field) += count)
+
+static inline void
+lio_swap_8B_data(uint64_t *data, uint32_t blocks)
+{
+ while (blocks) {
+ *data = rte_cpu_to_be_64(*data);
+ blocks--;
+ data++;
+ }
+}
+
+static inline uint64_t
+lio_map_ring(void *buf)
+{
+ phys_addr_t dma_addr;
+
+ dma_addr = rte_mbuf_data_dma_addr_default(((struct rte_mbuf *)buf));
+
+ return (uint64_t)dma_addr;
+}
+
+static inline uint64_t
+lio_map_ring_info(struct lio_droq *droq, uint32_t i)
+{
+ phys_addr_t dma_addr;
+
+ dma_addr = droq->info_list_dma + (i * LIO_DROQ_INFO_SIZE);
+
+ return (uint64_t)dma_addr;
+}
+
+static inline int
+lio_opcode_slow_path(union octeon_rh *rh)
+{
+ uint16_t subcode1, subcode2;
+
+ subcode1 = LIO_OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode);
+ subcode2 = LIO_OPCODE_SUBCODE(LIO_OPCODE, LIO_OPCODE_NW_DATA);
+
+ return subcode2 != subcode1;
+}
+
+static inline void
+lio_add_sg_size(struct lio_sg_entry *sg_entry,
+ uint16_t size, uint32_t pos)
+{
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ sg_entry->u.size[pos] = size;
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ sg_entry->u.size[3 - pos] = size;
+#endif
+}
+
+/* Macro to increment index.
+ * Index is incremented by count; if the sum exceeds
+ * max, index is wrapped-around to the start.
+ */
+static inline uint32_t
+lio_incr_index(uint32_t index, uint32_t count, uint32_t max)
+{
+ if ((index + count) >= max)
+ index = index + count - max;
+ else
+ index += count;
+
+ return index;
+}
+
+int lio_setup_droq(struct lio_device *lio_dev, int q_no, int num_descs,
+ int desc_size, struct rte_mempool *mpool,
+ unsigned int socket_id);
+uint16_t lio_dev_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t budget);
+void lio_delete_droq_queue(struct lio_device *lio_dev, int oq_no);
+
+void lio_delete_sglist(struct lio_instr_queue *txq);
+int lio_setup_sglists(struct lio_device *lio_dev, int iq_no,
+ int fw_mapped_iq, int num_descs, unsigned int socket_id);
+uint16_t lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts,
+ uint16_t nb_pkts);
+int lio_wait_for_instr_fetch(struct lio_device *lio_dev);
+int lio_setup_iq(struct lio_device *lio_dev, int q_index,
+ union octeon_txpciq iq_no, uint32_t num_descs, void *app_ctx,
+ unsigned int socket_id);
+int lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq);
+void lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no);
+/** Setup instruction queue zero for the device
+ * @param lio_dev which lio device to setup
+ *
+ * @return 0 if success. -1 if fails
+ */
+int lio_setup_instr_queue0(struct lio_device *lio_dev);
+void lio_free_instr_queue0(struct lio_device *lio_dev);
+void lio_dev_clear_queues(struct rte_eth_dev *eth_dev);
+#endif /* _LIO_RXTX_H_ */
diff --git a/drivers/net/liquidio/lio_struct.h b/drivers/net/liquidio/lio_struct.h
new file mode 100644
index 00000000..26f803f9
--- /dev/null
+++ b/drivers/net/liquidio/lio_struct.h
@@ -0,0 +1,689 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_STRUCT_H_
+#define _LIO_STRUCT_H_
+
+#include <stdio.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_spinlock.h>
+#include <rte_atomic.h>
+
+#include "lio_hw_defs.h"
+
+struct lio_stailq_node {
+ STAILQ_ENTRY(lio_stailq_node) entries;
+};
+
+STAILQ_HEAD(lio_stailq_head, lio_stailq_node);
+
+struct lio_version {
+ uint16_t major;
+ uint16_t minor;
+ uint16_t micro;
+ uint16_t reserved;
+};
+
+/** Input Queue statistics. Each input queue has four stats fields. */
+struct lio_iq_stats {
+ uint64_t instr_posted; /**< Instructions posted to this queue. */
+ uint64_t instr_processed; /**< Instructions processed in this queue. */
+ uint64_t instr_dropped; /**< Instructions that could not be processed */
+ uint64_t bytes_sent; /**< Bytes sent through this queue. */
+ uint64_t tx_done; /**< Num of packets sent to network. */
+ uint64_t tx_iq_busy; /**< Num of times this iq was found to be full. */
+ uint64_t tx_dropped; /**< Num of pkts dropped due to xmitpath errors. */
+ uint64_t tx_tot_bytes; /**< Total count of bytes sent to network. */
+};
+
+/** Output Queue statistics. Each output queue has four stats fields. */
+struct lio_droq_stats {
+ /** Number of packets received in this queue. */
+ uint64_t pkts_received;
+
+ /** Bytes received by this queue. */
+ uint64_t bytes_received;
+
+ /** Packets dropped due to no memory available. */
+ uint64_t dropped_nomem;
+
+ /** Packets dropped due to large number of pkts to process. */
+ uint64_t dropped_toomany;
+
+ /** Number of packets sent to stack from this queue. */
+ uint64_t rx_pkts_received;
+
+ /** Number of Bytes sent to stack from this queue. */
+ uint64_t rx_bytes_received;
+
+ /** Num of Packets dropped due to receive path failures. */
+ uint64_t rx_dropped;
+
+ /** Num of vxlan packets received; */
+ uint64_t rx_vxlan;
+
+ /** Num of failures of lio_recv_buffer_alloc() */
+ uint64_t rx_alloc_failure;
+
+};
+
+/** The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement a
+ * DROQ.
+ */
+struct lio_droq {
+ /** A spinlock to protect access to this ring. */
+ rte_spinlock_t lock;
+
+ uint32_t q_no;
+
+ uint32_t pkt_count;
+
+ struct lio_device *lio_dev;
+
+ /** The 8B aligned descriptor ring starts at this address. */
+ struct lio_droq_desc *desc_ring;
+
+ /** Index in the ring where the driver should read the next packet */
+ uint32_t read_idx;
+
+ /** Index in the ring where Octeon will write the next packet */
+ uint32_t write_idx;
+
+ /** Index in the ring where the driver will refill the descriptor's
+ * buffer
+ */
+ uint32_t refill_idx;
+
+ /** Packets pending to be processed */
+ rte_atomic64_t pkts_pending;
+
+ /** Number of descriptors in this ring. */
+ uint32_t max_count;
+
+ /** The number of descriptors pending refill. */
+ uint32_t refill_count;
+
+ uint32_t refill_threshold;
+
+ /** The 8B aligned info ptrs begin from this address. */
+ struct lio_droq_info *info_list;
+
+ /** The receive buffer list. This list has the virtual addresses of the
+ * buffers.
+ */
+ struct lio_recv_buffer *recv_buf_list;
+
+ /** The size of each buffer pointed by the buffer pointer. */
+ uint32_t buffer_size;
+
+ /** Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ void *pkts_credit_reg;
+
+ /** Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ void *pkts_sent_reg;
+
+ /** Statistics for this DROQ. */
+ struct lio_droq_stats stats;
+
+ /** DMA mapped address of the DROQ descriptor ring. */
+ size_t desc_ring_dma;
+
+ /** Info ptr list are allocated at this virtual address. */
+ size_t info_base_addr;
+
+ /** DMA mapped address of the info list */
+ size_t info_list_dma;
+
+ /** Allocated size of info list. */
+ uint32_t info_alloc_size;
+
+ /** Memory zone **/
+ const struct rte_memzone *desc_ring_mz;
+ const struct rte_memzone *info_mz;
+ struct rte_mempool *mpool;
+};
+
+/** Receive Header */
+union octeon_rh {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t rh64;
+ struct {
+ uint64_t opcode : 4;
+ uint64_t subcode : 8;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t reserved : 17;
+ uint64_t ossp : 32; /** opcode/subcode specific parameters */
+ } r;
+ struct {
+ uint64_t opcode : 4;
+ uint64_t subcode : 8;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t extra : 28;
+ uint64_t vlan : 12;
+ uint64_t priority : 3;
+ uint64_t csum_verified : 3; /** checksum verified. */
+ uint64_t has_hwtstamp : 1; /** Has hardware timestamp.1 = yes.*/
+ uint64_t encap_on : 1;
+ uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */
+ } r_dh;
+ struct {
+ uint64_t opcode : 4;
+ uint64_t subcode : 8;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t reserved : 8;
+ uint64_t extra : 25;
+ uint64_t gmxport : 16;
+ } r_nic_info;
+#else
+ uint64_t rh64;
+ struct {
+ uint64_t ossp : 32; /** opcode/subcode specific parameters */
+ uint64_t reserved : 17;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t subcode : 8;
+ uint64_t opcode : 4;
+ } r;
+ struct {
+ uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */
+ uint64_t encap_on : 1;
+ uint64_t has_hwtstamp : 1; /** 1 = has hwtstamp */
+ uint64_t csum_verified : 3; /** checksum verified. */
+ uint64_t priority : 3;
+ uint64_t vlan : 12;
+ uint64_t extra : 28;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t subcode : 8;
+ uint64_t opcode : 4;
+ } r_dh;
+ struct {
+ uint64_t gmxport : 16;
+ uint64_t extra : 25;
+ uint64_t reserved : 8;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t subcode : 8;
+ uint64_t opcode : 4;
+ } r_nic_info;
+#endif
+};
+
+#define OCTEON_RH_SIZE (sizeof(union octeon_rh))
+
+/** The txpciq info passed to host from the firmware */
+union octeon_txpciq {
+ uint64_t txpciq64;
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t q_no : 8;
+ uint64_t port : 8;
+ uint64_t pkind : 6;
+ uint64_t use_qpg : 1;
+ uint64_t qpg : 11;
+ uint64_t aura_num : 10;
+ uint64_t reserved : 20;
+#else
+ uint64_t reserved : 20;
+ uint64_t aura_num : 10;
+ uint64_t qpg : 11;
+ uint64_t use_qpg : 1;
+ uint64_t pkind : 6;
+ uint64_t port : 8;
+ uint64_t q_no : 8;
+#endif
+ } s;
+};
+
+/** The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to Octeon device from the host. Each input queue for
+ * a LIO device has one such structure to represent it.
+ */
+struct lio_instr_queue {
+ /** A spinlock to protect access to the input ring. */
+ rte_spinlock_t lock;
+
+ rte_spinlock_t post_lock;
+
+ struct lio_device *lio_dev;
+
+ uint32_t pkt_in_done;
+
+ rte_atomic64_t iq_flush_running;
+
+ /** Flag that indicates if the queue uses 64 byte commands. */
+ uint32_t iqcmd_64B:1;
+
+ /** Queue info. */
+ union octeon_txpciq txpciq;
+
+ uint32_t rsvd:17;
+
+ uint32_t status:8;
+
+ /** Maximum no. of instructions in this queue. */
+ uint32_t max_count;
+
+ /** Index in input ring where the driver should write the next packet */
+ uint32_t host_write_index;
+
+ /** Index in input ring where Octeon is expected to read the next
+ * packet.
+ */
+ uint32_t lio_read_index;
+
+ /** This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ uint32_t flush_index;
+
+ /** This field keeps track of the instructions pending in this queue. */
+ rte_atomic64_t instr_pending;
+
+ /** Pointer to the Virtual Base addr of the input ring. */
+ uint8_t *base_addr;
+
+ struct lio_request_list *request_list;
+
+ /** Octeon doorbell register for the ring. */
+ void *doorbell_reg;
+
+ /** Octeon instruction count register for this ring. */
+ void *inst_cnt_reg;
+
+ /** Number of instructions pending to be posted to Octeon. */
+ uint32_t fill_cnt;
+
+ /** Statistics for this input queue. */
+ struct lio_iq_stats stats;
+
+ /** DMA mapped base address of the input descriptor ring. */
+ uint64_t base_addr_dma;
+
+ /** Application context */
+ void *app_ctx;
+
+ /* network stack queue index */
+ int q_index;
+
+ /* Memory zone */
+ const struct rte_memzone *iq_mz;
+};
+
+/** This structure is used by driver to store information required
+ * to free the mbuff when the packet has been fetched by Octeon.
+ * Bytes offset below assume worst-case of a 64-bit system.
+ */
+struct lio_buf_free_info {
+ /** Bytes 1-8. Pointer to network device private structure. */
+ struct lio_device *lio_dev;
+
+ /** Bytes 9-16. Pointer to mbuff. */
+ struct rte_mbuf *mbuf;
+
+ /** Bytes 17-24. Pointer to gather list. */
+ struct lio_gather *g;
+
+ /** Bytes 25-32. Physical address of mbuf->data or gather list. */
+ uint64_t dptr;
+
+ /** Bytes 33-47. Piggybacked soft command, if any */
+ struct lio_soft_command *sc;
+
+ /** Bytes 48-63. iq no */
+ uint64_t iq_no;
+};
+
+/* The Scatter-Gather List Entry. The scatter or gather component used with
+ * input instruction has this format.
+ */
+struct lio_sg_entry {
+ /** The first 64 bit gives the size of data in each dptr. */
+ union {
+ uint16_t size[4];
+ uint64_t size64;
+ } u;
+
+ /** The 4 dptr pointers for this entry. */
+ uint64_t ptr[4];
+};
+
+#define LIO_SG_ENTRY_SIZE (sizeof(struct lio_sg_entry))
+
+/** Structure of a node in list of gather components maintained by
+ * driver for each network device.
+ */
+struct lio_gather {
+ /** List manipulation. Next and prev pointers. */
+ struct lio_stailq_node list;
+
+ /** Size of the gather component at sg in bytes. */
+ int sg_size;
+
+ /** Number of bytes that sg was adjusted to make it 8B-aligned. */
+ int adjust;
+
+ /** Gather component that can accommodate max sized fragment list
+ * received from the IP layer.
+ */
+ struct lio_sg_entry *sg;
+};
+
+struct lio_rss_ctx {
+ uint16_t hash_key_size;
+ uint8_t hash_key[LIO_RSS_MAX_KEY_SZ];
+ /* Ideally a factor of number of queues */
+ uint8_t itable[LIO_RSS_MAX_TABLE_SZ];
+ uint8_t itable_size;
+ uint8_t ip;
+ uint8_t tcp_hash;
+ uint8_t ipv6;
+ uint8_t ipv6_tcp_hash;
+ uint8_t ipv6_ex;
+ uint8_t ipv6_tcp_ex_hash;
+ uint8_t hash_disable;
+};
+
+struct lio_io_enable {
+ uint64_t iq;
+ uint64_t oq;
+ uint64_t iq64B;
+};
+
+struct lio_fn_list {
+ void (*setup_iq_regs)(struct lio_device *, uint32_t);
+ void (*setup_oq_regs)(struct lio_device *, uint32_t);
+
+ int (*setup_mbox)(struct lio_device *);
+ void (*free_mbox)(struct lio_device *);
+
+ int (*setup_device_regs)(struct lio_device *);
+ int (*enable_io_queues)(struct lio_device *);
+ void (*disable_io_queues)(struct lio_device *);
+};
+
+struct lio_pf_vf_hs_word {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ /** PKIND value assigned for the DPI interface */
+ uint64_t pkind : 8;
+
+ /** OCTEON core clock multiplier */
+ uint64_t core_tics_per_us : 16;
+
+ /** OCTEON coprocessor clock multiplier */
+ uint64_t coproc_tics_per_us : 16;
+
+ /** app that currently running on OCTEON */
+ uint64_t app_mode : 8;
+
+ /** RESERVED */
+ uint64_t reserved : 16;
+
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+
+ /** RESERVED */
+ uint64_t reserved : 16;
+
+ /** app that currently running on OCTEON */
+ uint64_t app_mode : 8;
+
+ /** OCTEON coprocessor clock multiplier */
+ uint64_t coproc_tics_per_us : 16;
+
+ /** OCTEON core clock multiplier */
+ uint64_t core_tics_per_us : 16;
+
+ /** PKIND value assigned for the DPI interface */
+ uint64_t pkind : 8;
+#endif
+};
+
+struct lio_sriov_info {
+ /** Number of rings assigned to VF */
+ uint32_t rings_per_vf;
+
+ /** Number of VF devices enabled */
+ uint32_t num_vfs;
+};
+
+/* Head of a response list */
+struct lio_response_list {
+ /** List structure to add delete pending entries to */
+ struct lio_stailq_head head;
+
+ /** A lock for this response list */
+ rte_spinlock_t lock;
+
+ rte_atomic64_t pending_req_count;
+};
+
+/* Structure to define the configuration attributes for each Input queue. */
+struct lio_iq_config {
+ /* Max number of IQs available */
+ uint8_t max_iqs;
+
+ /** Pending list size (usually set to the sum of the size of all Input
+ * queues)
+ */
+ uint32_t pending_list_size;
+
+ /** Command size - 32 or 64 bytes */
+ uint32_t instr_type;
+};
+
+/* Structure to define the configuration attributes for each Output queue. */
+struct lio_oq_config {
+ /* Max number of OQs available */
+ uint8_t max_oqs;
+
+ /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
+ uint32_t info_ptr;
+
+ /** The number of buffers that were consumed during packet processing by
+ * the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ uint32_t refill_threshold;
+};
+
+/* Structure to define the configuration. */
+struct lio_config {
+ uint16_t card_type;
+ const char *card_name;
+
+ /** Input Queue attributes. */
+ struct lio_iq_config iq;
+
+ /** Output Queue attributes. */
+ struct lio_oq_config oq;
+
+ int num_nic_ports;
+
+ int num_def_tx_descs;
+
+ /* Num of desc for rx rings */
+ int num_def_rx_descs;
+
+ int def_rx_buf_size;
+};
+
+/** Status of a RGMII Link on Octeon as seen by core driver. */
+union octeon_link_status {
+ uint64_t link_status64;
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t duplex : 8;
+ uint64_t mtu : 16;
+ uint64_t speed : 16;
+ uint64_t link_up : 1;
+ uint64_t autoneg : 1;
+ uint64_t if_mode : 5;
+ uint64_t pause : 1;
+ uint64_t flashing : 1;
+ uint64_t reserved : 15;
+#else
+ uint64_t reserved : 15;
+ uint64_t flashing : 1;
+ uint64_t pause : 1;
+ uint64_t if_mode : 5;
+ uint64_t autoneg : 1;
+ uint64_t link_up : 1;
+ uint64_t speed : 16;
+ uint64_t mtu : 16;
+ uint64_t duplex : 8;
+#endif
+ } s;
+};
+
+/** The rxpciq info passed to host from the firmware */
+union octeon_rxpciq {
+ uint64_t rxpciq64;
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t q_no : 8;
+ uint64_t reserved : 56;
+#else
+ uint64_t reserved : 56;
+ uint64_t q_no : 8;
+#endif
+ } s;
+};
+
+/** Information for a OCTEON ethernet interface shared between core & host. */
+struct octeon_link_info {
+ union octeon_link_status link;
+ uint64_t hw_addr;
+
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t gmxport : 16;
+ uint64_t macaddr_is_admin_assigned : 1;
+ uint64_t vlan_is_admin_assigned : 1;
+ uint64_t rsvd : 30;
+ uint64_t num_txpciq : 8;
+ uint64_t num_rxpciq : 8;
+#else
+ uint64_t num_rxpciq : 8;
+ uint64_t num_txpciq : 8;
+ uint64_t rsvd : 30;
+ uint64_t vlan_is_admin_assigned : 1;
+ uint64_t macaddr_is_admin_assigned : 1;
+ uint64_t gmxport : 16;
+#endif
+
+ union octeon_txpciq txpciq[LIO_MAX_IOQS_PER_IF];
+ union octeon_rxpciq rxpciq[LIO_MAX_IOQS_PER_IF];
+};
+
+/* ----------------------- THE LIO DEVICE --------------------------- */
+/** The lio device.
+ * Each lio device has this structure to represent all its
+ * components.
+ */
+struct lio_device {
+ /** PCI device pointer */
+ struct rte_pci_device *pci_dev;
+
+ /** Octeon Chip type */
+ uint16_t chip_id;
+ uint16_t pf_num;
+ uint16_t vf_num;
+
+ /** This device's PCIe port used for traffic. */
+ uint16_t pcie_port;
+
+ /** The state of this device */
+ rte_atomic64_t status;
+
+ uint8_t intf_open;
+
+ struct octeon_link_info linfo;
+
+ uint8_t *hw_addr;
+
+ struct lio_fn_list fn_list;
+
+ uint32_t num_iqs;
+
+ /** Guards each glist */
+ rte_spinlock_t *glist_lock;
+ /** Array of gather component linked lists */
+ struct lio_stailq_head *glist_head;
+
+ /* The pool containing pre allocated buffers used for soft commands */
+ struct rte_mempool *sc_buf_pool;
+
+ /** The input instruction queues */
+ struct lio_instr_queue *instr_queue[LIO_MAX_POSSIBLE_INSTR_QUEUES];
+
+ /** The singly-linked tail queues of instruction response */
+ struct lio_response_list response_list;
+
+ uint32_t num_oqs;
+
+ /** The DROQ output queues */
+ struct lio_droq *droq[LIO_MAX_POSSIBLE_OUTPUT_QUEUES];
+
+ struct lio_io_enable io_qmask;
+
+ struct lio_sriov_info sriov_info;
+
+ struct lio_pf_vf_hs_word pfvf_hsword;
+
+ /** Mail Box details of each lio queue. */
+ struct lio_mbox **mbox;
+
+ char dev_string[LIO_DEVICE_NAME_LEN]; /* Device print string */
+
+ const struct lio_config *default_config;
+
+ struct rte_eth_dev *eth_dev;
+
+ uint64_t ifflags;
+ uint8_t max_rx_queues;
+ uint8_t max_tx_queues;
+ uint8_t nb_rx_queues;
+ uint8_t nb_tx_queues;
+ uint8_t port_configured;
+ struct lio_rss_ctx rss_state;
+ uint8_t port_id;
+};
+#endif /* _LIO_STRUCT_H_ */
diff --git a/drivers/net/liquidio/rte_pmd_lio_version.map b/drivers/net/liquidio/rte_pmd_lio_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/drivers/net/liquidio/rte_pmd_lio_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index efed953e..e873fb48 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -36,12 +36,7 @@ LIB = librte_pmd_mlx4.a
# Sources.
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4.c
-
-# Dependencies.
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/librte_mempool
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_flow.c
# Basic CFLAGS.
CFLAGS += -O3
@@ -102,7 +97,7 @@ endif
mlx4_autoconf.h.new: FORCE
-mlx4_autoconf.h.new: $(RTE_SDK)/scripts/auto-config-h.sh
+mlx4_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
RSS_SUPPORT \
@@ -129,7 +124,7 @@ mlx4_autoconf.h: mlx4_autoconf.h.new
cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \
mv '$<' '$@'
-mlx4.o: mlx4_autoconf.h
+$(SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD):.c=.o): mlx4_autoconf.h
clean_mlx4: FORCE
$Q rm -f -- mlx4_autoconf.h mlx4_autoconf.h.new
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 6d43a977..ec4419a8 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
- * Copyright 2012-2015 6WIND S.A.
- * Copyright 2012 Mellanox.
+ * Copyright 2012-2017 6WIND S.A.
+ * Copyright 2012-2017 Mellanox.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -58,22 +58,9 @@
#include <linux/sockios.h>
#include <fcntl.h>
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_dev.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
@@ -86,30 +73,15 @@
#include <rte_log.h>
#include <rte_alarm.h>
#include <rte_memory.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
+#include <rte_flow.h>
+#include <rte_kvargs.h>
/* Generated configuration header. */
#include "mlx4_autoconf.h"
-/* PMD header. */
+/* PMD headers. */
#include "mlx4.h"
-
-/* Runtime logging through RTE_LOG() is enabled when not in debugging mode.
- * Intermediate LOG_*() macros add the required end-of-line characters. */
-#ifndef NDEBUG
-#define INFO(...) DEBUG(__VA_ARGS__)
-#define WARN(...) DEBUG(__VA_ARGS__)
-#define ERROR(...) DEBUG(__VA_ARGS__)
-#else
-#define LOG__(level, m, ...) \
- RTE_LOG(level, PMD, MLX4_DRIVER_NAME ": " m "%c", __VA_ARGS__)
-#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
-#define INFO(...) LOG_(INFO, __VA_ARGS__)
-#define WARN(...) LOG_(WARNING, __VA_ARGS__)
-#define ERROR(...) LOG_(ERR, __VA_ARGS__)
-#endif
+#include "mlx4_flow.h"
/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
@@ -137,157 +109,6 @@ typedef union {
(((val) & (from)) / ((from) / (to))) : \
(((val) & (from)) * ((to) / (from))))
-struct mlx4_rxq_stats {
- unsigned int idx; /**< Mapping index. */
-#ifdef MLX4_PMD_SOFT_COUNTERS
- uint64_t ipackets; /**< Total of successfully received packets. */
- uint64_t ibytes; /**< Total of successfully received bytes. */
-#endif
- uint64_t idropped; /**< Total of packets dropped when RX ring full. */
- uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
-};
-
-struct mlx4_txq_stats {
- unsigned int idx; /**< Mapping index. */
-#ifdef MLX4_PMD_SOFT_COUNTERS
- uint64_t opackets; /**< Total of successfully sent packets. */
- uint64_t obytes; /**< Total of successfully sent bytes. */
-#endif
- uint64_t odropped; /**< Total of packets not sent when TX ring full. */
-};
-
-/* RX element (scattered packets). */
-struct rxq_elt_sp {
- struct ibv_recv_wr wr; /* Work Request. */
- struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
- struct rte_mbuf *bufs[MLX4_PMD_SGE_WR_N]; /* SGEs buffers. */
-};
-
-/* RX element. */
-struct rxq_elt {
- struct ibv_recv_wr wr; /* Work Request. */
- struct ibv_sge sge; /* Scatter/Gather Element. */
- /* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */
-};
-
-/* RX queue descriptor. */
-struct rxq {
- struct priv *priv; /* Back pointer to private data. */
- struct rte_mempool *mp; /* Memory Pool for allocations. */
- struct ibv_mr *mr; /* Memory Region (for mp). */
- struct ibv_cq *cq; /* Completion Queue. */
- struct ibv_qp *qp; /* Queue Pair. */
- struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
- struct ibv_exp_cq_family *if_cq; /* CQ interface. */
- /*
- * Each VLAN ID requires a separate flow steering rule.
- */
- BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
- struct ibv_flow *mac_flow[MLX4_MAX_MAC_ADDRESSES][MLX4_MAX_VLAN_IDS];
- struct ibv_flow *promisc_flow; /* Promiscuous flow. */
- struct ibv_flow *allmulti_flow; /* Multicast flow. */
- unsigned int port_id; /* Port ID for incoming packets. */
- unsigned int elts_n; /* (*elts)[] length. */
- unsigned int elts_head; /* Current index in (*elts)[]. */
- union {
- struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
- struct rxq_elt (*no_sp)[]; /* RX elements. */
- } elts;
- unsigned int sp:1; /* Use scattered RX elements. */
- unsigned int csum:1; /* Enable checksum offloading. */
- unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
- struct mlx4_rxq_stats stats; /* RX queue counters. */
- unsigned int socket; /* CPU socket ID for allocations. */
- struct ibv_exp_res_domain *rd; /* Resource Domain. */
-};
-
-/* TX element. */
-struct txq_elt {
- struct rte_mbuf *buf;
-};
-
-/* Linear buffer type. It is used when transmitting buffers with too many
- * segments that do not fit the hardware queue (see max_send_sge).
- * Extra segments are copied (linearized) in such buffers, replacing the
- * last SGE during TX.
- * The size is arbitrary but large enough to hold a jumbo frame with
- * 8 segments considering mbuf.buf_len is about 2048 bytes. */
-typedef uint8_t linear_t[16384];
-
-/* TX queue descriptor. */
-struct txq {
- struct priv *priv; /* Back pointer to private data. */
- struct {
- const struct rte_mempool *mp; /* Cached Memory Pool. */
- struct ibv_mr *mr; /* Memory Region (for mp). */
- uint32_t lkey; /* mr->lkey */
- } mp2mr[MLX4_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
- struct ibv_cq *cq; /* Completion Queue. */
- struct ibv_qp *qp; /* Queue Pair. */
- struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
- struct ibv_exp_cq_family *if_cq; /* CQ interface. */
-#if MLX4_PMD_MAX_INLINE > 0
- uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */
-#endif
- unsigned int elts_n; /* (*elts)[] length. */
- struct txq_elt (*elts)[]; /* TX elements. */
- unsigned int elts_head; /* Current index in (*elts)[]. */
- unsigned int elts_tail; /* First element awaiting completion. */
- unsigned int elts_comp; /* Number of completion requests. */
- unsigned int elts_comp_cd; /* Countdown for next completion request. */
- unsigned int elts_comp_cd_init; /* Initial value for countdown. */
- struct mlx4_txq_stats stats; /* TX queue counters. */
- linear_t (*elts_linear)[]; /* Linearized buffers. */
- struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
- unsigned int socket; /* CPU socket ID for allocations. */
- struct ibv_exp_res_domain *rd; /* Resource Domain. */
-};
-
-struct priv {
- struct rte_eth_dev *dev; /* Ethernet device. */
- struct ibv_context *ctx; /* Verbs context. */
- struct ibv_device_attr device_attr; /* Device properties. */
- struct ibv_pd *pd; /* Protection Domain. */
- /*
- * MAC addresses array and configuration bit-field.
- * An extra entry that cannot be modified by the DPDK is reserved
- * for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
- */
- struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
- BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
- /* VLAN filters. */
- struct {
- unsigned int enabled:1; /* If enabled. */
- unsigned int id:12; /* VLAN ID (0-4095). */
- } vlan_filter[MLX4_MAX_VLAN_IDS]; /* VLAN filters table. */
- /* Device properties. */
- uint16_t mtu; /* Configured MTU. */
- uint8_t port; /* Physical port number. */
- unsigned int started:1; /* Device started, flows enabled. */
- unsigned int promisc:1; /* Device in promiscuous mode. */
- unsigned int allmulti:1; /* Device receives all multicast packets. */
- unsigned int hw_qpg:1; /* QP groups are supported. */
- unsigned int hw_tss:1; /* TSS is supported. */
- unsigned int hw_rss:1; /* RSS is supported. */
- unsigned int hw_csum:1; /* Checksum offload is supported. */
- unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
- unsigned int rss:1; /* RSS is enabled. */
- unsigned int vf:1; /* This is a VF device. */
- unsigned int pending_alarm:1; /* An alarm is pending. */
-#ifdef INLINE_RECV
- unsigned int inl_recv_size; /* Inline recv size */
-#endif
- unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */
- /* RX/TX queues. */
- struct rxq rxq_parent; /* Parent queue when RSS is enabled. */
- unsigned int rxqs_n; /* RX queues array size. */
- unsigned int txqs_n; /* TX queues array size. */
- struct rxq *(*rxqs)[]; /* RX queues. */
- struct txq *(*txqs)[]; /* TX queues. */
- struct rte_intr_handle intr_handle; /* Interrupt handler. */
- rte_spinlock_t lock; /* Lock for control functions. */
-};
-
/* Local storage for secondary process data. */
struct mlx4_secondary_data {
struct rte_eth_dev_data data; /* Local device data. */
@@ -296,6 +117,16 @@ struct mlx4_secondary_data {
rte_spinlock_t lock; /* Port configuration lock. */
} mlx4_secondary_data[RTE_MAX_ETHPORTS];
+struct mlx4_conf {
+ uint8_t active_ports;
+};
+
+/* Available parameters list. */
+const char *pmd_mlx4_init_params[] = {
+ MLX4_PMD_PORT_KVARG,
+ NULL,
+};
+
/**
* Check if running as a secondary process.
*
@@ -335,8 +166,7 @@ mlx4_get_priv(struct rte_eth_dev *dev)
* @param priv
* Pointer to private structure.
*/
-static void
-priv_lock(struct priv *priv)
+void priv_lock(struct priv *priv)
{
rte_spinlock_lock(&priv->lock);
}
@@ -347,8 +177,7 @@ priv_lock(struct priv *priv)
* @param priv
* Pointer to private structure.
*/
-static void
-priv_unlock(struct priv *priv)
+void priv_unlock(struct priv *priv)
{
rte_spinlock_unlock(&priv->lock);
}
@@ -2526,6 +2355,7 @@ rxq_add_flow(struct rxq *rxq, unsigned int mac_index, unsigned int vlan_index)
assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
*attr = (struct ibv_flow_attr){
.type = IBV_FLOW_ATTR_NORMAL,
+ .priority = 3,
.num_of_specs = 1,
.port = priv->port,
.flags = 0
@@ -3340,6 +3170,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Increase out of memory counters. */
++rxq->stats.rx_nombuf;
++rxq->priv->dev->data->rx_mbuf_alloc_failed;
+ /* Add SGE to array for repost. */
+ sges[i] = elt->sge;
goto repost;
}
@@ -3604,7 +3436,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
}
/* Enable scattered packets support for this queue if necessary. */
assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
+ if (dev->data->dev_conf.rxmode.enable_scatter &&
(dev->data->dev_conf.rxmode.max_rx_pkt_len >
(mb_len - RTE_PKTMBUF_HEADROOM))) {
tmpl.sp = 1;
@@ -3826,11 +3658,19 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
/* Enable scattered packets support for this queue if necessary. */
assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >
- (mb_len - RTE_PKTMBUF_HEADROOM))) {
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+ (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ tmpl.sp = 0;
+ } else if (dev->data->dev_conf.rxmode.enable_scatter) {
tmpl.sp = 1;
desc /= MLX4_PMD_SGE_WR_N;
+ } else {
+ WARN("%p: the requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered"
+ " mode has not been requested",
+ (void *)dev,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ mb_len - RTE_PKTMBUF_HEADROOM);
}
DEBUG("%p: %s scattered packets support (%u WRs)",
(void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc);
@@ -4092,9 +3932,15 @@ mlx4_rx_queue_release(void *dpdk_rxq)
priv_unlock(priv);
}
-static void
+static int
priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
+static int
+priv_dev_removal_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
+
+static int
+priv_dev_link_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
+
/**
* DPDK callback to start the device.
*
@@ -4113,6 +3959,7 @@ mlx4_dev_start(struct rte_eth_dev *dev)
unsigned int i = 0;
unsigned int r;
struct rxq *rxq;
+ int ret;
if (mlx4_is_secondary())
return -E_RTE_SECONDARY;
@@ -4132,8 +3979,6 @@ mlx4_dev_start(struct rte_eth_dev *dev)
}
/* Iterate only once when RSS is enabled. */
do {
- int ret;
-
/* Ignore nonexistent RX queues. */
if (rxq == NULL)
continue;
@@ -4146,22 +3991,41 @@ mlx4_dev_start(struct rte_eth_dev *dev)
continue;
WARN("%p: QP flow attachment failed: %s",
(void *)dev, strerror(ret));
- /* Rollback. */
- while (i != 0) {
- rxq = (*priv->rxqs)[--i];
- if (rxq != NULL) {
- rxq_allmulticast_disable(rxq);
- rxq_promiscuous_disable(rxq);
- rxq_mac_addrs_del(rxq);
- }
- }
- priv->started = 0;
- priv_unlock(priv);
- return -ret;
+ goto err;
} while ((--r) && ((rxq = (*priv->rxqs)[++i]), i));
- priv_dev_interrupt_handler_install(priv, dev);
+ ret = priv_dev_link_interrupt_handler_install(priv, dev);
+ if (ret) {
+ ERROR("%p: LSC handler install failed",
+ (void *)dev);
+ goto err;
+ }
+ ret = priv_dev_removal_interrupt_handler_install(priv, dev);
+ if (ret) {
+ ERROR("%p: RMV handler install failed",
+ (void *)dev);
+ goto err;
+ }
+ ret = mlx4_priv_flow_start(priv);
+ if (ret) {
+ ERROR("%p: flow start failed: %s",
+ (void *)dev, strerror(ret));
+ goto err;
+ }
priv_unlock(priv);
return 0;
+err:
+ /* Rollback. */
+ while (i != 0) {
+ rxq = (*priv->rxqs)[i--];
+ if (rxq != NULL) {
+ rxq_allmulticast_disable(rxq);
+ rxq_promiscuous_disable(rxq);
+ rxq_mac_addrs_del(rxq);
+ }
+ }
+ priv->started = 0;
+ priv_unlock(priv);
+ return -ret;
}
/**
@@ -4196,6 +4060,7 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
rxq = (*priv->rxqs)[0];
r = priv->rxqs_n;
}
+ mlx4_priv_flow_stop(priv);
/* Iterate only once when RSS is enabled. */
do {
/* Ignore nonexistent RX queues. */
@@ -4258,9 +4123,16 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
return 0;
}
-static void
+static int
priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
+static int
+priv_dev_removal_interrupt_handler_uninstall(struct priv *,
+ struct rte_eth_dev *);
+
+static int
+priv_dev_link_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
+
/**
* DPDK callback to close the device.
*
@@ -4323,7 +4195,8 @@ mlx4_dev_close(struct rte_eth_dev *dev)
claim_zero(ibv_close_device(priv->ctx));
} else
assert(priv->ctx == NULL);
- priv_dev_interrupt_handler_uninstall(priv, dev);
+ priv_dev_removal_interrupt_handler_uninstall(priv, dev);
+ priv_dev_link_interrupt_handler_uninstall(priv, dev);
priv_unlock(priv);
memset(priv, 0, sizeof(*priv));
}
@@ -4427,6 +4300,8 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
unsigned int max;
char ifname[IF_NAMESIZE];
+ info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
if (priv == NULL)
return;
priv_lock(priv);
@@ -4628,26 +4503,30 @@ end:
* @param vmdq
* VMDq pool index to associate address with (ignored).
*/
-static void
+static int
mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq)
{
struct priv *priv = dev->data->dev_private;
+ int re;
if (mlx4_is_secondary())
- return;
+ return -ENOTSUP;
(void)vmdq;
priv_lock(priv);
DEBUG("%p: adding MAC address at index %" PRIu32,
(void *)dev, index);
/* Last array entry is reserved for broadcast. */
- if (index >= (elemof(priv->mac) - 1))
+ if (index >= (elemof(priv->mac) - 1)) {
+ re = EINVAL;
goto end;
- priv_mac_addr_add(priv, index,
- (const uint8_t (*)[ETHER_ADDR_LEN])
- mac_addr->addr_bytes);
+ }
+ re = priv_mac_addr_add(priv, index,
+ (const uint8_t (*)[ETHER_ADDR_LEN])
+ mac_addr->addr_bytes);
end:
priv_unlock(priv);
+ return -re;
}
/**
@@ -4827,7 +4706,7 @@ end:
}
/**
- * DPDK callback to retrieve physical link information (unlocked version).
+ * DPDK callback to retrieve physical link information.
*
* @param dev
* Pointer to Ethernet device structure.
@@ -4835,9 +4714,9 @@ end:
* Wait for request completion (ignored).
*/
static int
-mlx4_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
+mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
- struct priv *priv = mlx4_get_priv(dev);
+ const struct priv *priv = mlx4_get_priv(dev);
struct ethtool_cmd edata = {
.cmd = ETHTOOL_GSET
};
@@ -4845,6 +4724,8 @@ mlx4_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
struct rte_eth_link dev_link;
int link_speed = 0;
+ /* priv_lock() is not taken to allow concurrent calls. */
+
if (priv == NULL)
return -EINVAL;
(void)wait_to_complete;
@@ -4879,27 +4760,9 @@ mlx4_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
return -1;
}
-/**
- * DPDK callback to retrieve physical link information.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
- */
static int
-mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
-{
- struct priv *priv = mlx4_get_priv(dev);
- int ret;
-
- if (priv == NULL)
- return -EINVAL;
- priv_lock(priv);
- ret = mlx4_link_update_unlocked(dev, wait_to_complete);
- priv_unlock(priv);
- return ret;
-}
+mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
+ struct rte_pci_addr *pci_addr);
/**
* DPDK callback to change the MTU.
@@ -4949,21 +4812,16 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* Reconfigure each RX queue. */
for (i = 0; (i != priv->rxqs_n); ++i) {
struct rxq *rxq = (*priv->rxqs)[i];
- unsigned int mb_len;
unsigned int max_frame_len;
- int sp;
if (rxq == NULL)
continue;
- /* Calculate new maximum frame length according to MTU and
- * toggle scattered support (sp) if necessary. */
+ /* Calculate new maximum frame length according to MTU. */
max_frame_len = (priv->mtu + ETHER_HDR_LEN +
(ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN));
- mb_len = rte_pktmbuf_data_room_size(rxq->mp);
- assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
/* Provide new values to rxq_setup(). */
- dev->data->dev_conf.rxmode.jumbo_frame = sp;
+ dev->data->dev_conf.rxmode.jumbo_frame =
+ (max_frame_len > ETHER_MAX_LEN);
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
ret = rxq_rehash(dev, rxq);
if (ret) {
@@ -5215,6 +5073,55 @@ mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
return -ret;
}
+const struct rte_flow_ops mlx4_flow_ops = {
+ .validate = mlx4_flow_validate,
+ .create = mlx4_flow_create,
+ .destroy = mlx4_flow_destroy,
+ .flush = mlx4_flow_flush,
+ .query = NULL,
+};
+
+/**
+ * Manage filter operations.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+mlx4_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = EINVAL;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &mlx4_flow_ops;
+ return 0;
+ case RTE_ETH_FILTER_FDIR:
+ DEBUG("%p: filter type FDIR is not supported by this PMD",
+ (void *)dev);
+ break;
+ default:
+ ERROR("%p: filter type (%d) not supported",
+ (void *)dev, filter_type);
+ break;
+ }
+ return -ret;
+}
+
static const struct eth_dev_ops mlx4_dev_ops = {
.dev_configure = mlx4_dev_configure,
.dev_start = mlx4_dev_start,
@@ -5249,6 +5156,7 @@ static const struct eth_dev_ops mlx4_dev_ops = {
.mac_addr_add = mlx4_mac_addr_add,
.mac_addr_set = mlx4_mac_addr_set,
.mtu_set = mlx4_dev_set_mtu,
+ .filter_ctrl = mlx4_dev_filter_ctrl,
};
/**
@@ -5379,35 +5287,44 @@ mlx4_getenv_int(const char *name)
static void
mlx4_dev_link_status_handler(void *);
static void
-mlx4_dev_interrupt_handler(struct rte_intr_handle *, void *);
+mlx4_dev_interrupt_handler(void *);
/**
- * Link status handler.
+ * Link/device status handler.
*
* @param priv
* Pointer to private structure.
* @param dev
* Pointer to the rte_eth_dev structure.
+ * @param events
+ * Pointer to event flags holder.
*
* @return
- * Nonzero if the callback process can be called immediately.
+ * Number of events
*/
static int
-priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
+priv_dev_status_handler(struct priv *priv, struct rte_eth_dev *dev,
+ uint32_t *events)
{
struct ibv_async_event event;
int port_change = 0;
int ret = 0;
+ *events = 0;
/* Read all message and acknowledge them. */
for (;;) {
if (ibv_get_async_event(priv->ctx, &event))
break;
-
- if (event.event_type == IBV_EVENT_PORT_ACTIVE ||
- event.event_type == IBV_EVENT_PORT_ERR)
+ if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
+ event.event_type == IBV_EVENT_PORT_ERR) &&
+ (priv->intr_conf.lsc == 1)) {
port_change = 1;
- else
+ ret++;
+ } else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
+ priv->intr_conf.rmv == 1) {
+ *events |= (1 << RTE_ETH_EVENT_INTR_RMV);
+ ret++;
+ } else
DEBUG("event type %d on port %d not handled",
event.event_type, event.element.port_num);
ibv_ack_async_event(&event);
@@ -5417,7 +5334,7 @@ priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
struct rte_eth_link *link = &dev->data->dev_link;
priv->pending_alarm = 0;
- mlx4_link_update_unlocked(dev, 0);
+ mlx4_link_update(dev, 0);
if (((link->link_speed == 0) && link->link_status) ||
((link->link_speed != 0) && !link->link_status)) {
/* Inconsistent status, check again later. */
@@ -5425,8 +5342,9 @@ priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
rte_eal_alarm_set(MLX4_ALARM_TIMEOUT_US,
mlx4_dev_link_status_handler,
dev);
- } else
- ret = 1;
+ } else {
+ *events |= (1 << RTE_ETH_EVENT_INTR_LSC);
+ }
}
return ret;
}
@@ -5442,13 +5360,14 @@ mlx4_dev_link_status_handler(void *arg)
{
struct rte_eth_dev *dev = arg;
struct priv *priv = dev->data->dev_private;
+ uint32_t events;
int ret;
priv_lock(priv);
assert(priv->pending_alarm == 1);
- ret = priv_dev_link_status_handler(priv, dev);
+ ret = priv_dev_status_handler(priv, dev, &events);
priv_unlock(priv);
- if (ret)
+ if (ret > 0 && events & (1 << RTE_ETH_EVENT_INTR_LSC))
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
@@ -5461,18 +5380,31 @@ mlx4_dev_link_status_handler(void *arg)
* Callback argument.
*/
static void
-mlx4_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
+mlx4_dev_interrupt_handler(void *cb_arg)
{
struct rte_eth_dev *dev = cb_arg;
struct priv *priv = dev->data->dev_private;
int ret;
+ uint32_t ev;
+ int i;
- (void)intr_handle;
priv_lock(priv);
- ret = priv_dev_link_status_handler(priv, dev);
+ ret = priv_dev_status_handler(priv, dev, &ev);
priv_unlock(priv);
- if (ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ if (ret > 0) {
+ for (i = RTE_ETH_EVENT_UNKNOWN;
+ i < RTE_ETH_EVENT_MAX;
+ i++) {
+ if (ev & (1 << i)) {
+ ev &= ~(1 << i);
+ _rte_eth_dev_callback_process(dev, i, NULL);
+ ret--;
+ }
+ }
+ if (ret)
+ WARN("%d event%s not processed", ret,
+ (ret > 1 ? "s were" : " was"));
+ }
}
/**
@@ -5482,20 +5414,30 @@ mlx4_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
* Pointer to private structure.
* @param dev
* Pointer to the rte_eth_dev structure.
+ * @return
+ * 0 on success, negative errno value on failure.
*/
-static void
+static int
priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
{
- if (!dev->data->dev_conf.intr_conf.lsc)
- return;
- rte_intr_callback_unregister(&priv->intr_handle,
- mlx4_dev_interrupt_handler,
- dev);
- if (priv->pending_alarm)
- rte_eal_alarm_cancel(mlx4_dev_link_status_handler, dev);
- priv->pending_alarm = 0;
+ int ret;
+
+ if (priv->intr_conf.lsc ||
+ priv->intr_conf.rmv)
+ return 0;
+ ret = rte_intr_callback_unregister(&priv->intr_handle,
+ mlx4_dev_interrupt_handler,
+ dev);
+ if (ret < 0) {
+ ERROR("rte_intr_callback_unregister failed with %d"
+ "%s%s%s", ret,
+ (errno ? " (errno: " : ""),
+ (errno ? strerror(errno) : ""),
+ (errno ? ")" : ""));
+ }
priv->intr_handle.fd = 0;
priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ return ret;
}
/**
@@ -5505,30 +5447,229 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
* Pointer to private structure.
* @param dev
* Pointer to the rte_eth_dev structure.
+ * @return
+ * 0 on success, negative errno value on failure.
*/
-static void
-priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
+static int
+priv_dev_interrupt_handler_install(struct priv *priv,
+ struct rte_eth_dev *dev)
{
- int rc, flags;
+ int flags;
+ int rc;
- if (!dev->data->dev_conf.intr_conf.lsc)
- return;
+ /* Check whether the interrupt handler has already been installed
+ * for either type of interrupt
+ */
+ if (priv->intr_conf.lsc &&
+ priv->intr_conf.rmv &&
+ priv->intr_handle.fd)
+ return 0;
assert(priv->ctx->async_fd > 0);
flags = fcntl(priv->ctx->async_fd, F_GETFL);
rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
INFO("failed to change file descriptor async event queue");
dev->data->dev_conf.intr_conf.lsc = 0;
+ dev->data->dev_conf.intr_conf.rmv = 0;
+ return -errno;
} else {
priv->intr_handle.fd = priv->ctx->async_fd;
priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
- rte_intr_callback_register(&priv->intr_handle,
- mlx4_dev_interrupt_handler,
- dev);
+ rc = rte_intr_callback_register(&priv->intr_handle,
+ mlx4_dev_interrupt_handler,
+ dev);
+ if (rc) {
+ ERROR("rte_intr_callback_register failed "
+ " (errno: %s)", strerror(errno));
+ return rc;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Uninstall interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ * @return
+ * 0 on success, negative value on error.
+ */
+static int
+priv_dev_removal_interrupt_handler_uninstall(struct priv *priv,
+ struct rte_eth_dev *dev)
+{
+ if (dev->data->dev_conf.intr_conf.rmv) {
+ priv->intr_conf.rmv = 0;
+ return priv_dev_interrupt_handler_uninstall(priv, dev);
+ }
+ return 0;
+}
+
+/**
+ * Uninstall interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ * @return
+ * 0 on success, negative value on error,
+ */
+static int
+priv_dev_link_interrupt_handler_uninstall(struct priv *priv,
+ struct rte_eth_dev *dev)
+{
+ int ret = 0;
+
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ priv->intr_conf.lsc = 0;
+ ret = priv_dev_interrupt_handler_uninstall(priv, dev);
+ if (ret)
+ return ret;
+ }
+ if (priv->pending_alarm)
+ if (rte_eal_alarm_cancel(mlx4_dev_link_status_handler,
+ dev)) {
+ ERROR("rte_eal_alarm_cancel failed "
+ " (errno: %s)", strerror(rte_errno));
+ return -rte_errno;
+ }
+ priv->pending_alarm = 0;
+ return 0;
+}
+
+/**
+ * Install link interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ * @return
+ * 0 on success, negative value on error.
+ */
+static int
+priv_dev_link_interrupt_handler_install(struct priv *priv,
+ struct rte_eth_dev *dev)
+{
+ int ret;
+
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ ret = priv_dev_interrupt_handler_install(priv, dev);
+ if (ret)
+ return ret;
+ priv->intr_conf.lsc = 1;
+ }
+ return 0;
+}
+
+/**
+ * Install removal interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ * @return
+ * 0 on success, negative value on error.
+ */
+static int
+priv_dev_removal_interrupt_handler_install(struct priv *priv,
+ struct rte_eth_dev *dev)
+{
+ int ret;
+
+ if (dev->data->dev_conf.intr_conf.rmv) {
+ ret = priv_dev_interrupt_handler_install(priv, dev);
+ if (ret)
+ return ret;
+ priv->intr_conf.rmv = 1;
+ }
+ return 0;
+}
+
+/**
+ * Verify and store value for device argument.
+ *
+ * @param[in] key
+ * Key argument to verify.
+ * @param[in] val
+ * Value associated with key.
+ * @param out
+ * User data.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+mlx4_arg_parse(const char *key, const char *val, void *out)
+{
+ struct mlx4_conf *conf = out;
+ unsigned long tmp;
+
+ errno = 0;
+ tmp = strtoul(val, NULL, 0);
+ if (errno) {
+ WARN("%s: \"%s\" is not a valid integer", key, val);
+ return -errno;
+ }
+ if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
+ if (tmp >= MLX4_PMD_MAX_PHYS_PORTS) {
+ ERROR("invalid port index %lu (max: %u)",
+ tmp, MLX4_PMD_MAX_PHYS_PORTS - 1);
+ return -EINVAL;
+ }
+ conf->active_ports |= 1 << tmp;
+ } else {
+ WARN("%s: unknown parameter", key);
+ return -EINVAL;
}
+ return 0;
}
-static struct eth_driver mlx4_driver;
+/**
+ * Parse device parameters.
+ *
+ * @param devargs
+ * Device arguments structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf)
+{
+ struct rte_kvargs *kvlist;
+ unsigned int arg_count;
+ int ret = 0;
+ int i;
+
+ if (devargs == NULL)
+ return 0;
+ kvlist = rte_kvargs_parse(devargs->args, pmd_mlx4_init_params);
+ if (kvlist == NULL) {
+ ERROR("failed to parse kvargs");
+ return -EINVAL;
+ }
+ /* Process parameters. */
+ for (i = 0; pmd_mlx4_init_params[i]; ++i) {
+ arg_count = rte_kvargs_count(kvlist, MLX4_PMD_PORT_KVARG);
+ while (arg_count-- > 0) {
+ ret = rte_kvargs_process(kvlist, MLX4_PMD_PORT_KVARG,
+ mlx4_arg_parse, conf);
+ if (ret != 0)
+ goto free_kvlist;
+ }
+ }
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static struct rte_pci_driver mlx4_driver;
/**
* DPDK callback to register a PCI device.
@@ -5552,12 +5693,15 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
int err = 0;
struct ibv_context *attr_ctx = NULL;
struct ibv_device_attr device_attr;
+ struct mlx4_conf conf = {
+ .active_ports = 0,
+ };
unsigned int vf;
int idx;
int i;
(void)pci_drv;
- assert(pci_drv == &mlx4_driver.pci_drv);
+ assert(pci_drv == &mlx4_driver);
/* Get mlx4_dev[] index. */
idx = mlx4_dev_idx(&pci_dev->addr);
if (idx == -1) {
@@ -5571,10 +5715,8 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
list = ibv_get_device_list(&i);
if (list == NULL) {
assert(errno);
- if (errno == ENOSYS) {
- WARN("cannot list devices, is ib_uverbs loaded?");
- return 0;
- }
+ if (errno == ENOSYS)
+ ERROR("cannot list devices, is ib_uverbs loaded?");
return -errno;
}
assert(i >= 0);
@@ -5606,11 +5748,11 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_free_device_list(list);
switch (err) {
case 0:
- WARN("cannot access device, is mlx4_ib loaded?");
- return 0;
+ ERROR("cannot access device, is mlx4_ib loaded?");
+ return -ENODEV;
case EINVAL:
- WARN("cannot use device, are drivers up to date?");
- return 0;
+ ERROR("cannot use device, are drivers up to date?");
+ return -EINVAL;
}
assert(err > 0);
return -err;
@@ -5622,6 +5764,15 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
goto error;
INFO("%u port(s) detected", device_attr.phys_port_cnt);
+ if (mlx4_args(pci_dev->device.devargs, &conf)) {
+ ERROR("failed to process device arguments");
+ goto error;
+ }
+ /* Use all ports when none are defined */
+ if (conf.active_ports == 0) {
+ for (i = 0; i < MLX4_PMD_MAX_PHYS_PORTS; i++)
+ conf.active_ports |= 1 << i;
+ }
for (i = 0; i < device_attr.phys_port_cnt; i++) {
uint32_t port = i + 1; /* ports are indexed from one */
uint32_t test = (1 << i);
@@ -5635,6 +5786,9 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
#endif /* HAVE_EXP_QUERY_DEVICE */
struct ether_addr mac;
+ /* If port is not active, skip. */
+ if (!(conf.active_ports & (1 << i)))
+ continue;
#ifdef HAVE_EXP_QUERY_DEVICE
exp_device_attr.comp_mask = IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS;
#ifdef RSS_SUPPORT
@@ -5840,23 +5994,23 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev->rx_pkt_burst = mlx4_rx_burst_secondary_setup;
} else {
eth_dev->data->dev_private = priv;
- eth_dev->data->rx_mbuf_alloc_failed = 0;
- eth_dev->data->mtu = ETHER_MTU;
eth_dev->data->mac_addrs = priv->mac;
}
- eth_dev->pci_dev = pci_dev;
+ eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->driver = &mlx4_driver;
+ eth_dev->device->driver = &mlx4_driver.driver;
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx4_dev_ops;
- TAILQ_INIT(&eth_dev->link_intr_cbs);
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
priv_set_flags(priv, ~IFF_UP, IFF_UP);
+ /* Update link status once if waiting for LSC. */
+ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ mlx4_link_update(eth_dev, 0);
continue;
port_error:
@@ -5910,16 +6064,14 @@ static const struct rte_pci_id mlx4_pci_id_map[] = {
}
};
-static struct eth_driver mlx4_driver = {
- .pci_drv = {
- .driver = {
- .name = MLX4_DRIVER_NAME
- },
- .id_table = mlx4_pci_id_map,
- .probe = mlx4_pci_probe,
- .drv_flags = RTE_PCI_DRV_INTR_LSC,
+static struct rte_pci_driver mlx4_driver = {
+ .driver = {
+ .name = MLX4_DRIVER_NAME
},
- .dev_private_size = sizeof(struct priv)
+ .id_table = mlx4_pci_id_map,
+ .probe = mlx4_pci_probe,
+ .drv_flags = RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_INTR_RMV,
};
/**
@@ -5938,8 +6090,10 @@ rte_mlx4_pmd_init(void)
*/
setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
ibv_fork_init();
- rte_eal_pci_register(&mlx4_driver.pci_drv);
+ rte_pci_register(&mlx4_driver);
}
RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx4, mlx4_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_mlx4,
+ "* ib_uverbs & mlx4_en & mlx4_core & mlx4_ib");
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 4c7505e2..9a3bae90 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
- * Copyright 2012-2015 6WIND S.A.
- * Copyright 2012 Mellanox.
+ * Copyright 2012-2017 6WIND S.A.
+ * Copyright 2012-2017 Mellanox.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -39,6 +39,33 @@
#include <limits.h>
/*
+ * Runtime logging through RTE_LOG() is enabled when not in debugging mode.
+ * Intermediate LOG_*() macros add the required end-of-line characters.
+ */
+#ifndef NDEBUG
+#define INFO(...) DEBUG(__VA_ARGS__)
+#define WARN(...) DEBUG(__VA_ARGS__)
+#define ERROR(...) DEBUG(__VA_ARGS__)
+#else
+#define LOG__(level, m, ...) \
+ RTE_LOG(level, PMD, MLX4_DRIVER_NAME ": " m "%c", __VA_ARGS__)
+#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
+#define INFO(...) LOG_(INFO, __VA_ARGS__)
+#define WARN(...) LOG_(WARNING, __VA_ARGS__)
+#define ERROR(...) LOG_(ERR, __VA_ARGS__)
+#endif
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+/*
* Maximum number of simultaneous MAC addresses supported.
*
* According to ConnectX's Programmer Reference Manual:
@@ -54,6 +81,9 @@
/* Request send completion once in every 64 sends, might be less. */
#define MLX4_PMD_TX_PER_COMP_REQ 64
+/* Maximum number of physical ports. */
+#define MLX4_PMD_MAX_PHYS_PORTS 2
+
/* Maximum number of Scatter/Gather Elements per Work Request. */
#ifndef MLX4_PMD_SGE_WR_N
#define MLX4_PMD_SGE_WR_N 4
@@ -86,6 +116,9 @@
/* Alarm timeout. */
#define MLX4_ALARM_TIMEOUT_US 100000
+/* Port parameter. */
+#define MLX4_PMD_PORT_KVARG "port"
+
enum {
PCI_VENDOR_ID_MELLANOX = 0x15b3,
};
@@ -160,4 +193,165 @@ enum {
#define claim_positive(...) (__VA_ARGS__)
#endif /* NDEBUG */
+struct mlx4_rxq_stats {
+ unsigned int idx; /**< Mapping index. */
+#ifdef MLX4_PMD_SOFT_COUNTERS
+ uint64_t ipackets; /**< Total of successfully received packets. */
+ uint64_t ibytes; /**< Total of successfully received bytes. */
+#endif
+ uint64_t idropped; /**< Total of packets dropped when RX ring full. */
+ uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
+};
+
+/* RX element (scattered packets). */
+struct rxq_elt_sp {
+ struct ibv_recv_wr wr; /* Work Request. */
+ struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
+ struct rte_mbuf *bufs[MLX4_PMD_SGE_WR_N]; /* SGEs buffers. */
+};
+
+/* RX element. */
+struct rxq_elt {
+ struct ibv_recv_wr wr; /* Work Request. */
+ struct ibv_sge sge; /* Scatter/Gather Element. */
+ /* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */
+};
+
+/* RX queue descriptor. */
+struct rxq {
+ struct priv *priv; /* Back pointer to private data. */
+ struct rte_mempool *mp; /* Memory Pool for allocations. */
+ struct ibv_mr *mr; /* Memory Region (for mp). */
+ struct ibv_cq *cq; /* Completion Queue. */
+ struct ibv_qp *qp; /* Queue Pair. */
+ struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
+ struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+ /*
+ * Each VLAN ID requires a separate flow steering rule.
+ */
+ BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
+ struct ibv_flow *mac_flow[MLX4_MAX_MAC_ADDRESSES][MLX4_MAX_VLAN_IDS];
+ struct ibv_flow *promisc_flow; /* Promiscuous flow. */
+ struct ibv_flow *allmulti_flow; /* Multicast flow. */
+ unsigned int port_id; /* Port ID for incoming packets. */
+ unsigned int elts_n; /* (*elts)[] length. */
+ unsigned int elts_head; /* Current index in (*elts)[]. */
+ union {
+ struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
+ struct rxq_elt (*no_sp)[]; /* RX elements. */
+ } elts;
+ unsigned int sp:1; /* Use scattered RX elements. */
+ unsigned int csum:1; /* Enable checksum offloading. */
+ unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
+ struct mlx4_rxq_stats stats; /* RX queue counters. */
+ unsigned int socket; /* CPU socket ID for allocations. */
+ struct ibv_exp_res_domain *rd; /* Resource Domain. */
+};
+
+/* TX element. */
+struct txq_elt {
+ struct rte_mbuf *buf;
+};
+
+struct mlx4_txq_stats {
+ unsigned int idx; /**< Mapping index. */
+#ifdef MLX4_PMD_SOFT_COUNTERS
+ uint64_t opackets; /**< Total of successfully sent packets. */
+ uint64_t obytes; /**< Total of successfully sent bytes. */
+#endif
+ uint64_t odropped; /**< Total of packets not sent when TX ring full. */
+};
+
+/*
+ * Linear buffer type. It is used when transmitting buffers with too many
+ * segments that do not fit the hardware queue (see max_send_sge).
+ * Extra segments are copied (linearized) in such buffers, replacing the
+ * last SGE during TX.
+ * The size is arbitrary but large enough to hold a jumbo frame with
+ * 8 segments considering mbuf.buf_len is about 2048 bytes.
+ */
+typedef uint8_t linear_t[16384];
+
+/* TX queue descriptor. */
+struct txq {
+ struct priv *priv; /* Back pointer to private data. */
+ struct {
+ const struct rte_mempool *mp; /* Cached Memory Pool. */
+ struct ibv_mr *mr; /* Memory Region (for mp). */
+ uint32_t lkey; /* mr->lkey */
+ } mp2mr[MLX4_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
+ struct ibv_cq *cq; /* Completion Queue. */
+ struct ibv_qp *qp; /* Queue Pair. */
+ struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
+ struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+#if MLX4_PMD_MAX_INLINE > 0
+ uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */
+#endif
+ unsigned int elts_n; /* (*elts)[] length. */
+ struct txq_elt (*elts)[]; /* TX elements. */
+ unsigned int elts_head; /* Current index in (*elts)[]. */
+ unsigned int elts_tail; /* First element awaiting completion. */
+ unsigned int elts_comp; /* Number of completion requests. */
+ unsigned int elts_comp_cd; /* Countdown for next completion request. */
+ unsigned int elts_comp_cd_init; /* Initial value for countdown. */
+ struct mlx4_txq_stats stats; /* TX queue counters. */
+ linear_t (*elts_linear)[]; /* Linearized buffers. */
+ struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
+ unsigned int socket; /* CPU socket ID for allocations. */
+ struct ibv_exp_res_domain *rd; /* Resource Domain. */
+};
+
+struct rte_flow;
+
+struct priv {
+ struct rte_eth_dev *dev; /* Ethernet device. */
+ struct ibv_context *ctx; /* Verbs context. */
+ struct ibv_device_attr device_attr; /* Device properties. */
+ struct ibv_pd *pd; /* Protection Domain. */
+ /*
+ * MAC addresses array and configuration bit-field.
+ * An extra entry that cannot be modified by the DPDK is reserved
+ * for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
+ */
+ struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
+ BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
+ /* VLAN filters. */
+ struct {
+ unsigned int enabled:1; /* If enabled. */
+ unsigned int id:12; /* VLAN ID (0-4095). */
+ } vlan_filter[MLX4_MAX_VLAN_IDS]; /* VLAN filters table. */
+ /* Device properties. */
+ uint16_t mtu; /* Configured MTU. */
+ uint8_t port; /* Physical port number. */
+ unsigned int started:1; /* Device started, flows enabled. */
+ unsigned int promisc:1; /* Device in promiscuous mode. */
+ unsigned int allmulti:1; /* Device receives all multicast packets. */
+ unsigned int hw_qpg:1; /* QP groups are supported. */
+ unsigned int hw_tss:1; /* TSS is supported. */
+ unsigned int hw_rss:1; /* RSS is supported. */
+ unsigned int hw_csum:1; /* Checksum offload is supported. */
+ unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
+ unsigned int rss:1; /* RSS is enabled. */
+ unsigned int vf:1; /* This is a VF device. */
+ unsigned int pending_alarm:1; /* An alarm is pending. */
+#ifdef INLINE_RECV
+ unsigned int inl_recv_size; /* Inline recv size */
+#endif
+ unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */
+ /* RX/TX queues. */
+ struct rxq rxq_parent; /* Parent queue when RSS is enabled. */
+ unsigned int rxqs_n; /* RX queues array size. */
+ unsigned int txqs_n; /* TX queues array size. */
+ struct rxq *(*rxqs)[]; /* RX queues. */
+ struct txq *(*txqs)[]; /* TX queues. */
+ struct rte_intr_handle intr_handle; /* Interrupt handler. */
+ struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
+ LIST_HEAD(mlx4_flows, rte_flow) flows;
+ struct rte_intr_conf intr_conf; /* Active interrupt configuration. */
+ rte_spinlock_t lock; /* Lock for control functions. */
+};
+
+void priv_lock(struct priv *priv);
+void priv_unlock(struct priv *priv);
+
#endif /* RTE_PMD_MLX4_H_ */
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
new file mode 100644
index 00000000..edfac038
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -0,0 +1,1090 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+
+/* Generated configuration header. */
+#include "mlx4_autoconf.h"
+
+/* PMD headers. */
+#include "mlx4.h"
+#include "mlx4_flow.h"
+
+/** Static initializer for items. */
+#define ITEMS(...) \
+ (const enum rte_flow_item_type []){ \
+ __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
+ }
+
+/** Structure to generate a simple graph of layers supported by the NIC. */
+struct mlx4_flow_items {
+ /** List of possible actions for these items. */
+ const enum rte_flow_action_type *const actions;
+ /** Bit-masks corresponding to the possibilities for the item. */
+ const void *mask;
+ /**
+ * Default bit-masks to use when item->mask is not provided. When
+ * \default_mask is also NULL, the full supported bit-mask (\mask) is
+ * used instead.
+ */
+ const void *default_mask;
+ /** Bit-masks size in bytes. */
+ const unsigned int mask_sz;
+ /**
+ * Check support for a given item.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param mask[in]
+ * Bit-masks covering supported fields to compare with spec,
+ * last and mask in
+ * \item.
+ * @param size
+ * Bit-Mask size in bytes.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+ int (*validate)(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size);
+ /**
+ * Conversion function from rte_flow to NIC specific flow.
+ *
+ * @param item
+ * rte_flow item to convert.
+ * @param default_mask
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data
+ * Internal structure to store the conversion.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+ int (*convert)(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+ /** Size in bytes of the destination structure. */
+ const unsigned int dst_sz;
+ /** List of possible following items. */
+ const enum rte_flow_item_type *const items;
+};
+
+struct rte_flow_drop {
+ struct ibv_qp *qp; /**< Verbs queue pair. */
+ struct ibv_cq *cq; /**< Verbs completion queue. */
+};
+
+/** Valid action for this PMD. */
+static const enum rte_flow_action_type valid_actions[] = {
+ RTE_FLOW_ACTION_TYPE_DROP,
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
+/**
+ * Convert Ethernet item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx4_flow_create_eth(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ struct ibv_flow_spec_eth *eth;
+ const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
+ unsigned int i;
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 2;
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *eth = (struct ibv_flow_spec_eth) {
+ .type = IBV_FLOW_SPEC_ETH,
+ .size = eth_size,
+ };
+ if (!spec) {
+ flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
+ return 0;
+ }
+ if (!mask)
+ mask = default_mask;
+ memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
+ eth->val.src_mac[i] &= eth->mask.src_mac[i];
+ }
+ return 0;
+}
+
+/**
+ * Convert VLAN item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx4_flow_create_vlan(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ struct ibv_flow_spec_eth *eth;
+ const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
+
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ eth->val.vlan_tag = spec->tci;
+ eth->mask.vlan_tag = mask->tci;
+ eth->val.vlan_tag &= eth->mask.vlan_tag;
+ return 0;
+}
+
+/**
+ * Convert IPv4 item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx4_flow_create_ipv4(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ struct ibv_flow_spec_ipv4 *ipv4;
+ unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4);
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 1;
+ ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *ipv4 = (struct ibv_flow_spec_ipv4) {
+ .type = IBV_FLOW_SPEC_IPV4,
+ .size = ipv4_size,
+ };
+ if (!spec)
+ return 0;
+ ipv4->val = (struct ibv_flow_ipv4_filter) {
+ .src_ip = spec->hdr.src_addr,
+ .dst_ip = spec->hdr.dst_addr,
+ };
+ if (!mask)
+ mask = default_mask;
+ ipv4->mask = (struct ibv_flow_ipv4_filter) {
+ .src_ip = mask->hdr.src_addr,
+ .dst_ip = mask->hdr.dst_addr,
+ };
+ /* Remove unwanted bits from values. */
+ ipv4->val.src_ip &= ipv4->mask.src_ip;
+ ipv4->val.dst_ip &= ipv4->mask.dst_ip;
+ return 0;
+}
+
+/**
+ * Convert UDP item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx4_flow_create_udp(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ struct ibv_flow_spec_tcp_udp *udp;
+ unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 0;
+ udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *udp = (struct ibv_flow_spec_tcp_udp) {
+ .type = IBV_FLOW_SPEC_UDP,
+ .size = udp_size,
+ };
+ if (!spec)
+ return 0;
+ udp->val.dst_port = spec->hdr.dst_port;
+ udp->val.src_port = spec->hdr.src_port;
+ if (!mask)
+ mask = default_mask;
+ udp->mask.dst_port = mask->hdr.dst_port;
+ udp->mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ udp->val.src_port &= udp->mask.src_port;
+ udp->val.dst_port &= udp->mask.dst_port;
+ return 0;
+}
+
+/**
+ * Convert TCP item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx4_flow_create_tcp(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ struct ibv_flow_spec_tcp_udp *tcp;
+ unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 0;
+ tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *tcp = (struct ibv_flow_spec_tcp_udp) {
+ .type = IBV_FLOW_SPEC_TCP,
+ .size = tcp_size,
+ };
+ if (!spec)
+ return 0;
+ tcp->val.dst_port = spec->hdr.dst_port;
+ tcp->val.src_port = spec->hdr.src_port;
+ if (!mask)
+ mask = default_mask;
+ tcp->mask.dst_port = mask->hdr.dst_port;
+ tcp->mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ tcp->val.src_port &= tcp->mask.src_port;
+ tcp->val.dst_port &= tcp->mask.dst_port;
+ return 0;
+}
+
+/**
+ * Check support for a given item.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param mask[in]
+ * Bit-masks covering supported fields to compare with spec, last and mask in
+ * \item.
+ * @param size
+ * Bit-Mask size in bytes.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+static int
+mlx4_flow_item_validate(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ int ret = 0;
+
+ if (!item->spec && (item->mask || item->last))
+ return -1;
+ if (item->spec && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->spec;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | mask[i]) != mask[i])
+ return -1;
+ }
+ if (item->last && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->last;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | mask[i]) != mask[i])
+ return -1;
+ }
+ if (item->spec && item->last) {
+ uint8_t spec[size];
+ uint8_t last[size];
+ const uint8_t *apply = mask;
+ unsigned int i;
+
+ if (item->mask)
+ apply = item->mask;
+ for (i = 0; i < size; ++i) {
+ spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
+ last[i] = ((const uint8_t *)item->last)[i] & apply[i];
+ }
+ ret = memcmp(spec, last, size);
+ }
+ return ret;
+}
+
+static int
+mlx4_flow_validate_eth(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ if (item->mask) {
+ const struct rte_flow_item_eth *mask = item->mask;
+
+ if (mask->dst.addr_bytes[0] != 0xff ||
+ mask->dst.addr_bytes[1] != 0xff ||
+ mask->dst.addr_bytes[2] != 0xff ||
+ mask->dst.addr_bytes[3] != 0xff ||
+ mask->dst.addr_bytes[4] != 0xff ||
+ mask->dst.addr_bytes[5] != 0xff)
+ return -1;
+ }
+ return mlx4_flow_item_validate(item, mask, size);
+}
+
+static int
+mlx4_flow_validate_vlan(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ if (item->mask) {
+ const struct rte_flow_item_vlan *mask = item->mask;
+
+ if (mask->tci != 0 &&
+ ntohs(mask->tci) != 0x0fff)
+ return -1;
+ }
+ return mlx4_flow_item_validate(item, mask, size);
+}
+
+static int
+mlx4_flow_validate_ipv4(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ if (item->mask) {
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+
+ if (mask->hdr.src_addr != 0 &&
+ mask->hdr.src_addr != 0xffffffff)
+ return -1;
+ if (mask->hdr.dst_addr != 0 &&
+ mask->hdr.dst_addr != 0xffffffff)
+ return -1;
+ }
+ return mlx4_flow_item_validate(item, mask, size);
+}
+
+static int
+mlx4_flow_validate_udp(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ if (item->mask) {
+ const struct rte_flow_item_udp *mask = item->mask;
+
+ if (mask->hdr.src_port != 0 &&
+ mask->hdr.src_port != 0xffff)
+ return -1;
+ if (mask->hdr.dst_port != 0 &&
+ mask->hdr.dst_port != 0xffff)
+ return -1;
+ }
+ return mlx4_flow_item_validate(item, mask, size);
+}
+
+static int
+mlx4_flow_validate_tcp(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ if (item->mask) {
+ const struct rte_flow_item_tcp *mask = item->mask;
+
+ if (mask->hdr.src_port != 0 &&
+ mask->hdr.src_port != 0xffff)
+ return -1;
+ if (mask->hdr.dst_port != 0 &&
+ mask->hdr.dst_port != 0xffff)
+ return -1;
+ }
+ return mlx4_flow_item_validate(item, mask, size);
+}
+
+/** Graph of supported items and associated actions. */
+static const struct mlx4_flow_items mlx4_flow_items[] = {
+ [RTE_FLOW_ITEM_TYPE_END] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+ },
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ .default_mask = &rte_flow_item_eth_mask,
+ .mask_sz = sizeof(struct rte_flow_item_eth),
+ .validate = mlx4_flow_validate_eth,
+ .convert = mlx4_flow_create_eth,
+ .dst_sz = sizeof(struct ibv_flow_spec_eth),
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_vlan){
+ /* rte_flow_item_vlan_mask is invalid for mlx4. */
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ .tci = 0x0fff,
+#else
+ .tci = 0xff0f,
+#endif
+ },
+ .mask_sz = sizeof(struct rte_flow_item_vlan),
+ .validate = mlx4_flow_validate_vlan,
+ .convert = mlx4_flow_create_vlan,
+ .dst_sz = 0,
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_ipv4){
+ .hdr = {
+ .src_addr = -1,
+ .dst_addr = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_ipv4_mask,
+ .mask_sz = sizeof(struct rte_flow_item_ipv4),
+ .validate = mlx4_flow_validate_ipv4,
+ .convert = mlx4_flow_create_ipv4,
+ .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_udp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_udp_mask,
+ .mask_sz = sizeof(struct rte_flow_item_udp),
+ .validate = mlx4_flow_validate_udp,
+ .convert = mlx4_flow_create_udp,
+ .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_tcp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_tcp_mask,
+ .mask_sz = sizeof(struct rte_flow_item_tcp),
+ .validate = mlx4_flow_validate_tcp,
+ .convert = mlx4_flow_create_tcp,
+ .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
+ },
+};
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] flow
+ * Flow structure to update.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_validate(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct mlx4_flow *flow)
+{
+ const struct mlx4_flow_items *cur_item = mlx4_flow_items;
+ struct mlx4_flow_action action = {
+ .queue = 0,
+ .drop = 0,
+ };
+
+ (void)priv;
+ if (attr->group) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "groups are not supported");
+ return -rte_errno;
+ }
+ if (attr->priority) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priorities are not supported");
+ return -rte_errno;
+ }
+ if (attr->egress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "egress is not supported");
+ return -rte_errno;
+ }
+ if (!attr->ingress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "only ingress is supported");
+ return -rte_errno;
+ }
+ /* Go over items list. */
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ const struct mlx4_flow_items *token = NULL;
+ unsigned int i;
+ int err;
+
+ if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ /*
+ * The nic can support patterns with NULL eth spec only
+ * if eth is a single item in a rule.
+ */
+ if (!items->spec &&
+ items->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ const struct rte_flow_item *next = items + 1;
+
+ if (next->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "the rule requires"
+ " an Ethernet spec");
+ return -rte_errno;
+ }
+ }
+ for (i = 0;
+ cur_item->items &&
+ cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
+ ++i) {
+ if (cur_item->items[i] == items->type) {
+ token = &mlx4_flow_items[items->type];
+ break;
+ }
+ }
+ if (!token)
+ goto exit_item_not_supported;
+ cur_item = token;
+ err = cur_item->validate(items,
+ (const uint8_t *)cur_item->mask,
+ cur_item->mask_sz);
+ if (err)
+ goto exit_item_not_supported;
+ if (flow->ibv_attr && cur_item->convert) {
+ err = cur_item->convert(items,
+ (cur_item->default_mask ?
+ cur_item->default_mask :
+ cur_item->mask),
+ flow);
+ if (err)
+ goto exit_item_not_supported;
+ }
+ flow->offset += cur_item->dst_sz;
+ }
+ /* Go over actions list */
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+ if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ continue;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ action.drop = 1;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *queue =
+ (const struct rte_flow_action_queue *)
+ actions->conf;
+
+ if (!queue || (queue->index > (priv->rxqs_n - 1)))
+ goto exit_action_not_supported;
+ action.queue = 1;
+ } else {
+ goto exit_action_not_supported;
+ }
+ }
+ if (!action.queue && !action.drop) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "no valid action");
+ return -rte_errno;
+ }
+ return 0;
+exit_item_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
+ return -rte_errno;
+exit_action_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "action not supported");
+ return -rte_errno;
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx4_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+ struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };
+
+ priv_lock(priv);
+ ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
+ priv_unlock(priv);
+ return ret;
+}
+
+/**
+ * Destroy a drop queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+mlx4_flow_destroy_drop_queue(struct priv *priv)
+{
+ if (priv->flow_drop_queue) {
+ struct rte_flow_drop *fdq = priv->flow_drop_queue;
+
+ priv->flow_drop_queue = NULL;
+ claim_zero(ibv_destroy_qp(fdq->qp));
+ claim_zero(ibv_destroy_cq(fdq->cq));
+ rte_free(fdq);
+ }
+}
+
+/**
+ * Create a single drop queue for all drop flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+static int
+mlx4_flow_create_drop_queue(struct priv *priv)
+{
+ struct ibv_qp *qp;
+ struct ibv_cq *cq;
+ struct rte_flow_drop *fdq;
+
+ fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
+ if (!fdq) {
+ ERROR("Cannot allocate memory for drop struct");
+ goto err;
+ }
+ cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
+ &(struct ibv_exp_cq_init_attr){
+ .comp_mask = 0,
+ });
+ if (!cq) {
+ ERROR("Cannot create drop CQ");
+ goto err_create_cq;
+ }
+ qp = ibv_exp_create_qp(priv->ctx,
+ &(struct ibv_exp_qp_init_attr){
+ .send_cq = cq,
+ .recv_cq = cq,
+ .cap = {
+ .max_recv_wr = 1,
+ .max_recv_sge = 1,
+ },
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_EXP_QP_INIT_ATTR_PD |
+ IBV_EXP_QP_INIT_ATTR_PORT,
+ .pd = priv->pd,
+ .port_num = priv->port,
+ });
+ if (!qp) {
+ ERROR("Cannot create drop QP");
+ goto err_create_qp;
+ }
+ *fdq = (struct rte_flow_drop){
+ .qp = qp,
+ .cq = cq,
+ };
+ priv->flow_drop_queue = fdq;
+ return 0;
+err_create_qp:
+ claim_zero(ibv_destroy_cq(cq));
+err_create_cq:
+ rte_free(fdq);
+err:
+ return -1;
+}
+
+/**
+ * Complete flow rule creation.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param ibv_attr
+ * Verbs flow attributes.
+ * @param action
+ * Target action structure.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow if the rule could be created.
+ */
+static struct rte_flow *
+priv_flow_create_action_queue(struct priv *priv,
+ struct ibv_flow_attr *ibv_attr,
+ struct mlx4_flow_action *action,
+ struct rte_flow_error *error)
+{
+ struct ibv_qp *qp;
+ struct rte_flow *rte_flow;
+
+ assert(priv->pd);
+ assert(priv->ctx);
+ rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+ if (!rte_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate flow memory");
+ return NULL;
+ }
+ if (action->drop) {
+ qp = priv->flow_drop_queue->qp;
+ } else {
+ struct rxq *rxq = (*priv->rxqs)[action->queue_id];
+
+ qp = rxq->qp;
+ rte_flow->qp = qp;
+ }
+ rte_flow->ibv_attr = ibv_attr;
+ rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
+ if (!rte_flow->ibv_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "flow rule creation failure");
+ goto error;
+ }
+ return rte_flow;
+
+error:
+ rte_free(rte_flow);
+ return NULL;
+}
+
+/**
+ * Convert a flow.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow on success, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow *rte_flow;
+ struct mlx4_flow_action action;
+ struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), };
+ int err;
+
+ err = priv_flow_validate(priv, attr, items, actions, error, &flow);
+ if (err)
+ return NULL;
+ flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
+ if (!flow.ibv_attr) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate ibv_attr memory");
+ return NULL;
+ }
+ flow.offset = sizeof(struct ibv_flow_attr);
+ *flow.ibv_attr = (struct ibv_flow_attr){
+ .comp_mask = 0,
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .size = sizeof(struct ibv_flow_attr),
+ .priority = attr->priority,
+ .num_of_specs = 0,
+ .port = priv->port,
+ .flags = 0,
+ };
+ claim_zero(priv_flow_validate(priv, attr, items, actions,
+ error, &flow));
+ action = (struct mlx4_flow_action){
+ .queue = 0,
+ .drop = 0,
+ };
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+ if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ continue;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ action.queue = 1;
+ action.queue_id =
+ ((const struct rte_flow_action_queue *)
+ actions->conf)->index;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ action.drop = 1;
+ } else {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "unsupported action");
+ goto exit;
+ }
+ }
+ rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
+ &action, error);
+ if (rte_flow)
+ return rte_flow;
+exit:
+ rte_free(flow.ibv_attr);
+ return NULL;
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+struct rte_flow *
+mlx4_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow;
+
+ priv_lock(priv);
+ flow = priv_flow_create(priv, attr, items, actions, error);
+ if (flow) {
+ LIST_INSERT_HEAD(&priv->flows, flow, next);
+ DEBUG("Flow created %p", (void *)flow);
+ }
+ priv_unlock(priv);
+ return flow;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] flow
+ * Flow to destroy.
+ */
+static void
+priv_flow_destroy(struct priv *priv, struct rte_flow *flow)
+{
+ (void)priv;
+ LIST_REMOVE(flow, next);
+ if (flow->ibv_flow)
+ claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ rte_free(flow->ibv_attr);
+ DEBUG("Flow destroyed %p", (void *)flow);
+ rte_free(flow);
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+int
+mlx4_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_destroy(priv, flow);
+ priv_unlock(priv);
+ return 0;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_flow_flush(struct priv *priv)
+{
+ while (!LIST_EMPTY(&priv->flows)) {
+ struct rte_flow *flow;
+
+ flow = LIST_FIRST(&priv->flows);
+ priv_flow_destroy(priv, flow);
+ }
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx4_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_flush(priv);
+ priv_unlock(priv);
+ return 0;
+}
+
+/**
+ * Remove all flows.
+ *
+ * Called by dev_stop() to remove all flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+mlx4_priv_flow_stop(struct priv *priv)
+{
+ struct rte_flow *flow;
+
+ for (flow = LIST_FIRST(&priv->flows);
+ flow;
+ flow = LIST_NEXT(flow, next)) {
+ claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ flow->ibv_flow = NULL;
+ DEBUG("Flow %p removed", (void *)flow);
+ }
+ mlx4_flow_destroy_drop_queue(priv);
+}
+
+/**
+ * Add all flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, a errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_priv_flow_start(struct priv *priv)
+{
+ int ret;
+ struct ibv_qp *qp;
+ struct rte_flow *flow;
+
+ ret = mlx4_flow_create_drop_queue(priv);
+ if (ret)
+ return -1;
+ for (flow = LIST_FIRST(&priv->flows);
+ flow;
+ flow = LIST_NEXT(flow, next)) {
+ qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp;
+ flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
+ if (!flow->ibv_flow) {
+ DEBUG("Flow %p cannot be applied", (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DEBUG("Flow %p applied", (void *)flow);
+ }
+ return 0;
+}
diff --git a/drivers/net/mlx4/mlx4_flow.h b/drivers/net/mlx4/mlx4_flow.h
new file mode 100644
index 00000000..12a293e4
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_flow.h
@@ -0,0 +1,102 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_PMD_MLX4_FLOW_H_
+#define RTE_PMD_MLX4_FLOW_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_byteorder.h>
+
+#include "mlx4.h"
+
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+ struct ibv_flow *ibv_flow; /**< Verbs flow. */
+ struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+ struct ibv_qp *qp; /**< Verbs queue pair. */
+};
+
+int
+mlx4_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+struct rte_flow *
+mlx4_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+int
+mlx4_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+
+int
+mlx4_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
+
+/** Structure to pass to the conversion function. */
+struct mlx4_flow {
+ struct ibv_flow_attr *ibv_attr; /**< Verbs attribute. */
+ unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
+};
+
+struct mlx4_flow_action {
+ uint32_t drop:1; /**< Target is a drop queue. */
+ uint32_t queue:1; /**< Target is a receive queue. */
+ uint32_t queue_id; /**< Identifier of the queue. */
+};
+
+int mlx4_priv_flow_start(struct priv *priv);
+void mlx4_priv_flow_stop(struct priv *priv);
+
+#endif /* RTE_PMD_MLX4_FLOW_H_ */
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index cf87f0b1..c0799591 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -48,13 +48,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
-
-# Dependencies.
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_kvargs
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
# Basic CFLAGS.
CFLAGS += -O3
@@ -104,7 +98,7 @@ endif
mlx5_autoconf.h.new: FORCE
-mlx5_autoconf.h.new: $(RTE_SDK)/scripts/auto-config-h.sh
+mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE \
@@ -136,6 +130,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/scripts/auto-config-h.sh
/usr/include/linux/ethtool.h \
enum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \
$(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_UPDATE_CQ_CI \
+ infiniband/mlx5_hw.h \
+ func ibv_mlx5_exp_update_cq_ci \
+ $(AUTOCONF_OUTPUT)
# Create mlx5_autoconf.h or update it in case it differs from the new one.
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index cb45fd0f..fc99c0d5 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -56,6 +56,7 @@
#endif
#include <rte_malloc.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_common.h>
#include <rte_kvargs.h>
@@ -84,6 +85,27 @@
/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"
+/* Device parameter to include 2 dsegs in the title WQEBB. */
+#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"
+
+/* Device parameter to limit the size of inlining packet. */
+#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
+
+/* Device parameter to enable hardware TSO offload. */
+#define MLX5_TSO "tso"
+
+/* Default PMD specific parameter value. */
+#define MLX5_ARG_UNSET (-1)
+
+struct mlx5_args {
+ int cqe_comp;
+ int txq_inline;
+ int txqs_inline;
+ int mps;
+ int mpw_hdr_dseg;
+ int inline_max_packet_sz;
+ int tso;
+};
/**
* Retrieve integer value from environment variable.
*
@@ -199,6 +221,9 @@ static const struct eth_dev_ops mlx5_dev_ops = {
.link_update = mlx5_link_update,
.stats_get = mlx5_stats_get,
.stats_reset = mlx5_stats_reset,
+ .xstats_get = mlx5_xstats_get,
+ .xstats_reset = mlx5_xstats_reset,
+ .xstats_get_names = mlx5_xstats_get_names,
.dev_infos_get = mlx5_dev_infos_get,
.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
.vlan_filter_set = mlx5_vlan_filter_set,
@@ -219,6 +244,10 @@ static const struct eth_dev_ops mlx5_dev_ops = {
.rss_hash_update = mlx5_rss_hash_update,
.rss_hash_conf_get = mlx5_rss_hash_conf_get,
.filter_ctrl = mlx5_dev_filter_ctrl,
+ .rx_descriptor_status = mlx5_rx_descriptor_status,
+ .tx_descriptor_status = mlx5_tx_descriptor_status,
+ .rx_queue_intr_enable = mlx5_rx_intr_enable,
+ .rx_queue_intr_disable = mlx5_rx_intr_disable,
};
static struct {
@@ -270,7 +299,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
- struct priv *priv = opaque;
+ struct mlx5_args *args = opaque;
unsigned long tmp;
errno = 0;
@@ -280,13 +309,19 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
return errno;
}
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
- priv->cqe_comp = !!tmp;
+ args->cqe_comp = !!tmp;
} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
- priv->txq_inline = tmp;
+ args->txq_inline = tmp;
} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
- priv->txqs_inline = tmp;
+ args->txqs_inline = tmp;
} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
- priv->mps = !!tmp;
+ args->mps = !!tmp;
+ } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
+ args->mpw_hdr_dseg = !!tmp;
+ } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
+ args->inline_max_packet_sz = tmp;
+ } else if (strcmp(MLX5_TSO, key) == 0) {
+ args->tso = !!tmp;
} else {
WARN("%s: unknown parameter", key);
return -EINVAL;
@@ -306,13 +341,16 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
* 0 on success, errno value on failure.
*/
static int
-mlx5_args(struct priv *priv, struct rte_devargs *devargs)
+mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
{
const char **params = (const char *[]){
MLX5_RXQ_CQE_COMP_EN,
MLX5_TXQ_INLINE,
MLX5_TXQS_MIN_INLINE,
MLX5_TXQ_MPW_EN,
+ MLX5_TXQ_MPW_HDR_DSEG_EN,
+ MLX5_TXQ_MAX_INLINE_LEN,
+ MLX5_TSO,
NULL,
};
struct rte_kvargs *kvlist;
@@ -329,7 +367,7 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs)
for (i = 0; (params[i] != NULL); ++i) {
if (rte_kvargs_count(kvlist, params[i])) {
ret = rte_kvargs_process(kvlist, params[i],
- mlx5_args_check, priv);
+ mlx5_args_check, args);
if (ret != 0) {
rte_kvargs_free(kvlist);
return ret;
@@ -340,7 +378,35 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs)
return 0;
}
-static struct eth_driver mlx5_driver;
+static struct rte_pci_driver mlx5_driver;
+
+/**
+ * Assign parameters from args into priv, only non default
+ * values are considered.
+ *
+ * @param[out] priv
+ * Pointer to private structure.
+ * @param[in] args
+ * Pointer to args values.
+ */
+static void
+mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
+{
+ if (args->cqe_comp != MLX5_ARG_UNSET)
+ priv->cqe_comp = args->cqe_comp;
+ if (args->txq_inline != MLX5_ARG_UNSET)
+ priv->txq_inline = args->txq_inline;
+ if (args->txqs_inline != MLX5_ARG_UNSET)
+ priv->txqs_inline = args->txqs_inline;
+ if (args->mps != MLX5_ARG_UNSET)
+ priv->mps = args->mps ? priv->mps : 0;
+ if (args->mpw_hdr_dseg != MLX5_ARG_UNSET)
+ priv->mpw_hdr_dseg = args->mpw_hdr_dseg;
+ if (args->inline_max_packet_sz != MLX5_ARG_UNSET)
+ priv->inline_max_packet_sz = args->inline_max_packet_sz;
+ if (args->tso != MLX5_ARG_UNSET)
+ priv->tso = args->tso;
+}
/**
* DPDK callback to register a PCI device.
@@ -366,11 +432,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_device_attr device_attr;
unsigned int sriov;
unsigned int mps;
+ unsigned int tunnel_en;
int idx;
int i;
(void)pci_drv;
- assert(pci_drv == &mlx5_driver.pci_drv);
+ assert(pci_drv == &mlx5_driver);
/* Get mlx5_dev[] index. */
idx = mlx5_dev_idx(&pci_dev->addr);
if (idx == -1) {
@@ -384,10 +451,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
list = ibv_get_device_list(&i);
if (list == NULL) {
assert(errno);
- if (errno == ENOSYS) {
- WARN("cannot list devices, is ib_uverbs loaded?");
- return 0;
- }
+ if (errno == ENOSYS)
+ ERROR("cannot list devices, is ib_uverbs loaded?");
return -errno;
}
assert(i >= 0);
@@ -410,15 +475,39 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
sriov = ((pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
(pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF));
- /* Multi-packet send is only supported by ConnectX-4 Lx PF. */
- mps = (pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX4LX);
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) ||
+ (pci_dev->id.device_id ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) ||
+ (pci_dev->id.device_id ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF));
+ /*
+ * Multi-packet send is supported by ConnectX-4 Lx PF as well
+ * as all ConnectX-5 devices.
+ */
+ switch (pci_dev->id.device_id) {
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
+ tunnel_en = 1;
+ mps = MLX5_MPW_DISABLED;
+ break;
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
+ mps = MLX5_MPW;
+ break;
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
+ tunnel_en = 1;
+ mps = MLX5_MPW_ENHANCED;
+ break;
+ default:
+ mps = MLX5_MPW_DISABLED;
+ }
INFO("PCI information matches, using device \"%s\""
- " (SR-IOV: %s, MPS: %s)",
+ " (SR-IOV: %s, %sMPS: %s)",
list[i]->name,
sriov ? "true" : "false",
- mps ? "true" : "false");
+ mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
+ mps != MLX5_MPW_DISABLED ? "true" : "false");
attr_ctx = ibv_open_device(list[i]);
err = errno;
break;
@@ -427,11 +516,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_free_device_list(list);
switch (err) {
case 0:
- WARN("cannot access device, is mlx5_ib loaded?");
- return 0;
+ ERROR("cannot access device, is mlx5_ib loaded?");
+ return -ENODEV;
case EINVAL:
- WARN("cannot use device, are drivers up to date?");
- return 0;
+ ERROR("cannot use device, are drivers up to date?");
+ return -EINVAL;
}
assert(err > 0);
return -err;
@@ -454,12 +543,22 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_exp_device_attr exp_device_attr;
struct ether_addr mac;
uint16_t num_vfs = 0;
+ struct mlx5_args args = {
+ .cqe_comp = MLX5_ARG_UNSET,
+ .txq_inline = MLX5_ARG_UNSET,
+ .txqs_inline = MLX5_ARG_UNSET,
+ .mps = MLX5_ARG_UNSET,
+ .mpw_hdr_dseg = MLX5_ARG_UNSET,
+ .inline_max_packet_sz = MLX5_ARG_UNSET,
+ .tso = MLX5_ARG_UNSET,
+ };
exp_device_attr.comp_mask =
IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS |
IBV_EXP_DEVICE_ATTR_RX_HASH |
IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
+ IBV_EXP_DEVICE_ATTR_TSO_CAPS |
0;
DEBUG("using port %u (%08" PRIx32 ")", port, test);
@@ -513,12 +612,14 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->mtu = ETHER_MTU;
priv->mps = mps; /* Enable MPW by default if supported. */
priv->cqe_comp = 1; /* Enable compression by default. */
- err = mlx5_args(priv, pci_dev->device.devargs);
+ priv->tunnel_en = tunnel_en;
+ err = mlx5_args(&args, pci_dev->device.devargs);
if (err) {
ERROR("failed to process device arguments: %s",
strerror(err));
goto port_error;
}
+ mlx5_args_assign(priv, &args);
if (ibv_exp_query_device(ctx, &exp_device_attr)) {
ERROR("ibv_exp_query_device() failed");
goto port_error;
@@ -540,8 +641,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->ind_table_max_size = exp_device_attr.rx_hash_caps.max_rwq_indirection_table_size;
/* Remove this check once DPDK supports larger/variable
* indirection tables. */
- if (priv->ind_table_max_size > (unsigned int)RSS_INDIRECTION_TABLE_SIZE)
- priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
+ if (priv->ind_table_max_size >
+ (unsigned int)ETH_RSS_RETA_SIZE_512)
+ priv->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
DEBUG("maximum RX indirection table size is %u",
priv->ind_table_max_size);
priv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap &
@@ -560,11 +662,36 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv_get_num_vfs(priv, &num_vfs);
priv->sriov = (num_vfs || sriov);
+ priv->tso = ((priv->tso) &&
+ (exp_device_attr.tso_caps.max_tso > 0) &&
+ (exp_device_attr.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_ETH)));
+ if (priv->tso)
+ priv->max_tso_payload_sz =
+ exp_device_attr.tso_caps.max_tso;
if (priv->mps && !mps) {
ERROR("multi-packet send not supported on this device"
" (" MLX5_TXQ_MPW_EN ")");
err = ENOTSUP;
goto port_error;
+ } else if (priv->mps && priv->tso) {
+ WARN("multi-packet send not supported in conjunction "
+ "with TSO. MPS disabled");
+ priv->mps = 0;
+ }
+ INFO("%sMPS is %s",
+ priv->mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
+ priv->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ /* Set default values for Enhanced MPW, a.k.a MPWv2. */
+ if (priv->mps == MLX5_MPW_ENHANCED) {
+ if (args.txqs_inline == MLX5_ARG_UNSET)
+ priv->txqs_inline = MLX5_EMPW_MIN_TXQS;
+ if (args.inline_max_packet_sz == MLX5_ARG_UNSET)
+ priv->inline_max_packet_sz =
+ MLX5_EMPW_MAX_INLINE_LEN;
+ if (args.txq_inline == MLX5_ARG_UNSET)
+ priv->txq_inline = MLX5_WQE_SIZE_MAX -
+ MLX5_WQE_SIZE;
}
/* Allocate and register default RSS hash keys. */
priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
@@ -654,23 +781,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev->rx_pkt_burst = mlx5_rx_burst_secondary_setup;
} else {
eth_dev->data->dev_private = priv;
- eth_dev->data->rx_mbuf_alloc_failed = 0;
- eth_dev->data->mtu = ETHER_MTU;
eth_dev->data->mac_addrs = priv->mac;
}
- eth_dev->pci_dev = pci_dev;
+ eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->driver = &mlx5_driver;
+ eth_dev->device->driver = &mlx5_driver.driver;
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx5_dev_ops;
- TAILQ_INIT(&eth_dev->link_intr_cbs);
-
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
priv_set_flags(priv, ~IFF_UP, IFF_UP);
- mlx5_link_update_unlocked(priv->dev, 1);
+ mlx5_link_update(priv->dev, 1);
continue;
port_error:
@@ -725,20 +848,33 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
},
{
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
+ },
+ {
.vendor_id = 0
}
};
-static struct eth_driver mlx5_driver = {
- .pci_drv = {
- .driver = {
- .name = MLX5_DRIVER_NAME
- },
- .id_table = mlx5_pci_id_map,
- .probe = mlx5_pci_probe,
- .drv_flags = RTE_PCI_DRV_INTR_LSC,
+static struct rte_pci_driver mlx5_driver = {
+ .driver = {
+ .name = MLX5_DRIVER_NAME
},
- .dev_private_size = sizeof(struct priv)
+ .id_table = mlx5_pci_id_map,
+ .probe = mlx5_pci_probe,
+ .drv_flags = RTE_PCI_DRV_INTR_LSC,
};
/**
@@ -756,8 +892,9 @@ rte_mlx5_pmd_init(void)
*/
setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
ibv_fork_init();
- rte_eal_pci_register(&mlx5_driver.pci_drv);
+ rte_pci_register(&mlx5_driver);
}
RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 79b7a600..67fd7428 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -59,6 +59,7 @@
#include <rte_spinlock.h>
#include <rte_interrupts.h>
#include <rte_errno.h>
+#include <rte_flow.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
@@ -82,6 +83,18 @@ enum {
PCI_DEVICE_ID_MELLANOX_CONNECTX4VF = 0x1014,
PCI_DEVICE_ID_MELLANOX_CONNECTX4LX = 0x1015,
PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5 = 0x1017,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
+};
+
+struct mlx5_xstats_ctrl {
+ /* Number of device stats. */
+ uint16_t stats_n;
+ /* Index in the device counters table. */
+ uint16_t dev_table_idx[MLX5_MAX_XSTATS];
+ uint64_t base[MLX5_MAX_XSTATS];
};
struct priv {
@@ -110,11 +123,17 @@ struct priv {
unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
unsigned int hw_padding:1; /* End alignment padding is supported. */
unsigned int sriov:1; /* This is a VF or PF with VF devices. */
- unsigned int mps:1; /* Whether multi-packet send is supported. */
+ unsigned int mps:2; /* Multi-packet send mode (0: disabled). */
+ unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
unsigned int pending_alarm:1; /* An alarm is pending. */
+ unsigned int tso:1; /* Whether TSO is supported. */
+ unsigned int tunnel_en:1;
+ /* Whether Tx offloads for tunneled packets are supported. */
+ unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int txq_inline; /* Maximum packet size for inlining. */
unsigned int txqs_inline; /* Queue number threshold for inlining. */
+ unsigned int inline_max_packet_sz; /* Max packet size for inlining. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
@@ -135,7 +154,10 @@ struct priv {
unsigned int reta_idx_n; /* RETA index size. */
struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
struct fdir_queue *fdir_drop_queue; /* Flow director drop queue. */
+ struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
+ LIST_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
uint32_t link_speed_capa; /* Link speed capabilities. */
+ struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
rte_spinlock_t lock; /* Lock for control functions. */
};
@@ -182,13 +204,14 @@ struct priv *mlx5_get_priv(struct rte_eth_dev *dev);
int mlx5_is_secondary(void);
int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]);
int priv_ifreq(const struct priv *, int req, struct ifreq *);
+int priv_is_ib_cntr(const char *);
+int priv_get_cntr_sysfs(struct priv *, const char *, uint64_t *);
int priv_get_num_vfs(struct priv *, uint16_t *);
int priv_get_mtu(struct priv *, uint16_t *);
int priv_set_flags(struct priv *, unsigned int, unsigned int);
int mlx5_dev_configure(struct rte_eth_dev *);
void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *);
const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
-int mlx5_link_update_unlocked(struct rte_eth_dev *, int);
int mlx5_link_update(struct rte_eth_dev *, int);
int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t);
int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
@@ -196,7 +219,7 @@ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
int mlx5_ibv_device_to_pci_addr(const struct ibv_device *,
struct rte_pci_addr *);
void mlx5_dev_link_status_handler(void *);
-void mlx5_dev_interrupt_handler(struct rte_intr_handle *, void *);
+void mlx5_dev_interrupt_handler(void *);
void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
int mlx5_set_link_down(struct rte_eth_dev *dev);
@@ -215,8 +238,8 @@ int hash_rxq_mac_addrs_add(struct hash_rxq *);
int priv_mac_addr_add(struct priv *, unsigned int,
const uint8_t (*)[ETHER_ADDR_LEN]);
int priv_mac_addrs_enable(struct priv *);
-void mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t,
- uint32_t);
+int mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t,
+ uint32_t);
void mlx5_mac_addr_set(struct rte_eth_dev *, struct ether_addr *);
/* mlx5_rss.c */
@@ -244,8 +267,14 @@ void mlx5_allmulticast_disable(struct rte_eth_dev *);
/* mlx5_stats.c */
+void priv_xstats_init(struct priv *);
void mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *);
void mlx5_stats_reset(struct rte_eth_dev *);
+int mlx5_xstats_get(struct rte_eth_dev *,
+ struct rte_eth_xstat *, unsigned int);
+void mlx5_xstats_reset(struct rte_eth_dev *);
+int mlx5_xstats_get_names(struct rte_eth_dev *,
+ struct rte_eth_xstat_name *, unsigned int);
/* mlx5_vlan.c */
@@ -268,4 +297,22 @@ void priv_fdir_enable(struct priv *);
int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type,
enum rte_filter_op, void *);
+/* mlx5_flow.c */
+
+int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *,
+ const struct rte_flow_item [],
+ const struct rte_flow_action [],
+ struct rte_flow_error *);
+struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
+ const struct rte_flow_attr *,
+ const struct rte_flow_item [],
+ const struct rte_flow_action [],
+ struct rte_flow_error *);
+int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
+ struct rte_flow_error *);
+int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+int priv_flow_start(struct priv *);
+void priv_flow_stop(struct priv *);
+int priv_flow_rxq_in_use(struct priv *, struct rxq *);
+
#endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index b32816e6..201bb336 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -54,8 +54,12 @@
*/
#define MLX5_TX_COMP_THRESH 32
-/* RSS Indirection table size. */
-#define RSS_INDIRECTION_TABLE_SIZE 256
+/*
+ * Request TX completion every time the total number of WQEBBs used for inlining
+ * packets exceeds the size of WQ divided by this divisor. Better to be power of
+ * two for performance.
+ */
+#define MLX5_TX_COMP_THRESH_INLINE_DIV (1 << 3)
/*
* Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
@@ -79,4 +83,10 @@
/* Alarm timeout. */
#define MLX5_ALARM_TIMEOUT_US 100000
+/* Maximum number of extended statistics counters. */
+#define MLX5_MAX_XSTATS 32
+
+/* Maximum Packet headers size (L2+L3+L4) for TSO. */
+#define MLX5_MAX_TSO_HEADER 128
+
#endif /* RTE_PMD_MLX5_DEFS_H_ */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 06cfd016..3fd22cb8 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -234,6 +234,23 @@ try_dev_id:
}
/**
+ * Check if the counter is located on ib counters file.
+ *
+ * @param[in] cntr
+ * Counter name.
+ *
+ * @return
+ * 1 if counter is located on ib counters file , 0 otherwise.
+ */
+int
+priv_is_ib_cntr(const char *cntr)
+{
+ if (!strcmp(cntr, "out_of_buffer"))
+ return 1;
+ return 0;
+}
+
+/**
* Read from sysfs entry.
*
* @param[in] priv
@@ -260,10 +277,15 @@ priv_sysfs_read(const struct priv *priv, const char *entry,
if (priv_get_ifname(priv, &ifname))
return -1;
- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
- ifname, entry);
-
- file = fopen(path, "rb");
+ if (priv_is_ib_cntr(entry)) {
+ MKSTR(path, "%s/ports/1/hw_counters/%s",
+ priv->ctx->device->ibdev_path, entry);
+ file = fopen(path, "rb");
+ } else {
+ MKSTR(path, "%s/device/net/%s/%s",
+ priv->ctx->device->ibdev_path, ifname, entry);
+ file = fopen(path, "rb");
+ }
if (file == NULL)
return -1;
ret = fread(buf, 1, size, file);
@@ -469,6 +491,30 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu)
}
/**
+ * Read device counter from sysfs.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param name
+ * Counter name.
+ * @param[out] cntr
+ * Counter output buffer.
+ *
+ * @return
+ * 0 on success, -1 on failure and errno is set.
+ */
+int
+priv_get_cntr_sysfs(struct priv *priv, const char *name, uint64_t *cntr)
+{
+ unsigned long ulong_ctr;
+
+ if (priv_get_sysfs_ulong(priv, name, &ulong_ctr) == -1)
+ return -1;
+ *cntr = ulong_ctr;
+ return 0;
+}
+
+/**
* Set device MTU.
*
* @param priv
@@ -615,6 +661,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
unsigned int max;
char ifname[IF_NAMESIZE];
+ info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
priv_lock(priv);
/* FIXME: we should ask the device for these values. */
info->min_rx_bufsize = 32;
@@ -645,14 +693,16 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
(DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM);
+ if (priv->tso)
+ info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (priv->tunnel_en)
+ info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
- /* FIXME: RETA update/query API expects the callee to know the size of
- * the indirection table, for this PMD the size varies depending on
- * the number of RX queues, it becomes impossible to find the correct
- * size if it is not fixed.
- * The API should be updated to solve this problem. */
- info->reta_size = priv->ind_table_max_size;
+ info->reta_size = priv->reta_idx_n ?
+ priv->reta_idx_n : priv->ind_table_max_size;
info->hash_key_size = ((*priv->rss_conf) ?
(*priv->rss_conf)[0]->rss_key_len :
0);
@@ -665,10 +715,10 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
static const uint32_t ptypes[] = {
/* refers to rxq_cq_to_pkt_type() */
- RTE_PTYPE_L3_IPV4,
- RTE_PTYPE_L3_IPV6,
- RTE_PTYPE_INNER_L3_IPV4,
- RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
RTE_PTYPE_UNKNOWN
};
@@ -679,7 +729,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
}
/**
- * Retrieve physical link information (unlocked version using legacy ioctl).
+ * DPDK callback to retrieve physical link information.
*
* @param dev
* Pointer to Ethernet device structure.
@@ -697,6 +747,8 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
struct rte_eth_link dev_link;
int link_speed = 0;
+ /* priv_lock() is not taken to allow concurrent calls. */
+
(void)wait_to_complete;
if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
@@ -827,7 +879,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
}
/**
- * DPDK callback to retrieve physical link information (unlocked version).
+ * DPDK callback to retrieve physical link information.
*
* @param dev
* Pointer to Ethernet device structure.
@@ -835,7 +887,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
* Wait for request completion (ignored).
*/
int
-mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
struct utsname utsname;
int ver[3];
@@ -849,26 +901,6 @@ mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
}
/**
- * DPDK callback to retrieve physical link information.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
- */
-int
-mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
-{
- struct priv *priv = mlx5_get_priv(dev);
- int ret;
-
- priv_lock(priv);
- ret = mlx5_link_update_unlocked(dev, wait_to_complete);
- priv_unlock(priv);
- return ret;
-}
-
-/**
* DPDK callback to change the MTU.
*
* Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
@@ -959,7 +991,6 @@ recover:
struct rxq *rxq = (*priv->rxqs)[i];
struct rxq_ctrl *rxq_ctrl =
container_of(rxq, struct rxq_ctrl, rxq);
- int sp;
unsigned int mb_len;
unsigned int tmp;
@@ -967,10 +998,9 @@ recover:
continue;
mb_len = rte_pktmbuf_data_room_size(rxq->mp);
assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- /* Toggle scattered support (sp) if necessary. */
- sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
/* Provide new values to rxq_setup(). */
- dev->data->dev_conf.rxmode.jumbo_frame = sp;
+ dev->data->dev_conf.rxmode.jumbo_frame =
+ (max_frame_len > ETHER_MAX_LEN);
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
if (rehash)
ret = rxq_rehash(dev, rxq_ctrl);
@@ -1244,13 +1274,12 @@ mlx5_dev_link_status_handler(void *arg)
* Callback argument.
*/
void
-mlx5_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
+mlx5_dev_interrupt_handler(void *cb_arg)
{
struct rte_eth_dev *dev = cb_arg;
struct priv *priv = dev->data->dev_private;
int ret;
- (void)intr_handle;
priv_lock(priv);
ret = priv_dev_link_status_handler(priv, dev);
priv_unlock(priv);
@@ -1553,14 +1582,15 @@ void
priv_select_tx_function(struct priv *priv)
{
priv->dev->tx_pkt_burst = mlx5_tx_burst;
- /* Display warning for unsupported configurations. */
- if (priv->sriov && priv->mps)
- WARN("multi-packet send WQE cannot be used on a SR-IOV setup");
/* Select appropriate TX function. */
- if ((priv->sriov == 0) && priv->mps && priv->txq_inline) {
+ if (priv->mps == MLX5_MPW_ENHANCED) {
+ priv->dev->tx_pkt_burst =
+ mlx5_tx_burst_empw;
+ DEBUG("selected Enhanced MPW TX function");
+ } else if (priv->mps && priv->txq_inline) {
priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
- } else if ((priv->sriov == 0) && priv->mps) {
+ } else if (priv->mps) {
priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw;
DEBUG("selected MPW TX function");
}
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index 1acf6826..f80c58b4 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -55,6 +55,8 @@
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
@@ -1042,6 +1044,14 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
return ret;
}
+static const struct rte_flow_ops mlx5_flow_ops = {
+ .validate = mlx5_flow_validate,
+ .create = mlx5_flow_create,
+ .destroy = mlx5_flow_destroy,
+ .flush = mlx5_flow_flush,
+ .query = NULL,
+};
+
/**
* Manage filter operations.
*
@@ -1067,6 +1077,11 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
struct priv *priv = dev->data->dev_private;
switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &mlx5_flow_ops;
+ return 0;
case RTE_ETH_FILTER_FDIR:
priv_lock(priv);
ret = priv_fdir_ctrl_func(priv, filter_op, arg);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
new file mode 100644
index 00000000..adcbe3f5
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -0,0 +1,1586 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+
+#include "mlx5.h"
+#include "mlx5_prm.h"
+
+/* Number of Work Queue necessary for the DROP queue. */
+#define MLX5_DROP_WQ_N 4
+
+static int
+mlx5_flow_create_eth(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+static int
+mlx5_flow_create_vlan(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+static int
+mlx5_flow_create_ipv4(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+static int
+mlx5_flow_create_ipv6(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+static int
+mlx5_flow_create_udp(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+static int
+mlx5_flow_create_tcp(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+static int
+mlx5_flow_create_vxlan(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+ struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+ struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
+ struct ibv_qp *qp; /**< Verbs queue pair. */
+ struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */
+ struct ibv_exp_wq *wq; /**< Verbs work queue. */
+ struct ibv_cq *cq; /**< Verbs completion queue. */
+ uint16_t rxqs_n; /**< Number of queues in this flow, 0 if drop queue. */
+ uint32_t mark:1; /**< Set if the flow is marked. */
+ uint32_t drop:1; /**< Drop queue. */
+ uint64_t hash_fields; /**< Fields that participate in the hash. */
+ struct rxq *rxqs[]; /**< Pointer to the queues array. */
+};
+
+/** Static initializer for items. */
+#define ITEMS(...) \
+ (const enum rte_flow_item_type []){ \
+ __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
+ }
+
+/** Structure to generate a simple graph of layers supported by the NIC. */
+struct mlx5_flow_items {
+ /** List of possible actions for these items. */
+ const enum rte_flow_action_type *const actions;
+ /** Bit-masks corresponding to the possibilities for the item. */
+ const void *mask;
+ /**
+ * Default bit-masks to use when item->mask is not provided. When
+ * \default_mask is also NULL, the full supported bit-mask (\mask) is
+ * used instead.
+ */
+ const void *default_mask;
+ /** Bit-masks size in bytes. */
+ const unsigned int mask_sz;
+ /**
+ * Conversion function from rte_flow to NIC specific flow.
+ *
+ * @param item
+ * rte_flow item to convert.
+ * @param default_mask
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data
+ * Internal structure to store the conversion.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+ int (*convert)(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+ /** Size in bytes of the destination structure. */
+ const unsigned int dst_sz;
+ /** List of possible following items. */
+ const enum rte_flow_item_type *const items;
+};
+
+/** Valid action for this PMD. */
+static const enum rte_flow_action_type valid_actions[] = {
+ RTE_FLOW_ACTION_TYPE_DROP,
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_MARK,
+ RTE_FLOW_ACTION_TYPE_FLAG,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
+/** Graph of supported items and associated actions. */
+static const struct mlx5_flow_items mlx5_flow_items[] = {
+ [RTE_FLOW_ITEM_TYPE_END] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VXLAN),
+ },
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = -1,
+ },
+ .default_mask = &rte_flow_item_eth_mask,
+ .mask_sz = sizeof(struct rte_flow_item_eth),
+ .convert = mlx5_flow_create_eth,
+ .dst_sz = sizeof(struct ibv_exp_flow_spec_eth),
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_vlan){
+ .tci = -1,
+ },
+ .default_mask = &rte_flow_item_vlan_mask,
+ .mask_sz = sizeof(struct rte_flow_item_vlan),
+ .convert = mlx5_flow_create_vlan,
+ .dst_sz = 0,
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_ipv4){
+ .hdr = {
+ .src_addr = -1,
+ .dst_addr = -1,
+ .type_of_service = -1,
+ .next_proto_id = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_ipv4_mask,
+ .mask_sz = sizeof(struct rte_flow_item_ipv4),
+ .convert = mlx5_flow_create_ipv4,
+ .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4_ext),
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_ipv6){
+ .hdr = {
+ .src_addr = {
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ },
+ .dst_addr = {
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ },
+ .vtc_flow = -1,
+ .proto = -1,
+ .hop_limits = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_ipv6_mask,
+ .mask_sz = sizeof(struct rte_flow_item_ipv6),
+ .convert = mlx5_flow_create_ipv6,
+ .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv6_ext),
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_udp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_udp_mask,
+ .mask_sz = sizeof(struct rte_flow_item_udp),
+ .convert = mlx5_flow_create_udp,
+ .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_tcp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_tcp_mask,
+ .mask_sz = sizeof(struct rte_flow_item_tcp),
+ .convert = mlx5_flow_create_tcp,
+ .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+ },
+ [RTE_FLOW_ITEM_TYPE_VXLAN] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_vxlan){
+ .vni = "\xff\xff\xff",
+ },
+ .default_mask = &rte_flow_item_vxlan_mask,
+ .mask_sz = sizeof(struct rte_flow_item_vxlan),
+ .convert = mlx5_flow_create_vxlan,
+ .dst_sz = sizeof(struct ibv_exp_flow_spec_tunnel),
+ },
+};
+
+/** Structure to pass to the conversion function. */
+struct mlx5_flow {
+ struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
+ unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
+ uint32_t inner; /**< Set once VXLAN is encountered. */
+ uint64_t hash_fields; /**< Fields that participate in the hash. */
+};
+
+/** Structure for Drop queue. */
+struct rte_flow_drop {
+ struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
+ struct ibv_qp *qp; /**< Verbs queue pair. */
+ struct ibv_exp_wq *wqs[MLX5_DROP_WQ_N]; /**< Verbs work queue. */
+ struct ibv_cq *cq; /**< Verbs completion queue. */
+};
+
+struct mlx5_flow_action {
+ uint32_t queue:1; /**< Target is a receive queue. */
+ uint32_t drop:1; /**< Target is a drop queue. */
+ uint32_t mark:1; /**< Mark is present in the flow. */
+ uint32_t mark_id; /**< Mark identifier. */
+ uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
+ uint16_t queues_n; /**< Number of entries in queue[]. */
+};
+
+/**
+ * Check support for a given item.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param mask[in]
+ * Bit-masks covering supported fields to compare with spec, last and mask in
+ * \item.
+ * @param size
+ * Bit-Mask size in bytes.
+ *
+ * @return
+ * 0 on success.
+ */
+static int
+mlx5_flow_item_validate(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ int ret = 0;
+
+ if (!item->spec && (item->mask || item->last))
+ return -1;
+ if (item->spec && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->spec;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | mask[i]) != mask[i])
+ return -1;
+ }
+ if (item->last && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->last;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | mask[i]) != mask[i])
+ return -1;
+ }
+ if (item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->mask;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | mask[i]) != mask[i])
+ return -1;
+ }
+ if (item->spec && item->last) {
+ uint8_t spec[size];
+ uint8_t last[size];
+ const uint8_t *apply = mask;
+ unsigned int i;
+
+ if (item->mask)
+ apply = item->mask;
+ for (i = 0; i < size; ++i) {
+ spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
+ last[i] = ((const uint8_t *)item->last)[i] & apply[i];
+ }
+ ret = memcmp(spec, last, size);
+ }
+ return ret;
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] flow
+ * Flow structure to update.
+ * @param[in, out] action
+ * Action structure to update.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_validate(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct mlx5_flow *flow,
+ struct mlx5_flow_action *action)
+{
+ const struct mlx5_flow_items *cur_item = mlx5_flow_items;
+
+ (void)priv;
+ if (attr->group) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "groups are not supported");
+ return -rte_errno;
+ }
+ if (attr->priority) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priorities are not supported");
+ return -rte_errno;
+ }
+ if (attr->egress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "egress is not supported");
+ return -rte_errno;
+ }
+ if (!attr->ingress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "only ingress is supported");
+ return -rte_errno;
+ }
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ const struct mlx5_flow_items *token = NULL;
+ unsigned int i;
+ int err;
+
+ if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ for (i = 0;
+ cur_item->items &&
+ cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
+ ++i) {
+ if (cur_item->items[i] == items->type) {
+ token = &mlx5_flow_items[items->type];
+ break;
+ }
+ }
+ if (!token)
+ goto exit_item_not_supported;
+ cur_item = token;
+ err = mlx5_flow_item_validate(items,
+ (const uint8_t *)cur_item->mask,
+ cur_item->mask_sz);
+ if (err)
+ goto exit_item_not_supported;
+ if (flow->ibv_attr && cur_item->convert) {
+ err = cur_item->convert(items,
+ (cur_item->default_mask ?
+ cur_item->default_mask :
+ cur_item->mask),
+ flow);
+ if (err)
+ goto exit_item_not_supported;
+ } else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+ if (flow->inner) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "cannot recognize multiple"
+ " VXLAN encapsulations");
+ return -rte_errno;
+ }
+ flow->inner = 1;
+ }
+ flow->offset += cur_item->dst_sz;
+ }
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+ if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ continue;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ action->drop = 1;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *queue =
+ (const struct rte_flow_action_queue *)
+ actions->conf;
+ uint16_t n;
+ uint16_t found = 0;
+
+ if (!queue || (queue->index > (priv->rxqs_n - 1)))
+ goto exit_action_not_supported;
+ for (n = 0; n < action->queues_n; ++n) {
+ if (action->queues[n] == queue->index) {
+ found = 1;
+ break;
+ }
+ }
+ if (action->queues_n > 1 && !found) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "queue action not in RSS queues");
+ return -rte_errno;
+ }
+ if (!found) {
+ action->queue = 1;
+ action->queues_n = 1;
+ action->queues[0] = queue->index;
+ }
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ const struct rte_flow_action_rss *rss =
+ (const struct rte_flow_action_rss *)
+ actions->conf;
+ uint16_t n;
+
+ if (!rss || !rss->num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no valid queues");
+ return -rte_errno;
+ }
+ if (action->queues_n == 1) {
+ uint16_t found = 0;
+
+ assert(action->queues_n);
+ for (n = 0; n < rss->num; ++n) {
+ if (action->queues[0] ==
+ rss->queue[n]) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "queue action not in RSS"
+ " queues");
+ return -rte_errno;
+ }
+ }
+ for (n = 0; n < rss->num; ++n) {
+ if (rss->queue[n] >= priv->rxqs_n) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "queue id > number of"
+ " queues");
+ return -rte_errno;
+ }
+ }
+ action->queue = 1;
+ for (n = 0; n < rss->num; ++n)
+ action->queues[n] = rss->queue[n];
+ action->queues_n = rss->num;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ const struct rte_flow_action_mark *mark =
+ (const struct rte_flow_action_mark *)
+ actions->conf;
+
+ if (!mark) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "mark must be defined");
+ return -rte_errno;
+ } else if (mark->id >= MLX5_FLOW_MARK_MAX) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "mark must be between 0"
+ " and 16777199");
+ return -rte_errno;
+ }
+ action->mark = 1;
+ action->mark_id = mark->id;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
+ action->mark = 1;
+ } else {
+ goto exit_action_not_supported;
+ }
+ }
+ if (action->mark && !flow->ibv_attr && !action->drop)
+ flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);
+ if (!action->queue && !action->drop) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "no valid action");
+ return -rte_errno;
+ }
+ return 0;
+exit_item_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
+ return -rte_errno;
+exit_action_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "action not supported");
+ return -rte_errno;
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+ struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr) };
+ struct mlx5_flow_action action = {
+ .queue = 0,
+ .drop = 0,
+ .mark = 0,
+ .mark_id = MLX5_FLOW_MARK_DEFAULT,
+ .queues_n = 0,
+ };
+
+ priv_lock(priv);
+ ret = priv_flow_validate(priv, attr, items, actions, error, &flow,
+ &action);
+ priv_unlock(priv);
+ return ret;
+}
+
+/**
+ * Convert Ethernet item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_eth(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_eth *eth;
+ const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
+ unsigned int i;
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 2;
+ flow->hash_fields = 0;
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *eth = (struct ibv_exp_flow_spec_eth) {
+ .type = flow->inner | IBV_EXP_FLOW_SPEC_ETH,
+ .size = eth_size,
+ };
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ eth->val.ether_type = spec->type;
+ memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ eth->mask.ether_type = mask->type;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
+ eth->val.src_mac[i] &= eth->mask.src_mac[i];
+ }
+ eth->val.ether_type &= eth->mask.ether_type;
+ return 0;
+}
+
+/**
+ * Convert VLAN item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_vlan(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_eth *eth;
+ const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
+
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ eth->val.vlan_tag = spec->tci;
+ eth->mask.vlan_tag = mask->tci;
+ eth->val.vlan_tag &= eth->mask.vlan_tag;
+ return 0;
+}
+
+/**
+ * Convert IPv4 item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_ipv4(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_ipv4_ext *ipv4;
+ unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4_ext);
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 1;
+ flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
+ IBV_EXP_RX_HASH_DST_IPV4);
+ ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *ipv4 = (struct ibv_exp_flow_spec_ipv4_ext) {
+ .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4_EXT,
+ .size = ipv4_size,
+ };
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ ipv4->val = (struct ibv_exp_flow_ipv4_ext_filter){
+ .src_ip = spec->hdr.src_addr,
+ .dst_ip = spec->hdr.dst_addr,
+ .proto = spec->hdr.next_proto_id,
+ .tos = spec->hdr.type_of_service,
+ };
+ ipv4->mask = (struct ibv_exp_flow_ipv4_ext_filter){
+ .src_ip = mask->hdr.src_addr,
+ .dst_ip = mask->hdr.dst_addr,
+ .proto = mask->hdr.next_proto_id,
+ .tos = mask->hdr.type_of_service,
+ };
+ /* Remove unwanted bits from values. */
+ ipv4->val.src_ip &= ipv4->mask.src_ip;
+ ipv4->val.dst_ip &= ipv4->mask.dst_ip;
+ ipv4->val.proto &= ipv4->mask.proto;
+ ipv4->val.tos &= ipv4->mask.tos;
+ return 0;
+}
+
+/**
+ * Convert IPv6 item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_ipv6(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_ipv6_ext *ipv6;
+ unsigned int ipv6_size = sizeof(struct ibv_exp_flow_spec_ipv6_ext);
+ unsigned int i;
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 1;
+ flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+ IBV_EXP_RX_HASH_DST_IPV6);
+ ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *ipv6 = (struct ibv_exp_flow_spec_ipv6_ext) {
+ .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV6_EXT,
+ .size = ipv6_size,
+ };
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
+ RTE_DIM(ipv6->val.src_ip));
+ memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
+ RTE_DIM(ipv6->val.dst_ip));
+ memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
+ RTE_DIM(ipv6->mask.src_ip));
+ memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
+ RTE_DIM(ipv6->mask.dst_ip));
+ ipv6->mask.flow_label = mask->hdr.vtc_flow;
+ ipv6->mask.next_hdr = mask->hdr.proto;
+ ipv6->mask.hop_limit = mask->hdr.hop_limits;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
+ ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
+ ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
+ }
+ ipv6->val.flow_label &= ipv6->mask.flow_label;
+ ipv6->val.next_hdr &= ipv6->mask.next_hdr;
+ ipv6->val.hop_limit &= ipv6->mask.hop_limit;
+ return 0;
+}
+
+/**
+ * Convert UDP item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_udp(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_tcp_udp *udp;
+ unsigned int udp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 0;
+ flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_UDP |
+ IBV_EXP_RX_HASH_DST_PORT_UDP);
+ udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *udp = (struct ibv_exp_flow_spec_tcp_udp) {
+ .type = flow->inner | IBV_EXP_FLOW_SPEC_UDP,
+ .size = udp_size,
+ };
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ udp->val.dst_port = spec->hdr.dst_port;
+ udp->val.src_port = spec->hdr.src_port;
+ udp->mask.dst_port = mask->hdr.dst_port;
+ udp->mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ udp->val.src_port &= udp->mask.src_port;
+ udp->val.dst_port &= udp->mask.dst_port;
+ return 0;
+}
+
+/**
+ * Convert TCP item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_tcp(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_tcp_udp *tcp;
+ unsigned int tcp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 0;
+ flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_TCP |
+ IBV_EXP_RX_HASH_DST_PORT_TCP);
+ tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *tcp = (struct ibv_exp_flow_spec_tcp_udp) {
+ .type = flow->inner | IBV_EXP_FLOW_SPEC_TCP,
+ .size = tcp_size,
+ };
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ tcp->val.dst_port = spec->hdr.dst_port;
+ tcp->val.src_port = spec->hdr.src_port;
+ tcp->mask.dst_port = mask->hdr.dst_port;
+ tcp->mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ tcp->val.src_port &= tcp->mask.src_port;
+ tcp->val.dst_port &= tcp->mask.dst_port;
+ return 0;
+}
+
+/**
+ * Convert VXLAN item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_vxlan(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_vxlan *spec = item->spec;
+ const struct rte_flow_item_vxlan *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_tunnel *vxlan;
+ unsigned int size = sizeof(struct ibv_exp_flow_spec_tunnel);
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id;
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 0;
+ id.vni[0] = 0;
+ vxlan = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *vxlan = (struct ibv_exp_flow_spec_tunnel) {
+ .type = flow->inner | IBV_EXP_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ flow->inner = IBV_EXP_FLOW_SPEC_INNER;
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan->val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan->mask.tunnel_id = id.vlan_id;
+ /* Remove unwanted bits from values. */
+ vxlan->val.tunnel_id &= vxlan->mask.tunnel_id;
+ return 0;
+}
+
+/**
+ * Convert mark/flag action to Verbs specification.
+ *
+ * @param flow
+ * Pointer to MLX5 flow structure.
+ * @param mark_id
+ * Mark identifier.
+ */
+static int
+mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
+{
+ struct ibv_exp_flow_spec_action_tag *tag;
+ unsigned int size = sizeof(struct ibv_exp_flow_spec_action_tag);
+
+ tag = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *tag = (struct ibv_exp_flow_spec_action_tag){
+ .type = IBV_EXP_FLOW_SPEC_ACTION_TAG,
+ .size = size,
+ .tag_id = mlx5_flow_mark_set(mark_id),
+ };
+ ++flow->ibv_attr->num_of_specs;
+ return 0;
+}
+
+/**
+ * Complete flow rule creation with a drop queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param flow
+ * MLX5 flow attributes (filled by mlx5_flow_validate()).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow if the rule could be created.
+ */
+static struct rte_flow *
+priv_flow_create_action_queue_drop(struct priv *priv,
+ struct mlx5_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *rte_flow;
+
+ assert(priv->pd);
+ assert(priv->ctx);
+ rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+ if (!rte_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate flow memory");
+ return NULL;
+ }
+ rte_flow->drop = 1;
+ rte_flow->ibv_attr = flow->ibv_attr;
+ rte_flow->qp = priv->flow_drop_queue->qp;
+ if (!priv->started)
+ return rte_flow;
+ rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
+ rte_flow->ibv_attr);
+ if (!rte_flow->ibv_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "flow rule creation failure");
+ goto error;
+ }
+ return rte_flow;
+error:
+ assert(rte_flow);
+ rte_free(rte_flow);
+ return NULL;
+}
+
+/**
+ * Complete flow rule creation.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param flow
+ * MLX5 flow attributes (filled by mlx5_flow_validate()).
+ * @param action
+ * Target action structure.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow if the rule could be created.
+ */
+static struct rte_flow *
+priv_flow_create_action_queue(struct priv *priv,
+ struct mlx5_flow *flow,
+ struct mlx5_flow_action *action,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *rte_flow;
+ unsigned int i;
+ unsigned int j;
+ const unsigned int wqs_n = 1 << log2above(action->queues_n);
+ struct ibv_exp_wq *wqs[wqs_n];
+
+ assert(priv->pd);
+ assert(priv->ctx);
+ assert(!action->drop);
+ rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow) +
+ sizeof(*rte_flow->rxqs) * action->queues_n, 0);
+ if (!rte_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate flow memory");
+ return NULL;
+ }
+ for (i = 0; i < action->queues_n; ++i) {
+ struct rxq_ctrl *rxq;
+
+ rxq = container_of((*priv->rxqs)[action->queues[i]],
+ struct rxq_ctrl, rxq);
+ wqs[i] = rxq->wq;
+ rte_flow->rxqs[i] = &rxq->rxq;
+ ++rte_flow->rxqs_n;
+ rxq->rxq.mark |= action->mark;
+ }
+ /* finalise indirection table. */
+ for (j = 0; i < wqs_n; ++i, ++j) {
+ wqs[i] = wqs[j];
+ if (j == action->queues_n)
+ j = 0;
+ }
+ rte_flow->mark = action->mark;
+ rte_flow->ibv_attr = flow->ibv_attr;
+ rte_flow->hash_fields = flow->hash_fields;
+ rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
+ priv->ctx,
+ &(struct ibv_exp_rwq_ind_table_init_attr){
+ .pd = priv->pd,
+ .log_ind_tbl_size = log2above(action->queues_n),
+ .ind_tbl = wqs,
+ .comp_mask = 0,
+ });
+ if (!rte_flow->ind_table) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate indirection table");
+ goto error;
+ }
+ rte_flow->qp = ibv_exp_create_qp(
+ priv->ctx,
+ &(struct ibv_exp_qp_init_attr){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_EXP_QP_INIT_ATTR_PD |
+ IBV_EXP_QP_INIT_ATTR_PORT |
+ IBV_EXP_QP_INIT_ATTR_RX_HASH,
+ .pd = priv->pd,
+ .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+ .rx_hash_function =
+ IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_hash_default_key_len,
+ .rx_hash_key = rss_hash_default_key,
+ .rx_hash_fields_mask = rte_flow->hash_fields,
+ .rwq_ind_tbl = rte_flow->ind_table,
+ },
+ .port_num = priv->port,
+ });
+ if (!rte_flow->qp) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate QP");
+ goto error;
+ }
+ if (!priv->started)
+ return rte_flow;
+ rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
+ rte_flow->ibv_attr);
+ if (!rte_flow->ibv_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "flow rule creation failure");
+ goto error;
+ }
+ return rte_flow;
+error:
+ assert(rte_flow);
+ if (rte_flow->qp)
+ ibv_destroy_qp(rte_flow->qp);
+ if (rte_flow->ind_table)
+ ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
+ rte_free(rte_flow);
+ return NULL;
+}
+
+/**
+ * Convert a flow.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow on success, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow *rte_flow;
+ struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr), };
+ struct mlx5_flow_action action = {
+ .queue = 0,
+ .drop = 0,
+ .mark = 0,
+ .mark_id = MLX5_FLOW_MARK_DEFAULT,
+ .queues_n = 0,
+ };
+ int err;
+
+ err = priv_flow_validate(priv, attr, items, actions, error, &flow,
+ &action);
+ if (err)
+ goto exit;
+ flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
+ flow.offset = sizeof(struct ibv_exp_flow_attr);
+ if (!flow.ibv_attr) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate ibv_attr memory");
+ goto exit;
+ }
+ *flow.ibv_attr = (struct ibv_exp_flow_attr){
+ .type = IBV_EXP_FLOW_ATTR_NORMAL,
+ .size = sizeof(struct ibv_exp_flow_attr),
+ .priority = attr->priority,
+ .num_of_specs = 0,
+ .port = 0,
+ .flags = 0,
+ .reserved = 0,
+ };
+ flow.inner = 0;
+ flow.hash_fields = 0;
+ claim_zero(priv_flow_validate(priv, attr, items, actions,
+ error, &flow, &action));
+ if (action.mark && !action.drop) {
+ mlx5_flow_create_flag_mark(&flow, action.mark_id);
+ flow.offset += sizeof(struct ibv_exp_flow_spec_action_tag);
+ }
+ if (action.drop)
+ rte_flow =
+ priv_flow_create_action_queue_drop(priv, &flow, error);
+ else
+ rte_flow = priv_flow_create_action_queue(priv, &flow, &action,
+ error);
+ if (!rte_flow)
+ goto exit;
+ return rte_flow;
+exit:
+ rte_free(flow.ibv_attr);
+ return NULL;
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+struct rte_flow *
+mlx5_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow;
+
+ priv_lock(priv);
+ flow = priv_flow_create(priv, attr, items, actions, error);
+ if (flow) {
+ LIST_INSERT_HEAD(&priv->flows, flow, next);
+ DEBUG("Flow created %p", (void *)flow);
+ }
+ priv_unlock(priv);
+ return flow;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] flow
+ * Flow to destroy.
+ */
+static void
+priv_flow_destroy(struct priv *priv,
+ struct rte_flow *flow)
+{
+ (void)priv;
+ LIST_REMOVE(flow, next);
+ if (flow->ibv_flow)
+ claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+ if (flow->drop)
+ goto free;
+ if (flow->qp)
+ claim_zero(ibv_destroy_qp(flow->qp));
+ if (flow->ind_table)
+ claim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table));
+ if (flow->drop && flow->wq)
+ claim_zero(ibv_exp_destroy_wq(flow->wq));
+ if (flow->drop && flow->cq)
+ claim_zero(ibv_destroy_cq(flow->cq));
+ if (flow->mark) {
+ struct rte_flow *tmp;
+ struct rxq *rxq;
+ uint32_t mark_n = 0;
+ uint32_t queue_n;
+
+ /*
+ * To remove the mark from the queue, the queue must not be
+ * present in any other marked flow (RSS or not).
+ */
+ for (queue_n = 0; queue_n < flow->rxqs_n; ++queue_n) {
+ rxq = flow->rxqs[queue_n];
+ for (tmp = LIST_FIRST(&priv->flows);
+ tmp;
+ tmp = LIST_NEXT(tmp, next)) {
+ uint32_t tqueue_n;
+
+ if (tmp->drop)
+ continue;
+ for (tqueue_n = 0;
+ tqueue_n < tmp->rxqs_n;
+ ++tqueue_n) {
+ struct rxq *trxq;
+
+ trxq = tmp->rxqs[tqueue_n];
+ if (rxq == trxq)
+ ++mark_n;
+ }
+ }
+ rxq->mark = !!mark_n;
+ }
+ }
+free:
+ rte_free(flow->ibv_attr);
+ DEBUG("Flow destroyed %p", (void *)flow);
+ rte_free(flow);
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_destroy(priv, flow);
+ priv_unlock(priv);
+ return 0;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_flow_flush(struct priv *priv)
+{
+ while (!LIST_EMPTY(&priv->flows)) {
+ struct rte_flow *flow;
+
+ flow = LIST_FIRST(&priv->flows);
+ priv_flow_destroy(priv, flow);
+ }
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_flush(priv);
+ priv_unlock(priv);
+ return 0;
+}
+
+/**
+ * Create drop queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success.
+ */
+static int
+priv_flow_create_drop_queue(struct priv *priv)
+{
+ struct rte_flow_drop *fdq = NULL;
+ unsigned int i;
+
+ assert(priv->pd);
+ assert(priv->ctx);
+ fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
+ if (!fdq) {
+ WARN("cannot allocate memory for drop queue");
+ goto error;
+ }
+ fdq->cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
+ &(struct ibv_exp_cq_init_attr){
+ .comp_mask = 0,
+ });
+ if (!fdq->cq) {
+ WARN("cannot allocate CQ for drop queue");
+ goto error;
+ }
+ for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
+ fdq->wqs[i] = ibv_exp_create_wq(priv->ctx,
+ &(struct ibv_exp_wq_init_attr){
+ .wq_type = IBV_EXP_WQT_RQ,
+ .max_recv_wr = 1,
+ .max_recv_sge = 1,
+ .pd = priv->pd,
+ .cq = fdq->cq,
+ });
+ if (!fdq->wqs[i]) {
+ WARN("cannot allocate WQ for drop queue");
+ goto error;
+ }
+ }
+ fdq->ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
+ &(struct ibv_exp_rwq_ind_table_init_attr){
+ .pd = priv->pd,
+ .log_ind_tbl_size = 0,
+ .ind_tbl = fdq->wqs,
+ .comp_mask = 0,
+ });
+ if (!fdq->ind_table) {
+ WARN("cannot allocate indirection table for drop queue");
+ goto error;
+ }
+ fdq->qp = ibv_exp_create_qp(priv->ctx,
+ &(struct ibv_exp_qp_init_attr){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_EXP_QP_INIT_ATTR_PD |
+ IBV_EXP_QP_INIT_ATTR_PORT |
+ IBV_EXP_QP_INIT_ATTR_RX_HASH,
+ .pd = priv->pd,
+ .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+ .rx_hash_function =
+ IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_hash_default_key_len,
+ .rx_hash_key = rss_hash_default_key,
+ .rx_hash_fields_mask = 0,
+ .rwq_ind_tbl = fdq->ind_table,
+ },
+ .port_num = priv->port,
+ });
+ if (!fdq->qp) {
+ WARN("cannot allocate QP for drop queue");
+ goto error;
+ }
+ priv->flow_drop_queue = fdq;
+ return 0;
+error:
+ if (fdq->qp)
+ claim_zero(ibv_destroy_qp(fdq->qp));
+ if (fdq->ind_table)
+ claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
+ for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
+ if (fdq->wqs[i])
+ claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
+ }
+ if (fdq->cq)
+ claim_zero(ibv_destroy_cq(fdq->cq));
+ if (fdq)
+ rte_free(fdq);
+ priv->flow_drop_queue = NULL;
+ return -1;
+}
+
+/**
+ * Delete drop queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_flow_delete_drop_queue(struct priv *priv)
+{
+ struct rte_flow_drop *fdq = priv->flow_drop_queue;
+ unsigned int i;
+
+ if (!fdq)
+ return;
+ if (fdq->qp)
+ claim_zero(ibv_destroy_qp(fdq->qp));
+ if (fdq->ind_table)
+ claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
+ for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
+ if (fdq->wqs[i])
+ claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
+ }
+ if (fdq->cq)
+ claim_zero(ibv_destroy_cq(fdq->cq));
+ rte_free(fdq);
+ priv->flow_drop_queue = NULL;
+}
+
+/**
+ * Remove all flows.
+ *
+ * Called by dev_stop() to remove all flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+priv_flow_stop(struct priv *priv)
+{
+ struct rte_flow *flow;
+
+ for (flow = LIST_FIRST(&priv->flows);
+ flow;
+ flow = LIST_NEXT(flow, next)) {
+ claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+ flow->ibv_flow = NULL;
+ if (flow->mark) {
+ unsigned int n;
+
+ for (n = 0; n < flow->rxqs_n; ++n)
+ flow->rxqs[n]->mark = 0;
+ }
+ DEBUG("Flow %p removed", (void *)flow);
+ }
+ priv_flow_delete_drop_queue(priv);
+}
+
+/**
+ * Add all flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, a errno value otherwise and rte_errno is set.
+ */
+int
+priv_flow_start(struct priv *priv)
+{
+ int ret;
+ struct rte_flow *flow;
+
+ ret = priv_flow_create_drop_queue(priv);
+ if (ret)
+ return -1;
+ for (flow = LIST_FIRST(&priv->flows);
+ flow;
+ flow = LIST_NEXT(flow, next)) {
+ struct ibv_qp *qp;
+
+ if (flow->drop)
+ qp = priv->flow_drop_queue->qp;
+ else
+ qp = flow->qp;
+ flow->ibv_flow = ibv_exp_create_flow(qp, flow->ibv_attr);
+ if (!flow->ibv_flow) {
+ DEBUG("Flow %p cannot be applied", (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DEBUG("Flow %p applied", (void *)flow);
+ if (flow->mark) {
+ unsigned int n;
+
+ for (n = 0; n < flow->rxqs_n; ++n)
+ flow->rxqs[n]->mark = 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Verify if the Rx queue is used in a flow.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param rxq
+ * Pointer to the queue to search.
+ *
+ * @return
+ * Nonzero if the queue is used by a flow.
+ */
+int
+priv_flow_rxq_in_use(struct priv *priv, struct rxq *rxq)
+{
+ struct rte_flow *flow;
+
+ for (flow = LIST_FIRST(&priv->flows);
+ flow;
+ flow = LIST_NEXT(flow, next)) {
+ unsigned int n;
+
+ if (flow->drop)
+ continue;
+ for (n = 0; n < flow->rxqs_n; ++n) {
+ if (flow->rxqs[n] == rxq)
+ return 1;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 4fcfd3b8..79e0c410 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -470,26 +470,30 @@ priv_mac_addrs_enable(struct priv *priv)
* @param vmdq
* VMDq pool index to associate address with (ignored).
*/
-void
+int
mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq)
{
struct priv *priv = dev->data->dev_private;
+ int re;
if (mlx5_is_secondary())
- return;
+ return -ENOTSUP;
(void)vmdq;
priv_lock(priv);
DEBUG("%p: adding MAC address at index %" PRIu32,
(void *)dev, index);
- if (index >= RTE_DIM(priv->mac))
+ if (index >= RTE_DIM(priv->mac)) {
+ re = EINVAL;
goto end;
- priv_mac_addr_add(priv, index,
- (const uint8_t (*)[ETHER_ADDR_LEN])
- mac_addr->addr_bytes);
+ }
+ re = priv_mac_addr_add(priv, index,
+ (const uint8_t (*)[ETHER_ADDR_LEN])
+ mac_addr->addr_bytes);
end:
priv_unlock(priv);
+ return -re;
}
/**
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index ed088eea..608072f7 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -34,6 +34,8 @@
#ifndef RTE_PMD_MLX5_PRM_H_
#define RTE_PMD_MLX5_PRM_H_
+#include <assert.h>
+
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
@@ -44,6 +46,7 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
+#include <rte_vect.h>
#include "mlx5_autoconf.h"
/* Get CQE owner bit. */
@@ -70,6 +73,9 @@
/* WQE size */
#define MLX5_WQE_SIZE (4 * MLX5_WQE_DWORD_SIZE)
+/* Max size of a WQE session. */
+#define MLX5_WQE_SIZE_MAX 960U
+
/* Compute the number of DS. */
#define MLX5_WQE_DS(n) \
(((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE)
@@ -77,10 +83,19 @@
/* Room for inline data in multi-packet WQE. */
#define MLX5_MWQE64_INL_DATA 28
+/* Default minimum number of Tx queues for inlining packets. */
+#define MLX5_EMPW_MIN_TXQS 8
+
+/* Default max packet length to be inlined. */
+#define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE)
+
#ifndef HAVE_VERBS_MLX5_OPCODE_TSO
#define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */
#endif
+#define MLX5_OPC_MOD_ENHANCED_MPSW 0
+#define MLX5_OPCODE_ENHANCED_MPSW 0x29
+
/* CQE value to inform that VLAN is stripped. */
#define MLX5_CQE_VLAN_STRIPPED (1u << 0)
@@ -117,6 +132,28 @@
/* Tunnel packet bit in the CQE. */
#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)
+/* Inner L3 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)
+
+/* Inner L4 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
+
+/* Is flow mark valid. */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
+#else
+#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff)
+#endif
+
+/* INVALID is used by packets matching no flow rules. */
+#define MLX5_FLOW_MARK_INVALID 0
+
+/* Maximum allowed value to mark a packet. */
+#define MLX5_FLOW_MARK_MAX 0xfffff0
+
+/* Default mark value used when none is provided. */
+#define MLX5_FLOW_MARK_DEFAULT 0xffffff
+
/* Subset of struct mlx5_wqe_eth_seg. */
struct mlx5_wqe_eth_seg_small {
uint32_t rsvd0;
@@ -126,12 +163,19 @@ struct mlx5_wqe_eth_seg_small {
uint32_t rsvd2;
uint16_t inline_hdr_sz;
uint8_t inline_hdr[2];
-};
+} __rte_aligned(MLX5_WQE_DWORD_SIZE);
struct mlx5_wqe_inl_small {
uint32_t byte_cnt;
uint8_t raw;
-};
+} __rte_aligned(MLX5_WQE_DWORD_SIZE);
+
+struct mlx5_wqe_ctrl {
+ uint32_t ctrl0;
+ uint32_t ctrl1;
+ uint32_t ctrl2;
+ uint32_t ctrl3;
+} __rte_aligned(MLX5_WQE_DWORD_SIZE);
/* Small common part of the WQE. */
struct mlx5_wqe {
@@ -139,16 +183,30 @@ struct mlx5_wqe {
struct mlx5_wqe_eth_seg_small eseg;
};
+/* Vectorize WQE header. */
+struct mlx5_wqe_v {
+ rte_v128u32_t ctrl;
+ rte_v128u32_t eseg;
+};
+
/* WQE. */
struct mlx5_wqe64 {
struct mlx5_wqe hdr;
uint8_t raw[32];
-} __rte_aligned(64);
+} __rte_aligned(MLX5_WQE_SIZE);
+
+/* MPW mode. */
+enum mlx5_mpw_mode {
+ MLX5_MPW_DISABLED,
+ MLX5_MPW,
+ MLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */
+};
/* MPW session status. */
enum mlx5_mpw_state {
MLX5_MPW_STATE_OPENED,
MLX5_MPW_INL_STATE_OPENED,
+ MLX5_MPW_ENHANCED_STATE_OPENED,
MLX5_MPW_STATE_CLOSED,
};
@@ -180,10 +238,68 @@ struct mlx5_cqe {
uint8_t rsvd2[12];
uint32_t byte_cnt;
uint64_t timestamp;
- uint8_t rsvd3[4];
+ uint32_t sop_drop_qpn;
uint16_t wqe_counter;
uint8_t rsvd4;
uint8_t op_own;
};
+/**
+ * Convert a user mark to flow mark.
+ *
+ * @param val
+ * Mark value to convert.
+ *
+ * @return
+ * Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_set(uint32_t val)
+{
+ uint32_t ret;
+
+ /*
+ * Add one to the user value to differentiate un-marked flows from
+ * marked flows, if the ID is equal to MLX5_FLOW_MARK_DEFAULT it
+ * remains untouched.
+ */
+ if (val != MLX5_FLOW_MARK_DEFAULT)
+ ++val;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ /*
+ * Mark is 24 bits (minus reserved values) but is stored on a 32 bit
+ * word, byte-swapped by the kernel on little-endian systems. In this
+ * case, left-shifting the resulting big-endian value ensures the
+ * least significant 24 bits are retained when converting it back.
+ */
+ ret = rte_cpu_to_be_32(val) >> 8;
+#else
+ ret = val;
+#endif
+ return ret;
+}
+
+/**
+ * Convert a mark to user mark.
+ *
+ * @param val
+ * Mark value to convert.
+ *
+ * @return
+ * Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_get(uint32_t val)
+{
+ /*
+ * Subtract one from the retrieved value. It was added by
+ * mlx5_flow_mark_set() to distinguish unmarked flows.
+ */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ return (val >> 8) - 1;
+#else
+ return val - 1;
+#endif
+}
+
#endif /* RTE_PMD_MLX5_PRM_H_ */
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index 0bed74ee..a2dd7d17 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -257,13 +257,9 @@ priv_dev_rss_reta_query(struct priv *priv,
{
unsigned int idx;
unsigned int i;
- int ret;
-
- /* See RETA comment in mlx5_dev_infos_get(). */
- ret = priv_rss_reta_index_resize(priv, priv->ind_table_max_size);
- if (ret)
- return ret;
+ if (!reta_size || reta_size > priv->reta_idx_n)
+ return EINVAL;
/* Fill each entry of the table even if its bit is not set. */
for (idx = 0, i = 0; (i != reta_size); ++i) {
idx = i / RTE_RETA_GROUP_SIZE;
@@ -296,8 +292,9 @@ priv_dev_rss_reta_update(struct priv *priv,
unsigned int pos;
int ret;
- /* See RETA comment in mlx5_dev_infos_get(). */
- ret = priv_rss_reta_index_resize(priv, priv->ind_table_max_size);
+ if (!reta_size)
+ return EINVAL;
+ ret = priv_rss_reta_index_resize(priv, reta_size);
if (ret)
return ret;
@@ -360,8 +357,11 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
int ret;
struct priv *priv = dev->data->dev_private;
+ mlx5_dev_stop(dev);
priv_lock(priv);
ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size);
priv_unlock(priv);
- return -ret;
+ if (ret)
+ return -ret;
+ return mlx5_dev_start(dev);
}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 28e93d3e..8b782336 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -36,6 +36,7 @@
#include <errno.h>
#include <string.h>
#include <stdint.h>
+#include <fcntl.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -57,6 +58,8 @@
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_debug.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
@@ -741,49 +744,16 @@ rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
void
rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
{
- struct ibv_exp_release_intf_params params;
-
DEBUG("cleaning up %p", (void *)rxq_ctrl);
rxq_free_elts(rxq_ctrl);
if (rxq_ctrl->fdir_queue != NULL)
priv_fdir_queue_destroy(rxq_ctrl->priv, rxq_ctrl->fdir_queue);
- if (rxq_ctrl->if_wq != NULL) {
- assert(rxq_ctrl->priv != NULL);
- assert(rxq_ctrl->priv->ctx != NULL);
- assert(rxq_ctrl->wq != NULL);
- params = (struct ibv_exp_release_intf_params){
- .comp_mask = 0,
- };
- claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx,
- rxq_ctrl->if_wq,
- &params));
- }
- if (rxq_ctrl->if_cq != NULL) {
- assert(rxq_ctrl->priv != NULL);
- assert(rxq_ctrl->priv->ctx != NULL);
- assert(rxq_ctrl->cq != NULL);
- params = (struct ibv_exp_release_intf_params){
- .comp_mask = 0,
- };
- claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx,
- rxq_ctrl->if_cq,
- &params));
- }
if (rxq_ctrl->wq != NULL)
claim_zero(ibv_exp_destroy_wq(rxq_ctrl->wq));
if (rxq_ctrl->cq != NULL)
claim_zero(ibv_destroy_cq(rxq_ctrl->cq));
- if (rxq_ctrl->rd != NULL) {
- struct ibv_exp_destroy_res_domain_attr attr = {
- .comp_mask = 0,
- };
-
- assert(rxq_ctrl->priv != NULL);
- assert(rxq_ctrl->priv->ctx != NULL);
- claim_zero(ibv_exp_destroy_res_domain(rxq_ctrl->priv->ctx,
- rxq_ctrl->rd,
- &attr));
- }
+ if (rxq_ctrl->channel != NULL)
+ claim_zero(ibv_destroy_comp_channel(rxq_ctrl->channel));
if (rxq_ctrl->mr != NULL)
claim_zero(ibv_dereg_mr(rxq_ctrl->mr));
memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
@@ -931,13 +901,10 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
};
struct ibv_exp_wq_attr mod;
union {
- struct ibv_exp_query_intf_params params;
struct ibv_exp_cq_init_attr cq;
- struct ibv_exp_res_domain_init_attr rd;
struct ibv_exp_wq_init_attr wq;
struct ibv_exp_cq_attr cq_attr;
} attr;
- enum ibv_exp_query_intf_status status;
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
unsigned int cqe_n = desc - 1;
struct rte_mbuf *(*elts)[desc] = NULL;
@@ -946,14 +913,10 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
(void)conf; /* Thresholds configuration (ignored). */
/* Enable scattered packets support for this queue if necessary. */
assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- /* If smaller than MRU, multi-segment support must be enabled. */
- if (mb_len < (priv->mtu > dev->data->dev_conf.rxmode.max_rx_pkt_len ?
- dev->data->dev_conf.rxmode.max_rx_pkt_len :
- priv->mtu))
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
- if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >
- (mb_len - RTE_PKTMBUF_HEADROOM))) {
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+ (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ tmpl.rxq.sges_n = 0;
+ } else if (dev->data->dev_conf.rxmode.enable_scatter) {
unsigned int size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -976,6 +939,13 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
dev->data->dev_conf.rxmode.max_rx_pkt_len);
return EOVERFLOW;
}
+ } else {
+ WARN("%p: the requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered"
+ " mode has not been requested",
+ (void *)dev,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ mb_len - RTE_PKTMBUF_HEADROOM);
}
DEBUG("%p: maximum number of segments per packet: %u",
(void *)dev, 1 << tmpl.rxq.sges_n);
@@ -1001,29 +971,25 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
(void *)dev, strerror(ret));
goto error;
}
- attr.rd = (struct ibv_exp_res_domain_init_attr){
- .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
- IBV_EXP_RES_DOMAIN_MSG_MODEL),
- .thread_model = IBV_EXP_THREAD_SINGLE,
- .msg_model = IBV_EXP_MSG_HIGH_BW,
- };
- tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
- if (tmpl.rd == NULL) {
- ret = ENOMEM;
- ERROR("%p: RD creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ tmpl.channel = ibv_create_comp_channel(priv->ctx);
+ if (tmpl.channel == NULL) {
+ dev->data->dev_conf.intr_conf.rxq = 0;
+ ret = ENOMEM;
+ ERROR("%p: Comp Channel creation failure: %s",
+ (void *)dev, strerror(ret));
+ goto error;
+ }
}
attr.cq = (struct ibv_exp_cq_init_attr){
- .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
- .res_domain = tmpl.rd,
+ .comp_mask = 0,
};
if (priv->cqe_comp) {
attr.cq.comp_mask |= IBV_EXP_CQ_INIT_ATTR_FLAGS;
attr.cq.flags |= IBV_EXP_CQ_COMPRESSED_CQE;
cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
}
- tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, NULL, 0,
+ tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, tmpl.channel, 0,
&attr.cq);
if (tmpl.cq == NULL) {
ret = ENOMEM;
@@ -1048,10 +1014,8 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
.pd = priv->pd,
.cq = tmpl.cq,
.comp_mask =
- IBV_EXP_CREATE_WQ_RES_DOMAIN |
IBV_EXP_CREATE_WQ_VLAN_OFFLOADS |
0,
- .res_domain = tmpl.rd,
.vlan_offloads = (tmpl.rxq.vlan_strip ?
IBV_EXP_RECEIVE_WQ_CVLAN_STRIP :
0),
@@ -1112,29 +1076,6 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
/* Save port ID. */
tmpl.rxq.port_id = dev->data->port_id;
DEBUG("%p: RTE port ID: %u", (void *)rxq_ctrl, tmpl.rxq.port_id);
- attr.params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf_version = 1,
- .intf = IBV_EXP_INTF_CQ,
- .obj = tmpl.cq,
- };
- tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_cq == NULL) {
- ERROR("%p: CQ interface family query failed with status %d",
- (void *)dev, status);
- goto error;
- }
- attr.params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf = IBV_EXP_INTF_WQ,
- .obj = tmpl.wq,
- };
- tmpl.if_wq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_wq == NULL) {
- ERROR("%p: WQ interface family query failed with status %d",
- (void *)dev, status);
- goto error;
- }
/* Change queue state to ready. */
mod = (struct ibv_exp_wq_attr){
.attr_mask = IBV_EXP_WQ_ATTR_STATE,
@@ -1247,6 +1188,19 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
}
(*priv->rxqs)[idx] = NULL;
rxq_cleanup(rxq_ctrl);
+ /* Resize if rxq size is changed. */
+ if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
+ rxq_ctrl = rte_realloc(rxq_ctrl,
+ sizeof(*rxq_ctrl) +
+ desc * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq_ctrl) {
+ ERROR("%p: unable to reallocate queue index %u",
+ (void *)dev, idx);
+ priv_unlock(priv);
+ return -ENOMEM;
+ }
+ }
} else {
rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) +
desc * sizeof(struct rte_mbuf *),
@@ -1295,6 +1249,9 @@ mlx5_rx_queue_release(void *dpdk_rxq)
rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
priv = rxq_ctrl->priv;
priv_lock(priv);
+ if (priv_flow_rxq_in_use(priv, rxq))
+ rte_panic("Rx queue %p is still used by a flow and cannot be"
+ " removed\n", (void *)rxq_ctrl);
for (i = 0; (i != priv->rxqs_n); ++i)
if ((*priv->rxqs)[i] == rxq) {
DEBUG("%p: removing RX queue %p from list",
@@ -1347,3 +1304,113 @@ mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
rxq = (*priv->rxqs)[index];
return priv->dev->rx_pkt_burst(rxq, pkts, pkts_n);
}
+
+/**
+ * Fill epoll fd list for rxq interrupts.
+ *
+ * @param priv
+ * Private structure.
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+int
+priv_intr_efd_enable(struct priv *priv)
+{
+ unsigned int i;
+ unsigned int rxqs_n = priv->rxqs_n;
+ unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+ if (n == 0)
+ return 0;
+ if (n < rxqs_n) {
+ WARN("rxqs num is larger than EAL max interrupt vector "
+ "%u > %u unable to supprt rxq interrupts",
+ rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ return -EINVAL;
+ }
+ intr_handle->type = RTE_INTR_HANDLE_EXT;
+ for (i = 0; i != n; ++i) {
+ struct rxq *rxq = (*priv->rxqs)[i];
+ struct rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct rxq_ctrl, rxq);
+ int fd = rxq_ctrl->channel->fd;
+ int flags;
+ int rc;
+
+ flags = fcntl(fd, F_GETFL);
+ rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+ if (rc < 0) {
+ WARN("failed to change rxq interrupt file "
+ "descriptor %d for queue index %d", fd, i);
+ return -1;
+ }
+ intr_handle->efds[i] = fd;
+ }
+ intr_handle->nb_efd = n;
+ return 0;
+}
+
+/**
+ * Clean epoll fd list for rxq interrupts.
+ *
+ * @param priv
+ * Private structure.
+ */
+void
+priv_intr_efd_disable(struct priv *priv)
+{
+ struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+ rte_intr_free_epoll_fd(intr_handle);
+}
+
+/**
+ * Create and init interrupt vector array.
+ *
+ * @param priv
+ * Private structure.
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+int
+priv_create_intr_vec(struct priv *priv)
+{
+ unsigned int rxqs_n = priv->rxqs_n;
+ unsigned int i;
+ struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+ if (rxqs_n == 0)
+ return 0;
+ intr_handle->intr_vec = (int *)
+ rte_malloc("intr_vec", rxqs_n * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ WARN("Failed to allocate memory for intr_vec "
+ "rxq interrupt will not be supported");
+ return -ENOMEM;
+ }
+ for (i = 0; i != rxqs_n; ++i) {
+ /* 1:1 mapping between rxq and interrupt. */
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+ }
+ return 0;
+}
+
+/**
+ * Destroy init interrupt vector array.
+ *
+ * @param priv
+ * Private structure.
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+void
+priv_destroy_intr_vec(struct priv *priv)
+{
+ struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+ rte_free(intr_handle->intr_vec);
+}
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 3997b27a..de6e0fa4 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -74,6 +74,9 @@ check_cqe(volatile struct mlx5_cqe *cqe,
unsigned int cqes_n, const uint16_t ci)
__attribute__((always_inline));
+static inline void
+txq_complete(struct txq *txq) __attribute__((always_inline));
+
static inline uint32_t
txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
__attribute__((always_inline));
@@ -110,7 +113,7 @@ static inline int
check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
static const uint8_t magic[] = "seen";
- volatile uint8_t (*buf)[sizeof(cqe->rsvd3)] = &cqe->rsvd3;
+ volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
int ret = 1;
unsigned int i;
@@ -173,8 +176,79 @@ check_cqe(volatile struct mlx5_cqe *cqe,
return 0;
}
-static inline void
-txq_complete(struct txq *txq) __attribute__((always_inline));
+/**
+ * Return the address of the WQE.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param wqe_ci
+ * WQE consumer index.
+ *
+ * @return
+ * WQE address.
+ */
+static inline uintptr_t *
+tx_mlx5_wqe(struct txq *txq, uint16_t ci)
+{
+ ci &= ((1 << txq->wqe_n) - 1);
+ return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
+}
+
+/**
+ * Return the size of tailroom of WQ.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param addr
+ * Pointer to tail of WQ.
+ *
+ * @return
+ * Size of tailroom.
+ */
+static inline size_t
+tx_mlx5_wq_tailroom(struct txq *txq, void *addr)
+{
+ size_t tailroom;
+ tailroom = (uintptr_t)(txq->wqes) +
+ (1 << txq->wqe_n) * MLX5_WQE_SIZE -
+ (uintptr_t)addr;
+ return tailroom;
+}
+
+/**
+ * Copy data to tailroom of circular queue.
+ *
+ * @param dst
+ * Pointer to destination.
+ * @param src
+ * Pointer to source.
+ * @param n
+ * Number of bytes to copy.
+ * @param base
+ * Pointer to head of queue.
+ * @param tailroom
+ * Size of tailroom from dst.
+ *
+ * @return
+ * Pointer after copied data.
+ */
+static inline void *
+mlx5_copy_to_wq(void *dst, const void *src, size_t n,
+ void *base, size_t tailroom)
+{
+ void *ret;
+
+ if (n > tailroom) {
+ rte_memcpy(dst, src, tailroom);
+ rte_memcpy(base, (void *)((uintptr_t)src + tailroom),
+ n - tailroom);
+ ret = (uint8_t *)base + n - tailroom;
+ } else {
+ rte_memcpy(dst, src, n);
+ ret = (n == tailroom) ? base : (uint8_t *)dst + n;
+ }
+ return ret;
+}
/**
* Manage TX completions.
@@ -194,7 +268,7 @@ txq_complete(struct txq *txq)
uint16_t elts_tail;
uint16_t cq_ci = txq->cq_ci;
volatile struct mlx5_cqe *cqe = NULL;
- volatile struct mlx5_wqe *wqe;
+ volatile struct mlx5_wqe_ctrl *ctrl;
do {
volatile struct mlx5_cqe *tmp;
@@ -220,9 +294,10 @@ txq_complete(struct txq *txq)
} while (1);
if (unlikely(cqe == NULL))
return;
- wqe = &(*txq->wqes)[ntohs(cqe->wqe_counter) &
- ((1 << txq->wqe_n) - 1)].hdr;
- elts_tail = wqe->ctrl[3];
+ txq->wqe_pi = ntohs(cqe->wqe_counter);
+ ctrl = (volatile struct mlx5_wqe_ctrl *)
+ tx_mlx5_wqe(txq, txq->wqe_pi);
+ elts_tail = ctrl->ctrl3;
assert(elts_tail < (1 << txq->wqe_n));
/* Free buffers. */
while (elts_free != elts_tail) {
@@ -326,37 +401,79 @@ mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
}
/**
- * Prefetch a CQE.
+ * DPDK callback to check the status of a tx descriptor.
*
- * @param txq
- * Pointer to TX queue structure.
- * @param cqe_ci
- * CQE consumer index.
+ * @param tx_queue
+ * The tx queue.
+ * @param[in] offset
+ * The index of the descriptor in the ring.
+ *
+ * @return
+ * The status of the tx descriptor.
*/
-static inline void
-tx_prefetch_cqe(struct txq *txq, uint16_t ci)
+int
+mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
- volatile struct mlx5_cqe *cqe;
+ struct txq *txq = tx_queue;
+ const unsigned int elts_n = 1 << txq->elts_n;
+ const unsigned int elts_cnt = elts_n - 1;
+ unsigned int used;
- cqe = &(*txq->cqes)[ci & ((1 << txq->cqe_n) - 1)];
- rte_prefetch0(cqe);
+ txq_complete(txq);
+ used = (txq->elts_head - txq->elts_tail) & elts_cnt;
+ if (offset < used)
+ return RTE_ETH_TX_DESC_FULL;
+ return RTE_ETH_TX_DESC_DONE;
}
/**
- * Prefetch a WQE.
+ * DPDK callback to check the status of a rx descriptor.
*
- * @param txq
- * Pointer to TX queue structure.
- * @param wqe_ci
- * WQE consumer index.
+ * @param rx_queue
+ * The rx queue.
+ * @param[in] offset
+ * The index of the descriptor in the ring.
+ *
+ * @return
+ * The status of the tx descriptor.
*/
-static inline void
-tx_prefetch_wqe(struct txq *txq, uint16_t ci)
+int
+mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
- volatile struct mlx5_wqe64 *wqe;
+ struct rxq *rxq = rx_queue;
+ struct rxq_zip *zip = &rxq->zip;
+ volatile struct mlx5_cqe *cqe;
+ const unsigned int cqe_n = (1 << rxq->cqe_n);
+ const unsigned int cqe_cnt = cqe_n - 1;
+ unsigned int cq_ci;
+ unsigned int used;
+
+ /* if we are processing a compressed cqe */
+ if (zip->ai) {
+ used = zip->cqe_cnt - zip->ca;
+ cq_ci = zip->cq_ci;
+ } else {
+ used = 0;
+ cq_ci = rxq->cq_ci;
+ }
+ cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
+ while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
+ int8_t op_own;
+ unsigned int n;
- wqe = &(*txq->wqes)[ci & ((1 << txq->wqe_n) - 1)];
- rte_prefetch0(wqe);
+ op_own = cqe->op_own;
+ if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
+ n = ntohl(cqe->byte_cnt);
+ else
+ n = 1;
+ cq_ci += n;
+ used += n;
+ cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
+ }
+ used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
+ if (offset < used)
+ return RTE_ETH_RX_DESC_DONE;
+ return RTE_ETH_RX_DESC_AVAIL;
}
/**
@@ -380,9 +497,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
const unsigned int elts_n = 1 << txq->elts_n;
unsigned int i = 0;
unsigned int j = 0;
+ unsigned int k = 0;
unsigned int max;
+ unsigned int max_inline = txq->max_inline;
+ const unsigned int inline_en = !!max_inline && txq->inline_en;
+ uint16_t max_wqe;
unsigned int comp;
- volatile struct mlx5_wqe *wqe = NULL;
+ volatile struct mlx5_wqe_v *wqe = NULL;
+ volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
unsigned int segs_n = 0;
struct rte_mbuf *buf = NULL;
uint8_t *raw;
@@ -390,25 +512,33 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (unlikely(!pkts_n))
return 0;
/* Prefetch first packet cacheline. */
- tx_prefetch_cqe(txq, txq->cq_ci);
- tx_prefetch_cqe(txq, txq->cq_ci + 1);
rte_prefetch0(*pkts);
/* Start processing. */
txq_complete(txq);
max = (elts_n - (elts_head - txq->elts_tail));
if (max > elts_n)
max -= elts_n;
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ if (unlikely(!max_wqe))
+ return 0;
do {
- volatile struct mlx5_wqe_data_seg *dseg = NULL;
+ volatile rte_v128u32_t *dseg = NULL;
uint32_t length;
unsigned int ds = 0;
+ unsigned int sg = 0; /* counter of additional segs attached. */
uintptr_t addr;
+ uint64_t naddr;
+ uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
+ uint16_t tso_header_sz = 0;
+ uint16_t ehdr;
+ uint8_t cs_flags = 0;
+ uint64_t tso = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
uint32_t total_length = 0;
#endif
/* first_seg */
- buf = *(pkts++);
+ buf = *pkts;
segs_n = buf->nb_segs;
/*
* Make sure there is enough room to store this packet and
@@ -419,104 +549,204 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
break;
max -= segs_n;
--segs_n;
- if (!segs_n)
- --pkts_n;
- wqe = &(*txq->wqes)[txq->wqe_ci &
- ((1 << txq->wqe_n) - 1)].hdr;
- tx_prefetch_wqe(txq, txq->wqe_ci + 1);
- if (pkts_n > 1)
- rte_prefetch0(*pkts);
+ if (unlikely(--max_wqe == 0))
+ break;
+ wqe = (volatile struct mlx5_wqe_v *)
+ tx_mlx5_wqe(txq, txq->wqe_ci);
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
+ if (pkts_n - i > 1)
+ rte_prefetch0(*(pkts + 1));
addr = rte_pktmbuf_mtod(buf, uintptr_t);
length = DATA_LEN(buf);
+ ehdr = (((uint8_t *)addr)[1] << 8) |
+ ((uint8_t *)addr)[0];
#ifdef MLX5_PMD_SOFT_COUNTERS
total_length = length;
#endif
- assert(length >= MLX5_WQE_DWORD_SIZE);
+ if (length < (MLX5_WQE_DWORD_SIZE + 2))
+ break;
/* Update element. */
(*txq->elts)[elts_head] = buf;
- elts_head = (elts_head + 1) & (elts_n - 1);
/* Prefetch next buffer data. */
- if (pkts_n > 1) {
- volatile void *pkt_addr;
-
- pkt_addr = rte_pktmbuf_mtod(*pkts, volatile void *);
- rte_prefetch0(pkt_addr);
- }
+ if (pkts_n - i > 1)
+ rte_prefetch0(
+ rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
/* Should we enable HW CKSUM offload */
if (buf->ol_flags &
(PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- wqe->eseg.cs_flags =
- MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
- } else {
- wqe->eseg.cs_flags = 0;
+ const uint64_t is_tunneled = buf->ol_flags &
+ (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN);
+
+ if (is_tunneled && txq->tunnel_en) {
+ cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
+ MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+ } else {
+ cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+ }
}
- raw = (uint8_t *)(uintptr_t)&wqe->eseg.inline_hdr[0];
- /* Start the know and common part of the WQE structure. */
- wqe->ctrl[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
- wqe->ctrl[2] = 0;
- wqe->ctrl[3] = 0;
- wqe->eseg.rsvd0 = 0;
- wqe->eseg.rsvd1 = 0;
- wqe->eseg.mss = 0;
- wqe->eseg.rsvd2 = 0;
- /* Start by copying the Ethernet Header. */
- memcpy((uint8_t *)raw, ((uint8_t *)addr), 16);
- length -= MLX5_WQE_DWORD_SIZE;
- addr += MLX5_WQE_DWORD_SIZE;
+ raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
/* Replace the Ethernet type by the VLAN if necessary. */
if (buf->ol_flags & PKT_TX_VLAN_PKT) {
uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
-
- memcpy((uint8_t *)(raw + MLX5_WQE_DWORD_SIZE -
- sizeof(vlan)),
- &vlan, sizeof(vlan));
- addr -= sizeof(vlan);
- length += sizeof(vlan);
+ unsigned int len = 2 * ETHER_ADDR_LEN - 2;
+
+ addr += 2;
+ length -= 2;
+ /* Copy Destination and source mac address. */
+ memcpy((uint8_t *)raw, ((uint8_t *)addr), len);
+ /* Copy VLAN. */
+ memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan));
+ /* Copy missing two bytes to end the DSeg. */
+ memcpy((uint8_t *)raw + len + sizeof(vlan),
+ ((uint8_t *)addr) + len, 2);
+ addr += len + 2;
+ length -= (len + 2);
+ } else {
+ memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2,
+ MLX5_WQE_DWORD_SIZE);
+ length -= pkt_inline_sz;
+ addr += pkt_inline_sz;
+ }
+ if (txq->tso_en) {
+ tso = buf->ol_flags & PKT_TX_TCP_SEG;
+ if (tso) {
+ uintptr_t end = (uintptr_t)
+ (((uintptr_t)txq->wqes) +
+ (1 << txq->wqe_n) *
+ MLX5_WQE_SIZE);
+ unsigned int copy_b;
+ uint8_t vlan_sz = (buf->ol_flags &
+ PKT_TX_VLAN_PKT) ? 4 : 0;
+ const uint64_t is_tunneled =
+ buf->ol_flags &
+ (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN);
+
+ tso_header_sz = buf->l2_len + vlan_sz +
+ buf->l3_len + buf->l4_len;
+
+ if (is_tunneled && txq->tunnel_en) {
+ tso_header_sz += buf->outer_l2_len +
+ buf->outer_l3_len;
+ cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ } else {
+ cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+ }
+ if (unlikely(tso_header_sz >
+ MLX5_MAX_TSO_HEADER))
+ break;
+ copy_b = tso_header_sz - pkt_inline_sz;
+ /* First seg must contain all headers. */
+ assert(copy_b <= length);
+ raw += MLX5_WQE_DWORD_SIZE;
+ if (copy_b &&
+ ((end - (uintptr_t)raw) > copy_b)) {
+ uint16_t n = (MLX5_WQE_DS(copy_b) -
+ 1 + 3) / 4;
+
+ if (unlikely(max_wqe < n))
+ break;
+ max_wqe -= n;
+ rte_memcpy((void *)raw,
+ (void *)addr, copy_b);
+ addr += copy_b;
+ length -= copy_b;
+ pkt_inline_sz += copy_b;
+ /*
+ * Another DWORD will be added
+ * in the inline part.
+ */
+ raw += MLX5_WQE_DS(copy_b) *
+ MLX5_WQE_DWORD_SIZE -
+ MLX5_WQE_DWORD_SIZE;
+ } else {
+ /* NOP WQE. */
+ wqe->ctrl = (rte_v128u32_t){
+ htonl(txq->wqe_ci << 8),
+ htonl(txq->qp_num_8s | 1),
+ 0,
+ 0,
+ };
+ ds = 1;
+ total_length = 0;
+ k++;
+ goto next_wqe;
+ }
+ }
}
/* Inline if enough room. */
- if (txq->max_inline != 0) {
- uintptr_t end =
- (uintptr_t)&(*txq->wqes)[1 << txq->wqe_n];
- uint16_t max_inline =
- txq->max_inline * RTE_CACHE_LINE_SIZE;
- uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE;
- uint16_t room;
+ if (inline_en || tso) {
+ uintptr_t end = (uintptr_t)
+ (((uintptr_t)txq->wqes) +
+ (1 << txq->wqe_n) * MLX5_WQE_SIZE);
+ unsigned int inline_room = max_inline *
+ RTE_CACHE_LINE_SIZE -
+ (pkt_inline_sz - 2);
+ uintptr_t addr_end = (addr + inline_room) &
+ ~(RTE_CACHE_LINE_SIZE - 1);
+ unsigned int copy_b = (addr_end > addr) ?
+ RTE_MIN((addr_end - addr), length) :
+ 0;
raw += MLX5_WQE_DWORD_SIZE;
- room = end - (uintptr_t)raw;
- if (room > max_inline) {
- uintptr_t addr_end = (addr + max_inline) &
- ~(RTE_CACHE_LINE_SIZE - 1);
- uint16_t copy_b = ((addr_end - addr) > length) ?
- length :
- (addr_end - addr);
-
+ if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
+ /*
+ * One Dseg remains in the current WQE. To
+ * keep the computation positive, it is
+ * removed after the bytes to Dseg conversion.
+ */
+ uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
+
+ if (unlikely(max_wqe < n))
+ break;
+ max_wqe -= n;
+ if (tso) {
+ uint32_t inl =
+ htonl(copy_b | MLX5_INLINE_SEG);
+
+ pkt_inline_sz =
+ MLX5_WQE_DS(tso_header_sz) *
+ MLX5_WQE_DWORD_SIZE;
+ rte_memcpy((void *)raw,
+ (void *)&inl, sizeof(inl));
+ raw += sizeof(inl);
+ pkt_inline_sz += sizeof(inl);
+ }
rte_memcpy((void *)raw, (void *)addr, copy_b);
addr += copy_b;
length -= copy_b;
pkt_inline_sz += copy_b;
- /* Sanity check. */
- assert(addr <= addr_end);
}
- /* Store the inlined packet size in the WQE. */
- wqe->eseg.inline_hdr_sz = htons(pkt_inline_sz);
/*
- * 2 DWORDs consumed by the WQE header + 1 DSEG +
+ * 2 DWORDs consumed by the WQE header + ETH segment +
* the size of the inline part of the packet.
*/
ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
if (length > 0) {
- dseg = (struct mlx5_wqe_data_seg *)
- ((uintptr_t)wqe +
- (ds * MLX5_WQE_DWORD_SIZE));
- if ((uintptr_t)dseg >= end)
- dseg = (struct mlx5_wqe_data_seg *)
- ((uintptr_t)&(*txq->wqes)[0]);
+ if (ds % (MLX5_WQE_SIZE /
+ MLX5_WQE_DWORD_SIZE) == 0) {
+ if (unlikely(--max_wqe == 0))
+ break;
+ dseg = (volatile rte_v128u32_t *)
+ tx_mlx5_wqe(txq, txq->wqe_ci +
+ ds / 4);
+ } else {
+ dseg = (volatile rte_v128u32_t *)
+ ((uintptr_t)wqe +
+ (ds * MLX5_WQE_DWORD_SIZE));
+ }
goto use_dseg;
} else if (!segs_n) {
goto next_pkt;
} else {
+ /* dseg will be advance as part of next_seg */
+ dseg = (volatile rte_v128u32_t *)
+ ((uintptr_t)wqe +
+ ((ds - 1) * MLX5_WQE_DWORD_SIZE));
goto next_seg;
}
} else {
@@ -524,16 +754,17 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* No inline has been done in the packet, only the
* Ethernet Header as been stored.
*/
- wqe->eseg.inline_hdr_sz = htons(MLX5_WQE_DWORD_SIZE);
- dseg = (struct mlx5_wqe_data_seg *)
+ dseg = (volatile rte_v128u32_t *)
((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
ds = 3;
use_dseg:
/* Add the remaining packet as a simple ds. */
- *dseg = (struct mlx5_wqe_data_seg) {
- .addr = htonll(addr),
- .byte_count = htonl(length),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ naddr = htonll(addr);
+ *dseg = (rte_v128u32_t){
+ htonl(length),
+ txq_mp2mr(txq, txq_mb2mp(buf)),
+ naddr,
+ naddr >> 32,
};
++ds;
if (!segs_n)
@@ -550,12 +781,12 @@ next_seg:
*/
assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE));
if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) {
- unsigned int n = (txq->wqe_ci + ((ds + 3) / 4)) &
- ((1 << txq->wqe_n) - 1);
-
- dseg = (struct mlx5_wqe_data_seg *)
- ((uintptr_t)&(*txq->wqes)[n]);
- tx_prefetch_wqe(txq, n + 1);
+ if (unlikely(--max_wqe == 0))
+ break;
+ dseg = (volatile rte_v128u32_t *)
+ tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4);
+ rte_prefetch0(tx_mlx5_wqe(txq,
+ txq->wqe_ci + ds / 4 + 1));
} else {
++dseg;
}
@@ -567,38 +798,73 @@ next_seg:
total_length += length;
#endif
/* Store segment information. */
- *dseg = (struct mlx5_wqe_data_seg) {
- .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)),
- .byte_count = htonl(length),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
+ *dseg = (rte_v128u32_t){
+ htonl(length),
+ txq_mp2mr(txq, txq_mb2mp(buf)),
+ naddr,
+ naddr >> 32,
};
- (*txq->elts)[elts_head] = buf;
elts_head = (elts_head + 1) & (elts_n - 1);
- ++j;
- --segs_n;
- if (segs_n)
+ (*txq->elts)[elts_head] = buf;
+ ++sg;
+ /* Advance counter only if all segs are successfully posted. */
+ if (sg < segs_n)
goto next_seg;
else
- --pkts_n;
+ j += sg;
next_pkt:
+ elts_head = (elts_head + 1) & (elts_n - 1);
+ ++pkts;
++i;
- wqe->ctrl[1] = htonl(txq->qp_num_8s | ds);
+ /* Initialize known and common part of the WQE structure. */
+ if (tso) {
+ wqe->ctrl = (rte_v128u32_t){
+ htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
+ htonl(txq->qp_num_8s | ds),
+ 0,
+ 0,
+ };
+ wqe->eseg = (rte_v128u32_t){
+ 0,
+ cs_flags | (htons(buf->tso_segsz) << 16),
+ 0,
+ (ehdr << 16) | htons(tso_header_sz),
+ };
+ } else {
+ wqe->ctrl = (rte_v128u32_t){
+ htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
+ htonl(txq->qp_num_8s | ds),
+ 0,
+ 0,
+ };
+ wqe->eseg = (rte_v128u32_t){
+ 0,
+ cs_flags,
+ 0,
+ (ehdr << 16) | htons(pkt_inline_sz),
+ };
+ }
+next_wqe:
txq->wqe_ci += (ds + 3) / 4;
+ /* Save the last successful WQE for completion request */
+ last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
txq->stats.obytes += total_length;
#endif
- } while (pkts_n);
+ } while (i < pkts_n);
/* Take a shortcut if nothing must be sent. */
- if (unlikely(i == 0))
+ if (unlikely((i + k) == 0))
return 0;
+ txq->elts_head = (txq->elts_head + i + j) & (elts_n - 1);
/* Check whether completion threshold has been reached. */
- comp = txq->elts_comp + i + j;
+ comp = txq->elts_comp + i + j + k;
if (comp >= MLX5_TX_COMP_THRESH) {
/* Request completion on last WQE. */
- wqe->ctrl[2] = htonl(8);
+ last_wqe->ctrl2 = htonl(8);
/* Save elts_head in unused "immediate" field of WQE. */
- wqe->ctrl[3] = elts_head;
+ last_wqe->ctrl3 = txq->elts_head;
txq->elts_comp = 0;
} else {
txq->elts_comp = comp;
@@ -608,8 +874,7 @@ next_pkt:
txq->stats.opackets += i;
#endif
/* Ring QP doorbell. */
- mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)wqe);
- txq->elts_head = elts_head;
+ mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
return i;
}
@@ -629,13 +894,13 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
(volatile struct mlx5_wqe_data_seg (*)[])
- (uintptr_t)&(*txq->wqes)[(idx + 1) & ((1 << txq->wqe_n) - 1)];
+ tx_mlx5_wqe(txq, idx + 1);
mpw->state = MLX5_MPW_STATE_OPENED;
mpw->pkts_n = 0;
mpw->len = length;
mpw->total_len = 0;
- mpw->wqe = (volatile struct mlx5_wqe *)&(*txq->wqes)[idx].hdr;
+ mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
mpw->wqe->eseg.mss = htons(length);
mpw->wqe->eseg.inline_hdr_sz = 0;
mpw->wqe->eseg.rsvd0 = 0;
@@ -677,8 +942,8 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
++txq->wqe_ci;
else
txq->wqe_ci += 2;
- tx_prefetch_wqe(txq, txq->wqe_ci);
- tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
}
/**
@@ -703,6 +968,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
unsigned int i = 0;
unsigned int j = 0;
unsigned int max;
+ uint16_t max_wqe;
unsigned int comp;
struct mlx5_mpw mpw = {
.state = MLX5_MPW_STATE_CLOSED,
@@ -711,14 +977,16 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (unlikely(!pkts_n))
return 0;
/* Prefetch first packet cacheline. */
- tx_prefetch_cqe(txq, txq->cq_ci);
- tx_prefetch_wqe(txq, txq->wqe_ci);
- tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
/* Start processing. */
txq_complete(txq);
max = (elts_n - (elts_head - txq->elts_tail));
if (max > elts_n)
max -= elts_n;
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ if (unlikely(!max_wqe))
+ return 0;
do {
struct rte_mbuf *buf = *(pkts++);
unsigned int elts_head_next;
@@ -752,6 +1020,14 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
(mpw.wqe->eseg.cs_flags != cs_flags)))
mlx5_mpw_close(txq, &mpw);
if (mpw.state == MLX5_MPW_STATE_CLOSED) {
+ /*
+ * Multi-Packet WQE consumes at most two WQE.
+ * mlx5_mpw_new() expects to be able to use such
+ * resources.
+ */
+ if (unlikely(max_wqe < 2))
+ break;
+ max_wqe -= 2;
mlx5_mpw_new(txq, &mpw, length);
mpw.wqe->eseg.cs_flags = cs_flags;
}
@@ -841,7 +1117,7 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
mpw->pkts_n = 0;
mpw->len = length;
mpw->total_len = 0;
- mpw->wqe = (volatile struct mlx5_wqe *)&(*txq->wqes)[idx].hdr;
+ mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
(txq->wqe_ci << 8) |
MLX5_OPCODE_TSO);
@@ -907,18 +1183,30 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
unsigned int i = 0;
unsigned int j = 0;
unsigned int max;
+ uint16_t max_wqe;
unsigned int comp;
unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
struct mlx5_mpw mpw = {
.state = MLX5_MPW_STATE_CLOSED,
};
+ /*
+ * Compute the maximum number of WQE which can be consumed by inline
+ * code.
+ * - 2 DSEG for:
+ * - 1 control segment,
+ * - 1 Ethernet segment,
+ * - N Dseg from the inline request.
+ */
+ const unsigned int wqe_inl_n =
+ ((2 * MLX5_WQE_DWORD_SIZE +
+ txq->max_inline * RTE_CACHE_LINE_SIZE) +
+ RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
if (unlikely(!pkts_n))
return 0;
/* Prefetch first packet cacheline. */
- tx_prefetch_cqe(txq, txq->cq_ci);
- tx_prefetch_wqe(txq, txq->wqe_ci);
- tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
/* Start processing. */
txq_complete(txq);
max = (elts_n - (elts_head - txq->elts_tail));
@@ -944,6 +1232,11 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
break;
max -= segs_n;
--pkts_n;
+ /*
+ * Compute max_wqe in case less WQE were consumed in previous
+ * iteration.
+ */
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
/* Should we enable HW CKSUM offload */
if (buf->ol_flags &
(PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
@@ -969,9 +1262,20 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
if (mpw.state == MLX5_MPW_STATE_CLOSED) {
if ((segs_n != 1) ||
(length > inline_room)) {
+ /*
+ * Multi-Packet WQE consumes at most two WQE.
+ * mlx5_mpw_new() expects to be able to use
+ * such resources.
+ */
+ if (unlikely(max_wqe < 2))
+ break;
+ max_wqe -= 2;
mlx5_mpw_new(txq, &mpw, length);
mpw.wqe->eseg.cs_flags = cs_flags;
} else {
+ if (unlikely(max_wqe < wqe_inl_n))
+ break;
+ max_wqe -= wqe_inl_n;
mlx5_mpw_inline_new(txq, &mpw, length);
mpw.wqe->eseg.cs_flags = cs_flags;
}
@@ -1019,14 +1323,15 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
addr = rte_pktmbuf_mtod(buf, uintptr_t);
(*txq->elts)[elts_head] = buf;
/* Maximum number of bytes before wrapping. */
- max = ((uintptr_t)&(*txq->wqes)[1 << txq->wqe_n] -
+ max = ((((uintptr_t)(txq->wqes)) +
+ (1 << txq->wqe_n) *
+ MLX5_WQE_SIZE) -
(uintptr_t)mpw.data.raw);
if (length > max) {
rte_memcpy((void *)(uintptr_t)mpw.data.raw,
(void *)addr,
max);
- mpw.data.raw =
- (volatile void *)&(*txq->wqes)[0];
+ mpw.data.raw = (volatile void *)txq->wqes;
rte_memcpy((void *)(uintptr_t)mpw.data.raw,
(void *)(addr + max),
length - max);
@@ -1035,12 +1340,13 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
rte_memcpy((void *)(uintptr_t)mpw.data.raw,
(void *)addr,
length);
- mpw.data.raw += length;
+
+ if (length == max)
+ mpw.data.raw =
+ (volatile void *)txq->wqes;
+ else
+ mpw.data.raw += length;
}
- if ((uintptr_t)mpw.data.raw ==
- (uintptr_t)&(*txq->wqes)[1 << txq->wqe_n])
- mpw.data.raw =
- (volatile void *)&(*txq->wqes)[0];
++mpw.pkts_n;
mpw.total_len += length;
++j;
@@ -1091,6 +1397,360 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
}
/**
+ * Open an Enhanced MPW session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ * @param length
+ * Packet length.
+ */
+static inline void
+mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
+{
+ uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
+
+ mpw->state = MLX5_MPW_ENHANCED_STATE_OPENED;
+ mpw->pkts_n = 0;
+ mpw->total_len = sizeof(struct mlx5_wqe);
+ mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
+ mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_ENHANCED_MPSW);
+ mpw->wqe->ctrl[2] = 0;
+ mpw->wqe->ctrl[3] = 0;
+ memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
+ if (unlikely(padding)) {
+ uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
+
+ /* Pad the first 2 DWORDs with zero-length inline header. */
+ *(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG);
+ *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
+ htonl(MLX5_INLINE_SEG);
+ mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
+ /* Start from the next WQEBB. */
+ mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
+ } else {
+ mpw->data.raw = (volatile void *)(mpw->wqe + 1);
+ }
+}
+
+/**
+ * Close an Enhanced MPW session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ *
+ * @return
+ * Number of consumed WQEs.
+ */
+static inline uint16_t
+mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
+{
+ uint16_t ret;
+
+ /* Store size in multiple of 16 bytes. Control and Ethernet segments
+ * count as 2.
+ */
+ mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len));
+ mpw->state = MLX5_MPW_STATE_CLOSED;
+ ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
+ txq->wqe_ci += ret;
+ return ret;
+}
+
+/**
+ * DPDK callback for TX with Enhanced MPW support.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct txq *txq = (struct txq *)dpdk_txq;
+ uint16_t elts_head = txq->elts_head;
+ const unsigned int elts_n = 1 << txq->elts_n;
+ unsigned int i = 0;
+ unsigned int j = 0;
+ unsigned int max_elts;
+ uint16_t max_wqe;
+ unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
+ unsigned int mpw_room = 0;
+ unsigned int inl_pad = 0;
+ uint32_t inl_hdr;
+ struct mlx5_mpw mpw = {
+ .state = MLX5_MPW_STATE_CLOSED,
+ };
+
+ if (unlikely(!pkts_n))
+ return 0;
+ /* Start processing. */
+ txq_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
+ if (max_elts > elts_n)
+ max_elts -= elts_n;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ if (unlikely(!max_wqe))
+ return 0;
+ do {
+ struct rte_mbuf *buf = *(pkts++);
+ unsigned int elts_head_next;
+ uintptr_t addr;
+ uint64_t naddr;
+ unsigned int n;
+ unsigned int do_inline = 0; /* Whether inline is possible. */
+ uint32_t length;
+ unsigned int segs_n = buf->nb_segs;
+ uint32_t cs_flags = 0;
+
+ /*
+ * Make sure there is enough room to store this packet and
+ * that one ring entry remains unused.
+ */
+ assert(segs_n);
+ if (max_elts - j < segs_n + 1)
+ break;
+ /* Do not bother with large packets MPW cannot handle. */
+ if (segs_n > MLX5_MPW_DSEG_MAX)
+ break;
+ /* Should we enable HW CKSUM offload. */
+ if (buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
+ cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ /* Retrieve packet information. */
+ length = PKT_LEN(buf);
+ /* Start new session if:
+ * - multi-segment packet
+ * - no space left even for a dseg
+ * - next packet can be inlined with a new WQE
+ * - cs_flag differs
+ * It can't be MLX5_MPW_STATE_OPENED as always have a single
+ * segmented packet.
+ */
+ if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
+ if ((segs_n != 1) ||
+ (inl_pad + sizeof(struct mlx5_wqe_data_seg) >
+ mpw_room) ||
+ (length <= txq->inline_max_packet_sz &&
+ inl_pad + sizeof(inl_hdr) + length >
+ mpw_room) ||
+ (mpw.wqe->eseg.cs_flags != cs_flags))
+ max_wqe -= mlx5_empw_close(txq, &mpw);
+ }
+ if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
+ if (unlikely(segs_n != 1)) {
+ /* Fall back to legacy MPW.
+ * A MPW session consumes 2 WQEs at most to
+ * include MLX5_MPW_DSEG_MAX pointers.
+ */
+ if (unlikely(max_wqe < 2))
+ break;
+ mlx5_mpw_new(txq, &mpw, length);
+ } else {
+ /* In Enhanced MPW, inline as much as the budget
+ * is allowed. The remaining space is to be
+ * filled with dsegs. If the title WQEBB isn't
+ * padded, it will have 2 dsegs there.
+ */
+ mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
+ (max_inline ? max_inline :
+ pkts_n * MLX5_WQE_DWORD_SIZE) +
+ MLX5_WQE_SIZE);
+ if (unlikely(max_wqe * MLX5_WQE_SIZE <
+ mpw_room))
+ break;
+ /* Don't pad the title WQEBB to not waste WQ. */
+ mlx5_empw_new(txq, &mpw, 0);
+ mpw_room -= mpw.total_len;
+ inl_pad = 0;
+ do_inline =
+ length <= txq->inline_max_packet_sz &&
+ sizeof(inl_hdr) + length <= mpw_room &&
+ !txq->mpw_hdr_dseg;
+ }
+ mpw.wqe->eseg.cs_flags = cs_flags;
+ } else {
+ /* Evaluate whether the next packet can be inlined.
+ * Inlininig is possible when:
+ * - length is less than configured value
+ * - length fits for remaining space
+ * - not required to fill the title WQEBB with dsegs
+ */
+ do_inline =
+ length <= txq->inline_max_packet_sz &&
+ inl_pad + sizeof(inl_hdr) + length <=
+ mpw_room &&
+ (!txq->mpw_hdr_dseg ||
+ mpw.total_len >= MLX5_WQE_SIZE);
+ }
+ /* Multi-segment packets must be alone in their MPW. */
+ assert((segs_n == 1) || (mpw.pkts_n == 0));
+ if (unlikely(mpw.state == MLX5_MPW_STATE_OPENED)) {
+#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
+ length = 0;
+#endif
+ do {
+ volatile struct mlx5_wqe_data_seg *dseg;
+
+ elts_head_next =
+ (elts_head + 1) & (elts_n - 1);
+ assert(buf);
+ (*txq->elts)[elts_head] = buf;
+ dseg = mpw.data.dseg[mpw.pkts_n];
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ *dseg = (struct mlx5_wqe_data_seg){
+ .byte_count = htonl(DATA_LEN(buf)),
+ .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ .addr = htonll(addr),
+ };
+ elts_head = elts_head_next;
+#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
+ length += DATA_LEN(buf);
+#endif
+ buf = buf->next;
+ ++j;
+ ++mpw.pkts_n;
+ } while (--segs_n);
+ /* A multi-segmented packet takes one MPW session.
+ * TODO: Pack more multi-segmented packets if possible.
+ */
+ mlx5_mpw_close(txq, &mpw);
+ if (mpw.pkts_n < 3)
+ max_wqe--;
+ else
+ max_wqe -= 2;
+ } else if (do_inline) {
+ /* Inline packet into WQE. */
+ unsigned int max;
+
+ assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
+ assert(length == DATA_LEN(buf));
+ inl_hdr = htonl(length | MLX5_INLINE_SEG);
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ mpw.data.raw = (volatile void *)
+ ((uintptr_t)mpw.data.raw + inl_pad);
+ max = tx_mlx5_wq_tailroom(txq,
+ (void *)(uintptr_t)mpw.data.raw);
+ /* Copy inline header. */
+ mpw.data.raw = (volatile void *)
+ mlx5_copy_to_wq(
+ (void *)(uintptr_t)mpw.data.raw,
+ &inl_hdr,
+ sizeof(inl_hdr),
+ (void *)(uintptr_t)txq->wqes,
+ max);
+ max = tx_mlx5_wq_tailroom(txq,
+ (void *)(uintptr_t)mpw.data.raw);
+ /* Copy packet data. */
+ mpw.data.raw = (volatile void *)
+ mlx5_copy_to_wq(
+ (void *)(uintptr_t)mpw.data.raw,
+ (void *)addr,
+ length,
+ (void *)(uintptr_t)txq->wqes,
+ max);
+ ++mpw.pkts_n;
+ mpw.total_len += (inl_pad + sizeof(inl_hdr) + length);
+ /* No need to get completion as the entire packet is
+ * copied to WQ. Free the buf right away.
+ */
+ elts_head_next = elts_head;
+ rte_pktmbuf_free_seg(buf);
+ mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
+ /* Add pad in the next packet if any. */
+ inl_pad = (((uintptr_t)mpw.data.raw +
+ (MLX5_WQE_DWORD_SIZE - 1)) &
+ ~(MLX5_WQE_DWORD_SIZE - 1)) -
+ (uintptr_t)mpw.data.raw;
+ } else {
+ /* No inline. Load a dseg of packet pointer. */
+ volatile rte_v128u32_t *dseg;
+
+ assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
+ assert((inl_pad + sizeof(*dseg)) <= mpw_room);
+ assert(length == DATA_LEN(buf));
+ if (!tx_mlx5_wq_tailroom(txq,
+ (void *)((uintptr_t)mpw.data.raw
+ + inl_pad)))
+ dseg = (volatile void *)txq->wqes;
+ else
+ dseg = (volatile void *)
+ ((uintptr_t)mpw.data.raw +
+ inl_pad);
+ elts_head_next = (elts_head + 1) & (elts_n - 1);
+ (*txq->elts)[elts_head] = buf;
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
+ rte_prefetch2((void *)(addr +
+ n * RTE_CACHE_LINE_SIZE));
+ naddr = htonll(addr);
+ *dseg = (rte_v128u32_t) {
+ htonl(length),
+ txq_mp2mr(txq, txq_mb2mp(buf)),
+ naddr,
+ naddr >> 32,
+ };
+ mpw.data.raw = (volatile void *)(dseg + 1);
+ mpw.total_len += (inl_pad + sizeof(*dseg));
+ ++j;
+ ++mpw.pkts_n;
+ mpw_room -= (inl_pad + sizeof(*dseg));
+ inl_pad = 0;
+ }
+ elts_head = elts_head_next;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent bytes counter. */
+ txq->stats.obytes += length;
+#endif
+ ++i;
+ } while (i < pkts_n);
+ /* Take a shortcut if nothing must be sent. */
+ if (unlikely(i == 0))
+ return 0;
+ /* Check whether completion threshold has been reached. */
+ if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH ||
+ (uint16_t)(txq->wqe_ci - txq->mpw_comp) >=
+ (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
+ volatile struct mlx5_wqe *wqe = mpw.wqe;
+
+ /* Request completion on last WQE. */
+ wqe->ctrl[2] = htonl(8);
+ /* Save elts_head in unused "immediate" field of WQE. */
+ wqe->ctrl[3] = elts_head;
+ txq->elts_comp = 0;
+ txq->mpw_comp = txq->wqe_ci;
+ txq->cq_pi++;
+ } else {
+ txq->elts_comp += j;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent packets counter. */
+ txq->stats.opackets += i;
+#endif
+ if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
+ mlx5_empw_close(txq, &mpw);
+ else if (mpw.state == MLX5_MPW_STATE_OPENED)
+ mlx5_mpw_close(txq, &mpw);
+ /* Ring QP doorbell. */
+ mlx5_tx_dbrec(txq, mpw.wqe);
+ txq->elts_head = elts_head;
+ return i;
+}
+
+/**
* Translate RX completion flags to packet type.
*
* @param[in] cqe
@@ -1153,6 +1813,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
struct rxq_zip *zip = &rxq->zip;
uint16_t cqe_n = cqe_cnt + 1;
int len = 0;
+ uint16_t idx, end;
/* Process compressed data in the CQE and mini arrays. */
if (zip->ai) {
@@ -1163,6 +1824,14 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
len = ntohl((*mc)[zip->ai & 7].byte_cnt);
*rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
if ((++zip->ai & 7) == 0) {
+ /* Invalidate consumed CQEs */
+ idx = zip->ca;
+ end = zip->na;
+ while (idx != end) {
+ (*rxq->cqes)[idx & cqe_cnt].op_own =
+ MLX5_CQE_INVALIDATE;
+ ++idx;
+ }
/*
* Increment consumer index to skip the number of
* CQEs consumed. Hardware leaves holes in the CQ
@@ -1172,8 +1841,9 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
zip->na += 8;
}
if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
- uint16_t idx = rxq->cq_ci;
- uint16_t end = zip->cq_ci;
+ /* Invalidate the rest */
+ idx = zip->ca;
+ end = zip->cq_ci;
while (idx != end) {
(*rxq->cqes)[idx & cqe_cnt].op_own =
@@ -1209,7 +1879,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
* special case the second one is located 7 CQEs after
* the initial CQE instead of 8 for subsequent ones.
*/
- zip->ca = rxq->cq_ci & cqe_cnt;
+ zip->ca = rxq->cq_ci;
zip->na = zip->ca + 7;
/* Compute the next non compressed CQE. */
--rxq->cq_ci;
@@ -1218,6 +1888,13 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
len = ntohl((*mc)[0].byte_cnt);
*rss_hash = ntohl((*mc)[0].rx_hash_result);
zip->ai = 1;
+ /* Prefetch all the entries to be invalidated */
+ idx = zip->ca;
+ end = zip->cq_ci;
+ while (idx != end) {
+ rte_prefetch0(&(*rxq->cqes)[(idx) & cqe_cnt]);
+ ++idx;
+ }
} else {
len = ntohl(cqe->byte_cnt);
*rss_hash = ntohl(cqe->rx_hash_res);
@@ -1290,7 +1967,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
unsigned int i = 0;
unsigned int rq_ci = rxq->rq_ci << sges_n;
- int len; /* keep its value across iterations. */
+ int len = 0; /* keep its value across iterations. */
while (pkts_n) {
unsigned int idx = rq_ci & wqe_cnt;
@@ -1317,8 +1994,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
while (pkt != seg) {
assert(pkt != (*rxq->elts)[idx]);
rep = NEXT(pkt);
- rte_mbuf_refcnt_set(pkt, 0);
- __rte_mbuf_raw_free(pkt);
+ NEXT(pkt) = NULL;
+ NB_SEGS(pkt) = 1;
+ rte_mbuf_raw_free(pkt);
pkt = rep;
}
break;
@@ -1328,14 +2006,12 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
&rss_hash_res);
if (!len) {
- rte_mbuf_refcnt_set(rep, 0);
- __rte_mbuf_raw_free(rep);
+ rte_mbuf_raw_free(rep);
break;
}
if (unlikely(len == -1)) {
/* RX error, packet is likely too large. */
- rte_mbuf_refcnt_set(rep, 0);
- __rte_mbuf_raw_free(rep);
+ rte_mbuf_raw_free(rep);
++rxq->stats.idropped;
goto skip;
}
@@ -1348,23 +2024,31 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
pkt->hash.rss = rss_hash_res;
pkt->ol_flags = PKT_RX_RSS_HASH;
}
- if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip |
- rxq->crc_present) {
- if (rxq->csum) {
- pkt->packet_type =
- rxq_cq_to_pkt_type(cqe);
- pkt->ol_flags |=
- rxq_cq_to_ol_flags(rxq, cqe);
- }
- if (cqe->hdr_type_etc &
- MLX5_CQE_VLAN_STRIPPED) {
- pkt->ol_flags |= PKT_RX_VLAN_PKT |
- PKT_RX_VLAN_STRIPPED;
- pkt->vlan_tci = ntohs(cqe->vlan_info);
+ if (rxq->mark &&
+ MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
+ pkt->ol_flags |= PKT_RX_FDIR;
+ if (cqe->sop_drop_qpn !=
+ htonl(MLX5_FLOW_MARK_DEFAULT)) {
+ uint32_t mark = cqe->sop_drop_qpn;
+
+ pkt->ol_flags |= PKT_RX_FDIR_ID;
+ pkt->hash.fdir.hi =
+ mlx5_flow_mark_get(mark);
}
- if (rxq->crc_present)
- len -= ETHER_CRC_LEN;
}
+ if (rxq->csum | rxq->csum_l2tun) {
+ pkt->packet_type = rxq_cq_to_pkt_type(cqe);
+ pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
+ }
+ if (rxq->vlan_strip &&
+ (cqe->hdr_type_etc &
+ htons(MLX5_CQE_VLAN_STRIPPED))) {
+ pkt->ol_flags |= PKT_RX_VLAN_PKT |
+ PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci = ntohs(cqe->vlan_info);
+ }
+ if (rxq->crc_present)
+ len -= ETHER_CRC_LEN;
PKT_LEN(pkt) = len;
}
DATA_LEN(rep) = DATA_LEN(seg);
@@ -1466,3 +2150,76 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
(void)pkts_n;
return 0;
}
+
+/**
+ * DPDK callback for rx queue interrupt enable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * RX queue number
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+int
+mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#ifdef HAVE_UPDATE_CQ_CI
+ struct priv *priv = mlx5_get_priv(dev);
+ struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
+ struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct ibv_cq *cq = rxq_ctrl->cq;
+ uint16_t ci = rxq->cq_ci;
+ int ret = 0;
+
+ ibv_mlx5_exp_update_cq_ci(cq, ci);
+ ret = ibv_req_notify_cq(cq, 0);
+#else
+ int ret = -1;
+ (void)dev;
+ (void)rx_queue_id;
+#endif
+ if (ret)
+ WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
+ return ret;
+}
+
+/**
+ * DPDK callback for rx queue interrupt disable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * RX queue number
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+int
+mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#ifdef HAVE_UPDATE_CQ_CI
+ struct priv *priv = mlx5_get_priv(dev);
+ struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
+ struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct ibv_cq *cq = rxq_ctrl->cq;
+ struct ibv_cq *ev_cq;
+ void *ev_ctx;
+ int ret = 0;
+
+ ret = ibv_get_cq_event(cq->channel, &ev_cq, &ev_ctx);
+ if (ret || ev_cq != cq)
+ ret = -1;
+ else
+ ibv_ack_cq_events(cq, 1);
+#else
+ int ret = -1;
+ (void)dev;
+ (void)rx_queue_id;
+#endif
+ if (ret)
+ WARN("unable to disable interrupt on rx queue %d",
+ rx_queue_id);
+ return ret;
+}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 909d80e6..8db8eb14 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -114,7 +114,8 @@ struct rxq {
unsigned int elts_n:4; /* Log 2 of Mbufs. */
unsigned int port_id:8;
unsigned int rss_hash:1; /* RSS hash result is enabled. */
- unsigned int :9; /* Remaining bits. */
+ unsigned int mark:1; /* Marked flow available on the queue. */
+ unsigned int :8; /* Remaining bits. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t rq_ci;
@@ -132,11 +133,9 @@ struct rxq_ctrl {
struct priv *priv; /* Back pointer to private data. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_exp_wq *wq; /* Work Queue. */
- struct ibv_exp_res_domain *rd; /* Resource Domain. */
struct fdir_queue *fdir_queue; /* Flow director queue. */
struct ibv_mr *mr; /* Memory Region (for mp). */
- struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
- struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */
+ struct ibv_comp_channel *channel;
unsigned int socket; /* CPU socket ID for allocations. */
struct rxq rxq; /* Data path structure. */
};
@@ -246,15 +245,24 @@ struct txq {
uint16_t elts_head; /* Current index in (*elts)[]. */
uint16_t elts_tail; /* First element awaiting completion. */
uint16_t elts_comp; /* Counter since last completion request. */
+ uint16_t mpw_comp; /* WQ index since last completion request. */
uint16_t cq_ci; /* Consumer index for completion queue. */
+ uint16_t cq_pi; /* Producer index for completion queue. */
uint16_t wqe_ci; /* Consumer index for work queue. */
+ uint16_t wqe_pi; /* Producer index for work queue. */
uint16_t elts_n:4; /* (*elts)[] length (in log2). */
uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
uint16_t wqe_n:4; /* Number of of WQ elements (in log2). */
+ uint16_t inline_en:1; /* When set inline is enabled. */
+ uint16_t tso_en:1; /* When set hardware TSO is enabled. */
+ uint16_t tunnel_en:1;
+ /* When set TX offload for tunneled packets are supported. */
+ uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
+ uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
- volatile struct mlx5_wqe64 (*wqes)[]; /* Work queue. */
+ volatile void *wqes; /* Work queue (use volatile to write into). */
volatile uint32_t *qp_db; /* Work queue doorbell. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
volatile void *bf_reg; /* Blueflame register. */
@@ -272,9 +280,6 @@ struct txq_ctrl {
struct priv *priv; /* Back pointer to private data. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
- struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
- struct ibv_exp_cq_family *if_cq; /* CQ interface. */
- struct ibv_exp_res_domain *rd; /* Resource Domain. */
unsigned int socket; /* CPU socket ID for allocations. */
struct txq txq; /* Data path structure. */
};
@@ -293,6 +298,10 @@ int priv_create_hash_rxqs(struct priv *);
void priv_destroy_hash_rxqs(struct priv *);
int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
int priv_rehash_flows(struct priv *);
+int priv_intr_efd_enable(struct priv *priv);
+void priv_intr_efd_disable(struct priv *priv);
+int priv_create_intr_vec(struct priv *priv);
+void priv_destroy_intr_vec(struct priv *priv);
void rxq_cleanup(struct rxq_ctrl *);
int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *);
int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t,
@@ -318,9 +327,14 @@ uint16_t mlx5_tx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
+uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
+int mlx5_rx_descriptor_status(void *, uint16_t);
+int mlx5_tx_descriptor_status(void *, uint16_t);
+int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
/* mlx5_mr.c */
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index f2b5781a..703f48c3 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -31,11 +31,16 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/sockios.h>
+#include <linux/ethtool.h>
+
/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_ethdev.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
@@ -44,6 +49,274 @@
#include "mlx5_rxtx.h"
#include "mlx5_defs.h"
+struct mlx5_counter_ctrl {
+ /* Name of the counter. */
+ char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
+ /* Name of the counter on the device table. */
+ char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
+};
+
+static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
+ {
+ .dpdk_name = "rx_port_unicast_bytes",
+ .ctr_name = "rx_vport_unicast_bytes",
+ },
+ {
+ .dpdk_name = "rx_port_multicast_bytes",
+ .ctr_name = "rx_vport_multicast_bytes",
+ },
+ {
+ .dpdk_name = "rx_port_broadcast_bytes",
+ .ctr_name = "rx_vport_broadcast_bytes",
+ },
+ {
+ .dpdk_name = "rx_port_unicast_packets",
+ .ctr_name = "rx_vport_unicast_packets",
+ },
+ {
+ .dpdk_name = "rx_port_multicast_packets",
+ .ctr_name = "rx_vport_multicast_packets",
+ },
+ {
+ .dpdk_name = "rx_port_broadcast_packets",
+ .ctr_name = "rx_vport_broadcast_packets",
+ },
+ {
+ .dpdk_name = "tx_port_unicast_bytes",
+ .ctr_name = "tx_vport_unicast_bytes",
+ },
+ {
+ .dpdk_name = "tx_port_multicast_bytes",
+ .ctr_name = "tx_vport_multicast_bytes",
+ },
+ {
+ .dpdk_name = "tx_port_broadcast_bytes",
+ .ctr_name = "tx_vport_broadcast_bytes",
+ },
+ {
+ .dpdk_name = "tx_port_unicast_packets",
+ .ctr_name = "tx_vport_unicast_packets",
+ },
+ {
+ .dpdk_name = "tx_port_multicast_packets",
+ .ctr_name = "tx_vport_multicast_packets",
+ },
+ {
+ .dpdk_name = "tx_port_broadcast_packets",
+ .ctr_name = "tx_vport_broadcast_packets",
+ },
+ {
+ .dpdk_name = "rx_wqe_err",
+ .ctr_name = "rx_wqe_err",
+ },
+ {
+ .dpdk_name = "rx_crc_errors_phy",
+ .ctr_name = "rx_crc_errors_phy",
+ },
+ {
+ .dpdk_name = "rx_in_range_len_errors_phy",
+ .ctr_name = "rx_in_range_len_errors_phy",
+ },
+ {
+ .dpdk_name = "rx_symbol_err_phy",
+ .ctr_name = "rx_symbol_err_phy",
+ },
+ {
+ .dpdk_name = "tx_errors_phy",
+ .ctr_name = "tx_errors_phy",
+ },
+ {
+ .dpdk_name = "rx_out_of_buffer",
+ .ctr_name = "out_of_buffer",
+ },
+};
+
+static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
+
+/**
+ * Read device counters table.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[out] stats
+ * Counters table output buffer.
+ *
+ * @return
+ * 0 on success and stats is filled, negative on error.
+ */
+static int
+priv_read_dev_counters(struct priv *priv, uint64_t *stats)
+{
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ unsigned int i;
+ struct ifreq ifr;
+ unsigned int stats_sz = (xstats_ctrl->stats_n * sizeof(uint64_t)) +
+ sizeof(struct ethtool_stats);
+ struct ethtool_stats et_stats[(stats_sz + (
+ sizeof(struct ethtool_stats) - 1)) /
+ sizeof(struct ethtool_stats)];
+
+ et_stats->cmd = ETHTOOL_GSTATS;
+ et_stats->n_stats = xstats_ctrl->stats_n;
+ ifr.ifr_data = (caddr_t)et_stats;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to read statistic values from device");
+ return -1;
+ }
+ for (i = 0; i != xstats_n; ++i) {
+ if (priv_is_ib_cntr(mlx5_counters_init[i].ctr_name))
+ priv_get_cntr_sysfs(priv,
+ mlx5_counters_init[i].ctr_name,
+ &stats[i]);
+ else
+ stats[i] = (uint64_t)
+ et_stats->data[xstats_ctrl->dev_table_idx[i]];
+ }
+ return 0;
+}
+
+/**
+ * Query the number of statistics provided by ETHTOOL.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Number of statistics on success, -1 on error.
+ */
+static int
+priv_ethtool_get_stats_n(struct priv *priv) {
+ struct ethtool_drvinfo drvinfo;
+ struct ifreq ifr;
+
+ drvinfo.cmd = ETHTOOL_GDRVINFO;
+ ifr.ifr_data = (caddr_t)&drvinfo;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to query number of statistics");
+ return -1;
+ }
+ return drvinfo.n_stats;
+}
+
+/**
+ * Init the structures to read device counters.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+priv_xstats_init(struct priv *priv)
+{
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ unsigned int i;
+ unsigned int j;
+ struct ifreq ifr;
+ struct ethtool_gstrings *strings = NULL;
+ unsigned int dev_stats_n;
+ unsigned int str_sz;
+
+ dev_stats_n = priv_ethtool_get_stats_n(priv);
+ if (dev_stats_n < 1) {
+ WARN("no extended statistics available");
+ return;
+ }
+ xstats_ctrl->stats_n = dev_stats_n;
+ /* Allocate memory to grab stat names and values. */
+ str_sz = dev_stats_n * ETH_GSTRING_LEN;
+ strings = (struct ethtool_gstrings *)
+ rte_malloc("xstats_strings",
+ str_sz + sizeof(struct ethtool_gstrings), 0);
+ if (!strings) {
+ WARN("unable to allocate memory for xstats");
+ return;
+ }
+ strings->cmd = ETHTOOL_GSTRINGS;
+ strings->string_set = ETH_SS_STATS;
+ strings->len = dev_stats_n;
+ ifr.ifr_data = (caddr_t)strings;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to get statistic names");
+ goto free;
+ }
+ for (j = 0; j != xstats_n; ++j)
+ xstats_ctrl->dev_table_idx[j] = dev_stats_n;
+ for (i = 0; i != dev_stats_n; ++i) {
+ const char *curr_string = (const char *)
+ &strings->data[i * ETH_GSTRING_LEN];
+
+ for (j = 0; j != xstats_n; ++j) {
+ if (!strcmp(mlx5_counters_init[j].ctr_name,
+ curr_string)) {
+ xstats_ctrl->dev_table_idx[j] = i;
+ break;
+ }
+ }
+ }
+ for (j = 0; j != xstats_n; ++j) {
+ if (priv_is_ib_cntr(mlx5_counters_init[j].ctr_name))
+ continue;
+ if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) {
+ WARN("counter \"%s\" is not recognized",
+ mlx5_counters_init[j].dpdk_name);
+ goto free;
+ }
+ }
+ /* Copy to base at first time. */
+ assert(xstats_n <= MLX5_MAX_XSTATS);
+ priv_read_dev_counters(priv, xstats_ctrl->base);
+free:
+ rte_free(strings);
+}
+
+/**
+ * Get device extended statistics.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[out] stats
+ * Pointer to rte extended stats table.
+ *
+ * @return
+ * Number of extended stats on success and stats is filled,
+ * negative on error.
+ */
+static int
+priv_xstats_get(struct priv *priv, struct rte_eth_xstat *stats)
+{
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ unsigned int i;
+ unsigned int n = xstats_n;
+ uint64_t counters[n];
+
+ if (priv_read_dev_counters(priv, counters) < 0)
+ return -1;
+ for (i = 0; i != xstats_n; ++i) {
+ stats[i].id = i;
+ stats[i].value = (counters[i] - xstats_ctrl->base[i]);
+ }
+ return n;
+}
+
+/**
+ * Reset device extended statistics.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_xstats_reset(struct priv *priv)
+{
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ unsigned int i;
+ unsigned int n = xstats_n;
+ uint64_t counters[n];
+
+ if (priv_read_dev_counters(priv, counters) < 0)
+ return;
+ for (i = 0; i != n; ++i)
+ xstats_ctrl->base[i] = counters[i];
+}
+
/**
* DPDK callback to get device statistics.
*
@@ -142,3 +415,95 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
#endif
priv_unlock(priv);
}
+
+/**
+ * DPDK callback to get extended device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] stats
+ * Stats table output buffer.
+ * @param n
+ * The size of the stats table.
+ *
+ * @return
+ * Number of xstats on success, negative on failure.
+ */
+int
+mlx5_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *stats, unsigned int n)
+{
+ struct priv *priv = mlx5_get_priv(dev);
+ int ret = xstats_n;
+
+ if (n >= xstats_n && stats) {
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ int stats_n;
+
+ priv_lock(priv);
+ stats_n = priv_ethtool_get_stats_n(priv);
+ if (stats_n < 0)
+ return -1;
+ if (xstats_ctrl->stats_n != stats_n)
+ priv_xstats_init(priv);
+ ret = priv_xstats_get(priv, stats);
+ priv_unlock(priv);
+ }
+ return ret;
+}
+
+/**
+ * DPDK callback to clear device extended statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx5_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct priv *priv = mlx5_get_priv(dev);
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ int stats_n;
+
+ priv_lock(priv);
+ stats_n = priv_ethtool_get_stats_n(priv);
+ if (stats_n < 0)
+ return;
+ if (xstats_ctrl->stats_n != stats_n)
+ priv_xstats_init(priv);
+ priv_xstats_reset(priv);
+ priv_unlock(priv);
+}
+
+/**
+ * DPDK callback to retrieve names of extended device statistics
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] xstats_names
+ * Buffer to insert names into.
+ * @param n
+ * Number of names.
+ *
+ * @return
+ * Number of xstats names.
+ */
+int
+mlx5_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned int n)
+{
+ struct priv *priv = mlx5_get_priv(dev);
+ unsigned int i;
+
+ if (n >= xstats_n && xstats_names) {
+ priv_lock(priv);
+ for (i = 0; i != xstats_n; ++i) {
+ strncpy(xstats_names[i].name,
+ mlx5_counters_init[i].dpdk_name,
+ RTE_ETH_XSTATS_NAME_SIZE);
+ xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
+ }
+ priv_unlock(priv);
+ }
+ return xstats_n;
+}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index d4dccd88..8c5aa691 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -82,14 +82,33 @@ mlx5_dev_start(struct rte_eth_dev *dev)
ERROR("%p: an error occurred while configuring hash RX queues:"
" %s",
(void *)priv, strerror(err));
- /* Rollback. */
- priv_special_flow_disable_all(priv);
- priv_mac_addrs_disable(priv);
- priv_destroy_hash_rxqs(priv);
+ goto error;
}
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE)
priv_fdir_enable(priv);
+ err = priv_flow_start(priv);
+ if (err) {
+ priv->started = 0;
+ ERROR("%p: an error occurred while configuring flows:"
+ " %s",
+ (void *)priv, strerror(err));
+ goto error;
+ }
priv_dev_interrupt_handler_install(priv, dev);
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ err = priv_intr_efd_enable(priv);
+ if (!err)
+ err = priv_create_intr_vec(priv);
+ }
+ priv_xstats_init(priv);
+ priv_unlock(priv);
+ return 0;
+error:
+ /* Rollback. */
+ priv_special_flow_disable_all(priv);
+ priv_mac_addrs_disable(priv);
+ priv_destroy_hash_rxqs(priv);
+ priv_flow_stop(priv);
priv_unlock(priv);
return -err;
}
@@ -120,7 +139,12 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
priv_mac_addrs_disable(priv);
priv_destroy_hash_rxqs(priv);
priv_fdir_disable(priv);
+ priv_flow_stop(priv);
priv_dev_interrupt_handler_uninstall(priv, dev);
+ if (priv->dev->data->dev_conf.intr_conf.rxq) {
+ priv_destroy_intr_vec(priv);
+ priv_intr_efd_disable(priv);
+ }
priv->started = 0;
priv_unlock(priv);
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 439908fc..de7e28be 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -82,7 +82,9 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
for (i = 0; (i != elts_n); ++i)
(*txq_ctrl->txq.elts)[i] = NULL;
for (i = 0; (i != (1u << txq_ctrl->txq.wqe_n)); ++i) {
- volatile struct mlx5_wqe64 *wqe = &(*txq_ctrl->txq.wqes)[i];
+ volatile struct mlx5_wqe64 *wqe =
+ (volatile struct mlx5_wqe64 *)
+ txq_ctrl->txq.wqes + i;
memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe));
}
@@ -138,48 +140,14 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
void
txq_cleanup(struct txq_ctrl *txq_ctrl)
{
- struct ibv_exp_release_intf_params params;
size_t i;
DEBUG("cleaning up %p", (void *)txq_ctrl);
txq_free_elts(txq_ctrl);
- if (txq_ctrl->if_qp != NULL) {
- assert(txq_ctrl->priv != NULL);
- assert(txq_ctrl->priv->ctx != NULL);
- assert(txq_ctrl->qp != NULL);
- params = (struct ibv_exp_release_intf_params){
- .comp_mask = 0,
- };
- claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
- txq_ctrl->if_qp,
- &params));
- }
- if (txq_ctrl->if_cq != NULL) {
- assert(txq_ctrl->priv != NULL);
- assert(txq_ctrl->priv->ctx != NULL);
- assert(txq_ctrl->cq != NULL);
- params = (struct ibv_exp_release_intf_params){
- .comp_mask = 0,
- };
- claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
- txq_ctrl->if_cq,
- &params));
- }
if (txq_ctrl->qp != NULL)
claim_zero(ibv_destroy_qp(txq_ctrl->qp));
if (txq_ctrl->cq != NULL)
claim_zero(ibv_destroy_cq(txq_ctrl->cq));
- if (txq_ctrl->rd != NULL) {
- struct ibv_exp_destroy_res_domain_attr attr = {
- .comp_mask = 0,
- };
-
- assert(txq_ctrl->priv != NULL);
- assert(txq_ctrl->priv->ctx != NULL);
- claim_zero(ibv_exp_destroy_res_domain(txq_ctrl->priv->ctx,
- txq_ctrl->rd,
- &attr));
- }
for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
if (txq_ctrl->txq.mp2mr[i].mp == NULL)
break;
@@ -214,9 +182,7 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
}
tmpl->txq.cqe_n = log2above(ibcq->cqe);
tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
- tmpl->txq.wqes =
- (volatile struct mlx5_wqe64 (*)[])
- (uintptr_t)qp->gen_data.sqstart;
+ tmpl->txq.wqes = qp->gen_data.sqstart;
tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
tmpl->txq.bf_reg = qp->gen_data.bf->reg;
@@ -258,14 +224,15 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
.socket = socket,
};
union {
- struct ibv_exp_query_intf_params params;
struct ibv_exp_qp_init_attr init;
- struct ibv_exp_res_domain_init_attr rd;
struct ibv_exp_cq_init_attr cq;
struct ibv_exp_qp_attr mod;
struct ibv_exp_cq_attr cq_attr;
} attr;
- enum ibv_exp_query_intf_status status;
+ unsigned int cqe_n;
+ const unsigned int max_tso_inline = ((MLX5_MAX_TSO_HEADER +
+ (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE);
int ret = 0;
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
@@ -276,27 +243,18 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
(void)conf; /* Thresholds configuration (ignored). */
assert(desc > MLX5_TX_COMP_THRESH);
tmpl.txq.elts_n = log2above(desc);
+ if (priv->mps == MLX5_MPW_ENHANCED)
+ tmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
/* MRs will be registered in mp2mr[] later. */
- attr.rd = (struct ibv_exp_res_domain_init_attr){
- .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
- IBV_EXP_RES_DOMAIN_MSG_MODEL),
- .thread_model = IBV_EXP_THREAD_SINGLE,
- .msg_model = IBV_EXP_MSG_HIGH_BW,
- };
- tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
- if (tmpl.rd == NULL) {
- ret = ENOMEM;
- ERROR("%p: RD creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
attr.cq = (struct ibv_exp_cq_init_attr){
- .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
- .res_domain = tmpl.rd,
+ .comp_mask = 0,
};
+ cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
+ ((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
+ if (priv->mps == MLX5_MPW_ENHANCED)
+ cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
tmpl.cq = ibv_exp_create_cq(priv->ctx,
- (((desc / MLX5_TX_COMP_THRESH) - 1) ?
- ((desc / MLX5_TX_COMP_THRESH) - 1) : 1),
+ cqe_n,
NULL, NULL, 0, &attr.cq);
if (tmpl.cq == NULL) {
ret = ENOMEM;
@@ -332,17 +290,52 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
* TX burst. */
.sq_sig_all = 0,
.pd = priv->pd,
- .res_domain = tmpl.rd,
- .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
+ .comp_mask = IBV_EXP_QP_INIT_ATTR_PD,
};
if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
tmpl.txq.max_inline =
((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
- attr.init.cap.max_inline_data =
- tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
+ tmpl.txq.inline_en = 1;
+ /* TSO and MPS can't be enabled concurrently. */
+ assert(!priv->tso || !priv->mps);
+ if (priv->mps == MLX5_MPW_ENHANCED) {
+ tmpl.txq.inline_max_packet_sz =
+ priv->inline_max_packet_sz;
+ /* To minimize the size of data set, avoid requesting
+ * too large WQ.
+ */
+ attr.init.cap.max_inline_data =
+ ((RTE_MIN(priv->txq_inline,
+ priv->inline_max_packet_sz) +
+ (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
+ } else if (priv->tso) {
+ int inline_diff = tmpl.txq.max_inline - max_tso_inline;
+
+ /*
+ * Adjust inline value as Verbs aggregates
+ * tso_inline and txq_inline fields.
+ */
+ attr.init.cap.max_inline_data = inline_diff > 0 ?
+ inline_diff *
+ RTE_CACHE_LINE_SIZE :
+ 0;
+ } else {
+ attr.init.cap.max_inline_data =
+ tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
+ }
}
+ if (priv->tso) {
+ attr.init.max_tso_header =
+ max_tso_inline * RTE_CACHE_LINE_SIZE;
+ attr.init.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER;
+ tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline,
+ max_tso_inline);
+ tmpl.txq.tso_en = 1;
+ }
+ if (priv->tunnel_en)
+ tmpl.txq.tunnel_en = 1;
tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
ret = (errno ? errno : EINVAL);
@@ -391,36 +384,6 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
(void *)dev, strerror(ret));
goto error;
}
- attr.params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf = IBV_EXP_INTF_CQ,
- .obj = tmpl.cq,
- };
- tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_cq == NULL) {
- ret = EINVAL;
- ERROR("%p: CQ interface family query failed with status %d",
- (void *)dev, status);
- goto error;
- }
- attr.params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf = IBV_EXP_INTF_QP_BURST,
- .intf_version = 1,
- .obj = tmpl.qp,
- /* Enable multi-packet send if supported. */
- .family_flags =
- ((priv->mps && !priv->sriov) ?
- IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR :
- 0),
- };
- tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_qp == NULL) {
- ret = EINVAL;
- ERROR("%p: QP interface family query failed with status %d",
- (void *)dev, status);
- goto error;
- }
/* Clean up txq in case we're reinitializing it. */
DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
txq_cleanup(txq_ctrl);
@@ -496,6 +459,19 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
}
(*priv->txqs)[idx] = NULL;
txq_cleanup(txq_ctrl);
+ /* Resize if txq size is changed. */
+ if (txq_ctrl->txq.elts_n != log2above(desc)) {
+ txq_ctrl = rte_realloc(txq_ctrl,
+ sizeof(*txq_ctrl) +
+ desc * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE);
+ if (!txq_ctrl) {
+ ERROR("%p: unable to reallocate queue index %u",
+ (void *)dev, idx);
+ priv_unlock(priv);
+ return -ENOMEM;
+ }
+ }
} else {
txq_ctrl =
rte_calloc_socket("TXQ", 1,
diff --git a/drivers/net/mpipe/Makefile b/drivers/net/mpipe/Makefile
deleted file mode 100644
index 846e2e07..00000000
--- a/drivers/net/mpipe/Makefile
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-# Copyright 2015 EZchip Semiconductor Ltd. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_pmd_mpipe.a
-
-CFLAGS += $(WERROR_FLAGS) -O3
-LDLIBS += -lgxio
-
-EXPORT_MAP := rte_pmd_mpipe_version.map
-
-LIBABIVER := 1
-
-SRCS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += mpipe_tilegx.c
-
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_net
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/mpipe/mpipe_tilegx.c b/drivers/net/mpipe/mpipe_tilegx.c
deleted file mode 100644
index fbbbb002..00000000
--- a/drivers/net/mpipe/mpipe_tilegx.c
+++ /dev/null
@@ -1,1655 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 EZchip Semiconductor Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of EZchip Semiconductor nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <unistd.h>
-
-#include <rte_eal.h>
-#include <rte_vdev.h>
-#include <rte_eal_memconfig.h>
-#include <rte_ethdev.h>
-#include <rte_malloc.h>
-#include <rte_cycles.h>
-
-#include <arch/mpipe_xaui_def.h>
-#include <arch/mpipe_gbe_def.h>
-
-#include <gxio/mpipe.h>
-
-#ifdef RTE_LIBRTE_MPIPE_PMD_DEBUG
-#define PMD_DEBUG_RX(...) RTE_LOG(DEBUG, PMD, __VA_ARGS__)
-#define PMD_DEBUG_TX(...) RTE_LOG(DEBUG, PMD, __VA_ARGS__)
-#else
-#define PMD_DEBUG_RX(...)
-#define PMD_DEBUG_TX(...)
-#endif
-
-#define MPIPE_MAX_CHANNELS 128
-#define MPIPE_TX_MAX_QUEUES 128
-#define MPIPE_RX_MAX_QUEUES 16
-#define MPIPE_TX_DESCS 512
-#define MPIPE_RX_BUCKETS 256
-#define MPIPE_RX_STACK_SIZE 65536
-#define MPIPE_RX_IP_ALIGN 2
-#define MPIPE_BSM_ALIGN 128
-
-#define MPIPE_LINK_UPDATE_TIMEOUT 10 /* s */
-#define MPIPE_LINK_UPDATE_INTERVAL 100000 /* us */
-
-struct mpipe_channel_config {
- int enable;
- int first_bucket;
- int num_buckets;
- int head_room;
- gxio_mpipe_rules_stacks_t stacks;
-};
-
-struct mpipe_context {
- rte_spinlock_t lock;
- gxio_mpipe_context_t context;
- struct mpipe_channel_config channels[MPIPE_MAX_CHANNELS];
-};
-
-/* Per-core local data. */
-struct mpipe_local {
- int mbuf_push_debt[RTE_MAX_ETHPORTS]; /* Buffer push debt. */
-} __rte_cache_aligned;
-
-#define MPIPE_BUF_DEBT_THRESHOLD 32
-static __thread struct mpipe_local mpipe_local;
-static struct mpipe_context mpipe_contexts[GXIO_MPIPE_INSTANCE_MAX];
-static int mpipe_instances;
-static const char *drivername = "MPIPE PMD";
-
-/* Per queue statistics. */
-struct mpipe_queue_stats {
- uint64_t packets, bytes, errors, nomem;
-};
-
-/* Common tx/rx queue fields. */
-struct mpipe_queue {
- struct mpipe_dev_priv *priv; /* "priv" data of its device. */
- uint16_t nb_desc; /* Number of tx descriptors. */
- uint16_t port_id; /* Device index. */
- uint16_t stat_idx; /* Queue stats index. */
- uint8_t queue_idx; /* Queue index. */
- uint8_t link_status; /* 0 = link down. */
- struct mpipe_queue_stats stats; /* Stat data for the queue. */
-};
-
-/* Transmit queue description. */
-struct mpipe_tx_queue {
- struct mpipe_queue q; /* Common stuff. */
-};
-
-/* Receive queue description. */
-struct mpipe_rx_queue {
- struct mpipe_queue q; /* Common stuff. */
- gxio_mpipe_iqueue_t iqueue; /* mPIPE iqueue. */
- gxio_mpipe_idesc_t *next_desc; /* Next idesc to process. */
- int avail_descs; /* Number of available descs. */
- void *rx_ring_mem; /* DMA ring memory. */
-};
-
-struct mpipe_dev_priv {
- gxio_mpipe_context_t *context; /* mPIPE context. */
- gxio_mpipe_link_t link; /* mPIPE link for the device. */
- gxio_mpipe_equeue_t equeue; /* mPIPE equeue. */
- unsigned equeue_size; /* mPIPE equeue desc count. */
- int instance; /* mPIPE instance. */
- int ering; /* mPIPE eDMA ring. */
- int stack; /* mPIPE buffer stack. */
- int channel; /* Device channel. */
- int port_id; /* DPDK port index. */
- struct rte_eth_dev *eth_dev; /* DPDK device. */
- struct rte_mbuf **tx_comps; /* TX completion array. */
- struct rte_mempool *rx_mpool; /* mpool used by the rx queues. */
- unsigned rx_offset; /* Receive head room. */
- unsigned rx_size_code; /* mPIPE rx buffer size code. */
- int is_xaui:1, /* Is this an xgbe or gbe? */
- initialized:1, /* Initialized port? */
- running:1; /* Running port? */
- struct ether_addr mac_addr; /* MAC address. */
- unsigned nb_rx_queues; /* Configured tx queues. */
- unsigned nb_tx_queues; /* Configured rx queues. */
- int first_bucket; /* mPIPE bucket start index. */
- int first_ring; /* mPIPE notif ring start index. */
- int notif_group; /* mPIPE notif group. */
- rte_atomic32_t dp_count __rte_cache_aligned; /* DP Entry count. */
- int tx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
- int rx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
-};
-
-#define mpipe_priv(dev) \
- ((struct mpipe_dev_priv*)(dev)->data->dev_private)
-
-#define mpipe_name(priv) \
- ((priv)->eth_dev->data->name)
-
-#define mpipe_rx_queue(priv, n) \
- ((struct mpipe_rx_queue *)(priv)->eth_dev->data->rx_queues[n])
-
-#define mpipe_tx_queue(priv, n) \
- ((struct mpipe_tx_queue *)(priv)->eth_dev->data->tx_queues[n])
-
-static void
-mpipe_xmit_flush(struct mpipe_dev_priv *priv);
-
-static void
-mpipe_recv_flush(struct mpipe_dev_priv *priv);
-
-static int mpipe_equeue_sizes[] = {
- [GXIO_MPIPE_EQUEUE_ENTRY_512] = 512,
- [GXIO_MPIPE_EQUEUE_ENTRY_2K] = 2048,
- [GXIO_MPIPE_EQUEUE_ENTRY_8K] = 8192,
- [GXIO_MPIPE_EQUEUE_ENTRY_64K] = 65536,
-};
-
-static int mpipe_iqueue_sizes[] = {
- [GXIO_MPIPE_IQUEUE_ENTRY_128] = 128,
- [GXIO_MPIPE_IQUEUE_ENTRY_512] = 512,
- [GXIO_MPIPE_IQUEUE_ENTRY_2K] = 2048,
- [GXIO_MPIPE_IQUEUE_ENTRY_64K] = 65536,
-};
-
-static int mpipe_buffer_sizes[] = {
- [GXIO_MPIPE_BUFFER_SIZE_128] = 128,
- [GXIO_MPIPE_BUFFER_SIZE_256] = 256,
- [GXIO_MPIPE_BUFFER_SIZE_512] = 512,
- [GXIO_MPIPE_BUFFER_SIZE_1024] = 1024,
- [GXIO_MPIPE_BUFFER_SIZE_1664] = 1664,
- [GXIO_MPIPE_BUFFER_SIZE_4096] = 4096,
- [GXIO_MPIPE_BUFFER_SIZE_10368] = 10368,
- [GXIO_MPIPE_BUFFER_SIZE_16384] = 16384,
-};
-
-static gxio_mpipe_context_t *
-mpipe_context(int instance)
-{
- if (instance < 0 || instance >= mpipe_instances)
- return NULL;
- return &mpipe_contexts[instance].context;
-}
-
-static int mpipe_channel_config(int instance, int channel,
- struct mpipe_channel_config *config)
-{
- struct mpipe_channel_config *data;
- struct mpipe_context *context;
- gxio_mpipe_rules_t rules;
- int idx, rc = 0;
-
- if (instance < 0 || instance >= mpipe_instances ||
- channel < 0 || channel >= MPIPE_MAX_CHANNELS)
- return -EINVAL;
-
- context = &mpipe_contexts[instance];
-
- rte_spinlock_lock(&context->lock);
-
- gxio_mpipe_rules_init(&rules, &context->context);
-
- for (idx = 0; idx < MPIPE_MAX_CHANNELS; idx++) {
- data = (channel == idx) ? config : &context->channels[idx];
-
- if (!data->enable)
- continue;
-
- rc = gxio_mpipe_rules_begin(&rules, data->first_bucket,
- data->num_buckets, &data->stacks);
- if (rc < 0) {
- goto done;
- }
-
- rc = gxio_mpipe_rules_add_channel(&rules, idx);
- if (rc < 0) {
- goto done;
- }
-
- rc = gxio_mpipe_rules_set_headroom(&rules, data->head_room);
- if (rc < 0) {
- goto done;
- }
- }
-
- rc = gxio_mpipe_rules_commit(&rules);
- if (rc == 0) {
- memcpy(&context->channels[channel], config, sizeof(*config));
- }
-
-done:
- rte_spinlock_unlock(&context->lock);
-
- return rc;
-}
-
-static int
-mpipe_get_size_index(int *array, int count, int size,
- bool roundup)
-{
- int i, last = -1;
-
- for (i = 0; i < count && array[i] < size; i++) {
- if (array[i])
- last = i;
- }
-
- if (roundup)
- return i < count ? (int)i : -ENOENT;
- else
- return last >= 0 ? last : -ENOENT;
-}
-
-static int
-mpipe_calc_size(int *array, int count, int size)
-{
- int index = mpipe_get_size_index(array, count, size, 1);
- return index < 0 ? index : array[index];
-}
-
-static int mpipe_equeue_size(int size)
-{
- int result;
- result = mpipe_calc_size(mpipe_equeue_sizes,
- RTE_DIM(mpipe_equeue_sizes), size);
- return result;
-}
-
-static int mpipe_iqueue_size(int size)
-{
- int result;
- result = mpipe_calc_size(mpipe_iqueue_sizes,
- RTE_DIM(mpipe_iqueue_sizes), size);
- return result;
-}
-
-static int mpipe_buffer_size_index(int size)
-{
- int result;
- result = mpipe_get_size_index(mpipe_buffer_sizes,
- RTE_DIM(mpipe_buffer_sizes), size, 0);
- return result;
-}
-
-static inline int
-mpipe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-static inline int
-mpipe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-static void
-mpipe_infos_get(struct rte_eth_dev *dev __rte_unused,
- struct rte_eth_dev_info *dev_info)
-{
- dev_info->min_rx_bufsize = 128;
- dev_info->max_rx_pktlen = 1518;
- dev_info->max_tx_queues = MPIPE_TX_MAX_QUEUES;
- dev_info->max_rx_queues = MPIPE_RX_MAX_QUEUES;
- dev_info->max_mac_addrs = 1;
- dev_info->rx_offload_capa = 0;
- dev_info->tx_offload_capa = 0;
-}
-
-static int
-mpipe_configure(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
-
- if (dev->data->nb_tx_queues > MPIPE_TX_MAX_QUEUES) {
- RTE_LOG(ERR, PMD, "%s: Too many tx queues: %d > %d\n",
- mpipe_name(priv), dev->data->nb_tx_queues,
- MPIPE_TX_MAX_QUEUES);
- return -EINVAL;
- }
- priv->nb_tx_queues = dev->data->nb_tx_queues;
-
- if (dev->data->nb_rx_queues > MPIPE_RX_MAX_QUEUES) {
- RTE_LOG(ERR, PMD, "%s: Too many rx queues: %d > %d\n",
- mpipe_name(priv), dev->data->nb_rx_queues,
- MPIPE_RX_MAX_QUEUES);
- }
- priv->nb_rx_queues = dev->data->nb_rx_queues;
-
- return 0;
-}
-
-static inline int
-mpipe_link_compare(struct rte_eth_link *link1,
- struct rte_eth_link *link2)
-{
- return (*(uint64_t *)link1 == *(uint64_t *)link2)
- ? -1 : 0;
-}
-
-static int
-mpipe_link_update(struct rte_eth_dev *dev, int wait_to_complete)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- struct rte_eth_link old, new;
- int64_t state, speed;
- int count, rc;
-
- memset(&old, 0, sizeof(old));
- memset(&new, 0, sizeof(new));
- mpipe_dev_atomic_read_link_status(dev, &old);
-
- for (count = 0, rc = 0; count < MPIPE_LINK_UPDATE_TIMEOUT; count++) {
- if (!priv->initialized)
- break;
-
- state = gxio_mpipe_link_get_attr(&priv->link,
- GXIO_MPIPE_LINK_CURRENT_STATE);
- if (state < 0)
- break;
-
- speed = state & GXIO_MPIPE_LINK_SPEED_MASK;
-
- new.link_autoneg = (dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_AUTONEG);
- if (speed == GXIO_MPIPE_LINK_1G) {
- new.link_speed = ETH_SPEED_NUM_1G;
- new.link_duplex = ETH_LINK_FULL_DUPLEX;
- new.link_status = ETH_LINK_UP;
- } else if (speed == GXIO_MPIPE_LINK_10G) {
- new.link_speed = ETH_SPEED_NUM_10G;
- new.link_duplex = ETH_LINK_FULL_DUPLEX;
- new.link_status = ETH_LINK_UP;
- }
-
- rc = mpipe_link_compare(&old, &new);
- if (rc == 0 || !wait_to_complete)
- break;
-
- rte_delay_us(MPIPE_LINK_UPDATE_INTERVAL);
- }
-
- mpipe_dev_atomic_write_link_status(dev, &new);
- return rc;
-}
-
-static int
-mpipe_set_link(struct rte_eth_dev *dev, int up)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- int rc;
-
- rc = gxio_mpipe_link_set_attr(&priv->link,
- GXIO_MPIPE_LINK_DESIRED_STATE,
- up ? GXIO_MPIPE_LINK_ANYSPEED : 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to set link %s.\n",
- mpipe_name(priv), up ? "up" : "down");
- } else {
- mpipe_link_update(dev, 0);
- }
-
- return rc;
-}
-
-static int
-mpipe_set_link_up(struct rte_eth_dev *dev)
-{
- return mpipe_set_link(dev, 1);
-}
-
-static int
-mpipe_set_link_down(struct rte_eth_dev *dev)
-{
- return mpipe_set_link(dev, 0);
-}
-
-static inline void
-mpipe_dp_enter(struct mpipe_dev_priv *priv)
-{
- __insn_mtspr(SPR_DSTREAM_PF, 0);
- rte_atomic32_inc(&priv->dp_count);
-}
-
-static inline void
-mpipe_dp_exit(struct mpipe_dev_priv *priv)
-{
- rte_atomic32_dec(&priv->dp_count);
-}
-
-static inline void
-mpipe_dp_wait(struct mpipe_dev_priv *priv)
-{
- while (rte_atomic32_read(&priv->dp_count) != 0) {
- rte_pause();
- }
-}
-
-static inline int
-mpipe_mbuf_stack_index(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
-{
- return (mbuf->port < RTE_MAX_ETHPORTS) ?
- mpipe_priv(&rte_eth_devices[mbuf->port])->stack :
- priv->stack;
-}
-
-static inline struct rte_mbuf *
-mpipe_recv_mbuf(struct mpipe_dev_priv *priv, gxio_mpipe_idesc_t *idesc,
- int in_port)
-{
- void *va = gxio_mpipe_idesc_get_va(idesc);
- uint16_t size = gxio_mpipe_idesc_get_xfer_size(idesc);
- struct rte_mbuf *mbuf = RTE_PTR_SUB(va, priv->rx_offset);
-
- rte_pktmbuf_reset(mbuf);
- mbuf->data_off = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
- mbuf->port = in_port;
- mbuf->data_len = size;
- mbuf->pkt_len = size;
- mbuf->hash.rss = gxio_mpipe_idesc_get_flow_hash(idesc);
-
- PMD_DEBUG_RX("%s: RX mbuf %p, buffer %p, buf_addr %p, size %d\n",
- mpipe_name(priv), mbuf, va, mbuf->buf_addr, size);
-
- return mbuf;
-}
-
-static inline void
-mpipe_recv_push(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
-{
- const int offset = RTE_PKTMBUF_HEADROOM + MPIPE_RX_IP_ALIGN;
- void *buf_addr = RTE_PTR_ADD(mbuf->buf_addr, offset);
-
- gxio_mpipe_push_buffer(priv->context, priv->stack, buf_addr);
- PMD_DEBUG_RX("%s: Pushed mbuf %p, buffer %p into stack %d\n",
- mpipe_name(priv), mbuf, buf_addr, priv->stack);
-}
-
-static inline void
-mpipe_recv_fill_stack(struct mpipe_dev_priv *priv, int count)
-{
- struct rte_mbuf *mbuf;
- int i;
-
- for (i = 0; i < count; i++) {
- mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
- if (!mbuf)
- break;
- mpipe_recv_push(priv, mbuf);
- }
-
- PMD_DEBUG_RX("%s: Filled %d/%d buffers\n", mpipe_name(priv), i, count);
-}
-
-static inline void
-mpipe_recv_flush_stack(struct mpipe_dev_priv *priv)
-{
- const int offset = priv->rx_offset & ~RTE_MEMPOOL_ALIGN_MASK;
- uint8_t in_port = priv->port_id;
- struct rte_mbuf *mbuf;
- void *va;
-
- while (1) {
- va = gxio_mpipe_pop_buffer(priv->context, priv->stack);
- if (!va)
- break;
- mbuf = RTE_PTR_SUB(va, offset);
-
- PMD_DEBUG_RX("%s: Flushing mbuf %p, va %p\n",
- mpipe_name(priv), mbuf, va);
-
- mbuf->data_off = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
- mbuf->refcnt = 1;
- mbuf->nb_segs = 1;
- mbuf->port = in_port;
- mbuf->packet_type = 0;
- mbuf->data_len = 0;
- mbuf->pkt_len = 0;
-
- __rte_mbuf_raw_free(mbuf);
- }
-}
-
-static void
-mpipe_register_segment(struct mpipe_dev_priv *priv, const struct rte_memseg *ms)
-{
- size_t size = ms->hugepage_sz;
- uint8_t *addr, *end;
- int rc;
-
- for (addr = ms->addr, end = addr + ms->len; addr < end; addr += size) {
- rc = gxio_mpipe_register_page(priv->context, priv->stack, addr,
- size, 0);
- if (rc < 0)
- break;
- }
-
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Could not register memseg @%p, %d.\n",
- mpipe_name(priv), ms->addr, rc);
- } else {
- RTE_LOG(DEBUG, PMD, "%s: Registered segment %p - %p\n",
- mpipe_name(priv), ms->addr,
- RTE_PTR_ADD(ms->addr, ms->len - 1));
- }
-}
-
-static int
-mpipe_recv_init(struct mpipe_dev_priv *priv)
-{
- const struct rte_memseg *seg = rte_eal_get_physmem_layout();
- size_t stack_size;
- void *stack_mem;
- int rc;
-
- if (!priv->rx_mpool) {
- RTE_LOG(ERR, PMD, "%s: No buffer pool.\n",
- mpipe_name(priv));
- return -ENODEV;
- }
-
- /* Allocate one NotifRing for each queue. */
- rc = gxio_mpipe_alloc_notif_rings(priv->context, MPIPE_RX_MAX_QUEUES,
- 0, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate notif rings.\n",
- mpipe_name(priv));
- return rc;
- }
- priv->first_ring = rc;
-
- /* Allocate a NotifGroup. */
- rc = gxio_mpipe_alloc_notif_groups(priv->context, 1, 0, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate rx group.\n",
- mpipe_name(priv));
- return rc;
- }
- priv->notif_group = rc;
-
- /* Allocate required buckets. */
- rc = gxio_mpipe_alloc_buckets(priv->context, MPIPE_RX_BUCKETS, 0, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate buckets.\n",
- mpipe_name(priv));
- return rc;
- }
- priv->first_bucket = rc;
-
- rc = gxio_mpipe_alloc_buffer_stacks(priv->context, 1, 0, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer stack.\n",
- mpipe_name(priv));
- return rc;
- }
- priv->stack = rc;
-
- while (seg && seg->addr)
- mpipe_register_segment(priv, seg++);
-
- stack_size = gxio_mpipe_calc_buffer_stack_bytes(MPIPE_RX_STACK_SIZE);
- stack_mem = rte_zmalloc(NULL, stack_size, 65536);
- if (!stack_mem) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer memory.\n",
- mpipe_name(priv));
- return -ENOMEM;
- } else {
- RTE_LOG(DEBUG, PMD, "%s: Buffer stack memory %p - %p.\n",
- mpipe_name(priv), stack_mem,
- RTE_PTR_ADD(stack_mem, stack_size - 1));
- }
-
- rc = gxio_mpipe_init_buffer_stack(priv->context, priv->stack,
- priv->rx_size_code, stack_mem,
- stack_size, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to initialize buffer stack.\n",
- mpipe_name(priv));
- return rc;
- }
-
- return 0;
-}
-
-static int
-mpipe_xmit_init(struct mpipe_dev_priv *priv)
-{
- size_t ring_size;
- void *ring_mem;
- int rc;
-
- /* Allocate eDMA ring. */
- rc = gxio_mpipe_alloc_edma_rings(priv->context, 1, 0, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to alloc tx ring.\n",
- mpipe_name(priv));
- return rc;
- }
- priv->ering = rc;
-
- rc = mpipe_equeue_size(MPIPE_TX_DESCS);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Cannot allocate %d equeue descs.\n",
- mpipe_name(priv), (int)MPIPE_TX_DESCS);
- return -ENOMEM;
- }
- priv->equeue_size = rc;
-
- /* Initialize completion array. */
- ring_size = sizeof(priv->tx_comps[0]) * priv->equeue_size;
- priv->tx_comps = rte_zmalloc(NULL, ring_size, RTE_CACHE_LINE_SIZE);
- if (!priv->tx_comps) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate egress comps.\n",
- mpipe_name(priv));
- return -ENOMEM;
- }
-
- /* Allocate eDMA ring memory. */
- ring_size = sizeof(gxio_mpipe_edesc_t) * priv->equeue_size;
- ring_mem = rte_zmalloc(NULL, ring_size, ring_size);
- if (!ring_mem) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate egress descs.\n",
- mpipe_name(priv));
- return -ENOMEM;
- } else {
- RTE_LOG(DEBUG, PMD, "%s: eDMA ring memory %p - %p.\n",
- mpipe_name(priv), ring_mem,
- RTE_PTR_ADD(ring_mem, ring_size - 1));
- }
-
- /* Initialize eDMA ring. */
- rc = gxio_mpipe_equeue_init(&priv->equeue, priv->context, priv->ering,
- priv->channel, ring_mem, ring_size, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to init equeue\n",
- mpipe_name(priv));
- return rc;
- }
-
- return 0;
-}
-
-static int
-mpipe_link_init(struct mpipe_dev_priv *priv)
-{
- int rc;
-
- /* Open the link. */
- rc = gxio_mpipe_link_open(&priv->link, priv->context,
- mpipe_name(priv), GXIO_MPIPE_LINK_AUTO_NONE);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to open link.\n",
- mpipe_name(priv));
- return rc;
- }
-
- /* Get the channel index. */
- rc = gxio_mpipe_link_channel(&priv->link);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Bad channel\n",
- mpipe_name(priv));
- return rc;
- }
- priv->channel = rc;
-
- return 0;
-}
-
-static int
-mpipe_init(struct mpipe_dev_priv *priv)
-{
- int rc;
-
- if (priv->initialized)
- return 0;
-
- rc = mpipe_recv_init(priv);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to init rx.\n",
- mpipe_name(priv));
- return rc;
- }
-
- rc = mpipe_xmit_init(priv);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to init tx.\n",
- mpipe_name(priv));
- rte_free(priv);
- return rc;
- }
-
- priv->initialized = 1;
-
- return 0;
-}
-
-static int
-mpipe_start(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- struct mpipe_channel_config config;
- struct mpipe_rx_queue *rx_queue;
- struct rte_eth_link eth_link;
- unsigned queue, buffers = 0;
- size_t ring_size;
- void *ring_mem;
- int rc;
-
- memset(&eth_link, 0, sizeof(eth_link));
- mpipe_dev_atomic_write_link_status(dev, &eth_link);
-
- rc = mpipe_init(priv);
- if (rc < 0)
- return rc;
-
- /* Initialize NotifRings. */
- for (queue = 0; queue < priv->nb_rx_queues; queue++) {
- rx_queue = mpipe_rx_queue(priv, queue);
- ring_size = rx_queue->q.nb_desc * sizeof(gxio_mpipe_idesc_t);
-
- ring_mem = rte_malloc(NULL, ring_size, ring_size);
- if (!ring_mem) {
- RTE_LOG(ERR, PMD, "%s: Failed to alloc rx descs.\n",
- mpipe_name(priv));
- return -ENOMEM;
- } else {
- RTE_LOG(DEBUG, PMD, "%s: iDMA ring %d memory %p - %p.\n",
- mpipe_name(priv), queue, ring_mem,
- RTE_PTR_ADD(ring_mem, ring_size - 1));
- }
-
- rc = gxio_mpipe_iqueue_init(&rx_queue->iqueue, priv->context,
- priv->first_ring + queue, ring_mem,
- ring_size, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to init rx queue.\n",
- mpipe_name(priv));
- return rc;
- }
-
- rx_queue->rx_ring_mem = ring_mem;
- buffers += rx_queue->q.nb_desc;
- }
-
- /* Initialize ingress NotifGroup and buckets. */
- rc = gxio_mpipe_init_notif_group_and_buckets(priv->context,
- priv->notif_group, priv->first_ring, priv->nb_rx_queues,
- priv->first_bucket, MPIPE_RX_BUCKETS,
- GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to init group and buckets.\n",
- mpipe_name(priv));
- return rc;
- }
-
- /* Configure the classifier to deliver packets from this port. */
- config.enable = 1;
- config.first_bucket = priv->first_bucket;
- config.num_buckets = MPIPE_RX_BUCKETS;
- memset(&config.stacks, 0xff, sizeof(config.stacks));
- config.stacks.stacks[priv->rx_size_code] = priv->stack;
- config.head_room = priv->rx_offset & RTE_MEMPOOL_ALIGN_MASK;
-
- rc = mpipe_channel_config(priv->instance, priv->channel,
- &config);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to setup classifier.\n",
- mpipe_name(priv));
- return rc;
- }
-
- /* Fill empty buffers into the buffer stack. */
- mpipe_recv_fill_stack(priv, buffers);
-
- /* Bring up the link. */
- mpipe_set_link_up(dev);
-
- /* Start xmit/recv on queues. */
- for (queue = 0; queue < priv->nb_tx_queues; queue++)
- mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
- for (queue = 0; queue < priv->nb_rx_queues; queue++)
- mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
- priv->running = 1;
-
- return 0;
-}
-
-static void
-mpipe_stop(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- struct mpipe_channel_config config;
- unsigned queue;
- int rc;
-
- for (queue = 0; queue < priv->nb_tx_queues; queue++)
- mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;
- for (queue = 0; queue < priv->nb_rx_queues; queue++)
- mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;
-
- /* Make sure the link_status writes land. */
- rte_wmb();
-
- /*
- * Wait for link_status change to register with straggling datapath
- * threads.
- */
- mpipe_dp_wait(priv);
-
- /* Bring down the link. */
- mpipe_set_link_down(dev);
-
- /* Remove classifier rules. */
- memset(&config, 0, sizeof(config));
- rc = mpipe_channel_config(priv->instance, priv->channel,
- &config);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to stop classifier.\n",
- mpipe_name(priv));
- }
-
- /* Flush completed xmit packets. */
- mpipe_xmit_flush(priv);
-
- /* Flush buffer stacks. */
- mpipe_recv_flush(priv);
-
- priv->running = 0;
-}
-
-static void
-mpipe_close(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- if (priv->running)
- mpipe_stop(dev);
-}
-
-static void
-mpipe_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- struct mpipe_tx_queue *tx_queue;
- struct mpipe_rx_queue *rx_queue;
- unsigned i;
- uint16_t idx;
-
- memset(stats, 0, sizeof(*stats));
-
- for (i = 0; i < priv->nb_tx_queues; i++) {
- tx_queue = mpipe_tx_queue(priv, i);
-
- stats->opackets += tx_queue->q.stats.packets;
- stats->obytes += tx_queue->q.stats.bytes;
- stats->oerrors += tx_queue->q.stats.errors;
-
- idx = tx_queue->q.stat_idx;
- if (idx != (uint16_t)-1) {
- stats->q_opackets[idx] += tx_queue->q.stats.packets;
- stats->q_obytes[idx] += tx_queue->q.stats.bytes;
- stats->q_errors[idx] += tx_queue->q.stats.errors;
- }
- }
-
- for (i = 0; i < priv->nb_rx_queues; i++) {
- rx_queue = mpipe_rx_queue(priv, i);
-
- stats->ipackets += rx_queue->q.stats.packets;
- stats->ibytes += rx_queue->q.stats.bytes;
- stats->ierrors += rx_queue->q.stats.errors;
- stats->rx_nombuf += rx_queue->q.stats.nomem;
-
- idx = rx_queue->q.stat_idx;
- if (idx != (uint16_t)-1) {
- stats->q_ipackets[idx] += rx_queue->q.stats.packets;
- stats->q_ibytes[idx] += rx_queue->q.stats.bytes;
- stats->q_errors[idx] += rx_queue->q.stats.errors;
- }
- }
-}
-
-static void
-mpipe_stats_reset(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- struct mpipe_tx_queue *tx_queue;
- struct mpipe_rx_queue *rx_queue;
- unsigned i;
-
- for (i = 0; i < priv->nb_tx_queues; i++) {
- tx_queue = mpipe_tx_queue(priv, i);
- memset(&tx_queue->q.stats, 0, sizeof(tx_queue->q.stats));
- }
-
- for (i = 0; i < priv->nb_rx_queues; i++) {
- rx_queue = mpipe_rx_queue(priv, i);
- memset(&rx_queue->q.stats, 0, sizeof(rx_queue->q.stats));
- }
-}
-
-static int
-mpipe_queue_stats_mapping_set(struct rte_eth_dev *dev, uint16_t queue_id,
- uint8_t stat_idx, uint8_t is_rx)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
-
- if (is_rx) {
- priv->rx_stat_mapping[stat_idx] = queue_id;
- } else {
- priv->tx_stat_mapping[stat_idx] = queue_id;
- }
-
- return 0;
-}
-
-static int
-mpipe_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
- uint16_t nb_desc, unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf __rte_unused)
-{
- struct mpipe_tx_queue *tx_queue = dev->data->tx_queues[queue_idx];
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- uint16_t idx;
-
- tx_queue = rte_realloc(tx_queue, sizeof(*tx_queue),
- RTE_CACHE_LINE_SIZE);
- if (!tx_queue) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate TX queue.\n",
- mpipe_name(priv));
- return -ENOMEM;
- }
-
- memset(&tx_queue->q, 0, sizeof(tx_queue->q));
- tx_queue->q.priv = priv;
- tx_queue->q.queue_idx = queue_idx;
- tx_queue->q.port_id = dev->data->port_id;
- tx_queue->q.nb_desc = nb_desc;
-
- tx_queue->q.stat_idx = -1;
- for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
- if (priv->tx_stat_mapping[idx] == queue_idx)
- tx_queue->q.stat_idx = idx;
- }
-
- dev->data->tx_queues[queue_idx] = tx_queue;
-
- return 0;
-}
-
-static void
-mpipe_tx_queue_release(void *_txq)
-{
- rte_free(_txq);
-}
-
-static int
-mpipe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
- uint16_t nb_desc, unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
- struct rte_mempool *mp)
-{
- struct mpipe_rx_queue *rx_queue = dev->data->rx_queues[queue_idx];
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- uint16_t idx;
- int size, rc;
-
- rc = mpipe_iqueue_size(nb_desc);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Cannot allocate %d iqueue descs.\n",
- mpipe_name(priv), (int)nb_desc);
- return -ENOMEM;
- }
-
- if (rc != nb_desc) {
- RTE_LOG(WARNING, PMD, "%s: Extending RX descs from %d to %d.\n",
- mpipe_name(priv), (int)nb_desc, rc);
- nb_desc = rc;
- }
-
- size = sizeof(*rx_queue);
- rx_queue = rte_realloc(rx_queue, size, RTE_CACHE_LINE_SIZE);
- if (!rx_queue) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate RX queue.\n",
- mpipe_name(priv));
- return -ENOMEM;
- }
-
- memset(&rx_queue->q, 0, sizeof(rx_queue->q));
- rx_queue->q.priv = priv;
- rx_queue->q.nb_desc = nb_desc;
- rx_queue->q.port_id = dev->data->port_id;
- rx_queue->q.queue_idx = queue_idx;
-
- if (!priv->rx_mpool) {
- int size = (rte_pktmbuf_data_room_size(mp) -
- RTE_PKTMBUF_HEADROOM -
- MPIPE_RX_IP_ALIGN);
-
- priv->rx_offset = (sizeof(struct rte_mbuf) +
- rte_pktmbuf_priv_size(mp) +
- RTE_PKTMBUF_HEADROOM +
- MPIPE_RX_IP_ALIGN);
- if (size < 0) {
- RTE_LOG(ERR, PMD, "%s: Bad buffer size %d.\n",
- mpipe_name(priv),
- rte_pktmbuf_data_room_size(mp));
- return -ENOMEM;
- }
-
- priv->rx_size_code = mpipe_buffer_size_index(size);
- priv->rx_mpool = mp;
- }
-
- if (priv->rx_mpool != mp) {
- RTE_LOG(WARNING, PMD, "%s: Ignoring multiple buffer pools.\n",
- mpipe_name(priv));
- }
-
- rx_queue->q.stat_idx = -1;
- for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
- if (priv->rx_stat_mapping[idx] == queue_idx)
- rx_queue->q.stat_idx = idx;
- }
-
- dev->data->rx_queues[queue_idx] = rx_queue;
-
- return 0;
-}
-
-static void
-mpipe_rx_queue_release(void *_rxq)
-{
- rte_free(_rxq);
-}
-
-#define MPIPE_XGBE_ENA_HASH_MULTI \
- (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_MULTI_SHIFT)
-#define MPIPE_XGBE_ENA_HASH_UNI \
- (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_UNI_SHIFT)
-#define MPIPE_XGBE_COPY_ALL \
- (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__COPY_ALL_SHIFT)
-#define MPIPE_GBE_ENA_MULTI_HASH \
- (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__MULTI_HASH_ENA_SHIFT)
-#define MPIPE_GBE_ENA_UNI_HASH \
- (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__UNI_HASH_ENA_SHIFT)
-#define MPIPE_GBE_COPY_ALL \
- (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__COPY_ALL_SHIFT)
-
-static void
-mpipe_promiscuous_enable(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- int64_t reg;
- int addr;
-
- if (priv->is_xaui) {
- addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
- reg = gxio_mpipe_link_mac_rd(&priv->link, addr);
- reg &= ~MPIPE_XGBE_ENA_HASH_MULTI;
- reg &= ~MPIPE_XGBE_ENA_HASH_UNI;
- reg |= MPIPE_XGBE_COPY_ALL;
- gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
- } else {
- addr = MPIPE_GBE_NETWORK_CONFIGURATION;
- reg = gxio_mpipe_link_mac_rd(&priv->link, addr);
- reg &= ~MPIPE_GBE_ENA_MULTI_HASH;
- reg &= ~MPIPE_GBE_ENA_UNI_HASH;
- reg |= MPIPE_GBE_COPY_ALL;
- gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
- }
-}
-
-static void
-mpipe_promiscuous_disable(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- int64_t reg;
- int addr;
-
- if (priv->is_xaui) {
- addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
- reg = gxio_mpipe_link_mac_rd(&priv->link, addr);
- reg |= MPIPE_XGBE_ENA_HASH_MULTI;
- reg |= MPIPE_XGBE_ENA_HASH_UNI;
- reg &= ~MPIPE_XGBE_COPY_ALL;
- gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
- } else {
- addr = MPIPE_GBE_NETWORK_CONFIGURATION;
- reg = gxio_mpipe_link_mac_rd(&priv->link, addr);
- reg |= MPIPE_GBE_ENA_MULTI_HASH;
- reg |= MPIPE_GBE_ENA_UNI_HASH;
- reg &= ~MPIPE_GBE_COPY_ALL;
- gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
- }
-}
-
-static const struct eth_dev_ops mpipe_dev_ops = {
- .dev_infos_get = mpipe_infos_get,
- .dev_configure = mpipe_configure,
- .dev_start = mpipe_start,
- .dev_stop = mpipe_stop,
- .dev_close = mpipe_close,
- .stats_get = mpipe_stats_get,
- .stats_reset = mpipe_stats_reset,
- .queue_stats_mapping_set = mpipe_queue_stats_mapping_set,
- .tx_queue_setup = mpipe_tx_queue_setup,
- .rx_queue_setup = mpipe_rx_queue_setup,
- .tx_queue_release = mpipe_tx_queue_release,
- .rx_queue_release = mpipe_rx_queue_release,
- .link_update = mpipe_link_update,
- .dev_set_link_up = mpipe_set_link_up,
- .dev_set_link_down = mpipe_set_link_down,
- .promiscuous_enable = mpipe_promiscuous_enable,
- .promiscuous_disable = mpipe_promiscuous_disable,
-};
-
-static inline void
-mpipe_xmit_null(struct mpipe_dev_priv *priv, int64_t start, int64_t end)
-{
- gxio_mpipe_edesc_t null_desc = { { .bound = 1, .ns = 1 } };
- gxio_mpipe_equeue_t *equeue = &priv->equeue;
- int64_t slot;
-
- for (slot = start; slot < end; slot++) {
- gxio_mpipe_equeue_put_at(equeue, null_desc, slot);
- }
-}
-
-static void
-mpipe_xmit_flush(struct mpipe_dev_priv *priv)
-{
- gxio_mpipe_equeue_t *equeue = &priv->equeue;
- int64_t slot;
-
- /* Post a dummy descriptor and wait for its return. */
- slot = gxio_mpipe_equeue_reserve(equeue, 1);
- if (slot < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to reserve stop slot.\n",
- mpipe_name(priv));
- return;
- }
-
- mpipe_xmit_null(priv, slot, slot + 1);
-
- while (!gxio_mpipe_equeue_is_complete(equeue, slot, 1)) {
- rte_pause();
- }
-
- for (slot = 0; slot < priv->equeue_size; slot++) {
- if (priv->tx_comps[slot])
- rte_pktmbuf_free_seg(priv->tx_comps[slot]);
- }
-}
-
-static void
-mpipe_recv_flush(struct mpipe_dev_priv *priv)
-{
- uint8_t in_port = priv->port_id;
- struct mpipe_rx_queue *rx_queue;
- gxio_mpipe_iqueue_t *iqueue;
- gxio_mpipe_idesc_t idesc;
- struct rte_mbuf *mbuf;
- unsigned queue;
-
- /* Release packets on the buffer stack. */
- mpipe_recv_flush_stack(priv);
-
- /* Flush packets sitting in recv queues. */
- for (queue = 0; queue < priv->nb_rx_queues; queue++) {
- rx_queue = mpipe_rx_queue(priv, queue);
- iqueue = &rx_queue->iqueue;
- while (gxio_mpipe_iqueue_try_get(iqueue, &idesc) >= 0) {
- /* Skip idesc with the 'buffer error' bit set. */
- if (idesc.be)
- continue;
- mbuf = mpipe_recv_mbuf(priv, &idesc, in_port);
- rte_pktmbuf_free(mbuf);
- }
- rte_free(rx_queue->rx_ring_mem);
- }
-}
-
-static inline uint16_t
-mpipe_do_xmit(struct mpipe_tx_queue *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
-{
- struct mpipe_dev_priv *priv = tx_queue->q.priv;
- gxio_mpipe_equeue_t *equeue = &priv->equeue;
- unsigned nb_bytes = 0;
- unsigned nb_sent = 0;
- int nb_slots, i;
- uint8_t port_id;
-
- PMD_DEBUG_TX("Trying to transmit %d packets on %s:%d.\n",
- nb_pkts, mpipe_name(tx_queue->q.priv),
- tx_queue->q.queue_idx);
-
- /* Optimistic assumption that we need exactly one slot per packet. */
- nb_slots = RTE_MIN(nb_pkts, MPIPE_TX_DESCS / 2);
-
- do {
- struct rte_mbuf *mbuf = NULL, *pkt = NULL;
- int64_t slot;
-
- /* Reserve eDMA ring slots. */
- slot = gxio_mpipe_equeue_try_reserve_fast(equeue, nb_slots);
- if (unlikely(slot < 0)) {
- break;
- }
-
- for (i = 0; i < nb_slots; i++) {
- unsigned idx = (slot + i) & (priv->equeue_size - 1);
- rte_prefetch0(priv->tx_comps[idx]);
- }
-
- /* Fill up slots with descriptor and completion info. */
- for (i = 0; i < nb_slots; i++) {
- unsigned idx = (slot + i) & (priv->equeue_size - 1);
- gxio_mpipe_edesc_t desc;
- struct rte_mbuf *next;
-
- /* Starting on a new packet? */
- if (likely(!mbuf)) {
- int room = nb_slots - i;
-
- pkt = mbuf = tx_pkts[nb_sent];
-
- /* Bail out if we run out of descs. */
- if (unlikely(pkt->nb_segs > room))
- break;
-
- nb_sent++;
- }
-
- /* We have a segment to send. */
- next = mbuf->next;
-
- if (priv->tx_comps[idx])
- rte_pktmbuf_free_seg(priv->tx_comps[idx]);
-
- port_id = (mbuf->port < RTE_MAX_ETHPORTS) ?
- mbuf->port : priv->port_id;
- desc = (gxio_mpipe_edesc_t) { {
- .va = rte_pktmbuf_mtod(mbuf, uintptr_t),
- .xfer_size = rte_pktmbuf_data_len(mbuf),
- .bound = next ? 0 : 1,
- .stack_idx = mpipe_mbuf_stack_index(priv, mbuf),
- .size = priv->rx_size_code,
- } };
- if (mpipe_local.mbuf_push_debt[port_id] > 0) {
- mpipe_local.mbuf_push_debt[port_id]--;
- desc.hwb = 1;
- priv->tx_comps[idx] = NULL;
- } else
- priv->tx_comps[idx] = mbuf;
-
- nb_bytes += mbuf->data_len;
- gxio_mpipe_equeue_put_at(equeue, desc, slot + i);
-
- PMD_DEBUG_TX("%s:%d: Sending packet %p, len %d\n",
- mpipe_name(priv),
- tx_queue->q.queue_idx,
- rte_pktmbuf_mtod(mbuf, void *),
- rte_pktmbuf_data_len(mbuf));
-
- mbuf = next;
- }
-
- if (unlikely(nb_sent < nb_pkts)) {
-
- /* Fill remaining slots with null descriptors. */
- mpipe_xmit_null(priv, slot + i, slot + nb_slots);
-
- /*
- * Calculate exact number of descriptors needed for
- * the next go around.
- */
- nb_slots = 0;
- for (i = nb_sent; i < nb_pkts; i++) {
- nb_slots += tx_pkts[i]->nb_segs;
- }
-
- nb_slots = RTE_MIN(nb_slots, MPIPE_TX_DESCS / 2);
- }
- } while (nb_sent < nb_pkts);
-
- tx_queue->q.stats.packets += nb_sent;
- tx_queue->q.stats.bytes += nb_bytes;
-
- return nb_sent;
-}
-
-static inline uint16_t
-mpipe_do_recv(struct mpipe_rx_queue *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
-{
- struct mpipe_dev_priv *priv = rx_queue->q.priv;
- gxio_mpipe_iqueue_t *iqueue = &rx_queue->iqueue;
- gxio_mpipe_idesc_t *first_idesc, *idesc, *last_idesc;
- uint8_t in_port = rx_queue->q.port_id;
- const unsigned look_ahead = 8;
- int room = nb_pkts, rc = 0;
- unsigned nb_packets = 0;
- unsigned nb_dropped = 0;
- unsigned nb_nomem = 0;
- unsigned nb_bytes = 0;
- unsigned nb_descs, i;
-
- while (room && !rc) {
- if (rx_queue->avail_descs < room) {
- rc = gxio_mpipe_iqueue_try_peek(iqueue,
- &rx_queue->next_desc);
- rx_queue->avail_descs = rc < 0 ? 0 : rc;
- }
-
- if (unlikely(!rx_queue->avail_descs)) {
- break;
- }
-
- nb_descs = RTE_MIN(room, rx_queue->avail_descs);
-
- first_idesc = rx_queue->next_desc;
- last_idesc = first_idesc + nb_descs;
-
- rx_queue->next_desc += nb_descs;
- rx_queue->avail_descs -= nb_descs;
-
- for (i = 1; i < look_ahead; i++) {
- rte_prefetch0(first_idesc + i);
- }
-
- PMD_DEBUG_RX("%s:%d: Trying to receive %d packets\n",
- mpipe_name(rx_queue->q.priv),
- rx_queue->q.queue_idx,
- nb_descs);
-
- for (idesc = first_idesc; idesc < last_idesc; idesc++) {
- struct rte_mbuf *mbuf;
-
- PMD_DEBUG_RX("%s:%d: processing idesc %d/%d\n",
- mpipe_name(priv),
- rx_queue->q.queue_idx,
- nb_packets, nb_descs);
-
- rte_prefetch0(idesc + look_ahead);
-
- PMD_DEBUG_RX("%s:%d: idesc %p, %s%s%s%s%s%s%s%s%s%s"
- "size: %d, bkt: %d, chan: %d, ring: %d, sqn: %lu, va: %lu\n",
- mpipe_name(priv),
- rx_queue->q.queue_idx,
- idesc,
- idesc->me ? "me, " : "",
- idesc->tr ? "tr, " : "",
- idesc->ce ? "ce, " : "",
- idesc->ct ? "ct, " : "",
- idesc->cs ? "cs, " : "",
- idesc->nr ? "nr, " : "",
- idesc->sq ? "sq, " : "",
- idesc->ts ? "ts, " : "",
- idesc->ps ? "ps, " : "",
- idesc->be ? "be, " : "",
- idesc->l2_size,
- idesc->bucket_id,
- idesc->channel,
- idesc->notif_ring,
- (unsigned long)idesc->packet_sqn,
- (unsigned long)idesc->va);
-
- if (unlikely(gxio_mpipe_idesc_has_error(idesc))) {
- nb_dropped++;
- gxio_mpipe_iqueue_drop(iqueue, idesc);
- PMD_DEBUG_RX("%s:%d: Descriptor error\n",
- mpipe_name(rx_queue->q.priv),
- rx_queue->q.queue_idx);
- continue;
- }
-
- if (mpipe_local.mbuf_push_debt[in_port] <
- MPIPE_BUF_DEBT_THRESHOLD)
- mpipe_local.mbuf_push_debt[in_port]++;
- else {
- mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
- if (unlikely(!mbuf)) {
- nb_nomem++;
- gxio_mpipe_iqueue_drop(iqueue, idesc);
- PMD_DEBUG_RX("%s:%d: alloc failure\n",
- mpipe_name(rx_queue->q.priv),
- rx_queue->q.queue_idx);
- continue;
- }
-
- mpipe_recv_push(priv, mbuf);
- }
-
- /* Get and setup the mbuf for the received packet. */
- mbuf = mpipe_recv_mbuf(priv, idesc, in_port);
-
- /* Update results and statistics counters. */
- rx_pkts[nb_packets] = mbuf;
- nb_bytes += mbuf->pkt_len;
- nb_packets++;
- }
-
- /*
- * We release the ring in bursts, but do not track and release
- * buckets. This therefore breaks dynamic flow affinity, but
- * we always operate in static affinity mode, and so we're OK
- * with this optimization.
- */
- gxio_mpipe_iqueue_advance(iqueue, nb_descs);
- gxio_mpipe_credit(iqueue->context, iqueue->ring, -1, nb_descs);
-
- /*
- * Go around once more if we haven't yet peeked the queue, and
- * if we have more room to receive.
- */
- room = nb_pkts - nb_packets;
- }
-
- rx_queue->q.stats.packets += nb_packets;
- rx_queue->q.stats.bytes += nb_bytes;
- rx_queue->q.stats.errors += nb_dropped;
- rx_queue->q.stats.nomem += nb_nomem;
-
- PMD_DEBUG_RX("%s:%d: RX: %d/%d pkts/bytes, %d/%d drops/nomem\n",
- mpipe_name(rx_queue->q.priv), rx_queue->q.queue_idx,
- nb_packets, nb_bytes, nb_dropped, nb_nomem);
-
- return nb_packets;
-}
-
-static uint16_t
-mpipe_recv_pkts(void *_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
- struct mpipe_rx_queue *rx_queue = _rxq;
- uint16_t result = 0;
-
- if (rx_queue) {
- mpipe_dp_enter(rx_queue->q.priv);
- if (likely(rx_queue->q.link_status))
- result = mpipe_do_recv(rx_queue, rx_pkts, nb_pkts);
- mpipe_dp_exit(rx_queue->q.priv);
- }
-
- return result;
-}
-
-static uint16_t
-mpipe_xmit_pkts(void *_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- struct mpipe_tx_queue *tx_queue = _txq;
- uint16_t result = 0;
-
- if (tx_queue) {
- mpipe_dp_enter(tx_queue->q.priv);
- if (likely(tx_queue->q.link_status))
- result = mpipe_do_xmit(tx_queue, tx_pkts, nb_pkts);
- mpipe_dp_exit(tx_queue->q.priv);
- }
-
- return result;
-}
-
-static int
-mpipe_link_mac(const char *ifname, uint8_t *mac)
-{
- int rc, idx;
- char name[GXIO_MPIPE_LINK_NAME_LEN];
-
- for (idx = 0, rc = 0; !rc; idx++) {
- rc = gxio_mpipe_link_enumerate_mac(idx, name, mac);
- if (!rc && !strncmp(name, ifname, GXIO_MPIPE_LINK_NAME_LEN))
- return 0;
- }
- return -ENODEV;
-}
-
-static int
-rte_pmd_mpipe_probe(const char *ifname,
- const char *params __rte_unused)
-{
- gxio_mpipe_context_t *context;
- struct rte_eth_dev *eth_dev;
- struct mpipe_dev_priv *priv;
- int instance, rc;
- uint8_t *mac;
-
- /* Get the mPIPE instance that the device belongs to. */
- instance = gxio_mpipe_link_instance(ifname);
- context = mpipe_context(instance);
- if (!context) {
- RTE_LOG(ERR, PMD, "%s: No device for link.\n", ifname);
- return -ENODEV;
- }
-
- priv = rte_zmalloc(NULL, sizeof(*priv), 0);
- if (!priv) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate priv.\n", ifname);
- return -ENOMEM;
- }
-
- memset(&priv->tx_stat_mapping, 0xff, sizeof(priv->tx_stat_mapping));
- memset(&priv->rx_stat_mapping, 0xff, sizeof(priv->rx_stat_mapping));
- priv->context = context;
- priv->instance = instance;
- priv->is_xaui = (strncmp(ifname, "xgbe", 4) == 0);
- priv->channel = -1;
-
- mac = priv->mac_addr.addr_bytes;
- rc = mpipe_link_mac(ifname, mac);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to enumerate link.\n", ifname);
- rte_free(priv);
- return -ENODEV;
- }
-
- eth_dev = rte_eth_dev_allocate(ifname);
- if (!eth_dev) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate device.\n", ifname);
- rte_free(priv);
- return -ENOMEM;
- }
-
- RTE_LOG(INFO, PMD, "%s: Initialized mpipe device"
- "(mac %02x:%02x:%02x:%02x:%02x:%02x).\n",
- ifname, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
-
- priv->eth_dev = eth_dev;
- priv->port_id = eth_dev->data->port_id;
- eth_dev->data->dev_private = priv;
- eth_dev->data->mac_addrs = &priv->mac_addr;
-
- eth_dev->data->dev_flags = 0;
- eth_dev->data->kdrv = RTE_KDRV_NONE;
- eth_dev->driver = NULL;
- eth_dev->data->drv_name = drivername;
- eth_dev->data->numa_node = instance;
-
- eth_dev->dev_ops = &mpipe_dev_ops;
- eth_dev->rx_pkt_burst = &mpipe_recv_pkts;
- eth_dev->tx_pkt_burst = &mpipe_xmit_pkts;
-
- rc = mpipe_link_init(priv);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to init link.\n",
- mpipe_name(priv));
- return rc;
- }
-
- return 0;
-}
-
-static struct rte_vdev_driver pmd_mpipe_xgbe_drv = {
- .probe = rte_pmd_mpipe_probe,
-};
-
-static struct rte_vdev_driver pmd_mpipe_gbe_drv = {
- .probe = rte_pmd_mpipe_probe,
-};
-
-RTE_PMD_REGISTER_VDEV(net_mpipe_xgbe, pmd_mpipe_xgbe_drv);
-RTE_PMD_REGISTER_ALIAS(net_mpipe_xgbe, xgbe);
-RTE_PMD_REGISTER_VDEV(net_mpipe_gbe, pmd_mpipe_gbe_drv);
-RTE_PMD_REGISTER_ALIAS(net_mpipe_gbe, gbe);
-
-static void __attribute__((constructor, used))
-mpipe_init_contexts(void)
-{
- struct mpipe_context *context;
- int rc, instance;
-
- for (instance = 0; instance < GXIO_MPIPE_INSTANCE_MAX; instance++) {
- context = &mpipe_contexts[instance];
-
- rte_spinlock_init(&context->lock);
- rc = gxio_mpipe_init(&context->context, instance);
- if (rc < 0)
- break;
- }
-
- mpipe_instances = instance;
-}
diff --git a/drivers/net/nfp/Makefile b/drivers/net/nfp/Makefile
index 4cadd131..4ee2c2dc 100644
--- a/drivers/net/nfp/Makefile
+++ b/drivers/net/nfp/Makefile
@@ -50,9 +50,4 @@ LIBABIVER := 1
#
SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_net.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_net
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 099d82b3..5c5cba19 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -46,6 +46,7 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_dev.h>
#include <rte_ether.h>
#include <rte_malloc.h>
@@ -63,8 +64,7 @@
/* Prototypes */
static void nfp_net_close(struct rte_eth_dev *dev);
static int nfp_net_configure(struct rte_eth_dev *dev);
-static void nfp_net_dev_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static void nfp_net_dev_interrupt_handler(void *param);
static void nfp_net_dev_interrupt_delayed_handler(void *param);
static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static void nfp_net_infos_get(struct rte_eth_dev *dev,
@@ -205,26 +205,6 @@ nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
}
-/* Creating memzone for hardware rings. */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.driver.name,
- ring_name, dev->data->port_id, queue_id);
-
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
- return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0,
- NFP_MEMZONE_ALIGN);
-}
-
/*
* Atomically reads link status information from global structure rte_eth_dev.
*
@@ -308,7 +288,6 @@ static void
nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
{
nfp_net_rx_queue_release_mbufs(rxq);
- rxq->wr_p = 0;
rxq->rd_p = 0;
rxq->nb_rx_hold = 0;
}
@@ -347,8 +326,6 @@ nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
nfp_net_tx_queue_release_mbufs(txq);
txq->wr_p = 0;
txq->rd_p = 0;
- txq->tail = 0;
- txq->qcp_rd_p = 0;
}
static int
@@ -377,12 +354,12 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
if (new == 0)
break;
if (new & NFP_NET_CFG_UPDATE_ERR) {
- PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x\n", new);
+ PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
return -1;
}
if (cnt >= NFP_NET_POLL_TIMEOUT) {
PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
- " %dms\n", update, cnt);
+ " %dms", update, cnt);
rte_panic("Exiting\n");
}
nanosleep(&wait, 0); /* waiting for a 1ms */
@@ -426,7 +403,7 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
* Reconfig errors imply situations where they can be handled.
* Otherwise, rte_panic is called inside __nfp_net_reconfig
*/
- PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x\n",
+ PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
ctrl, update);
return -EIO;
}
@@ -456,7 +433,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
* called after that internal process
*/
- PMD_INIT_LOG(DEBUG, "Configure\n");
+ PMD_INIT_LOG(DEBUG, "Configure");
dev_conf = &dev->data->dev_conf;
rxmode = &dev_conf->rxmode;
@@ -464,7 +441,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
/* Checking TX mode */
if (txmode->mq_mode) {
- PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported\n");
+ PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
return -EINVAL;
}
@@ -474,13 +451,13 @@ nfp_net_configure(struct rte_eth_dev *dev)
update = NFP_NET_CFG_UPDATE_RSS;
new_ctrl = NFP_NET_CFG_CTRL_RSS;
} else {
- PMD_INIT_LOG(INFO, "RSS not supported\n");
+ PMD_INIT_LOG(INFO, "RSS not supported");
return -EINVAL;
}
}
if (rxmode->split_hdr_size) {
- PMD_INIT_LOG(INFO, "rxmode does not support split header\n");
+ PMD_INIT_LOG(INFO, "rxmode does not support split header");
return -EINVAL;
}
@@ -488,13 +465,13 @@ nfp_net_configure(struct rte_eth_dev *dev)
if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
} else {
- PMD_INIT_LOG(INFO, "RXCSUM not supported\n");
+ PMD_INIT_LOG(INFO, "RXCSUM not supported");
return -EINVAL;
}
}
if (rxmode->hw_vlan_filter) {
- PMD_INIT_LOG(INFO, "VLAN filter not supported\n");
+ PMD_INIT_LOG(INFO, "VLAN filter not supported");
return -EINVAL;
}
@@ -502,13 +479,13 @@ nfp_net_configure(struct rte_eth_dev *dev)
if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
} else {
- PMD_INIT_LOG(INFO, "hw vlan strip not supported\n");
+ PMD_INIT_LOG(INFO, "hw vlan strip not supported");
return -EINVAL;
}
}
if (rxmode->hw_vlan_extend) {
- PMD_INIT_LOG(INFO, "VLAN extended not supported\n");
+ PMD_INIT_LOG(INFO, "VLAN extended not supported");
return -EINVAL;
}
@@ -520,12 +497,12 @@ nfp_net_configure(struct rte_eth_dev *dev)
/* this is handled in rte_eth_dev_configure */
if (rxmode->hw_strip_crc) {
- PMD_INIT_LOG(INFO, "strip CRC not supported\n");
+ PMD_INIT_LOG(INFO, "strip CRC not supported");
return -EINVAL;
}
if (rxmode->enable_scatter) {
- PMD_INIT_LOG(INFO, "Scatter not supported\n");
+ PMD_INIT_LOG(INFO, "Scatter not supported");
return -EINVAL;
}
@@ -629,15 +606,57 @@ static void nfp_net_read_mac(struct nfp_net_hw *hw)
}
static int
+nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
+{
+ struct nfp_net_hw *hw;
+ int i;
+
+ if (!intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (!intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
+ PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
+ /* UIO just supports one queue and no LSC*/
+ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
+ } else {
+ PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ /*
+ * The first msix vector is reserved for non
+ * efd interrupts
+ */
+ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
+ }
+
+ /* Avoiding TX interrupts */
+ hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
+ return 0;
+}
+
+static int
nfp_net_start(struct rte_eth_dev *dev)
{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
+ uint32_t intr_vector;
int ret;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- PMD_INIT_LOG(DEBUG, "Start\n");
+ PMD_INIT_LOG(DEBUG, "Start");
/* Disabling queues just in case... */
nfp_net_disable_queues(dev);
@@ -648,10 +667,40 @@ nfp_net_start(struct rte_eth_dev *dev)
/* Enabling the required queues in the device */
nfp_net_enable_queues(dev);
+ /* check and configure queue intr-vector mapping */
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
+ /*
+ * Better not to share LSC with RX interrupts.
+ * Unregistering LSC interrupt handler
+ */
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ nfp_net_dev_interrupt_handler, (void *)dev);
+
+ if (dev->data->nb_rx_queues > 1) {
+ PMD_INIT_LOG(ERR, "PMD rx interrupt only "
+ "supports 1 queue with UIO");
+ return -EIO;
+ }
+ }
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle))
+ nfp_configure_rx_interrupt(dev, intr_handle);
+
+ rte_intr_enable(intr_handle);
+
/* Enable device */
- new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_UPDATE_MSIX;
+ new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
+ /* Just configuring queues interrupts when necessary */
+ if (rte_intr_dp_is_en(intr_handle))
+ update |= NFP_NET_CFG_UPDATE_MSIX;
+
if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
@@ -697,7 +746,7 @@ nfp_net_stop(struct rte_eth_dev *dev)
{
int i;
- PMD_INIT_LOG(DEBUG, "Stop\n");
+ PMD_INIT_LOG(DEBUG, "Stop");
nfp_net_disable_queues(dev);
@@ -718,10 +767,12 @@ static void
nfp_net_close(struct rte_eth_dev *dev)
{
struct nfp_net_hw *hw;
+ struct rte_pci_device *pci_dev;
- PMD_INIT_LOG(DEBUG, "Close\n");
+ PMD_INIT_LOG(DEBUG, "Close");
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
/*
* We assume that the DPDK application is stopping all the
@@ -730,11 +781,11 @@ nfp_net_close(struct rte_eth_dev *dev)
nfp_net_stop(dev);
- rte_intr_disable(&dev->pci_dev->intr_handle);
+ rte_intr_disable(&pci_dev->intr_handle);
nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
/* unregister callback func from eal lib */
- rte_intr_callback_unregister(&dev->pci_dev->intr_handle,
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
nfp_net_dev_interrupt_handler,
(void *)dev);
@@ -755,7 +806,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
- PMD_INIT_LOG(INFO, "Promiscuous mode not supported\n");
+ PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
return;
}
@@ -816,6 +867,17 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
struct rte_eth_link link, old;
uint32_t nn_link_status;
+ static const uint32_t ls_to_ethtool[] = {
+ [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
+ [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = ETH_SPEED_NUM_NONE,
+ [NFP_NET_CFG_STS_LINK_RATE_1G] = ETH_SPEED_NUM_1G,
+ [NFP_NET_CFG_STS_LINK_RATE_10G] = ETH_SPEED_NUM_10G,
+ [NFP_NET_CFG_STS_LINK_RATE_25G] = ETH_SPEED_NUM_25G,
+ [NFP_NET_CFG_STS_LINK_RATE_40G] = ETH_SPEED_NUM_40G,
+ [NFP_NET_CFG_STS_LINK_RATE_50G] = ETH_SPEED_NUM_50G,
+ [NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G,
+ };
+
PMD_DRV_LOG(DEBUG, "Link update\n");
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -831,8 +893,21 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
- /* Other cards can limit the tx and rx rate per VF */
- link.link_speed = ETH_SPEED_NUM_40G;
+
+ nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
+ NFP_NET_CFG_STS_LINK_RATE_MASK;
+
+ if ((NFD_CFG_MAJOR_VERSION_of(hw->ver) < 4) ||
+ ((NFD_CFG_MINOR_VERSION_of(hw->ver) == 4) &&
+ (NFD_CFG_MINOR_VERSION_of(hw->ver) == 0)))
+ /* We really do not know the speed wil old firmware */
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ else {
+ if (nn_link_status >= RTE_DIM(ls_to_ethtool))
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ else
+ link.link_speed = ls_to_ethtool[nn_link_status];
+ }
if (old.link_status != link.link_status) {
nfp_net_dev_atomic_write_link_status(dev, &link);
@@ -1006,7 +1081,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->driver_name = dev->driver->pci_drv.driver.name;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
@@ -1055,7 +1130,12 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
- dev_info->speed_capa = ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+ dev_info->speed_capa = ETH_SPEED_NUM_1G | ETH_LINK_SPEED_10G |
+ ETH_SPEED_NUM_25G | ETH_SPEED_NUM_40G |
+ ETH_SPEED_NUM_50G | ETH_LINK_SPEED_100G;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
}
static const uint32_t *
@@ -1085,13 +1165,7 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
- if (rxq == NULL) {
- PMD_INIT_LOG(ERR, "Bad queue: %u\n", queue_idx);
- return 0;
- }
-
- idx = rxq->rd_p % rxq->rx_count;
- rxds = &rxq->rxds[idx];
+ idx = rxq->rd_p;
count = 0;
@@ -1119,9 +1193,49 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
return count;
}
+static int
+nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev;
+ struct nfp_net_hw *hw;
+ int base = 0;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
+ base = 1;
+
+ /* Make sure all updates are written before un-masking */
+ rte_wmb();
+ nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
+ NFP_NET_CFG_ICR_UNMASKED);
+ return 0;
+}
+
+static int
+nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev;
+ struct nfp_net_hw *hw;
+ int base = 0;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
+ base = 1;
+
+ /* Make sure all updates are written before un-masking */
+ rte_wmb();
+ nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
+ return 0;
+}
+
static void
nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
struct rte_eth_link link;
memset(&link, 0, sizeof(link));
@@ -1136,8 +1250,8 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
(int)(dev->data->port_id));
RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
- dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
- dev->pci_dev->addr.devid, dev->pci_dev->addr.function);
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
}
/* Interrupt configuration and handling */
@@ -1152,13 +1266,15 @@ static void
nfp_net_irq_unmask(struct rte_eth_dev *dev)
{
struct nfp_net_hw *hw;
+ struct rte_pci_device *pci_dev;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
/* If MSI-X auto-masking is used, clear the entry */
rte_wmb();
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(&pci_dev->intr_handle);
} else {
/* Make sure all updates are written before un-masking */
rte_wmb();
@@ -1168,8 +1284,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
}
static void
-nfp_net_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+nfp_net_dev_interrupt_handler(void *param)
{
int64_t timeout;
struct rte_eth_link link;
@@ -1321,9 +1436,10 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
sizeof(struct nfp_net_rx_desc) *
- NFP_NET_MAX_RX_DESC, socket_id);
+ NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
+ socket_id);
if (tz == NULL) {
RTE_LOG(ERR, PMD, "Error allocatig rx dma\n");
@@ -1390,8 +1506,6 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
rxe[i].mbuf = mbuf;
PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr);
-
- rxq->wr_p++;
}
/* Make sure all writes are flushed before telling the hardware */
@@ -1465,9 +1579,10 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
sizeof(struct nfp_net_tx_desc) *
- NFP_NET_MAX_TX_DESC, socket_id);
+ NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
+ socket_id);
if (tz == NULL) {
RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
nfp_net_tx_queue_release(txq);
@@ -1475,7 +1590,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
txq->tx_count = nb_desc;
- txq->tail = 0;
txq->tx_free_thresh = tx_free_thresh;
txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
@@ -1519,6 +1633,33 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return 0;
}
+/* nfp_net_tx_tso - Set TX descriptor for TSO */
+static inline void
+nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
+ struct rte_mbuf *mb)
+{
+ uint64_t ol_flags;
+ struct nfp_net_hw *hw = txq->hw;
+
+ if (!(hw->cap & NFP_NET_CFG_CTRL_LSO))
+ goto clean_txd;
+
+ ol_flags = mb->ol_flags;
+
+ if (!(ol_flags & PKT_TX_TCP_SEG))
+ goto clean_txd;
+
+ txd->l4_offset = mb->l2_len + mb->l3_len + mb->l4_len;
+ txd->lso = rte_cpu_to_le_16(mb->tso_segsz);
+ txd->flags = PCIE_DESC_TX_LSO;
+ return;
+
+clean_txd:
+ txd->flags = 0;
+ txd->l4_offset = 0;
+ txd->lso = 0;
+}
+
/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
static inline void
nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
@@ -1604,12 +1745,6 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
- /*
- * hash type is sharing the same word with input port info
- * 31-8: input port
- * 7:0: hash type
- */
- hash_type &= 0xff;
mbuf->hash.rss = hash;
mbuf->ol_flags |= PKT_RX_RSS_HASH;
@@ -1628,29 +1763,6 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
}
}
-/* nfp_net_check_port - Set mbuf in_port field */
-static void
-nfp_net_check_port(struct nfp_net_rx_desc *rxd, struct rte_mbuf *mbuf)
-{
- uint32_t port;
-
- if (!(rxd->rxd.flags & PCIE_DESC_RX_INGRESS_PORT)) {
- mbuf->port = 0;
- return;
- }
-
- port = rte_be_to_cpu_32(*(uint32_t *)((uint8_t *)mbuf->buf_addr +
- mbuf->data_off - 8));
-
- /*
- * hash type is sharing the same word with input port info
- * 31-8: input port
- * 7:0: hash type
- */
- port = (uint8_t)(port >> 8);
- mbuf->port = port;
-}
-
static inline void
nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
{
@@ -1696,7 +1808,6 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
struct nfp_net_hw *hw;
struct rte_mbuf *mb;
struct rte_mbuf *new_mb;
- int idx;
uint16_t nb_hold;
uint64_t dma_addr;
int avail;
@@ -1707,7 +1818,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
* DPDK just checks the queue is lower than max queues
* enabled. But the queue needs to be configured
*/
- RTE_LOG(ERR, PMD, "RX Bad queue\n");
+ RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
return -EINVAL;
}
@@ -1716,11 +1827,9 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
nb_hold = 0;
while (avail < nb_pkts) {
- idx = rxq->rd_p % rxq->rx_count;
-
- rxb = &rxq->rxbufs[idx];
+ rxb = &rxq->rxbufs[rxq->rd_p];
if (unlikely(rxb == NULL)) {
- RTE_LOG(ERR, PMD, "rxb does not exist!\n");
+ RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
break;
}
@@ -1730,7 +1839,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
*/
rte_rmb();
- rxds = &rxq->rxds[idx];
+ rxds = &rxq->rxds[rxq->rd_p];
if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
break;
@@ -1740,7 +1849,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
*/
new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
if (unlikely(new_mb == NULL)) {
- RTE_LOG(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
+ RTE_LOG_DP(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
"queue_id=%u\n", (unsigned)rxq->port_id,
(unsigned)rxq->qidx);
nfp_net_mbuf_alloc_failed(rxq);
@@ -1771,7 +1880,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
* responsibility of avoiding it. But we have
* to give some info about the error
*/
- RTE_LOG(ERR, PMD,
+ RTE_LOG_DP(ERR, PMD,
"mbuf overflow likely due to the RX offset.\n"
"\t\tYour mbuf size should have extra space for"
" RX offset=%u bytes.\n"
@@ -1800,9 +1909,6 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
/* Checking the checksum flag */
nfp_net_rx_cksum(rxq, rxds, mb);
- /* Checking the port flag */
- nfp_net_check_port(rxds, mb);
-
if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
@@ -1821,6 +1927,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
rxq->rd_p++;
+ if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
+ rxq->rd_p = 0;
}
if (nb_hold == 0)
@@ -1866,33 +1974,40 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
/* Work out how many packets have been sent */
qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
- if (qcp_rd_p == txq->qcp_rd_p) {
+ if (qcp_rd_p == txq->rd_p) {
PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
"packets (%u, %u)\n", txq->qidx,
- qcp_rd_p, txq->qcp_rd_p);
+ qcp_rd_p, txq->rd_p);
return 0;
}
- if (qcp_rd_p > txq->qcp_rd_p)
- todo = qcp_rd_p - txq->qcp_rd_p;
+ if (qcp_rd_p > txq->rd_p)
+ todo = qcp_rd_p - txq->rd_p;
else
- todo = qcp_rd_p + txq->tx_count - txq->qcp_rd_p;
+ todo = qcp_rd_p + txq->tx_count - txq->rd_p;
- PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->qcp_rd_p: %u, qcp->rd_p: %u\n",
- qcp_rd_p, txq->qcp_rd_p, txq->rd_p);
+ PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u\n",
+ qcp_rd_p, txq->rd_p, txq->rd_p);
if (todo == 0)
return todo;
- txq->qcp_rd_p += todo;
- txq->qcp_rd_p %= txq->tx_count;
txq->rd_p += todo;
+ if (unlikely(txq->rd_p >= txq->tx_count))
+ txq->rd_p -= txq->tx_count;
return todo;
}
/* Leaving always free descriptors for avoiding wrapping confusion */
-#define NFP_FREE_TX_DESC(t) (t->tx_count - (t->wr_p - t->rd_p) - 8)
+static inline
+uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
+{
+ if (txq->wr_p >= txq->rd_p)
+ return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
+ else
+ return txq->rd_p - txq->wr_p - 8;
+}
/*
* nfp_net_txq_full - Check if the TX queue free descriptors
@@ -1903,9 +2018,9 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
* This function uses the host copy* of read/write pointers
*/
static inline
-int nfp_net_txq_full(struct nfp_net_txq *txq)
+uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
{
- return NFP_FREE_TX_DESC(txq) < txq->tx_free_thresh;
+ return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
}
static uint16_t
@@ -1913,7 +2028,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct nfp_net_txq *txq;
struct nfp_net_hw *hw;
- struct nfp_net_tx_desc *txds;
+ struct nfp_net_tx_desc *txds, txd;
struct rte_mbuf *pkt;
uint64_t dma_addr;
int pkt_size, dma_size;
@@ -1923,15 +2038,15 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq = tx_queue;
hw = txq->hw;
- txds = &txq->txds[txq->tail];
+ txds = &txq->txds[txq->wr_p];
PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n",
- txq->qidx, txq->tail, nb_pkts);
+ txq->qidx, txq->wr_p, nb_pkts);
- if ((NFP_FREE_TX_DESC(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
+ if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
nfp_net_tx_free_bufs(txq);
- free_descs = (uint16_t)NFP_FREE_TX_DESC(txq);
+ free_descs = (uint16_t)nfp_free_tx_desc(txq);
if (unlikely(free_descs == 0))
return 0;
@@ -1944,7 +2059,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Sending packets */
while ((i < nb_pkts) && free_descs) {
/* Grabbing the mbuf linked to the current descriptor */
- lmbuf = &txq->txbufs[txq->tail].mbuf;
+ lmbuf = &txq->txbufs[txq->wr_p].mbuf;
/* Warming the cache for releasing the mbuf later on */
RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
@@ -1952,7 +2067,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
if (unlikely((pkt->nb_segs > 1) &&
!(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
- PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set\n");
+ PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
rte_panic("Multisegment packet unsupported\n");
}
@@ -1962,19 +2077,18 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/*
* Checksum and VLAN flags just in the first descriptor for a
- * multisegment packet
+ * multisegment packet, but TSO info needs to be in all of them.
*/
- nfp_net_tx_cksum(txq, txds, pkt);
+ txd.data_len = pkt->pkt_len;
+ nfp_net_tx_tso(txq, &txd, pkt);
+ nfp_net_tx_cksum(txq, &txd, pkt);
if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
- txds->flags |= PCIE_DESC_TX_VLAN;
- txds->vlan = pkt->vlan_tci;
+ txd.flags |= PCIE_DESC_TX_VLAN;
+ txd.vlan = pkt->vlan_tci;
}
- if (pkt->ol_flags & PKT_TX_TCP_SEG)
- rte_panic("TSO is not supported\n");
-
/*
* mbuf data_len is the data in one segment and pkt_len data
* in the whole packet. When the packet is just one segment,
@@ -1982,16 +2096,20 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
*/
pkt_size = pkt->pkt_len;
- /* Releasing mbuf which was prefetched above */
- if (*lmbuf)
- rte_pktmbuf_free(*lmbuf);
- /*
- * Linking mbuf with descriptor for being released
- * next time descriptor is used
- */
- *lmbuf = pkt;
-
while (pkt_size) {
+ /* Copying TSO, VLAN and cksum info */
+ *txds = txd;
+
+ /* Releasing mbuf used by this descriptor previously*/
+ if (*lmbuf)
+ rte_pktmbuf_free_seg(*lmbuf);
+
+ /*
+ * Linking mbuf with descriptor for being released
+ * next time descriptor is used
+ */
+ *lmbuf = pkt;
+
dma_size = pkt->data_len;
dma_addr = rte_mbuf_data_dma_addr(pkt);
PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
@@ -1999,16 +2117,15 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Filling descriptors fields */
txds->dma_len = dma_size;
- txds->data_len = pkt->pkt_len;
+ txds->data_len = txd.data_len;
txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
txds->dma_addr_lo = (dma_addr & 0xffffffff);
ASSERT(free_descs > 0);
free_descs--;
txq->wr_p++;
- txq->tail++;
- if (unlikely(txq->tail == txq->tx_count)) /* wrapping?*/
- txq->tail = 0;
+ if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
+ txq->wr_p = 0;
pkt_size -= dma_size;
if (!pkt_size) {
@@ -2019,7 +2136,8 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
pkt = pkt->next;
}
/* Referencing next free TX descriptor */
- txds = &txq->txds[txq->tail];
+ txds = &txq->txds[txq->wr_p];
+ lmbuf = &txq->txbufs[txq->wr_p].mbuf;
issued_descs++;
}
i++;
@@ -2306,6 +2424,8 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
.rx_queue_count = nfp_net_rx_queue_count,
.tx_queue_setup = nfp_net_tx_queue_setup,
.tx_queue_release = nfp_net_tx_queue_release,
+ .rx_queue_intr_enable = nfp_rx_queue_intr_enable,
+ .rx_queue_intr_disable = nfp_rx_queue_intr_disable,
};
static int
@@ -2330,15 +2450,16 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
- PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u\n",
+ PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
pci_dev->id.vendor_id, pci_dev->id.device_id,
pci_dev->addr.domain, pci_dev->addr.bus,
pci_dev->addr.devid, pci_dev->addr.function);
@@ -2365,13 +2486,13 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
return -ENODEV;
}
- PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x\n", tx_bar_off);
- PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x\n", rx_bar_off);
+ PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x", tx_bar_off);
+ PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x", rx_bar_off);
hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;
- PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p\n",
+ PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
nfp_net_cfg_queue_setup(hw);
@@ -2387,9 +2508,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
else
hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
- PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d\n",
+ PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
hw->ver, hw->max_mtu);
- PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s\n", hw->cap,
+ PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s", hw->cap,
hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
@@ -2400,13 +2521,12 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "");
- pci_dev = eth_dev->pci_dev;
hw->ctrl = 0;
hw->stride_rx = stride;
hw->stride_tx = stride;
- PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n",
+ PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
hw->max_rx_queues, hw->max_tx_queues);
/* Initializing spinlock for reconfigs */
@@ -2441,9 +2561,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
nfp_net_dev_interrupt_handler,
(void *)eth_dev);
- /* enable uio intr after callback register */
- rte_intr_enable(&pci_dev->intr_handle);
-
/* Telling the firmware about the LSC interrupt entry */
nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
@@ -2453,7 +2570,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
return 0;
}
-static struct rte_pci_id pci_id_nfp_net_map[] = {
+static const struct rte_pci_id pci_id_nfp_net_map[] = {
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
PCI_DEVICE_ID_NFP6000_PF_NIC)
@@ -2467,20 +2584,28 @@ static struct rte_pci_id pci_id_nfp_net_map[] = {
},
};
-static struct eth_driver rte_nfp_net_pmd = {
- .pci_drv = {
- .id_table = pci_id_nfp_net_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = nfp_net_init,
- .dev_private_size = sizeof(struct nfp_net_adapter),
+static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct nfp_net_adapter), nfp_net_init);
+}
+
+static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_nfp_net_pmd = {
+ .id_table = pci_id_nfp_net_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_nfp_pci_probe,
+ .remove = eth_nfp_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio");
/*
* Local variables:
diff --git a/drivers/net/nfp/nfp_net_ctrl.h b/drivers/net/nfp/nfp_net_ctrl.h
index fce82515..2c500433 100644
--- a/drivers/net/nfp/nfp_net_ctrl.h
+++ b/drivers/net/nfp/nfp_net_ctrl.h
@@ -112,6 +112,7 @@
#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* Enable VXLAN */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* Enable NVGRE */
+#define NFP_NET_CFG_CTRL_MSIX_TX_OFF (0x1 << 26) /* Disable MSIX for TX */
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
@@ -157,6 +158,17 @@
#define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0)
#define NFP_NET_CFG_STS 0x0034
#define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */
+/* Link rate */
+#define NFP_NET_CFG_STS_LINK_RATE_SHIFT 1
+#define NFP_NET_CFG_STS_LINK_RATE_MASK 0xF
+#define NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED 0
+#define NFP_NET_CFG_STS_LINK_RATE_UNKNOWN 1
+#define NFP_NET_CFG_STS_LINK_RATE_1G 2
+#define NFP_NET_CFG_STS_LINK_RATE_10G 3
+#define NFP_NET_CFG_STS_LINK_RATE_25G 4
+#define NFP_NET_CFG_STS_LINK_RATE_40G 5
+#define NFP_NET_CFG_STS_LINK_RATE_50G 6
+#define NFP_NET_CFG_STS_LINK_RATE_100G 7
#define NFP_NET_CFG_CAP 0x0038
#define NFP_NET_CFG_MAX_TXRINGS 0x003c
#define NFP_NET_CFG_MAX_RXRINGS 0x0040
diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h
index c1809720..eec56bc1 100644
--- a/drivers/net/nfp/nfp_net_pmd.h
+++ b/drivers/net/nfp/nfp_net_pmd.h
@@ -121,25 +121,26 @@ struct nfp_net_adapter;
#define NFD_CFG_MINOR_VERSION_of(x) (((x) >> 0) & 0xff)
#include <linux/types.h>
+#include <rte_io.h>
static inline uint8_t nn_readb(volatile const void *addr)
{
- return *((volatile const uint8_t *)(addr));
+ return rte_read8(addr);
}
static inline void nn_writeb(uint8_t val, volatile void *addr)
{
- *((volatile uint8_t *)(addr)) = val;
+ rte_write8(val, addr);
}
static inline uint32_t nn_readl(volatile const void *addr)
{
- return *((volatile const uint32_t *)(addr));
+ return rte_read32(addr);
}
static inline void nn_writel(uint32_t val, volatile void *addr)
{
- *((volatile uint32_t *)(addr)) = val;
+ rte_write32(val, addr);
}
static inline uint64_t nn_readq(volatile void *addr)
@@ -216,12 +217,10 @@ struct nfp_net_txq {
uint32_t wr_p;
uint32_t rd_p;
- uint32_t qcp_rd_p;
uint32_t tx_count;
uint32_t tx_free_thresh;
- uint32_t tail;
/*
* For each descriptor keep a reference to the mbuff and
@@ -240,7 +239,7 @@ struct nfp_net_txq {
struct nfp_net_tx_desc *txds;
/*
- * At this point 56 bytes have been used for all the fields in the
+ * At this point 48 bytes have been used for all the fields in the
* TX critical path. We have room for 8 bytes and still all placed
* in a cache line. We are not using the threshold values below nor
* the txq_flags but if we need to, we can add the most used in the
@@ -269,7 +268,7 @@ struct nfp_net_txq {
#define PCIE_DESC_RX_I_TCP_CSUM_OK (1 << 11)
#define PCIE_DESC_RX_I_UDP_CSUM (1 << 10)
#define PCIE_DESC_RX_I_UDP_CSUM_OK (1 << 9)
-#define PCIE_DESC_RX_INGRESS_PORT (1 << 8)
+#define PCIE_DESC_RX_SPARE (1 << 8)
#define PCIE_DESC_RX_EOP (1 << 7)
#define PCIE_DESC_RX_IP4_CSUM (1 << 6)
#define PCIE_DESC_RX_IP4_CSUM_OK (1 << 5)
@@ -326,7 +325,6 @@ struct nfp_net_rxq {
* freelist descriptors and @rd_p is where the driver start
* reading descriptors for newly arrive packets from.
*/
- uint32_t wr_p;
uint32_t rd_p;
/*
diff --git a/drivers/net/null/Makefile b/drivers/net/null/Makefile
index 0c909c6f..77810bce 100644
--- a/drivers/net/null/Makefile
+++ b/drivers/net/null/Makefile
@@ -41,23 +41,11 @@ CFLAGS += $(WERROR_FLAGS)
EXPORT_MAP := rte_pmd_null_version.map
-LIBABIVER := 1
+LIBABIVER := 2
#
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += rte_eth_null.c
-#
-# Export include files
-#
-SYMLINK-y-include += rte_eth_null.h
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 836d982a..abf3ec75 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -33,14 +33,13 @@
#include <rte_mbuf.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
-#include "rte_eth_null.h"
-
#define ETH_NULL_PACKET_SIZE_ARG "size"
#define ETH_NULL_PACKET_COPY_ARG "copy"
@@ -50,6 +49,7 @@ static unsigned default_packet_copy;
static const char *valid_arguments[] = {
ETH_NULL_PACKET_SIZE_ARG,
ETH_NULL_PACKET_COPY_ARG,
+ "driver",
NULL
};
@@ -88,7 +88,6 @@ struct pmd_internals {
static struct ether_addr eth_addr = { .addr_bytes = {0} };
-static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
@@ -113,8 +112,6 @@ eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
break;
bufs[i]->data_len = (uint16_t)packet_size;
bufs[i]->pkt_len = packet_size;
- bufs[i]->nb_segs = 1;
- bufs[i]->next = NULL;
bufs[i]->port = h->internals->port_id;
}
@@ -295,13 +292,11 @@ eth_dev_info(struct rte_eth_dev *dev,
return;
internals = dev->data->dev_private;
- dev_info->driver_name = drivername;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
dev_info->reta_size = internals->reta_size;
dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}
@@ -480,9 +475,10 @@ static const struct eth_dev_ops ops = {
.rss_hash_conf_get = eth_rss_hash_conf_get
};
-int
-eth_dev_null_create(const char *name,
- const unsigned numa_node,
+static struct rte_vdev_driver pmd_null_drv;
+
+static int
+eth_dev_null_create(struct rte_vdev_device *dev,
unsigned packet_size,
unsigned packet_copy)
{
@@ -499,27 +495,25 @@ eth_dev_null_create(const char *name,
0xBE, 0xAC, 0x01, 0xFA
};
- if (name == NULL)
- return -EINVAL;
+ if (dev->device.numa_node == SOCKET_ID_ANY)
+ dev->device.numa_node = rte_socket_id();
RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
- numa_node);
+ dev->device.numa_node);
/* now do all data allocation - for eth_dev structure, dummy pci driver
* and internal (private) data
*/
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
- goto error;
-
- internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
- if (internals == NULL)
- goto error;
+ data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
+ dev->device.numa_node);
+ if (!data)
+ return -ENOMEM;
- /* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name);
- if (eth_dev == NULL)
- goto error;
+ eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
+ if (!eth_dev) {
+ rte_free(data);
+ return -ENOMEM;
+ }
/* now put it all together
* - store queue data in internals,
@@ -530,6 +524,7 @@ eth_dev_null_create(const char *name,
/* NOTE: we'll replace the data element, of originally allocated eth_dev
* so the nulls are local per-process */
+ internals = eth_dev->data->dev_private;
internals->packet_size = packet_size;
internals->packet_copy = packet_copy;
internals->port_id = eth_dev->data->port_id;
@@ -539,24 +534,16 @@ eth_dev_null_create(const char *name,
rte_memcpy(internals->rss_key, default_rss_key, 40);
- data->dev_private = internals;
- data->port_id = eth_dev->data->port_id;
+ rte_memcpy(data, eth_dev->data, sizeof(*data));
data->nb_rx_queues = (uint16_t)nb_rx_queues;
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
data->mac_addrs = &eth_addr;
- strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name));
eth_dev->data = data;
eth_dev->dev_ops = &ops;
- TAILQ_INIT(&eth_dev->link_intr_cbs);
-
- eth_dev->driver = NULL;
data->dev_flags = RTE_ETH_DEV_DETACHABLE;
- data->kdrv = RTE_KDRV_NONE;
- data->drv_name = drivername;
- data->numa_node = numa_node;
/* finally assign rx and tx ops */
if (packet_copy) {
@@ -568,12 +555,6 @@ eth_dev_null_create(const char *name,
}
return 0;
-
-error:
- rte_free(data);
- rte_free(internals);
-
- return -1;
}
static inline int
@@ -611,21 +592,21 @@ get_packet_copy_arg(const char *key __rte_unused,
}
static int
-rte_pmd_null_probe(const char *name, const char *params)
+rte_pmd_null_probe(struct rte_vdev_device *dev)
{
- unsigned numa_node;
+ const char *name, *params;
unsigned packet_size = default_packet_size;
unsigned packet_copy = default_packet_copy;
struct rte_kvargs *kvlist = NULL;
int ret;
- if (name == NULL)
+ if (!dev)
return -EINVAL;
+ name = rte_vdev_device_name(dev);
+ params = rte_vdev_device_args(dev);
RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);
- numa_node = rte_socket_id();
-
if (params != NULL) {
kvlist = rte_kvargs_parse(params, valid_arguments);
if (kvlist == NULL)
@@ -654,7 +635,7 @@ rte_pmd_null_probe(const char *name, const char *params)
"packet copy is %s\n", packet_size,
packet_copy ? "enabled" : "disabled");
- ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);
+ ret = eth_dev_null_create(dev, packet_size, packet_copy);
free_kvlist:
if (kvlist)
@@ -663,18 +644,18 @@ free_kvlist:
}
static int
-rte_pmd_null_remove(const char *name)
+rte_pmd_null_remove(struct rte_vdev_device *dev)
{
struct rte_eth_dev *eth_dev = NULL;
- if (name == NULL)
+ if (!dev)
return -EINVAL;
RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
rte_socket_id());
/* find the ethdev entry */
- eth_dev = rte_eth_dev_allocated(name);
+ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
if (eth_dev == NULL)
return -1;
diff --git a/drivers/net/null/rte_pmd_null_version.map b/drivers/net/null/rte_pmd_null_version.map
index 84b1d0fe..ef353984 100644
--- a/drivers/net/null/rte_pmd_null_version.map
+++ b/drivers/net/null/rte_pmd_null_version.map
@@ -2,10 +2,3 @@ DPDK_2.0 {
local: *;
};
-
-DPDK_2.2 {
- global:
-
- eth_dev_null_create;
-
-} DPDK_2.0;
diff --git a/drivers/net/pcap/Makefile b/drivers/net/pcap/Makefile
index 89ac4024..7ebd0bef 100644
--- a/drivers/net/pcap/Makefile
+++ b/drivers/net/pcap/Makefile
@@ -55,11 +55,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += rte_eth_pcap.c
#
SYMLINK-y-include +=
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 57b0b315..defb3b41 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -40,6 +40,7 @@
#include <rte_cycles.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
@@ -119,7 +120,6 @@ static struct ether_addr eth_addr = {
.addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 }
};
-static const char *drivername = "Pcap PMD";
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
@@ -294,9 +294,9 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
}
- rte_pktmbuf_free(mbuf);
num_tx++;
tx_bytes += mbuf->pkt_len;
+ rte_pktmbuf_free(mbuf);
}
/*
@@ -552,14 +552,12 @@ eth_dev_info(struct rte_eth_dev *dev,
{
struct pmd_internals *internals = dev->data->dev_private;
- dev_info->driver_name = drivername;
dev_info->if_index = internals->if_index;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t) -1;
dev_info->max_rx_queues = dev->data->nb_rx_queues;
dev_info->max_tx_queues = dev->data->nb_tx_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
}
static void
@@ -790,15 +788,20 @@ open_tx_iface(const char *key, const char *value, void *extra_args)
return 0;
}
+static struct rte_vdev_driver pmd_pcap_drv;
+
static int
-pmd_init_internals(const char *name, const unsigned int nb_rx_queues,
+pmd_init_internals(struct rte_vdev_device *vdev,
+ const unsigned int nb_rx_queues,
const unsigned int nb_tx_queues,
struct pmd_internals **internals,
struct rte_eth_dev **eth_dev)
{
struct rte_eth_dev_data *data = NULL;
- unsigned int numa_node = rte_socket_id();
+ unsigned int numa_node = vdev->device.numa_node;
+ const char *name;
+ name = rte_vdev_device_name(vdev);
RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %u\n",
numa_node);
@@ -807,17 +810,14 @@ pmd_init_internals(const char *name, const unsigned int nb_rx_queues,
*/
data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
if (data == NULL)
- goto error;
-
- *internals = rte_zmalloc_socket(name, sizeof(**internals), 0,
- numa_node);
- if (*internals == NULL)
- goto error;
+ return -1;
/* reserve an ethdev entry */
- *eth_dev = rte_eth_dev_allocate(name);
- if (*eth_dev == NULL)
- goto error;
+ *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
+ if (*eth_dev == NULL) {
+ rte_free(data);
+ return -1;
+ }
/* now put it all together
* - store queue data in internals,
@@ -825,9 +825,8 @@ pmd_init_internals(const char *name, const unsigned int nb_rx_queues,
* - point eth_dev_data to internals
* - and point eth_dev structure to new eth_dev_data structure
*/
- data->dev_private = *internals;
- data->port_id = (*eth_dev)->data->port_id;
- snprintf(data->name, sizeof(data->name), "%s", (*eth_dev)->data->name);
+ *internals = (*eth_dev)->data->dev_private;
+ rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
data->nb_rx_queues = (uint16_t)nb_rx_queues;
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
@@ -839,26 +838,17 @@ pmd_init_internals(const char *name, const unsigned int nb_rx_queues,
*/
(*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
- (*eth_dev)->driver = NULL;
data->dev_flags = RTE_ETH_DEV_DETACHABLE;
- data->kdrv = RTE_KDRV_NONE;
- data->drv_name = drivername;
- data->numa_node = numa_node;
return 0;
-
-error:
- rte_free(data);
- rte_free(*internals);
-
- return -1;
}
static int
-eth_from_pcaps_common(const char *name, struct pmd_devargs *rx_queues,
- const unsigned int nb_rx_queues, struct pmd_devargs *tx_queues,
- const unsigned int nb_tx_queues, struct rte_kvargs *kvlist,
- struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
+eth_from_pcaps_common(struct rte_vdev_device *vdev,
+ struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
+ struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
+ struct rte_kvargs *kvlist, struct pmd_internals **internals,
+ struct rte_eth_dev **eth_dev)
{
struct rte_kvargs_pair *pair = NULL;
unsigned int k_idx;
@@ -870,7 +860,7 @@ eth_from_pcaps_common(const char *name, struct pmd_devargs *rx_queues,
if (tx_queues == NULL && nb_tx_queues > 0)
return -1;
- if (pmd_init_internals(name, nb_rx_queues, nb_tx_queues, internals,
+ if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals,
eth_dev) < 0)
return -1;
@@ -908,16 +898,17 @@ eth_from_pcaps_common(const char *name, struct pmd_devargs *rx_queues,
}
static int
-eth_from_pcaps(const char *name, struct pmd_devargs *rx_queues,
- const unsigned int nb_rx_queues, struct pmd_devargs *tx_queues,
- const unsigned int nb_tx_queues, struct rte_kvargs *kvlist,
- int single_iface, unsigned int using_dumpers)
+eth_from_pcaps(struct rte_vdev_device *vdev,
+ struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
+ struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
+ struct rte_kvargs *kvlist, int single_iface,
+ unsigned int using_dumpers)
{
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
int ret;
- ret = eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
+ ret = eth_from_pcaps_common(vdev, rx_queues, nb_rx_queues,
tx_queues, nb_tx_queues, kvlist, &internals, &eth_dev);
if (ret < 0)
@@ -937,8 +928,9 @@ eth_from_pcaps(const char *name, struct pmd_devargs *rx_queues,
}
static int
-pmd_pcap_probe(const char *name, const char *params)
+pmd_pcap_probe(struct rte_vdev_device *dev)
{
+ const char *name;
unsigned int is_rx_pcap = 0, is_tx_pcap = 0;
struct rte_kvargs *kvlist;
struct pmd_devargs pcaps = {0};
@@ -946,13 +938,14 @@ pmd_pcap_probe(const char *name, const char *params)
int single_iface = 0;
int ret;
+ name = rte_vdev_device_name(dev);
RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);
gettimeofday(&start_time, NULL);
start_cycles = rte_get_timer_cycles();
hz = rte_get_timer_hz();
- kvlist = rte_kvargs_parse(params, valid_arguments);
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
if (kvlist == NULL)
return -1;
@@ -1026,7 +1019,7 @@ pmd_pcap_probe(const char *name, const char *params)
goto free_kvlist;
create_eth:
- ret = eth_from_pcaps(name, &pcaps, pcaps.num_of_queue, &dumpers,
+ ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers,
dumpers.num_of_queue, kvlist, single_iface, is_tx_pcap);
free_kvlist:
@@ -1036,18 +1029,18 @@ free_kvlist:
}
static int
-pmd_pcap_remove(const char *name)
+pmd_pcap_remove(struct rte_vdev_device *dev)
{
struct rte_eth_dev *eth_dev = NULL;
RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %u\n",
rte_socket_id());
- if (name == NULL)
+ if (!dev)
return -1;
/* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocated(name);
+ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
if (eth_dev == NULL)
return -1;
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index 29b443df..3323914c 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -56,6 +56,9 @@ endif
CFLAGS_BASE_DRIVER += -Wno-strict-prototypes
ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1)
CFLAGS_BASE_DRIVER += -Wno-shift-negative-value
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
endif
else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
CFLAGS_BASE_DRIVER += -Wno-format-extra-args
@@ -76,33 +79,31 @@ endif
#
#
BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
-$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS+=$(CFLAGS_BASE_DRIVER)))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
#
# all source are stored in SRCS-y
#
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_dev.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_hw.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_cxt.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_l2.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sp_commands.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_fw_funcs.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_spq.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_ops.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_mcp.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_int.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_dcbx.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/bcm_osal.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sriov.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_cxt.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_l2.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sp_commands.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_init_fw_funcs.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_spq.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_init_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_mcp.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_int.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_dcbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += bcm_osal.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sriov.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_eth_if.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
-
-# dependent libs:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_net
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_fdir.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 28be9587..3f895cd4 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -98,9 +98,7 @@ inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)
u32 nwords = 0;
OSAL_BUILD_BUG_ON(!limit);
nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
- for (i = 0; i < nwords; i++)
- if (~(addr[i] != 0))
- break;
+ for (i = 0; i < nwords && ~(addr[i]) == 0; i++);
return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
}
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 0b446f2e..32c9b251 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -18,6 +18,7 @@
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_ether.h>
+#include <rte_io.h>
/* Forward declaration */
struct ecore_dev;
@@ -88,8 +89,12 @@ typedef int bool;
#define OSAL_ALLOC(dev, GFP, size) rte_malloc("qede", size, 0)
#define OSAL_ZALLOC(dev, GFP, size) rte_zmalloc("qede", size, 0)
#define OSAL_CALLOC(dev, GFP, num, size) rte_calloc("qede", num, size, 0)
-#define OSAL_VALLOC(dev, size) rte_malloc("qede", size, 0)
-#define OSAL_FREE(dev, memory) rte_free((void *)memory)
+#define OSAL_VZALLOC(dev, size) rte_zmalloc("qede", size, 0)
+#define OSAL_FREE(dev, memory) \
+ do { \
+ rte_free((void *)memory); \
+ memory = OSAL_NULL; \
+ } while (0)
#define OSAL_VFREE(dev, memory) OSAL_FREE(dev, memory)
#define OSAL_MEM_ZERO(mem, size) bzero(mem, size)
#define OSAL_MEMCPY(dst, src, size) rte_memcpy(dst, src, size)
@@ -113,18 +118,18 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
/* HW reads/writes */
-#define DIRECT_REG_RD(_dev, _reg_addr) \
- (*((volatile u32 *) (_reg_addr)))
+#define DIRECT_REG_RD(_dev, _reg_addr) rte_read32(_reg_addr)
#define REG_RD(_p_hwfn, _reg_offset) \
DIRECT_REG_RD(_p_hwfn, \
((u8 *)(uintptr_t)(_p_hwfn->regview) + (_reg_offset)))
-#define DIRECT_REG_WR16(_reg_addr, _val) \
- (*((volatile u16 *)(_reg_addr)) = _val)
+#define DIRECT_REG_WR16(_reg_addr, _val) rte_write16((_val), (_reg_addr))
+
+#define DIRECT_REG_WR(_dev, _reg_addr, _val) rte_write32((_val), (_reg_addr))
-#define DIRECT_REG_WR(_dev, _reg_addr, _val) \
- (*((volatile u32 *)(_reg_addr)) = _val)
+#define DIRECT_REG_WR_RELAXED(_dev, _reg_addr, _val) \
+ rte_write32_relaxed((_val), (_reg_addr))
#define REG_WR(_p_hwfn, _reg_offset, _val) \
DIRECT_REG_WR(NULL, \
@@ -134,9 +139,10 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
DIRECT_REG_WR16(((u8 *)(uintptr_t)(_p_hwfn->regview) + \
(_reg_offset)), (u16)_val)
-#define DOORBELL(_p_hwfn, _db_addr, _val) \
- DIRECT_REG_WR(_p_hwfn, \
- ((u8 *)(uintptr_t)(_p_hwfn->doorbells) + (_db_addr)), (u32)_val)
+#define DOORBELL(_p_hwfn, _db_addr, _val) \
+ DIRECT_REG_WR_RELAXED((_p_hwfn), \
+ ((u8 *)(uintptr_t)(_p_hwfn->doorbells) + \
+ (_db_addr)), (u32)_val)
/* Mutexes */
@@ -162,6 +168,7 @@ typedef pthread_mutex_t osal_mutex_t;
#define OSAL_DPC_ALLOC(hwfn) OSAL_ALLOC(hwfn, GFP, sizeof(osal_dpc_t))
#define OSAL_DPC_INIT(dpc, hwfn) nothing
#define OSAL_POLL_MODE_DPC(hwfn) nothing
+#define OSAL_DPC_SYNC(hwfn) nothing
/* Lists */
@@ -286,7 +293,8 @@ typedef struct osal_list_t {
#define OSAL_WMB(dev) rte_wmb()
#define OSAL_DMA_SYNC(dev, addr, length, is_post) nothing
-#define OSAL_BITS_PER_BYTE (8)
+#define OSAL_BIT(nr) (1UL << (nr))
+#define OSAL_BITS_PER_BYTE (8)
#define OSAL_BITS_PER_UL (sizeof(unsigned long) * OSAL_BITS_PER_BYTE)
#define OSAL_BITS_PER_UL_MASK (OSAL_BITS_PER_UL - 1)
@@ -314,6 +322,8 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_BUILD_BUG_ON(cond) nothing
#define ETH_ALEN ETHER_ADDR_LEN
+#define OSAL_BITMAP_WEIGHT(bitmap, count) 0
+
#define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn)
#define OSAL_DCBX_AEN(hwfn, mib_type) nothing
@@ -323,6 +333,7 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0
#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol) (0)
#define OSAL_PF_VF_MSG(hwfn, vfid) 0
+#define OSAL_PF_VF_MALICIOUS(hwfn, vfid) nothing
#define OSAL_IOV_CHK_UCAST(hwfn, vfid, params) 0
#define OSAL_IOV_POST_START_VPORT(hwfn, vf, vport_id, opaque_fid) nothing
#define OSAL_IOV_VF_ACQUIRE(hwfn, vfid) 0
@@ -391,6 +402,7 @@ u32 qede_osal_log2(u32);
#define OSAL_STRCPY(dst, string) strcpy(dst, string)
#define OSAL_STRNCPY(dst, string, len) strncpy(dst, string, len)
#define OSAL_STRCMP(str1, str2) strcmp(str1, str2)
+#define OSAL_STRTOUL(str, base, res) 0
#define OSAL_INLINE inline
#define OSAL_REG_ADDR(_p_hwfn, _offset) \
@@ -409,5 +421,7 @@ void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
qede_get_mcp_proto_stats(dev, type, stats)
#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
-
+#define OSAL_MFW_TLV_REQ(p_hwfn) (0)
+#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
+#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
#endif /* __BCM_OSAL_H */
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index b431c78d..cbcde227 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -78,8 +78,16 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
-#define MAX_NUM_LL2_RX_QUEUES 32
-#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
+/*
+ * Usually LL2 queues are opened in pairs TX-RX.
+ * There is a hard restriction on number of RX queues (limited by Tstorm RAM)
+ * and TX counters (Pstorm RAM).
+ * Number of TX queues is almost unlimited.
+ * The constants are different so as to allow asymmetric LL2 connections
+ */
+
+#define MAX_NUM_LL2_RX_QUEUES 48
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
/****************************************************************************/
@@ -89,7 +97,7 @@
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 10
+#define FW_MINOR_VERSION 18
#define FW_REVISION_VERSION 9
#define FW_ENGINEERING_VERSION 0
@@ -107,20 +115,21 @@
#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
-#define MAX_NUM_VFS_K2 (192)
#define MAX_NUM_VFS_BB (120)
-#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
+#define MAX_NUM_VFS_K2 (192)
+#define E4_MAX_NUM_VFS (MAX_NUM_VFS_K2)
+#define COMMON_MAX_NUM_VFS (240)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
-#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
+#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + E4_MAX_NUM_VFS)
/* in both BB and K2, the VF number starts from 16. so for arrays containing all
* possible PFs and VFs - we need a constant for this size
*/
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
-#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
+#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + E4_MAX_NUM_VFS)
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
@@ -149,9 +158,10 @@
#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
/* CIDs */
-#define NUM_OF_CONNECTION_TYPES (8)
-#define NUM_OF_LCIDS (320)
-#define NUM_OF_LTIDS (320)
+#define E4_NUM_OF_CONNECTION_TYPES (8)
+#define NUM_OF_TASK_TYPES (8)
+#define NUM_OF_LCIDS (320)
+#define NUM_OF_LTIDS (320)
/* Clock values */
#define MASTER_CLK_FREQ_E4 (375e6)
@@ -176,6 +186,13 @@
#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
+#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0)
+#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
+
/*****************/
/* DQ CONSTANTS */
@@ -471,7 +488,6 @@
#define PXP_BAR_DQ 1
/* PTT and GTT */
-#define PXP_NUM_PF_WINDOWS 12
#define PXP_PER_PF_ENTRY_SIZE 8
#define PXP_NUM_GLOBAL_WINDOWS 243
#define PXP_GLOBAL_ENTRY_SIZE 4
@@ -496,6 +512,8 @@
#define PXP_PF_ME_OPAQUE_ADDR 0x1f8
#define PXP_PF_ME_CONCRETE_ADDR 0x1fc
+#define PXP_NUM_PF_WINDOWS 12
+
#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000
#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS
#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
@@ -518,8 +536,6 @@
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
/* PF BAR */
-/*#define PXP_BAR0_START_GRC 0x1000 */
-/*#define PXP_BAR0_GRC_LENGTH 0xBFF000 */
#define PXP_BAR0_START_GRC 0x0000
#define PXP_BAR0_GRC_LENGTH 0x1C00000
#define PXP_BAR0_END_GRC \
@@ -588,7 +604,7 @@
#define SDM_OP_GEN_TRIG_AGG_INT 2
#define SDM_OP_GEN_TRIG_LOADER 4
#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
-#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7
+#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9
/***********************************************************/
/* Completion types */
@@ -611,6 +627,7 @@
#define SDM_COMP_TYPE_RELEASE_THREAD 7
/* Write to local RAM as a completion */
#define SDM_COMP_TYPE_RAM 8
+#define SDM_COMP_TYPE_INC_ORDER_CNT 9 /* Applicable only for E4 */
/******************/
@@ -721,13 +738,10 @@ union event_ring_data {
u8 bytes[8] /* Byte Array */;
struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
- /* Dedicated field for RoCE affiliated asynchronous error */;
- struct regpair roceHandle;
+ struct regpair roceHandle /* Dedicated field for RDMA data */;
struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
struct initial_cleanup_eqe_data vf_init_cleanup
/* VF Initial Cleanup data */;
-/* Host handle for the Async Completions */
- struct regpair iwarp_handle;
};
/* Event Ring Entry */
struct event_ring_entry {
@@ -768,6 +782,8 @@ enum protocol_type {
MAX_PROTOCOL_TYPE
};
+
+
/*
* Ustorm Queue Zone
*/
@@ -881,7 +897,7 @@ enum db_dest {
*/
enum db_dpm_type {
DPM_LEGACY /* Legacy DPM- to Xstorm RAM */,
- DPM_ROCE /* RoCE DPM- to NIG */,
+ DPM_RDMA /* RDMA DPM (only RoCE in E4) - to NIG */,
/* L2 DPM inline- to PBF, with packet data on doorbell */
DPM_L2_INLINE,
DPM_L2_BD /* L2 DPM with BD- to PBF, with TX BD data on doorbell */,
@@ -968,42 +984,42 @@ struct db_pwm_addr {
};
/*
- * Parameters to RoCE firmware, passed in EDPM doorbell
+ * Parameters to RDMA firmware, passed in EDPM doorbell
*/
-struct db_roce_dpm_params {
+struct db_rdma_dpm_params {
__le32 params;
/* Size in QWORD-s of the DPM burst */
-#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F
-#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0
-/* Type of DPM transacation (DPM_ROCE) (use enum db_dpm_type) */
-#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3
-#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6
-/* opcode for ROCE operation */
-#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF
-#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
+/* Type of DPM transacation (DPM_RDMA) (use enum db_dpm_type) */
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
+/* opcode for RDMA operation */
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
/* the size of the WQE payload in bytes */
-#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
-#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16
-#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1
-#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
/* RoCE completion flag */
-#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
-#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3
-#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
};
/*
- * Structure for doorbell data, in ROCE DPM mode, for the first doorbell in a
+ * Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
* DPM burst
*/
-struct db_roce_dpm_data {
+struct db_rdma_dpm_data {
__le16 icid /* internal CID */;
__le16 prod_val /* aggregated value to update */;
-/* parameters passed to RoCE firmware */
- struct db_roce_dpm_params params;
+/* parameters passed to RDMA firmware */
+ struct db_rdma_dpm_params params;
};
/* Igu interrupt command */
@@ -1136,6 +1152,68 @@ struct parsing_and_err_flags {
/*
+ * Parsing error flags bitmap.
+ */
+struct parsing_err_flags {
+ __le16 flags;
+/* MAC error indication */
+#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0
+/* truncation error indication */
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1
+/* packet too small indication */
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2
+/* Header Missing Tag */
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5
+/* set this error if: 1. total-len is smaller than hdr-len 2. total-ip-len
+ * indicates number that is bigger than real packet length 3. tunneling:
+ * total-ip-length of the outer header points to offset that is smaller than
+ * the one pointed to by the total-ip-len of the inner hdr.
+ */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7
+/* from frame cracker output. for either TCP or UDP */
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9
+/* cksm calculated and value isn't 0xffff or L4-cksm-wasnt-calculated for any
+ * reason, like: udp/ipv4 checksum is 0 etc.
+ */
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12
+/* set if geneve option size was over 32 byte */
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
+};
+
+
+/*
* Pb context
*/
struct pb_context {
@@ -1492,49 +1570,57 @@ struct tdif_task_context {
struct timers_context {
__le32 logical_client_0;
/* Expiration time of logical client 0 */
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED0_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED0_SHIFT 27
/* Valid bit of logical client 0 */
#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28
/* Active bit of logical client 0 */
#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1
#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED0_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED0_SHIFT 30
+#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
__le32 logical_client_1;
/* Expiration time of logical client 1 */
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED2_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED2_SHIFT 27
/* Valid bit of logical client 1 */
#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28
/* Active bit of logical client 1 */
#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1
#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
+#define TIMERS_CONTEXT_RESERVED3_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED3_SHIFT 30
__le32 logical_client_2;
/* Expiration time of logical client 2 */
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED4_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED4_SHIFT 27
/* Valid bit of logical client 2 */
#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28
/* Active bit of logical client 2 */
#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1
#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED2_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED2_SHIFT 30
+#define TIMERS_CONTEXT_RESERVED5_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED5_SHIFT 30
__le32 host_expiration_fields;
/* Expiration time on host (closest one) */
-#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF
#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED6_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED6_SHIFT 27
/* Valid bit of host expiration */
#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1
#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
-#define TIMERS_CONTEXT_RESERVED3_MASK 0x7
-#define TIMERS_CONTEXT_RESERVED3_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED7_MASK 0x7
+#define TIMERS_CONTEXT_RESERVED7_SHIFT 29
};
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 907b35b9..80b11a4c 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -28,9 +28,21 @@
#include "ecore_proto_if.h"
#include "mcp_public.h"
-#define MAX_HWFNS_PER_DEVICE (4)
+#define ECORE_MAJOR_VERSION 8
+#define ECORE_MINOR_VERSION 18
+#define ECORE_REVISION_VERSION 7
+#define ECORE_ENGINEERING_VERSION 0
+
+#define ECORE_VERSION \
+ ((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \
+ (ECORE_REVISION_VERSION << 8) | ECORE_ENGINEERING_VERSION)
+
+#define STORM_FW_VERSION \
+ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
+
+#define MAX_HWFNS_PER_DEVICE 2
#define NAME_SIZE 128 /* @DPDK */
-#define VER_SIZE 16
#define ECORE_WFQ_UNIT 100
#include "../qede_logs.h" /* @DPDK */
@@ -80,13 +92,22 @@ enum ecore_nvm_cmd {
#define SET_FIELD(value, name, flag) \
do { \
(value) &= ~(name##_MASK << name##_SHIFT); \
- (value) |= (((u64)flag) << (name##_SHIFT)); \
+ (value) |= ((((u64)flag) & (u64)name##_MASK) << (name##_SHIFT));\
} while (0)
#define GET_FIELD(value, name) \
(((value) >> (name##_SHIFT)) & name##_MASK)
#endif
+#define ECORE_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+#define ECORE_MFW_SET_FIELD(name, field, value) \
+do { \
+ (name) &= ~(field ## _MASK); \
+ (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
+} while (0)
+
static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
@@ -158,8 +179,8 @@ enum DP_MODULE {
ECORE_MSG_CXT = 0x800000,
ECORE_MSG_LL2 = 0x1000000,
ECORE_MSG_ILT = 0x2000000,
- ECORE_MSG_RDMA = 0x4000000,
- ECORE_MSG_DEBUG = 0x8000000,
+ ECORE_MSG_RDMA = 0x4000000,
+ ECORE_MSG_DEBUG = 0x8000000,
/* to be added...up to 0x8000000 */
};
#endif
@@ -179,6 +200,7 @@ struct ecore_cxt_mngr;
struct ecore_dma_mem;
struct ecore_sb_sp_info;
struct ecore_ll2_info;
+struct ecore_l2_info;
struct ecore_igu_info;
struct ecore_mcp_info;
struct ecore_dcbx_info;
@@ -205,33 +227,29 @@ enum ecore_tunn_clss {
MAX_ECORE_TUNN_CLSS,
};
-struct ecore_tunn_start_params {
- unsigned long tunn_mode;
- u16 vxlan_udp_port;
- u16 geneve_udp_port;
- u8 update_vxlan_udp_port;
- u8 update_geneve_udp_port;
- u8 tunn_clss_vxlan;
- u8 tunn_clss_l2geneve;
- u8 tunn_clss_ipgeneve;
- u8 tunn_clss_l2gre;
- u8 tunn_clss_ipgre;
+struct ecore_tunn_update_type {
+ bool b_update_mode;
+ bool b_mode_enabled;
+ enum ecore_tunn_clss tun_cls;
+};
+
+struct ecore_tunn_update_udp_port {
+ bool b_update_port;
+ u16 port;
};
-struct ecore_tunn_update_params {
- unsigned long tunn_mode_update_mask;
- unsigned long tunn_mode;
- u16 vxlan_udp_port;
- u16 geneve_udp_port;
- u8 update_rx_pf_clss;
- u8 update_tx_pf_clss;
- u8 update_vxlan_udp_port;
- u8 update_geneve_udp_port;
- u8 tunn_clss_vxlan;
- u8 tunn_clss_l2geneve;
- u8 tunn_clss_ipgeneve;
- u8 tunn_clss_l2gre;
- u8 tunn_clss_ipgre;
+struct ecore_tunnel_info {
+ struct ecore_tunn_update_type vxlan;
+ struct ecore_tunn_update_type l2_geneve;
+ struct ecore_tunn_update_type ip_geneve;
+ struct ecore_tunn_update_type l2_gre;
+ struct ecore_tunn_update_type ip_gre;
+
+ struct ecore_tunn_update_udp_port vxlan_port;
+ struct ecore_tunn_update_udp_port geneve_port;
+
+ bool b_update_rx_cls;
+ bool b_update_tx_cls;
};
/* The PCI personality is not quite synonymous to protocol ID:
@@ -243,7 +261,8 @@ enum ecore_pci_personality {
ECORE_PCI_FCOE,
ECORE_PCI_ISCSI,
ECORE_PCI_ETH_ROCE,
- ECORE_PCI_IWARP,
+ ECORE_PCI_ETH_IWARP,
+ ECORE_PCI_ETH_RDMA,
ECORE_PCI_DEFAULT /* default in shmem */
};
@@ -273,6 +292,7 @@ enum ecore_resources {
ECORE_LL2_QUEUE,
ECORE_CMDQS_CQS,
ECORE_RDMA_STATS_QUEUE,
+ ECORE_BDQ,
ECORE_MAX_RESC, /* must be last */
};
@@ -288,6 +308,7 @@ enum ecore_feature {
ECORE_RDMA_CNQ,
ECORE_ISCSI_CQ,
ECORE_FCOE_CQ,
+ ECORE_VF_L2_QUE,
ECORE_MAX_FEATURES,
};
@@ -302,6 +323,7 @@ enum ecore_port_mode {
ECORE_PORT_MODE_DE_2X25G,
ECORE_PORT_MODE_DE_1X25G,
ECORE_PORT_MODE_DE_4X25G,
+ ECORE_PORT_MODE_DE_2X10G,
};
enum ecore_dev_cap {
@@ -326,6 +348,19 @@ enum ecore_hw_err_type {
struct ecore_hw_info {
/* PCI personality */
enum ecore_pci_personality personality;
+#define ECORE_IS_RDMA_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_ROCE_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_IWARP_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_L2_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH || \
+ ECORE_IS_RDMA_PERSONALITY(dev))
/* Resource Allocation scheme results */
u32 resc_start[ECORE_MAX_RESC];
@@ -347,9 +382,6 @@ struct ecore_hw_info {
u8 num_active_tc;
- /* Traffic class used for tcp out of order traffic */
- u8 ooo_tc;
-
/* The traffic class used by PF for it's offloaded protocol */
u8 offload_tc;
@@ -372,16 +404,11 @@ struct ecore_hw_info {
u32 port_mode;
u32 hw_mode;
unsigned long device_capabilities;
-};
-struct ecore_hw_cid_data {
- u32 cid;
- bool b_cid_allocated;
- u8 vfid; /* 1-based; 0 signals this is for a PF */
+ /* Default DCBX mode */
+ u8 dcbx_mode;
- /* Additional identifiers */
- u16 opaque_fid;
- u8 vport_id;
+ u16 mtu;
};
/* maximun size of read/write commands (HW limit) */
@@ -424,15 +451,18 @@ struct ecore_qm_info {
struct init_qm_port_params *qm_port_params;
u16 start_pq;
u8 start_vport;
- u8 pure_lb_pq;
- u8 offload_pq;
- u8 pure_ack_pq;
- u8 ooo_pq;
- u8 vf_queues_offset;
+ u16 pure_lb_pq;
+ u16 offload_pq;
+ u16 pure_ack_pq;
+ u16 ooo_pq;
+ u16 first_vf_pq;
+ u16 first_mcos_pq;
+ u16 first_rl_pq;
u16 num_pqs;
u16 num_vf_pqs;
u8 num_vports;
u8 max_phys_tcs_per_port;
+ u8 ooo_tc;
bool pf_rl_en;
bool pf_wfq_en;
bool vport_rl_en;
@@ -472,7 +502,7 @@ struct ecore_hwfn {
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
- void *dp_ctx;
+ void *dp_ctx;
bool first_on_engine;
bool hw_init_done;
@@ -527,8 +557,8 @@ struct ecore_hwfn {
u32 rdma_prs_search_reg;
/* Array of sb_info of all status blocks */
- struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
- u16 num_sbs;
+ struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
+ u16 num_sbs;
struct ecore_cxt_mngr *p_cxt_mngr;
@@ -544,9 +574,6 @@ struct ecore_hwfn {
struct ecore_mcp_info *mcp_info;
struct ecore_dcbx_info *p_dcbx_info;
- struct ecore_hw_cid_data *p_tx_cids;
- struct ecore_hw_cid_data *p_rx_cids;
-
struct ecore_dmae_info dmae_info;
/* QM init */
@@ -572,6 +599,12 @@ struct ecore_hwfn {
/* If one of the following is set then EDPM shouldn't be used */
u8 dcbx_no_edpm;
u8 db_bar_no_edpm;
+
+ /* L2-related */
+ struct ecore_l2_info *p_l2_info;
+
+ /* @DPDK */
+ struct ecore_ptt *p_arfs_ptt;
};
#ifndef __EXTRACT__LINUX__
@@ -603,7 +636,7 @@ struct ecore_dev {
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
- void *dp_ctx;
+ void *dp_ctx;
u8 type;
#define ECORE_DEV_TYPE_BB (0 << 0)
@@ -620,6 +653,10 @@ struct ecore_dev {
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
+#define ECORE_DEV_ID_MASK 0xff00
+#define ECORE_DEV_ID_MASK_BB 0x1600
+#define ECORE_DEV_ID_MASK_AH 0x8000
+
u16 vendor_id;
u16 device_id;
@@ -679,7 +716,7 @@ struct ecore_dev {
int pcie_width;
int pcie_speed;
- u8 ver_str[NAME_SIZE]; /* @DPDK */
+
/* Add MF related configuration */
u8 mcp_rev;
u8 boot_mode;
@@ -711,10 +748,7 @@ struct ecore_dev {
/* SRIOV */
struct ecore_hw_sriov_info *p_iov_info;
#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info)
- bool b_hw_channel;
-
- unsigned long tunn_mode;
-
+ struct ecore_tunnel_info tunnel;
bool b_is_vf;
u32 drv_type;
@@ -766,15 +800,6 @@ struct ecore_dev {
#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
: MAX_NUM_PFS_K2)
-#ifndef REAL_ASIC_ONLY
-#define ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn) ( \
- (ECORE_IS_BB_A0(p_hwfn->p_dev)) && \
- (ECORE_PATH_ID(p_hwfn) == 1) && \
- ((p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X40G) || \
- (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X50G) || \
- (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X25G)))
-#endif
-
/**
* @brief ecore_concrete_to_sw_fid - get the sw function id from
* the concrete value.
@@ -783,8 +808,8 @@ struct ecore_dev {
*
* @return OSAL_INLINE u8
*/
-static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
- u32 concrete_fid)
+static OSAL_INLINE u8
+ecore_concrete_to_sw_fid(__rte_unused struct ecore_dev *p_dev, u32 concrete_fid)
{
u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
@@ -800,7 +825,7 @@ static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
}
#define PURE_LB_TC 8
-#define OOO_LB_TC 9
+#define PKT_LB_TC 9
int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
@@ -811,7 +836,33 @@ int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw);
void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
int ecore_device_num_engines(struct ecore_dev *p_dev);
int ecore_device_num_ports(struct ecore_dev *p_dev);
+void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
+ u8 *mac);
+
+/* Flags for indication of required queues */
+#define PQ_FLAGS_RLS (1 << 0)
+#define PQ_FLAGS_MCOS (1 << 1)
+#define PQ_FLAGS_LB (1 << 2)
+#define PQ_FLAGS_OOO (1 << 3)
+#define PQ_FLAGS_ACK (1 << 4)
+#define PQ_FLAGS_OFLD (1 << 5)
+#define PQ_FLAGS_VFS (1 << 6)
+
+/* physical queue index for cm context intialization */
+u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags);
+u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
+u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid);
+
+/* amount of resources used in qm init */
+u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
+const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
+
#endif /* __ECORE_H */
diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h
index 9ad1874f..ba272a91 100644
--- a/drivers/net/qede/base/ecore_chain.h
+++ b/drivers/net/qede/base/ecore_chain.h
@@ -54,21 +54,9 @@ struct ecore_chain_pbl_u32 {
u32 cons_page_idx;
};
-struct ecore_chain_pbl {
- /* Base address of a pre-allocated buffer for pbl */
- dma_addr_t p_phys_table;
- void *p_virt_table;
-
- /* Table for keeping the virtual addresses of the chain pages,
- * respectively to the physical addresses in the pbl table.
- */
- void **pp_virt_addr_tbl;
-
- /* Index to current used page by producer/consumer */
- union {
- struct ecore_chain_pbl_u16 pbl16;
- struct ecore_chain_pbl_u32 pbl32;
- } u;
+struct ecore_chain_ext_pbl {
+ dma_addr_t p_pbl_phys;
+ void *p_pbl_virt;
};
struct ecore_chain_u16 {
@@ -84,40 +72,75 @@ struct ecore_chain_u32 {
};
struct ecore_chain {
- /* Address of first page of the chain */
- void *p_virt_addr;
- dma_addr_t p_phys_addr;
-
+ /* fastpath portion of the chain - required for commands such
+ * as produce / consume.
+ */
/* Point to next element to produce/consume */
void *p_prod_elem;
void *p_cons_elem;
- enum ecore_chain_mode mode;
- enum ecore_chain_use_mode intended_use;
+ /* Fastpath portions of the PBL [if exists] */
+
+ struct {
+ /* Table for keeping the virtual addresses of the chain pages,
+ * respectively to the physical addresses in the pbl table.
+ */
+ void **pp_virt_addr_tbl;
+
+ union {
+ struct ecore_chain_pbl_u16 u16;
+ struct ecore_chain_pbl_u32 u32;
+ } c;
+ } pbl;
- enum ecore_chain_cnt_type cnt_type;
union {
struct ecore_chain_u16 chain16;
struct ecore_chain_u32 chain32;
} u;
- u32 page_cnt;
+ /* Capacity counts only usable elements */
+ u32 capacity;
+ u32 page_cnt;
- /* Number of elements - capacity is for usable elements only,
- * while size will contain total number of elements [for entire chain].
+ /* A u8 would suffice for mode, but it would save as a lot of headaches
+ * on castings & defaults.
*/
- u32 capacity;
- u32 size;
+ enum ecore_chain_mode mode;
/* Elements information for fast calculations */
u16 elem_per_page;
u16 elem_per_page_mask;
- u16 elem_unusable;
- u16 usable_per_page;
u16 elem_size;
u16 next_page_mask;
+ u16 usable_per_page;
+ u8 elem_unusable;
- struct ecore_chain_pbl pbl;
+ u8 cnt_type;
+
+ /* Slowpath of the chain - required for initialization and destruction,
+ * but isn't involved in regular functionality.
+ */
+
+ /* Base address of a pre-allocated buffer for pbl */
+ struct {
+ dma_addr_t p_phys_table;
+ void *p_virt_table;
+ } pbl_sp;
+
+ /* Address of first page of the chain - the address is required
+ * for fastpath operation [consume/produce] but only for the the SINGLE
+ * flavour which isn't considered fastpath [== SPQ].
+ */
+ void *p_virt_addr;
+ dma_addr_t p_phys_addr;
+
+ /* Total number of elements [for entire chain] */
+ u32 size;
+
+ u8 intended_use;
+
+ /* TBD - do we really need this? Couldn't find usage for it */
+ bool b_external_pbl;
void *dp_ctx;
};
@@ -128,8 +151,8 @@ struct ecore_chain {
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
((mode == ECORE_CHAIN_MODE_NEXT_PTR) ? \
- (1 + ((sizeof(struct ecore_chain_next) - 1) / \
- (elem_size))) : 0)
+ (u8)(1 + ((sizeof(struct ecore_chain_next) - 1) / \
+ (elem_size))) : 0)
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
((u32)(ELEMS_PER_PAGE(elem_size) - \
@@ -238,7 +261,7 @@ u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain)
}
static OSAL_INLINE
-u16 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
+u8 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
{
return p_chain->elem_unusable;
}
@@ -256,7 +279,7 @@ static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
static OSAL_INLINE
dma_addr_t ecore_chain_get_pbl_phys(struct ecore_chain *p_chain)
{
- return p_chain->pbl.p_phys_table;
+ return p_chain->pbl_sp.p_phys_table;
}
/**
@@ -281,9 +304,9 @@ ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
p_next = (struct ecore_chain_next *)(*p_next_elem);
*p_next_elem = p_next->next_virt;
if (is_chain_u16(p_chain))
- *(u16 *)idx_to_inc += p_chain->elem_unusable;
+ *(u16 *)idx_to_inc += (u16)p_chain->elem_unusable;
else
- *(u32 *)idx_to_inc += p_chain->elem_unusable;
+ *(u32 *)idx_to_inc += (u16)p_chain->elem_unusable;
break;
case ECORE_CHAIN_MODE_SINGLE:
*p_next_elem = p_chain->p_virt_addr;
@@ -384,7 +407,7 @@ static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
if ((p_chain->u.chain16.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain16.prod_idx;
- p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
@@ -393,7 +416,7 @@ static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
if ((p_chain->u.chain32.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain32.prod_idx;
- p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
@@ -458,7 +481,7 @@ static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
if ((p_chain->u.chain16.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain16.cons_idx;
- p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
+ p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
@@ -467,7 +490,7 @@ static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
if ((p_chain->u.chain32.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain32.cons_idx;
- p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
+ p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
@@ -511,25 +534,26 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
u32 reset_val = p_chain->page_cnt - 1;
if (is_chain_u16(p_chain)) {
- p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
- p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
+ p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
+ p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
} else {
- p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
- p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
+ p_chain->pbl.c.u32.prod_page_idx = reset_val;
+ p_chain->pbl.c.u32.cons_page_idx = reset_val;
}
}
switch (p_chain->intended_use) {
- case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
- case ECORE_CHAIN_USE_TO_PRODUCE:
- /* Do nothing */
- break;
-
case ECORE_CHAIN_USE_TO_CONSUME:
- /* produce empty elements */
- for (i = 0; i < p_chain->capacity; i++)
+ /* produce empty elements */
+ for (i = 0; i < p_chain->capacity; i++)
ecore_chain_recycle_consumed(p_chain);
- break;
+ break;
+
+ case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
+ case ECORE_CHAIN_USE_TO_PRODUCE:
+ default:
+ /* Do nothing */
+ break;
}
}
@@ -556,9 +580,9 @@ ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
p_chain->p_virt_addr = OSAL_NULL;
p_chain->p_phys_addr = 0;
p_chain->elem_size = elem_size;
- p_chain->intended_use = intended_use;
+ p_chain->intended_use = (u8)intended_use;
p_chain->mode = mode;
- p_chain->cnt_type = cnt_type;
+ p_chain->cnt_type = (u8)cnt_type;
p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
@@ -570,9 +594,9 @@ ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
p_chain->page_cnt = page_cnt;
p_chain->capacity = p_chain->usable_per_page * page_cnt;
p_chain->size = p_chain->elem_per_page * page_cnt;
-
- p_chain->pbl.p_phys_table = 0;
- p_chain->pbl.p_virt_table = OSAL_NULL;
+ p_chain->b_external_pbl = false;
+ p_chain->pbl_sp.p_phys_table = 0;
+ p_chain->pbl_sp.p_virt_table = OSAL_NULL;
p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
p_chain->dp_ctx = dp_ctx;
@@ -616,8 +640,8 @@ static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain,
dma_addr_t p_phys_pbl,
void **pp_virt_addr_tbl)
{
- p_chain->pbl.p_phys_table = p_phys_pbl;
- p_chain->pbl.p_virt_table = p_virt_pbl;
+ p_chain->pbl_sp.p_phys_table = p_phys_pbl;
+ p_chain->pbl_sp.p_virt_table = p_virt_pbl;
p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 3dd953d9..688118bb 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -8,6 +8,7 @@
#include "bcm_osal.h"
#include "reg_addr.h"
+#include "common_hsi.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_eth.h"
#include "ecore_rt_defs.h"
@@ -19,6 +20,7 @@
#include "ecore_hw.h"
#include "ecore_dev_api.h"
#include "ecore_sriov.h"
+#include "ecore_mcp.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
@@ -100,7 +102,6 @@ struct ecore_tid_seg {
struct ecore_conn_type_cfg {
u32 cid_count;
- u32 cid_start;
u32 cids_per_vf;
struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
};
@@ -191,11 +192,11 @@ struct ecore_cxt_mngr {
*/
u32 vf_count;
- /* total number of SRQ's for this hwfn */
- u32 srq_count;
-
/* Acquired CIDs */
struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
+ /* TBD - do we want this allocated to reserve space? */
+ struct ecore_cid_acquired_map
+ acquired_vf[MAX_CONN_TYPES][COMMON_MAX_NUM_VFS];
/* ILT shadow table */
struct ecore_dma_mem *ilt_shadow;
@@ -209,10 +210,29 @@ struct ecore_cxt_mngr {
u32 t2_num_pages;
u64 first_free;
u64 last_free;
+
+ /* The infrastructure originally was very generic and context/task
+ * oriented - per connection-type we would set how many of those
+ * are needed, and later when determining how much memory we're
+ * needing for a given block we'd iterate over all the relevant
+ * connection-types.
+ * But since then we've had some additional resources, some of which
+ * require memory which is indepent of the general context/task
+ * scheme. We add those here explicitly per-feature.
+ */
+
+ /* total number of SRQ's for this hwfn */
+ u32 srq_count;
+
+ /* Maximal number of L2 steering filters */
+ u32 arfs_count;
+
+ /* TODO - VF arfs filters ? */
};
/* check if resources/configuration is required according to protocol type */
-static OSAL_INLINE bool src_proto(enum protocol_type type)
+static OSAL_INLINE bool src_proto(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type)
{
return type == PROTOCOLID_TOE;
}
@@ -250,18 +270,22 @@ struct ecore_src_iids {
u32 per_vf_cids;
};
-static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
+static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_hwfn *p_hwfn,
+ struct ecore_cxt_mngr *p_mngr,
struct ecore_src_iids *iids)
{
u32 i;
for (i = 0; i < MAX_CONN_TYPES; i++) {
- if (!src_proto(i))
+ if (!src_proto(p_hwfn, i))
continue;
iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
}
+
+ /* Add L2 filtering filters in addition */
+ iids->pf_cids += p_mngr->arfs_count;
}
/* counts the iids for the Timers block configuration */
@@ -276,14 +300,24 @@ struct ecore_tm_iids {
static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
struct ecore_tm_iids *iids)
{
+ bool tm_vf_required = false;
+ bool tm_required = false;
u32 i, j;
for (i = 0; i < MAX_CONN_TYPES; i++) {
struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
- if (tm_cid_proto(i)) {
+ if (tm_cid_proto(i) || tm_required) {
+ if (p_cfg->cid_count)
+ tm_required = true;
+
iids->pf_cids += p_cfg->cid_count;
- iids->per_vf_cids += p_cfg->cids_per_vf;
+ }
+
+ if (tm_cid_proto(i) || tm_vf_required) {
+ if (p_cfg->cids_per_vf)
+ tm_vf_required = true;
+
}
if (tm_tid_proto(i)) {
@@ -313,7 +347,8 @@ static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
}
}
-void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids)
+static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
+ struct ecore_qm_iids *iids)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_tid_seg *segs;
@@ -671,7 +706,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
/* SRC */
p_cli = &p_mngr->clients[ILT_CLI_SRC];
- ecore_cxt_src_iids(p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
/* Both the PF and VFs searcher connections are stored in the per PF
* database. Thus sum the PF searcher cids and all the VFs searcher
@@ -718,12 +753,11 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
for (i = 1; i < p_mngr->vf_count; i++) {
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
}
-
- p_cli->vf_total_lines = curr_line - p_blk->start_line;
}
/* TSDM (SRQ CONTEXT) */
@@ -766,7 +800,6 @@ static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
p_mngr->t2[i].size);
OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
- p_mngr->t2 = OSAL_NULL;
}
static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
@@ -787,7 +820,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
if (!p_src->active)
return ECORE_SUCCESS;
- ecore_cxt_src_iids(p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
total_size = conn_num * sizeof(struct src_ent);
@@ -1006,44 +1039,75 @@ ilt_shadow_fail:
static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 type;
+ u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
+
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ OSAL_FREE(p_hwfn->p_dev,
+ p_mngr->acquired_vf[type][vf].cid_map);
+ p_mngr->acquired_vf[type][vf].max_count = 0;
+ p_mngr->acquired_vf[type][vf].start_cid = 0;
+ }
}
}
+static enum _ecore_status_t
+ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
+ u32 cid_start, u32 cid_count,
+ struct ecore_cid_acquired_map *p_map)
+{
+ u32 size;
+
+ if (!cid_count)
+ return ECORE_SUCCESS;
+
+ size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_count, BITS_PER_MAP_WORD);
+ p_map->cid_map = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+ if (p_map->cid_map == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ p_map->max_count = cid_count;
+ p_map->start_cid = cid_start;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Type %08x start: %08x count %08x\n",
+ type, p_map->start_cid, p_map->max_count);
+
+ return ECORE_SUCCESS;
+}
+
static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 start_cid = 0;
- u32 type;
+ u32 start_cid = 0, vf_start_cid = 0;
+ u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
- u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
- u32 size;
+ struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
+ struct ecore_cid_acquired_map *p_map;
- if (cid_cnt == 0)
- continue;
-
- size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD);
- p_mngr->acquired[type].cid_map = OSAL_ZALLOC(p_hwfn->p_dev,
- GFP_KERNEL, size);
- if (!p_mngr->acquired[type].cid_map)
+ /* Handle PF maps */
+ p_map = &p_mngr->acquired[type];
+ if (ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
+ p_cfg->cid_count, p_map))
goto cid_map_fail;
- p_mngr->acquired[type].max_count = cid_cnt;
- p_mngr->acquired[type].start_cid = start_cid;
-
- p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+ /* Handle VF maps */
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ if (ecore_cid_map_alloc_single(p_hwfn, type,
+ vf_start_cid,
+ p_cfg->cids_per_vf,
+ p_map))
+ goto cid_map_fail;
+ }
- DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
- "Type %08x start: %08x count %08x\n",
- type, p_mngr->acquired[type].start_cid,
- p_mngr->acquired[type].max_count);
- start_cid += cid_cnt;
+ start_cid += p_cfg->cid_count;
+ vf_start_cid += p_cfg->cids_per_vf;
}
return ECORE_SUCCESS;
@@ -1155,27 +1219,41 @@ void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
ecore_cid_map_free(p_hwfn);
ecore_cxt_src_t2_free(p_hwfn);
ecore_ilt_shadow_free(p_hwfn);
- OSAL_MUTEX_DEALLOC(&p_mngr->mutex);
+ OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
-
- p_hwfn->p_cxt_mngr = OSAL_NULL;
}
void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map;
+ struct ecore_conn_type_cfg *p_cfg;
int type;
+ u32 len;
/* Reset acquired cids */
for (type = 0; type < MAX_CONN_TYPES; type++) {
- u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
- u32 i;
+ u32 vf;
+
+ p_cfg = &p_mngr->conn_cfg[type];
+ if (p_cfg->cid_count) {
+ p_map = &p_mngr->acquired[type];
+ len = DIV_ROUND_UP(p_map->max_count,
+ BITS_PER_MAP_WORD) *
+ MAP_WORD_SIZE;
+ OSAL_MEM_ZERO(p_map->cid_map, len);
+ }
- if (cid_cnt == 0)
+ if (!p_cfg->cids_per_vf)
continue;
- for (i = 0; i < DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD); i++)
- p_mngr->acquired[type].cid_map[i] = 0;
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ len = DIV_ROUND_UP(p_map->max_count,
+ BITS_PER_MAP_WORD) *
+ MAP_WORD_SIZE;
+ OSAL_MEM_ZERO(p_map->cid_map, len);
+ }
}
}
@@ -1366,18 +1444,10 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
}
/* CM PF */
-static enum _ecore_status_t ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
+void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
{
- union ecore_qm_pq_params pq_params;
- u16 pq;
-
- /* XCM pure-LB queue */
- OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = LB_TC;
- pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
- STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
-
- return ECORE_SUCCESS;
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
+ ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
}
/* DQ PF */
@@ -1569,7 +1639,7 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
struct ecore_src_iids src_iids;
OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
- ecore_cxt_src_iids(p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
if (!conn_num)
return;
@@ -1585,6 +1655,9 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
p_hwfn->p_cxt_mngr->first_free);
STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
p_hwfn->p_cxt_mngr->last_free);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "Configured SEARCHER for 0x%08x connections\n",
+ conn_num);
}
/* Timers PF */
@@ -1724,93 +1797,150 @@ void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
ecore_prs_init_pf(p_hwfn);
}
-enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
- enum protocol_type type, u32 *p_cid)
+enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid, u8 vfid)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map;
u32 rel_cid;
- if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+ if (type >= MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
+ return ECORE_INVAL;
+ }
+
+ if (vfid >= COMMON_MAX_NUM_VFS && vfid != ECORE_CXT_PF_CID) {
+ DP_NOTICE(p_hwfn, true, "VF [%02x] is out of range\n", vfid);
+ return ECORE_INVAL;
+ }
+
+ /* Determine the right map to take this CID from */
+ if (vfid == ECORE_CXT_PF_CID)
+ p_map = &p_mngr->acquired[type];
+ else
+ p_map = &p_mngr->acquired_vf[type][vfid];
+
+ if (p_map->cid_map == OSAL_NULL) {
DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
return ECORE_INVAL;
}
- rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_mngr->acquired[type].cid_map,
- p_mngr->acquired[type].max_count);
+ rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_map->cid_map,
+ p_map->max_count);
- if (rel_cid >= p_mngr->acquired[type].max_count) {
+ if (rel_cid >= p_map->max_count) {
DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n",
type);
return ECORE_NORESOURCES;
}
- OSAL_SET_BIT(rel_cid, p_mngr->acquired[type].cid_map);
+ OSAL_SET_BIT(rel_cid, p_map->cid_map);
- *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+ *p_cid = rel_cid + p_map->start_cid;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
+ *p_cid, rel_cid, vfid, type);
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid)
+{
+ return _ecore_cxt_acquire_cid(p_hwfn, type, p_cid, ECORE_CXT_PF_CID);
+}
+
static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
- u32 cid, enum protocol_type *p_type)
+ u32 cid, u8 vfid,
+ enum protocol_type *p_type,
+ struct ecore_cid_acquired_map **pp_map)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- struct ecore_cid_acquired_map *p_map;
- enum protocol_type p;
u32 rel_cid;
/* Iterate over protocols and find matching cid range */
- for (p = 0; p < MAX_CONN_TYPES; p++) {
- p_map = &p_mngr->acquired[p];
+ for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
+ if (vfid == ECORE_CXT_PF_CID)
+ *pp_map = &p_mngr->acquired[*p_type];
+ else
+ *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
- if (!p_map->cid_map)
+ if (!((*pp_map)->cid_map))
continue;
- if (cid >= p_map->start_cid &&
- cid < p_map->start_cid + p_map->max_count) {
+ if (cid >= (*pp_map)->start_cid &&
+ cid < (*pp_map)->start_cid + (*pp_map)->max_count) {
break;
}
}
- *p_type = p;
-
- if (p == MAX_CONN_TYPES) {
- DP_NOTICE(p_hwfn, true, "Invalid CID %d", cid);
- return false;
+ if (*p_type == MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, true, "Invalid CID %d vfid %02x", cid, vfid);
+ goto fail;
}
- rel_cid = cid - p_map->start_cid;
- if (!OSAL_TEST_BIT(rel_cid, p_map->cid_map)) {
- DP_NOTICE(p_hwfn, true, "CID %d not acquired", cid);
- return false;
+
+ rel_cid = cid - (*pp_map)->start_cid;
+ if (!OSAL_TEST_BIT(rel_cid, (*pp_map)->cid_map)) {
+ DP_NOTICE(p_hwfn, true,
+ "CID %d [vifd %02x] not acquired", cid, vfid);
+ goto fail;
}
+
return true;
+fail:
+ *p_type = MAX_CONN_TYPES;
+ *pp_map = OSAL_NULL;
+ return false;
}
-void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid, u8 vfid)
{
- struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map = OSAL_NULL;
enum protocol_type type;
bool b_acquired;
u32 rel_cid;
+ if (vfid != ECORE_CXT_PF_CID && vfid > COMMON_MAX_NUM_VFS) {
+ DP_NOTICE(p_hwfn, true,
+ "Trying to return incorrect CID belonging to VF %02x\n",
+ vfid);
+ return;
+ }
+
/* Test acquired and find matching per-protocol map */
- b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, &type);
+ b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, vfid,
+ &type, &p_map);
if (!b_acquired)
return;
- rel_cid = cid - p_mngr->acquired[type].start_cid;
- OSAL_CLEAR_BIT(rel_cid, p_mngr->acquired[type].cid_map);
+ rel_cid = cid - p_map->start_cid;
+ OSAL_CLEAR_BIT(rel_cid, p_map->cid_map);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
+ cid, rel_cid, vfid, type);
+}
+
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+{
+ _ecore_cxt_release_cid(p_hwfn, cid, ECORE_CXT_PF_CID);
}
enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
struct ecore_cxt_info *p_info)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map = OSAL_NULL;
u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
enum protocol_type type;
bool b_acquired;
/* Test acquired and find matching per-protocol map */
- b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+ b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid,
+ ECORE_CXT_PF_CID,
+ &type, &p_map);
if (!b_acquired)
return ECORE_INVAL;
@@ -1839,7 +1969,7 @@ enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
+static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
{
struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
@@ -1866,10 +1996,15 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
struct ecore_eth_pf_params *p_params =
&p_hwfn->pf_params.eth_pf_params;
- ecore_cxt_set_proto_cid_count(p_hwfn,
- PROTOCOLID_ETH,
- p_params->num_cons, 1); /* FIXME VF count... */
-
+ /* TODO - we probably want to add VF number to the PF
+ * params;
+ * As of now, allocates 16 * 2 per-VF [to retain regular
+ * functionality].
+ */
+ ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons, 32);
+ p_hwfn->p_cxt_mngr->arfs_count =
+ p_params->num_arfs_filters;
break;
}
default:
@@ -1879,47 +2014,6 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
- struct ecore_tid_mem *p_info)
-{
- struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 proto, seg, total_lines, i, shadow_line;
- struct ecore_ilt_client_cfg *p_cli;
- struct ecore_ilt_cli_blk *p_fl_seg;
- struct ecore_tid_seg *p_seg_info;
-
- /* Verify the personality */
- switch (p_hwfn->hw_info.personality) {
- default:
- return ECORE_INVAL;
- }
-
- p_cli = &p_mngr->clients[ILT_CLI_CDUT];
- if (!p_cli->active)
- return ECORE_INVAL;
-
- p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
- if (!p_seg_info->has_fl_mem)
- return ECORE_INVAL;
-
- p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
- total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
- p_fl_seg->real_size_in_page);
-
- for (i = 0; i < total_lines; i++) {
- shadow_line = i + p_fl_seg->start_line -
- p_hwfn->p_cxt_mngr->pf_start_line;
- p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
- }
- p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
- p_fl_seg->real_size_in_page;
- p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
- p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
- p_info->tid_size;
-
- return ECORE_SUCCESS;
-}
-
/* This function is very RoCE oriented, if another protocol in the future
* will want this feature we'll need to modify the function to be more generic
*/
@@ -2157,52 +2251,3 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
return rc;
}
-
-enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
- u32 tid,
- u8 ctx_type, void **pp_task_ctx)
-{
- struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- struct ecore_ilt_client_cfg *p_cli;
- struct ecore_ilt_cli_blk *p_seg;
- struct ecore_tid_seg *p_seg_info;
- u32 proto, seg;
- u32 total_lines;
- u32 tid_size, ilt_idx;
- u32 num_tids_per_block;
-
- /* Verify the personality */
- switch (p_hwfn->hw_info.personality) {
- default:
- return ECORE_INVAL;
- }
-
- p_cli = &p_mngr->clients[ILT_CLI_CDUT];
- if (!p_cli->active)
- return ECORE_INVAL;
-
- p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
-
- if (ctx_type == ECORE_CTX_WORKING_MEM) {
- p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
- } else if (ctx_type == ECORE_CTX_FL_MEM) {
- if (!p_seg_info->has_fl_mem)
- return ECORE_INVAL;
- p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
- } else {
- return ECORE_INVAL;
- }
- total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
- tid_size = p_mngr->task_type_size[p_seg_info->type];
- num_tids_per_block = p_seg->real_size_in_page / tid_size;
-
- if (total_lines < tid / num_tids_per_block)
- return ECORE_INVAL;
-
- ilt_idx = tid / num_tids_per_block + p_seg->start_line -
- p_mngr->pf_start_line;
- *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
- (tid % num_tids_per_block) * tid_size;
-
- return ECORE_SUCCESS;
-}
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
index 5379d7bc..6ff823a5 100644
--- a/drivers/net/qede/base/ecore_cxt.h
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -35,17 +35,6 @@ u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
enum protocol_type type);
u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
-#ifndef LINUX_REMOVE
-/**
- * @brief ecore_cxt_qm_iids - fills the cid/tid counts for the QM configuration
- *
- * @param p_hwfn
- * @param iids [out], a structure holding all the counters
- */
-void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
- struct ecore_qm_iids *iids);
-#endif
-
/**
* @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
*
@@ -130,14 +119,53 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+#define ECORE_CXT_PF_CID (0xff)
+
+/**
+ * @brief ecore_cxt_release - Release a cid
+ *
+ * @param p_hwfn
+ * @param cid
+ */
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
+
+/**
+ * @brief ecore_cxt_release - Release a cid belonging to a vf-queue
+ *
+ * @param p_hwfn
+ * @param cid
+ * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
+ */
+void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
+ u32 cid, u8 vfid);
+
+/**
+ * @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid);
+
/**
-* @brief ecore_cxt_release - Release a cid
-*
-* @param p_hwfn
-* @param cid
-*/
-void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
- u32 cid);
+ * @brief _ecore_cxt_acquire - Acquire a new cid of a specific protocol type
+ * for a vf-queue
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid, u8 vfid);
/**
* @brief ecore_cxt_get_tid_mem_info - function checks if the
@@ -169,9 +197,5 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
#define ECORE_CTX_WORKING_MEM 0
#define ECORE_CTX_FL_MEM 1
-enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
- u32 tid,
- u8 ctx_type,
- void **task_ctx);
#endif /* _ECORE_CID_ */
diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h
index 6a50412a..6d87620d 100644
--- a/drivers/net/qede/base/ecore_cxt_api.h
+++ b/drivers/net/qede/base/ecore_cxt_api.h
@@ -26,19 +26,6 @@ struct ecore_tid_mem {
};
/**
-* @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type
-*
-* @param p_hwfn
-* @param type
-* @param p_cid
-*
-* @return enum _ecore_status_t
-*/
-enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
- enum protocol_type type,
- u32 *p_cid);
-
-/**
* @brief ecoreo_cid_get_cxt_info - Returns the context info for a specific cid
*
*
@@ -50,15 +37,4 @@ enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
struct ecore_cxt_info *p_info);
-/**
-* @brief ecore_cxt_get_tid_mem_info
-*
-* @param p_hwfn
-* @param p_info
-*
-* @return enum _ecore_status_t
-*/
-enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
- struct ecore_tid_mem *p_info);
-
#endif
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 8175619a..4f1b0698 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -13,6 +13,7 @@
#include "ecore_cxt.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
+#include "ecore_iov_api.h"
#define ECORE_DCBX_MAX_MIB_READ_TRY (100)
#define ECORE_ETH_TYPE_DEFAULT (0)
@@ -27,14 +28,25 @@
static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap)
{
- return (ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
- DCBX_APP_SF_ETHTYPE) ? true : false;
+ return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_ETHTYPE);
+}
+
+static bool ecore_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
+{
+ u8 mfw_val = ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return ecore_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
}
static bool ecore_dcbx_app_port(u32 app_info_bitmap)
{
- return (ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
- DCBX_APP_SF_PORT) ? true : false;
+ return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_PORT);
}
static bool ecore_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
@@ -45,100 +57,67 @@ static bool ecore_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
return ecore_dcbx_app_port(app_info_bitmap);
- return (mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT) ?
- true : false;
+ return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
}
-static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return (ecore_dcbx_app_ethtype(app_info_bitmap) &&
- proto_id == ECORE_ETH_TYPE_DEFAULT) ? true : false;
-}
+ bool ethtype;
-static bool ecore_dcbx_enabled(u32 dcbx_cfg_bitmap)
-{
- return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
- DCBX_CONFIG_VERSION_DISABLED) ? false : true;
-}
+ if (ieee)
+ ethtype = ecore_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = ecore_dcbx_app_ethtype(app_info_bitmap);
-static bool ecore_dcbx_cee(u32 dcbx_cfg_bitmap)
-{
- return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
- DCBX_CONFIG_VERSION_CEE) ? true : false;
+ return !!(ethtype && (proto_id == ECORE_ETH_TYPE_DEFAULT));
}
-static bool ecore_dcbx_ieee(u32 dcbx_cfg_bitmap)
+static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
+ u16 proto_id, bool ieee)
{
- return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
- DCBX_CONFIG_VERSION_IEEE) ? true : false;
-}
+ bool port;
-static bool ecore_dcbx_local(u32 dcbx_cfg_bitmap)
-{
- return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
- DCBX_CONFIG_VERSION_STATIC) ? true : false;
-}
+ if (!p_hwfn->p_dcbx_info->iwarp_port)
+ return false;
-/* @@@TBD A0 Eagle workaround */
-void ecore_dcbx_eagle_workaround(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, bool set_to_pfc)
-{
- if (!ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
- return;
-
- ecore_wr(p_hwfn, p_ptt,
- YSEM_REG_FAST_MEMORY + 0x20000 /* RAM in FASTMEM */ +
- YSTORM_FLOW_CONTROL_MODE_OFFSET,
- set_to_pfc ? flow_ctrl_pfc : flow_ctrl_pause);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_FLOWCTRL_MODE,
- EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE);
+ if (ieee)
+ port = ecore_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_TCP_PORT);
+ else
+ port = ecore_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == p_hwfn->p_dcbx_info->iwarp_port));
}
static void
ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_results *p_data)
{
- struct ecore_hw_info *p_info = &p_hwfn->hw_info;
enum dcbx_protocol_type id;
- u8 prio, tc, size, update;
- bool enable;
- const char *name; /* @DPDK */
int i;
- size = OSAL_ARRAY_SIZE(ecore_dcbx_app_update);
-
- DP_INFO(p_hwfn, "DCBX negotiated: %d\n", p_data->dcbx_enabled);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "DCBX negotiated: %d\n",
+ p_data->dcbx_enabled);
- for (i = 0; i < size; i++) {
+ for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
id = ecore_dcbx_app_update[i].id;
- name = ecore_dcbx_app_update[i].name;
-
- enable = p_data->arr[id].enable;
- update = p_data->arr[id].update;
- tc = p_data->arr[id].tc;
- prio = p_data->arr[id].priority;
- DP_INFO(p_hwfn,
- "%s info: update %d, enable %d, prio %d, tc %d,"
- " num_active_tc %d dscp_enable = %d dscp_val = %d\n",
- name, update, enable, prio, tc, p_info->num_active_tc,
- p_data->arr[id].dscp_enable, p_data->arr[id].dscp_val);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "%s info: update %d, enable %d, prio %d, tc %d,"
+ " num_active_tc %d dscp_enable = %d dscp_val = %d\n",
+ ecore_dcbx_app_update[i].name,
+ p_data->arr[id].update,
+ p_data->arr[id].enable, p_data->arr[id].priority,
+ p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc,
+ p_data->arr[id].dscp_enable,
+ p_data->arr[id].dscp_val);
}
}
static void
-ecore_dcbx_set_pf_tcs(struct ecore_hw_info *p_info,
- u8 tc, enum ecore_pci_personality personality)
-{
- /* QM reconf data */
- if (p_info->personality == personality)
- p_info->offload_tc = tc;
-}
-
-void
ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
struct ecore_hwfn *p_hwfn,
- bool enable, bool update, u8 prio, u8 tc,
+ bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type,
enum ecore_pci_personality personality)
{
@@ -164,27 +143,26 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
else if (enable)
p_data->arr[type].update = UPDATE_DCB;
else
- p_data->arr[type].update = DONT_UPDATE_DCB_DHCP;
+ p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
- ecore_dcbx_set_pf_tcs(&p_hwfn->hw_info, tc, personality);
+ /* QM reconf data */
+ if (p_hwfn->hw_info.personality == personality)
+ p_hwfn->hw_info.offload_tc = tc;
}
/* Update app protocol data and hw_info fields with the TLV info */
static void
ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
struct ecore_hwfn *p_hwfn,
- bool enable, bool update, u8 prio, u8 tc,
+ bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type)
{
enum ecore_pci_personality personality;
enum dcbx_protocol_type id;
const char *name; /* @DPDK */
- u8 size;
int i;
- size = OSAL_ARRAY_SIZE(ecore_dcbx_app_update);
-
- for (i = 0; i < size; i++) {
+ for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
id = ecore_dcbx_app_update[i].id;
if (type != id)
@@ -193,7 +171,7 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
personality = ecore_dcbx_app_update[i].personality;
name = ecore_dcbx_app_update[i].name;
- ecore_dcbx_set_params(p_data, p_hwfn, enable, update,
+ ecore_dcbx_set_params(p_data, p_hwfn, enable,
prio, tc, type, personality);
}
}
@@ -232,20 +210,18 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
u32 app_prio_bitmap, u16 id,
enum dcbx_protocol_type *type, bool ieee)
{
- bool status = false;
-
- if (ecore_dcbx_default_tlv(app_prio_bitmap, id)) {
+ if (ecore_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ETH;
- status = true;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
DP_ERR(p_hwfn,
"No action required, App TLV id = 0x%x"
" app_prio_bitmap = 0x%x\n",
id, app_prio_bitmap);
+ return false;
}
- return status;
+ return true;
}
/* Parse app TLV's to update TC information in hw_info structure for
@@ -257,14 +233,17 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl,
int count, u8 dcbx_version)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
- u8 tc, priority, priority_map;
enum dcbx_protocol_type type;
+ u8 tc, priority_map;
bool enable, ieee;
u16 protocol_id;
+ u8 priority;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
- DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Num APP entries = %d\n", count);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Num APP entries = %d pri_tc_tbl = 0x%x dcbx_version = %u\n",
+ count, pri_tc_tbl, dcbx_version);
ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
/* Parse APP TLV */
@@ -273,10 +252,12 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
DCBX_APP_PROTOCOL_ID);
priority_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_PRI_MAP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Id = 0x%x pri_map = %u\n",
+ protocol_id, priority_map);
rc = ecore_dcbx_get_app_priority(priority_map, &priority);
if (rc == ECORE_INVAL) {
DP_ERR(p_hwfn, "Invalid priority\n");
- return rc;
+ return ECORE_INVAL;
}
tc = ECORE_DCBX_PRIO2TC(pri_tc_tbl, priority);
@@ -289,9 +270,9 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
* indication, but we only got here if there was an
* app tlv for the protocol, so dcbx must be enabled.
*/
- enable = (type == DCBX_PROTOCOL_ETH ? false : true);
+ enable = !(type == DCBX_PROTOCOL_ETH);
- ecore_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
}
@@ -308,7 +289,7 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
continue;
enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
- ecore_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
@@ -350,6 +331,7 @@ ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
p_info->num_active_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_MAX_TCS);
+ p_hwfn->qm_info.ooo_tc = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
data.pf_id = p_hwfn->rel_pf_id;
data.dcbx_enabled = !!dcbx_version;
@@ -371,6 +353,9 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
u32 prefix_seq_num, suffix_seq_num;
int read_count = 0;
+ /* The data is considered to be valid only if both sequence numbers are
+ * the same.
+ */
do {
if (type == ECORE_DCBX_REMOTE_LLDP_MIB) {
ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
@@ -403,21 +388,20 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
return rc;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_priority_info(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_app_prio *p_prio,
struct ecore_dcbx_results *p_results)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 val;
if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
- p_results->arr[DCBX_PROTOCOL_ETH].enable) {
+ p_results->arr[DCBX_PROTOCOL_ETH].enable)
p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
- DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "Priority: eth %d\n", p_prio->eth);
- }
- return rc;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Priorities: eth %d\n",
+ p_prio->eth);
}
static void
@@ -508,8 +492,9 @@ ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "PFC params: willing %d, pfc_bitmap %d\n",
- p_params->pfc.willing, pfc_map);
+ "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n",
+ p_params->pfc.willing, pfc_map, p_params->pfc.max_tc,
+ p_params->pfc.enabled);
}
static void
@@ -528,10 +513,10 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
p_params->max_ets_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_MAX_TCS);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x"
- " max_ets_tc %d\n",
- p_params->ets_willing, p_params->ets_cbs,
- p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
+ "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing, p_params->ets_enabled,
+ p_params->ets_cbs, p_ets->pri_tc_tbl[0],
+ p_params->max_ets_tc);
/* 8 bit tsa and bw data corresponding to each of the 8 TC's are
* encoded in a type u32 array of size 2.
@@ -540,7 +525,7 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
bw_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_bw_tbl[1]);
tsa_map[0] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[0]);
tsa_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[1]);
- pri_map = OSAL_BE32_TO_CPU(p_ets->pri_tc_tbl[0]);
+ pri_map = p_ets->pri_tc_tbl[0];
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
@@ -552,7 +537,7 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
}
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
struct dcbx_app_priority_feature *p_app,
struct dcbx_app_priority_entry *p_tbl,
@@ -563,60 +548,35 @@ ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
ecore_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
ecore_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
ecore_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
-
- return ECORE_SUCCESS;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
- struct ecore_dcbx_admin_params *p_local;
- struct dcbx_app_priority_feature *p_app;
- struct dcbx_app_priority_entry *p_tbl;
- struct ecore_dcbx_params *p_data;
- struct dcbx_ets_feature *p_ets;
- u32 pfc;
-
- p_local = &params->local;
- p_data = &p_local->params;
- p_app = &p_hwfn->p_dcbx_info->local_admin.features.app;
- p_tbl = p_app->app_pri_tbl;
- p_ets = &p_hwfn->p_dcbx_info->local_admin.features.ets;
- pfc = p_hwfn->p_dcbx_info->local_admin.features.pfc;
-
- ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data,
- false);
- p_local->valid = true;
+ struct dcbx_features *p_feat;
- return ECORE_SUCCESS;
+ p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->local.params, false);
+ params->local.valid = true;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
- struct ecore_dcbx_remote_params *p_remote;
- struct dcbx_app_priority_feature *p_app;
- struct dcbx_app_priority_entry *p_tbl;
- struct ecore_dcbx_params *p_data;
- struct dcbx_ets_feature *p_ets;
- u32 pfc;
-
- p_remote = &params->remote;
- p_data = &p_remote->params;
- p_app = &p_hwfn->p_dcbx_info->remote.features.app;
- p_tbl = p_app->app_pri_tbl;
- p_ets = &p_hwfn->p_dcbx_info->remote.features.ets;
- pfc = p_hwfn->p_dcbx_info->remote.features.pfc;
+ struct dcbx_features *p_feat;
- ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data,
+ p_feat = &p_hwfn->p_dcbx_info->remote.features;
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->remote.params,
false);
- p_remote->valid = true;
-
- return ECORE_SUCCESS;
+ params->remote.valid = true;
}
static enum _ecore_status_t
@@ -625,14 +585,11 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *params)
{
struct ecore_dcbx_operational_params *p_operational;
- enum _ecore_status_t rc = ECORE_SUCCESS;
- struct dcbx_app_priority_feature *p_app;
- struct dcbx_app_priority_entry *p_tbl;
struct ecore_dcbx_results *p_results;
- struct ecore_dcbx_params *p_data;
- struct dcbx_ets_feature *p_ets;
+ struct dcbx_features *p_feat;
bool enabled, err;
- u32 pfc, flags;
+ u32 flags;
+ bool val;
flags = p_hwfn->p_dcbx_info->operational.flags;
@@ -640,42 +597,50 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
* was successfuly performed
*/
p_operational = &params->operational;
- enabled = ecore_dcbx_enabled(flags);
+ enabled = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+ DCBX_CONFIG_VERSION_DISABLED);
if (!enabled) {
p_operational->enabled = enabled;
p_operational->valid = false;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx is disabled\n");
return ECORE_INVAL;
}
- p_data = &p_operational->params;
+ p_feat = &p_hwfn->p_dcbx_info->operational.features;
p_results = &p_hwfn->p_dcbx_info->results;
- p_app = &p_hwfn->p_dcbx_info->operational.features.app;
- p_tbl = p_app->app_pri_tbl;
- p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
- pfc = p_hwfn->p_dcbx_info->operational.features.pfc;
- p_operational->ieee = ecore_dcbx_ieee(flags);
- p_operational->cee = ecore_dcbx_cee(flags);
- p_operational->local = ecore_dcbx_local(flags);
+ val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_IEEE);
+ p_operational->ieee = val;
+
+ val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_CEE);
+ p_operational->cee = val;
+
+ val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_STATIC);
+ p_operational->local = val;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"Version support: ieee %d, cee %d, static %d\n",
p_operational->ieee, p_operational->cee,
p_operational->local);
- ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data,
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->operational.params,
p_operational->ieee);
ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio,
p_results);
- err = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
+ err = ECORE_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
p_operational->err = err;
p_operational->enabled = enabled;
p_operational->valid = true;
- return rc;
+ return ECORE_SUCCESS;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
@@ -700,62 +665,46 @@ ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
(j * 4)) & 0xf;
}
-
- return ECORE_SUCCESS;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
- struct ecore_dcbx_lldp_local *p_local;
- osal_size_t size;
- u32 *dest;
-
- p_local = &params->lldp_local;
-
- size = OSAL_ARRAY_SIZE(p_local->local_chassis_id);
- dest = p_hwfn->p_dcbx_info->get.lldp_local.local_chassis_id;
- OSAL_MEMCPY(dest, p_local->local_chassis_id, size);
+ struct lldp_config_params_s *p_local;
- size = OSAL_ARRAY_SIZE(p_local->local_port_id);
- dest = p_hwfn->p_dcbx_info->get.lldp_local.local_port_id;
- OSAL_MEMCPY(dest, p_local->local_port_id, size);
+ p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
- return ECORE_SUCCESS;
+ OSAL_MEMCPY(params->lldp_local.local_chassis_id,
+ p_local->local_chassis_id,
+ OSAL_ARRAY_SIZE(p_local->local_chassis_id));
+ OSAL_MEMCPY(params->lldp_local.local_port_id, p_local->local_port_id,
+ OSAL_ARRAY_SIZE(p_local->local_port_id));
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
- struct ecore_dcbx_lldp_remote *p_remote;
- osal_size_t size;
- u32 *dest;
-
- p_remote = &params->lldp_remote;
+ struct lldp_status_params_s *p_remote;
- size = OSAL_ARRAY_SIZE(p_remote->peer_chassis_id);
- dest = p_hwfn->p_dcbx_info->get.lldp_remote.peer_chassis_id;
- OSAL_MEMCPY(dest, p_remote->peer_chassis_id, size);
+ p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
- size = OSAL_ARRAY_SIZE(p_remote->peer_port_id);
- dest = p_hwfn->p_dcbx_info->get.lldp_remote.peer_port_id;
- OSAL_MEMCPY(dest, p_remote->peer_port_id, size);
-
- return ECORE_SUCCESS;
+ OSAL_MEMCPY(params->lldp_remote.peer_chassis_id,
+ p_remote->peer_chassis_id,
+ OSAL_ARRAY_SIZE(p_remote->peer_chassis_id));
+ OSAL_MEMCPY(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
+ OSAL_ARRAY_SIZE(p_remote->peer_port_id));
}
static enum _ecore_status_t
-ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, enum ecore_mib_read_type type)
+ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *p_params,
+ enum ecore_mib_read_type type)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- struct ecore_dcbx_get *p_params;
-
- p_params = &p_hwfn->p_dcbx_info->get;
switch (type) {
case ECORE_DCBX_REMOTE_MIB:
@@ -768,10 +717,10 @@ ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
ecore_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
break;
case ECORE_DCBX_REMOTE_LLDP_MIB:
- rc = ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
break;
case ECORE_DCBX_LOCAL_LLDP_MIB:
- rc = ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
break;
default:
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
@@ -785,9 +734,10 @@ static enum _ecore_status_t
ecore_dcbx_read_local_lldp_mib(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
lldp_config_params);
data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
@@ -802,8 +752,8 @@ ecore_dcbx_read_remote_lldp_mib(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_mib_read_type type)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
@@ -857,6 +807,7 @@ ecore_dcbx_read_local_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
struct ecore_dcbx_mib_meta_data data;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, local_admin_dcbx_mib);
data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
@@ -883,7 +834,7 @@ static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_mib_read_type type)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
+ enum _ecore_status_t rc = ECORE_INVAL;
switch (type) {
case ECORE_DCBX_OPERATIONAL_MIB:
@@ -904,7 +855,6 @@ static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
break;
default:
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
- return ECORE_INVAL;
}
return rc;
@@ -945,10 +895,9 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
* according to negotiation results
*/
enabled = p_hwfn->p_dcbx_info->results.dcbx_enabled;
- ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, enabled);
}
}
- ecore_dcbx_get_params(p_hwfn, p_ptt, type);
+ ecore_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
/* Update the DSCP to TC mapping bit if required */
if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
@@ -964,17 +913,18 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(struct ecore_dcbx_info));
+ sizeof(*p_hwfn->p_dcbx_info));
if (!p_hwfn->p_dcbx_info) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_dcbx_info'");
- rc = ECORE_NOMEM;
+ return ECORE_NOMEM;
}
- return rc;
+ p_hwfn->p_dcbx_info->iwarp_port =
+ p_hwfn->pf_params.rdma_pf_params.iwarp_port;
+
+ return ECORE_SUCCESS;
}
void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn,
@@ -999,24 +949,31 @@ void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest)
{
struct protocol_dcb_data *p_dcb_data;
- bool update_flag = false;
+ u8 update_flag;
p_dest->pf_id = p_src->pf_id;
update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
- p_dest->update_eth_dcb_data_flag = update_flag;
+ p_dest->update_eth_dcb_data_mode = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_IWARP].update;
+ p_dest->update_iwarp_dcb_data_mode = update_flag;
p_dcb_data = &p_dest->eth_dcb_data;
ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
+ p_dcb_data = &p_dest->iwarp_dcb_data;
+ ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_IWARP);
}
-static
-enum _ecore_status_t ecore_dcbx_query(struct ecore_hwfn *p_hwfn,
- enum ecore_mib_read_type type)
+enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *p_get,
+ enum ecore_mib_read_type type)
{
struct ecore_ptt *p_ptt;
enum _ecore_status_t rc;
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt) {
rc = ECORE_TIMEOUT;
@@ -1028,30 +985,13 @@ enum _ecore_status_t ecore_dcbx_query(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
goto out;
- rc = ecore_dcbx_get_params(p_hwfn, p_ptt, type);
+ rc = ecore_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
out:
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
}
-enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
- struct ecore_dcbx_get *p_get,
- enum ecore_mib_read_type type)
-{
- enum _ecore_status_t rc;
-
- rc = ecore_dcbx_query(p_hwfn, type);
- if (rc)
- return rc;
-
- if (p_get != OSAL_NULL)
- OSAL_MEMCPY(p_get, &p_hwfn->p_dcbx_info->get,
- sizeof(struct ecore_dcbx_get));
-
- return rc;
-}
-
static void
ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
u32 *pfc, struct ecore_dcbx_params *p_params)
@@ -1074,8 +1014,8 @@ ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
if (p_params->pfc.prio[i])
- pfc_map |= (0x1 << i);
-
+ pfc_map |= (1 << i);
+ *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
*pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "pfc = 0x%x\n", *pfc);
@@ -1087,6 +1027,7 @@ ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_params *p_params)
{
u8 *bw_map, *tsa_map;
+ u32 val;
int i;
if (p_params->ets_willing)
@@ -1113,14 +1054,22 @@ ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
bw_map[i] = p_params->ets_tc_bw_tbl[i];
tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
- p_ets->pri_tc_tbl[0] |= (((u32)p_params->ets_pri_tc_tbl[i]) <<
- ((7 - i) * 4));
+ /* Copy the priority value to the corresponding 4 bits in the
+ * traffic class table.
+ */
+ val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
+ p_ets->pri_tc_tbl[0] |= val;
}
- p_ets->pri_tc_tbl[0] = OSAL_CPU_TO_BE32(p_ets->pri_tc_tbl[0]);
for (i = 0; i < 2; i++) {
p_ets->tc_bw_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_bw_tbl[i]);
p_ets->tc_tsa_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_tsa_tbl[i]);
}
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "flags = 0x%x pri_tc = 0x%x tc_bwl[] = {0x%x, 0x%x} tc_tsa = {0x%x, 0x%x}\n",
+ p_ets->flags, p_ets->pri_tc_tbl[0], p_ets->tc_bw_tbl[0],
+ p_ets->tc_bw_tbl[1], p_ets->tc_tsa_tbl[0],
+ p_ets->tc_tsa_tbl[1]);
}
static void
@@ -1147,24 +1096,33 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
entry = &p_app->app_pri_tbl[i].entry;
+ *entry = 0;
if (ieee) {
- *entry &= ~DCBX_APP_SF_IEEE_MASK;
+ *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
switch (p_params->app_entry[i].sf_ieee) {
case ECORE_DCBX_SF_IEEE_ETHTYPE:
*entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
break;
case ECORE_DCBX_SF_IEEE_TCP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
break;
case ECORE_DCBX_SF_IEEE_UDP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
break;
case ECORE_DCBX_SF_IEEE_TCP_UDP_PORT:
*entry |= (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT;
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
break;
}
} else {
@@ -1183,6 +1141,8 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
*entry |= ((u32)(p_params->app_entry[i].prio) <<
DCBX_APP_PRI_MAP_SHIFT);
}
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_app->flags);
}
static enum _ecore_status_t
@@ -1195,7 +1155,7 @@ ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
local_admin->flags = 0;
OSAL_MEMCPY(&local_admin->features,
&p_hwfn->p_dcbx_info->operational.features,
- sizeof(struct dcbx_features));
+ sizeof(local_admin->features));
if (params->enabled) {
local_admin->config = params->ver_num;
@@ -1230,10 +1190,9 @@ ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
OSAL_MEMCPY(p_dscp_map, &p_hwfn->p_dcbx_info->dscp_map,
sizeof(*p_dscp_map));
+ p_dscp_map->flags &= ~DCB_DSCP_ENABLE_MASK;
if (p_params->dscp.enabled)
p_dscp_map->flags |= DCB_DSCP_ENABLE_MASK;
- else
- p_dscp_map->flags &= ~DCB_DSCP_ENABLE_MASK;
for (i = 0, entry = 0; i < 8; i++) {
val = 0;
@@ -1246,6 +1205,8 @@ ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->dscp_nig_update = true;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_dscp_map->flags);
+
return ECORE_SUCCESS;
}
@@ -1254,15 +1215,15 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_set *params,
bool hw_commit)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
- struct ecore_dcbx_mib_meta_data data;
struct dcbx_local_params local_admin;
+ struct ecore_dcbx_mib_meta_data data;
struct dcb_dscp_map dscp_map;
u32 resp = 0, param = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
if (!hw_commit) {
OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
- sizeof(struct ecore_dcbx_set));
+ sizeof(p_hwfn->p_dcbx_info->set));
return ECORE_SUCCESS;
}
@@ -1315,12 +1276,13 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
}
dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(struct ecore_dcbx_get));
+ sizeof(*dcbx_info));
if (!dcbx_info) {
DP_ERR(p_hwfn, "Failed to allocate struct ecore_dcbx_info\n");
return ECORE_NOMEM;
}
+ OSAL_MEMSET(dcbx_info, 0, sizeof(*dcbx_info));
rc = ecore_dcbx_query_params(p_hwfn, dcbx_info,
ECORE_DCBX_OPERATIONAL_MIB);
if (rc) {
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
index 15186246..eba2d91b 100644
--- a/drivers/net/qede/base/ecore_dcbx.h
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -17,9 +17,6 @@
#include "ecore_hsi_common.h"
#include "ecore_dcbx_api.h"
-#define ECORE_MFW_GET_FIELD(name, field) \
- (((name) & (field ## _MASK)) >> (field ## _SHIFT))
-
struct ecore_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
@@ -32,6 +29,7 @@ struct ecore_dcbx_info {
struct ecore_dcbx_set set;
struct ecore_dcbx_get get;
u8 dcbx_cap;
+ u16 iwarp_port;
};
struct ecore_dcbx_mib_meta_data {
@@ -56,10 +54,4 @@ void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *);
void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
-#ifndef REAL_ASIC_ONLY
-/* @@@TBD eagle phy workaround */
-void ecore_dcbx_eagle_workaround(struct ecore_hwfn *, struct ecore_ptt *,
- bool set_to_pfc);
-#endif
-
#endif /* __ECORE_DCBX_H__ */
diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h
index 82416e7f..2dc76796 100644
--- a/drivers/net/qede/base/ecore_dcbx_api.h
+++ b/drivers/net/qede/base/ecore_dcbx_api.h
@@ -37,6 +37,7 @@ enum dcbx_protocol_type {
DCBX_PROTOCOL_ROCE,
DCBX_PROTOCOL_ROCE_V2,
DCBX_PROTOCOL_ETH,
+ DCBX_PROTOCOL_IWARP,
DCBX_MAX_PROTOCOL_TYPE
};
@@ -147,6 +148,7 @@ struct ecore_dcbx_get {
#define ECORE_DCBX_VERSION_DISABLED 0
#define ECORE_DCBX_VERSION_IEEE 1
#define ECORE_DCBX_VERSION_CEE 2
+#define ECORE_DCBX_VERSION_DYNAMIC 3
struct ecore_dcbx_set {
#define ECORE_DCBX_OVERRIDE_STATE (1 << 0)
@@ -190,7 +192,8 @@ static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
{DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
{DCBX_PROTOCOL_ROCE, "ROCE", ECORE_PCI_ETH_ROCE},
{DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", ECORE_PCI_ETH_ROCE},
- {DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH}
+ {DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH},
+ {DCBX_PROTOCOL_IWARP, "IWARP", ECORE_PCI_ETH_IWARP}
};
#endif /* __ECORE_DCBX_API_H__ */
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 6060f9ee..865103c6 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -30,6 +30,7 @@
#include "nvm_cfg.h"
#include "ecore_dev_api.h"
#include "ecore_dcbx.h"
+#include "ecore_l2.h"
/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
* registers involved are not split and thus configuration is a race where
@@ -70,28 +71,26 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
}
val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ if (val)
+ return 1 << (val + 15);
/* The above registers were updated in the past only in CMT mode. Since
* they were found to be useful MFW started updating them from 8.7.7.0.
* In older MFW versions they are set to 0 which means disabled.
*/
- if (!val) {
- if (p_hwfn->p_dev->num_hwfns > 1) {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size");
- DP_NOTICE(p_hwfn, false,
- "of 256kB for GRC and 512kB for DB\n");
- return BAR_ID_0 ? 256 * 1024 : 512 * 1024;
- } else {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size");
- DP_NOTICE(p_hwfn, false,
- "of 512kB for GRC and 512kB for DB\n");
- return 512 * 1024;
- }
+ if (p_hwfn->p_dev->num_hwfns > 1) {
+ DP_NOTICE(p_hwfn, false,
+ "BAR size not configured. Assuming BAR size of 256kB"
+ " for GRC and 512kB for DB\n");
+ val = BAR_ID_0 ? 256 * 1024 : 512 * 1024;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "BAR size not configured. Assuming BAR size of 512kB"
+ " for GRC and 512kB for DB\n");
+ val = 512 * 1024;
}
- return 1 << (val + 15);
+ return val;
}
void ecore_init_dp(struct ecore_dev *p_dev,
@@ -138,335 +137,620 @@ static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
- qm_info->qm_pq_params = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
- qm_info->qm_vport_params = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
- qm_info->qm_port_params = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
- qm_info->wfq_data = OSAL_NULL;
}
void ecore_resc_free(struct ecore_dev *p_dev)
{
int i;
- if (IS_VF(p_dev))
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i)
+ ecore_l2_free(&p_dev->hwfns[i]);
return;
+ }
OSAL_FREE(p_dev, p_dev->fw_data);
- p_dev->fw_data = OSAL_NULL;
OSAL_FREE(p_dev, p_dev->reset_stats);
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
- OSAL_FREE(p_dev, p_hwfn->p_tx_cids);
- p_hwfn->p_tx_cids = OSAL_NULL;
- OSAL_FREE(p_dev, p_hwfn->p_rx_cids);
- p_hwfn->p_rx_cids = OSAL_NULL;
- }
-
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
ecore_cxt_mngr_free(p_hwfn);
ecore_qm_info_free(p_hwfn);
ecore_spq_free(p_hwfn);
- ecore_eq_free(p_hwfn, p_hwfn->p_eq);
- ecore_consq_free(p_hwfn, p_hwfn->p_consq);
+ ecore_eq_free(p_hwfn);
+ ecore_consq_free(p_hwfn);
ecore_int_free(p_hwfn);
-#ifdef CONFIG_ECORE_LL2
- ecore_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
-#endif
ecore_iov_free(p_hwfn);
+ ecore_l2_free(p_hwfn);
ecore_dmae_info_free(p_hwfn);
ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
/* @@@TBD Flush work-queue ? */
}
}
-static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
- bool b_sleepable)
+/******************** QM initialization *******************/
+
+/* bitmaps for indicating active traffic classes.
+ * Special case for Arrowhead 4 port
+ */
+/* 0..3 actualy used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
+#define ACTIVE_TCS_BMAP 0x9f
+/* 0..3 actually used, OOO and high priority stuff all use 3 */
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+
+/* determines the physical queue flags for a given PF. */
+static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
{
- u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue;
- struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- struct init_qm_port_params *p_qm_port;
- bool init_rdma_offload_pq = false;
- bool init_pure_ack_pq = false;
- bool init_ooo_pq = false;
- u16 num_pqs, protocol_pqs;
- u16 num_pf_rls = 0;
- u16 num_vfs = 0;
- u32 pf_rl;
- u8 pf_wfq;
-
- /* @TMP - saving the existing min/max bw config before resetting the
- * qm_info to restore them.
- */
- pf_rl = qm_info->pf_rl;
- pf_wfq = qm_info->pf_wfq;
+ u32 flags;
-#ifdef CONFIG_ECORE_SRIOV
- if (p_hwfn->p_dev->p_iov_info)
- num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
-#endif
- OSAL_MEM_ZERO(qm_info, sizeof(*qm_info));
+ /* common flags */
+ flags = PQ_FLAGS_LB;
-#ifndef ASIC_ONLY
- /* @TMP - Don't allocate QM queues for VFs on emulation */
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- DP_NOTICE(p_hwfn, false,
- "Emulation - skip configuring QM queues for VFs\n");
- num_vfs = 0;
+ /* feature flags */
+ if (IS_ECORE_SRIOV(p_hwfn->p_dev))
+ flags |= PQ_FLAGS_VFS;
+
+ /* protocol flags */
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ flags |= PQ_FLAGS_MCOS;
+ break;
+ case ECORE_PCI_FCOE:
+ flags |= PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ISCSI:
+ flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ETH_ROCE:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ETH_IWARP:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
+ PQ_FLAGS_OFLD;
+ break;
+ default:
+ DP_ERR(p_hwfn, "unknown personality %d\n",
+ p_hwfn->hw_info.personality);
+ return 0;
}
-#endif
+ return flags;
+}
- /* ethernet PFs require a pq per tc. Even if only a subset of the TCs
- * active, we want physical queues allocated for all of them, since we
- * don't have a good recycle flow. Non ethernet PFs require only a
- * single physical queue.
- */
- if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE ||
- p_hwfn->hw_info.personality == ECORE_PCI_IWARP ||
- p_hwfn->hw_info.personality == ECORE_PCI_ETH)
- protocol_pqs = p_hwfn->hw_info.num_hw_tc;
- else
- protocol_pqs = 1;
-
- num_pqs = protocol_pqs + num_vfs + 1; /* The '1' is for pure-LB */
- num_vports = (u8)RESC_NUM(p_hwfn, ECORE_VPORT);
-
- if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
- num_pqs++; /* for RoCE queue */
- init_rdma_offload_pq = true;
- if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn) {
- /* Due to FW assumption that rl==vport, we limit the
- * number of rate limiters by the minimum between its
- * allocated number and the allocated number of vports.
- * Another limitation is the number of supported qps
- * with rate limiters in FW.
- */
- num_pf_rls =
- (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
- RESC_NUM(p_hwfn, ECORE_VPORT));
+/* Getters for resource amounts necessary for qm initialization */
+u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.num_hw_tc;
+}
- /* we subtract num_vfs because each one requires a rate
- * limiter, and one default rate limiter.
- */
- if (num_pf_rls < num_vfs + 1) {
- DP_ERR(p_hwfn, "No RL for DCQCN");
- DP_ERR(p_hwfn, "[num_pf_rls %d num_vfs %d]\n",
- num_pf_rls, num_vfs);
- return ECORE_INVAL;
- }
- num_pf_rls -= num_vfs + 1;
- }
+u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn)
+{
+ return IS_ECORE_SRIOV(p_hwfn->p_dev) ?
+ p_hwfn->p_dev->p_iov_info->total_vfs : 0;
+}
- num_pqs += num_pf_rls;
- qm_info->num_pf_rls = (u8)num_pf_rls;
- }
+#define NUM_DEFAULT_RLS 1
- if (p_hwfn->hw_info.personality == ECORE_PCI_IWARP) {
- num_pqs += 3; /* for iwarp queue / pure-ack / ooo */
- init_rdma_offload_pq = true;
- init_pure_ack_pq = true;
- init_ooo_pq = true;
- }
+u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
+{
+ u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
- if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
- num_pqs += 2; /* for iSCSI pure-ACK / OOO queue */
- init_pure_ack_pq = true;
- init_ooo_pq = true;
- }
+ /* @DPDK */
+ /* num RLs can't exceed resource amount of rls or vports or the
+ * dcqcn qps
+ */
+ num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
+ (u16)RESC_NUM(p_hwfn, ECORE_VPORT));
- /* Sanity checking that setup requires legal number of resources */
- if (num_pqs > RESC_NUM(p_hwfn, ECORE_PQ)) {
- DP_ERR(p_hwfn,
- "Need too many Physical queues - 0x%04x avail %04x",
- num_pqs, RESC_NUM(p_hwfn, ECORE_PQ));
- return ECORE_INVAL;
+ /* make sure after we reserve the default and VF rls we'll have
+ * something left
+ */
+ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) {
+ DP_NOTICE(p_hwfn, false,
+ "no rate limiters left for PF rate limiting"
+ " [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs);
+ return 0;
}
- /* PQs will be arranged as follows: First per-TC PQ, then pure-LB queue,
- * then special queues (iSCSI pure-ACK / RoCE), then per-VF PQ.
+ /* subtract rls necessary for VFs and one default one for the PF */
+ num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
+
+ return num_pf_rls;
+}
+
+u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn)
+{
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+ /* all pqs share the same vport (hence the 1 below), except for vfs
+ * and pf_rl pqs
*/
- qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev,
- b_sleepable ? GFP_KERNEL :
- GFP_ATOMIC,
- sizeof(struct init_qm_pq_params) *
- num_pqs);
- if (!qm_info->qm_pq_params)
- goto alloc_err;
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ ecore_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ ecore_init_qm_get_num_vfs(p_hwfn) + 1;
+}
- qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev,
- b_sleepable ? GFP_KERNEL :
- GFP_ATOMIC,
- sizeof(struct
- init_qm_vport_params) *
- num_vports);
- if (!qm_info->qm_vport_params)
- goto alloc_err;
+/* calc amount of PQs according to the requested flags */
+u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn)
+{
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ ecore_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_MCOS & pq_flags)) *
+ ecore_init_qm_get_num_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_LB & pq_flags)) +
+ (!!(PQ_FLAGS_OOO & pq_flags)) +
+ (!!(PQ_FLAGS_ACK & pq_flags)) +
+ (!!(PQ_FLAGS_OFLD & pq_flags)) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ ecore_init_qm_get_num_vfs(p_hwfn);
+}
- qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev,
- b_sleepable ? GFP_KERNEL :
- GFP_ATOMIC,
- sizeof(struct init_qm_port_params)
- * MAX_NUM_PORTS);
- if (!qm_info->qm_port_params)
- goto alloc_err;
+/* initialize the top level QM params */
+static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ bool four_port;
- qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev,
- b_sleepable ? GFP_KERNEL :
- GFP_ATOMIC,
- sizeof(struct ecore_wfq_data) *
- num_vports);
+ /* pq and vport bases for this PF */
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
- if (!qm_info->wfq_data)
- goto alloc_err;
+ /* rate limiting and weighted fair queueing are always enabled */
+ qm_info->vport_rl_en = 1;
+ qm_info->vport_wfq_en = 1;
+
+ /* TC config is different for AH 4 port */
+ four_port = p_hwfn->p_dev->num_ports_in_engines == MAX_NUM_PORTS_K2;
- vport_id = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+ /* in AH 4 port we have fewer TCs per port */
+ qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
+ NUM_OF_PHYS_TCS;
- /* First init rate limited queues ( Due to RoCE assumption of
- * qpid=rlid )
+ /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and
+ * 4 otherwise
*/
- for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- qm_info->qm_pq_params[curr_queue].rl_valid = 1;
- };
-
- /* Protocol PQs */
- for (i = 0; i < protocol_pqs; i++) {
- struct init_qm_pq_params *params =
- &qm_info->qm_pq_params[curr_queue++];
-
- if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE ||
- p_hwfn->hw_info.personality == ECORE_PCI_IWARP ||
- p_hwfn->hw_info.personality == ECORE_PCI_ETH) {
- params->vport_id = vport_id;
- params->tc_id = i;
- /* Note: this assumes that if we had a configuration
- * with N tcs and subsequently another configuration
- * With Fewer TCs, the in flight traffic (in QM queues,
- * in FW, from driver to FW) will still trickle out and
- * not get "stuck" in the QM. This is determined by the
- * NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ. Unused TCs are
- * supposed to be cleared in this map, allowing traffic
- * to flush out. If this is not the case, we would need
- * to set the TC of unused queues to 0, and reconfigure
- * QM every time num of TCs changes. Unused queues in
- * this context would mean those intended for TCs where
- * tc_id > hw_info.num_active_tcs.
- */
- params->wrr_group = 1; /* @@@TBD ECORE_WRR_MEDIUM */
- } else {
- params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.offload_tc;
- params->wrr_group = 1; /* @@@TBD ECORE_WRR_MEDIUM */
- }
- }
+ if (!qm_info->ooo_tc)
+ qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
+ DCBX_TCP_OOO_TC;
+}
- /* Then init pure-LB PQ */
- qm_info->pure_lb_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id =
- (u8)RESC_START(p_hwfn, ECORE_VPORT);
- qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
-
- qm_info->offload_pq = 0; /* Already initialized for iSCSI/FCoE */
- if (init_rdma_offload_pq) {
- qm_info->offload_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- if (init_pure_ack_pq) {
- qm_info->pure_ack_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- if (init_ooo_pq) {
- qm_info->ooo_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- /* Then init per-VF PQs */
- vf_offset = curr_queue;
- for (i = 0; i < num_vfs; i++) {
- /* First vport is used by the PF */
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
- /* @@@TBD VF Multi-cos */
- qm_info->qm_pq_params[curr_queue].tc_id = 0;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- qm_info->qm_pq_params[curr_queue].rl_valid = 1;
- curr_queue++;
- };
-
- qm_info->vf_queues_offset = vf_offset;
- qm_info->num_pqs = num_pqs;
- qm_info->num_vports = num_vports;
+/* initialize qm vport params */
+static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 i;
+ /* all vports participate in weighted fair queueing */
+ for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++)
+ qm_info->qm_vport_params[i].vport_wfq = 1;
+}
+
+/* initialize qm port params */
+static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
+{
/* Initialize qm port parameters */
- num_ports = p_hwfn->p_dev->num_ports_in_engines;
+ u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engines;
+
+ /* indicate how ooo and high pri traffic is dealt with */
+ active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
+ ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;
+
for (i = 0; i < num_ports; i++) {
- p_qm_port = &qm_info->qm_port_params[i];
+ struct init_qm_port_params *p_qm_port =
+ &p_hwfn->qm_info.qm_port_params[i];
+
p_qm_port->active = 1;
- /* @@@TMP - was NUM_OF_PHYS_TCS; Changed until dcbx will
- * be in place
- */
- if (num_ports == 4)
- p_qm_port->active_phys_tcs = 0xf;
- else
- p_qm_port->active_phys_tcs = 0x9f;
+ p_qm_port->active_phys_tcs = active_phys_tcs;
p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
}
+}
- if (ECORE_IS_AH(p_hwfn->p_dev) && (num_ports == 4))
- qm_info->max_phys_tcs_per_port = NUM_PHYS_TCS_4PORT_K2;
- else
- qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+/* Reset the params which must be reset for qm init. QM init may be called as
+ * a result of flows other than driver load (e.g. dcbx renegotiation). Other
+ * params may be affected by the init but would simply recalculate to the same
+ * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
+ * affected as these amounts stay the same.
+ */
+static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
+ qm_info->num_pqs = 0;
+ qm_info->num_vports = 0;
+ qm_info->num_pf_rls = 0;
+ qm_info->num_vf_pqs = 0;
+ qm_info->first_vf_pq = 0;
+ qm_info->first_mcos_pq = 0;
+ qm_info->first_rl_pq = 0;
+}
+
+static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_vports++;
+
+ if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d,"
+ " qm_init_get_num_vports() %d\n",
+ qm_info->num_vports,
+ ecore_init_qm_get_num_vports(p_hwfn));
+}
+
+/* initialize a single pq and manage qm_info resources accounting.
+ * The pq_init_flags param determines whether the PQ is rate limited
+ * (for VF or PF)
+ * and whether a new vport is allocated to the pq or not (i.e. vport will be
+ * shared)
+ */
+
+/* flags for pq init */
+#define PQ_INIT_SHARE_VPORT (1 << 0)
+#define PQ_INIT_PF_RL (1 << 1)
+#define PQ_INIT_VF_RL (1 << 2)
+
+/* defines for pq init */
+#define PQ_INIT_DEFAULT_WRR_GROUP 1
+#define PQ_INIT_DEFAULT_TC 0
+#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc)
+
+static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
+ struct ecore_qm_info *qm_info,
+ u8 tc, u32 pq_init_flags)
+{
+ u16 pq_idx = qm_info->num_pqs, max_pq =
+ ecore_init_qm_get_num_pqs(p_hwfn);
+
+ if (pq_idx > max_pq)
+ DP_ERR(p_hwfn,
+ "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+
+ /* init pq params */
+ qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
+ qm_info->num_vports;
+ qm_info->qm_pq_params[pq_idx].tc_id = tc;
+ qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
+ qm_info->qm_pq_params[pq_idx].rl_valid =
+ (pq_init_flags & PQ_INIT_PF_RL ||
+ pq_init_flags & PQ_INIT_VF_RL);
+
+ /* qm params accounting */
+ qm_info->num_pqs++;
+ if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
+ qm_info->num_vports++;
+
+ if (pq_init_flags & PQ_INIT_PF_RL)
+ qm_info->num_pf_rls++;
+
+ if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d,"
+ " qm_init_get_num_vports() %d\n",
+ qm_info->num_vports,
+ ecore_init_qm_get_num_vports(p_hwfn));
+
+ if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn))
+ DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d,"
+ " qm_init_get_num_pf_rls() %d\n",
+ qm_info->num_pf_rls,
+ ecore_init_qm_get_num_pf_rls(p_hwfn));
+}
+
+/* get pq index according to PQ_FLAGS */
+static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
+ u32 pq_flags)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ /* Can't have multiple flags set here */
+ if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags,
+ sizeof(pq_flags)) > 1)
+ goto err;
+
+ switch (pq_flags) {
+ case PQ_FLAGS_RLS:
+ return &qm_info->first_rl_pq;
+ case PQ_FLAGS_MCOS:
+ return &qm_info->first_mcos_pq;
+ case PQ_FLAGS_LB:
+ return &qm_info->pure_lb_pq;
+ case PQ_FLAGS_OOO:
+ return &qm_info->ooo_pq;
+ case PQ_FLAGS_ACK:
+ return &qm_info->pure_ack_pq;
+ case PQ_FLAGS_OFLD:
+ return &qm_info->offload_pq;
+ case PQ_FLAGS_VFS:
+ return &qm_info->first_vf_pq;
+ default:
+ goto err;
+ }
+
+err:
+ DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
+ return OSAL_NULL;
+}
+
+/* save pq index in qm info */
+static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn,
+ u32 pq_flags, u16 pq_val)
+{
+ u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
+}
+
+/* get tx pq index, with the PQ TX base already set (ready for context init) */
+u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags)
+{
+ u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ return *base_pq_idx + CM_TX_PQ_BASE;
+}
+
+u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
+{
+ u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn);
+
+ if (tc > max_tc)
+ DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+}
+
+u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
+{
+ u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn);
+
+ if (vf > max_vf)
+ DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+}
+
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
+{
+ u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
+
+ if (rl > max_rl)
+ DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+}
+
+/* Functions for creating specific types of pqs */
+static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 tc_idx;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
+ for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
qm_info->num_vf_pqs = num_vfs;
- qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+ for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC,
+ PQ_INIT_VF_RL);
+}
- for (i = 0; i < qm_info->num_vports; i++)
- qm_info->qm_vport_params[i].vport_wfq = 1;
+static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn)
+{
+ u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn);
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- qm_info->vport_rl_en = 1;
- qm_info->vport_wfq_en = 1;
- qm_info->pf_rl = pf_rl;
- qm_info->pf_wfq = pf_wfq;
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
+ for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC,
+ PQ_INIT_PF_RL);
+}
+
+static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
+{
+ /* rate limited pqs, must come first (FW assumption) */
+ ecore_init_qm_rl_pqs(p_hwfn);
+
+ /* pqs for multi cos */
+ ecore_init_qm_mcos_pqs(p_hwfn);
+
+ /* pure loopback pq */
+ ecore_init_qm_lb_pq(p_hwfn);
+
+ /* out of order pq */
+ ecore_init_qm_ooo_pq(p_hwfn);
+
+ /* pure ack pq */
+ ecore_init_qm_pure_ack_pq(p_hwfn);
+
+ /* pq for offloaded protocol */
+ ecore_init_qm_offload_pq(p_hwfn);
+
+ /* done sharing vports */
+ ecore_init_qm_advance_vport(p_hwfn);
+
+ /* pqs for vfs */
+ ecore_init_qm_vf_pqs(p_hwfn);
+}
+
+/* compare values of getters against resources amounts */
+static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn)
+{
+ if (ecore_init_qm_get_num_vports(p_hwfn) >
+ RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
+ return ECORE_INVAL;
+ }
+
+ if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) {
+ DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+ return ECORE_INVAL;
+ }
return ECORE_SUCCESS;
+}
- alloc_err:
- DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
- ecore_qm_info_free(p_hwfn);
- return ECORE_NOMEM;
+/*
+ * Function for verbose printing of the qm initialization results
+ */
+static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_vport_params *vport;
+ struct init_qm_port_params *port;
+ struct init_qm_pq_params *pq;
+ int i, tc;
+
+ /* top level params */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "qm init top level params: start_pq %d, start_vport %d,"
+ " pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+ qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq,
+ qm_info->offload_pq, qm_info->pure_ack_pq);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d,"
+ " num_vports %d, max_phys_tcs_per_port %d\n",
+ qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs,
+ qm_info->num_vf_pqs, qm_info->num_vports,
+ qm_info->max_phys_tcs_per_port);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d,"
+ " pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
+ qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en,
+ qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl,
+ qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
+
+ /* port table */
+ for (i = 0; i < p_hwfn->p_dev->num_ports_in_engines; i++) {
+ port = &qm_info->qm_port_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "port idx %d, active %d, active_phys_tcs %d,"
+ " num_pbf_cmd_lines %d, num_btb_blocks %d,"
+ " reserved %d\n",
+ i, port->active, port->active_phys_tcs,
+ port->num_pbf_cmd_lines, port->num_btb_blocks,
+ port->reserved);
+ }
+
+ /* vport table */
+ for (i = 0; i < qm_info->num_vports; i++) {
+ vport = &qm_info->qm_vport_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "vport idx %d, vport_rl %d, wfq %d,"
+ " first_tx_pq_id [ ",
+ qm_info->start_vport + i, vport->vport_rl,
+ vport->vport_wfq);
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ",
+ vport->first_tx_pq_id[tc]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n");
+ }
+
+ /* pq table */
+ for (i = 0; i < qm_info->num_pqs; i++) {
+ pq = &qm_info->qm_pq_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "pq idx %d, vport_id %d, tc %d, wrr_grp %d,"
+ " rl_valid %d\n",
+ qm_info->start_pq + i, pq->vport_id, pq->tc_id,
+ pq->wrr_group, pq->rl_valid);
+ }
+}
+
+static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn)
+{
+ /* reset params required for init run */
+ ecore_init_qm_reset_params(p_hwfn);
+
+ /* init QM top level params */
+ ecore_init_qm_params(p_hwfn);
+
+ /* init QM port params */
+ ecore_init_qm_port_params(p_hwfn);
+
+ /* init QM vport params */
+ ecore_init_qm_vport_params(p_hwfn);
+
+ /* init QM physical queue params */
+ ecore_init_qm_pq_params(p_hwfn);
+
+ /* display all that init */
+ ecore_dp_init_qm_params(p_hwfn);
}
/* This function reconfigures the QM pf on the fly.
* For this purpose we:
* 1. reconfigure the QM database
- * 2. set new values to runtime arrat
+ * 2. set new values to runtime array
* 3. send an sdm_qm_cmd through the rbc interface to stop the QM
* 4. activate init tool in QM_PF stage
* 5. send an sdm_qm_cmd through rbc interface to release the QM
@@ -478,17 +762,8 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
bool b_rc;
enum _ecore_status_t rc;
- /* qm_info is allocated in ecore_init_qm_info() which is already called
- * from ecore_resc_alloc() or previous call of ecore_qm_reconf().
- * The allocated size may change each init, so we free it before next
- * allocation.
- */
- ecore_qm_info_free(p_hwfn);
-
/* initialize ecore's qm data structure */
- rc = ecore_init_qm_info(p_hwfn, false);
- if (rc != ECORE_SUCCESS)
- return rc;
+ ecore_init_qm_info(p_hwfn);
/* stop PF's qm queues */
OSAL_SPIN_LOCK(&qm_lock);
@@ -521,51 +796,67 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ enum _ecore_status_t rc;
+
+ rc = ecore_init_qm_sanity(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ goto alloc_err;
+
+ qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_pq_params) *
+ ecore_init_qm_get_num_pqs(p_hwfn));
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_vport_params) *
+ ecore_init_qm_get_num_vports(p_hwfn));
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_port_params) *
+ p_hwfn->p_dev->num_ports_in_engines);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct ecore_wfq_data) *
+ ecore_init_qm_get_num_vports(p_hwfn));
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
+ return ECORE_SUCCESS;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
+ ecore_qm_info_free(p_hwfn);
+ return ECORE_NOMEM;
+}
+/******************** End QM initialization ***************/
+
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
{
- struct ecore_consq *p_consq;
- struct ecore_eq *p_eq;
-#ifdef CONFIG_ECORE_LL2
- struct ecore_ll2_info *p_ll2_info;
-#endif
enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
- if (IS_VF(p_dev))
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i) {
+ rc = ecore_l2_alloc(&p_dev->hwfns[i]);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
return rc;
+ }
p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->fw_data));
if (!p_dev->fw_data)
return ECORE_NOMEM;
- /* Allocate Memory for the Queue->CID mapping */
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
- /* @@@TMP - resc management, change to actual required size */
- int tx_size = sizeof(struct ecore_hw_cid_data) *
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
- int rx_size = sizeof(struct ecore_hw_cid_data) *
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
-
- p_hwfn->p_tx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- tx_size);
- if (!p_hwfn->p_tx_cids) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for Tx Cids\n");
- goto alloc_no_mem;
- }
-
- p_hwfn->p_rx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- rx_size);
- if (!p_hwfn->p_rx_cids) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for Rx Cids\n");
- goto alloc_no_mem;
- }
- }
-
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
u32 n_eqes, num_cons;
@@ -582,11 +873,13 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
if (rc)
goto alloc_err;
- /* Prepare and process QM requirements */
- rc = ecore_init_qm_info(p_hwfn, true);
+ rc = ecore_alloc_qm_data(p_hwfn);
if (rc)
goto alloc_err;
+ /* init qm info */
+ ecore_init_qm_info(p_hwfn);
+
/* Compute the ILT client partition */
rc = ecore_cxt_cfg_ilt_compute(p_hwfn);
if (rc)
@@ -618,8 +911,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
/* EQ */
n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain);
- if ((p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) ||
- (p_hwfn->hw_info.personality == ECORE_PCI_IWARP)) {
+ if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
/* Calculate the EQ size
* ---------------------
* Each ICID may generate up to one event at a time i.e.
@@ -629,37 +921,38 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
* worst case:
* - Core - according to SPQ.
* - RoCE - per QP there are a couple of ICIDs, one
- * responder and one requester, each can
- * generate an EQE => n_eqes_qp = 2 * n_qp.
- * Each CQ can generate an EQE. There are 2 CQs
- * per QP => n_eqes_cq = 2 * n_qp.
- * Hence the RoCE total is 4 * n_qp or
- * 2 * num_cons.
+ * responder and one requester, each can
+ * generate an EQE => n_eqes_qp = 2 * n_qp.
+ * Each CQ can generate an EQE. There are 2 CQs
+ * per QP => n_eqes_cq = 2 * n_qp.
+ * Hence the RoCE total is 4 * n_qp or
+ * 2 * num_cons.
* - ENet - There can be up to two events per VF. One
- * for VF-PF channel and another for VF FLR
- * initial cleanup. The number of VFs is
- * bounded by MAX_NUM_VFS_BB, and is much
- * smaller than RoCE's so we avoid exact
- * calculation.
+ * for VF-PF channel and another for VF FLR
+ * initial cleanup. The number of VFs is
+ * bounded by MAX_NUM_VFS_BB, and is much
+ * smaller than RoCE's so we avoid exact
+ * calculation.
*/
- if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
+ if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
num_cons =
ecore_cxt_get_proto_cid_count(
p_hwfn,
PROTOCOLID_ROCE,
- 0);
+ OSAL_NULL);
num_cons *= 2;
} else {
num_cons = ecore_cxt_get_proto_cid_count(
p_hwfn,
PROTOCOLID_IWARP,
- 0);
+ OSAL_NULL);
}
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
} else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
num_cons =
ecore_cxt_get_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI, 0);
+ PROTOCOLID_ISCSI,
+ OSAL_NULL);
n_eqes += 2 * num_cons;
}
@@ -667,33 +960,27 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
DP_ERR(p_hwfn, "Cannot allocate 0x%x EQ elements."
"The maximum of a u16 chain is 0x%x\n",
n_eqes, 0xFFFF);
- goto alloc_err;
+ goto alloc_no_mem;
}
- p_eq = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
- if (!p_eq)
- goto alloc_no_mem;
- p_hwfn->p_eq = p_eq;
+ rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
+ if (rc)
+ goto alloc_err;
- p_consq = ecore_consq_alloc(p_hwfn);
- if (!p_consq)
- goto alloc_no_mem;
- p_hwfn->p_consq = p_consq;
-
-#ifdef CONFIG_ECORE_LL2
- if (p_hwfn->using_ll2) {
- p_ll2_info = ecore_ll2_alloc(p_hwfn);
- if (!p_ll2_info)
- goto alloc_no_mem;
- p_hwfn->p_ll2_info = p_ll2_info;
- }
-#endif
+ rc = ecore_consq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_l2_alloc(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ goto alloc_err;
/* DMA info initialization */
rc = ecore_dmae_info_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for dmae_info structure\n");
+ "Failed to allocate memory for dmae_info"
+ " structure\n");
goto alloc_err;
}
@@ -707,7 +994,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
}
p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
- sizeof(struct ecore_eth_stats));
+ sizeof(*p_dev->reset_stats));
if (!p_dev->reset_stats) {
DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
goto alloc_no_mem;
@@ -715,9 +1002,9 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
return ECORE_SUCCESS;
- alloc_no_mem:
+alloc_no_mem:
rc = ECORE_NOMEM;
- alloc_err:
+alloc_err:
ecore_resc_free(p_dev);
return rc;
}
@@ -726,16 +1013,19 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
{
int i;
- if (IS_VF(p_dev))
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i)
+ ecore_l2_setup(&p_dev->hwfns[i]);
return;
+ }
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
ecore_cxt_mngr_setup(p_hwfn);
ecore_spq_setup(p_hwfn);
- ecore_eq_setup(p_hwfn, p_hwfn->p_eq);
- ecore_consq_setup(p_hwfn, p_hwfn->p_consq);
+ ecore_eq_setup(p_hwfn);
+ ecore_consq_setup(p_hwfn);
/* Read shadow of current MFW mailbox */
ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
@@ -745,11 +1035,8 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+ ecore_l2_setup(p_hwfn);
ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
-#ifdef CONFIG_ECORE_LL2
- if (p_hwfn->using_ll2)
- ecore_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
-#endif
}
}
@@ -794,10 +1081,9 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"Sending final cleanup for PFVF[%d] [Command %08x\n]",
- id, OSAL_CPU_TO_LE32(command));
+ id, command);
- ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN,
- OSAL_CPU_TO_LE32(command));
+ ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
/* Poll until completion */
while (!REG_RD(p_hwfn, addr) && count--)
@@ -819,10 +1105,8 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
{
int hw_mode = 0;
- if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
- hw_mode |= 1 << MODE_BB_A0;
- } else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
- hw_mode |= 1 << MODE_BB_B0;
+ if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
+ hw_mode |= 1 << MODE_BB;
} else if (ECORE_IS_AH(p_hwfn->p_dev)) {
hw_mode |= 1 << MODE_K2;
} else {
@@ -877,11 +1161,6 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
#endif
hw_mode |= 1 << MODE_ASIC;
-#ifndef REAL_ASIC_ONLY
- if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
- hw_mode |= 1 << MODE_EAGLE_ENG1_WORKAROUND;
-#endif
-
if (p_hwfn->p_dev->num_hwfns > 1)
hw_mode |= 1 << MODE_100G;
@@ -899,29 +1178,36 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 pl_hv = 1;
int i;
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
- pl_hv |= 0x600;
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (ECORE_IS_AH(p_dev))
+ pl_hv |= 0x600;
+ }
ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
- ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2, 0x3ffffff);
+ if (CHIP_REV_IS_EMUL(p_dev) &&
+ (ECORE_IS_AH(p_dev)))
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5,
+ 0x3ffffff);
/* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
/* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
- if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev))
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0, 4);
+ if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev))
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
- /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
- ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
- (p_hwfn->p_dev->num_ports_in_engines >> 1));
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (ECORE_IS_AH(p_dev)) {
+ /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
+ (p_dev->num_ports_in_engines >> 1));
- ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
- p_hwfn->p_dev->num_ports_in_engines == 4 ? 0 : 3);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
+ p_dev->num_ports_in_engines == 4 ? 0 : 3);
+ }
}
/* Poll on RBC */
@@ -987,7 +1273,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_gtt_init(p_hwfn);
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_EMUL(p_dev)) {
rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
return rc;
@@ -1002,7 +1288,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
}
ecore_qm_common_rt_init(p_hwfn,
- p_hwfn->p_dev->num_ports_in_engines,
+ p_dev->num_ports_in_engines,
qm_info->max_phys_tcs_per_port,
qm_info->pf_rl_en, qm_info->pf_wfq_en,
qm_info->vport_rl_en, qm_info->vport_wfq_en,
@@ -1010,18 +1296,6 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_cxt_hw_init_common(p_hwfn);
- /* Close gate from NIG to BRB/Storm; By default they are open, but
- * we close them to prevent NIG from passing data to reset blocks.
- * Should have been done in the ENGINE phase, but init-tool lacks
- * proper port-pretend capabilities.
- */
- ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
- ecore_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
- ecore_port_unpretend(p_hwfn, p_ptt);
-
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc != ECORE_SUCCESS)
return rc;
@@ -1032,11 +1306,11 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
- if (ECORE_IS_BB(p_hwfn->p_dev)) {
+ if (ECORE_IS_BB(p_dev)) {
/* Workaround clears ROCE search for all functions to prevent
* involving non initialized function in processing ROCE packet.
*/
- num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
+ num_pfs = NUM_OF_ENG_PFS(p_dev);
for (pf_id = 0; pf_id < num_pfs; pf_id++) {
ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
@@ -1052,8 +1326,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
* This is not done inside the init tool since it currently can't
* perform a pretending to VFs.
*/
- max_num_vfs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_VFS_K2
- : MAX_NUM_VFS_BB;
+ max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
@@ -1080,20 +1353,19 @@ static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
{
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
- ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) |
+ ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) |
(8 << PMEG_IF_BYTE_COUNT),
(reg_type << 25) | (addr << 8) | port,
(u32)((data >> 32) & 0xffffffff),
(u32)(data & 0xffffffff));
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0,
- (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) &
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB,
+ (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) &
0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT));
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB_B0,
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB,
(reg_type << 25) | (addr << 8) | port);
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
- data & 0xffffffff);
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff);
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB,
(data >> 32) & 0xffffffff);
}
@@ -1109,48 +1381,13 @@ static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
#define XLMAC_PAUSE_CTRL (0x60d)
#define XLMAC_PFC_CTRL (0x60e)
-static void ecore_emul_link_init_ah(struct ecore_hwfn *p_hwfn,
+static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u8 port = p_hwfn->port_id;
- u32 mac_base = NWM_REG_MAC0 + (port << 2) * NWM_REG_MAC0_SIZE;
-
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2 + (port << 2),
- (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT) |
- (port << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT)
- | (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT));
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE,
- 1 << ETH_MAC_REG_XIF_MODE_XGMII_SHIFT);
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH,
- 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT);
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH,
- 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT);
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS,
- 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT);
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS,
- (0xA << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT) |
- (8 << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT));
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG, 0xa853);
-}
-
-static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
u8 loopback = 0, port = p_hwfn->port_id * 2;
DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
- if (ECORE_IS_AH(p_hwfn->p_dev)) {
- ecore_emul_link_init_ah(p_hwfn, p_ptt);
- return;
- }
-
/* XLPORT MAC MODE *//* 0 Quad, 4 Single... */
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
port);
@@ -1179,8 +1416,53 @@ static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
}
-static void ecore_link_init(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 port)
+static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 port = p_hwfn->port_id;
+ u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE;
+
+ DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
+
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2),
+ (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) |
+ (port <<
+ CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) |
+ (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT));
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5,
+ 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5,
+ 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5,
+ 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5,
+ 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5,
+ (0xA <<
+ ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) |
+ (8 <<
+ ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT));
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5,
+ 0xa853);
+}
+
+static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (ECORE_IS_AH(p_hwfn->p_dev))
+ ecore_emul_link_init_ah_e5(p_hwfn, p_ptt);
+ else /* BB */
+ ecore_emul_link_init_bb(p_hwfn, p_ptt);
+}
+
+static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 port)
{
int port_offset = port ? 0x800 : 0;
u32 xmac_rxctrl = 0;
@@ -1193,10 +1475,10 @@ static void ecore_link_init(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
- ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1);
/* Set the number of ports on the Warp Core to 10G */
- ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3);
/* Soft reset of XMAC */
ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
@@ -1207,70 +1489,24 @@ static void ecore_link_init(struct ecore_hwfn *p_hwfn,
/* FIXME: move to common end */
if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE + port_offset, 0x20);
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20);
/* Set Max packet size: initialize XMAC block register for port 0 */
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE + port_offset, 0x2710);
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710);
/* CRC append for Tx packets: init XMAC block register for port 1 */
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO + port_offset, 0xC800);
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800);
/* Enable TX and RX: initialize XMAC block register for port 1 */
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL + port_offset,
- XMAC_REG_CTRL_TX_EN | XMAC_REG_CTRL_RX_EN);
- xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset);
- xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE;
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset, xmac_rxctrl);
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset,
+ XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB);
+ xmac_rxctrl = ecore_rd(p_hwfn, p_ptt,
+ XMAC_REG_RX_CTRL_BB + port_offset);
+ xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB;
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl);
}
#endif
-static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- int hw_mode)
-{
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
- rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
- hw_mode);
- if (rc != ECORE_SUCCESS)
- return rc;
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
- return ECORE_SUCCESS;
-
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
- if (ECORE_IS_AH(p_hwfn->p_dev))
- return ECORE_SUCCESS;
- ecore_link_init(p_hwfn, p_ptt, p_hwfn->port_id);
- } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- if (p_hwfn->p_dev->num_hwfns > 1) {
- /* Activate OPTE in CMT */
- u32 val;
-
- val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
- val |= 0x10;
- ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
- ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
- ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
- ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
- 0x55555555);
- }
-
- ecore_emul_link_init(p_hwfn, p_ptt);
- } else {
- DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
- }
-#endif
-
- return rc;
-}
-
static enum _ecore_status_t
ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
@@ -1339,7 +1575,7 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
u32 db_bar_size, n_cpus;
u32 roce_edpm_mode;
u32 pf_dems_shift;
- int rc = ECORE_SUCCESS;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
u8 cond;
db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
@@ -1394,8 +1630,9 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
}
- cond = ((rc) && (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
- (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE);
+ cond = ((rc != ECORE_SUCCESS) &&
+ (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
+ (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE);
if (cond || p_hwfn->dcbx_no_edpm) {
/* Either EDPM is disabled from user configuration, or it is
* disabled via DCBx, or it is not mandatory and we failed to
@@ -1419,7 +1656,7 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
"disabled" : "enabled");
/* Check return codes from above calls */
- if (rc) {
+ if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn,
"Failed to allocate enough DPIs\n");
return ECORE_NORESOURCES;
@@ -1437,10 +1674,58 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int hw_mode)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+ hw_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ if (ECORE_IS_AH(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+ else if (ECORE_IS_BB(p_hwfn->p_dev))
+ ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
+ } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (p_hwfn->p_dev->num_hwfns > 1) {
+ /* Activate OPTE in CMT */
+ u32 val;
+
+ val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
+ val |= 0x10;
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
+ 0x55555555);
+ }
+
+ ecore_emul_link_init(p_hwfn, p_ptt);
+ } else {
+ DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
+ }
+#endif
+
+ return rc;
+}
+
static enum _ecore_status_t
ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- struct ecore_tunn_start_params *p_tunn,
+ struct ecore_tunnel_info *p_tunn,
int hw_mode,
bool b_hw_start,
enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
@@ -1532,7 +1817,9 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
return rc;
if (b_hw_start) {
/* enable interrupts */
- ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+ rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
/* send function start command */
rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
@@ -1618,14 +1905,31 @@ static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
p_hwfn->mcp_info->mfw_mb_length);
}
+enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_hw_init_params *p_params)
+{
+ if (p_params->p_tunn) {
+ ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
+ ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
+ }
+
+ p_hwfn->b_int_enabled = 1;
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_hw_init_params *p_params)
{
- enum _ecore_status_t rc, mfw_rc;
- u32 load_code, param;
- int i, j;
+ struct ecore_load_req_params load_req_params;
+ u32 load_code, param, drv_mb_param;
+ bool b_default_mtu = true;
+ struct ecore_hwfn *p_hwfn;
+ enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc;
+ int i;
- if (p_params->int_mode == ECORE_INT_MODE_MSI && p_dev->num_hwfns > 1) {
+ if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
+ (p_dev->num_hwfns > 1)) {
DP_NOTICE(p_dev, false,
"MSI mode is not supported for CMT devices\n");
return ECORE_INVAL;
@@ -1640,8 +1944,14 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ /* If management didn't provide a default, set one of our own */
+ if (!p_hwfn->hw_info.mtu) {
+ p_hwfn->hw_info.mtu = 1500;
+ b_default_mtu = false;
+ }
+
if (IS_VF(p_dev)) {
- p_hwfn->b_int_enabled = 1;
+ ecore_vf_start(p_hwfn, p_params);
continue;
}
@@ -1654,33 +1964,37 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
return rc;
- /* @@@TBD need to add here:
- * Check for fan failure
- * Prev_unload
- */
- rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
- if (rc) {
+ OSAL_MEM_ZERO(&load_req_params, sizeof(load_req_params));
+ load_req_params.drv_role = p_params->is_crash_kernel ?
+ ECORE_DRV_ROLE_KDUMP :
+ ECORE_DRV_ROLE_OS;
+ load_req_params.timeout_val = p_params->mfw_timeout_val;
+ load_req_params.avoid_eng_reset = p_params->avoid_eng_reset;
+ rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_req_params);
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "Failed sending LOAD_REQ command\n");
+ "Failed sending a LOAD_REQ command\n");
return rc;
}
+ load_code = load_req_params.load_code;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load request was sent. Load code: 0x%x\n",
+ load_code);
+
/* CQ75580:
* When coming back from hiberbate state, the registers from
* which shadow is read initially are not initialized. It turns
* out that these registers get initialized during the call to
* ecore_mcp_load_req request. So we need to reread them here
* to get the proper shadow register value.
- * Note: This is a workaround for the missinginig MFW
+ * Note: This is a workaround for the missing MFW
* initialization. It may be removed once the implementation
* is done.
*/
ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
- rc, load_code);
-
/* Only relevant for recovery:
* Clear the indication after the LOAD_REQ command is responded
* by the MFW.
@@ -1699,33 +2013,14 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode);
- if (rc)
+ if (rc != ECORE_SUCCESS)
break;
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_PORT:
rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode);
- if (rc)
+ if (rc != ECORE_SUCCESS)
break;
-
-#ifndef REAL_ASIC_ONLY
- if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
- struct init_nig_pri_tc_map_req tc_map;
-
- OSAL_MEM_ZERO(&tc_map, sizeof(tc_map));
-
- /* remove this once flow control is
- * implemented
- */
- for (j = 0; j < NUM_OF_VLAN_PRIORITIES; j++) {
- tc_map.pri[j].tc_id = 0;
- tc_map.pri[j].valid = 1;
- }
- ecore_init_nig_pri_tc_map(p_hwfn,
- p_hwfn->p_main_ptt,
- &tc_map);
- }
-#endif
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
@@ -1736,6 +2031,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
p_params->allow_npar_tx_switch);
break;
default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected load code [0x%08x]", load_code);
rc = ECORE_NOTIMPL;
break;
}
@@ -1751,16 +2048,13 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
0, &load_code, &param);
if (rc != ECORE_SUCCESS)
return rc;
+
if (mfw_rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "Failed sending LOAD_DONE command\n");
+ "Failed sending a LOAD_DONE command\n");
return mfw_rc;
}
- ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt);
- ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
- p_params->epoch);
-
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"sending phony dcbx set command to trigger DCBx attention handling\n");
@@ -1777,7 +2071,29 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
p_hwfn->hw_init_done = true;
}
- return ECORE_SUCCESS;
+ if (IS_PF(p_dev)) {
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ drv_mb_param = STORM_FW_VERSION;
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
+ drv_mb_param, &load_code, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update firmware version\n");
+
+ if (!b_default_mtu)
+ rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.mtu);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update default mtu\n");
+
+ rc = ecore_mcp_ov_update_driver_state(p_hwfn,
+ p_hwfn->p_main_ptt,
+ ECORE_OV_DRIVER_STATE_DISABLED);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update driver state\n");
+ }
+
+ return rc;
}
#define ECORE_HW_STOP_RETRY_LIMIT (10)
@@ -1802,13 +2118,14 @@ static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
*/
OSAL_MSLEEP(1);
}
- if (i == ECORE_HW_STOP_RETRY_LIMIT)
- DP_NOTICE(p_hwfn, true,
- "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
- (u8)ecore_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN),
- (u8)ecore_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK));
+
+ if (i < ECORE_HW_STOP_RETRY_LIMIT)
+ return;
+
+ DP_NOTICE(p_hwfn, true, "Timers linear scans are not over"
+ " [Connection %02x Tasks %02x]\n",
+ (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
@@ -1823,32 +2140,77 @@ void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
}
}
+static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 expected_val)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt, addr);
+
+ if (val != expected_val) {
+ DP_NOTICE(p_hwfn, true,
+ "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n",
+ addr, val, expected_val);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
{
- enum _ecore_status_t rc = ECORE_SUCCESS, t_rc;
+ struct ecore_hwfn *p_hwfn;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc, rc2 = ECORE_SUCCESS;
int j;
for_each_hwfn(p_dev, j) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
- struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ p_hwfn = &p_dev->hwfns[j];
+ p_ptt = p_hwfn->p_main_ptt;
DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
if (IS_VF(p_dev)) {
ecore_vf_pf_int_cleanup(p_hwfn);
+ rc = ecore_vf_pf_reset(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_vf_pf_reset failed. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
continue;
}
/* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false;
+ /* Send unload command to MCP */
+ if (!p_dev->recov_in_prog) {
+ rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ }
+
+ OSAL_DPC_SYNC(p_hwfn);
+
+ /* After this point no MFW attentions are expected, e.g. prevent
+ * race between pf stop and dcbx pf update.
+ */
+
rc = ecore_sp_pf_stop(p_hwfn);
- if (rc)
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
+ "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
/* perform debug action after PF stop was sent */
- OSAL_AFTER_PF_STOP((void *)p_hwfn->p_dev, p_hwfn->my_id);
+ OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
/* close NIG to BRB gate */
ecore_wr(p_hwfn, p_ptt,
@@ -1875,20 +2237,48 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
/* Need to wait 1ms to guarantee SBs are cleared */
OSAL_MSLEEP(1);
- }
+
+ if (!p_dev->recov_in_prog) {
+ ecore_verify_reg_val(p_hwfn, p_ptt,
+ QM_REG_USG_CNT_PF_TX, 0);
+ ecore_verify_reg_val(p_hwfn, p_ptt,
+ QM_REG_USG_CNT_PF_OTHER, 0);
+ /* @@@TBD - assert on incorrect xCFC values (10.b) */
+ }
+
+ /* Disable PF in HW blocks */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+
+ if (!p_dev->recov_in_prog) {
+ ecore_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ }
+ } /* hwfn loop */
if (IS_PF(p_dev)) {
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
+
/* Disable DMAE in PXP - in CMT, this should only be done for
* first hw-function, and only after all transactions have
* stopped for all active hw-functions.
*/
- t_rc = ecore_change_pci_hwfn(&p_dev->hwfns[0],
- p_dev->hwfns[0].p_main_ptt, false);
- if (t_rc != ECORE_SUCCESS)
- rc = t_rc;
+ rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_change_pci_hwfn failed. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
}
- return rc;
+ return rc2;
}
void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
@@ -1949,84 +2339,6 @@ void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}
-static enum _ecore_status_t ecore_reg_assert(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 reg,
- bool expected)
-{
- u32 assert_val = ecore_rd(p_hwfn, p_ptt, reg);
-
- if (assert_val != expected) {
- DP_NOTICE(p_hwfn, true, "Value at address 0x%08x != 0x%08x\n",
- reg, expected);
- return ECORE_UNKNOWN_ERROR;
- }
-
- return 0;
-}
-
-enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev)
-{
- enum _ecore_status_t rc = ECORE_SUCCESS;
- u32 unload_resp, unload_param;
- int i;
-
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
- if (IS_VF(p_dev)) {
- rc = ecore_vf_pf_reset(p_hwfn);
- if (rc)
- return rc;
- continue;
- }
-
- DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Resetting hw/fw\n");
-
- /* Check for incorrect states */
- if (!p_dev->recov_in_prog) {
- ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_TX, 0);
- ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_OTHER, 0);
- /* @@@TBD - assert on incorrect xCFC values (10.b) */
- }
-
- /* Disable PF in HW blocks */
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
-
- if (p_dev->recov_in_prog) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
- "Recovery is in progress -> skip sending unload_req/done\n");
- break;
- }
-
- /* Send unload command to MCP */
- rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_REQ,
- DRV_MB_PARAM_UNLOAD_WOL_MCP,
- &unload_resp, &unload_param);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
- "ecore_hw_reset: UNLOAD_REQ failed\n");
- /* @@TBD - what to do? for now, assume ENG. */
- unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
- }
-
- rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_DONE,
- 0, &unload_resp, &unload_param);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn,
- true, "ecore_hw_reset: UNLOAD_DONE failed\n");
- /* @@@TBD - Should it really ASSERT here ? */
- return rc;
- }
- }
-
- return rc;
-}
-
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
{
@@ -2040,22 +2352,22 @@ static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
/* clear indirect access */
if (ECORE_IS_AH(p_hwfn->p_dev)) {
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_E8_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_EC_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_F0_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_F4_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0);
} else {
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
}
/* Clean Previous errors if such exist */
@@ -2090,6 +2402,7 @@ static void get_function_id(struct ecore_hwfn *p_hwfn)
static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
+ struct ecore_sb_cnt_info sb_cnt_info;
int num_features = 1;
/* L2 Queues require each: 1 status block. 1 L2 queue */
@@ -2098,145 +2411,226 @@ static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
RESC_NUM(p_hwfn, ECORE_SB) / num_features,
RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
+ OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
+ ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ feat_num[ECORE_VF_L2_QUE] =
+ OSAL_MIN_T(u32,
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
+ FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
+ sb_cnt_info.sb_iov_cnt);
+
+ feat_num[ECORE_FCOE_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+ feat_num[ECORE_ISCSI_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
- "#PF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
- feat_num[ECORE_PF_L2_QUE],
- feat_num[ECORE_RDMA_CNQ],
- RESC_NUM(p_hwfn, ECORE_SB), num_features);
+ "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
+ (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ),
+ (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ),
+ (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ),
+ RESC_NUM(p_hwfn, ECORE_SB));
}
-static enum resource_id_enum
-ecore_hw_get_mfw_res_id(enum ecore_resources res_id)
+const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
{
- enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
-
switch (res_id) {
case ECORE_SB:
- mfw_res_id = RESOURCE_NUM_SB_E;
- break;
+ return "SB";
case ECORE_L2_QUEUE:
- mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
- break;
+ return "L2_QUEUE";
case ECORE_VPORT:
- mfw_res_id = RESOURCE_NUM_VPORT_E;
- break;
+ return "VPORT";
case ECORE_RSS_ENG:
- mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
- break;
+ return "RSS_ENG";
case ECORE_PQ:
- mfw_res_id = RESOURCE_NUM_PQ_E;
- break;
+ return "PQ";
case ECORE_RL:
- mfw_res_id = RESOURCE_NUM_RL_E;
- break;
+ return "RL";
case ECORE_MAC:
+ return "MAC";
case ECORE_VLAN:
- /* Each VFC resource can accommodate both a MAC and a VLAN */
- mfw_res_id = RESOURCE_VFC_FILTER_E;
- break;
+ return "VLAN";
+ case ECORE_RDMA_CNQ_RAM:
+ return "RDMA_CNQ_RAM";
case ECORE_ILT:
- mfw_res_id = RESOURCE_ILT_E;
- break;
+ return "ILT";
case ECORE_LL2_QUEUE:
- mfw_res_id = RESOURCE_LL2_QUEUE_E;
- break;
- case ECORE_RDMA_CNQ_RAM:
+ return "LL2_QUEUE";
case ECORE_CMDQS_CQS:
- /* CNQ/CMDQS are the same resource */
- mfw_res_id = RESOURCE_CQS_E;
- break;
+ return "CMDQS_CQS";
case ECORE_RDMA_STATS_QUEUE:
- mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
- break;
+ return "RDMA_STATS_QUEUE";
+ case ECORE_BDQ:
+ return "BDQ";
default:
- break;
+ return "UNKNOWN_RESOURCE";
+ }
+}
+
+static enum _ecore_status_t
+__ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ resc_max_val, p_mcp_resp);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "MFW response failure for a max value setting of resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
+ DP_INFO(p_hwfn,
+ "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
+ res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn)
+{
+ bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ u32 resc_max_val, mcp_resp;
+ u8 res_id;
+ enum _ecore_status_t rc;
+
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
+ /* @DPDK */
+ switch (res_id) {
+ case ECORE_LL2_QUEUE:
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_RDMA_STATS_QUEUE:
+ case ECORE_BDQ:
+ resc_max_val = 0;
+ break;
+ default:
+ continue;
+ }
+
+ rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id,
+ resc_max_val, &mcp_resp);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* There's no point to continue to the next resource if the
+ * command is not supported by the MFW.
+ * We do continue if the command is supported but the resource
+ * is unknown to the MFW. Such a resource will be later
+ * configured with the default allocation values.
+ */
+ if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
+ return ECORE_NOTIMPL;
}
- return mfw_res_id;
+ return ECORE_SUCCESS;
}
-static u32 ecore_hw_get_dflt_resc_num(struct ecore_hwfn *p_hwfn,
- enum ecore_resources res_id)
+static
+enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
+ enum ecore_resources res_id,
+ u32 *p_resc_num, u32 *p_resc_start)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
struct ecore_sb_cnt_info sb_cnt_info;
- u32 dflt_resc_num = 0;
switch (res_id) {
case ECORE_SB:
OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- dflt_resc_num = sb_cnt_info.sb_cnt;
+ *p_resc_num = sb_cnt_info.sb_cnt;
break;
case ECORE_L2_QUEUE:
- dflt_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+ *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
MAX_NUM_L2_QUEUES_BB) / num_funcs;
break;
case ECORE_VPORT:
- dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
MAX_NUM_VPORTS_BB) / num_funcs;
break;
case ECORE_RSS_ENG:
- dflt_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+ *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
ETH_RSS_ENGINE_NUM_BB) / num_funcs;
break;
case ECORE_PQ:
- dflt_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+ *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
MAX_QM_TX_QUEUES_BB) / num_funcs;
break;
case ECORE_RL:
- dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
break;
case ECORE_MAC:
case ECORE_VLAN:
/* Each VFC resource can accommodate both a MAC and a VLAN */
- dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+ *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case ECORE_ILT:
- dflt_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+ *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
PXP_NUM_ILT_RECORDS_BB) / num_funcs;
break;
case ECORE_LL2_QUEUE:
- dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
break;
case ECORE_RDMA_CNQ_RAM:
case ECORE_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
/* @DPDK */
- dflt_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs;
+ *p_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs;
break;
case ECORE_RDMA_STATS_QUEUE:
/* @DPDK */
- dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
MAX_NUM_VPORTS_BB) / num_funcs;
break;
+ case ECORE_BDQ:
+ /* @DPDK */
+ *p_resc_num = 0;
+ break;
default:
break;
}
- return dflt_resc_num;
+
+ switch (res_id) {
+ case ECORE_BDQ:
+ if (!*p_resc_num)
+ *p_resc_start = 0;
+ break;
+ default:
+ *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
+ break;
+ }
+
+ return ECORE_SUCCESS;
}
-static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
- enum ecore_resources res_id,
- bool drv_resc_alloc)
+static enum _ecore_status_t
+__ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
+ bool drv_resc_alloc)
{
- u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param;
- u32 *p_resc_num, *p_resc_start;
- struct resource_info resc_info;
+ u32 dflt_resc_num = 0, dflt_resc_start = 0;
+ u32 mcp_resp, *p_resc_num, *p_resc_start;
enum _ecore_status_t rc;
p_resc_num = &RESC_NUM(p_hwfn, res_id);
p_resc_start = &RESC_START(p_hwfn, res_id);
- dflt_resc_num = ecore_hw_get_dflt_resc_num(p_hwfn, res_id);
- if (!dflt_resc_num) {
- DP_ERR(p_hwfn, "Failed to get default amount for resource %d\n",
- res_id);
- return ECORE_INVAL;
+ rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
+ &dflt_resc_start);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "Failed to get default amount for resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
+ return rc;
}
- dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
@@ -2246,21 +2640,13 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
}
#endif
- OSAL_MEM_ZERO(&resc_info, sizeof(resc_info));
- resc_info.res_id = ecore_hw_get_mfw_res_id(res_id);
- if (resc_info.res_id == RESOURCE_NUM_INVALID) {
- DP_ERR(p_hwfn,
- "Failed to match resource %d with MFW resources\n",
- res_id);
- return ECORE_INVAL;
- }
-
- rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info,
- &mcp_resp, &mcp_param);
+ rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ &mcp_resp, p_resc_num, p_resc_start);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "MFW resp failure for a resc alloc req [res_id %d]\n",
- res_id);
+ "MFW response failure for an allocation request for"
+ " resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
return rc;
}
@@ -2269,15 +2655,13 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
* - There is an internal error in the MFW while processing the request
* - The resource ID is unknown to the MFW
*/
- if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK &&
- mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) {
- /* @DPDK */
- DP_INFO(p_hwfn,
- "No allocation info for resc %d [mcp_resp 0x%x].",
- res_id, mcp_resp);
+ if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
DP_INFO(p_hwfn,
- "Applying default values [num %d, start %d].\n",
- dflt_resc_num, dflt_resc_start);
+ "Failed to receive allocation info for resource %d [%s]."
+ " mcp_resp = 0x%x. Applying default values"
+ " [%d,%d].\n",
+ res_id, ecore_hw_get_resc_name(res_id), mcp_resp,
+ dflt_resc_num, dflt_resc_start);
*p_resc_num = dflt_resc_num;
*p_resc_start = dflt_resc_start;
@@ -2287,71 +2671,51 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
/* TBD - remove this when revising the handling of the SB resource */
if (res_id == ECORE_SB) {
/* Excluding the slowpath SB */
- resc_info.size -= 1;
- resc_info.offset -= p_hwfn->enabled_func_idx;
+ *p_resc_num -= 1;
+ *p_resc_start -= p_hwfn->enabled_func_idx;
}
- *p_resc_num = resc_info.size;
- *p_resc_start = resc_info.offset;
-
if (*p_resc_num != dflt_resc_num || *p_resc_start != dflt_resc_start) {
- DP_NOTICE(p_hwfn, false,
- "Resource %d: MFW allocation [num %d, start %d]",
- res_id, *p_resc_num, *p_resc_start);
- DP_NOTICE(p_hwfn, false,
- "differs from default values [num %d, start %d]%s\n",
- dflt_resc_num,
- dflt_resc_start,
- drv_resc_alloc ? " - applying default values" : "");
+ DP_INFO(p_hwfn,
+ "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n",
+ res_id, ecore_hw_get_resc_name(res_id), *p_resc_num,
+ *p_resc_start, dflt_resc_num, dflt_resc_start,
+ drv_resc_alloc ? " - Applying default values" : "");
if (drv_resc_alloc) {
*p_resc_num = dflt_resc_num;
*p_resc_start = dflt_resc_start;
}
}
- out:
+out:
return ECORE_SUCCESS;
}
-static const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
+static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
+ bool drv_resc_alloc)
{
- switch (res_id) {
- case ECORE_SB:
- return "SB";
- case ECORE_L2_QUEUE:
- return "L2_QUEUE";
- case ECORE_VPORT:
- return "VPORT";
- case ECORE_RSS_ENG:
- return "RSS_ENG";
- case ECORE_PQ:
- return "PQ";
- case ECORE_RL:
- return "RL";
- case ECORE_MAC:
- return "MAC";
- case ECORE_VLAN:
- return "VLAN";
- case ECORE_RDMA_CNQ_RAM:
- return "RDMA_CNQ_RAM";
- case ECORE_ILT:
- return "ILT";
- case ECORE_LL2_QUEUE:
- return "LL2_QUEUE";
- case ECORE_CMDQS_CQS:
- return "CMDQS_CQS";
- case ECORE_RDMA_STATS_QUEUE:
- return "RDMA_STATS_QUEUE";
- default:
- return "UNKNOWN_RESOURCE";
+ enum _ecore_status_t rc;
+ u8 res_id;
+
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
+ rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS)
+ return rc;
}
+
+ return ECORE_SUCCESS;
}
+#define ECORE_RESC_ALLOC_LOCK_RETRY_CNT 10
+#define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */
+
static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
bool drv_resc_alloc)
{
+ struct ecore_resc_unlock_params resc_unlock_params;
+ struct ecore_resc_lock_params resc_lock_params;
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
- enum _ecore_status_t rc;
u8 res_id;
+ enum _ecore_status_t rc;
#ifndef ASIC_ONLY
u32 *resc_start = p_hwfn->hw_info.resc_start;
u32 *resc_num = p_hwfn->hw_info.resc_num;
@@ -2364,10 +2728,62 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB;
#endif
- for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
- rc = ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
+ /* Setting the max values of the soft resources and the following
+ * resources allocation queries should be atomic. Since several PFs can
+ * run in parallel - a resource lock is needed.
+ * If either the resource lock or resource set value commands are not
+ * supported - skip the the max values setting, release the lock if
+ * needed, and proceed to the queries. Other failures, including a
+ * failure to acquire the lock, will cause this function to fail.
+ * Old drivers that don't acquire the lock can run in parallel, and
+ * their allocation values won't be affected by the updated max values.
+ */
+ OSAL_MEM_ZERO(&resc_lock_params, sizeof(resc_lock_params));
+ resc_lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
+ resc_lock_params.retry_num = ECORE_RESC_ALLOC_LOCK_RETRY_CNT;
+ resc_lock_params.retry_interval = ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US;
+ resc_lock_params.sleep_b4_retry = true;
+ OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params));
+ resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
+
+ rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
+ return rc;
+ } else if (rc == ECORE_NOTIMPL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
+ } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to acquire the resource lock for the resource allocation commands\n");
+ rc = ECORE_BUSY;
+ goto unlock_and_exit;
+ } else {
+ rc = ecore_hw_set_soft_resc_size(p_hwfn);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to set the max values of the soft resources\n");
+ goto unlock_and_exit;
+ } else if (rc == ECORE_NOTIMPL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ &resc_unlock_params);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+ }
+
+ rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS)
+ goto unlock_and_exit;
+
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ &resc_unlock_params);
if (rc != ECORE_SUCCESS)
- return rc;
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
}
#ifndef ASIC_ONLY
@@ -2420,14 +2836,21 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
RESC_START(p_hwfn, res_id));
return ECORE_SUCCESS;
+
+unlock_and_exit:
+ ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, &resc_unlock_params);
+ return rc;
}
-static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static enum _ecore_status_t
+ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_hw_prepare_params *p_params)
{
- u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+ u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode;
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
struct ecore_mcp_link_params *link;
+ enum _ecore_status_t rc;
/* Read global nvm_cfg address */
nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
@@ -2435,6 +2858,8 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
/* Verify MCP has initialized it */
if (!nvm_cfg_addr) {
DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM;
return ECORE_INVAL;
}
@@ -2474,6 +2899,9 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G;
+ break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
break;
@@ -2486,6 +2914,28 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
break;
}
+ /* Read DCBX configuration */
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ dcbx_mode = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, generic_cont0));
+ dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK)
+ >> NVM_CFG1_PORT_DCBX_MODE_OFFSET;
+ switch (dcbx_mode) {
+ case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC;
+ break;
+ case NVM_CFG1_PORT_DCBX_MODE_CEE:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE;
+ break;
+ case NVM_CFG1_PORT_DCBX_MODE_IEEE:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE;
+ break;
+ default:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED;
+ }
+
/* Read default link configuration */
link = &p_hwfn->mcp_info->link_input;
port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
@@ -2595,7 +3045,13 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
&p_hwfn->hw_info.device_capabilities);
- return ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+ rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
+ rc = ECORE_SUCCESS;
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
+ }
+
+ return rc;
}
static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
@@ -2615,7 +3071,12 @@ static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
* In case of CMT in BB, only the "even" functions are enabled, and thus
* the number of functions for both hwfns is learnt from the same bits.
*/
- reg_function_hide = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+ if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) {
+ reg_function_hide = ecore_rd(p_hwfn, p_ptt,
+ MISCS_REG_FUNCTION_HIDE_BB_K2);
+ } else { /* E5 */
+ reg_function_hide = 0;
+ }
if (reg_function_hide & 0x1) {
if (ECORE_IS_BB(p_dev)) {
@@ -2681,8 +3142,7 @@ static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
port_mode = 1;
else
#endif
- port_mode = ecore_rd(p_hwfn, p_ptt,
- CNIG_REG_NW_PORT_MODE_BB_B0);
+ port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
if (port_mode < 3) {
p_hwfn->p_dev->num_ports_in_engines = 1;
@@ -2697,8 +3157,8 @@ static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
}
}
-static void ecore_hw_info_port_num_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
u32 port;
int i;
@@ -2727,7 +3187,8 @@ static void ecore_hw_info_port_num_ah(struct ecore_hwfn *p_hwfn,
#endif
for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
port = ecore_rd(p_hwfn, p_ptt,
- CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
+ CNIG_REG_NIG_PORT0_CONF_K2_E5 +
+ (i * 4));
if (port & 1)
p_hwfn->p_dev->num_ports_in_engines++;
}
@@ -2739,20 +3200,27 @@ static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
if (ECORE_IS_BB(p_hwfn->p_dev))
ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
else
- ecore_hw_info_port_num_ah(p_hwfn, p_ptt);
+ ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
}
static enum _ecore_status_t
ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- enum ecore_pci_personality personality, bool drv_resc_alloc)
+ enum ecore_pci_personality personality,
+ struct ecore_hw_prepare_params *p_params)
{
+ bool drv_resc_alloc = p_params->drv_resc_alloc;
enum _ecore_status_t rc;
/* Since all information is common, only first hwfns should do this */
if (IS_LEAD_HWFN(p_hwfn)) {
rc = ecore_iov_hw_info(p_hwfn);
- if (rc)
- return rc;
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_BAD_IOV;
+ else
+ return rc;
+ }
}
/* TODO In get_hw_info, amoungst others:
@@ -2765,13 +3233,22 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
ecore_hw_info_port_num(p_hwfn, p_ptt);
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
+#endif
+ rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+#ifndef ASIC_ONLY
+ }
#endif
- ecore_hw_get_nvm_info(p_hwfn, p_ptt);
rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
- if (rc)
- return rc;
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU;
+ else
+ return rc;
+ }
#ifndef ASIC_ONLY
if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
@@ -2795,11 +3272,14 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
}
- if (personality != ECORE_PCI_DEFAULT)
+ if (personality != ECORE_PCI_DEFAULT) {
p_hwfn->hw_info.personality = personality;
- else if (ecore_mcp_is_init(p_hwfn))
- p_hwfn->hw_info.personality =
- p_hwfn->mcp_info->func_info.protocol;
+ } else if (ecore_mcp_is_init(p_hwfn)) {
+ enum ecore_pci_personality protocol;
+
+ protocol = p_hwfn->mcp_info->func_info.protocol;
+ p_hwfn->hw_info.personality = protocol;
+ }
#ifndef ASIC_ONLY
/* To overcome ILT lack for emulation, until at least until we'll have
@@ -2826,18 +3306,23 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
ecore_get_num_funcs(p_hwfn, p_ptt);
+ if (ecore_mcp_is_init(p_hwfn))
+ p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
+
/* In case of forcing the driver's default resource allocation, calling
* ecore_hw_get_resc() should come after initializing the personality
* and after getting the number of functions, since the calculation of
* the resources/features depends on them.
* This order is not harmful if not forcing.
*/
- return ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
-}
+ rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
+ rc = ECORE_SUCCESS;
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
+ }
-#define ECORE_DEV_ID_MASK 0xff00
-#define ECORE_DEV_ID_MASK_BB 0x1600
-#define ECORE_DEV_ID_MASK_AH 0x8000
+ return rc;
+}
static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
{
@@ -2892,9 +3377,9 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
MISCS_REG_CHIP_METAL);
MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
DP_INFO(p_dev->hwfns,
- "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
ECORE_IS_BB(p_dev) ? "BB" : "AH",
- CHIP_REV_IS_A0(p_dev) ? 0 : 1,
+ 'A' + p_dev->chip_rev, (int)p_dev->chip_metal,
p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
p_dev->chip_metal);
@@ -2948,11 +3433,13 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev)
#endif
static enum _ecore_status_t
-ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
- void OSAL_IOMEM *p_doorbells,
+ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM * p_regview,
+ void OSAL_IOMEM * p_doorbells,
struct ecore_hw_prepare_params *p_params)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
+ struct ecore_mdump_info mdump_info;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* Split PCI bars evenly between hwfns */
@@ -2966,6 +3453,8 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
DP_ERR(p_hwfn,
"Reading the ME register returns all Fs; Preventing further chip access\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME;
return ECORE_INVAL;
}
@@ -2975,6 +3464,8 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
rc = ecore_ptt_pool_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err0;
}
@@ -2984,8 +3475,12 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
rc = ecore_get_dev_info(p_dev);
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_FAILED_DEV;
goto err1;
+ }
}
ecore_hw_hwfn_prepare(p_hwfn);
@@ -2994,50 +3489,72 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err1;
}
- if (p_hwfn == ECORE_LEADING_HWFN(p_dev) && !p_dev->recov_in_prog) {
- rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
- if (rc != ECORE_SUCCESS)
- DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
- }
-
/* Read the device configuration information from the HW and SHMEM */
rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
- p_params->personality, p_params->drv_resc_alloc);
+ p_params->personality, p_params);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
goto err2;
}
+ /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is
+ * called, since among others it sets the ports number in an engine.
+ */
+ if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) &&
+ !p_dev->recov_in_prog) {
+ rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+ }
+
+ /* Check if mdump logs are present and update the epoch value */
+ if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) {
+ rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
+ &mdump_info);
+ if (rc == ECORE_SUCCESS && mdump_info.num_of_logs > 0) {
+ DP_NOTICE(p_hwfn, false,
+ "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
+ }
+
+ ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
+ p_params->epoch);
+ }
+
/* Allocate the init RT array and initialize the init-ops engine */
rc = ecore_init_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err2;
}
#ifndef ASIC_ONLY
if (CHIP_REV_IS_FPGA(p_dev)) {
DP_NOTICE(p_hwfn, false,
"FPGA: workaround; Prevent DMAE parities\n");
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK, 7);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5,
+ 7);
DP_NOTICE(p_hwfn, false,
"FPGA: workaround: Set VF bar0 size\n");
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_VF_BAR0_SIZE, 4);
+ PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4);
}
#endif
return rc;
- err2:
+err2:
if (IS_LEAD_HWFN(p_hwfn))
ecore_iov_free_hw_info(p_dev);
ecore_mcp_free(p_hwfn);
- err1:
+err1:
ecore_hw_hwfn_free(p_hwfn);
- err0:
+err0:
return rc;
}
@@ -3049,6 +3566,9 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
+
/* Store the precompiled init data ptrs */
if (IS_PF(p_dev))
ecore_init_iro_array(p_dev);
@@ -3084,6 +3604,10 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
* initiliazed hwfn 0.
*/
if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_FAILED_ENG2;
+
if (IS_PF(p_dev)) {
ecore_init_free(p_hwfn);
ecore_mcp_free(p_hwfn);
@@ -3096,13 +3620,18 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
}
}
- return ECORE_SUCCESS;
+ return rc;
}
void ecore_hw_remove(struct ecore_dev *p_dev)
{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
int i;
+ if (IS_PF(p_dev))
+ ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
+ ECORE_OV_DRIVER_STATE_NOT_LOADED);
+
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
@@ -3164,13 +3693,13 @@ static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
struct ecore_chain *p_chain)
{
void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
- u8 *p_pbl_virt = (u8 *)p_chain->pbl.p_virt_table;
+ u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table;
u32 page_cnt = p_chain->page_cnt, i, pbl_size;
if (!pp_virt_addr_tbl)
return;
- if (!p_chain->pbl.p_virt_table)
+ if (!p_pbl_virt)
goto out;
for (i = 0; i < page_cnt; i++) {
@@ -3185,8 +3714,10 @@ static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
}
pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
- OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table,
- p_chain->pbl.p_phys_table, pbl_size);
+
+ if (!p_chain->b_external_pbl)
+ OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
+ p_chain->pbl_sp.p_phys_table, pbl_size);
out:
OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
}
@@ -3271,8 +3802,8 @@ ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
static enum _ecore_status_t
ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
{
- void *p_virt = OSAL_NULL;
dma_addr_t p_phys = 0;
+ void *p_virt = OSAL_NULL;
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
@@ -3286,8 +3817,10 @@ ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
return ECORE_SUCCESS;
}
-static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
- struct ecore_chain *p_chain)
+static enum _ecore_status_t
+ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl)
{
void *p_virt = OSAL_NULL;
u8 *p_pbl_virt = OSAL_NULL;
@@ -3296,13 +3829,12 @@ static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
u32 page_cnt = p_chain->page_cnt, size, i;
size = page_cnt * sizeof(*pp_virt_addr_tbl);
- pp_virt_addr_tbl = (void **)OSAL_VALLOC(p_dev, size);
+ pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
if (!pp_virt_addr_tbl) {
DP_NOTICE(p_dev, true,
"Failed to allocate memory for the chain virtual addresses table\n");
return ECORE_NOMEM;
}
- OSAL_MEM_ZERO(pp_virt_addr_tbl, size);
/* The allocation of the PBL table is done with its full size, since it
* is expected to be successive.
@@ -3311,7 +3843,15 @@ static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
* should be saved to allow its freeing during the error flow.
*/
size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
- p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
+
+ if (ext_pbl == OSAL_NULL) {
+ p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
+ } else {
+ p_pbl_virt = ext_pbl->p_pbl_virt;
+ p_pbl_phys = ext_pbl->p_pbl_phys;
+ p_chain->b_external_pbl = true;
+ }
+
ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
pp_virt_addr_tbl);
if (!p_pbl_virt) {
@@ -3349,7 +3889,8 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
enum ecore_chain_mode mode,
enum ecore_chain_cnt_type cnt_type,
u32 num_elems, osal_size_t elem_size,
- struct ecore_chain *p_chain)
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl)
{
u32 page_cnt;
enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -3380,7 +3921,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
rc = ecore_chain_alloc_single(p_dev, p_chain);
break;
case ECORE_CHAIN_MODE_PBL:
- rc = ecore_chain_alloc_pbl(p_dev, p_chain);
+ rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl);
break;
}
if (rc)
@@ -3388,7 +3929,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
return ECORE_SUCCESS;
- nomem:
+nomem:
ecore_chain_free(p_dev, p_chain);
return rc;
}
@@ -3764,19 +4305,14 @@ static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
{
struct coalescing_timeset *p_coal_timeset;
- if (IS_VF(p_hwfn->p_dev)) {
- DP_NOTICE(p_hwfn, true, "VF coalescing config not supported\n");
- return ECORE_INVAL;
- }
-
if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
DP_NOTICE(p_hwfn, true,
"Coalescing configuration not enabled\n");
return ECORE_INVAL;
}
- OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
p_coal_timeset = p_eth_qzone;
+ OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
@@ -3784,15 +4320,55 @@ static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ void *p_handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_ptt *p_ptt;
+
+ /* TODO - Configuring a single queue's coalescing but
+ * claiming all queues are abiding same configuration
+ * for PF and VF both.
+ */
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal,
+ tx_coal, p_cid);
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ if (rx_coal) {
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc)
+ goto out;
+ p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
+ }
+
+ if (tx_coal) {
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
+ if (rc)
+ goto out;
+ p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
+ }
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id)
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid)
{
struct ustorm_eth_queue_zone eth_qzone;
- u16 fw_qid = 0;
+ u8 timeset, timer_res;
u32 address;
enum _ecore_status_t rc;
- u8 timeset, timer_res;
/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
if (coalesce <= 0x7F) {
@@ -3807,35 +4383,32 @@ enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
}
timeset = (u8)(coalesce >> timer_res);
- rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
+ rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->abs.sb_idx, false);
if (rc != ECORE_SUCCESS)
goto out;
- address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+ address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct ustorm_eth_queue_zone), timeset);
if (rc != ECORE_SUCCESS)
goto out;
- p_hwfn->p_dev->rx_coalesce_usecs = coalesce;
out:
return rc;
}
enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id)
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid)
{
struct xstorm_eth_queue_zone eth_qzone;
- u16 fw_qid = 0;
+ u8 timeset, timer_res;
u32 address;
enum _ecore_status_t rc;
- u8 timeset, timer_res;
/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
if (coalesce <= 0x7F) {
@@ -3851,22 +4424,16 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
timeset = (u8)(coalesce >> timer_res);
- rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
+ rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->abs.sb_idx, true);
if (rc != ECORE_SUCCESS)
goto out;
- address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+ address = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct xstorm_eth_queue_zone), timeset);
- if (rc != ECORE_SUCCESS)
- goto out;
-
- p_hwfn->p_dev->tx_coalesce_usecs = coalesce;
out:
return rc;
}
@@ -4292,3 +4859,16 @@ int ecore_device_num_ports(struct ecore_dev *p_dev)
return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
}
+
+void ecore_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid,
+ __le16 *fw_lsb,
+ u8 *mac)
+{
+ ((u8 *)fw_msb)[0] = mac[1];
+ ((u8 *)fw_msb)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lsb)[0] = mac[5];
+ ((u8 *)fw_lsb)[1] = mac[4];
+}
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 042c0af2..e64a768d 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -58,18 +58,38 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
void ecore_resc_setup(struct ecore_dev *p_dev);
struct ecore_hw_init_params {
- /* tunnelling parameters */
- struct ecore_tunn_start_params *p_tunn;
+ /* Tunnelling parameters */
+ struct ecore_tunnel_info *p_tunn;
+
bool b_hw_start;
- /* interrupt mode [msix, inta, etc.] to use */
+
+ /* Interrupt mode [msix, inta, etc.] to use */
enum ecore_int_mode int_mode;
-/* npar tx switching to be used for vports configured for tx-switching */
+ /* NPAR tx switching to be used for vports configured for tx-switching
+ */
bool allow_npar_tx_switch;
- /* binary fw data pointer in binary fw file */
+
+ /* Binary fw data pointer in binary fw file */
const u8 *bin_fw_data;
- /* the OS Epoch time in seconds */
- u32 epoch;
+
+ /* Indicates whether the driver is running over a crash kernel.
+ * As part of the load request, this will be used for providing the
+ * driver role to the MFW.
+ * In case of a crash kernel over PDA - this should be set to false.
+ */
+ bool is_crash_kernel;
+
+ /* The timeout value that the MFW should use when locking the engine for
+ * the driver load process.
+ * A value of '0' means the default value, and '255' means no timeout.
+ */
+ u8 mfw_timeout_val;
+#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0
+#define ECORE_LOAD_REQ_LOCK_TO_NONE 255
+
+ /* Avoid engine reset when first PF loads on it */
+ bool avoid_eng_reset;
};
/**
@@ -131,22 +151,47 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev);
*/
void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
-/**
- * @brief ecore_hw_reset -
- *
- * @param p_dev
- *
- * @return enum _ecore_status_t
- */
-enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev);
+enum ecore_hw_prepare_result {
+ ECORE_HW_PREPARE_SUCCESS,
+
+ /* FAILED results indicate probe has failed & cleaned up */
+ ECORE_HW_PREPARE_FAILED_ENG2,
+ ECORE_HW_PREPARE_FAILED_ME,
+ ECORE_HW_PREPARE_FAILED_MEM,
+ ECORE_HW_PREPARE_FAILED_DEV,
+ ECORE_HW_PREPARE_FAILED_NVM,
+
+ /* BAD results indicate probe is passed even though some wrongness
+ * has occurred; Trying to actually use [I.e., hw_init()] might have
+ * dire reprecautions.
+ */
+ ECORE_HW_PREPARE_BAD_IOV,
+ ECORE_HW_PREPARE_BAD_MCP,
+ ECORE_HW_PREPARE_BAD_IGU,
+};
struct ecore_hw_prepare_params {
- /* personality to initialize */
+ /* Personality to initialize */
int personality;
- /* force the driver's default resource allocation */
+
+ /* Force the driver's default resource allocation */
bool drv_resc_alloc;
- /* check the reg_fifo after any register access */
+
+ /* Check the reg_fifo after any register access */
bool chk_reg_fifo;
+
+ /* Request the MFW to initiate PF FLR */
+ bool initiate_pf_flr;
+
+ /* The OS Epoch time in seconds */
+ u32 epoch;
+
+ /* Allow prepare to pass even if some initializations are failing.
+ * If set, the `p_prepare_res' field would be set with the return,
+ * and might allow probe to pass even if there are certain issues.
+ */
+ bool b_relaxed_probe;
+ enum ecore_hw_prepare_result p_relaxed_res;
};
/**
@@ -368,7 +413,8 @@ ecore_chain_alloc(struct ecore_dev *p_dev,
enum ecore_chain_cnt_type cnt_type,
u32 num_elems,
osal_size_t elem_size,
- struct ecore_chain *p_chain);
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl);
/**
* @brief ecore_chain_free - Free chain DMA memory
@@ -515,41 +561,24 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 id,
bool is_vf);
-
/**
- * @brief ecore_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
- * The fact that we can configure coalescing to up to 511, but on varying
- * accuracy [the bigger the value the less accurate] up to a mistake of 3usec
- * for the highest values.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param coalesce - Coalesce value in micro seconds.
- * @param qid - Queue index.
- * @param qid - SB Id
- *
- * @return enum _ecore_status_t
- */
-enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id);
-
-/**
- * @brief ecore_set_txq_coalesce - Configure coalesce parameters for a Tx queue
- * While the API allows setting coalescing per-qid, all tx queues sharing a
- * SB should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
+ * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
+ * Tx queue. The fact that we can configure coalescing to up to 511, but on
+ * varying accuracy [the bigger the value the less accurate] up to a mistake
+ * of 3usec for the highest values.
+ * While the API allows setting coalescing per-qid, all queues sharing a SB
+ * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
* otherwise configuration would break.
*
* @param p_hwfn
- * @param p_ptt
- * @param coalesce - Coalesce value in micro seconds.
- * @param qid - Queue index.
- * @param qid - SB Id
+ * @param rx_coal - Rx Coalesce value in micro seconds.
+ * @param tx_coal - TX Coalesce value in micro seconds.
+ * @param p_handle
*
* @return enum _ecore_status_t
- */
-enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id);
+ **/
+enum _ecore_status_t
+ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
+ u16 tx_coal, void *p_handle);
#endif
diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h
index 6395b7cd..2acd864d 100644
--- a/drivers/net/qede/base/ecore_gtt_reg_addr.h
+++ b/drivers/net/qede/base/ecore_gtt_reg_addr.h
@@ -10,43 +10,43 @@
#define GTT_REG_ADDR_H
/* Win 2 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
/* Win 3 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
/* Win 4 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
/* Win 5 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
/* Win 6 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
/* Win 7 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
/* Win 8 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
/* Win 9 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
/* Win 10 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
/* Win 11 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
#endif
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 179d410f..3042ed55 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -75,306 +75,306 @@ struct xstorm_core_conn_st_ctx {
__le32 reserved0[55] /* Pad to 15 cycles */;
};
-struct xstorm_core_conn_ag_ctx {
+struct e4_xstorm_core_conn_ag_ctx {
u8 reserved0 /* cdu_validation */;
u8 core_state /* state */;
u8 flags0;
/* exist_in_qm0 */
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
/* exist_in_qm1 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
/* exist_in_qm2 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
/* exist_in_qm3 */
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
/* bit4 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
/* cf_array_active */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
/* bit6 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
/* bit7 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
/* bit8 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
/* bit9 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
/* bit10 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
/* bit11 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
/* bit12 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
/* bit13 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
/* bit14 */
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
/* bit15 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
/* timer0cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
/* timer1cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
/* timer2cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
/* timer_stop_all */
-#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
/* cf10 */
-#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
/* cf11 */
-#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
/* cf12 */
-#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
/* cf13 */
-#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
/* cf14 */
-#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
/* cf15 */
-#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
/* cf16 */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
/* cf_array_cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
/* cf18 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
/* cf19 */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
/* cf20 */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
/* cf21 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
/* cf22 */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
/* cf0en */
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
/* cf1en */
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
/* cf2en */
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
/* cf3en */
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
/* cf4en */
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
/* cf5en */
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
/* cf6en */
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
/* cf7en */
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
/* cf8en */
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
/* cf9en */
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
/* cf10en */
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
/* cf11en */
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
/* cf12en */
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
/* cf13en */
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
/* cf14en */
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
/* cf15en */
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
/* cf16en */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
/* cf_array_cf_en */
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
/* cf18en */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
/* cf19en */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
/* cf20en */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
/* cf21en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
/* cf22en */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
/* cf23en */
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
/* rule0en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
/* rule1en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
/* rule2en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
/* rule3en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
/* rule4en */
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
/* rule5en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
/* rule6en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
/* rule7en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
/* rule8en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
/* rule9en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
/* rule10en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
/* rule11en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
/* rule12en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
/* rule13en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
/* rule14en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
/* rule15en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
/* rule16en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
/* rule17en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
/* rule18en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
/* rule19en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
/* rule20en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
/* rule21en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
/* rule22en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
/* rule23en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
/* rule24en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
/* rule25en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
/* bit16 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
/* bit17 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
/* bit18 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
/* bit19 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
/* bit20 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
/* bit21 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
/* cf23 */
-#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2 /* byte2 */;
__le16 physical_q0 /* physical_q0 */;
__le16 consolid_prod /* physical_q1 */;
@@ -410,7 +410,7 @@ struct xstorm_core_conn_ag_ctx {
u8 byte13 /* byte13 */;
u8 byte14 /* byte14 */;
u8 byte15 /* byte15 */;
- u8 byte16 /* byte16 */;
+ u8 e5_reserved /* e5_reserved */;
__le16 word11 /* word11 */;
__le32 reg10 /* reg10 */;
__le32 reg11 /* reg11 */;
@@ -428,89 +428,89 @@ struct xstorm_core_conn_ag_ctx {
__le16 word15 /* word15 */;
};
-struct tstorm_core_conn_ag_ctx {
+struct e4_tstorm_core_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
@@ -532,63 +532,63 @@ struct tstorm_core_conn_ag_ctx {
__le32 reg10 /* reg10 */;
};
-struct ustorm_core_conn_ag_ctx {
+struct e4_ustorm_core_conn_ag_ctx {
u8 reserved /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* conn_dpi */;
@@ -628,11 +628,11 @@ struct core_conn_context {
/* xstorm storm context */
struct xstorm_core_conn_st_ctx xstorm_st_context;
/* xstorm aggregative context */
- struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
/* tstorm aggregative context */
- struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
/* ustorm aggregative context */
- struct ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
/* mstorm storm context */
struct mstorm_core_conn_st_ctx mstorm_st_context;
/* ustorm storm context */
@@ -660,6 +660,7 @@ enum core_event_opcode {
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_FLUSH,
MAX_CORE_EVENT_OPCODE
};
@@ -743,6 +744,7 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+ CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
MAX_CORE_RAMROD_CMD_ID
};
@@ -834,7 +836,12 @@ struct core_rx_fast_path_cqe {
__le16 packet_length /* Total packet length (from the parser) */;
__le16 vlan /* 802.1q VLAN tag */;
struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
- __le32 reserved[4];
+/* bit- map: each bit represents a specific error. errors indications are
+ * provided by the cracker. see spec for detailed description
+ */
+ struct parsing_err_flags err_flags;
+ __le16 reserved0;
+ __le32 reserved1[3];
};
/*
@@ -860,7 +867,8 @@ struct core_rx_slow_path_cqe {
u8 type /* CQE type */;
u8 ramrod_cmd_id;
__le16 echo;
- __le32 reserved1[7];
+ struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
+ __le32 reserved1[5];
};
/*
@@ -926,36 +934,51 @@ struct core_rx_stop_ramrod_data {
/*
* Flags for Core TX BD
*/
-struct core_tx_bd_flags {
- u8 as_bitfield;
+struct core_tx_bd_data {
+ __le16 as_bitfield;
/* Do not allow additional VLAN manipulations on this packet (DCB) */
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
/* Insert VLAN into packet */
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
/* This is the first BD of the packet (for debug) */
-#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
-#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
/* Calculate the IP checksum for the packet */
-#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
/* Calculate the L4 checksum for the packet */
-#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
/* Packet is IPv6 with extensions */
-#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
-#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
/* If IPv6+ext, and if l4_csum is 1, than this field indicates L4 protocol:
* 0-TCP, 1-UDP
*/
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
/* The pseudo checksum mode to place in the L4 checksum field. Required only
- * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
+ * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
*/
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+/* Number of BDs that make up one packet - width wide enough to present
+ * CORE_LL2_TX_MAX_BDS_PER_PACKET
+ */
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+/* Use roce_flavor enum - Differentiate between Roce flavors is valid when
+ * connType is ROCE (use enum core_roce_flavor_type)
+ */
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+/* Calculate ip length */
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
};
/*
@@ -968,28 +991,18 @@ struct core_tx_bd {
* packets: echo data to pass to Rx
*/
__le16 nw_vlan_or_lb_echo;
- u8 bitfield0;
-/* Number of BDs that make up one packet - width wide enough to present
- * X_CORE_LL2_NUM_OF_BDS_ON_ST_CT
- */
-#define CORE_TX_BD_NBDS_MASK 0xF
-#define CORE_TX_BD_NBDS_SHIFT 0
-/* Use roce_flavor enum - Diffrentiate between Roce flavors is valid when
- * connType is ROCE (use enum core_roce_flavor_type)
- */
-#define CORE_TX_BD_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_ROCE_FLAV_SHIFT 4
-#define CORE_TX_BD_RESERVED0_MASK 0x7
-#define CORE_TX_BD_RESERVED0_SHIFT 5
- struct core_tx_bd_flags bd_flags /* BD Flags */;
+ struct core_tx_bd_data bd_data /* BD Flags */;
__le16 bitfield1;
+/* L4 Header Offset from start of packet (in Words). This is needed if both
+ * l4_csum and ipv6_ext are set
+ */
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
/* Packet destination - Network, LB (use enum core_tx_dest) */
#define CORE_TX_BD_TX_DST_MASK 0x1
#define CORE_TX_BD_TX_DST_SHIFT 14
-#define CORE_TX_BD_RESERVED1_MASK 0x1
-#define CORE_TX_BD_RESERVED1_SHIFT 15
+#define CORE_TX_BD_RESERVED_MASK 0x1
+#define CORE_TX_BD_RESERVED_SHIFT 15
};
@@ -1034,13 +1047,13 @@ struct core_tx_stop_ramrod_data {
/*
* Enum flag for what type of dcb data to update
*/
-enum dcb_dhcp_update_flag {
+enum dcb_dscp_update_mode {
/* use when no change should be done to dcb data */
- DONT_UPDATE_DCB_DHCP,
+ DONT_UPDATE_DCB_DSCP,
UPDATE_DCB /* use to update only l2 (vlan) priority */,
- UPDATE_DSCP /* use to update only l3 dhcp */,
- UPDATE_DCB_DSCP /* update vlan pri and dhcp */,
- MAX_DCB_DHCP_UPDATE_FLAG
+ UPDATE_DSCP /* use to update only l3 dscp */,
+ UPDATE_DCB_DSCP /* update vlan pri and dscp */,
+ MAX_DCB_DSCP_UPDATE_FLAG
};
@@ -1224,6 +1237,10 @@ enum iwarp_ll2_tx_queues {
IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
/* LL2 queue for unaligned packets sent aligned by the driver */
IWARP_LL2_ALIGNED_TX_QUEUE,
+/* LL2 queue for unaligned packets sent aligned and was right-trimmed by the
+ * driver
+ */
+ IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
IWARP_LL2_ERROR /* Error indication */,
MAX_IWARP_LL2_TX_QUEUES
};
@@ -1265,6 +1282,7 @@ enum malicious_vf_error_id {
/* Tunneled packet with IPv6+Ext without a proper number of BDs */
ETH_TUNN_IPV6_EXT_NBD_ERR,
ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
+ ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
MAX_MALICIOUS_VF_ERROR_ID
};
@@ -1311,9 +1329,13 @@ enum personality_type {
* tunnel configuration
*/
struct pf_start_tunnel_config {
-/* Set VXLAN tunnel UDP destination port. */
+/* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set -
+ * FW will use a default port
+ */
u8 set_vxlan_udp_port_flg;
-/* Set GENEVE tunnel UDP destination port. */
+/* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set -
+ * FW will use a default port
+ */
u8 set_geneve_udp_port_flg;
u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
/* If set, enable l2 GENEVE tunnel in TX path. */
@@ -1329,8 +1351,10 @@ struct pf_start_tunnel_config {
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
- __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
- __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+/* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
+ __le16 vxlan_udp_port;
+/* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
+ __le16 geneve_udp_port;
};
/*
@@ -1431,13 +1455,13 @@ struct pf_update_tunnel_config {
*/
struct pf_update_ramrod_data {
u8 pf_id;
- u8 update_eth_dcb_data_flag /* Update Eth DCB data indication */;
- u8 update_fcoe_dcb_data_flag /* Update FCOE DCB data indication */;
- u8 update_iscsi_dcb_data_flag /* Update iSCSI DCB data indication */;
- u8 update_roce_dcb_data_flag /* Update ROCE DCB data indication */;
+ u8 update_eth_dcb_data_mode /* Update Eth DCB data indication */;
+ u8 update_fcoe_dcb_data_mode /* Update FCOE DCB data indication */;
+ u8 update_iscsi_dcb_data_mode /* Update iSCSI DCB data indication */;
+ u8 update_roce_dcb_data_mode /* Update ROCE DCB data indication */;
/* Update RROCE (RoceV2) DCB data indication */
- u8 update_rroce_dcb_data_flag;
- u8 update_iwarp_dcb_data_flag /* Update IWARP DCB data indication */;
+ u8 update_rroce_dcb_data_mode;
+ u8 update_iwarp_dcb_data_mode /* Update IWARP DCB data indication */;
u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
@@ -1596,6 +1620,8 @@ struct tstorm_per_port_stat {
struct regpair fcoe_irregular_pkt;
/* packet is an ROCE irregular packet */
struct regpair roce_irregular_pkt;
+/* packet is an IWARP irregular packet */
+ struct regpair iwarp_irregular_pkt;
/* packet is an ETH irregular packet */
struct regpair eth_irregular_pkt;
/* packet is an TOE irregular packet */
@@ -1846,8 +1872,11 @@ struct dmae_cmd {
#define DMAE_CMD_SRC_VF_ID_SHIFT 0
#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
#define DMAE_CMD_DST_VF_ID_SHIFT 8
- __le32 comp_addr_lo /* PCIe completion address low or grc address */;
-/* PCIe completion address high or reserved (if completion address is in GRC) */
+/* PCIe completion address low in bytes or GRC completion address in DW */
+ __le32 comp_addr_lo;
+/* PCIe completion address high in bytes or reserved (if completion address is
+ * GRC)
+ */
__le32 comp_addr_hi;
__le32 comp_val /* Value to write to completion address */;
__le32 crc32 /* crc16 result */;
@@ -1919,6 +1948,92 @@ enum dmae_cmd_src_enum {
};
+struct e4_mstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+
+
+
+
+struct e4_ystorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+
/*
* IGU cleanup command
*/
@@ -2002,44 +2117,6 @@ struct igu_msix_vector {
};
-struct mstorm_core_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0 /* word0 */;
- __le16 word1 /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
-};
-
-
/*
* per encapsulation type enabling flags
*/
@@ -2187,10 +2264,6 @@ struct sdm_op_gen {
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
-
-
-
-
struct ystorm_core_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index e82b0d4c..917e8f4c 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -92,6 +92,13 @@ enum block_addr {
GRCBASE_MS = 0x6a0000,
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
+ GRCBASE_AVS_WRAP = 0x6b0000,
+ GRCBASE_RGFS = 0x1fa0000,
+ GRCBASE_RGSRC = 0x1fa8000,
+ GRCBASE_TGFS = 0x1fb0000,
+ GRCBASE_TGSRC = 0x1fb8000,
+ GRCBASE_PTLD = 0x1fc0000,
+ GRCBASE_YPLD = 0x1fe0000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -177,6 +184,13 @@ enum block_id {
BLOCK_MS,
BLOCK_PHY_PCIE,
BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_RGFS,
+ BLOCK_RGSRC,
+ BLOCK_TGFS,
+ BLOCK_TGSRC,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@@ -198,6 +212,10 @@ enum bin_dbg_buffer_type {
BIN_BUF_DBG_ATTN_REGS /* Attention registers */,
BIN_BUF_DBG_ATTN_INDEXES /* Attention indexes */,
BIN_BUF_DBG_ATTN_NAME_OFFSETS /* Attention name offsets */,
+ BIN_BUF_DBG_BUS_BLOCKS /* Debug Bus blocks */,
+ BIN_BUF_DBG_BUS_LINES /* Debug Bus lines */,
+ BIN_BUF_DBG_BUS_BLOCKS_USER_DATA /* Debug Bus blocks user data */,
+ BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS /* Debug Bus line name offsets */,
BIN_BUF_DBG_PARSING_STRINGS /* Debug Tools parsing strings */,
MAX_BIN_DBG_BUFFER_TYPE
};
@@ -209,8 +227,8 @@ enum bin_dbg_buffer_type {
struct dbg_attn_bit_mapping {
__le16 data;
/* The index of an attention in the blocks attentions list
- * (if is_unused_idx_cnt=0), or a number of consecutive unused attention bits
- * (if is_unused_idx_cnt=1)
+ * (if is_unused_bit_cnt=0), or a number of consecutive unused attention bits
+ * (if is_unused_bit_cnt=1)
*/
#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
@@ -259,10 +277,10 @@ struct dbg_attn_reg_result {
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
/* Number of attention indexes in this register */
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
-/* Offset of this registers block attention indexes (values in the range
- * 0..number of block attentions)
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+/* The offset of this registers attentions within the blocks attentions
+ * list (a value in the range 0..number of block attentions-1)
*/
__le16 attn_idx_offset;
__le16 reserved;
@@ -279,7 +297,7 @@ struct dbg_attn_block_result {
/* Value from dbg_attn_type enum */
#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
-/* Number of registers in the blok in which at least one attention bit is set */
+/* Number of registers in block in which at least one attention bit is set */
#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
/* Offset of this registers block attention names in the attention name offsets
@@ -314,17 +332,17 @@ struct dbg_mode_hdr {
*/
struct dbg_attn_reg {
struct dbg_mode_hdr mode /* Mode header */;
-/* Offset of this registers block attention indexes (values in the range
- * 0..number of block attentions)
+/* The offset of this registers attentions within the blocks attentions
+ * list (a value in the range 0..number of block attentions-1)
*/
__le16 attn_idx_offset;
__le32 data;
/* STS attention register GRC address (in dwords) */
#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
-/* Number of attention indexes in this register */
-#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24
+/* Number of attention in this register */
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
/* STS_CLR attention register GRC address (in dwords) */
__le32 sts_clr_address;
/* MASK attention register GRC address (in dwords) */
@@ -344,6 +362,53 @@ enum dbg_attn_type {
/*
+ * Debug Bus block data
+ */
+struct dbg_bus_block {
+/* Number of debug lines in this block (excluding signature & latency events) */
+ u8 num_of_lines;
+/* Indicates if this block has a latency events debug line (0/1). */
+ u8 has_latency_events;
+/* Offset of this blocks lines in the Debug Bus lines array. */
+ __le16 lines_offset;
+};
+
+
+/*
+ * Debug Bus block user data
+ */
+struct dbg_bus_block_user_data {
+/* Number of debug lines in this block (excluding signature & latency events) */
+ u8 num_of_lines;
+/* Indicates if this block has a latency events debug line (0/1). */
+ u8 has_latency_events;
+/* Offset of this blocks lines in the debug bus line name offsets array. */
+ __le16 names_offset;
+};
+
+
+/*
+ * Block Debug line data
+ */
+struct dbg_bus_line {
+ u8 data;
+/* Number of groups in the line (0-3) */
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+/* Indicates if this is a 128b line (0) or a 256b line (1). */
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
+/* Four 2-bit values, indicating the size of each group minus 1 (i.e.
+ * value=0 means size=1, value=1 means size=2, etc), starting from lsb.
+ * The sizes are in dwords (if is_256b=0) or in qwords (if is_256b=1).
+ */
+ u8 group_sizes;
+};
+
+
+/*
* condition header for registers dump
*/
struct dbg_dump_cond_hdr {
@@ -367,8 +432,11 @@ struct dbg_dump_mem {
/* register size (in dwords) */
#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
#define DBG_DUMP_MEM_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_RESERVED_MASK 0xFF
-#define DBG_DUMP_MEM_RESERVED_SHIFT 24
+/* indicates if the register is wide-bus */
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
};
@@ -378,10 +446,13 @@ struct dbg_dump_mem {
struct dbg_dump_reg {
__le32 data;
/* register address (in dwords) */
-#define DBG_DUMP_REG_ADDRESS_MASK 0xFFFFFF
-#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
};
@@ -414,8 +485,11 @@ struct dbg_idle_chk_cond_hdr {
struct dbg_idle_chk_cond_reg {
__le32 data;
/* Register GRC address (in dwords) */
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
/* value from block_id enum */
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
@@ -431,8 +505,11 @@ struct dbg_idle_chk_cond_reg {
struct dbg_idle_chk_info_reg {
__le32 data;
/* Register GRC address (in dwords) */
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
/* value from block_id enum */
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
@@ -534,17 +611,21 @@ enum dbg_idle_chk_severity_types {
* Debug Bus block data
*/
struct dbg_bus_block_data {
-/* Indicates if the block is enabled for recording (0/1) */
- u8 enabled;
- u8 hw_id /* HW ID associated with the block */;
+ __le16 data;
+/* 4-bit value: bit i set -> dword/qword i is enabled. */
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
+/* Number of dwords/qwords to shift right the debug data (0-3) */
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
+/* 4-bit value: bit i set -> dword/qword i is forced valid. */
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
+/* 4-bit value: bit i set -> dword/qword i frame bit is forced. */
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
u8 line_num /* Debug line number to select */;
- u8 right_shift /* Number of units to right the debug data (0-3) */;
- u8 cycle_en /* 4-bit value: bit i set -> unit i is enabled. */;
-/* 4-bit value: bit i set -> unit i is forced valid. */
- u8 force_valid;
-/* 4-bit value: bit i set -> unit i frame bit is forced. */
- u8 force_frame;
- u8 reserved;
+ u8 hw_id /* HW ID associated with the block */;
};
@@ -594,6 +675,21 @@ enum dbg_bus_constraint_ops {
/*
+ * Debug Bus trigger state data
+ */
+struct dbg_bus_trigger_state_data {
+ u8 data;
+/* 4-bit value: bit i set -> dword i of the trigger state block
+ * (after right shift) is enabled.
+ */
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
+/* 4-bit value: bit i set -> dword i is compared by a constraint */
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+};
+
+/*
* Debug Bus memory address
*/
struct dbg_bus_mem_addr {
@@ -640,14 +736,8 @@ union dbg_bus_storm_eid_params {
* Debug Bus Storm data
*/
struct dbg_bus_storm_data {
-/* Indicates if the Storm is enabled for fast debug recording (0/1) */
- u8 fast_enabled;
-/* Fast debug Storm mode, valid only if fast_enabled is set */
- u8 fast_mode;
-/* Indicates if the Storm is enabled for slow debug recording (0/1) */
- u8 slow_enabled;
-/* Slow debug Storm mode, valid only if slow_enabled is set */
- u8 slow_mode;
+ u8 enabled /* indicates if the Storm is enabled for recording */;
+ u8 mode /* Storm debug mode, valid only if the Storm is enabled */;
u8 hw_id /* HW ID associated with the Storm */;
u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */;
/* 1 = EID range filter, 0 = EID mask filter. Valid only if eid_filter_en is
@@ -657,7 +747,6 @@ struct dbg_bus_storm_data {
u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
/* EID filter params to filter on. Valid only if eid_filter_en is set. */
union dbg_bus_storm_eid_params eid_filter_params;
- __le16 reserved;
/* CID to filter on. Valid only if cid_filter_en is set. */
__le32 cid;
};
@@ -669,20 +758,18 @@ struct dbg_bus_data {
__le32 app_version /* The tools version number of the application */;
u8 state /* The current debug bus state */;
u8 hw_dwords /* HW dwords per cycle */;
- u8 next_hw_id /* Next HW ID to be associated with an input */;
+/* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the
+ * HW ID of dword/qword i
+ */
+ __le16 hw_id_mask;
u8 num_enabled_blocks /* Number of blocks enabled for recording */;
u8 num_enabled_storms /* Number of Storms enabled for recording */;
u8 target /* Output target */;
- u8 next_trigger_state /* ID of next trigger state to be added */;
-/* ID of next filter/trigger constraint to be added */
- u8 next_constraint_id;
u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */;
u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */;
/* Indicates if timestamp recording is enabled (0/1) */
u8 timestamp_input_en;
u8 filter_en /* Indicates if the recording filter is enabled (0/1) */;
-/* Indicates if the recording trigger is enabled (0/1) */
- u8 trigger_en;
/* If true, the next added constraint belong to the filter. Otherwise,
* it belongs to the last added trigger state. Valid only if either filter or
* triggers are enabled.
@@ -696,6 +783,14 @@ struct dbg_bus_data {
* Valid only if both filter and trigger are enabled (0/1)
*/
u8 filter_post_trigger;
+ __le16 reserved;
+/* Indicates if the recording trigger is enabled (0/1) */
+ u8 trigger_en;
+/* trigger states data */
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 next_trigger_state /* ID of next trigger state to be added */;
+/* ID of next filter/trigger constraint to be added */
+ u8 next_constraint_id;
/* If true, all inputs are associated with HW ID 0. Otherwise, each input is
* assigned a different HW ID (0/1)
*/
@@ -706,9 +801,8 @@ struct dbg_bus_data {
* DBG_BUS_TARGET_ID_PCI.
*/
struct dbg_bus_pci_buf_data pci_buf;
- __le16 reserved;
/* Debug Bus data for each block */
- struct dbg_bus_block_data blocks[80];
+ struct dbg_bus_block_data blocks[88];
/* Debug Bus data for each block */
struct dbg_bus_storm_data storms[6];
};
@@ -738,17 +832,6 @@ enum dbg_bus_frame_modes {
/*
- * Debug bus input types
- */
-enum dbg_bus_input_types {
- DBG_BUS_INPUT_TYPE_STORM,
- DBG_BUS_INPUT_TYPE_BLOCK,
- MAX_DBG_BUS_INPUT_TYPES
-};
-
-
-
-/*
* Debug bus other engine mode
*/
enum dbg_bus_other_engine_modes {
@@ -842,16 +925,17 @@ enum dbg_bus_targets {
};
+
/*
* GRC Dump data
*/
struct dbg_grc_data {
+/* Indicates if the GRC parameters were initialized */
+ u8 params_initialized;
+ u8 reserved1;
+ __le16 reserved2;
/* Value of each GRC parameter. Array size must match enum dbg_grc_params. */
- __le32 param_val[40];
-/* Indicates for each GRC parameter if it was set by the user (0/1).
- * Array size must match the enum dbg_grc_params.
- */
- u8 param_set_by_user[40];
+ __le32 param_val[48];
};
@@ -901,6 +985,8 @@ enum dbg_grc_params {
DBG_GRC_PARAM_PARITY_SAFE,
DBG_GRC_PARAM_DUMP_CM /* dump CM memories (0/1) */,
DBG_GRC_PARAM_DUMP_PHY /* dump PHY memories (0/1) */,
+ DBG_GRC_PARAM_NO_MCP /* dont perform MCP commands (0/1) */,
+ DBG_GRC_PARAM_NO_FW_VER /* dont read FW/MFW version (0/1) */,
MAX_DBG_GRC_PARAMS
};
@@ -975,7 +1061,10 @@ enum dbg_status {
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
- DBG_STATUS_MULTI_BLOCKS_WITH_FILTER,
+ DBG_STATUS_FILTER_BUG,
+ DBG_STATUS_NON_MATCHING_LINES,
+ DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET,
+ DBG_STATUS_DBG_BUS_IN_USE,
MAX_DBG_STATUS
};
@@ -1014,9 +1103,9 @@ struct dbg_tools_data {
struct idle_chk_data idle_chk /* Idle Check data */;
u8 mode_enable[40] /* Indicates if a mode is enabled (0/1) */;
/* Indicates if a block is in reset state (0/1) */
- u8 block_in_reset[80];
+ u8 block_in_reset[88];
u8 chip_id /* Chip ID (from enum chip_ids) */;
- u8 platform_id /* Platform ID (from enum platform_ids) */;
+ u8 platform_id /* Platform ID */;
u8 initialized /* Indicates if the data was initialized */;
u8 reserved;
};
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index e26c1833..397c408d 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -34,315 +34,315 @@ struct xstorm_eth_conn_st_ctx {
__le32 reserved[60];
};
-struct xstorm_eth_conn_ag_ctx {
+struct e4_xstorm_eth_conn_ag_ctx {
u8 reserved0 /* cdu_validation */;
u8 eth_state /* state */;
u8 flags0;
/* exist_in_qm0 */
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
/* exist_in_qm1 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
/* exist_in_qm2 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
/* exist_in_qm3 */
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
/* bit4 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
/* cf_array_active */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
/* bit6 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
/* bit7 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
/* bit8 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
/* bit9 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
/* bit10 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
/* bit11 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
/* bit12 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
/* bit13 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
/* bit14 */
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
/* bit15 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
/* timer0cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
/* timer1cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
/* timer2cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
/* timer_stop_all */
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
/* cf4 */
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
/* cf5 */
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
/* cf6 */
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
/* cf7 */
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
/* cf8 */
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
/* cf9 */
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
/* cf10 */
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
/* cf11 */
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
/* cf12 */
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
/* cf13 */
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
/* cf14 */
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
/* cf15 */
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
/* cf16 */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
/* cf_array_cf */
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
/* cf18 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
/* cf19 */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
/* cf20 */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
/* cf21 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
/* cf22 */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
/* cf0en */
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
/* cf1en */
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
/* cf2en */
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
/* cf3en */
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
/* cf4en */
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
/* cf5en */
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
/* cf6en */
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
/* cf7en */
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
/* cf8en */
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
/* cf9en */
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
/* cf10en */
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
/* cf11en */
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
/* cf12en */
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
/* cf13en */
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
/* cf14en */
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
/* cf15en */
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
/* cf16en */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
/* cf_array_cf_en */
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
/* cf18en */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
/* cf19en */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
/* cf20en */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
/* cf21en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
/* cf22en */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
/* cf23en */
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
/* rule0en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
/* rule1en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
/* rule2en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
/* rule3en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
/* rule4en */
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
/* rule5en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
/* rule6en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
/* rule7en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
/* rule8en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
/* rule9en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
/* rule10en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
/* rule11en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
/* rule12en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
/* rule13en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
/* rule14en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
/* rule15en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
/* rule16en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
/* rule17en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
/* rule18en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
/* rule19en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
/* rule20en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
/* rule21en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
/* rule22en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
/* rule23en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
/* rule24en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
/* rule25en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
/* bit16 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
/* bit17 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
/* bit18 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
/* bit19 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
/* bit20 */
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
/* bit21 */
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
/* cf23 */
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id /* byte2 */;
__le16 physical_q0 /* physical_q0 */;
- __le16 quota /* physical_q1 */;
+ __le16 e5_reserved1 /* physical_q1 */;
__le16 edpm_num_bds /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_prod /* word4 */;
@@ -375,7 +375,7 @@ struct xstorm_eth_conn_ag_ctx {
u8 byte13 /* byte13 */;
u8 byte14 /* byte14 */;
u8 byte15 /* byte15 */;
- u8 byte16 /* byte16 */;
+ u8 e5_reserved /* e5_reserved */;
__le16 word11 /* word11 */;
__le32 reg10 /* reg10 */;
__le32 reg11 /* reg11 */;
@@ -400,47 +400,47 @@ struct ystorm_eth_conn_st_ctx {
__le32 reserved[8];
};
-struct ystorm_eth_conn_ag_ctx {
+struct e4_ystorm_eth_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 state /* state */;
u8 flags0;
/* exist_in_qm0 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
/* exist_in_qm1 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
/* cf0en */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
/* cf1en */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
/* cf2en */
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
/* rule0en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
/* rule1en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
/* rule2en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
/* rule3en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
/* rule4en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 tx_q0_int_coallecing_timeset /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* word0 */;
@@ -454,89 +454,89 @@ struct ystorm_eth_conn_ag_ctx {
__le32 reg3 /* reg3 */;
};
-struct tstorm_eth_conn_ag_ctx {
+struct e4_tstorm_eth_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
@@ -558,88 +558,88 @@ struct tstorm_eth_conn_ag_ctx {
__le32 reg10 /* reg10 */;
};
-struct ustorm_eth_conn_ag_ctx {
+struct e4_ustorm_eth_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
/* exist_in_qm0 */
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
/* exist_in_qm1 */
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
/* timer0cf */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
/* timer1cf */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
/* timer2cf */
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
/* timer_stop_all */
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
/* cf4 */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
/* cf5 */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
/* cf6 */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
/* cf0en */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
/* cf1en */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
/* cf2en */
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
/* cf3en */
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
/* cf4en */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
/* cf5en */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
/* cf6en */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
/* rule0en */
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
/* rule1en */
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
/* rule2en */
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
/* rule3en */
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
/* rule4en */
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
/* rule5en */
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
/* rule6en */
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
/* rule7en */
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
/* rule8en */
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* conn_dpi */;
@@ -678,15 +678,15 @@ struct eth_conn_context {
/* xstorm storm context */
struct xstorm_eth_conn_st_ctx xstorm_st_context;
/* xstorm aggregative context */
- struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
/* ystorm storm context */
struct ystorm_eth_conn_st_ctx ystorm_st_context;
/* ystorm aggregative context */
- struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
/* tstorm aggregative context */
- struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
/* ustorm aggregative context */
- struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
/* ustorm storm context */
struct ustorm_eth_conn_st_ctx ustorm_st_context;
/* mstorm storm context */
@@ -739,6 +739,7 @@ enum eth_error_code {
ETH_FILTERS_VNI_ADD_FAIL_FULL,
/* vni add filters command failed due to duplicate VNI filter */
ETH_FILTERS_VNI_ADD_FAIL_DUP,
+ ETH_FILTERS_GFT_UPDATE_FAIL /* Fail update GFT filter. */,
MAX_ETH_ERROR_CODE
};
@@ -982,8 +983,10 @@ struct eth_vport_rss_config {
u8 rss_id;
u8 rss_mode /* The RSS mode for this function */;
u8 update_rss_key /* if set update the rss key */;
- u8 update_rss_ind_table /* if set update the indirection table */;
- u8 update_rss_capabilities /* if set update the capabilities */;
+/* if set update the indirection table values */
+ u8 update_rss_ind_table;
+/* if set update the capabilities and indirection table size. */
+ u8 update_rss_capabilities;
u8 tbl_size /* rss mask (Tbl size) */;
__le32 reserved2[2];
/* RSS indirection table */
@@ -1267,7 +1270,10 @@ struct rx_update_gft_filter_data {
/* Use enum to set type of flow using gft HW logic blocks */
u8 filter_type;
u8 filter_action /* Use to set type of action on filter */;
- u8 reserved;
+/* 0 - dont assert in case of error. Just return an error code. 1 - assert in
+ * case of error.
+ */
+ u8 assert_on_error;
};
@@ -1446,7 +1452,15 @@ struct vport_update_ramrod_data_cmn {
/* If set, MTU will be updated. Vport must be not active. */
u8 update_mtu_flg;
__le16 mtu /* New MTU value. Used if update_mtu_flg are set */;
- u8 reserved[2];
+/* If set, ctl_frame_mac_check_en and ctl_frame_ethtype_check_en will be
+ * updated
+ */
+ u8 update_ctl_frame_checks_en_flg;
+/* If set, Contorl frames will be filtered according to MAC check. */
+ u8 ctl_frame_mac_check_en;
+/* If set, Contorl frames will be filtered according to ethtype check. */
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[15];
};
struct vport_update_ramrod_mcast {
@@ -1472,6 +1486,668 @@ struct vport_update_ramrod_data {
+struct E4XstormEthConnAgCtxDqExtLdPart {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+/* bit6 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+/* bit7 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+ u8 flags1;
+/* bit8 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+/* bit9 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+/* bit12 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+/* bit13 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+/* bit14 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+/* timer1cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+/* timer2cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+/* cf4 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+/* cf5 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+/* cf6 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+/* cf7 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+ u8 flags4;
+/* cf8 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+/* cf9 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+/* cf10 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+/* cf11 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+/* cf12 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+/* cf13 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+/* cf14 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+/* cf15 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+/* cf16 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+/* cf18 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+/* cf19 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+/* cf20 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+/* cf21 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+/* cf22 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+/* cf1en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+/* cf3en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+/* cf4en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+/* cf5en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+/* cf6en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+/* cf7en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+/* cf8en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+/* cf9en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+/* cf11en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+/* cf12en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+/* cf13en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+/* cf14en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+/* cf15en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+/* cf16en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+/* cf23 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 e5_reserved1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 tx_class /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+};
+
+
+struct e4_mstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+
+
+
+
+struct e4_xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+/* bit12 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+/* bit13 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+/* bit14 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+/* timer1cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+/* timer2cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+/* cf1en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+/* cf3en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+/* cf4en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+/* cf5en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+/* cf6en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+/* cf7en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+/* cf8en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+/* cf9en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+/* cf11en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+/* cf12en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+/* cf13en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+/* cf14en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+/* cf15en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+/* cf16en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 e5_reserved1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 tx_class /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+};
+
+
+
/*
* GFT CAM line struct
*/
@@ -1620,8 +2296,7 @@ enum gft_profile_upper_protocol_type {
* GFT RAM line struct
*/
struct gft_ram_line {
- __le32 low32bits;
-/* (use enum gft_vlan_select) */
+ __le32 lo;
#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
@@ -1684,7 +2359,7 @@ struct gft_ram_line {
#define GFT_RAM_LINE_DST_PORT_SHIFT 30
#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
- __le32 high32bits;
+ __le32 hi;
#define GFT_RAM_LINE_DSCP_MASK 0x1
#define GFT_RAM_LINE_DSCP_SHIFT 0
#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
@@ -1722,690 +2397,4 @@ enum gft_vlan_select {
};
-struct mstorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-/* exist_in_qm0 */
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-/* exist_in_qm1 */
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0 /* word0 */;
- __le16 word1 /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
-};
-
-
-
-
-struct xstormEthConnAgCtxDqExtLdPart {
- u8 reserved0 /* cdu_validation */;
- u8 eth_state /* state */;
- u8 flags0;
-/* exist_in_qm0 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-/* exist_in_qm1 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
-/* exist_in_qm2 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
-/* exist_in_qm3 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-/* bit4 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
-/* cf_array_active */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
-/* bit6 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
-/* bit7 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
- u8 flags1;
-/* bit8 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
-/* bit9 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
-/* bit10 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
-/* bit11 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-/* bit12 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-/* bit13 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
-/* bit14 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
-/* bit15 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
- u8 flags2;
-/* timer0cf */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-/* timer1cf */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-/* timer2cf */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-/* timer_stop_all */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
- u8 flags3;
-/* cf4 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-/* cf5 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-/* cf6 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-/* cf7 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
- u8 flags4;
-/* cf8 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-/* cf9 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-/* cf10 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-/* cf11 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
- u8 flags5;
-/* cf12 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-/* cf13 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-/* cf14 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-/* cf15 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
- u8 flags6;
-/* cf16 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
-/* cf_array_cf */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
-/* cf18 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
-/* cf19 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
- u8 flags7;
-/* cf20 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
-/* cf21 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
-/* cf22 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-/* cf0en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-/* cf1en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
- u8 flags8;
-/* cf2en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-/* cf3en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-/* cf4en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-/* cf5en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-/* cf6en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-/* cf7en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
-/* cf8en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-/* cf9en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
- u8 flags9;
-/* cf10en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-/* cf11en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-/* cf12en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-/* cf13en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-/* cf14en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-/* cf15en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-/* cf16en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
-/* cf_array_cf_en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
- u8 flags10;
-/* cf18en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
-/* cf19en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
-/* cf20en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
-/* cf21en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
-/* cf22en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-/* cf23en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
-/* rule0en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
-/* rule1en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
- u8 flags11;
-/* rule2en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
-/* rule3en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
-/* rule4en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
-/* rule5en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-/* rule6en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-/* rule7en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-/* rule8en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-/* rule9en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
- u8 flags12;
-/* rule10en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-/* rule11en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-/* rule12en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-/* rule13en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-/* rule14en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-/* rule15en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-/* rule16en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-/* rule17en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
- u8 flags13;
-/* rule18en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-/* rule19en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-/* rule20en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-/* rule21en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-/* rule22en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-/* rule23en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-/* rule24en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-/* rule25en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
- u8 flags14;
-/* bit16 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
-/* bit17 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
-/* bit18 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
-/* bit19 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-/* bit20 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
-/* bit21 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-/* cf23 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
- u8 edpm_event_id /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 quota /* physical_q1 */;
- __le16 edpm_num_bds /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_prod /* word4 */;
- __le16 tx_class /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
- u8 byte3 /* byte3 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- u8 byte6 /* byte6 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
-};
-
-
-
-struct xstorm_eth_hw_conn_ag_ctx {
- u8 reserved0 /* cdu_validation */;
- u8 eth_state /* state */;
- u8 flags0;
-/* exist_in_qm0 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-/* exist_in_qm1 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
-/* exist_in_qm2 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
-/* exist_in_qm3 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-/* bit4 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
-/* cf_array_active */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
-/* bit6 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
-/* bit7 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
- u8 flags1;
-/* bit8 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
-/* bit9 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
-/* bit10 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
-/* bit11 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
-/* bit12 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
-/* bit13 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
-/* bit14 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-/* bit15 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
- u8 flags2;
-/* timer0cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
-/* timer1cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
-/* timer2cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
-/* timer_stop_all */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
- u8 flags3;
-/* cf4 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
-/* cf5 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
-/* cf6 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
-/* cf7 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
- u8 flags4;
-/* cf8 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
-/* cf9 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
-/* cf10 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
-/* cf11 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
- u8 flags5;
-/* cf12 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
-/* cf13 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
-/* cf14 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
-/* cf15 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
- u8 flags6;
-/* cf16 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-/* cf_array_cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-/* cf18 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
-/* cf19 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
- u8 flags7;
-/* cf20 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-/* cf21 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
-/* cf22 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-/* cf0en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
-/* cf1en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
- u8 flags8;
-/* cf2en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
-/* cf3en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
-/* cf4en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
-/* cf5en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
-/* cf6en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
-/* cf7en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
-/* cf8en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
-/* cf9en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
- u8 flags9;
-/* cf10en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
-/* cf11en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
-/* cf12en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
-/* cf13en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
-/* cf14en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
-/* cf15en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
-/* cf16en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-/* cf_array_cf_en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
- u8 flags10;
-/* cf18en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-/* cf19en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-/* cf20en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-/* cf21en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
-/* cf22en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-/* cf23en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-/* rule0en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
-/* rule1en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
- u8 flags11;
-/* rule2en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
-/* rule3en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
-/* rule4en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-/* rule5en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
-/* rule6en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
-/* rule7en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
-/* rule8en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-/* rule9en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
- u8 flags12;
-/* rule10en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
-/* rule11en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
-/* rule12en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-/* rule13en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-/* rule14en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
-/* rule15en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
-/* rule16en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
-/* rule17en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
- u8 flags13;
-/* rule18en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
-/* rule19en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
-/* rule20en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-/* rule21en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-/* rule22en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-/* rule23en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-/* rule24en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-/* rule25en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
- u8 flags14;
-/* bit16 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-/* bit17 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-/* bit18 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-/* bit19 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-/* bit20 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-/* bit21 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-/* cf23 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
- u8 edpm_event_id /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 quota /* physical_q1 */;
- __le16 edpm_num_bds /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_prod /* word4 */;
- __le16 tx_class /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
-};
-
-
#endif /* __ECORE_HSI_ETH__ */
diff --git a/drivers/net/qede/base/ecore_hsi_init_tool.h b/drivers/net/qede/base/ecore_hsi_init_tool.h
index 410b0bcb..1f57e9b2 100644
--- a/drivers/net/qede/base/ecore_hsi_init_tool.h
+++ b/drivers/net/qede/base/ecore_hsi_init_tool.h
@@ -22,6 +22,13 @@
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
+enum chip_ids {
+ CHIP_BB,
+ CHIP_K2,
+ CHIP_E5,
+ MAX_CHIP_IDS
+};
+
struct fw_asserts_ram_section {
/* The offset of the section in the RAM in RAM lines (64-bit units) */
@@ -69,51 +76,6 @@ struct fw_info_location {
__le32 size;
};
-
-
-
-enum init_modes {
- MODE_BB_A0,
- MODE_BB_B0,
- MODE_K2,
- MODE_ASIC,
- MODE_EMUL_REDUCED,
- MODE_EMUL_FULL,
- MODE_FPGA,
- MODE_CHIPSIM,
- MODE_SF,
- MODE_MF_SD,
- MODE_MF_SI,
- MODE_PORTS_PER_ENG_1,
- MODE_PORTS_PER_ENG_2,
- MODE_PORTS_PER_ENG_4,
- MODE_100G,
- MODE_40G,
- MODE_EAGLE_ENG1_WORKAROUND,
- MAX_INIT_MODES
-};
-
-
-enum init_phases {
- PHASE_ENGINE,
- PHASE_PORT,
- PHASE_PF,
- PHASE_VF,
- PHASE_QM_PF,
- MAX_INIT_PHASES
-};
-
-
-enum init_split_types {
- SPLIT_TYPE_NONE,
- SPLIT_TYPE_PORT,
- SPLIT_TYPE_PF,
- SPLIT_TYPE_PORT_PF,
- SPLIT_TYPE_VF,
- MAX_INIT_SPLIT_TYPES
-};
-
-
/*
* Binary buffer header
*/
@@ -204,8 +166,46 @@ union init_array_hdr {
};
+enum init_modes {
+ MODE_BB_A0_DEPRECATED,
+ MODE_BB,
+ MODE_K2,
+ MODE_ASIC,
+ MODE_EMUL_REDUCED,
+ MODE_EMUL_FULL,
+ MODE_FPGA,
+ MODE_CHIPSIM,
+ MODE_SF,
+ MODE_MF_SD,
+ MODE_MF_SI,
+ MODE_PORTS_PER_ENG_1,
+ MODE_PORTS_PER_ENG_2,
+ MODE_PORTS_PER_ENG_4,
+ MODE_100G,
+ MODE_E5,
+ MAX_INIT_MODES
+};
+enum init_phases {
+ PHASE_ENGINE,
+ PHASE_PORT,
+ PHASE_PF,
+ PHASE_VF,
+ PHASE_QM_PF,
+ MAX_INIT_PHASES
+};
+
+
+enum init_split_types {
+ SPLIT_TYPE_NONE,
+ SPLIT_TYPE_PORT,
+ SPLIT_TYPE_PF,
+ SPLIT_TYPE_PORT_PF,
+ SPLIT_TYPE_VF,
+ MAX_INIT_SPLIT_TYPES
+};
+
/*
* init array types
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 7f4db0a0..2bcc32d3 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -86,7 +86,6 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
if (p_hwfn->p_ptt_pool)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
- p_hwfn->p_ptt_pool = OSAL_NULL;
}
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
@@ -496,8 +495,8 @@ static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
return DMAE_REG_GO_C0 + (idx << 2);
}
-static enum _ecore_status_t
-ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
u8 idx_cmd = p_hwfn->dmae_info.channel, i;
@@ -774,6 +773,18 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_status = ECORE_SUCCESS;
u32 offset = 0;
+ if (p_hwfn->p_dev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
+ (unsigned long)src_addr, src_type,
+ (unsigned long)dst_addr, dst_type,
+ size_in_dwords);
+ /* Return success to let the flow to be completed successfully
+ * w/o any error handling.
+ */
+ return ECORE_SUCCESS;
+ }
+
ecore_dmae_opcode(p_hwfn,
(src_type == ECORE_DMAE_ADDRESS_GRC),
(dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
@@ -906,44 +917,6 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
return rc;
}
-u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
- enum protocol_type proto,
- union ecore_qm_pq_params *p_params)
-{
- u16 pq_id = 0;
-
- if ((proto == PROTOCOLID_CORE ||
- proto == PROTOCOLID_ETH) && !p_params) {
- DP_NOTICE(p_hwfn, true,
- "Protocol %d received NULL PQ params\n", proto);
- return 0;
- }
-
- switch (proto) {
- case PROTOCOLID_CORE:
- if (p_params->core.tc == LB_TC)
- pq_id = p_hwfn->qm_info.pure_lb_pq;
- else if (p_params->core.tc == OOO_LB_TC)
- pq_id = p_hwfn->qm_info.ooo_pq;
- else
- pq_id = p_hwfn->qm_info.offload_pq;
- break;
- case PROTOCOLID_ETH:
- pq_id = p_params->eth.tc;
- /* TODO - multi-CoS for VFs? */
- if (p_params->eth.is_vf)
- pq_id += p_hwfn->qm_info.vf_queues_offset +
- p_params->eth.vf_id;
- break;
- default:
- pq_id = 0;
- }
-
- pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, ECORE_PQ);
-
- return pq_id;
-}
-
void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
enum ecore_hw_err_type err_type)
{
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index de08650b..004ab351 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -17,112 +17,156 @@
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
-enum CmInterfaceEnum {
- MCM_SEC,
- MCM_PRI,
- UCM_SEC,
- UCM_PRI,
- TCM_SEC,
- TCM_PRI,
- YCM_SEC,
- YCM_PRI,
- XCM_SEC,
- XCM_PRI,
- NUM_OF_CM_INTERFACES
+
+#define CDU_VALIDATION_DEFAULT_CFG 61
+
+static u16 con_region_offsets[3][E4_NUM_OF_CONNECTION_TYPES] = {
+ { 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
+ { 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
+ { 608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
+};
+static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
+ { 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};
-/* general constants */
-#define QM_PQ_MEM_4KB(pq_size) \
-(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
-#define QM_PQ_SIZE_256B(pq_size) \
-(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
-#define QM_INVALID_PQ_ID 0xffff
-/* feature enable */
-#define QM_BYPASS_EN 1
-#define QM_BYTE_CRD_EN 1
-/* other PQ constants */
-#define QM_OTHER_PQS_PER_PF 4
-/* WFQ constants */
-#define QM_WFQ_UPPER_BOUND 62500000
+
+/* General constants */
+#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
+ QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
+ 0)
+#define QM_INVALID_PQ_ID 0xffff
+
+/* Feature enable */
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+
+/* Other PQ constants */
+#define QM_OTHER_PQS_PER_PF 4
+
+/* WFQ constants: */
+
+/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_WFQ_UPPER_BOUND 62500000
+
+/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+
+/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_SHIFT 5
+
+/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL 43750000
-/* RL constants */
-#define QM_RL_UPPER_BOUND 62500000
-#define QM_RL_PERIOD 5
+
+/* 0.7 * upper bound (62500000) */
+#define QM_WFQ_MAX_INC_VAL 43750000
+
+/* RL constants: */
+
+/* Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_RL_UPPER_BOUND 62500000
+
+/* Period in us */
+#define QM_RL_PERIOD 5
+
+/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
-#define QM_RL_MAX_INC_VAL 43750000
-/* RL increment value - the factor of 1.01 was added after seeing only
- * 99% factor reached in a 25Gbps port with DPDK RFC 2544 test.
- * In this scenario the PF RL was reducing the line rate to 99% although
- * the credit increment value was the correct one and FW calculated
- * correct packet sizes. The reason for the inaccuracy of the RL is
- * unknown at this point.
+
+/* 0.7 * upper bound (62500000) */
+#define QM_RL_MAX_INC_VAL 43750000
+
+/* RL increment value - rate is specified in mbps. the factor of 1.01 was
+ * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
+ * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
+ * although the credit increment value was the correct one and FW calculated
+ * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
+ * this point.
*/
-/* rate in mbps */
#define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * \
- QM_RL_PERIOD * 101) / (8 * 100)), 1)
+ QM_RL_PERIOD * 101) / (8 * 100)), 1)
+
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1
-/* Command Queue constants */
-#define PBF_CMDQ_PURE_LB_LINES 150
+
+/* Command Queue constants: */
+
+/* Pure LB CmdQ lines (+spare) */
+#define PBF_CMDQ_PURE_LB_LINES 150
+
#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
-(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
-voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \
-- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
-(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
-(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
/* BTB: blocks constants (block size = 256B) */
-#define BTB_JUMBO_PKT_BLOCKS 38 /* 256B blocks in 9700B packet */
-/* headroom per-port */
-#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
+
+/* 256B blocks in 9700B packet */
+#define BTB_JUMBO_PKT_BLOCKS 38
+
+/* Headroom per-port */
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10
-#define BTB_PURE_LB_RATIO 7 /* factored (hence really 0.7) */
+
+/* Factored (hence really 0.7) */
+#define BTB_PURE_LB_RATIO 7
+
/* QM stop command constants */
-#define QM_STOP_PQ_MASK_WIDTH 32
-#define QM_STOP_CMD_ADDR 0x2
-#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 2
+#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
-#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
-#define QM_STOP_CMD_GROUP_ID_OFFSET 1
-#define QM_STOP_CMD_GROUP_ID_SHIFT 16
-#define QM_STOP_CMD_GROUP_ID_MASK 15
-#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
-#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
-#define QM_STOP_CMD_PQ_TYPE_MASK 1
-#define QM_STOP_CMD_MAX_POLL_COUNT 100
-#define QM_STOP_CMD_POLL_PERIOD_US 500
+#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+
/* QM command macros */
-#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
+#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
-SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
+ SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
+
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
-((port) * (max_phys_tcs_per_port) + (tc))
-#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
+ ((port) * (max_phys_tcs_per_port) + (tc))
+#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phys_tcs_per_port) \
-((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : LB_VOQ(port))
+ ((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : \
+ LB_VOQ(port))
+
+
/******************** INTERNAL IMPLEMENTATION *********************/
+
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
- /* enable RLs for all VOQs */
+ /* Enable RLs for all VOQs */
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
(1 << MAX_NUM_VOQS) - 1);
- /* write RL period */
+
+ /* Write RL period */
STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
QM_RL_UPPER_BOUND);
@@ -133,7 +177,8 @@ static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (pf_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
QM_WFQ_UPPER_BOUND);
@@ -145,12 +190,13 @@ static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
vport_rl_en ? 1 : 0);
if (vport_rl_en) {
- /* write RL period (use timer 0 only) */
+ /* Write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
@@ -163,7 +209,8 @@ static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
vport_wfq_en ? 1 : 0);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (vport_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
QM_WFQ_UPPER_BOUND);
@@ -176,13 +223,9 @@ static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
u8 voq, u16 cmdq_lines)
{
u32 qm_line_crd;
- /* In A0 - Limit the size of pbf queue so that only 511 commands
- * with the minimum size of 4 (FCoE minimum size)
- */
- bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
- if (is_bb_a0)
- cmdq_lines = OSAL_MIN_T(u32, cmdq_lines, 1022);
+
qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+
OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
(u32)cmdq_lines);
STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
@@ -198,38 +241,43 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
port_params[MAX_NUM_PORTS])
{
u8 tc, voq, port_id, num_tcs_in_port;
- /* clear PBF lines for all VOQs */
+
+ /* Clear PBF lines for all VOQs */
for (voq = 0; voq < MAX_NUM_VOQS; voq++)
STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- if (port_params[port_id].active) {
- u16 phys_lines, phys_lines_per_tc;
- /* find #lines to divide between active physical TCs */
- phys_lines =
- port_params[port_id].num_pbf_cmd_lines -
- PBF_CMDQ_PURE_LB_LINES;
- /* find #lines per active physical TC */
- num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1)
- num_tcs_in_port++;
- }
- phys_lines_per_tc = phys_lines / num_tcs_in_port;
- /* init registers per active TC */
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1) {
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- ecore_cmdq_lines_voq_rt_init(p_hwfn,
- voq, phys_lines_per_tc);
- }
+ u16 phys_lines, phys_lines_per_tc;
+
+ if (!port_params[port_id].active)
+ continue;
+
+ /* Find #lines to divide between the active physical TCs */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines -
+ PBF_CMDQ_PURE_LB_LINES;
+
+ /* Find #lines per active physical TC */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1)
+ num_tcs_in_port++;
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
+
+ /* Init registers per active TC */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ phys_lines_per_tc);
}
- /* init registers for pure LB TC */
- ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
- PBF_CMDQ_PURE_LB_LINES);
}
+
+ /* Init registers for pure LB TC */
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+ PBF_CMDQ_PURE_LB_LINES);
}
}
@@ -259,50 +307,51 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id, num_tcs_in_port;
u32 usable_blocks, pure_lb_blocks, phys_blocks;
+ u8 tc, voq, port_id, num_tcs_in_port;
+
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- if (port_params[port_id].active) {
- /* subtract headroom blocks */
- usable_blocks =
- port_params[port_id].num_btb_blocks -
- BTB_HEADROOM_BLOCKS;
-/* find blocks per physical TC. use factor to avoid floating arithmethic */
-
- num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1)
- num_tcs_in_port++;
- pure_lb_blocks =
- (usable_blocks * BTB_PURE_LB_FACTOR) /
- (num_tcs_in_port *
- BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
- pure_lb_blocks =
- OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
- pure_lb_blocks / BTB_PURE_LB_FACTOR);
- phys_blocks =
- (usable_blocks -
- pure_lb_blocks) /
- num_tcs_in_port;
- /* init physical TCs */
- for (tc = 0;
- tc < NUM_OF_PHYS_TCS;
- tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1) {
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- STORE_RT_REG(p_hwfn,
+ if (!port_params[port_id].active)
+ continue;
+
+ /* Subtract headroom blocks */
+ usable_blocks = port_params[port_id].num_btb_blocks -
+ BTB_HEADROOM_BLOCKS;
+
+ /* Find blocks per physical TC. use factor to avoid floating
+ * arithmethic.
+ */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1)
+ num_tcs_in_port++;
+
+ pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+ (num_tcs_in_port * BTB_PURE_LB_FACTOR +
+ BTB_PURE_LB_RATIO);
+ pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
+ pure_lb_blocks /
+ BTB_PURE_LB_FACTOR);
+ phys_blocks = (usable_blocks - pure_lb_blocks) /
+ num_tcs_in_port;
+
+ /* Init physical TCs */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn,
PBF_BTB_GUARANTEED_RT_OFFSET(voq),
phys_blocks);
- }
}
- /* init pure LB TC */
- STORE_RT_REG(p_hwfn,
- PBF_BTB_GUARANTEED_RT_OFFSET(
- LB_VOQ(port_id)), pure_lb_blocks);
}
+
+ /* Init pure LB TC */
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ(port_id)),
+ pure_lb_blocks);
}
}
@@ -323,59 +372,69 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
- u16 i, pq_id, pq_group;
- u16 num_pqs = num_pf_pqs + num_vf_pqs;
- u16 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
- u16 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
- bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
- /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+ /* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
- u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
- u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
- u32 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
- u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
- u32 mem_addr_4kb = base_mem_addr_4kb;
- /* set mapping from PQ group to PF */
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
+ u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group;
+ u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+
+ num_pqs = num_pf_pqs + num_vf_pqs;
+
+ first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
+
+ pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
+ vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
+ mem_addr_4kb = base_mem_addr_4kb;
+
+ /* Set mapping from PQ group to PF */
for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
- /* set PQ sizes */
+
+ /* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
QM_PQ_SIZE_256B(num_pf_cids));
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
QM_PQ_SIZE_256B(num_vf_cids));
- /* go over all Tx PQs */
+
+ /* Go over all Tx PQs */
for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
- struct qm_rf_pq_map tx_pq_map;
- u8 voq =
- VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
- bool is_vf_pq = (i >= num_pf_pqs);
- /* added to avoid compilation warning */
u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
- bool rl_valid = pq_params[i].rl_valid &&
- pq_params[i].vport_id < max_qm_global_rls;
- /* update first Tx PQ of VPORT/TC */
- u8 vport_id_in_pf = pq_params[i].vport_id - start_vport;
- u16 first_tx_pq_id =
- vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].
- tc_id];
+ struct qm_rf_pq_map tx_pq_map;
+ bool is_vf_pq, rl_valid;
+ u8 voq, vport_id_in_pf;
+ u16 first_tx_pq_id;
+
+ voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+ is_vf_pq = (i >= num_pf_pqs);
+ rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id <
+ max_qm_global_rls;
+
+ /* Update first Tx PQ of VPORT/TC */
+ vport_id_in_pf = pq_params[i].vport_id - start_vport;
+ first_tx_pq_id =
+ vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
if (first_tx_pq_id == QM_INVALID_PQ_ID) {
- /* create new VP PQ */
+ /* Create new VP PQ */
vport_params[vport_id_in_pf].
first_tx_pq_id[pq_params[i].tc_id] = pq_id;
first_tx_pq_id = pq_id;
- /* map VP PQ to VOQ and PF */
+
+ /* Map VP PQ to VOQ and PF */
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
(voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
QM_WFQ_VP_PQ_PF_SHIFT));
}
- /* check RL ID */
+
+ /* Check RL ID */
if (pq_params[i].rl_valid && pq_params[i].vport_id >=
max_qm_global_rls)
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT ID for rate limiter config");
- /* fill PQ map entry */
+ "Invalid VPORT ID for rate limiter config\n");
+
+ /* Fill PQ map entry */
OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
@@ -386,44 +445,30 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
pq_params[i].wrr_group);
- /* write PQ map entry to CAM */
+
+ /* Write PQ map entry to CAM */
STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
*((u32 *)&tx_pq_map));
- /* set base address */
+
+ /* Set base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
- /* check if VF PQ */
+
+ /* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
- /* if PQ is associated with a VF, add indication to PQ
- * VF mask
- */
- tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
- (1 << (pq_id % tx_pq_vf_mask_width));
+ tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
+ (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
mem_addr_4kb += vport_pq_mem_4kb;
} else {
mem_addr_4kb += pq_mem_4kb;
}
}
- /* store Tx PQ VF mask to size select register */
- for (i = 0; i < num_tx_pq_vf_masks; i++) {
- if (tx_pq_vf_mask[i]) {
- if (is_bb_a0) {
- /* A0-only: perform read-modify-write
- *(fixed in B0)
- */
- u32 curr_mask =
- is_first_pf ? 0 : ecore_rd(p_hwfn, p_ptt,
- QM_REG_MAXPQSIZETXSEL_0
- + i * 4);
- STORE_RT_REG(p_hwfn,
- QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
- i, curr_mask | tx_pq_vf_mask[i]);
- } else
- STORE_RT_REG(p_hwfn,
- QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
- i, tx_pq_vf_mask[i]);
- }
- }
+
+ /* Store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++)
+ if (tx_pq_vf_mask[i])
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
+ i, tx_pq_vf_mask[i]);
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
@@ -433,20 +478,26 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u32 num_pf_cids,
u32 num_tids, u32 base_mem_addr_4kb)
{
- u16 i, pq_id;
-/* a single other PQ grp is used in each PF, where PQ group i is used in PF i */
-
- u16 pq_group = pf_id;
- u32 pq_size = num_pf_cids + num_tids;
- u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
- u32 mem_addr_4kb = base_mem_addr_4kb;
- /* map PQ group to PF */
+ u32 pq_size, pq_mem_4kb, mem_addr_4kb;
+ u16 i, pq_id, pq_group;
+
+ /* A single other PQ group is used in each PF, where PQ group i is used
+ * in PF i.
+ */
+ pq_group = pf_id;
+ pq_size = num_pf_cids + num_tids;
+ pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ mem_addr_4kb = base_mem_addr_4kb;
+
+ /* Map PQ group to PF */
STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
- /* set PQ sizes */
+
+ /* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
- /* set base address */
+
+ /* Set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
@@ -454,7 +505,10 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
mem_addr_4kb += pq_mem_4kb;
}
}
-/* Prepare PF WFQ runtime init values for specified PF. Return -1 on error. */
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
@@ -463,76 +517,89 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u16 num_tx_pqs,
struct init_qm_pq_params *pq_params)
{
+ u32 inc_val, crd_reg_offset;
+ u8 voq;
u16 i;
- u32 inc_val;
- u32 crd_reg_offset =
- (pf_id <
- MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
- QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB);
+
+ crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+ (pf_id % MAX_NUM_PFS_BB);
+
inc_val = QM_WFQ_INC_VAL(pf_wfq);
- if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF WFQ weight configuration\n");
return -1;
}
+
for (i = 0; i < num_tx_pqs; i++) {
- u8 voq =
- VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+ voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
(u32)QM_WFQ_CRD_REG_SIGN_BIT);
}
+
STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
return 0;
}
-/* Prepare PF RL runtime init values for specified PF. Return -1 on error. */
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Return -1 on error.
+ */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
- u32 inc_val = QM_RL_INC_VAL(pf_rl);
+ u32 inc_val;
+
+ inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF rate limit configuration\n");
return -1;
}
+
STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+
return 0;
}
-/* Prepare VPORT WFQ runtime init values for the specified VPORTs. Return -1 on
- * error.
+
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
+ * Return -1 on error.
*/
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u8 num_vports,
struct init_qm_vport_params *vport_params)
{
- u8 tc, i;
+ u16 vport_pq_id;
u32 inc_val;
- /* go over all PF VPORTs */
+ u8 tc, i;
+
+ /* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
- if (vport_params[i].vport_wfq) {
- inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
- if (inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true,
- "Invalid VPORT WFQ weight config");
- return -1;
- }
- /* each VPORT can have several VPORT PQ IDs for
- * different TCs
- */
- for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id =
- vport_params[i].first_tx_pq_id[tc];
- if (vport_pq_id != QM_INVALID_PQ_ID) {
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPCRD_RT_OFFSET +
- vport_pq_id,
- (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPWEIGHT_RT_OFFSET
- + vport_pq_id, inc_val);
- }
+ if (!vport_params[i].vport_wfq)
+ continue;
+
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT WFQ weight configuration\n");
+ return -1;
+ }
+
+ /* Each VPORT can have several VPORT PQ IDs for various TCs */
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ vport_pq_id = vport_params[i].first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID) {
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
}
}
}
@@ -548,19 +615,23 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_vport_params *vport_params)
{
u8 i, vport_id;
+ u32 inc_val;
+
if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT ID for rate limiter configuration");
+ "Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
- /* go over all PF VPORTs */
+
+ /* Go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT rate-limit configuration");
+ "Invalid VPORT rate-limit configuration\n");
return -1;
}
+
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
@@ -569,6 +640,7 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
inc_val);
}
+
return 0;
}
@@ -576,17 +648,20 @@ static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
u32 reg_val, i;
- for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
i++) {
OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
}
- /* check if timeout while waiting for SDM command ready */
+
+ /* Check if timeout while waiting for SDM command ready */
if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
"Timeout waiting for QM SDM cmd ready signal\n");
return false;
}
+
return true;
}
@@ -596,15 +671,19 @@ static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
{
if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
return false;
+
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+
return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
+
/******************** INTERFACE IMPLEMENTATION *********************/
+
u32 ecore_qm_pf_mem_size(u8 pf_id,
u32 num_pf_cids,
u32 num_vf_cids,
@@ -625,32 +704,42 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
- /* init AFullOprtnstcCrdMask */
- u32 mask =
- (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
- (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
- (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
- (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
- (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
- (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
- (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
- (QM_OPPOR_PQ_EMPTY_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+ u32 mask;
+
+ /* Init AFullOprtnstcCrdMask */
+ mask = (QM_OPPOR_LINE_VOQ_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+ (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+ (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+ (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+ (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+ (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+ (QM_OPPOR_FW_STOP_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+ (QM_OPPOR_PQ_EMPTY_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
- /* enable/disable PF RL */
+
+ /* Enable/disable PF RL */
ecore_enable_pf_rl(p_hwfn, pf_rl_en);
- /* enable/disable PF WFQ */
+
+ /* Enable/disable PF WFQ */
ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
- /* enable/disable VPORT RL */
+
+ /* Enable/disable VPORT RL */
ecore_enable_vport_rl(p_hwfn, vport_rl_en);
- /* enable/disable VPORT WFQ */
+
+ /* Enable/disable VPORT WFQ */
ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
- /* init PBF CMDQ line credit */
+
+ /* Init PBF CMDQ line credit */
ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
max_phys_tcs_per_port, port_params);
- /* init BTB blocks in PBF */
+
+ /* Init BTB blocks in PBF */
ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
max_phys_tcs_per_port, port_params);
+
return 0;
}
@@ -673,66 +762,86 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
+ u32 other_mem_size_4kb;
u8 tc, i;
- u32 other_mem_size_4kb =
- QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
- /* clear first Tx PQ ID array for each VPORT */
+
+ other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
+ QM_OTHER_PQS_PER_PF;
+
+ /* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < num_vports; i++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
- /* map Other PQs (if any) */
+
+ /* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
num_tids, 0);
#endif
- /* map Tx PQs */
+
+ /* Map Tx PQs */
ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
max_phys_tcs_per_port, is_first_pf, num_pf_cids,
num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
start_vport, other_mem_size_4kb, pq_params,
vport_params);
- /* init PF WFQ */
+
+ /* Init PF WFQ */
if (pf_wfq)
if (ecore_pf_wfq_rt_init
(p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
- num_pf_pqs + num_vf_pqs, pq_params) != 0)
+ num_pf_pqs + num_vf_pqs, pq_params))
return -1;
- /* init PF RL */
- if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl) != 0)
+
+ /* Init PF RL */
+ if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
return -1;
- /* set VPORT WFQ */
- if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params) != 0)
+
+ /* Set VPORT WFQ */
+ if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
return -1;
- /* set VPORT RL */
+
+ /* Set VPORT RL */
if (ecore_vport_rl_rt_init
- (p_hwfn, start_vport, num_vports, vport_params) != 0)
+ (p_hwfn, start_vport, num_vports, vport_params))
return -1;
+
return 0;
}
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
- u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
- if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
+ u32 inc_val;
+
+ inc_val = QM_WFQ_INC_VAL(pf_wfq);
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF WFQ weight configuration\n");
return -1;
}
+
ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
return 0;
}
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
- u32 inc_val = QM_RL_INC_VAL(pf_rl);
+ u32 inc_val;
+
+ inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF rate limit configuration\n");
return -1;
}
+
ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
(u32)QM_RL_CRD_REG_SIGN_BIT);
ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+
return 0;
}
@@ -740,20 +849,25 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
+ u16 vport_pq_id;
+ u32 inc_val;
u8 tc;
- u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
- if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
+
+ inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT WFQ weight configuration");
+ "Invalid VPORT WFQ weight configuration\n");
return -1;
}
+
for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id = first_tx_pq_id[tc];
+ vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID) {
ecore_wr(p_hwfn, p_ptt,
QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
}
}
+
return 0;
}
@@ -761,20 +875,24 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+
if (vport_id >= max_qm_global_rls) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT ID for rate limiter configuration");
+ "Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
+
inc_val = QM_RL_INC_VAL(vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT rate-limit configuration");
+ "Invalid VPORT rate-limit configuration\n");
return -1;
}
+
ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
(u32)QM_RL_CRD_REG_SIGN_BIT);
ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+
return 0;
}
@@ -784,15 +902,20 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
- u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
- /* set command's PQ type */
+ u32 pq_mask = 0, last_pq, pq_id;
+
+ last_pq = start_pq + num_pqs - 1;
+
+ /* Set command's PQ type */
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
- /* go over requested PQs */
+
+ /* Go over requested PQs */
for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
- /* set PQ bit in mask (stop command only) */
+ /* Set PQ bit in mask (stop command only) */
if (!is_release_cmd)
pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
- /* if last PQ or end of PQ mask, write command */
+
+ /* If last PQ or end of PQ mask, write command */
if ((pq_id == last_pq) ||
(pq_id % QM_STOP_PQ_MASK_WIDTH ==
(QM_STOP_PQ_MASK_WIDTH - 1))) {
@@ -807,68 +930,92 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
pq_mask = 0;
}
}
+
return true;
}
+
/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600
+
/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
-(2 * ((weight) > (mtu) ? (weight) : (mtu)))
+ (2 * ((weight) > (mtu) ? (weight) : (mtu)))
+
/* NIG: RL constants */
-#define NIG_RL_BASE_TYPE 1 /* byte base type */
-#define NIG_RL_PERIOD 1 /* in us */
+
+/* Byte base type value */
+#define NIG_RL_BASE_TYPE 1
+
+/* Period in us */
+#define NIG_RL_PERIOD 1
+
+/* Period in 25MHz cycles */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)
+
+/* Rate in mbps */
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)
+
#define NIG_RL_MAX_VAL(inc_val, mtu) \
-(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
+ (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
+
/* NIG: packet prioritry configuration constants */
-#define NIG_PRIORITY_MAP_TC_BITS 4
+#define NIG_PRIORITY_MAP_TC_BITS 4
+
+
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req *req, bool is_lb)
{
- u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
- u8 num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
- u8 tc_client_offset =
- is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
- u32 min_weight = 0xffffffff;
- u32 tc_weight_base_addr =
- is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
- NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
- u32 tc_weight_addr_diff =
- is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
- NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
- NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
- u32 tc_bound_base_addr =
- is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
- NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
- u32 tc_bound_addr_diff =
- is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
- NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
- NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
- NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+ u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
+ u32 tc_bound_base_addr, tc_bound_addr_diff;
+ u8 sp_tc_map = 0, wfq_tc_map = 0;
+ u8 tc, num_tc, tc_client_offset;
+
+ num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
+ tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
+ NIG_TX_ETS_CLIENT_OFFSET;
+ min_weight = 0xffffffff;
+ tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+ tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
+ NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+ tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+ tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
+ NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+
for (tc = 0; tc < num_tc; tc++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
- /* update SP map */
+
+ /* Update SP map */
if (tc_req->use_sp)
sp_tc_map |= (1 << tc);
- if (tc_req->use_wfq) {
- /* update WFQ map */
- wfq_tc_map |= (1 << tc);
- /* find minimal weight */
- if (tc_req->weight < min_weight)
- min_weight = tc_req->weight;
- }
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Update WFQ map */
+ wfq_tc_map |= (1 << tc);
+
+ /* Find minimal weight */
+ if (tc_req->weight < min_weight)
+ min_weight = tc_req->weight;
}
- /* write SP map */
+
+ /* Write SP map */
ecore_wr(p_hwfn, p_ptt,
is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
NIG_REG_TX_ARB_CLIENT_IS_STRICT,
(sp_tc_map << tc_client_offset));
- /* write WFQ map */
+
+ /* Write WFQ map */
ecore_wr(p_hwfn, p_ptt,
is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
@@ -876,22 +1023,23 @@ void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
/* write WFQ weights */
for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
- if (tc_req->use_wfq) {
- /* translate weight to bytes */
- u32 byte_weight =
- (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
- min_weight;
- /* write WFQ weight */
- ecore_wr(p_hwfn, p_ptt,
- tc_weight_base_addr +
- tc_weight_addr_diff * tc_client_offset,
- byte_weight);
- /* write WFQ upper bound */
- ecore_wr(p_hwfn, p_ptt,
- tc_bound_base_addr +
- tc_bound_addr_diff * tc_client_offset,
- NIG_ETS_UP_BOUND(byte_weight, req->mtu));
- }
+ u32 byte_weight;
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Translate weight to bytes */
+ byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+ min_weight;
+
+ /* Write WFQ weight */
+ ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
+ tc_weight_addr_diff * tc_client_offset, byte_weight);
+
+ /* Write WFQ upper bound */
+ ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
+ tc_bound_addr_diff * tc_client_offset,
+ NIG_ETS_UP_BOUND(byte_weight, req->mtu));
}
}
@@ -899,16 +1047,18 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_lb_rl_req *req)
{
- u8 tc;
u32 ctrl, inc_val, reg_offset;
- /* disable global MAC+LB RL */
+ u8 tc;
+
+ /* Disable global MAC+LB RL */
ctrl =
NIG_RL_BASE_TYPE <<
NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
- /* configure and enable global MAC+LB RL */
+
+ /* Configure and enable global MAC+LB RL */
if (req->lb_mac_rate) {
- /* configure */
+ /* Configure */
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
@@ -916,20 +1066,23 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
inc_val);
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
NIG_RL_MAX_VAL(inc_val, req->mtu));
- /* enable */
+
+ /* Enable */
ctrl |=
1 <<
NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
}
- /* disable global LB-only RL */
+
+ /* Disable global LB-only RL */
ctrl =
NIG_RL_BASE_TYPE <<
NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
- /* configure and enable global LB-only RL */
+
+ /* Configure and enable global LB-only RL */
if (req->lb_rate) {
- /* configure */
+ /* Configure */
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->lb_rate);
@@ -937,41 +1090,41 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
inc_val);
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
NIG_RL_MAX_VAL(inc_val, req->mtu));
- /* enable */
+
+ /* Enable */
ctrl |=
1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
}
- /* per-TC RLs */
+
+ /* Per-TC RLs */
for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
tc++, reg_offset += 4) {
- /* disable TC RL */
+ /* Disable TC RL */
ctrl =
NIG_RL_BASE_TYPE <<
NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
- /* configure and enable TC RL */
- if (req->tc_rate[tc]) {
- /* configure */
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
- reg_offset, NIG_RL_PERIOD_CLK_25M);
- inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
- reg_offset, inc_val);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
- reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
- /* enable */
- ctrl |=
- 1 <<
- NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset,
- ctrl);
- }
+
+ /* Configure and enable TC RL */
+ if (!req->tc_rate[tc])
+ continue;
+
+ /* Configure */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
+ reg_offset, NIG_RL_PERIOD_CLK_25M);
+ inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
+ reg_offset, inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
+ reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
+
+ /* Enable */
+ ctrl |= 1 <<
+ NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
+ reg_offset, ctrl);
}
}
@@ -979,20 +1132,23 @@ void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_pri_tc_map_req *req)
{
- u8 pri, tc;
- u32 pri_tc_mask = 0;
u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
+ u32 pri_tc_mask = 0;
+ u8 pri, tc;
+
for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
- if (req->pri[pri].valid) {
- pri_tc_mask |=
- (req->pri[pri].
- tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
- tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
- }
+ if (!req->pri[pri].valid)
+ continue;
+
+ pri_tc_mask |= (req->pri[pri].tc_id <<
+ (pri * NIG_PRIORITY_MAP_TC_BITS));
+ tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
}
- /* write priority -> TC mask */
+
+ /* Write priority -> TC mask */
ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
- /* write TC -> priority mask */
+
+ /* Write TC -> priority mask */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
tc_pri_mask[tc]);
@@ -1001,110 +1157,133 @@ void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
}
}
+
/* PRS: ETS configuration constants */
-#define PRS_ETS_MIN_WFQ_BYTES 1600
+#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight, mtu) \
-(2 * ((weight) > (mtu) ? (weight) : (mtu)))
+ (2 * ((weight) > (mtu) ? (weight) : (mtu)))
+
+
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct init_ets_req *req)
{
+ u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
- u32 min_weight = 0xffffffff;
- u32 tc_weight_addr_diff =
- PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
- u32 tc_bound_addr_diff =
- PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
- PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
+
+ tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
+ PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
+ tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
+ PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
+
for (tc = 0; tc < NUM_OF_TCS; tc++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
- /* update SP map */
+
+ /* Update SP map */
if (tc_req->use_sp)
sp_tc_map |= (1 << tc);
- if (tc_req->use_wfq) {
- /* update WFQ map */
- wfq_tc_map |= (1 << tc);
- /* find minimal weight */
- if (tc_req->weight < min_weight)
- min_weight = tc_req->weight;
- }
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Update WFQ map */
+ wfq_tc_map |= (1 << tc);
+
+ /* Find minimal weight */
+ if (tc_req->weight < min_weight)
+ min_weight = tc_req->weight;
}
+
/* write SP map */
ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
+
/* write WFQ map */
ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
wfq_tc_map);
+
/* write WFQ weights */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
- if (tc_req->use_wfq) {
- /* translate weight to bytes */
- u32 byte_weight =
- (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
- min_weight;
- /* write WFQ weight */
- ecore_wr(p_hwfn, p_ptt,
- PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 +
- tc * tc_weight_addr_diff, byte_weight);
- /* write WFQ upper bound */
- ecore_wr(p_hwfn, p_ptt,
- PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
- tc * tc_bound_addr_diff,
- PRS_ETS_UP_BOUND(byte_weight, req->mtu));
- }
+ u32 byte_weight;
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Translate weight to bytes */
+ byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+ min_weight;
+
+ /* Write WFQ weight */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
+ tc_weight_addr_diff, byte_weight);
+
+ /* Write WFQ upper bound */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
+ tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
+ req->mtu));
}
}
+
/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
-#define BRB_BLOCK_SIZE 128 /* in bytes */
+#define BRB_BLOCK_SIZE 128
#define BRB_MIN_BLOCKS_PER_TC 9
-#define BRB_HYST_BYTES 10240
-#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
-/*
- * temporary big RAM allocation - should be updated
- */
+#define BRB_HYST_BYTES 10240
+#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
+
+/* Temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
{
- u8 port, active_ports = 0;
+ u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
u32 active_port_blocks, reg_offset = 0;
- u32 tc_headroom_blocks =
- (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
- u32 min_pkt_size_blocks =
- (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
- u32 total_blocks =
- ECORE_IS_K2(p_hwfn->
- p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
- BRB_TOTAL_RAM_BLOCKS_BB;
- /* find number of active ports */
+ u8 port, active_ports = 0;
+
+ tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
+ BRB_BLOCK_SIZE);
+ min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
+ BRB_BLOCK_SIZE);
+ total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
+ BRB_TOTAL_RAM_BLOCKS_BB;
+
+ /* Find number of active ports */
for (port = 0; port < MAX_NUM_PORTS; port++)
if (req->num_active_tcs[port])
active_ports++;
+
active_port_blocks = (u32)(total_blocks / active_ports);
+
for (port = 0; port < req->max_ports_per_engine; port++) {
- /* calculate per-port sizes */
- u32 tc_guaranteed_blocks =
- (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
- u32 port_blocks =
- req->num_active_tcs[port] ? active_port_blocks : 0;
- u32 port_guaranteed_blocks =
- req->num_active_tcs[port] * tc_guaranteed_blocks;
- u32 port_shared_blocks = port_blocks - port_guaranteed_blocks;
- u32 full_xoff_th =
- req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
- u32 full_xon_th = full_xoff_th + min_pkt_size_blocks;
- u32 pause_xoff_th = tc_headroom_blocks;
- u32 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
+ u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
+ u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
+ u32 tc_guaranteed_blocks;
u8 tc;
- /* init total size per port */
+
+ /* Calculate per-port sizes */
+ tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
+ BRB_BLOCK_SIZE);
+ port_blocks = req->num_active_tcs[port] ? active_port_blocks :
+ 0;
+ port_guaranteed_blocks = req->num_active_tcs[port] *
+ tc_guaranteed_blocks;
+ port_shared_blocks = port_blocks - port_guaranteed_blocks;
+ full_xoff_th = req->num_active_tcs[port] *
+ BRB_MIN_BLOCKS_PER_TC;
+ full_xon_th = full_xoff_th + min_pkt_size_blocks;
+ pause_xoff_th = tc_headroom_blocks;
+ pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
+
+ /* Init total size per port */
ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
port_blocks);
- /* init shared size per port */
+
+ /* Init shared size per port */
ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
port_shared_blocks);
+
for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
- /* clear init values for non-active TCs */
+ /* Clear init values for non-active TCs */
if (tc == req->num_active_tcs[port]) {
tc_guaranteed_blocks = 0;
full_xoff_th = 0;
@@ -1112,15 +1291,18 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
pause_xoff_th = 0;
pause_xon_th = 0;
}
- /* init guaranteed size per TC */
+
+ /* Init guaranteed size per TC */
ecore_wr(p_hwfn, p_ptt,
BRB_REG_TC_GUARANTIED_0 + reg_offset,
tc_guaranteed_blocks);
ecore_wr(p_hwfn, p_ptt,
BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
BRB_HYST_BLOCKS);
-/* init pause/full thresholds per physical TC - for loopback traffic */
+ /* Init pause/full thresholds per physical TC - for
+ * loopback traffic.
+ */
ecore_wr(p_hwfn, p_ptt,
BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
reg_offset, full_xoff_th);
@@ -1133,7 +1315,10 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt,
BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
reg_offset, pause_xon_th);
-/* init pause/full thresholds per physical TC - for main traffic */
+
+ /* Init pause/full thresholds per physical TC - for
+ * main traffic.
+ */
ecore_wr(p_hwfn, p_ptt,
BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
reg_offset, full_xoff_th);
@@ -1150,23 +1335,25 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
}
}
-/*In MF should be called once per engine to set EtherType of OuterTag*/
+/* In MF should be called once per engine to set EtherType of OuterTag */
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType)
{
- /* update PRS register */
+ /* Update PRS register */
STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
- /* update NIG register */
+
+ /* Update NIG register */
STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
- /* update PBF register */
+
+ /* Update PBF register */
STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
}
-/*In MF should be called once per port to set EtherType of OuterTag*/
+/* In MF should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType)
{
- /* update DORQ register */
+ /* Update DORQ register */
STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}
@@ -1176,11 +1363,13 @@ void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 dest_port)
{
- /* update PRS register */
+ /* Update PRS register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
- /* update NIG register */
+
+ /* Update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
- /* update PBF register */
+
+ /* Update PBF register */
ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
@@ -1188,23 +1377,26 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, bool vxlan_enable)
{
u32 reg_val;
- /* update PRS register */
+
+ /* Update PRS register */
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
vxlan_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
(u32)PRS_ETH_TUNN_FIC_FORMAT);
}
- /* update NIG register */
+
+ /* Update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
vxlan_enable);
ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- /* update DORQ register */
+
+ /* Update DORQ register */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
vxlan_enable ? 1 : 0);
}
@@ -1214,7 +1406,8 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
bool eth_gre_enable, bool ip_gre_enable)
{
u32 reg_val;
- /* update PRS register */
+
+ /* Update PRS register */
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
@@ -1224,10 +1417,11 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
(u32)PRS_ETH_TUNN_FIC_FORMAT);
}
- /* update NIG register */
+
+ /* Update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
@@ -1236,7 +1430,8 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- /* update DORQ registers */
+
+ /* Update DORQ registers */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
eth_gre_enable ? 1 : 0);
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
@@ -1246,14 +1441,13 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 dest_port)
{
- /* geneve tunnel not supported in BB_A0 */
- if (ECORE_IS_BB_A0(p_hwfn->p_dev))
- return;
- /* update PRS register */
+ /* Update PRS register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
- /* update NIG register */
+
+ /* Update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
- /* update PBF register */
+
+ /* Update PBF register */
ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
@@ -1262,10 +1456,8 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
bool eth_geneve_enable, bool ip_geneve_enable)
{
u32 reg_val;
- /* geneve tunnel not supported in BB_A0 */
- if (ECORE_IS_BB_A0(p_hwfn->p_dev))
- return;
- /* update PRS register */
+
+ /* Update PRS register */
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
@@ -1275,42 +1467,75 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
ip_geneve_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
(u32)PRS_ETH_TUNN_FIC_FORMAT);
}
- /* update NIG register */
+
+ /* Update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
eth_geneve_enable ? 1 : 0);
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
ip_geneve_enable ? 1 : 0);
- /* comp ver */
- reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
- ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
- ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
- /* EDPM with geneve tunnel not supported in BB_B0 */
+
+ /* EDPM with geneve tunnel not supported in BB */
if (ECORE_IS_BB_B0(p_hwfn->p_dev))
return;
- /* update DORQ registers */
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+
+ /* Update DORQ registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
eth_geneve_enable ? 1 : 0);
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
ip_geneve_enable ? 1 : 0);
}
+
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
-#define PARSER_ETH_CONN_CM_HDR (0x0)
+#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
+void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id)
+{
+ union gft_cam_line_union cam_line;
+ struct gft_ram_line ram_line;
+ u32 i, *ram_line_ptr;
+
+ ram_line_ptr = (u32 *)&ram_line;
+
+ /* Stop using gft logic, disable gft search */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+
+ /* Clean ram & cam for next rfs/gft session*/
+
+ /* Zero camline */
+ OSAL_MEMSET(&cam_line, 0, sizeof(cam_line));
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ cam_line.cam_line_mapped.camline);
+
+ /* Zero ramline */
+ OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));
+
+ /* Each iteration write to reg */
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id +
+ i * REG_SIZE, *(ram_line_ptr + i));
+}
+
+
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- /* set RFS event ID to be awakened i Tstorm By Prs */
- u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ u32 rfs_cm_hdr_event_id;
+
+ /* Set RFS event ID to be awakened i Tstorm By Prs */
+ rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
@@ -1331,39 +1556,48 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
struct gft_ram_line ramLine;
u32 *ramLinePointer = (u32 *)&ramLine;
int i;
+
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn, true,
"set_rfs_mode_enable: must accept at "
"least on of - ipv4 or ipv6");
+
if (!tcp && !udp)
DP_NOTICE(p_hwfn, true,
"set_rfs_mode_enable: must accept at "
"least on of - udp or tcp");
- /* set RFS event ID to be awakened i Tstorm By Prs */
+
+ /* Set RFS event ID to be awakened i Tstorm By Prs */
rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+
/* Configure Registers for RFS mode */
-/* enable gft search */
+
+ /* Enable gft search */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); /* do not load
* context only cid
* in PRS on match
*/
camLine.cam_line_mapped.camline = 0;
- /* cam line is now valid!! */
+
+ /* Cam line is now valid!! */
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_VALID, 1);
- /* filters are per PF!! */
+
+ /* Filters are per PF!! */
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+
if (!(tcp && udp)) {
SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
@@ -1373,6 +1607,7 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_UDP_PROTOCOL);
}
+
if (!(ipv4 && ipv6)) {
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
@@ -1385,44 +1620,53 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV6);
}
- /* write characteristics to cam */
+
+ /* Write characteristics to cam */
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
camLine.cam_line_mapped.camline);
camLine.cam_line_mapped.camline =
ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
- /* write line to RAM - compare to filter 4 tuple */
- ramLine.low32bits = 0;
- ramLine.high32bits = 0;
- SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_DST_PORT, 1);
- /* each iteration write to reg */
+
+ /* Write line to RAM - compare to filter 4 tuple */
+ ramLine.lo = 0;
+ ramLine.hi = 0;
+ SET_FIELD(ramLine.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ramLine.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ramLine.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ramLine.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ramLine.lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ramLine.lo, GFT_RAM_LINE_DST_PORT, 1);
+
+ /* Each iteration write to reg */
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
RAM_LINE_SIZE * pf_id +
i * REG_SIZE, *(ramLinePointer + i));
- /* set default profile so that no filter match will happen */
- ramLine.low32bits = 0xffff;
- ramLine.high32bits = 0xffff;
+
+ /* Set default profile so that no filter match will happen */
+ ramLine.lo = 0xffff;
+ ramLine.hi = 0xffff;
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
i * REG_SIZE, *(ramLinePointer + i));
}
-/* Configure VF zone size mode*/
+/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 mode,
bool runtime_init)
{
u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
u32 msdm_vf_offset_mask;
+
if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
msdm_vf_size_log += 1;
else if (mode == VF_ZONE_SIZE_MODE_QUAD)
msdm_vf_size_log += 2;
+
msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
+
if (runtime_init) {
STORE_RT_REG(p_hwfn,
PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
@@ -1438,12 +1682,13 @@ void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
}
}
-/* get mstorm statistics for offset by VF zone size mode*/
+/* Get mstorm statistics for offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
u16 stat_cnt_id,
u16 vf_zone_size_mode)
{
u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
+
if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
(stat_cnt_id > MAX_NUM_PFS)) {
if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
@@ -1453,16 +1698,18 @@ u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
(stat_cnt_id - MAX_NUM_PFS);
}
+
return offset;
}
-/* get mstorm VF producer offset by VF zone size mode*/
+/* Get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
u8 vf_id,
u8 vf_queue_id,
u16 vf_zone_size_mode)
{
u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
+
if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
@@ -1471,5 +1718,166 @@ u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
vf_id;
}
+
return offset;
}
+
+/* Calculate CRC8 of first 4 bytes in buf */
+static u8 ecore_calc_crc8(const u8 *buf)
+{
+ u32 i, j, crc = 0xff << 8;
+
+ /* CRC-8 polynomial */
+ #define POLY 0x1070
+
+ for (j = 0; j < 4; j++, buf++) {
+ crc ^= (*buf << 8);
+ for (i = 0; i < 8; i++) {
+ if (crc & 0x8000)
+ crc ^= (POLY << 3);
+
+ crc <<= 1;
+ }
+ }
+
+ return (u8)(crc >> 8);
+}
+
+/* Calculate and return CDU validation byte per conneciton type / region /
+ * cid
+ */
+static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region,
+ u32 cid)
+{
+ const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+ u8 crc, validation_byte = 0;
+ u32 validation_string = 0;
+ const u8 *data_to_crc_rev;
+ u8 data_to_crc[4];
+
+ data_to_crc_rev = (const u8 *)&validation_string;
+
+ /*
+ * The CRC is calculated on the String-to-compress:
+ * [31:8] = {CID[31:20],CID[11:0]}
+ * [7:4] = Region
+ * [3:0] = Type
+ */
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+ validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+ validation_string |= ((region & 0xF) << 4);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+ validation_string |= (conn_type & 0xF);
+
+ /* Convert to big-endian (ntoh())*/
+ data_to_crc[0] = data_to_crc_rev[3];
+ data_to_crc[1] = data_to_crc_rev[2];
+ data_to_crc[2] = data_to_crc_rev[1];
+ data_to_crc[3] = data_to_crc_rev[0];
+
+ crc = ecore_calc_crc8(data_to_crc);
+
+ validation_byte |= ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
+
+ if ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+ validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+ else
+ validation_byte |= crc & 0x7F;
+
+ return validation_byte;
+}
+
+/* Calcualte and set validation bytes for session context */
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 cid)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
+ *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
+ *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calcualte and set validation bytes for task context */
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 tid)
+{
+ u8 *p_ctx, *region1_val_ptr;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
+
+/* Memset session context to 0 while preserving validation bytes */
+void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+ u8 x_val, t_val, u_val;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ x_val = *x_val_ptr;
+ t_val = *t_val_ptr;
+ u_val = *u_val_ptr;
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = x_val;
+ *t_val_ptr = t_val;
+ *u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void ecore_memset_task_ctx(void *p_ctx_mem, const u32 ctx_size,
+ const u8 ctx_type)
+{
+ u8 *p_ctx, *region1_val_ptr;
+ u8 region1_val;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ region1_val = *region1_val_ptr;
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = region1_val;
+}
+
+/* Enable and configure context validation */
+void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 ctx_validation;
+
+ /* Enable validation for connection region 3 - bits [31:24] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+ /* Enable validation for connection region 5 - bits [15: 8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+ /* Enable validation for connection region 1 - bits [15: 8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
+}
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 9df0e7de..4da3fc29 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -8,20 +8,22 @@
#ifndef _INIT_FW_FUNCS_H
#define _INIT_FW_FUNCS_H
-/* forward declarations */
+/* Forward declarations */
+
struct init_qm_pq_params;
+
/**
- * @brief ecore_qm_pf_mem_size - prepare QM ILT sizes
+ * @brief ecore_qm_pf_mem_size - Prepare QM ILT sizes
*
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
- * @param pf_id - physical function ID
- * @param num_pf_cids - number of connections used by this PF
- * @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param num_pf_pqs - number of PQs used by this PF
- * @param num_vf_pqs - number of PQs used by VFs of this PF
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
*
* @return The required host memory size in 4KB units.
*/
@@ -31,6 +33,7 @@ u32 ecore_qm_pf_mem_size(u8 pf_id,
u32 num_tids,
u16 num_pf_pqs,
u16 num_vf_pqs);
+
/**
* @brief ecore_qm_common_rt_init - Prepare QM runtime init values for engine
* phase
@@ -38,10 +41,10 @@ u32 ecore_qm_pf_mem_size(u8 pf_id,
* @param p_hwfn
* @param max_ports_per_engine - max number of ports per engine in HW
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param pf_rl_en - enable per-PF rate limiters
- * @param pf_wfq_en - enable per-PF WFQ
- * @param vport_rl_en - enable per-VPORT rate limiters
- * @param vport_wfq_en - enable per-VPORT WFQ
+ * @param pf_rl_en - enable per-PF rate limiters
+ * @param pf_wfq_en - enable per-PF WFQ
+ * @param vport_rl_en - enable per-VPORT rate limiters
+ * @param vport_wfq_en - enable per-VPORT WFQ
* @param port_params - array of size MAX_NUM_PORTS with params for each port
*
* @return 0 on success, -1 on error.
@@ -54,22 +57,24 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
bool vport_rl_en,
bool vport_wfq_en,
struct init_qm_port_params port_params[MAX_NUM_PORTS]);
+
/**
* @brief ecore_qm_pf_rt_init Prepare QM runtime init values for the PF phase
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
- * @param port_id - port ID
- * @param pf_id - PF ID
+ * @param port_id - port ID
+ * @param pf_id - PF ID
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param is_first_pf - 1 = first PF in engine, 0 = othwerwise
- * @param num_pf_cids - number of connections used by this PF
+ * @param is_first_pf - 1 = first PF in engine, 0 = othwerwise
+ * @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param start_pq - first Tx PQ ID associated with this PF
- * @param num_pf_pqs - number of Tx PQs associated with this PF (non-VF)
- * @param num_vf_pqs - number of Tx PQs associated with a VF
- * @param start_vport - first VPORT ID associated with this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param start_pq - first Tx PQ ID associated with this PF
+ * @param num_pf_pqs - number of Tx PQs associated with this PF
+ * (non-VF)
+ * @param num_vf_pqs - number of Tx PQs associated with a VF
+ * @param start_vport - first VPORT ID associated with this PF
* @param num_vports - number of VPORTs associated with this PF
* @param pf_wfq - WFQ weight. if PF WFQ is globally disabled, the weight must
* be 0. otherwise, the weight must be non-zero.
@@ -100,6 +105,7 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
u32 pf_rl,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params);
+
/**
* @brief ecore_init_pf_wfq Initializes the WFQ weight of the specified PF
*
@@ -114,11 +120,12 @@ int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 pf_id,
u16 pf_wfq);
+
/**
- * @brief ecore_init_pf_rl Initializes the rate limit of the specified PF
+ * @brief ecore_init_pf_rl - Initializes the rate limit of the specified PF
*
* @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
+ * @param p_ptt - ptt window used for writing the registers
* @param pf_id - PF ID
* @param pf_rl - rate limit in Mb/sec units
*
@@ -128,6 +135,7 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 pf_id,
u32 pf_rl);
+
/**
* @brief ecore_init_vport_wfq Initializes the WFQ weight of specified VPORT
*
@@ -144,10 +152,12 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS],
u16 vport_wfq);
+
/**
- * @brief ecore_init_vport_rl Initializes the rate limit of the specified VPORT
+ * @brief ecore_init_vport_rl - Initializes the rate limit of the specified
+ * VPORT.
*
- * @param p_hwfn
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers
* @param vport_id - VPORT ID
* @param vport_rl - rate limit in Mb/sec units
@@ -158,6 +168,7 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 vport_id,
u32 vport_rl);
+
/**
* @brief ecore_send_qm_stop_cmd Sends a stop command to the QM
*
@@ -178,6 +189,7 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
u16 start_pq,
u16 num_pqs);
#ifndef UNUSED_HSI_FUNC
+
/**
* @brief ecore_init_nig_ets - initializes the NIG ETS arbiter
*
@@ -193,6 +205,7 @@ void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req *req,
bool is_lb);
+
/**
* @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs
*
@@ -205,6 +218,7 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_lb_rl_req *req);
#endif /* UNUSED_HSI_FUNC */
+
/**
* @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map.
*
@@ -216,6 +230,7 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_pri_tc_map_req *req);
+
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter
@@ -229,6 +244,7 @@ void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req *req);
#endif /* UNUSED_HSI_FUNC */
+
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC
@@ -242,6 +258,7 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_brb_ram_req *req);
#endif /* UNUSED_HSI_FUNC */
+
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf and llh
@@ -250,22 +267,24 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
* if engine
* is in BD mode.
*
- * @param p_ptt - ptt window used for writing the registers.
+ * @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType);
+
/**
* @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
* input ethType should Be called
* once per port.
*
- * @param p_ptt - ptt window used for writing the registers.
+ * @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType);
#endif /* UNUSED_HSI_FUNC */
+
/**
* @brief ecore_set_vxlan_dest_port - initializes vxlan tunnel destination udp
* port
@@ -276,15 +295,17 @@ void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 dest_port);
+
/**
* @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
*
- * @param p_ptt - ptt window used for writing the registers.
- * @param vxlan_enable - vxlan enable flag.
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
*/
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool vxlan_enable);
+
/**
* @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
*
@@ -296,6 +317,7 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool eth_gre_enable,
bool ip_gre_enable);
+
/**
* @brief ecore_set_geneve_dest_port - initializes geneve tunnel destination
* udp port
@@ -306,6 +328,7 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 dest_port);
+
/**
* @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
*
@@ -318,6 +341,7 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
bool eth_geneve_enable,
bool ip_geneve_enable);
#ifndef UNUSED_HSI_FUNC
+
/**
* @brief ecore_set_gft_event_id_cm_hdr - configure GFT event id and cm header
*
@@ -325,16 +349,27 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
*/
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_set_rfs_mode_disable - Disable and configure HW for RFS
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to disable RFS.
+ */
+void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id);
+
/**
* @brief ecore_set_rfs_mode_enable - enable and configure HW for RFS
*
-*
-* @param p_ptt - ptt window used for writing the registers.
-* @param pf_id - pf on which to enable RFS.
-* @param tcp - set profile tcp packets.
-* @param udp - set profile udp packet.
-* @param ipv4 - set profile ipv4 packet.
-* @param ipv6 - set profile ipv6 packet.
+* @param p_ptt - ptt window used for writing the registers.
+* @param pf_id - pf on which to enable RFS.
+* @param tcp - set profile tcp packets.
+* @param udp - set profile udp packet.
+* @param ipv4 - set profile ipv4 packet.
+* @param ipv6 - set profile ipv6 packet.
*/
void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -344,6 +379,7 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
bool ipv4,
bool ipv6);
#endif /* UNUSED_HSI_FUNC */
+
/**
* @brief ecore_config_vf_zone_size_mode - Configure VF zone size mode. Must be
* used before first ETH queue started.
@@ -357,18 +393,20 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
*/
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt
*p_ptt, u16 mode, bool runtime_init);
+
/**
-* @brief ecore_get_mstorm_queue_stat_offset - get mstorm statistics offset by VF
-* zone size mode.
+ * @brief ecore_get_mstorm_queue_stat_offset - Get mstorm statistics offset by
+ * VF zone size mode.
*
* @param stat_cnt_id - statistic counter id
* @param vf_zone_size_mode - VF zone size mode. Use enum vf_zone_size_mode.
*/
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
u16 stat_cnt_id, u16 vf_zone_size_mode);
+
/**
-* @brief ecore_get_mstorm_eth_vf_prods_offset - VF producer offset by VF zone
-* size mode.
+ * @brief ecore_get_mstorm_eth_vf_prods_offset - VF producer offset by VF zone
+ * size mode.
*
* @param vf_id - vf id.
* @param vf_queue_id - per VF rx queue id.
@@ -376,4 +414,58 @@ u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
*/
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8
vf_queue_id, u16 vf_zone_size_mode);
+/**
+ * @brief ecore_enable_context_validation - Enable and configure context
+ * validation.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ */
+void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+/**
+ * @brief ecore_calc_session_ctx_validation - Calcualte validation byte for
+ * session context.
+ *
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
+ */
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 cid);
+/**
+ * @brief ecore_calc_task_ctx_validation - Calcualte validation byte for task
+ * context.
+ *
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
+ */
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 tid);
+/**
+ * @brief ecore_memset_session_ctx - Memset session context to 0 while
+ * preserving validation bytes.
+ *
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialzie.
+ * @param ctx_type - context type.
+ */
+void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size,
+ u8 ctx_type);
+/**
+ * @brief ecore_memset_task_ctx - Memset session context to 0 while preserving
+ * validation bytes.
+ *
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialzie.
+ * @param ctx_type - context type.
+ */
+void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size,
+ u8 ctx_type);
#endif
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index 351e9467..b907a95e 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -63,8 +63,8 @@ static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
- enum _ecore_status_t rc = ECORE_SUCCESS;
u16 i, segment;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
/* Since not all RT entries are initialized, go over the RT and
* for each segment of initialized values use DMA.
@@ -190,19 +190,19 @@ static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
bool b_must_dmae,
bool b_can_dmae)
{
+ u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
+ u32 data = OSAL_LE32_TO_CPU(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
u32 offset, output_len, input_len, max_size;
#endif
- u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
struct ecore_dev *p_dev = p_hwfn->p_dev;
- enum _ecore_status_t rc = ECORE_SUCCESS;
union init_array_hdr *hdr;
const u32 *array_data;
- u32 size, addr, data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 size;
array_data = p_dev->fw_data->arr_data;
- data = OSAL_LE32_TO_CPU(cmd->data);
- addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
hdr = (union init_array_hdr *)
(uintptr_t)(array_data + dmae_array_offset);
@@ -272,13 +272,10 @@ static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
struct init_write_op *p_cmd,
bool b_can_dmae)
{
+ u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
+ bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
enum _ecore_status_t rc = ECORE_SUCCESS;
- bool b_must_dmae;
- u32 addr, data;
-
- data = OSAL_LE32_TO_CPU(p_cmd->data);
- b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
- addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
/* Sanitize */
if (b_must_dmae && !b_can_dmae) {
@@ -452,10 +449,10 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
int phase, int phase_id, int modes)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
- enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
bool b_dmae = false;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
num_init_ops = p_dev->fw_data->init_ops_size;
init_ops = p_dev->fw_data->init_ops;
@@ -573,8 +570,7 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
return ECORE_INVAL;
}
- /* First Dword contains metadata and should be skipped */
- buf_hdr = (struct bin_buffer_hdr *)((uintptr_t)(data + sizeof(u32)));
+ buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 6fb037df..8dc4d150 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -59,6 +59,11 @@ struct aeu_invert_reg_bit {
#define ATTENTION_OFFSET_MASK (0x000ff000)
#define ATTENTION_OFFSET_SHIFT (12)
+#define ATTENTION_BB_MASK (0x00700000)
+#define ATTENTION_BB_SHIFT (20)
+#define ATTENTION_BB(value) ((value) << ATTENTION_BB_SHIFT)
+#define ATTENTION_BB_DIFFERENT (1 << 23)
+
#define ATTENTION_CLEAR_ENABLE (1 << 28)
unsigned int flags;
@@ -414,7 +419,7 @@ ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
-#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
+#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f0000)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
@@ -468,7 +473,26 @@ static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
return ECORE_INVAL;
}
-/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
+/* Instead of major changes to the data-structure, we have a some 'special'
+ * identifiers for sources that changed meaning between adapters.
+ */
+enum aeu_invert_reg_special_type {
+ AEU_INVERT_REG_SPECIAL_CNIG_0,
+ AEU_INVERT_REG_SPECIAL_CNIG_1,
+ AEU_INVERT_REG_SPECIAL_CNIG_2,
+ AEU_INVERT_REG_SPECIAL_CNIG_3,
+ AEU_INVERT_REG_SPECIAL_MAX,
+};
+
+static struct aeu_invert_reg_bit
+aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
+ {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+};
+
+/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{
{ /* After Invert 1 */
@@ -511,8 +535,18 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
OSAL_NULL, MAX_BLOCK_ID},
{"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
ecore_general_attention_35, MAX_BLOCK_ID},
- {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
- BLOCK_CNIG},
+ {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
+ OSAL_NULL, BLOCK_NWS},
+ {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
+ OSAL_NULL, BLOCK_NWS},
+ {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
+ OSAL_NULL, BLOCK_NWM},
+ {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
+ OSAL_NULL, BLOCK_NWM},
{"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
{"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
@@ -634,6 +668,27 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
};
+static struct aeu_invert_reg_bit *
+ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ if (!ECORE_IS_BB(p_hwfn->p_dev))
+ return p_bit;
+
+ if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
+ return p_bit;
+
+ return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
+ ATTENTION_BB_SHIFT];
+}
+
+static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
+ ATTENTION_PARITY);
+}
+
#define ATTN_STATE_BITS (0xfff)
#define ATTN_BITS_MASKABLE (0x3ff)
struct ecore_sb_attn_info {
@@ -868,7 +923,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
- if ((p_bit->flags & ATTENTION_PARITY) &&
+ if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
!!(parities & (1 << bit_idx))) {
ecore_int_deassertion_parity(p_hwfn, p_bit,
bit_idx);
@@ -905,26 +960,29 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
unsigned long int bitmask;
u8 bit, bit_len;
+ /* Need to account bits with changed meaning */
p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
- /* No need to handle attention-only bits */
- if (p_aeu->flags == ATTENTION_PAR)
- continue;
-
bit = bit_idx;
bit_len = ATTENTION_LENGTH(p_aeu->flags);
- if (p_aeu->flags & ATTENTION_PAR_INT) {
+ if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
/* Skip Parity */
bit++;
bit_len--;
}
+ /* Find the bits relating to HW-block, then
+ * shift so they'll become LSB.
+ */
bitmask = bits & (((1 << bit_len) - 1) << bit);
+ bitmask >>= bit;
+
if (bitmask) {
u32 flags = p_aeu->flags;
char bit_name[30];
+ u8 num;
- bit = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
+ num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
bit_len);
/* Some bits represent more than a
@@ -936,11 +994,17 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
ATTENTION_LENGTH(flags) > 1))
OSAL_SNPRINTF(bit_name, 30,
p_aeu->bit_name,
- bit);
+ num);
else
OSAL_STRNCPY(bit_name,
p_aeu->bit_name,
30);
+
+ /* We now need to pass bitmask in its
+ * correct position.
+ */
+ bitmask <<= bit;
+
/* Handle source of the attention */
ecore_int_deassertion_aeu_bit(p_hwfn,
p_aeu,
@@ -1203,12 +1267,13 @@ static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
for (i = 0; i < NUM_ATTN_REGS; i++) {
/* j is array index, k is bit index */
for (j = 0, k = 0; k < 32; j++) {
- unsigned int flags = aeu_descs[i].bits[j].flags;
+ struct aeu_invert_reg_bit *p_aeu;
- if (flags & ATTENTION_PARITY)
+ p_aeu = &aeu_descs[i].bits[j];
+ if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
sb_info->parity_mask[i] |= 1 << k;
- k += ATTENTION_LENGTH(flags);
+ k += ATTENTION_LENGTH(p_aeu->flags);
}
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
"Attn Mask [Reg %d]: 0x%08x\n",
@@ -1234,7 +1299,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
if (!p_sb) {
DP_NOTICE(p_dev, true,
- "Failed to allocate `struct ecore_sb_attn_info'");
+ "Failed to allocate `struct ecore_sb_attn_info'\n");
return ECORE_NOMEM;
}
@@ -1243,7 +1308,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
SB_ATTN_ALIGNED_SIZE(p_hwfn));
if (!p_virt) {
DP_NOTICE(p_dev, true,
- "Failed to allocate status block (attentions)");
+ "Failed to allocate status block (attentions)\n");
OSAL_FREE(p_dev, p_sb);
return ECORE_NOMEM;
}
@@ -1964,6 +2029,31 @@ enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
}
}
}
+
+ /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
+ * the number of VF SBs [especially for first VF on engine, as we can't
+ * diffrentiate between empty entries and its entries].
+ * Since we don't really support more SBs than VFs today, prevent any
+ * such configuration by sanitizing the number of SBs to equal the
+ * number of VFs.
+ */
+ if (IS_PF_SRIOV(p_hwfn)) {
+ u16 total_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ if (total_vfs < p_igu_info->free_blks) {
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
+ "Limiting number of SBs for IOV - %04x --> %04x\n",
+ p_igu_info->free_blks,
+ p_hwfn->p_dev->p_iov_info->total_vfs);
+ p_igu_info->free_blks = total_vfs;
+ } else if (total_vfs > p_igu_info->free_blks) {
+ DP_NOTICE(p_hwfn, true,
+ "IGU has only %04x SBs for VFs while the device has %04x VFs\n",
+ p_igu_info->free_blks, total_vfs);
+ return ECORE_INVAL;
+ }
+ }
+
p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
@@ -2092,24 +2182,6 @@ void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
p_sb_cnt_info->sb_free_blk = info->free_blks;
}
-u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
-{
- struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
-
- /* Determine origin of SB id */
- if ((sb_id >= p_info->igu_base_sb) &&
- (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
- return sb_id - p_info->igu_base_sb;
- } else if ((sb_id >= p_info->igu_base_sb_iov) &&
- (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
- return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
- } else {
- DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
- sb_id);
- return 0;
- }
-}
-
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{
int i;
@@ -2127,8 +2199,8 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 timer_res, u16 sb_id, bool tx)
{
- enum _ecore_status_t rc;
struct cau_sb_entry sb_entry;
+ enum _ecore_status_t rc;
if (!p_hwfn->hw_init_done) {
DP_ERR(p_hwfn, "hardware not initialized yet\n");
@@ -2159,3 +2231,30 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
return rc;
}
+
+enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb,
+ struct ecore_sb_info_dbg *p_info)
+{
+ u16 sbid = p_sb->igu_sb_id;
+ int i;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_PRODUCER_MEMORY + sbid * 4);
+ p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_CONSUMER_MEM + sbid * 4);
+
+ for (i = 0; i < PIS_PER_SB; i++)
+ p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY +
+ sbid * 4 * PIS_PER_SB + i * 4);
+
+ return ECORE_SUCCESS;
+}
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 45358b94..0c8929e3 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -172,16 +172,6 @@ void ecore_int_free(struct ecore_hwfn *p_hwfn);
void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
- * @brief - Returns an Rx queue index appropriate for usage with given SB.
- *
- * @param p_hwfn
- * @param sb_id - absolute index of SB
- *
- * @return index of Rx queue
- */
-u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
-
-/**
* @brief - Enable Interrupt & Attention for hw function
*
* @param p_hwfn
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index fc873e77..799fbe82 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -41,6 +41,12 @@ struct ecore_sb_info {
struct ecore_dev *p_dev;
};
+struct ecore_sb_info_dbg {
+ u32 igu_prod;
+ u32 igu_cons;
+ u16 pi[PIS_PER_SB];
+};
+
struct ecore_sb_cnt_info {
int sb_cnt;
int sb_iov_cnt;
@@ -108,7 +114,7 @@ static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
#else
-static OSAL_INLINE void __internal_ram_wr(void *p_hwfn,
+static OSAL_INLINE void __internal_ram_wr(__rte_unused void *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
#endif
@@ -120,19 +126,37 @@ static OSAL_INLINE void __internal_ram_wr(void *p_hwfn,
}
#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void __internal_ram_wr_relaxed(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
+#else
+static OSAL_INLINE void __internal_ram_wr_relaxed(__rte_unused void *p_hwfn,
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
+#endif
+{
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*data); i++)
+ DIRECT_REG_WR_RELAXED(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i],
+ data[i]);
+}
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
- void OSAL_IOMEM *addr,
- int size, u32 *data)
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
{
- __internal_ram_wr(p_hwfn, addr, size, data);
+ __internal_ram_wr_relaxed(p_hwfn, addr, size, data);
}
#else
static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
- int size, u32 *data)
+ int size, u32 *data)
{
- __internal_ram_wr(OSAL_NULL, addr, size, data);
+ __internal_ram_wr_relaxed(OSAL_NULL, addr, size, data);
}
#endif
+
#endif
struct ecore_hwfn;
@@ -285,4 +309,19 @@ void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);
*/
void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable);
+/**
+ * @brief Read debug information regarding a given SB.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_sb - point to Status block for which we want to get info.
+ * @param p_info - pointer to struct to fill with information regarding SB.
+ *
+ * @return ECORE_SUCCESS if pointer is filled; failure otherwise.
+ */
+enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb,
+ struct ecore_sb_info_dbg *p_info);
+
#endif
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index bb8df82f..50cb3f2b 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -52,6 +52,7 @@ enum ecore_iov_pf_to_vf_status {
PFVF_STATUS_NOT_SUPPORTED,
PFVF_STATUS_NO_RESOURCE,
PFVF_STATUS_FORCED,
+ PFVF_STATUS_MALICIOUS,
};
struct ecore_mcp_link_params;
@@ -87,6 +88,28 @@ struct ecore_public_vf_info {
u16 forced_vlan;
};
+struct ecore_iov_vf_init_params {
+ u16 rel_vf_id;
+
+ /* Number of requested Queues; Currently, don't support different
+ * number of Rx/Tx queues.
+ */
+ /* TODO - remove this limitation */
+ u16 num_queues;
+
+ /* Allow the client to choose which qzones to use for Rx/Tx,
+ * and which queue_base to use for Tx queues on a per-queue basis.
+ * Notice values should be relative to the PF resources.
+ */
+ u16 req_rx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+ u16 req_tx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+
+ u8 vport_id;
+
+ /* Should be set in case RSS is going to be used for VF */
+ u8 rss_eng_id;
+};
+
#ifdef CONFIG_ECORE_SW_CHANNEL
/* This is SW channel related only... */
enum mbx_state {
@@ -174,15 +197,14 @@ void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
*
* @param p_hwfn
* @param p_ptt
- * @param rel_vf_id
- * @param num_rx_queues
+ * @param p_params
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u16 rel_vf_id,
- u16 num_rx_queues);
+ struct ecore_iov_vf_init_params
+ *p_params);
/**
* @brief ecore_iov_process_mbx_req - process a request received
@@ -301,12 +323,13 @@ bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param rel_vf_id - Relative VF ID
* @param b_enabled_only - consider only enabled VF
+ * @param b_non_malicious - true iff we want to validate vf isn't malicious.
*
* @return bool - true for valid VF ID
*/
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
int rel_vf_id,
- bool b_enabled_only);
+ bool b_enabled_only, bool b_non_malicious);
/**
* @brief Get VF's public info structure
@@ -399,16 +422,6 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
u16 *opaque_fid);
/**
- * @brief Get VFs VPORT id.
- *
- * @param p_hwfn
- * @param vfid
- * @param vport id
- */
-void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
- u8 *p_vport_id);
-
-/**
* @brief Set forced VLAN [pvid] in PFs copy of bulletin board
* and configures FW/HW to support the configuration.
* Setting of pvid 0 would clear the feature.
@@ -662,24 +675,24 @@ bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
- * @brief - Get VF's vport min rate configured.
+ * @brief - Returm true if VF has started in FW
+ *
* @param p_hwfn
* @param rel_vf_id
*
- * @return - rate in Mbps
+ * @return
*/
-int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
/**
- * @brief - Configure min rate for VF's vport.
- * @param p_dev
- * @param vfid
- * @param - rate in Mbps
+ * @brief - Get VF's vport min rate configured.
+ * @param p_hwfn
+ * @param rel_vf_id
*
- * @return
+ * @return - rate in Mbps
*/
-enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
- int vfid, u32 rate);
+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
#endif
/**
@@ -688,15 +701,17 @@ enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
* @param p_hwfn
* @param rel_vf_id
*
- * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
+ * @return E4_MAX_NUM_VFS in case no further active VFs, otherwise index.
*/
u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
+ u16 vxlan_port, u16 geneve_port);
#endif /* CONFIG_ECORE_SRIOV */
#define ecore_for_each_vf(_p_hwfn, _i) \
for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0); \
- _i < MAX_NUM_VFS; \
+ _i < E4_MAX_NUM_VFS; \
_i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
#endif
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
index aad90123..b4bfe89f 100644
--- a/drivers/net/qede/base/ecore_iro.h
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -185,5 +185,13 @@
#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[46].base + \
((rdma_stat_counter_id) * IRO[46].m1))
#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) (IRO[47].base + \
+ ((pf_id) * IRO[47].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size)
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[48].base + \
+ ((roce_pf_id) * IRO[48].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
#endif /* __IRO_H__ */
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index 43e01e47..6764bfa6 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -9,13 +9,13 @@
#ifndef __IRO_VALUES_H__
#define __IRO_VALUES_H__
-static const struct iro iro_arr[47] = {
+static const struct iro iro_arr[49] = {
/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
{ 0x0, 0x0, 0x0, 0x0, 0x8},
/* TSTORM_PORT_STAT_OFFSET(port_id) */
- { 0x4cb0, 0x78, 0x0, 0x0, 0x78},
+ { 0x4cb0, 0x80, 0x0, 0x0, 0x80},
/* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
- { 0x6318, 0x20, 0x0, 0x0, 0x20},
+ { 0x6518, 0x20, 0x0, 0x0, 0x20},
/* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
{ 0xb00, 0x8, 0x0, 0x0, 0x4},
/* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
@@ -41,7 +41,7 @@ static const struct iro iro_arr[47] = {
/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
{ 0xa28, 0x8, 0x0, 0x0, 0x8},
/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
- { 0x60f8, 0x10, 0x0, 0x0, 0x10},
+ { 0x61f8, 0x10, 0x0, 0x0, 0x10},
/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0xb820, 0x30, 0x0, 0x0, 0x30},
/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
@@ -53,7 +53,7 @@ static const struct iro iro_arr[47] = {
/* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */
{ 0x53a0, 0x80, 0x4, 0x0, 0x4},
/* MSTORM_TPA_TIMEOUT_US_OFFSET */
- { 0xc8f0, 0x0, 0x0, 0x0, 0x4},
+ { 0xc7c8, 0x0, 0x0, 0x0, 0x4},
/* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x4ba0, 0x80, 0x0, 0x0, 0x20},
/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
@@ -63,13 +63,13 @@ static const struct iro iro_arr[47] = {
/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x2b48, 0x80, 0x0, 0x0, 0x38},
/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xf188, 0x78, 0x0, 0x0, 0x78},
+ { 0xf1b0, 0x78, 0x0, 0x0, 0x78},
/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4},
/* TSTORM_ETH_PRS_INPUT_OFFSET */
- { 0xacf0, 0x0, 0x0, 0x0, 0xf0},
+ { 0xaef8, 0x0, 0x0, 0x0, 0xf0},
/* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
- { 0xade0, 0x8, 0x0, 0x0, 0x8},
+ { 0xafe8, 0x8, 0x0, 0x0, 0x8},
/* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */
{ 0x1f8, 0x8, 0x0, 0x0, 0x8},
/* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */
@@ -85,25 +85,29 @@ static const struct iro iro_arr[47] = {
/* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0xb78, 0x10, 0x8, 0x0, 0x2},
/* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0xd888, 0x38, 0x0, 0x0, 0x24},
+ { 0xd9a8, 0x38, 0x0, 0x0, 0x24},
/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0x12c38, 0x10, 0x0, 0x0, 0x8},
+ { 0x12988, 0x10, 0x0, 0x0, 0x8},
/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x11aa0, 0x38, 0x0, 0x0, 0x18},
/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0xa8c0, 0x30, 0x0, 0x0, 0x10},
+ { 0xa8c0, 0x38, 0x0, 0x0, 0x10},
/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x86f8, 0x28, 0x0, 0x0, 0x18},
+ { 0x86f8, 0x30, 0x0, 0x0, 0x18},
/* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0x101f8, 0x10, 0x0, 0x0, 0x10},
/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
- { 0xdd08, 0x48, 0x0, 0x0, 0x38},
+ { 0xde28, 0x48, 0x0, 0x0, 0x38},
/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
{ 0x10660, 0x20, 0x0, 0x0, 0x20},
/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
{ 0x2b80, 0x80, 0x0, 0x0, 0x10},
/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
- { 0x5000, 0x10, 0x0, 0x0, 0x10},
+ { 0x5020, 0x10, 0x0, 0x0, 0x10},
+/* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
+ { 0xc9b0, 0x30, 0x0, 0x0, 0x10},
+/* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
+ { 0xeec0, 0x10, 0x0, 0x0, 0x10},
};
#endif /* __IRO_VALUES_H__ */
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 74f61b00..4ab8fd5f 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -29,6 +29,306 @@
#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
+struct ecore_l2_info {
+ u32 queues;
+ unsigned long **pp_qid_usage;
+
+ /* The lock is meant to synchronize access to the qid usage */
+ osal_mutex_t lock;
+};
+
+enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_l2_info *p_l2_info;
+ unsigned long **pp_qids;
+ u32 i;
+
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return ECORE_SUCCESS;
+
+ p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
+ if (!p_l2_info)
+ return ECORE_NOMEM;
+ p_hwfn->p_l2_info = p_l2_info;
+
+ if (IS_PF(p_hwfn->p_dev)) {
+ p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+ } else {
+ u8 rx = 0, tx = 0;
+
+ ecore_vf_get_num_rxqs(p_hwfn, &rx);
+ ecore_vf_get_num_txqs(p_hwfn, &tx);
+
+ p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
+ }
+
+ pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
+ sizeof(unsigned long *) *
+ p_l2_info->queues);
+ if (pp_qids == OSAL_NULL)
+ return ECORE_NOMEM;
+ p_l2_info->pp_qid_usage = pp_qids;
+
+ for (i = 0; i < p_l2_info->queues; i++) {
+ pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
+ MAX_QUEUES_PER_QZONE / 8);
+ if (pp_qids[i] == OSAL_NULL)
+ return ECORE_NOMEM;
+ }
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
+#endif
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
+{
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return;
+
+ OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
+}
+
+void ecore_l2_free(struct ecore_hwfn *p_hwfn)
+{
+ u32 i;
+
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return;
+
+ if (p_hwfn->p_l2_info == OSAL_NULL)
+ return;
+
+ if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
+ goto out_l2_info;
+
+ /* Free until hit first uninitialized entry */
+ for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
+ if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
+ break;
+ OSAL_VFREE(p_hwfn->p_dev,
+ p_hwfn->p_l2_info->pp_qid_usage[i]);
+ }
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ /* Lock is last to initialize, if everything else was */
+ if (i == p_hwfn->p_l2_info->queues)
+ OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
+#endif
+
+ OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
+
+out_l2_info:
+ OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
+ p_hwfn->p_l2_info = OSAL_NULL;
+}
+
+/* TODO - we'll need locking around these... */
+static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
+ u16 queue_id = p_cid->rel.queue_id;
+ bool b_rc = true;
+ u8 first;
+
+ OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);
+
+ if (queue_id > p_l2_info->queues) {
+ DP_NOTICE(p_hwfn, true,
+ "Requested to increase usage for qzone %04x out of %08x\n",
+ queue_id, p_l2_info->queues);
+ b_rc = false;
+ goto out;
+ }
+
+ first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
+ MAX_QUEUES_PER_QZONE);
+ if (first >= MAX_QUEUES_PER_QZONE) {
+ b_rc = false;
+ goto out;
+ }
+
+ OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
+ p_cid->qid_usage_idx = first;
+
+out:
+ OSAL_MUTEX_RELEASE(&p_l2_info->lock);
+ return b_rc;
+}
+
+static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);
+
+ OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
+ p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
+}
+
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ /* For VF-queues, stuff is a bit complicated as:
+ * - They always maintain the qid_usage on their own.
+ * - In legacy mode, they also maintain their CIDs.
+ */
+
+ /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
+ if (IS_PF(p_hwfn->p_dev) && !p_cid->b_legacy_vf)
+ _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
+ if (!p_cid->b_legacy_vf)
+ ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);
+ OSAL_VFREE(p_hwfn->p_dev, p_cid);
+}
+
+/* The internal is only meant to be directly called by PFs initializeing CIDs
+ * for their VFs.
+ */
+static struct ecore_queue_cid *
+_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid, u32 cid,
+ struct ecore_queue_start_common_params *p_params,
+ struct ecore_queue_cid_vf_params *p_vf_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
+ p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
+ if (p_cid == OSAL_NULL)
+ return OSAL_NULL;
+
+ p_cid->opaque_fid = opaque_fid;
+ p_cid->cid = cid;
+ p_cid->rel = *p_params;
+ p_cid->p_owner = p_hwfn;
+
+ /* Fill-in bits related to VFs' queues if information was provided */
+ if (p_vf_params != OSAL_NULL) {
+ p_cid->vfid = p_vf_params->vfid;
+ p_cid->vf_qid = p_vf_params->vf_qid;
+ p_cid->b_legacy_vf = p_vf_params->b_legacy;
+ } else {
+ p_cid->vfid = ECORE_QUEUE_CID_PF;
+ }
+
+ /* Don't try calculating the absolute indices for VFs */
+ if (IS_VF(p_hwfn->p_dev)) {
+ p_cid->abs = p_cid->rel;
+
+ goto out;
+ }
+
+ /* Calculate the engine-absolute indices of the resources.
+ * The would guarantee they're valid later on.
+ * In some cases [SBs] we already have the right values.
+ */
+ rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+
+ rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
+ &p_cid->abs.queue_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+
+ /* In case of a PF configuring its VF's queues, the stats-id is already
+ * absolute [since there's a single index that's suitable per-VF].
+ */
+ if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
+ rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
+ &p_cid->abs.stats_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+ } else {
+ p_cid->abs.stats_id = p_cid->rel.stats_id;
+ }
+
+ /* SBs relevant information was already provided as absolute */
+ p_cid->abs.sb = p_cid->rel.sb;
+ p_cid->abs.sb_idx = p_cid->rel.sb_idx;
+
+out:
+ /* VF-images have provided the qid_usage_idx on their own.
+ * Otherwise, we need to allocate a unique one.
+ */
+ if (!p_vf_params) {
+ if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
+ goto fail;
+ } else {
+ p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+ p_cid->opaque_fid, p_cid->cid,
+ p_cid->rel.vport_id, p_cid->abs.vport_id,
+ p_cid->rel.queue_id, p_cid->qid_usage_idx,
+ p_cid->abs.queue_id,
+ p_cid->rel.stats_id, p_cid->abs.stats_id,
+ p_cid->abs.sb, p_cid->abs.sb_idx);
+
+ return p_cid;
+
+fail:
+ OSAL_VFREE(p_hwfn->p_dev, p_cid);
+ return OSAL_NULL;
+}
+
+struct ecore_queue_cid *
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ struct ecore_queue_cid_vf_params *p_vf_params)
+{
+ struct ecore_queue_cid *p_cid;
+ u8 vfid = ECORE_CXT_PF_CID;
+ bool b_legacy_vf = false;
+ u32 cid = 0;
+
+ /* In case of legacy VFs, The CID can be derived from the additional
+ * VF parameters - the VF assumes queue X uses CID X, so we can simply
+ * use the vf_qid for this purpose as well.
+ */
+ if (p_vf_params) {
+ vfid = p_vf_params->vfid;
+
+ if (p_vf_params->b_legacy) {
+ b_legacy_vf = true;
+ cid = p_vf_params->vf_qid;
+ }
+ }
+
+ /* Get a unique firmware CID for this queue, in case it's a PF.
+ * VF's don't need a CID as the queue configuration will be done
+ * by PF.
+ */
+ if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
+ if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &cid, vfid) != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
+ return OSAL_NULL;
+ }
+ }
+
+ p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
+ p_params, p_vf_params);
+ if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
+ _ecore_cxt_release_cid(p_hwfn, cid, vfid);
+
+ return p_cid;
+}
+
+static struct ecore_queue_cid *
+ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params)
+{
+ return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
+}
+
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_start_params *p_params)
@@ -36,9 +336,9 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
+ u16 rx_mode = 0, tx_err = 0;
u8 abs_vport_id = 0;
enum _ecore_status_t rc = ECORE_NOTIMPL;
- u16 rx_mode = 0;
rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != ECORE_SUCCESS)
@@ -71,6 +371,30 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);
+ /* Handle requests for strict behavior on transmission errors */
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
+ p_params->b_err_illegal_vlan_mode ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
+ p_params->b_err_small_pkt ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
+ p_params->b_err_anti_spoof ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
+ p_params->b_err_illegal_inband_mode ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
+ p_params->b_err_vlan_insert_with_inband ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
+ p_params->b_err_big_pkt ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
+ p_params->b_err_ctrl_frame ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);
+
/* TPA related fields */
OSAL_MEMSET(&p_ramrod->tpa_param, 0,
sizeof(struct eth_vport_tpa_param));
@@ -129,10 +453,9 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
struct ecore_rss_params *p_rss)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct eth_vport_rss_config *p_config;
- u16 abs_l2_queue = 0;
- int i;
+ int i, table_size;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
if (!p_rss) {
p_ramrod->common.update_rss_flg = 0;
@@ -186,16 +509,40 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
p_config->capabilities,
p_config->update_rss_ind_table, p_config->update_rss_key);
- for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
- rc = ecore_fw_l2_queue(p_hwfn,
- (u8)p_rss->rss_ind_table[i],
- &abs_l2_queue);
- if (rc != ECORE_SUCCESS)
- return rc;
+ table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
+ 1 << p_config->tbl_size);
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];
+
+ if (!p_queue)
+ return ECORE_INVAL;
+
+ p_config->indirection_table[i] =
+ OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
+ }
- p_config->indirection_table[i] = OSAL_CPU_TO_LE16(abs_l2_queue);
- DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "i= %d, queue = %d\n",
- i, p_config->indirection_table[i]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "Configured RSS indirection table [%d entries]:\n",
+ table_size);
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
}
for (i = 0; i < 10; i++)
@@ -250,8 +597,8 @@ ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "p_ramrod->rx_mode.state = 0x%x\n",
- state);
+ "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
+ p_ramrod->common.vport_id, state);
}
/* Set Tx mode accept flags */
@@ -274,8 +621,8 @@ ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "p_ramrod->tx_mode.state = 0x%x\n",
- state);
+ "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
+ p_ramrod->common.vport_id, state);
}
}
@@ -534,57 +881,28 @@ ecore_filter_accept_cmd(struct ecore_dev *p_dev,
return 0;
}
-static void ecore_sp_release_queue_cid(struct ecore_hwfn *p_hwfn,
- struct ecore_hw_cid_data *p_cid_data)
-{
- if (!p_cid_data->b_cid_allocated)
- return;
-
- ecore_cxt_release_cid(p_hwfn, p_cid_data->cid);
- p_cid_data->b_cid_allocated = false;
-}
-
enum _ecore_status_t
-ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size, bool b_use_zone_a_prod)
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size)
{
struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- struct ecore_hw_cid_data *p_rx_cid;
- u16 abs_rx_q_id = 0;
- u8 abs_vport_id = 0;
enum _ecore_status_t rc = ECORE_NOTIMPL;
- /* Store information for the stop */
- p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
- p_rx_cid->cid = cid;
- p_rx_cid->opaque_fid = opaque_fid;
- p_rx_cid->vport_id = p_params->vport_id;
-
- rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, cid, p_params->queue_id,
- p_params->vport_id, p_params->sb);
+ "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
+ p_cid->abs.vport_id, p_cid->abs.sb);
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = cid;
- init_data.opaque_fid = opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -595,11 +913,11 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
- p_ramrod->sb_index = (u8)p_params->sb_idx;
- p_ramrod->vport_id = abs_vport_id;
- p_ramrod->stats_counter_id = p_params->stats_id;
- p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
+ p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
p_ramrod->complete_cqe_flg = 0;
p_ramrod->complete_event_flg = 1;
@@ -609,92 +927,88 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- if (p_params->vf_qid || b_use_zone_a_prod) {
- p_ramrod->vf_rx_prod_index = (u8)p_params->vf_qid;
+ if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
+ p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Queue%s is meant for VF rxq[%02x]\n",
- b_use_zone_a_prod ? " [legacy]" : "",
- p_params->vf_qid);
- p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
+ !!p_cid->b_legacy_vf ? " [legacy]" : "",
+ p_cid->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
}
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-enum _ecore_status_t
-ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
void OSAL_IOMEM * *pp_prod)
{
- struct ecore_hw_cid_data *p_rx_cid;
u32 init_prod_val = 0;
- u16 abs_l2_queue = 0;
- u8 abs_stats_id = 0;
- enum _ecore_status_t rc;
- if (IS_VF(p_hwfn->p_dev)) {
- return ecore_vf_pf_rxq_start(p_hwfn,
- p_params->queue_id,
- p_params->sb,
- (u8)p_params->sb_idx,
- bd_max_bytes,
- bd_chain_phys_addr,
- cqe_pbl_addr,
- cqe_pbl_size, pp_prod);
- }
-
- rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
+ *pp_prod = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)(&init_prod_val));
+ return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size);
+}
+
+enum _ecore_status_t
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *p_ret_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
/* Allocate a CID for the queue */
- p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
- rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
- &p_rx_cid->cid);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
- return rc;
- }
- p_rx_cid->b_cid_allocated = true;
- p_params->stats_id = abs_stats_id;
- p_params->vf_qid = 0;
-
- rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
- opaque_fid,
- p_rx_cid->cid,
- p_params,
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+ if (p_cid == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size,
+ &p_ret_params->p_prod);
+ else
+ rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr,
cqe_pbl_size,
- false);
+ &p_ret_params->p_prod);
+ /* Provide the caller with a reference to as handler */
if (rc != ECORE_SUCCESS)
- ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
return rc;
}
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
+ void **pp_rxq_handles,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
@@ -704,14 +1018,14 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- struct ecore_hw_cid_data *p_rx_cid;
- u16 qid, abs_rx_q_id = 0;
+ struct ecore_queue_cid *p_cid;
enum _ecore_status_t rc = ECORE_NOTIMPL;
u8 i;
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_rxqs_update(p_hwfn,
- rx_queue_id,
+ (struct ecore_queue_cid **)
+ pp_rxq_handles,
num_rxqs,
complete_cqe_flg,
complete_event_flg);
@@ -721,12 +1035,11 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
init_data.p_comp_data = p_comp_data;
for (i = 0; i < num_rxqs; i++) {
- qid = rx_queue_id + i;
- p_rx_cid = &p_hwfn->p_rx_cids[qid];
+ p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];
/* Get SPQ entry */
- init_data.cid = p_rx_cid->cid;
- init_data.opaque_fid = p_rx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
ETH_RAMROD_RX_QUEUE_UPDATE,
@@ -735,41 +1048,34 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.rx_queue_update;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
- ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
- ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
- p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
p_ramrod->complete_cqe_flg = complete_cqe_flg;
p_ramrod->complete_event_flg = complete_event_flg;
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (rc)
+ if (rc != ECORE_SUCCESS)
return rc;
}
return rc;
}
-enum _ecore_status_t
-ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only, bool cqe_completion)
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool b_eq_completion_only,
+ bool b_cqe_completion)
{
- struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- u16 abs_rx_q_id = 0;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
-
- if (IS_VF(p_hwfn->p_dev))
- return ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id,
- cqe_completion);
+ enum _ecore_status_t rc;
- /* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = p_rx_cid->cid;
- init_data.opaque_fid = p_rx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -779,64 +1085,56 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.rx_queue_stop;
-
- ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
- ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
- p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
/* Cleaning the queue requires the completion to arrive there.
* In addition, VFs require the answer to come as eqe to PF.
*/
- p_ramrod->complete_cqe_flg = (!!(p_rx_cid->opaque_fid ==
- p_hwfn->hw_info.opaque_fid) &&
- !eq_completion_only) || cqe_completion;
- p_ramrod->complete_event_flg = !(p_rx_cid->opaque_fid ==
- p_hwfn->hw_info.opaque_fid) ||
- eq_completion_only;
+ p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
+ !b_eq_completion_only) ||
+ b_cqe_completion;
+ p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
+ b_eq_completion_only;
- rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (rc != ECORE_SUCCESS)
- return rc;
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
- ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only,
+ bool cqe_completion)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
+ eq_completion_only,
+ cqe_completion);
+ else
+ rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
+
+ if (rc == ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
return rc;
}
enum _ecore_status_t
-ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- union ecore_qm_pq_params *p_pq_params)
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ u16 pq_id)
{
struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- struct ecore_hw_cid_data *p_tx_cid;
- u16 pq_id, abs_tx_q_id = 0;
- u8 abs_vport_id;
enum _ecore_status_t rc = ECORE_NOTIMPL;
- /* Store information for the stop */
- p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
- p_tx_cid->cid = cid;
- p_tx_cid->opaque_fid = opaque_fid;
-
- rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = cid;
- init_data.opaque_fid = opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -846,110 +1144,89 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.tx_queue_start;
- p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
- p_ramrod->sb_index = (u8)p_params->sb_idx;
- p_ramrod->stats_counter_id = p_params->stats_id;
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
+ p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
- p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_q_id);
+ p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+ p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
- pq_id = ecore_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-enum _ecore_status_t
-ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
u8 tc,
- dma_addr_t pbl_addr,
- u16 pbl_size,
+ dma_addr_t pbl_addr, u16 pbl_size,
void OSAL_IOMEM * *pp_doorbell)
{
- struct ecore_hw_cid_data *p_tx_cid;
- union ecore_qm_pq_params pq_params;
- u8 abs_stats_id = 0;
enum _ecore_status_t rc;
- if (IS_VF(p_hwfn->p_dev)) {
- return ecore_vf_pf_txq_start(p_hwfn,
- p_params->queue_id,
- p_params->sb,
- (u8)p_params->sb_idx,
- pbl_addr,
- pbl_size,
- pp_doorbell);
- }
-
- rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
+ /* TODO - set tc in the pq_params for multi-cos */
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
if (rc != ECORE_SUCCESS)
return rc;
- p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
- OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
- OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
-
- pq_params.eth.tc = tc;
-
- /* Allocate a CID for the queue */
- rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
- return rc;
- }
- p_tx_cid->b_cid_allocated = true;
+ /* Provide the caller with the necessary return values */
+ *pp_doorbell = (u8 OSAL_IOMEM *)
+ p_hwfn->doorbells +
+ DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, p_tx_cid->cid, p_params->queue_id,
- p_params->vport_id, p_params->sb);
+ return ECORE_SUCCESS;
+}
- p_params->stats_id = abs_stats_id;
+enum _ecore_status_t
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ struct ecore_txq_start_ret_params *p_ret_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
- /* TODO - set tc in the pq_params for multi-cos */
- rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
- opaque_fid,
- p_tx_cid->cid,
- p_params,
- pbl_addr,
- pbl_size,
- &pq_params);
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+ if (p_cid == OSAL_NULL)
+ return ECORE_INVAL;
- *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
- DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY);
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
+ else
+ rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
if (rc != ECORE_SUCCESS)
- ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
return rc;
}
-enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn)
-{
- return ECORE_NOTIMPL;
-}
-
-enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id)
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
{
- struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
-
- if (IS_VF(p_hwfn->p_dev))
- return ecore_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+ enum _ecore_status_t rc;
- /* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = p_tx_cid->cid;
- init_data.opaque_fid = p_tx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -958,11 +1235,22 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (rc != ECORE_SUCCESS)
- return rc;
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
+ enum _ecore_status_t rc;
- ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
+ else
+ rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);
+
+ if (rc == ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
return rc;
}
@@ -988,17 +1276,6 @@ ecore_filter_action(enum ecore_filter_opcode opcode)
return action;
}
-static void ecore_set_fw_mac_addr(__le16 *fw_msb,
- __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
-{
- ((u8 *)fw_msb)[0] = mac[1];
- ((u8 *)fw_msb)[1] = mac[0];
- ((u8 *)fw_mid)[0] = mac[3];
- ((u8 *)fw_mid)[1] = mac[2];
- ((u8 *)fw_lsb)[0] = mac[5];
- ((u8 *)fw_lsb)[1] = mac[4];
-}
-
static enum _ecore_status_t
ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
@@ -1093,6 +1370,9 @@ ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
case ECORE_FILTER_VNI:
p_first_filter->type = ETH_FILTER_TYPE_VNI;
break;
+ case ECORE_FILTER_UNUSED: /* @DPDK */
+ p_first_filter->type = MAX_ETH_FILTER_TYPE;
+ break;
}
if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
@@ -1738,3 +2018,87 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)
else
_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
}
+
+void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_arfs_config_params *p_cfg_params)
+{
+ if (p_cfg_params->arfs_enable) {
+ ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp,
+ p_cfg_params->udp,
+ p_cfg_params->ipv4,
+ p_cfg_params->ipv6);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+ p_cfg_params->tcp ? "Enable" : "Disable",
+ p_cfg_params->udp ? "Enable" : "Disable",
+ p_cfg_params->ipv4 ? "Enable" : "Disable",
+ p_cfg_params->ipv6 ? "Enable" : "Disable");
+ } else {
+ ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
+ p_cfg_params->arfs_enable ? "Enable" : "Disable");
+}
+
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length,
+ u16 qid, u8 vport_id,
+ bool b_is_add)
+{
+ struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (p_cb) {
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+ init_data.p_comp_data = p_cb;
+ } else {
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+ }
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_GFT_UPDATE_FILTER,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_update_gft;
+
+ DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
+ p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
+ p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->filter_type = RFS_FILTER_TYPE;
+ p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
+ : GFT_DELETE_FILTER;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "V[%0x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n",
+ abs_vport_id, abs_rx_q_id,
+ b_is_add ? "Adding" : "Removing",
+ (unsigned long)p_addr, length);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index 9c1bd388..7fe4cbcb 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -15,59 +15,106 @@
#include "ecore_spq.h"
#include "ecore_l2_api.h"
-/**
- * @brief ecore_sp_eth_tx_queue_update -
- *
- * This ramrod updates a TX queue. It is used for setting the active
- * state of the queue.
- *
- * @note Final phase API.
- *
- * @param p_hwfn
- *
- * @return enum _ecore_status_t
+#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
+#define ECORE_QUEUE_CID_PF (0xff)
+
+/* Additional parameters required for initialization of the queue_cid
+ * and are relevant only for a PF initializing one for its VFs.
*/
-enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn);
+struct ecore_queue_cid_vf_params {
+ /* Should match the VF's relative index */
+ u8 vfid;
+
+ /* 0-based queue index. Should reflect the relative qzone the
+ * VF thinks is associated with it [in its range].
+ */
+ u8 vf_qid;
+
+ /* Indicates a VF is legacy, making it differ in several things:
+ * - Producers would be placed in a different place.
+ * - Makes assumptions regarding the CIDs.
+ */
+ bool b_legacy;
+
+ /* For VFs, this index arrives via TLV to diffrentiate between
+ * different queues opened on the same qzone, and is passed
+ * [where the PF would have allocated it internally for its own].
+ */
+ u8 qid_usage_idx;
+};
+
+struct ecore_queue_cid {
+ /* 'Relative' is a relative term ;-). Usually the indices [not counting
+ * SBs] would be PF-relative, but there are some cases where that isn't
+ * the case - specifically for a PF configuring its VF indices it's
+ * possible some fields [E.g., stats-id] in 'rel' would already be abs.
+ */
+ struct ecore_queue_start_common_params rel;
+ struct ecore_queue_start_common_params abs;
+ u32 cid;
+ u16 opaque_fid;
+
+ /* VFs queues are mapped differently, so we need to know the
+ * relative queue associated with them [0-based].
+ * Notice this is relevant on the *PF* queue-cid of its VF's queues,
+ * and not on the VF itself.
+ */
+ u8 vfid;
+ u8 vf_qid;
+
+ /* We need an additional index to diffrentiate between queues opened
+ * for same queue-zone, as VFs would have to communicate the info
+ * to the PF [otherwise PF has no way to diffrentiate].
+ */
+ u8 qid_usage_idx;
+
+ /* Legacy VFs might have Rx producer located elsewhere */
+ bool b_legacy_vf;
+
+ struct ecore_hwfn *p_owner;
+};
+
+enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn);
+void ecore_l2_setup(struct ecore_hwfn *p_hwfn);
+void ecore_l2_free(struct ecore_hwfn *p_hwfn);
+
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid);
+
+struct ecore_queue_cid *
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ struct ecore_queue_cid_vf_params *p_vf_params);
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_start_params *p_params);
/**
- * @brief - Starts an Rx queue; Should be used where contexts are handled
- * outside of the ramrod area [specifically iov scenarios]
+ * @brief - Starts an Rx queue, when queue_cid is already prepared
*
* @param p_hwfn
- * @param opaque_fid
- * @param cid
- * @param p_params [queue_id, vport_id, stats_id, sb, sb_idx, vf_qid]
- stats_id is absolute packed in p_params.
+ * @param p_cid
* @param bd_max_bytes
* @param bd_chain_phys_addr
* @param cqe_pbl_addr
* @param cqe_pbl_size
- * @param b_use_zone_a_prod - support legacy VF producers
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size, bool b_use_zone_a_prod);
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size);
/**
- * @brief - Starts a Tx queue; Should be used where contexts are handled
- * outside of the ramrod area [specifically iov scenarios]
+ * @brief - Starts a Tx queue, where queue_cid is already prepared
*
* @param p_hwfn
- * @param opaque_fid
- * @param cid
- * @param p_params [queue_id, vport_id,stats_id, sb, sb_idx, vf_qid]
+ * @param p_cid
* @param pbl_addr
* @param pbl_size
* @param p_pq_params - parameters for choosing the PQ for this Tx queue
@@ -75,14 +122,38 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- union ecore_qm_pq_params *p_pq_params);
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ u16 pq_id);
u8 ecore_mcast_bin_from_mac(u8 *mac);
+/**
+ * @brief - ecore_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove arfs hw filter
+ *
+ * @params p_hwfn
+ * @params p_ptt
+ * @params p_cb Used for ECORE_SPQ_MODE_CB,where client would initialize
+ it with cookie and callback function address, if not
+ using this mode then client must pass NULL.
+ * @params p_addr p_addr is an actual packet header that needs to be
+ * filter. It has to mapped with IO to read prior to
+ * calling this, [contains 4 tuples- src ip, dest ip,
+ * src port, dest port].
+ * @params length length of p_addr header up to past the transport header.
+ * @params qid receive packet will be directed to this queue.
+ * @params vport_id
+ * @params b_is_add flag to add or remove filter.
+ *
+ */
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length,
+ u16 qid, u8 vport_id,
+ bool b_is_add);
#endif
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index 326fa45b..d09f3c4a 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -28,15 +28,26 @@ enum ecore_rss_caps {
#endif
struct ecore_queue_start_common_params {
- /* Rx/Tx queue id */
- u8 queue_id;
+ /* Should always be relative to entity sending this. */
u8 vport_id;
+ u16 queue_id;
- /* stats_id is relative or absolute depends on function */
+ /* Relative, but relevant only for PFs */
u8 stats_id;
+
+ /* These are always absolute */
u16 sb;
- u16 sb_idx;
- u16 vf_qid;
+ u8 sb_idx;
+};
+
+struct ecore_rxq_start_ret_params {
+ void OSAL_IOMEM *p_prod;
+ void *p_handle;
+};
+
+struct ecore_txq_start_ret_params {
+ void OSAL_IOMEM *p_doorbell;
+ void *p_handle;
};
struct ecore_rss_params {
@@ -48,7 +59,9 @@ struct ecore_rss_params {
u8 update_rss_key;
u8 rss_caps;
u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
- u16 rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+
+ /* Indirection table consist of rx queue handles */
+ void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
u32 rss_key[ECORE_RSS_KEY_SIZE];
};
@@ -89,6 +102,7 @@ enum ecore_filter_ucast_type {
ECORE_FILTER_INNER_MAC_VNI_PAIR,
ECORE_FILTER_MAC_VNI_PAIR,
ECORE_FILTER_VNI,
+ ECORE_FILTER_UNUSED, /* @DPDK */
};
struct ecore_filter_ucast {
@@ -127,6 +141,14 @@ struct ecore_filter_accept_flags {
#define ECORE_ACCEPT_BCAST 0x20
};
+struct ecore_arfs_config_params {
+ bool tcp;
+ bool udp;
+ bool ipv4;
+ bool ipv6;
+ bool arfs_enable; /* Enable or disable arfs mode */
+};
+
/* Add / remove / move / remove-all unicast MAC-VLAN filters.
* FW will assert in the following cases, so driver should take care...:
* 1. Adding a filter to a full table.
@@ -159,42 +181,37 @@ ecore_filter_accept_cmd(
struct ecore_spq_comp_cb *p_comp_data);
/**
- * @brief ecore_sp_eth_rx_queue_start - RX Queue Start Ramrod
+ * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
*
* This ramrod initializes an RX Queue for a VPort. An Assert is generated if
* the VPort ID is not currently initialized.
*
* @param p_hwfn
* @param opaque_fid
- * @p_params [stats_id is relative, packed in p_params]
+ * @p_params Inputs; Relative for PF [SB being an exception]
* @param bd_max_bytes Maximum bytes that can be placed on a BD
* @param bd_chain_phys_addr Physical address of BDs for receive.
* @param cqe_pbl_addr Physical address of the CQE PBL Table.
* @param cqe_pbl_size Size of the CQE PBL Table
- * @param pp_prod Pointer to place producer's
- * address for the Rx Q (May be
- * NULL).
+ * @param p_ret_params Pointed struct to be filled with outputs.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void OSAL_IOMEM * *pp_prod);
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *p_ret_params);
/**
- * @brief ecore_sp_eth_rx_queue_stop -
- *
- * This ramrod closes an RX queue. It sends RX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
*
* @param p_hwfn
- * @param rx_queue_id RX Queue ID
+ * @param p_rxq Handler of queue to close
* @param eq_completion_only If True completion will be on
* EQe, if False completion will be
* on EQe if p_hwfn opaque
@@ -205,13 +222,13 @@ ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only,
- bool cqe_completion);
+ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only,
+ bool cqe_completion);
/**
- * @brief ecore_sp_eth_tx_queue_start - TX Queue Start Ramrod
+ * @brief - TX Queue Start Ramrod
*
* This ramrod initializes a TX Queue for a VPort. An Assert is generated if
* the VPort is not currently initialized.
@@ -222,34 +239,29 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
* @param tc traffic class to use with this L2 txq
* @param pbl_addr address of the pbl array
* @param pbl_size number of entries in pbl
- * @param pp_doorbell Pointer to place doorbell pointer (May be NULL).
- * This address should be used with the
- * DIRECT_REG_WR macro.
+ * @param p_ret_params Pointer to fill the return parameters in.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
- u8 tc,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM * *pp_doorbell);
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ struct ecore_txq_start_ret_params *p_ret_params);
/**
- * @brief ecore_sp_eth_tx_queue_stop -
- *
- * This ramrod closes a TX queue. It sends TX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_tx_queue_stop - closes a Tx queue
*
* @param p_hwfn
- * @param tx_queue_id TX Queue ID
+ * @param p_txq - handle to Tx queue needed to be closed
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id);
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_txq);
enum ecore_tpa_mode {
ECORE_TPA_MODE_NONE,
@@ -273,6 +285,15 @@ struct ecore_sp_vport_start_params {
bool zero_placement_offset;
bool check_mac;
bool check_ethtype;
+
+ /* Strict behavior on transmission errors */
+ bool b_err_illegal_vlan_mode;
+ bool b_err_illegal_inband_mode;
+ bool b_err_vlan_insert_with_inband;
+ bool b_err_small_pkt;
+ bool b_err_big_pkt;
+ bool b_err_anti_spoof;
+ bool b_err_ctrl_frame;
};
/**
@@ -372,19 +393,19 @@ ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
* @note Final phase API.
*
* @param p_hwfn
- * @param rx_queue_id RX Queue ID
- * @param num_rxqs Allow to update multiple rx
- * queues, from rx_queue_id to
- * (rx_queue_id + num_rxqs)
+ * @param pp_rxq_handlers An array of queue handlers to be updated.
+ * @param num_rxqs number of queues to update.
* @param complete_cqe_flg Post completion to the CQE Ring if set
* @param complete_event_flg Post completion to the Event Ring if set
+ * @param comp_mode
+ * @param p_comp_data
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
+ void **pp_rxq_handlers,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
@@ -401,4 +422,18 @@ void ecore_get_vport_stats(struct ecore_dev *p_dev,
void ecore_reset_vport_stats(struct ecore_dev *p_dev);
+/**
+ *@brief ecore_arfs_mode_configure -
+ *
+ *Enable or disable rfs mode. It must accept atleast one of tcp or udp true
+ *and atleast one of ipv4 or ipv6 true to enable rfs mode.
+ *
+ *@param p_hwfn
+ *@param p_ptt
+ *@param p_cfg_params arfs mode configuration parameters.
+ *
+ */
+void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_arfs_config_params *p_cfg_params);
#endif
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 2ff97155..a834ac74 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -104,7 +104,6 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
}
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
- p_hwfn->mcp_info = OSAL_NULL;
return ECORE_SUCCESS;
}
@@ -365,6 +364,7 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_mb_params *p_mb_params)
{
+ union drv_union_data union_data;
u32 union_data_addr;
enum _ecore_status_t rc;
@@ -374,6 +374,15 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
return ECORE_BUSY;
}
+ if (p_mb_params->data_src_size > sizeof(union_data) ||
+ p_mb_params->data_dst_size > sizeof(union_data)) {
+ DP_ERR(p_hwfn,
+ "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+ p_mb_params->data_src_size, p_mb_params->data_dst_size,
+ sizeof(union_data));
+ return ECORE_INVAL;
+ }
+
union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
OFFSETOF(struct public_drv_mb, union_data);
@@ -384,19 +393,21 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- if (p_mb_params->p_data_src != OSAL_NULL)
- ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
- p_mb_params->p_data_src,
- sizeof(*p_mb_params->p_data_src));
+ OSAL_MEM_ZERO(&union_data, sizeof(union_data));
+ if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
+ OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
+ p_mb_params->data_src_size);
+ ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+ sizeof(union_data));
rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
p_mb_params->param, &p_mb_params->mcp_resp,
&p_mb_params->mcp_param);
- if (p_mb_params->p_data_dst != OSAL_NULL)
+ if (p_mb_params->p_data_dst != OSAL_NULL &&
+ p_mb_params->data_dst_size)
ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
- union_data_addr,
- sizeof(*p_mb_params->p_data_dst));
+ union_data_addr, p_mb_params->data_dst_size);
ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
@@ -444,14 +455,13 @@ enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
u32 i_txn_size, u32 *i_buf)
{
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
- OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = i_buf;
+ mb_params.data_src_size = (u8)i_txn_size;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
@@ -471,13 +481,17 @@ enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
u32 *o_txn_size, u32 *o_buf)
{
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
- mb_params.p_data_dst = &union_data;
+ mb_params.p_data_dst = raw_data;
+
+ /* Use the maximal value since the actual one is part of the response */
+ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
@@ -486,7 +500,8 @@ enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
*o_mcp_param = mb_params.mcp_param;
*o_txn_size = *o_mcp_param;
- OSAL_MEMCPY(o_buf, (u32 *)&union_data.raw_data, *o_txn_size);
+ /* @DPDK */
+ OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
return ECORE_SUCCESS;
}
@@ -519,57 +534,389 @@ static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
}
#endif
+static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
+{
+ return (drv_role == DRV_ROLE_OS &&
+ exist_drv_role == DRV_ROLE_PREBOOT) ||
+ (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
+}
+
+static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
+ &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send cancel load request, rc = %d\n", rc);
+
+ return rc;
+}
+
+#define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
+#define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
+#define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
+#define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
+#define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
+#define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
+#define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
+
+static u32 ecore_get_config_bitmap(void)
+{
+ u32 config_bitmap = 0x0;
+
+#ifdef CONFIG_ECORE_L2
+ config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_SRIOV
+ config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_ROCE
+ config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_IWARP
+ config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_FCOE
+ config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_ISCSI
+ config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_LL2
+ config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
+#endif
+
+ return config_bitmap;
+}
+
+struct ecore_load_req_in_params {
+ u8 hsi_ver;
+#define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
+#define ECORE_LOAD_REQ_HSI_VER_1 1
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u8 drv_role;
+ u8 timeout_val;
+ u8 force_cmd;
+ bool avoid_eng_reset;
+};
+
+struct ecore_load_req_out_params {
+ u32 load_code;
+ u32 exist_drv_ver_0;
+ u32 exist_drv_ver_1;
+ u32 exist_fw_ver;
+ u8 exist_drv_role;
+ u8 mfw_hsi_ver;
+ bool drv_exists;
+};
+
+static enum _ecore_status_t
+__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_load_req_in_params *p_in_params,
+ struct ecore_load_req_out_params *p_out_params)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ u32 hsi_ver;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&load_req, sizeof(load_req));
+ load_req.drv_ver_0 = p_in_params->drv_ver_0;
+ load_req.drv_ver_1 = p_in_params->drv_ver_1;
+ load_req.fw_ver = p_in_params->fw_ver;
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
+ p_in_params->drv_role);
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+ p_in_params->timeout_val);
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
+ p_in_params->force_cmd);
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+ p_in_params->avoid_eng_reset);
+
+ hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
+ DRV_ID_MCP_HSI_VER_CURRENT :
+ (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
+ mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
+ mb_params.p_data_src = &load_req;
+ mb_params.data_src_size = sizeof(load_req);
+ mb_params.p_data_dst = &load_rsp;
+ mb_params.data_dst_size = sizeof(load_rsp);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+ mb_params.param,
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+
+ if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
+ load_req.drv_ver_0, load_req.drv_ver_1,
+ load_req.fw_ver, load_req.misc0,
+ ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+ ECORE_MFW_GET_FIELD(load_req.misc0,
+ LOAD_REQ_LOCK_TO),
+ ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+ ECORE_MFW_GET_FIELD(load_req.misc0,
+ LOAD_REQ_FLAGS0));
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send load request, rc = %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
+ p_out_params->load_code = mb_params.mcp_resp;
+
+ if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
+ p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
+ load_rsp.drv_ver_0, load_rsp.drv_ver_1,
+ load_rsp.fw_ver, load_rsp.misc0,
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+ ECORE_MFW_GET_FIELD(load_rsp.misc0,
+ LOAD_RSP_FLAGS0));
+
+ p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
+ p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
+ p_out_params->exist_fw_ver = load_rsp.fw_ver;
+ p_out_params->exist_drv_role =
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+ p_out_params->mfw_hsi_ver =
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+ p_out_params->drv_exists =
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+ LOAD_RSP_FLAGS0_DRV_EXISTS;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
+ enum ecore_drv_role drv_role,
+ u8 *p_mfw_drv_role)
+{
+ switch (drv_role) {
+ case ECORE_DRV_ROLE_OS:
+ *p_mfw_drv_role = DRV_ROLE_OS;
+ break;
+ case ECORE_DRV_ROLE_KDUMP:
+ *p_mfw_drv_role = DRV_ROLE_KDUMP;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum ecore_load_req_force {
+ ECORE_LOAD_REQ_FORCE_NONE,
+ ECORE_LOAD_REQ_FORCE_PF,
+ ECORE_LOAD_REQ_FORCE_ALL,
+};
+
+static enum _ecore_status_t
+ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
+ enum ecore_load_req_force force_cmd,
+ u8 *p_mfw_force_cmd)
+{
+ switch (force_cmd) {
+ case ECORE_LOAD_REQ_FORCE_NONE:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
+ break;
+ case ECORE_LOAD_REQ_FORCE_PF:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
+ break;
+ case ECORE_LOAD_REQ_FORCE_ALL:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 *p_load_code)
+ struct ecore_load_req_params *p_params)
{
- struct ecore_dev *p_dev = p_hwfn->p_dev;
- struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct ecore_load_req_out_params out_params;
+ struct ecore_load_req_in_params in_params;
+ u8 mfw_drv_role, mfw_force_cmd;
enum _ecore_status_t rc;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- ecore_mcp_mf_workaround(p_hwfn, p_load_code);
+ ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
return ECORE_SUCCESS;
}
#endif
- OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
- mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
- mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
- p_dev->drv_type;
- OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);
- mb_params.p_data_src = &union_data;
- rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
+ in_params.drv_ver_0 = ECORE_VERSION;
+ in_params.drv_ver_1 = ecore_get_config_bitmap();
+ in_params.fw_ver = STORM_FW_VERSION;
+ rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+ if (rc != ECORE_SUCCESS)
+ return rc;
- /* if mcp fails to respond we must abort */
- if (rc != ECORE_SUCCESS) {
- DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ in_params.drv_role = mfw_drv_role;
+ in_params.timeout_val = p_params->timeout_val;
+ rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
+ &mfw_force_cmd);
+ if (rc != ECORE_SUCCESS)
return rc;
- }
- *p_load_code = mb_params.mcp_resp;
+ in_params.force_cmd = mfw_force_cmd;
+ in_params.avoid_eng_reset = p_params->avoid_eng_reset;
+
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* First handle cases where another load request should/might be sent:
+ * - MFW expects the old interface [HSI version = 1]
+ * - MFW responds that a force load request is required
+ */
+ if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_INFO(p_hwfn,
+ "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
+
+ /* The previous load request set the mailbox blocking */
+ p_hwfn->mcp_info->block_mb_sending = false;
+
+ in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ } else if (out_params.load_code ==
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
+ /* The previous load request set the mailbox blocking */
+ p_hwfn->mcp_info->block_mb_sending = false;
+
+ if (ecore_mcp_can_force_load(in_params.drv_role,
+ out_params.exist_drv_role)) {
+ DP_INFO(p_hwfn,
+ "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ rc = ecore_get_mfw_force_cmd(p_hwfn,
+ ECORE_LOAD_REQ_FORCE_ALL,
+ &mfw_force_cmd);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ in_params.force_cmd = mfw_force_cmd;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding to prevent disruption of active PFs.\n",
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
+ return ECORE_BUSY;
+ }
+ }
- /* If MFW refused (e.g. other port is in diagnostic mode) we
- * must abort. This can happen in the following cases:
- * - Other port is in diagnostic mode
- * - Previously loaded function on the engine is not compliant with
- * the requester.
- * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
- * -
+ /* Now handle the other types of responses.
+ * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
+ * expected here after the additional revised load requests were sent.
*/
- if (!(*p_load_code) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
- DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+ switch (out_params.load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
+ out_params.drv_exists) {
+ /* The role and fw/driver version match, but the PF is
+ * already loaded and has not been unloaded gracefully.
+ * This is unexpected since a quasi-FLR request was
+ * previously sent as part of ecore_hw_prepare().
+ */
+ DP_NOTICE(p_hwfn, false,
+ "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
+ return ECORE_INVAL;
+ }
+ break;
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
+ DP_NOTICE(p_hwfn, false,
+ "MFW refused a load request [resp 0x%08x]. Aborting.\n",
+ out_params.load_code);
return ECORE_BUSY;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
+ out_params.load_code);
+ break;
}
+ p_params->load_code = out_params.load_code;
+
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 wol_param, mcp_resp, mcp_param;
+
+ /* @DPDK */
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
+ &mcp_resp, &mcp_param);
+}
+
+enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct mcp_mac wol_mac;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
+
+ return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
+
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
@@ -611,7 +958,6 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
u32 func_addr = SECTION_ADDR(mfw_func_offsize,
MCP_PF_ID(p_hwfn));
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
enum _ecore_status_t rc;
int i;
@@ -622,8 +968,8 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
- OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = vfs_to_ack;
+ mb_params.data_src_size = VF_MAX_STATIC / 8;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
&mb_params);
if (rc != ECORE_SUCCESS) {
@@ -801,9 +1147,6 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
- if (p_link->link_up)
- ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, p_link->pfc_enabled);
-
OSAL_LINK_UPDATE(p_hwfn);
}
@@ -812,8 +1155,7 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
{
struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
- struct eth_phy_cfg *p_phy_cfg;
+ struct eth_phy_cfg phy_cfg;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cmd;
@@ -823,32 +1165,30 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
#endif
/* Set the shmem configuration according to params */
- p_phy_cfg = &union_data.drv_phy_cfg;
- OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
+ OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
- p_phy_cfg->speed = params->speed.forced_speed;
- p_phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
- p_phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
- p_phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
- p_phy_cfg->adv_speed = params->speed.advertised_speeds;
- p_phy_cfg->loopback_mode = params->loopback_mode;
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
p_hwfn->b_drv_link_init = b_up;
if (b_up)
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"Configuring Link: Speed 0x%08x, Pause 0x%08x,"
- " adv_speed 0x%08x, loopback 0x%08x,"
- " features 0x%08x\n",
- p_phy_cfg->speed, p_phy_cfg->pause,
- p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode,
- p_phy_cfg->feature_config_flags);
+ " adv_speed 0x%08x, loopback 0x%08x\n",
+ phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
+ phy_cfg.loopback_mode);
else
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &phy_cfg;
+ mb_params.data_src_size = sizeof(phy_cfg);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
@@ -927,8 +1267,8 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
enum ecore_mcp_protocol_type stats_type;
union ecore_mcp_protocol_stats stats;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
u32 hsi_param;
+ enum _ecore_status_t rc;
switch (type) {
case MFW_DRV_MSG_GET_LAN_STATS:
@@ -936,7 +1276,7 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
break;
default:
- DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
+ DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
return;
}
@@ -945,14 +1285,15 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_GET_STATS;
mb_params.param = hsi_param;
- OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
- mb_params.p_data_src = &union_data;
- ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ mb_params.p_data_src = &stats;
+ mb_params.data_src_size = sizeof(stats);
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}
-static void
-ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
- struct public_func *p_shmem_info)
+static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
{
struct ecore_mcp_function_info *p_info;
@@ -1043,28 +1384,38 @@ static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}
+struct ecore_mdump_cmd_params {
+ u32 cmd;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+};
+
static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- u32 mdump_cmd, union drv_union_data *p_data_src,
- union drv_union_data *p_data_dst, u32 *p_mcp_resp)
+ struct ecore_mdump_cmd_params *p_mdump_cmd_params)
{
struct ecore_mcp_mb_params mb_params;
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
- mb_params.param = mdump_cmd;
- mb_params.p_data_src = p_data_src;
- mb_params.p_data_dst = p_data_dst;
+ mb_params.param = p_mdump_cmd_params->cmd;
+ mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
+ mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
+ mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
+ mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
- *p_mcp_resp = mb_params.mcp_resp;
- if (*p_mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+ p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+ if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
DP_NOTICE(p_hwfn, false,
"MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
- mdump_cmd);
+ p_mdump_cmd_params->cmd);
rc = ECORE_INVAL;
}
@@ -1074,99 +1425,123 @@ ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_ACK,
- OSAL_NULL, OSAL_NULL, &mcp_resp);
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 epoch)
{
- union drv_union_data union_data;
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
- OSAL_MEMCPY(&union_data.raw_data, &epoch, sizeof(epoch));
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
+ mdump_cmd_params.p_data_src = &epoch;
+ mdump_cmd_params.data_src_size = sizeof(epoch);
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_SET_VALUES,
- &union_data, OSAL_NULL, &mcp_resp);
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_TRIGGER,
- OSAL_NULL, OSAL_NULL, &mcp_resp);
-}
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
-enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- u32 mcp_resp;
-
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_CLEAR_LOGS,
- OSAL_NULL, OSAL_NULL, &mcp_resp);
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct mdump_config_stc *p_mdump_config)
{
- union drv_union_data union_data;
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
enum _ecore_status_t rc;
- rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_GET_CONFIG,
- OSAL_NULL, &union_data, &mcp_resp);
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
+ mdump_cmd_params.p_data_dst = p_mdump_config;
+ mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
+
+ rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
if (rc != ECORE_SUCCESS)
return rc;
- /* A zero response implies that the mdump command is not supported */
- if (!mcp_resp)
+ if (mdump_cmd_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
return ECORE_NOTIMPL;
+ }
- if (mcp_resp != FW_MSG_CODE_OK) {
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
DP_NOTICE(p_hwfn, false,
"Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
- mcp_resp);
+ mdump_cmd_params.mcp_resp);
rc = ECORE_UNKNOWN_ERROR;
}
- OSAL_MEMCPY(p_mdump_config, &union_data.mdump_config,
- sizeof(*p_mdump_config));
-
return rc;
}
-enum _ecore_status_t ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+enum _ecore_status_t
+ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_info *p_mdump_info)
{
+ u32 addr, global_offsize, global_addr;
struct mdump_config_stc mdump_config;
enum _ecore_status_t rc;
- rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
- if (rc != ECORE_SUCCESS)
- return rc;
+ OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "MFW mdump_config: version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
- mdump_config.version, mdump_config.config, mdump_config.epoc,
- mdump_config.num_of_logs, mdump_config.valid_logs);
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
+ global_addr +
+ OFFSETOF(struct public_global,
+ mdump_reason));
- if (mdump_config.valid_logs > 0) {
- DP_NOTICE(p_hwfn, false,
- "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
+ if (p_mdump_info->reason) {
+ rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_mdump_info->version = mdump_config.version;
+ p_mdump_info->config = mdump_config.config;
+ p_mdump_info->epoch = mdump_config.epoc;
+ p_mdump_info->num_of_logs = mdump_config.num_of_logs;
+ p_mdump_info->valid_logs = mdump_config.valid_logs;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
+ p_mdump_info->reason, p_mdump_info->version,
+ p_mdump_info->config, p_mdump_info->epoch,
+ p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mdump info: reason %d\n", p_mdump_info->reason);
}
- return rc;
+ return ECORE_SUCCESS;
}
-void ecore_mcp_mdump_enable(struct ecore_dev *p_dev, bool mdump_enable)
+enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
- p_dev->mdump_en = mdump_enable;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
@@ -1184,6 +1559,7 @@ static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
if (p_hwfn->p_dev->mdump_en) {
DP_NOTICE(p_hwfn, false,
"Not acknowledging the notification to allow the MFW crash dump\n");
+ p_hwfn->p_dev->mdump_en = false;
return;
}
@@ -1256,9 +1632,7 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
break;
default:
- /* @DPDK */
- DP_NOTICE(p_hwfn, false,
- "Unimplemented MFW message %d\n", i);
+ DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = ECORE_INVAL;
}
}
@@ -1364,16 +1738,47 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
return ECORE_SUCCESS;
}
+/* @DPDK */
+/* Old MFW has a global configuration for all PFs regarding RDMA support */
+static void
+ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
+ enum ecore_pci_personality *p_proto)
+{
+ *p_proto = ECORE_PCI_ETH;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "According to Legacy capabilities, L2 personality is %08x\n",
+ (u32)*p_proto);
+}
+
+/* @DPDK */
+static enum _ecore_status_t
+ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_pci_personality *p_proto)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
+ (u32)*p_proto, resp, param);
+ return ECORE_SUCCESS;
+}
+
static enum _ecore_status_t
ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
struct public_func *p_info,
+ struct ecore_ptt *p_ptt,
enum ecore_pci_personality *p_proto)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
case FUNC_MF_CFG_PROTOCOL_ETHERNET:
- *p_proto = ECORE_PCI_ETH;
+ if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
+ ECORE_SUCCESS)
+ ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
break;
default:
rc = ECORE_INVAL;
@@ -1394,7 +1799,8 @@ enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
info->pause_on_host = (shmem_info.config &
FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
- if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
+ if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
+ &info->protocol)) {
DP_ERR(p_hwfn, "Unknown personality %08x\n",
(u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
return ECORE_INVAL;
@@ -1422,6 +1828,13 @@ enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+ info->mtu = (u16)shmem_info.mtu_size;
+
+ if (info->mtu == 0)
+ info->mtu = 1500;
+
+ info->mtu = (u16)shmem_info.mtu_size;
+
DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
"Read configuration from shmem: pause_on_host %02x"
" protocol %02x BW [%02x - %02x]"
@@ -1543,8 +1956,9 @@ int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
continue;
- if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
- &protocol) != ECORE_SUCCESS)
+ if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
+ &protocol) !=
+ ECORE_SUCCESS)
continue;
if ((1 << ((u32)protocol)) & personalities)
@@ -1636,9 +2050,8 @@ enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mcp_drv_version *p_ver)
{
- struct drv_version_stc *p_drv_version;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct drv_version_stc drv_version;
u32 num_words, i;
void *p_name;
OSAL_BE32 val;
@@ -1649,18 +2062,20 @@ ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return ECORE_SUCCESS;
#endif
- p_drv_version = &union_data.drv_version;
- p_drv_version->version = p_ver->version;
+ OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
+ drv_version.version = p_ver->version;
num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
for (i = 0; i < num_words; i++) {
+ /* The driver name is expected to be in a big-endian format */
p_name = &p_ver->name[i * sizeof(u32)];
val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
- *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+ *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
}
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &drv_version;
+ mb_params.data_src_size = sizeof(drv_version);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
@@ -1700,22 +2115,24 @@ enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- enum ecore_ov_config_method config,
enum ecore_ov_client client)
{
enum _ecore_status_t rc;
u32 resp = 0, param = 0;
u32 drv_mb_param;
- switch (config) {
+ switch (client) {
case ECORE_OV_CLIENT_DRV:
drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
break;
case ECORE_OV_CLIENT_USER:
drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
break;
+ case ECORE_OV_CLIENT_VENDOR_SPEC:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
+ break;
default:
- DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", config);
+ DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
return ECORE_INVAL;
}
@@ -1752,9 +2169,9 @@ ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
}
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
- drv_state, &resp, &param);
+ drv_mb_param, &resp, &param);
if (rc != ECORE_SUCCESS)
- DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ DP_ERR(p_hwfn, "Failed to send driver state\n");
return rc;
}
@@ -2251,7 +2668,7 @@ enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 drv_mb_param = 0, rsp, param;
+ u32 drv_mb_param, rsp, param;
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
@@ -2327,28 +2744,25 @@ ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
struct ecore_temperature_info *p_temp_info)
{
struct ecore_temperature_sensor *p_temp_sensor;
- struct temperature_status_stc *p_mfw_temp_info;
+ struct temperature_status_stc mfw_temp_info;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
u32 val;
enum _ecore_status_t rc;
u8 i;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
- mb_params.p_data_dst = &union_data;
+ mb_params.p_data_dst = &mfw_temp_info;
+ mb_params.data_dst_size = sizeof(mfw_temp_info);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
- p_mfw_temp_info = &union_data.temp_info;
-
OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
- p_temp_info->num_sensors = OSAL_MIN_T(u32,
- p_mfw_temp_info->num_of_sensors,
+ p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
ECORE_MAX_NUM_OF_SENSORS);
for (i = 0; i < p_temp_info->num_sensors; i++) {
- val = p_mfw_temp_info->sensor[i];
+ val = mfw_temp_info.sensor[i];
p_temp_sensor = &p_temp_info->sensors[i];
p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
SENSOR_LOCATION_SHIFT;
@@ -2403,7 +2817,60 @@ enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
0, &rsp, (u32 *)num_events);
}
-#define ECORE_RESC_ALLOC_VERSION_MAJOR 1
+static enum resource_id_enum
+ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
+{
+ enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
+
+ switch (res_id) {
+ case ECORE_SB:
+ mfw_res_id = RESOURCE_NUM_SB_E;
+ break;
+ case ECORE_L2_QUEUE:
+ mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
+ break;
+ case ECORE_VPORT:
+ mfw_res_id = RESOURCE_NUM_VPORT_E;
+ break;
+ case ECORE_RSS_ENG:
+ mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
+ break;
+ case ECORE_PQ:
+ mfw_res_id = RESOURCE_NUM_PQ_E;
+ break;
+ case ECORE_RL:
+ mfw_res_id = RESOURCE_NUM_RL_E;
+ break;
+ case ECORE_MAC:
+ case ECORE_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ mfw_res_id = RESOURCE_VFC_FILTER_E;
+ break;
+ case ECORE_ILT:
+ mfw_res_id = RESOURCE_ILT_E;
+ break;
+ case ECORE_LL2_QUEUE:
+ mfw_res_id = RESOURCE_LL2_QUEUE_E;
+ break;
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ mfw_res_id = RESOURCE_CQS_E;
+ break;
+ case ECORE_RDMA_STATS_QUEUE:
+ mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
+ break;
+ case ECORE_BDQ:
+ mfw_res_id = RESOURCE_BDQ_E;
+ break;
+ default:
+ break;
+ }
+
+ return mfw_res_id;
+}
+
+#define ECORE_RESC_ALLOC_VERSION_MAJOR 2
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION \
((ECORE_RESC_ALLOC_VERSION_MAJOR << \
@@ -2411,34 +2878,146 @@ enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
(ECORE_RESC_ALLOC_VERSION_MINOR << \
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
-enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct resource_info *p_resc_info,
- u32 *p_mcp_resp, u32 *p_mcp_param)
+struct ecore_resc_alloc_in_params {
+ u32 cmd;
+ enum ecore_resources res_id;
+ u32 resc_max_val;
+};
+
+struct ecore_resc_alloc_out_params {
+ u32 mcp_resp;
+ u32 mcp_param;
+ u32 resc_num;
+ u32 resc_start;
+ u32 vf_resc_num;
+ u32 vf_resc_start;
+ u32 flags;
+};
+
+static enum _ecore_status_t
+ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_resc_alloc_in_params *p_in_params,
+ struct ecore_resc_alloc_out_params *p_out_params)
{
struct ecore_mcp_mb_params mb_params;
- union drv_union_data *p_union_data;
+ struct resource_info mfw_resc_info;
enum _ecore_status_t rc;
+ OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
+
+ mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
+ if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
+ DP_ERR(p_hwfn,
+ "Failed to match resource %d [%s] with the MFW resources\n",
+ p_in_params->res_id,
+ ecore_hw_get_resc_name(p_in_params->res_id));
+ return ECORE_INVAL;
+ }
+
+ switch (p_in_params->cmd) {
+ case DRV_MSG_SET_RESOURCE_VALUE_MSG:
+ mfw_resc_info.size = p_in_params->resc_max_val;
+ /* Fallthrough */
+ case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
+ p_in_params->cmd);
+ return ECORE_INVAL;
+ }
+
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
- mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ mb_params.cmd = p_in_params->cmd;
mb_params.param = ECORE_RESC_ALLOC_VERSION;
- p_union_data = (union drv_union_data *)p_resc_info;
- mb_params.p_data_src = p_union_data;
- mb_params.p_data_dst = p_union_data;
+ mb_params.p_data_src = &mfw_resc_info;
+ mb_params.data_src_size = sizeof(mfw_resc_info);
+ mb_params.p_data_dst = mb_params.p_data_src;
+ mb_params.data_dst_size = mb_params.data_src_size;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
+ p_in_params->cmd, p_in_params->res_id,
+ ecore_hw_get_resc_name(p_in_params->res_id),
+ ECORE_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ ECORE_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_in_params->resc_max_val);
+
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
- *p_mcp_resp = mb_params.mcp_resp;
- *p_mcp_param = mb_params.mcp_param;
+ p_out_params->mcp_resp = mb_params.mcp_resp;
+ p_out_params->mcp_param = mb_params.mcp_param;
+ p_out_params->resc_num = mfw_resc_info.size;
+ p_out_params->resc_start = mfw_resc_info.offset;
+ p_out_params->vf_resc_num = mfw_resc_info.vf_size;
+ p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
+ p_out_params->flags = mfw_resc_info.flags;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x,"
- " offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
- *p_mcp_param, p_resc_info->res_id, p_resc_info->size,
- p_resc_info->offset, p_resc_info->vf_size,
- p_resc_info->vf_offset, p_resc_info->flags);
+ "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
+ ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_out_params->resc_num, p_out_params->resc_start,
+ p_out_params->vf_resc_num, p_out_params->vf_resc_start,
+ p_out_params->flags);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp)
+{
+ struct ecore_resc_alloc_out_params out_params;
+ struct ecore_resc_alloc_in_params in_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
+ in_params.res_id = res_id;
+ in_params.resc_max_val = resc_max_val;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 *p_mcp_resp,
+ u32 *p_resc_num, u32 *p_resc_start)
+{
+ struct ecore_resc_alloc_out_params out_params;
+ struct ecore_resc_alloc_in_params in_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ in_params.res_id = res_id;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ *p_resc_num = out_params.resc_num;
+ *p_resc_start = out_params.resc_start;
+ }
return ECORE_SUCCESS;
}
@@ -2448,6 +3027,182 @@ enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
{
u32 mcp_resp, mcp_param;
- return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR,
- 0, &mcp_resp, &mcp_param);
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
+ &mcp_resp, &mcp_param);
+}
+
+static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 param, u32 *p_mcp_resp,
+ u32 *p_mcp_param)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
+ p_mcp_resp, p_mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The resource command is unsupported by the MFW\n");
+ return ECORE_NOTIMPL;
+ }
+
+ if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
+ u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+
+ DP_NOTICE(p_hwfn, false,
+ "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
+ param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+__ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ enum _ecore_status_t rc;
+
+ switch (p_params->timeout) {
+ case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
+ opcode = RESOURCE_OPCODE_REQ;
+ p_params->timeout = 0;
+ break;
+ case ECORE_MCP_RESC_LOCK_TO_NONE:
+ opcode = RESOURCE_OPCODE_REQ_WO_AGING;
+ p_params->timeout = 0;
+ break;
+ default:
+ opcode = RESOURCE_OPCODE_REQ_W_AGING;
+ break;
+ }
+
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
+ param, p_params->timeout, opcode, p_params->resource);
+
+ /* Attempt to acquire the resource */
+ rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
+ &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Analyze the response */
+ p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
+ RESOURCE_CMD_RSP_OWNER);
+ opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
+ mcp_param, opcode, p_params->owner);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_GNT:
+ p_params->b_granted = true;
+ break;
+ case RESOURCE_OPCODE_BUSY:
+ p_params->b_granted = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params)
+{
+ u32 retry_cnt = 0;
+ enum _ecore_status_t rc;
+
+ do {
+ /* No need for an interval before the first iteration */
+ if (retry_cnt) {
+ if (p_params->sleep_b4_retry) {
+ u16 retry_interval_in_ms =
+ DIV_ROUND_UP(p_params->retry_interval,
+ 1000);
+
+ OSAL_MSLEEP(retry_interval_in_ms);
+ } else {
+ OSAL_UDELAY(p_params->retry_interval);
+ }
+ }
+
+ rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (p_params->b_granted)
+ break;
+ } while (retry_cnt++ < p_params->retry_num);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_unlock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ enum _ecore_status_t rc;
+
+ opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
+ : RESOURCE_OPCODE_RELEASE;
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
+ param, opcode, p_params->resource);
+
+ /* Attempt to release the resource */
+ rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
+ &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Analyze the response */
+ opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
+ mcp_param, opcode);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_RELEASED_PREVIOUS:
+ DP_INFO(p_hwfn,
+ "Resource unlock request for an already released resource [%d]\n",
+ p_params->resource);
+ /* Fallthrough */
+ case RESOURCE_OPCODE_RELEASED:
+ p_params->b_released = true;
+ break;
+ case RESOURCE_OPCODE_WRONG_OWNER:
+ p_params->b_released = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
}
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 831890ca..37d1835f 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -11,6 +11,7 @@
#include "bcm_osal.h"
#include "mcp_public.h"
+#include "ecore.h"
#include "ecore_mcp_api.h"
/* Using hwfn number (and not pf_num) is required since in CMT mode,
@@ -64,12 +65,22 @@ struct ecore_mcp_info {
struct ecore_mcp_mb_params {
u32 cmd;
u32 param;
- union drv_union_data *p_data_src;
- union drv_union_data *p_data_dst;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
u32 mcp_resp;
u32 mcp_param;
};
+struct ecore_drv_tlv_hdr {
+ u8 tlv_type; /* According to the enum below */
+ u8 tlv_length; /* In dwords - not including this header */
+ u8 tlv_reserved;
+#define ECORE_DRV_TLV_FLAGS_CHANGED 0x01
+ u8 tlv_flags;
+};
+
/**
* @brief Initialize the interface with the MCP
*
@@ -128,32 +139,58 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @return enum _ecore_status_t - ECORE_SUCCESS - operation
- * was successul.
+ * was successful.
*/
enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+enum ecore_drv_role {
+ ECORE_DRV_ROLE_OS,
+ ECORE_DRV_ROLE_KDUMP,
+};
+
+struct ecore_load_req_params {
+ enum ecore_drv_role drv_role;
+ u8 timeout_val; /* 1..254, '0' - default value, '255' - no timeout */
+ bool avoid_eng_reset;
+ u32 load_code;
+};
+
/**
- * @brief Sends a LOAD_REQ to the MFW, and in case operation
- * succeed, returns whether this PF is the first on the
- * chip/engine/port or function. This function should be
- * called when driver is ready to accept MFW events after
- * Storms initializations are done.
- *
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param p_load_code - The MCP response param containing one
- * of the following:
- * FW_MSG_CODE_DRV_LOAD_ENGINE
- * FW_MSG_CODE_DRV_LOAD_PORT
- * FW_MSG_CODE_DRV_LOAD_FUNCTION
- * @return enum _ecore_status_t -
- * ECORE_SUCCESS - Operation was successul.
- * ECORE_BUSY - Operation failed
+ * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
+ * returns whether this PF is the first on the engine/port or function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
*/
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 *p_load_code);
+ struct ecore_load_req_params *p_params);
+
+/**
+ * @brief Sends a UNLOAD_REQ message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Sends a UNLOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
* @brief Read the MFW mailbox into Current buffer.
@@ -327,31 +364,37 @@ enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
- * @brief - Clears the MFW crash dump logs.
+ * @brief - Sets the MFW's max value for the given resource
*
- * @param p_hwfn
- * @param p_ptt
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param resc_max_val
+ * @param p_mcp_resp
*
- * @param return ECORE_SUCCESS upon success.
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
-enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+enum _ecore_status_t
+ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp);
/**
- * @brief - Gets the MFW crash dump configuration and logs info.
+ * @brief - Gets the MFW allocation info for the given resource
*
- * @param p_hwfn
- * @param p_ptt
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param p_mcp_resp
+ * @param p_resc_num
+ * @param p_resc_start
*
- * @param return ECORE_SUCCESS upon success.
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
-enum _ecore_status_t ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
-
-enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct resource_info *p_resc_info,
- u32 *p_mcp_resp, u32 *p_mcp_param);
+enum _ecore_status_t
+ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 *p_mcp_resp,
+ u32 *p_resc_num, u32 *p_resc_start);
/**
* @brief - Initiates PF FLR
@@ -364,4 +407,79 @@ enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+#define ECORE_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP /* 0 */
+#define ECORE_MCP_RESC_LOCK_MAX_VAL 31
+
+enum ecore_resc_lock {
+ ECORE_RESC_LOCK_DBG_DUMP = ECORE_MCP_RESC_LOCK_MIN_VAL,
+ /* Locks that the MFW is aware of should be added here downwards */
+
+ /* Ecore only locks should be added here upwards */
+ ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL
+};
+
+struct ecore_resc_lock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Lock timeout value in seconds [default, none or 1..254] */
+ u8 timeout;
+#define ECORE_MCP_RESC_LOCK_TO_DEFAULT 0
+#define ECORE_MCP_RESC_LOCK_TO_NONE 255
+
+ /* Number of times to retry locking */
+ u8 retry_num;
+
+ /* The interval in usec between retries */
+ u16 retry_interval;
+
+ /* Use sleep or delay between retries */
+ bool sleep_b4_retry;
+
+ /* Will be set as true if the resource is free and granted */
+ bool b_granted;
+
+ /* Will be filled with the resource owner.
+ * [0..15 = PF0-15, 16 = MFW, 17 = diag over serial]
+ */
+ u8 owner;
+};
+
+/**
+ * @brief Acquires MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params);
+
+struct ecore_resc_unlock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Allow to release a resource even if belongs to another PF */
+ bool b_force;
+
+ /* Will be set as true if the resource is released */
+ bool b_released;
+};
+
+/**
+ * @brief Releases MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_unlock_params *p_params);
+
#endif /* __ECORE_MCP_H__ */
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index c26b4943..190c1352 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -84,6 +84,8 @@ struct ecore_mcp_function_info {
#define ECORE_MCP_VLAN_UNSET (0xffff)
u16 ovlan;
+
+ u16 mtu;
};
struct ecore_mcp_nvm_common {
@@ -173,15 +175,10 @@ union ecore_mcp_protocol_stats {
};
#endif
-enum ecore_ov_config_method {
- ECORE_OV_CONFIG_MTU,
- ECORE_OV_CONFIG_MAC,
- ECORE_OV_CONFIG_WOL
-};
-
enum ecore_ov_client {
ECORE_OV_CLIENT_DRV,
- ECORE_OV_CLIENT_USER
+ ECORE_OV_CLIENT_USER,
+ ECORE_OV_CLIENT_VENDOR_SPEC
};
enum ecore_ov_driver_state {
@@ -235,6 +232,297 @@ struct ecore_mba_vers {
u32 mba_vers[ECORE_MAX_NUM_OF_ROMIMG];
};
+enum ecore_mfw_tlv_type {
+ ECORE_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */
+ ECORE_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */
+ ECORE_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */
+ ECORE_MFW_TLV_ISCSI = 0x8, /* SCSI protocol TLVs */
+ ECORE_MFW_TLV_MAX = 0x16,
+};
+
+struct ecore_mfw_tlv_generic {
+ u16 feat_flags;
+ bool feat_flags_set;
+ u64 local_mac;
+ bool local_mac_set;
+ u64 additional_mac1;
+ bool additional_mac1_set;
+ u64 additional_mac2;
+ bool additional_mac2_set;
+ u8 drv_state;
+ bool drv_state_set;
+ u8 pxe_progress;
+ bool pxe_progress_set;
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
+struct ecore_mfw_tlv_eth {
+ u16 lso_maxoff_size;
+ bool lso_maxoff_size_set;
+ u16 lso_minseg_size;
+ bool lso_minseg_size_set;
+ u8 prom_mode;
+ bool prom_mode_set;
+ u16 tx_descr_size;
+ bool tx_descr_size_set;
+ u16 rx_descr_size;
+ bool rx_descr_size_set;
+ u16 netq_count;
+ bool netq_count_set;
+ u32 tcp4_offloads;
+ bool tcp4_offloads_set;
+ u32 tcp6_offloads;
+ bool tcp6_offloads_set;
+ u16 tx_descr_qdepth;
+ bool tx_descr_qdepth_set;
+ u16 rx_descr_qdepth;
+ bool rx_descr_qdepth_set;
+ u8 iov_offload;
+ bool iov_offload_set;
+ u8 txqs_empty;
+ bool txqs_empty_set;
+ u8 rxqs_empty;
+ bool rxqs_empty_set;
+ u8 num_txqs_full;
+ bool num_txqs_full_set;
+ u8 num_rxqs_full;
+ bool num_rxqs_full_set;
+};
+
+struct ecore_mfw_tlv_fcoe {
+ u8 scsi_timeout;
+ bool scsi_timeout_set;
+ u32 rt_tov;
+ bool rt_tov_set;
+ u32 ra_tov;
+ bool ra_tov_set;
+ u32 ed_tov;
+ bool ed_tov_set;
+ u32 cr_tov;
+ bool cr_tov_set;
+ u8 boot_type;
+ bool boot_type_set;
+ u8 npiv_state;
+ bool npiv_state_set;
+ u32 num_npiv_ids;
+ bool num_npiv_ids_set;
+ u8 switch_name[8];
+ bool switch_name_set;
+ u16 switch_portnum;
+ bool switch_portnum_set;
+ u8 switch_portid[3];
+ bool switch_portid_set;
+ u8 vendor_name[8];
+ bool vendor_name_set;
+ u8 switch_model[8];
+ bool switch_model_set;
+ u8 switch_fw_version[8];
+ bool switch_fw_version_set;
+ u8 qos_pri;
+ bool qos_pri_set;
+ u8 port_alias[3];
+ bool port_alias_set;
+ u8 port_state;
+ bool port_state_set;
+ u16 fip_tx_descr_size;
+ bool fip_tx_descr_size_set;
+ u16 fip_rx_descr_size;
+ bool fip_rx_descr_size_set;
+ u16 link_failures;
+ bool link_failures_set;
+ u8 fcoe_boot_progress;
+ bool fcoe_boot_progress_set;
+ u64 rx_bcast;
+ bool rx_bcast_set;
+ u64 tx_bcast;
+ bool tx_bcast_set;
+ u16 fcoe_txq_depth;
+ bool fcoe_txq_depth_set;
+ u16 fcoe_rxq_depth;
+ bool fcoe_rxq_depth_set;
+ u64 fcoe_rx_frames;
+ bool fcoe_rx_frames_set;
+ u64 fcoe_rx_bytes;
+ bool fcoe_rx_bytes_set;
+ u64 fcoe_tx_frames;
+ bool fcoe_tx_frames_set;
+ u64 fcoe_tx_bytes;
+ bool fcoe_tx_bytes_set;
+ u16 crc_count;
+ bool crc_count_set;
+ u32 crc_err_src_fcid[5];
+ bool crc_err_src_fcid_set[5];
+ u8 crc_err_tstamp[5][14];
+ bool crc_err_tstamp_set[5];
+ u16 losync_err;
+ bool losync_err_set;
+ u16 losig_err;
+ bool losig_err_set;
+ u16 primtive_err;
+ bool primtive_err_set;
+ u16 disparity_err;
+ bool disparity_err_set;
+ u16 code_violation_err;
+ bool code_violation_err_set;
+ u32 flogi_param[4];
+ bool flogi_param_set[4];
+ u8 flogi_tstamp[14];
+ bool flogi_tstamp_set;
+ u32 flogi_acc_param[4];
+ bool flogi_acc_param_set[4];
+ u8 flogi_acc_tstamp[14];
+ bool flogi_acc_tstamp_set;
+ u32 flogi_rjt;
+ bool flogi_rjt_set;
+ u8 flogi_rjt_tstamp[14];
+ bool flogi_rjt_tstamp_set;
+ u32 fdiscs;
+ bool fdiscs_set;
+ u8 fdisc_acc;
+ bool fdisc_acc_set;
+ u8 fdisc_rjt;
+ bool fdisc_rjt_set;
+ u8 plogi;
+ bool plogi_set;
+ u8 plogi_acc;
+ bool plogi_acc_set;
+ u8 plogi_rjt;
+ bool plogi_rjt_set;
+ u32 plogi_dst_fcid[5];
+ bool plogi_dst_fcid_set[5];
+ u8 plogi_tstamp[5][14];
+ bool plogi_tstamp_set[5];
+ u32 plogi_acc_src_fcid[5];
+ bool plogi_acc_src_fcid_set[5];
+ u8 plogi_acc_tstamp[5][14];
+ bool plogi_acc_tstamp_set[5];
+ u8 tx_plogos;
+ bool tx_plogos_set;
+ u8 plogo_acc;
+ bool plogo_acc_set;
+ u8 plogo_rjt;
+ bool plogo_rjt_set;
+ u32 plogo_src_fcid[5];
+ bool plogo_src_fcid_set[5];
+ u8 plogo_tstamp[5][14];
+ bool plogo_tstamp_set[5];
+ u8 rx_logos;
+ bool rx_logos_set;
+ u8 tx_accs;
+ bool tx_accs_set;
+ u8 tx_prlis;
+ bool tx_prlis_set;
+ u8 rx_accs;
+ bool rx_accs_set;
+ u8 tx_abts;
+ bool tx_abts_set;
+ u8 rx_abts_acc;
+ bool rx_abts_acc_set;
+ u8 rx_abts_rjt;
+ bool rx_abts_rjt_set;
+ u32 abts_dst_fcid[5];
+ bool abts_dst_fcid_set[5];
+ u8 abts_tstamp[5][14];
+ bool abts_tstamp_set[5];
+ u8 rx_rscn;
+ bool rx_rscn_set;
+ u32 rx_rscn_nport[4];
+ bool rx_rscn_nport_set[4];
+ u8 tx_lun_rst;
+ bool tx_lun_rst_set;
+ u8 abort_task_sets;
+ bool abort_task_sets_set;
+ u8 tx_tprlos;
+ bool tx_tprlos_set;
+ u8 tx_nos;
+ bool tx_nos_set;
+ u8 rx_nos;
+ bool rx_nos_set;
+ u8 ols;
+ bool ols_set;
+ u8 lr;
+ bool lr_set;
+ u8 lrr;
+ bool lrr_set;
+ u8 tx_lip;
+ bool tx_lip_set;
+ u8 rx_lip;
+ bool rx_lip_set;
+ u8 eofa;
+ bool eofa_set;
+ u8 eofni;
+ bool eofni_set;
+ u8 scsi_chks;
+ bool scsi_chks_set;
+ u8 scsi_cond_met;
+ bool scsi_cond_met_set;
+ u8 scsi_busy;
+ bool scsi_busy_set;
+ u8 scsi_inter;
+ bool scsi_inter_set;
+ u8 scsi_inter_cond_met;
+ bool scsi_inter_cond_met_set;
+ u8 scsi_rsv_conflicts;
+ bool scsi_rsv_conflicts_set;
+ u8 scsi_tsk_full;
+ bool scsi_tsk_full_set;
+ u8 scsi_aca_active;
+ bool scsi_aca_active_set;
+ u8 scsi_tsk_abort;
+ bool scsi_tsk_abort_set;
+ u32 scsi_rx_chk[5];
+ bool scsi_rx_chk_set[5];
+ u8 scsi_chk_tstamp[5][14];
+ bool scsi_chk_tstamp_set[5];
+};
+
+struct ecore_mfw_tlv_iscsi {
+ u8 target_llmnr;
+ bool target_llmnr_set;
+ u8 header_digest;
+ bool header_digest_set;
+ u8 data_digest;
+ bool data_digest_set;
+ u8 auth_method;
+ bool auth_method_set;
+ u16 boot_taget_portal;
+ bool boot_taget_portal_set;
+ u16 frame_size;
+ bool frame_size_set;
+ u16 tx_desc_size;
+ bool tx_desc_size_set;
+ u16 rx_desc_size;
+ bool rx_desc_size_set;
+ u8 boot_progress;
+ bool boot_progress_set;
+ u16 tx_desc_qdepth;
+ bool tx_desc_qdepth_set;
+ u16 rx_desc_qdepth;
+ bool rx_desc_qdepth_set;
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
+union ecore_mfw_tlv_data {
+ struct ecore_mfw_tlv_generic generic;
+ struct ecore_mfw_tlv_eth eth;
+ struct ecore_mfw_tlv_fcoe fcoe;
+ struct ecore_mfw_tlv_iscsi iscsi;
+};
+
/**
* @brief - returns the link params of the hw function
*
@@ -452,7 +740,6 @@ enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
- * @param config - Configuation that has been updated
* @param client - ecore client type
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
@@ -460,7 +747,6 @@ enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- enum ecore_ov_config_method config,
enum ecore_ov_client client);
/**
@@ -792,14 +1078,49 @@ enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u64 *num_events);
+struct ecore_mdump_info {
+ u32 reason;
+ u32 version;
+ u32 config;
+ u32 epoch;
+ u32 num_of_logs;
+ u32 valid_logs;
+};
+
+/**
+ * @brief - Gets the MFW crash dump configuration and logs info.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mdump_info
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t
+ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_info *p_mdump_info);
+
/**
- * @brief Sets whether a critical error notification from the MFW is acked, or
- * is it being ignored and thus allowing the MFW crash dump.
+ * @brief - Clears the MFW crash dump logs.
*
- * @param p_dev
- * @param mdump_enable
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Processes the TLV request from MFW i.e., get the required TLV info
+ * from the ecore client and send it to the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
*
+ * @param return ECORE_SUCCESS upon success.
*/
-void ecore_mcp_mdump_enable(struct ecore_dev *p_dev, bool mdump_enable);
+enum _ecore_status_t ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
#endif
diff --git a/drivers/net/qede/base/ecore_mng_tlv.c b/drivers/net/qede/base/ecore_mng_tlv.c
new file mode 100644
index 00000000..0bf1be88
--- /dev/null
+++ b/drivers/net/qede/base/ecore_mng_tlv.c
@@ -0,0 +1,1535 @@
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_mcp.h"
+#include "ecore_hw.h"
+#include "reg_addr.h"
+
+#define TLV_TYPE(p) (p[0])
+#define TLV_LENGTH(p) (p[1])
+#define TLV_FLAGS(p) (p[3])
+
+static enum _ecore_status_t
+ecore_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
+{
+ switch (tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ case DRV_TLV_OS_DRIVER_STATES:
+ case DRV_TLV_PXE_BOOT_PROGRESS:
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ case DRV_TLV_TX_FRAMES_SENT:
+ case DRV_TLV_TX_BYTES_SENT:
+ *tlv_group |= ECORE_MFW_TLV_GENERIC;
+ break;
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ case DRV_TLV_PROMISCUOUS_MODE:
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_IOV_OFFLOAD:
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ case DRV_TLV_TX_QUEUES_FULL:
+ case DRV_TLV_RX_QUEUES_FULL:
+ *tlv_group |= ECORE_MFW_TLV_ETH;
+ break;
+ case DRV_TLV_SCSI_TO:
+ case DRV_TLV_R_T_TOV:
+ case DRV_TLV_R_A_TOV:
+ case DRV_TLV_E_D_TOV:
+ case DRV_TLV_CR_TOV:
+ case DRV_TLV_BOOT_TYPE:
+ case DRV_TLV_NPIV_STATE:
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ case DRV_TLV_SWITCH_NAME:
+ case DRV_TLV_SWITCH_PORT_NUM:
+ case DRV_TLV_SWITCH_PORT_ID:
+ case DRV_TLV_VENDOR_NAME:
+ case DRV_TLV_SWITCH_MODEL:
+ case DRV_TLV_SWITCH_FW_VER:
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ case DRV_TLV_PORT_ALIAS:
+ case DRV_TLV_PORT_STATE:
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ case DRV_TLV_CRC_ERROR_COUNT:
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_RJT:
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ case DRV_TLV_LOGOS_ISSUED:
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ case DRV_TLV_LOGOS_RECEIVED:
+ case DRV_TLV_ACCS_ISSUED:
+ case DRV_TLV_PRLIS_ISSUED:
+ case DRV_TLV_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_SENT_COUNT:
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ case DRV_TLV_RSCNS_RECEIVED:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ case DRV_TLV_TPRLOS_SENT:
+ case DRV_TLV_NOS_SENT_COUNT:
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ case DRV_TLV_OLS_COUNT:
+ case DRV_TLV_LR_COUNT:
+ case DRV_TLV_LRR_COUNT:
+ case DRV_TLV_LIP_SENT_COUNT:
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ case DRV_TLV_EOFA_COUNT:
+ case DRV_TLV_EOFNI_COUNT:
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ *tlv_group = ECORE_MFW_TLV_FCOE;
+ break;
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ case DRV_TLV_MAX_FRAME_SIZE:
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ *tlv_group |= ECORE_MFW_TLV_ISCSI;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int
+ecore_mfw_get_gen_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_generic *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ if (p_drv_buf->feat_flags_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->feat_flags;
+ return sizeof(p_drv_buf->feat_flags);
+ }
+ break;
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ if (p_drv_buf->local_mac_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->local_mac;
+ return sizeof(p_drv_buf->local_mac);
+ }
+ break;
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ if (p_drv_buf->additional_mac1_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->additional_mac1;
+ return sizeof(p_drv_buf->additional_mac1);
+ }
+ break;
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ if (p_drv_buf->additional_mac2_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->additional_mac2;
+ return sizeof(p_drv_buf->additional_mac2);
+ }
+ break;
+ case DRV_TLV_OS_DRIVER_STATES:
+ if (p_drv_buf->drv_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->drv_state;
+ return sizeof(p_drv_buf->drv_state);
+ }
+ break;
+ case DRV_TLV_PXE_BOOT_PROGRESS:
+ if (p_drv_buf->pxe_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->pxe_progress;
+ return sizeof(p_drv_buf->pxe_progress);
+ }
+ break;
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_eth_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_eth *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ if (p_drv_buf->lso_maxoff_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lso_maxoff_size;
+ return sizeof(p_drv_buf->lso_maxoff_size);
+ }
+ break;
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ if (p_drv_buf->lso_minseg_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lso_minseg_size;
+ return sizeof(p_drv_buf->lso_minseg_size);
+ }
+ break;
+ case DRV_TLV_PROMISCUOUS_MODE:
+ if (p_drv_buf->prom_mode_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->prom_mode;
+ return sizeof(p_drv_buf->prom_mode);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_descr_size;
+ return sizeof(p_drv_buf->tx_descr_size);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_descr_size;
+ return sizeof(p_drv_buf->rx_descr_size);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ if (p_drv_buf->netq_count_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->netq_count;
+ return sizeof(p_drv_buf->netq_count);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ if (p_drv_buf->tcp4_offloads_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tcp4_offloads;
+ return sizeof(p_drv_buf->tcp4_offloads);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ if (p_drv_buf->tcp6_offloads_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tcp6_offloads;
+ return sizeof(p_drv_buf->tcp6_offloads);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_descr_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_descr_qdepth;
+ return sizeof(p_drv_buf->tx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_descr_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_descr_qdepth;
+ return sizeof(p_drv_buf->rx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_IOV_OFFLOAD:
+ if (p_drv_buf->iov_offload_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->iov_offload;
+ return sizeof(p_drv_buf->iov_offload);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ if (p_drv_buf->txqs_empty_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->txqs_empty;
+ return sizeof(p_drv_buf->txqs_empty);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ if (p_drv_buf->rxqs_empty_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rxqs_empty;
+ return sizeof(p_drv_buf->rxqs_empty);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_FULL:
+ if (p_drv_buf->num_txqs_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_txqs_full;
+ return sizeof(p_drv_buf->num_txqs_full);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_FULL:
+ if (p_drv_buf->num_rxqs_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_rxqs_full;
+ return sizeof(p_drv_buf->num_rxqs_full);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_fcoe_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_fcoe *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_SCSI_TO:
+ if (p_drv_buf->scsi_timeout_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_timeout;
+ return sizeof(p_drv_buf->scsi_timeout);
+ }
+ break;
+ case DRV_TLV_R_T_TOV:
+ if (p_drv_buf->rt_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rt_tov;
+ return sizeof(p_drv_buf->rt_tov);
+ }
+ break;
+ case DRV_TLV_R_A_TOV:
+ if (p_drv_buf->ra_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ra_tov;
+ return sizeof(p_drv_buf->ra_tov);
+ }
+ break;
+ case DRV_TLV_E_D_TOV:
+ if (p_drv_buf->ed_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ed_tov;
+ return sizeof(p_drv_buf->ed_tov);
+ }
+ break;
+ case DRV_TLV_CR_TOV:
+ if (p_drv_buf->cr_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->cr_tov;
+ return sizeof(p_drv_buf->cr_tov);
+ }
+ break;
+ case DRV_TLV_BOOT_TYPE:
+ if (p_drv_buf->boot_type_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_type;
+ return sizeof(p_drv_buf->boot_type);
+ }
+ break;
+ case DRV_TLV_NPIV_STATE:
+ if (p_drv_buf->npiv_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->npiv_state;
+ return sizeof(p_drv_buf->npiv_state);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ if (p_drv_buf->num_npiv_ids_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_npiv_ids;
+ return sizeof(p_drv_buf->num_npiv_ids);
+ }
+ break;
+ case DRV_TLV_SWITCH_NAME:
+ if (p_drv_buf->switch_name_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_name;
+ return sizeof(p_drv_buf->switch_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_NUM:
+ if (p_drv_buf->switch_portnum_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_portnum;
+ return sizeof(p_drv_buf->switch_portnum);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_ID:
+ if (p_drv_buf->switch_portid_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_portid;
+ return sizeof(p_drv_buf->switch_portid);
+ }
+ break;
+ case DRV_TLV_VENDOR_NAME:
+ if (p_drv_buf->vendor_name_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->vendor_name;
+ return sizeof(p_drv_buf->vendor_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_MODEL:
+ if (p_drv_buf->switch_model_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_model;
+ return sizeof(p_drv_buf->switch_model);
+ }
+ break;
+ case DRV_TLV_SWITCH_FW_VER:
+ if (p_drv_buf->switch_fw_version_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_fw_version;
+ return sizeof(p_drv_buf->switch_fw_version);
+ }
+ break;
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ if (p_drv_buf->qos_pri_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->qos_pri;
+ return sizeof(p_drv_buf->qos_pri);
+ }
+ break;
+ case DRV_TLV_PORT_ALIAS:
+ if (p_drv_buf->port_alias_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->port_alias;
+ return sizeof(p_drv_buf->port_alias);
+ }
+ break;
+ case DRV_TLV_PORT_STATE:
+ if (p_drv_buf->port_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->port_state;
+ return sizeof(p_drv_buf->port_state);
+ }
+ break;
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_tx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fip_tx_descr_size;
+ return sizeof(p_drv_buf->fip_tx_descr_size);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_rx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fip_rx_descr_size;
+ return sizeof(p_drv_buf->fip_rx_descr_size);
+ }
+ break;
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ if (p_drv_buf->link_failures_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->link_failures;
+ return sizeof(p_drv_buf->link_failures);
+ }
+ break;
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ if (p_drv_buf->fcoe_boot_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_boot_progress;
+ return sizeof(p_drv_buf->fcoe_boot_progress);
+ }
+ break;
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ if (p_drv_buf->rx_bcast_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bcast;
+ return sizeof(p_drv_buf->rx_bcast);
+ }
+ break;
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ if (p_drv_buf->tx_bcast_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bcast;
+ return sizeof(p_drv_buf->tx_bcast);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_txq_depth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_txq_depth;
+ return sizeof(p_drv_buf->fcoe_txq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_rxq_depth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rxq_depth;
+ return sizeof(p_drv_buf->fcoe_rxq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rx_frames;
+ return sizeof(p_drv_buf->fcoe_rx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rx_bytes;
+ return sizeof(p_drv_buf->fcoe_rx_bytes);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ if (p_drv_buf->fcoe_tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_tx_frames;
+ return sizeof(p_drv_buf->fcoe_tx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ if (p_drv_buf->fcoe_tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_tx_bytes;
+ return sizeof(p_drv_buf->fcoe_tx_bytes);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_COUNT:
+ if (p_drv_buf->crc_count_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_count;
+ return sizeof(p_drv_buf->crc_count);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[0];
+ return sizeof(p_drv_buf->crc_err_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[1];
+ return sizeof(p_drv_buf->crc_err_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[2];
+ return sizeof(p_drv_buf->crc_err_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[3];
+ return sizeof(p_drv_buf->crc_err_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[4];
+ return sizeof(p_drv_buf->crc_err_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[0];
+ return sizeof(p_drv_buf->crc_err_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[1];
+ return sizeof(p_drv_buf->crc_err_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[2];
+ return sizeof(p_drv_buf->crc_err_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[3];
+ return sizeof(p_drv_buf->crc_err_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[4];
+ return sizeof(p_drv_buf->crc_err_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ if (p_drv_buf->losync_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->losync_err;
+ return sizeof(p_drv_buf->losync_err);
+ }
+ break;
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ if (p_drv_buf->losig_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->losig_err;
+ return sizeof(p_drv_buf->losig_err);
+ }
+ break;
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ if (p_drv_buf->primtive_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->primtive_err;
+ return sizeof(p_drv_buf->primtive_err);
+ }
+ break;
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ if (p_drv_buf->disparity_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->disparity_err;
+ return sizeof(p_drv_buf->disparity_err);
+ }
+ break;
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ if (p_drv_buf->code_violation_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->code_violation_err;
+ return sizeof(p_drv_buf->code_violation_err);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ if (p_drv_buf->flogi_param_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[0];
+ return sizeof(p_drv_buf->flogi_param[0]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ if (p_drv_buf->flogi_param_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[1];
+ return sizeof(p_drv_buf->flogi_param[1]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ if (p_drv_buf->flogi_param_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[2];
+ return sizeof(p_drv_buf->flogi_param[2]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ if (p_drv_buf->flogi_param_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[3];
+ return sizeof(p_drv_buf->flogi_param[3]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ if (p_drv_buf->flogi_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_tstamp;
+ return sizeof(p_drv_buf->flogi_tstamp);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ if (p_drv_buf->flogi_acc_param_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[0];
+ return sizeof(p_drv_buf->flogi_acc_param[0]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ if (p_drv_buf->flogi_acc_param_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[1];
+ return sizeof(p_drv_buf->flogi_acc_param[1]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ if (p_drv_buf->flogi_acc_param_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[2];
+ return sizeof(p_drv_buf->flogi_acc_param[2]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ if (p_drv_buf->flogi_acc_param_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[3];
+ return sizeof(p_drv_buf->flogi_acc_param[3]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ if (p_drv_buf->flogi_acc_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_tstamp;
+ return sizeof(p_drv_buf->flogi_acc_tstamp);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_RJT:
+ if (p_drv_buf->flogi_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_rjt;
+ return sizeof(p_drv_buf->flogi_rjt);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ if (p_drv_buf->flogi_rjt_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_rjt_tstamp;
+ return sizeof(p_drv_buf->flogi_rjt_tstamp);
+ }
+ break;
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ if (p_drv_buf->fdiscs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdiscs;
+ return sizeof(p_drv_buf->fdiscs);
+ }
+ break;
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ if (p_drv_buf->fdisc_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdisc_acc;
+ return sizeof(p_drv_buf->fdisc_acc);
+ }
+ break;
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ if (p_drv_buf->fdisc_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdisc_rjt;
+ return sizeof(p_drv_buf->fdisc_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ if (p_drv_buf->plogi_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi;
+ return sizeof(p_drv_buf->plogi);
+ }
+ break;
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ if (p_drv_buf->plogi_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc;
+ return sizeof(p_drv_buf->plogi_acc);
+ }
+ break;
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ if (p_drv_buf->plogi_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_rjt;
+ return sizeof(p_drv_buf->plogi_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[0];
+ return sizeof(p_drv_buf->plogi_dst_fcid[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[1];
+ return sizeof(p_drv_buf->plogi_dst_fcid[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[2];
+ return sizeof(p_drv_buf->plogi_dst_fcid[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[3];
+ return sizeof(p_drv_buf->plogi_dst_fcid[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[4];
+ return sizeof(p_drv_buf->plogi_dst_fcid[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[0];
+ return sizeof(p_drv_buf->plogi_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[1];
+ return sizeof(p_drv_buf->plogi_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[2];
+ return sizeof(p_drv_buf->plogi_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[3];
+ return sizeof(p_drv_buf->plogi_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[4];
+ return sizeof(p_drv_buf->plogi_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[0];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[1];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[2];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[3];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[4];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[0];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[1];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[2];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[3];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[4];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOGOS_ISSUED:
+ if (p_drv_buf->tx_plogos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_plogos;
+ return sizeof(p_drv_buf->tx_plogos);
+ }
+ break;
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ if (p_drv_buf->plogo_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_acc;
+ return sizeof(p_drv_buf->plogo_acc);
+ }
+ break;
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ if (p_drv_buf->plogo_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_rjt;
+ return sizeof(p_drv_buf->plogo_rjt);
+ }
+ break;
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[0];
+ return sizeof(p_drv_buf->plogo_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[1];
+ return sizeof(p_drv_buf->plogo_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[2];
+ return sizeof(p_drv_buf->plogo_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[3];
+ return sizeof(p_drv_buf->plogo_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[4];
+ return sizeof(p_drv_buf->plogo_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[0];
+ return sizeof(p_drv_buf->plogo_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[1];
+ return sizeof(p_drv_buf->plogo_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[2];
+ return sizeof(p_drv_buf->plogo_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[3];
+ return sizeof(p_drv_buf->plogo_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[4];
+ return sizeof(p_drv_buf->plogo_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOGOS_RECEIVED:
+ if (p_drv_buf->rx_logos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_logos;
+ return sizeof(p_drv_buf->rx_logos);
+ }
+ break;
+ case DRV_TLV_ACCS_ISSUED:
+ if (p_drv_buf->tx_accs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_accs;
+ return sizeof(p_drv_buf->tx_accs);
+ }
+ break;
+ case DRV_TLV_PRLIS_ISSUED:
+ if (p_drv_buf->tx_prlis_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_prlis;
+ return sizeof(p_drv_buf->tx_prlis);
+ }
+ break;
+ case DRV_TLV_ACCS_RECEIVED:
+ if (p_drv_buf->rx_accs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_accs;
+ return sizeof(p_drv_buf->rx_accs);
+ }
+ break;
+ case DRV_TLV_ABTS_SENT_COUNT:
+ if (p_drv_buf->tx_abts_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_abts;
+ return sizeof(p_drv_buf->tx_abts);
+ }
+ break;
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ if (p_drv_buf->rx_abts_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_abts_acc;
+ return sizeof(p_drv_buf->rx_abts_acc);
+ }
+ break;
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ if (p_drv_buf->rx_abts_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_abts_rjt;
+ return sizeof(p_drv_buf->rx_abts_rjt);
+ }
+ break;
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[0];
+ return sizeof(p_drv_buf->abts_dst_fcid[0]);
+ }
+ break;
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[1];
+ return sizeof(p_drv_buf->abts_dst_fcid[1]);
+ }
+ break;
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[2];
+ return sizeof(p_drv_buf->abts_dst_fcid[2]);
+ }
+ break;
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[3];
+ return sizeof(p_drv_buf->abts_dst_fcid[3]);
+ }
+ break;
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[4];
+ return sizeof(p_drv_buf->abts_dst_fcid[4]);
+ }
+ break;
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[0];
+ return sizeof(p_drv_buf->abts_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[1];
+ return sizeof(p_drv_buf->abts_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[2];
+ return sizeof(p_drv_buf->abts_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[3];
+ return sizeof(p_drv_buf->abts_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[4];
+ return sizeof(p_drv_buf->abts_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_RSCNS_RECEIVED:
+ if (p_drv_buf->rx_rscn_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn;
+ return sizeof(p_drv_buf->rx_rscn);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ if (p_drv_buf->rx_rscn_nport_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[0];
+ return sizeof(p_drv_buf->rx_rscn_nport[0]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ if (p_drv_buf->rx_rscn_nport_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[1];
+ return sizeof(p_drv_buf->rx_rscn_nport[1]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ if (p_drv_buf->rx_rscn_nport_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[2];
+ return sizeof(p_drv_buf->rx_rscn_nport[2]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ if (p_drv_buf->rx_rscn_nport_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[3];
+ return sizeof(p_drv_buf->rx_rscn_nport[3]);
+ }
+ break;
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ if (p_drv_buf->tx_lun_rst_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_lun_rst;
+ return sizeof(p_drv_buf->tx_lun_rst);
+ }
+ break;
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ if (p_drv_buf->abort_task_sets_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abort_task_sets;
+ return sizeof(p_drv_buf->abort_task_sets);
+ }
+ break;
+ case DRV_TLV_TPRLOS_SENT:
+ if (p_drv_buf->tx_tprlos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_tprlos;
+ return sizeof(p_drv_buf->tx_tprlos);
+ }
+ break;
+ case DRV_TLV_NOS_SENT_COUNT:
+ if (p_drv_buf->tx_nos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_nos;
+ return sizeof(p_drv_buf->tx_nos);
+ }
+ break;
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ if (p_drv_buf->rx_nos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_nos;
+ return sizeof(p_drv_buf->rx_nos);
+ }
+ break;
+ case DRV_TLV_OLS_COUNT:
+ if (p_drv_buf->ols_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ols;
+ return sizeof(p_drv_buf->ols);
+ }
+ break;
+ case DRV_TLV_LR_COUNT:
+ if (p_drv_buf->lr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lr;
+ return sizeof(p_drv_buf->lr);
+ }
+ break;
+ case DRV_TLV_LRR_COUNT:
+ if (p_drv_buf->lrr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lrr;
+ return sizeof(p_drv_buf->lrr);
+ }
+ break;
+ case DRV_TLV_LIP_SENT_COUNT:
+ if (p_drv_buf->tx_lip_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_lip;
+ return sizeof(p_drv_buf->tx_lip);
+ }
+ break;
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ if (p_drv_buf->rx_lip_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_lip;
+ return sizeof(p_drv_buf->rx_lip);
+ }
+ break;
+ case DRV_TLV_EOFA_COUNT:
+ if (p_drv_buf->eofa_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->eofa;
+ return sizeof(p_drv_buf->eofa);
+ }
+ break;
+ case DRV_TLV_EOFNI_COUNT:
+ if (p_drv_buf->eofni_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->eofni;
+ return sizeof(p_drv_buf->eofni);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ if (p_drv_buf->scsi_chks_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chks;
+ return sizeof(p_drv_buf->scsi_chks);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_cond_met_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_cond_met;
+ return sizeof(p_drv_buf->scsi_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ if (p_drv_buf->scsi_busy_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_busy;
+ return sizeof(p_drv_buf->scsi_busy);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ if (p_drv_buf->scsi_inter_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_inter;
+ return sizeof(p_drv_buf->scsi_inter);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_inter_cond_met_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_inter_cond_met;
+ return sizeof(p_drv_buf->scsi_inter_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ if (p_drv_buf->scsi_rsv_conflicts_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rsv_conflicts;
+ return sizeof(p_drv_buf->scsi_rsv_conflicts);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ if (p_drv_buf->scsi_tsk_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_tsk_full;
+ return sizeof(p_drv_buf->scsi_tsk_full);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ if (p_drv_buf->scsi_aca_active_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_aca_active;
+ return sizeof(p_drv_buf->scsi_aca_active);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ if (p_drv_buf->scsi_tsk_abort_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_tsk_abort;
+ return sizeof(p_drv_buf->scsi_tsk_abort);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[0];
+ return sizeof(p_drv_buf->scsi_rx_chk[0]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[1];
+ return sizeof(p_drv_buf->scsi_rx_chk[1]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[2];
+ return sizeof(p_drv_buf->scsi_rx_chk[2]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[3];
+ return sizeof(p_drv_buf->scsi_rx_chk[4]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[4];
+ return sizeof(p_drv_buf->scsi_rx_chk[4]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[0];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[1];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[2];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[3];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[4];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[4]);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_iscsi_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_iscsi *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ if (p_drv_buf->target_llmnr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->target_llmnr;
+ return sizeof(p_drv_buf->target_llmnr);
+ }
+ break;
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->header_digest_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->header_digest;
+ return sizeof(p_drv_buf->header_digest);
+ }
+ break;
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->data_digest_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->data_digest;
+ return sizeof(p_drv_buf->data_digest);
+ }
+ break;
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ if (p_drv_buf->auth_method_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->auth_method;
+ return sizeof(p_drv_buf->auth_method);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ if (p_drv_buf->boot_taget_portal_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_taget_portal;
+ return sizeof(p_drv_buf->boot_taget_portal);
+ }
+ break;
+ case DRV_TLV_MAX_FRAME_SIZE:
+ if (p_drv_buf->frame_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->frame_size;
+ return sizeof(p_drv_buf->frame_size);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_desc_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_desc_size;
+ return sizeof(p_drv_buf->tx_desc_size);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_desc_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_desc_size;
+ return sizeof(p_drv_buf->rx_desc_size);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ if (p_drv_buf->boot_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_progress;
+ return sizeof(p_drv_buf->boot_progress);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_desc_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_desc_qdepth;
+ return sizeof(p_drv_buf->tx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_desc_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_desc_qdepth;
+ return sizeof(p_drv_buf->rx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static enum _ecore_status_t
+ecore_mfw_update_tlvs(u8 tlv_group, struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_mfw_buf, u32 size)
+{
+ union ecore_mfw_tlv_data *p_tlv_data;
+ struct ecore_drv_tlv_hdr tlv;
+ u8 *p_tlv_ptr = OSAL_NULL, *p_temp;
+ u32 offset;
+ int len;
+
+ p_tlv_data = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_tlv_data));
+ if (!p_tlv_data)
+ return ECORE_NOMEM;
+
+ if (OSAL_MFW_FILL_TLV_DATA(p_hwfn, tlv_group, p_tlv_data)) {
+ OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);
+ return ECORE_INVAL;
+ }
+
+ offset = 0;
+ OSAL_MEMSET(&tlv, 0, sizeof(tlv));
+ while (offset < size) {
+ p_temp = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_temp);
+ tlv.tlv_length = TLV_LENGTH(p_temp);
+ tlv.tlv_flags = TLV_FLAGS(p_temp);
+ DP_INFO(p_hwfn, "Type %d length = %d flags = 0x%x\n",
+ tlv.tlv_type, tlv.tlv_length, tlv.tlv_flags);
+
+ offset += sizeof(tlv);
+ if (tlv_group == ECORE_MFW_TLV_GENERIC)
+ len = ecore_mfw_get_gen_tlv_value(&tlv,
+ &p_tlv_data->generic, &p_tlv_ptr);
+ else if (tlv_group == ECORE_MFW_TLV_ETH)
+ len = ecore_mfw_get_eth_tlv_value(&tlv,
+ &p_tlv_data->eth, &p_tlv_ptr);
+ else if (tlv_group == ECORE_MFW_TLV_FCOE)
+ len = ecore_mfw_get_fcoe_tlv_value(&tlv,
+ &p_tlv_data->fcoe, &p_tlv_ptr);
+ else
+ len = ecore_mfw_get_iscsi_tlv_value(&tlv,
+ &p_tlv_data->iscsi, &p_tlv_ptr);
+
+ if (len > 0) {
+ OSAL_WARN(len > 4 * tlv.tlv_length,
+ "Incorrect MFW TLV length");
+ len = OSAL_MIN_T(int, len, 4 * tlv.tlv_length);
+ tlv.tlv_flags |= ECORE_DRV_TLV_FLAGS_CHANGED;
+ /* TODO: Endianness handling? */
+ OSAL_MEMCPY(p_mfw_buf, &tlv, sizeof(tlv));
+ OSAL_MEMCPY(p_mfw_buf + offset, p_tlv_ptr, len);
+ }
+
+ offset += sizeof(u32) * tlv.tlv_length;
+ }
+
+ OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ u32 addr, size, offset, resp, param, val;
+ u8 tlv_group = 0, id, *p_mfw_buf = OSAL_NULL, *p_temp;
+ u32 global_offsize, global_addr;
+ enum _ecore_status_t rc;
+ struct ecore_drv_tlv_hdr tlv;
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + OFFSETOF(struct public_global, data_ptr);
+ size = ecore_rd(p_hwfn, p_ptt, global_addr +
+ OFFSETOF(struct public_global, data_size));
+
+ if (!size) {
+ DP_NOTICE(p_hwfn, false, "Invalid TLV req size = %d\n", size);
+ goto drv_done;
+ }
+
+ p_mfw_buf = (void *)OSAL_VZALLOC(p_hwfn->p_dev, size);
+ if (!p_mfw_buf) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed allocate memory for p_mfw_buf\n");
+ goto drv_done;
+ }
+
+ /* Read the TLV request to local buffer */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ val = ecore_rd(p_hwfn, p_ptt, addr + offset);
+ OSAL_MEMCPY(&p_mfw_buf[offset], &val, sizeof(u32));
+ }
+
+ /* Parse the headers to enumerate the requested TLV groups */
+ for (offset = 0; offset < size;
+ offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
+ p_temp = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_temp);
+ tlv.tlv_length = TLV_LENGTH(p_temp);
+ if (ecore_mfw_get_tlv_group(tlv.tlv_type, &tlv_group))
+ goto drv_done;
+ }
+
+ /* Update the TLV values in the local buffer */
+ for (id = ECORE_MFW_TLV_GENERIC; id < ECORE_MFW_TLV_MAX; id <<= 1) {
+ if (tlv_group & id) {
+ if (ecore_mfw_update_tlvs(id, p_hwfn, p_ptt, p_mfw_buf,
+ size))
+ goto drv_done;
+ }
+ }
+
+ /* Write the TLV data to shared memory */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ val = (u32)p_mfw_buf[offset];
+ ecore_wr(p_hwfn, p_ptt, addr + offset, val);
+ offset += sizeof(u32);
+ }
+
+drv_done:
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp,
+ &param);
+
+ OSAL_VFREE(p_hwfn->p_dev, p_mfw_buf);
+
+ return rc;
+}
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
index e252d528..226e3d2a 100644
--- a/drivers/net/qede/base/ecore_proto_if.h
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -21,6 +21,12 @@ struct ecore_eth_pf_params {
* to update_pf_params routine invoked before slowpath start
*/
u16 num_cons;
+
+ /* To enable arfs, previous to HW-init a positive number needs to be
+ * set [as filters require allocated searcher ILT memory].
+ * This will set the maximal number of configured steering-filters.
+ */
+ u32 num_arfs_filters;
};
/* Most of the the parameters below are described in the FW iSCSI / TCP HSI */
@@ -63,6 +69,12 @@ struct ecore_iscsi_pf_params {
u8 bdq_pbl_num_entries[2];
};
+enum ecore_rdma_protocol {
+ ECORE_RDMA_PROTOCOL_DEFAULT,
+ ECORE_RDMA_PROTOCOL_ROCE,
+ ECORE_RDMA_PROTOCOL_IWARP,
+};
+
struct ecore_rdma_pf_params {
/* Supplied to ECORE during resource allocation (may affect the ILT and
* the doorbell BAR).
@@ -76,6 +88,10 @@ struct ecore_rdma_pf_params {
/* Will allocate rate limiters to be used with QPs */
u8 enable_dcqcn;
+
+ /* TCP port number used for the iwarp traffic */
+ u16 iwarp_port;
+ enum ecore_rdma_protocol rdma_protocol;
};
struct ecore_pf_params {
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index 01a29e31..846dc6d1 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -115,339 +115,338 @@
#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29839
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29740
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29741
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29742
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29743
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29744
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29745
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29746
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29747
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29748
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29749
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29750
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29751
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29752
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29753
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29754
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29755
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29756
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29757
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29758
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29759
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29760
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29761
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29762
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29763
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29764
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29765
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29766
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29767
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29768
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29769
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29770
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29771
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29772
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29773
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29774
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29775
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29776
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29777
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29778
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29779
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29780
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29781
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29782
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29783
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29784
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29785
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29786
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29787
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29788
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29789
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29790
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29791
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29792
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29793
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29794
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29795
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29796
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29797
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29798
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29799
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29800
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29801
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29802
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29803
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29804
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29805
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29806
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29807
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29935
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29936
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29937
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29938
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29939
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29940
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29941
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29942
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29943
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29944
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29945
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29946
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29947
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29948
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29949
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29950
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29951
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29952
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29953
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29954
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29955
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29956
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29957
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29958
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29959
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29960
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29969
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29970
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29971
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29972
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29973
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29974
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29975
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29976
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29977
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29978
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29979
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29980
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29981
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29982
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29983
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29984
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29985
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29986
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29987
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29988
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29989
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29990
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29991
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29992
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29993
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29994
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29995
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29996
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29997
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29998
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29999
+#define QM_REG_PQTX2PF_38_RT_OFFSET 30000
+#define QM_REG_PQTX2PF_39_RT_OFFSET 30001
+#define QM_REG_PQTX2PF_40_RT_OFFSET 30002
+#define QM_REG_PQTX2PF_41_RT_OFFSET 30003
+#define QM_REG_PQTX2PF_42_RT_OFFSET 30004
+#define QM_REG_PQTX2PF_43_RT_OFFSET 30005
+#define QM_REG_PQTX2PF_44_RT_OFFSET 30006
+#define QM_REG_PQTX2PF_45_RT_OFFSET 30007
+#define QM_REG_PQTX2PF_46_RT_OFFSET 30008
+#define QM_REG_PQTX2PF_47_RT_OFFSET 30009
+#define QM_REG_PQTX2PF_48_RT_OFFSET 30010
+#define QM_REG_PQTX2PF_49_RT_OFFSET 30011
+#define QM_REG_PQTX2PF_50_RT_OFFSET 30012
+#define QM_REG_PQTX2PF_51_RT_OFFSET 30013
+#define QM_REG_PQTX2PF_52_RT_OFFSET 30014
+#define QM_REG_PQTX2PF_53_RT_OFFSET 30015
+#define QM_REG_PQTX2PF_54_RT_OFFSET 30016
+#define QM_REG_PQTX2PF_55_RT_OFFSET 30017
+#define QM_REG_PQTX2PF_56_RT_OFFSET 30018
+#define QM_REG_PQTX2PF_57_RT_OFFSET 30019
+#define QM_REG_PQTX2PF_58_RT_OFFSET 30020
+#define QM_REG_PQTX2PF_59_RT_OFFSET 30021
+#define QM_REG_PQTX2PF_60_RT_OFFSET 30022
+#define QM_REG_PQTX2PF_61_RT_OFFSET 30023
+#define QM_REG_PQTX2PF_62_RT_OFFSET 30024
+#define QM_REG_PQTX2PF_63_RT_OFFSET 30025
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30026
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30027
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30028
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30029
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30030
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30031
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30032
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30033
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30034
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30035
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30036
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30037
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30038
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30039
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30040
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30041
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30042
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30043
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30044
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30045
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30046
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30047
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30048
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30049
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30050
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30051
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30052
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30053
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30054
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30310
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30510
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30566
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30767
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30769
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30822
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30823
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30824
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30825
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30841
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30801
+#define QM_REG_RLPFCRD_RT_OFFSET 30857
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30817
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819
+#define QM_REG_RLPFENABLE_RT_OFFSET 30873
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30874
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30875
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30891
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30851
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31011
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31012
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013
+#define QM_REG_WFQPFCRD_RT_OFFSET 30907
+#define QM_REG_WFQPFCRD_RT_SIZE 256
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31163
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31164
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31165
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31525
+#define QM_REG_TXPQMAP_RT_OFFSET 31677
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32189
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32549
+#define QM_REG_WFQVPCRD_RT_OFFSET 32701
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33061
+#define QM_REG_WFQVPMAP_RT_OFFSET 33213
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33725
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
+#define QM_REG_VOQCRDLINE_RT_OFFSET 34045
+#define QM_REG_VOQCRDLINE_RT_SIZE 36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34081
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34117
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34118
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34119
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34120
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34121
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34122
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34123
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34124
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34128
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34132
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34136
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34137
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34169
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34185
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34201
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34217
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34233
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34234
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34235
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34236
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34237
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34238
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34239
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34240
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34241
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34242
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34243
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34244
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34245
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34246
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34247
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34248
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34249
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34250
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34251
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34252
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34253
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34254
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34255
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34256
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34257
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34258
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34259
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34260
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34261
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34262
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34263
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34264
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34265
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34266
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34267
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34268
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34269
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34270
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34271
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34272
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34273
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34274
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34275
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34276
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34277
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34278
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34279
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34280
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34281
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34282
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34283
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34284
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34285
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34286
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34287
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34288
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34289
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34290
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34291
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34292
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34293
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34294
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34295
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34296
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34297
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34298
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34299
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34300
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34301
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34302
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34303
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34304
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34305
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34306
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34307
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34308
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34309
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34310
-#define RUNTIME_ARRAY_SIZE 33927
+#define RUNTIME_ARRAY_SIZE 34311
#endif /* __RT_DEFS_H__ */
diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h
index a4cb507f..c8e564f9 100644
--- a/drivers/net/qede/base/ecore_sp_api.h
+++ b/drivers/net/qede/base/ecore_sp_api.h
@@ -41,5 +41,24 @@ struct ecore_spq_comp_cb {
*/
enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe);
+/**
+ * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
+ * update Ramrod
+ *
+ * This ramrod is sent to update a tunneling configuration
+ * for a physical function (PF).
+ *
+ * @param p_hwfn
+ * @param p_tunn - pf update tunneling parameters
+ * @param comp_mode - completion mode
+ * @param p_comp_data - callback function
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
#endif
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index b3736a8c..8fd64d7a 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -22,6 +22,7 @@
#include "ecore_hw.h"
#include "ecore_dcbx.h"
#include "ecore_sriov.h"
+#include "ecore_vf.h"
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry **pp_ent,
@@ -31,7 +32,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
{
u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
+ enum _ecore_status_t rc;
if (!pp_ent)
return ECORE_INVAL;
@@ -88,7 +89,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
+static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
{
switch (type) {
case ECORE_TUNN_CLSS_MAC_VLAN:
@@ -107,224 +108,208 @@ static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
}
static void
-ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_update_params *p_src,
- struct pf_update_tunnel_config *p_tunn_cfg)
+ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src,
+ bool b_pf_start)
{
- unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
- unsigned long update_mask = p_src->tunn_mode_update_mask;
- unsigned long tunn_mode = p_src->tunn_mode;
- unsigned long new_tunn_mode = 0;
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
- }
-
- if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
- if (p_src->update_geneve_udp_port)
- DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
- p_src->update_geneve_udp_port = 0;
- p_src->tunn_mode = new_tunn_mode;
- return;
- }
+ if (p_src->vxlan.b_update_mode || b_pf_start)
+ p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
- }
+ if (p_src->l2_gre.b_update_mode || b_pf_start)
+ p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
- }
+ if (p_src->ip_gre.b_update_mode || b_pf_start)
+ p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
- }
+ if (p_src->l2_geneve.b_update_mode || b_pf_start)
+ p_tun->l2_geneve.b_mode_enabled =
+ p_src->l2_geneve.b_mode_enabled;
- p_src->tunn_mode = new_tunn_mode;
+ if (p_src->ip_geneve.b_update_mode || b_pf_start)
+ p_tun->ip_geneve.b_mode_enabled =
+ p_src->ip_geneve.b_mode_enabled;
}
-static void
-ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_update_params *p_src,
- struct pf_update_tunnel_config *p_tunn_cfg)
+static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src)
{
- unsigned long tunn_mode = p_src->tunn_mode;
enum tunnel_clss type;
- ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
- p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
- p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
-
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
- p_tunn_cfg->tunnel_clss_vxlan = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
- p_tunn_cfg->tunnel_clss_l2gre = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
- p_tunn_cfg->tunnel_clss_ipgre = type;
-
- if (p_src->update_vxlan_udp_port) {
- p_tunn_cfg->set_vxlan_udp_port_flg = 1;
- p_tunn_cfg->vxlan_udp_port =
- OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2gre = 1;
+ p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+ p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+ /* @DPDK - typecast tunnul class */
+ type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+ p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+ p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+ p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+ p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+ p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
+}
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgre = 1;
+static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src)
+{
+ p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+ p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_vxlan = 1;
+ if (p_src->geneve_port.b_update_port)
+ p_tun->geneve_port.port = p_src->geneve_port.port;
- if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
- if (p_src->update_geneve_udp_port)
- DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
- p_src->update_geneve_udp_port = 0;
- return;
- }
+ if (p_src->vxlan_port.b_update_port)
+ p_tun->vxlan_port.port = p_src->vxlan_port.port;
+}
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
- }
+static void
+__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct ecore_tunn_update_type *tun_type)
+{
+ *p_tunn_cls = tun_type->tun_cls;
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2geneve = 1;
+ if (tun_type->b_mode_enabled)
+ *p_enable_tx_clas = 1;
+}
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgeneve = 1;
+static void
+ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct ecore_tunn_update_type *tun_type,
+ u8 *p_update_port, __le16 *p_port,
+ struct ecore_tunn_update_udp_port *p_udp_port)
+{
+ __ecore_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas,
+ tun_type);
+ if (p_udp_port->b_update_port) {
+ *p_update_port = 1;
+ *p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
+ }
+}
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
- p_tunn_cfg->tunnel_clss_l2geneve = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
- p_tunn_cfg->tunnel_clss_ipgeneve = type;
+static void
+ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+
+ ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
+ ecore_set_tunn_cls_info(p_tun, p_src);
+ ecore_set_tunn_ports(p_tun, p_src);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
+
+ p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
+ p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- unsigned long tunn_mode)
+ struct ecore_tunnel_info *p_tun)
{
- u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
- u8 l2geneve_enable = 0, ipgeneve_enable = 0;
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
- l2gre_enable = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
- ipgre_enable = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
- vxlan_enable = 1;
+ ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+ ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
- ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
- ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+ ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled);
+}
- if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn)
+{
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel hw config is not supported\n");
return;
+ }
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
- l2geneve_enable = 1;
+ if (p_tunn->vxlan_port.b_update_port)
+ ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->vxlan_port.port);
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
- ipgeneve_enable = 1;
+ if (p_tunn->geneve_port.b_update_port)
+ ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->geneve_port.port);
- ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
- ipgeneve_enable);
+ ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
}
static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_start_params *p_src,
+ struct ecore_tunnel_info *p_src,
struct pf_start_tunnel_config *p_tunn_cfg)
{
- unsigned long tunn_mode;
- enum tunnel_clss type;
-
- if (!p_src)
- return;
-
- tunn_mode = p_src->tunn_mode;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
- p_tunn_cfg->tunnel_clss_vxlan = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
- p_tunn_cfg->tunnel_clss_l2gre = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
- p_tunn_cfg->tunnel_clss_ipgre = type;
-
- if (p_src->update_vxlan_udp_port) {
- p_tunn_cfg->set_vxlan_udp_port_flg = 1;
- p_tunn_cfg->vxlan_udp_port =
- OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2gre = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgre = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_vxlan = 1;
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
- if (p_src->update_geneve_udp_port)
- DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
- p_src->update_geneve_udp_port = 0;
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel pf start config is not supported\n");
return;
}
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2geneve = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgeneve = 1;
+ if (!p_src)
+ return;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
- p_tunn_cfg->tunnel_clss_l2geneve = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
- p_tunn_cfg->tunnel_clss_ipgeneve = type;
+ ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
+ ecore_set_tunn_cls_info(p_tun, p_src);
+ ecore_set_tunn_ports(p_tun, p_src);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
}
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_start_params *p_tunn,
+ struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch)
{
@@ -379,11 +364,11 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
- p_hwfn->p_eq->chain.pbl.p_phys_table);
+ p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
p_ramrod->event_ring_num_pages = page_cnt;
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
- p_hwfn->p_consq->chain.pbl.p_phys_table);
+ p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
&p_ramrod->tunnel_config);
@@ -419,11 +404,8 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (p_tunn) {
- ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->tunn_mode);
- p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
- }
+ if (p_tunn)
+ ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
return rc;
}
@@ -498,7 +480,7 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_update_params *p_tunn,
+ struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
{
@@ -506,6 +488,18 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel pf update config is not supported\n");
+ return rc;
+ }
+
+ if (!p_tunn)
+ return ECORE_INVAL;
+
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
@@ -526,15 +520,7 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- if (p_tunn->update_vxlan_udp_port)
- ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->vxlan_udp_port);
- if (p_tunn->update_geneve_udp_port)
- ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->geneve_udp_port);
-
- ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
- p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
+ ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
return rc;
}
@@ -564,7 +550,7 @@ enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
+ enum _ecore_status_t rc;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index 66c9a69b..33e31e42 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -68,32 +68,11 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_start_params *p_tunn,
+ struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch);
/**
- * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
- * update Ramrod
- *
- * This ramrod is sent to update a tunneling configuration
- * for a physical function (PF).
- *
- * @param p_hwfn
- * @param p_tunn - pf update tunneling parameters
- * @param comp_mode - completion mode
- * @param p_comp_data - callback function
- *
- * @return enum _ecore_status_t
- */
-
-enum _ecore_status_t
-ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_update_params *p_tunn,
- enum spq_mode comp_mode,
- struct ecore_spq_comp_cb *p_comp_data);
-
-/**
* @brief ecore_sp_pf_update - PF Function Update Ramrod
*
* This ramrod updates function-related parameters. Every parameter can be
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 0d744ddd..3c1d05b3 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -173,11 +173,10 @@ ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
struct ecore_spq *p_spq)
{
- u16 pq;
struct ecore_cxt_info cxt_info;
struct core_conn_context *p_cxt;
- union ecore_qm_pq_params pq_params;
enum _ecore_status_t rc;
+ u16 physical_q;
cxt_info.iid = p_spq->cid;
@@ -191,23 +190,26 @@ static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
p_cxt = cxt_info.p_cxt;
- SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
- SET_FIELD(p_cxt->xstorm_ag_context.flags1,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
- /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- * XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
- */
- SET_FIELD(p_cxt->xstorm_ag_context.flags9,
- XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ /* @@@TBD we zero the context until we have ilt_reset implemented. */
+ OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
+ SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ * E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
+ */
+ SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+ E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ }
/* CDU validation - FIXME currently disabled */
/* QM physical queue */
- OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = LB_TC;
- pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
- p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);
+ physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
p_cxt->xstorm_st_context.spq_base_lo =
DMA_LO_LE(p_spq->chain.p_phys_addr);
@@ -248,7 +250,8 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
/* make sure the SPQE is updated before the doorbell */
OSAL_WMB(p_hwfn->p_dev);
- DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+ DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY),
+ *(u32 *)&db);
/* make sure doorbell is rang */
OSAL_WMB(p_hwfn->p_dev);
@@ -355,7 +358,7 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
return rc;
}
-struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
+enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
struct ecore_eq *p_eq;
@@ -364,7 +367,7 @@ struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
if (!p_eq) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_eq'\n");
- return OSAL_NULL;
+ return ECORE_NOMEM;
}
/* Allocate and initialize EQ chain*/
@@ -373,34 +376,38 @@ struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
ECORE_CHAIN_MODE_PBL,
ECORE_CHAIN_CNT_TYPE_U16,
num_elem,
- sizeof(union event_ring_element), &p_eq->chain)) {
+ sizeof(union event_ring_element),
+ &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
goto eq_allocate_fail;
}
/* register EQ completion on the SP SB */
- ecore_int_register_cb(p_hwfn,
- ecore_eq_completion,
+ ecore_int_register_cb(p_hwfn, ecore_eq_completion,
p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
- return p_eq;
+ p_hwfn->p_eq = p_eq;
+ return ECORE_SUCCESS;
eq_allocate_fail:
- ecore_eq_free(p_hwfn, p_eq);
- return OSAL_NULL;
+ OSAL_FREE(p_hwfn->p_dev, p_eq);
+ return ECORE_NOMEM;
}
-void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
- ecore_chain_reset(&p_eq->chain);
+ ecore_chain_reset(&p_hwfn->p_eq->chain);
}
-void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
- if (!p_eq)
+ if (!p_hwfn->p_eq)
return;
- ecore_chain_free(p_hwfn->p_dev, &p_eq->chain);
- OSAL_FREE(p_hwfn->p_dev, p_eq);
+
+ ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
+ p_hwfn->p_eq = OSAL_NULL;
}
/***************************************************************************
@@ -501,10 +508,13 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
}
/* SPQ ring */
- if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
- ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
- /* N/A when the mode is SINGLE */
- sizeof(struct slow_path_element), &p_spq->chain)) {
+ if (ecore_chain_alloc(p_hwfn->p_dev,
+ ECORE_CHAIN_USE_TO_PRODUCE,
+ ECORE_CHAIN_MODE_SINGLE,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ 0, /* N/A when the mode is SINGLE */
+ sizeof(struct slow_path_element),
+ &p_spq->chain, OSAL_NULL)) {
DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
goto spq_allocate_fail;
}
@@ -920,6 +930,9 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
if (found->comp_cb.function)
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Got a completion without a callback function\n");
if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
(found->queue == &p_spq->unlimited_pending))
@@ -937,7 +950,7 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
return rc;
}
-struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_consq *p_consq;
@@ -947,7 +960,7 @@ struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
if (!p_consq) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_consq'\n");
- return OSAL_NULL;
+ return ECORE_NOMEM;
}
/* Allocate and initialize EQ chain */
@@ -956,27 +969,30 @@ struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
ECORE_CHAIN_MODE_PBL,
ECORE_CHAIN_CNT_TYPE_U16,
ECORE_CHAIN_PAGE_SIZE / 0x80,
- 0x80, &p_consq->chain)) {
+ 0x80,
+ &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
goto consq_allocate_fail;
}
- return p_consq;
+ p_hwfn->p_consq = p_consq;
+ return ECORE_SUCCESS;
consq_allocate_fail:
- ecore_consq_free(p_hwfn, p_consq);
- return OSAL_NULL;
+ OSAL_FREE(p_hwfn->p_dev, p_consq);
+ return ECORE_NOMEM;
}
-void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
- ecore_chain_reset(&p_consq->chain);
+ ecore_chain_reset(&p_hwfn->p_consq->chain);
}
-void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
- if (!p_consq)
+ if (!p_hwfn->p_consq)
return;
- ecore_chain_free(p_hwfn->p_dev, &p_consq->chain);
- OSAL_FREE(p_hwfn->p_dev, p_consq);
+
+ ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
}
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
index 717ede30..e530f834 100644
--- a/drivers/net/qede/base/ecore_spq.h
+++ b/drivers/net/qede/base/ecore_spq.h
@@ -26,6 +26,7 @@ union ramrod_data {
struct tx_queue_stop_ramrod_data tx_queue_stop;
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
+ struct rx_update_gft_filter_data rx_update_gft;
struct vport_update_ramrod_data vport_update;
struct core_rx_start_ramrod_data core_rx_queue_start;
struct core_rx_stop_ramrod_data core_rx_queue_stop;
@@ -194,28 +195,23 @@ void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param num_elem number of elements in the eq
*
- * @return struct ecore_eq* - a newly allocated structure; NULL upon error.
+ * @return enum _ecore_status_t
*/
-struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn,
- u16 num_elem);
+enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
/**
- * @brief ecore_eq_setup - Reset the SPQ to its start state.
+ * @brief ecore_eq_setup - Reset the EQ to its start state.
*
* @param p_hwfn
- * @param p_eq
*/
-void ecore_eq_setup(struct ecore_hwfn *p_hwfn,
- struct ecore_eq *p_eq);
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_eq_deallocate - deallocates the given EQ struct.
+ * @brief ecore_eq_free - deallocates the given EQ struct.
*
* @param p_hwfn
- * @param p_eq
*/
-void ecore_eq_free(struct ecore_hwfn *p_hwfn,
- struct ecore_eq *p_eq);
+void ecore_eq_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_eq_prod_update - update the FW with default EQ producer
@@ -261,32 +257,26 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_consq_alloc - Allocates & initializes an ConsQ
- * struct
+ * @brief ecore_consq_alloc - Allocates & initializes an ConsQ struct
*
* @param p_hwfn
*
- * @return struct ecore_eq* - a newly allocated structure; NULL upon error.
+ * @return enum _ecore_status_t
*/
-struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_consq_setup - Reset the ConsQ to its start
- * state.
+ * @brief ecore_consq_setup - Reset the ConsQ to its start state.
*
* @param p_hwfn
- * @param p_eq
*/
-void ecore_consq_setup(struct ecore_hwfn *p_hwfn,
- struct ecore_consq *p_consq);
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_consq_free - deallocates the given ConsQ struct.
*
* @param p_hwfn
- * @param p_eq
*/
-void ecore_consq_free(struct ecore_hwfn *p_hwfn,
- struct ecore_consq *p_consq);
+void ecore_consq_free(struct ecore_hwfn *p_hwfn);
#endif /* __ECORE_SPQ_H__ */
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index b28d7281..db2873e7 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -51,6 +51,8 @@ const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_VPORT_UPDATE_RSS",
"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
+ "CHANNEL_TLV_UPDATE_TUNN_PARAM",
+ "CHANNEL_TLV_COALESCE_UPDATE",
"CHANNEL_TLV_MAX"
};
@@ -86,6 +88,7 @@ static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
p_ramrod->personality = PERSONALITY_ETH;
break;
case ECORE_PCI_ETH_ROCE:
+ case ECORE_PCI_ETH_IWARP:
p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
break;
default:
@@ -146,7 +149,7 @@ static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
}
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
- bool b_enabled_only)
+ bool b_enabled_only, bool b_non_malicious)
{
if (!p_hwfn->pf_iov_info) {
DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
@@ -161,6 +164,10 @@ bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
b_enabled_only)
return false;
+ if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
+ b_non_malicious)
+ return false;
+
return true;
}
@@ -175,7 +182,8 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
return OSAL_NULL;
}
- if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+ if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
+ b_enabled_only, false))
vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
else
DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
@@ -184,28 +192,90 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
return vf;
}
+static struct ecore_queue_cid *
+ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_vf_queue *p_queue)
+{
+ int i;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid &&
+ !p_queue->cids[i].b_is_tx)
+ return p_queue->cids[i].p_cid;
+ }
+
+ return OSAL_NULL;
+}
+
+enum ecore_iov_validate_q_mode {
+ ECORE_IOV_VALIDATE_Q_NA,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ ECORE_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u16 qid,
+ enum ecore_iov_validate_q_mode mode,
+ bool b_is_tx)
+{
+ int i;
+
+ if (mode == ECORE_IOV_VALIDATE_Q_NA)
+ return true;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ struct ecore_vf_queue_cid *p_qcid;
+
+ p_qcid = &p_vf->vf_queues[qid].cids[i];
+
+ if (p_qcid->p_cid == OSAL_NULL)
+ continue;
+
+ if (p_qcid->b_is_tx != b_is_tx)
+ continue;
+
+ /* Found. It's enabled. */
+ return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
+ }
+
+ /* In case we haven't found any valid cid, then its disabled */
+ return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
+}
+
static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf,
- u16 rx_qid)
+ u16 rx_qid,
+ enum ecore_iov_validate_q_mode mode)
{
- if (rx_qid >= p_vf->num_rxqs)
+ if (rx_qid >= p_vf->num_rxqs) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[0x%02x] - can't touch Rx queue[%04x];"
" Only 0x%04x are allocated\n",
p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
- return rx_qid < p_vf->num_rxqs;
+ return false;
+ }
+
+ return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
+ mode, false);
}
static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf,
- u16 tx_qid)
+ u16 tx_qid,
+ enum ecore_iov_validate_q_mode mode)
{
- if (tx_qid >= p_vf->num_txqs)
+ if (tx_qid >= p_vf->num_txqs) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[0x%02x] - can't touch Tx queue[%04x];"
" Only 0x%04x are allocated\n",
p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
- return tx_qid < p_vf->num_txqs;
+ return false;
+ }
+
+ return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
+ mode, true);
}
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
@@ -226,6 +296,35 @@ static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
return false;
}
+/* Is there at least 1 queue open? */
+static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_rxqs; i++)
+ if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ false))
+ return true;
+
+ return false;
+}
+
+static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_txqs; i++)
+ if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ true))
+ return true;
+
+ return false;
+}
+
/* TODO - this is linux crc32; Need a way to ifdef it out for linux */
u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
{
@@ -317,10 +416,9 @@ static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
- DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info[%d]: nres %d, cap 0x%x,"
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
"ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
- " stride %d, page size 0x%x\n", 0,
- /* @@@TBD MichalK - function id */
+ " stride %d, page size 0x%x\n",
iov->nres, iov->cap, iov->ctrl,
iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
iov->offset, iov->stride, iov->pgsz);
@@ -395,8 +493,6 @@ static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
return;
}
- p_iov_info->base_vport_id = 1; /* @@@TBD resource allocation */
-
for (idx = 0; idx < p_iov->total_vfs; idx++) {
struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
u32 concrete;
@@ -425,8 +521,6 @@ static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
/* TODO - need to devise a better way of getting opaque */
vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
(vf->abs_vf_id << 8);
- /* @@TBD MichalK - add base vport_id of VFs to equation */
- vf->vport_id = p_iov_info->base_vport_id + idx;
vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
@@ -550,7 +644,6 @@ void ecore_iov_free(struct ecore_hwfn *p_hwfn)
void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
{
OSAL_FREE(p_dev, p_dev->p_iov_info);
- p_dev->p_iov_info = OSAL_NULL;
}
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
@@ -593,18 +686,33 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"IOV capabilities, but no VFs are published\n");
OSAL_FREE(p_dev, p_dev->p_iov_info);
- p_dev->p_iov_info = OSAL_NULL;
return ECORE_SUCCESS;
}
- /* Calculate the first VF index - this is a bit tricky; Basically,
- * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
- * after the first engine's VFs.
+ /* First VF index based on offset is tricky:
+ * - If ARI is supported [likely], offset - (16 - pf_id) would
+ * provide the number for eng0. 2nd engine Vfs would begin
+ * after the first engine's VFs.
+ * - If !ARI, VFs would start on next device.
+ * so offset - (256 - pf_id) would provide the number.
+ * Utilize the fact that (256 - pf_id) is achieved only be later
+ * to diffrentiate between the two.
*/
- p_dev->p_iov_info->first_vf_in_pf = p_hwfn->p_dev->p_iov_info->offset +
- p_hwfn->abs_pf_id - 16;
- if (ECORE_PATH_ID(p_hwfn))
- p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+ if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+ u32 first = p_hwfn->p_dev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+
+ p_dev->p_iov_info->first_vf_in_pf = first;
+
+ if (ECORE_PATH_ID(p_hwfn))
+ p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+ } else {
+ u32 first = p_hwfn->p_dev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 256;
+
+ p_dev->p_iov_info->first_vf_in_pf = first;
+ }
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"First VF in hwfn 0x%08x\n",
@@ -613,7 +721,8 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
-bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
+static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_fail_malicious)
{
/* Check PF supports sriov */
if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
@@ -621,12 +730,17 @@ bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
return false;
/* Check VF validity */
- if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true))
+ if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
return false;
return true;
}
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
+}
+
void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
u16 rel_vf_id, u8 to_disable)
{
@@ -747,6 +861,9 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+ /* It's possible VF was previously considered malicious */
+ vf->b_malicious = false;
+
rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
vf->abs_vf_id, vf->num_sbs);
if (rc != ECORE_SUCCESS)
@@ -901,17 +1018,59 @@ static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
vf->num_sbs = 0;
}
-enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 rel_vf_id, u16 num_rx_queues)
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps)
+{
+ struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+ struct ecore_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+enum _ecore_status_t
+ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_iov_vf_init_params *p_params)
{
+ struct ecore_mcp_link_capabilities link_caps;
+ struct ecore_mcp_link_params link_params;
+ struct ecore_mcp_link_state link_state;
u8 num_of_vf_available_chains = 0;
struct ecore_vf_info *vf = OSAL_NULL;
+ u16 qid, num_irqs;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cids;
u8 i;
- vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
if (!vf) {
DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
return ECORE_UNKNOWN_ERROR;
@@ -919,22 +1078,80 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
if (vf->b_init) {
DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
- rel_vf_id);
+ p_params->rel_vf_id);
+ return ECORE_INVAL;
+ }
+
+ /* Perform sanity checking on the requested vport/rss */
+ if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
+ p_params->rel_vf_id, p_params->vport_id);
+ return ECORE_INVAL;
+ }
+
+ if ((p_params->num_queues > 1) &&
+ (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
+ p_params->rel_vf_id, p_params->rss_eng_id);
return ECORE_INVAL;
}
+ /* TODO - remove this once we get confidence of change */
+ if (!p_params->vport_id) {
+ DP_NOTICE(p_hwfn, false,
+ "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
+ p_params->rel_vf_id);
+ }
+ if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
+ DP_NOTICE(p_hwfn, false,
+ "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
+ p_params->rel_vf_id);
+ }
+ vf->vport_id = p_params->vport_id;
+ vf->rss_eng_id = p_params->rss_eng_id;
+
+ /* Perform sanity checking on the requested queue_id */
+ for (i = 0; i < p_params->num_queues; i++) {
+ u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
+ u16 max_vf_qzone = min_vf_qzone +
+ FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
+
+ qid = p_params->req_rx_queue[i];
+ if (qid < min_vf_qzone || qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+ qid, p_params->rel_vf_id,
+ min_vf_qzone, max_vf_qzone);
+ return ECORE_INVAL;
+ }
+
+ qid = p_params->req_tx_queue[i];
+ if (qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
+ qid, p_params->rel_vf_id, max_vf_qzone);
+ return ECORE_INVAL;
+ }
+
+ /* If client *really* wants, Tx qid can be shared with PF */
+ if (qid < min_vf_qzone)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
+ p_params->rel_vf_id, qid, i);
+ }
+
/* Limit number of queues according to number of CIDs */
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d] - requesting to initialize for 0x%04x queues"
" [0x%04x CIDs available]\n",
- vf->relative_vf_id, num_rx_queues, (u16)cids);
- num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));
+ vf->relative_vf_id, p_params->num_queues, (u16)cids);
+ num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
p_ptt,
vf,
- num_rx_queues);
+ num_irqs);
if (num_of_vf_available_chains == 0) {
DP_ERR(p_hwfn, "no available igu sbs\n");
return ECORE_NOMEM;
@@ -945,28 +1162,28 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
vf->num_txqs = num_of_vf_available_chains;
for (i = 0; i < vf->num_rxqs; i++) {
- u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,
- vf->igu_sbs[i]);
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
- if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
- DP_NOTICE(p_hwfn, true,
- "VF[%d] will require utilizing of"
- " out-of-bounds queues - %04x\n",
- vf->relative_vf_id, queue_id);
- /* TODO - cleanup the already allocate SBs */
- return ECORE_INVAL;
- }
-
- /* CIDs are per-VF, so no problem having them 0-based. */
- vf->vf_queues[i].fw_rx_qid = queue_id;
- vf->vf_queues[i].fw_tx_qid = queue_id;
- vf->vf_queues[i].fw_cid = i;
+ p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+ p_queue->fw_tx_qid = p_params->req_tx_queue[i];
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
- vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+ "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i],
+ p_queue->fw_rx_qid, p_queue->fw_tx_qid);
}
+ /* Update the link configuration in bulletin.
+ */
+ OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
+ sizeof(link_params));
+ OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
+ sizeof(link_state));
+ OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
+ sizeof(link_caps));
+ ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
+ &link_params, &link_state, &link_caps);
+
rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
if (rc == ECORE_SUCCESS) {
@@ -981,43 +1198,6 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
return rc;
}
-void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
- u16 vfid,
- struct ecore_mcp_link_params *params,
- struct ecore_mcp_link_state *link,
- struct ecore_mcp_link_capabilities *p_caps)
-{
- struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
- struct ecore_bulletin_content *p_bulletin;
-
- if (!p_vf)
- return;
-
- p_bulletin = p_vf->bulletin.p_virt;
- p_bulletin->req_autoneg = params->speed.autoneg;
- p_bulletin->req_adv_speed = params->speed.advertised_speeds;
- p_bulletin->req_forced_speed = params->speed.forced_speed;
- p_bulletin->req_autoneg_pause = params->pause.autoneg;
- p_bulletin->req_forced_rx = params->pause.forced_rx;
- p_bulletin->req_forced_tx = params->pause.forced_tx;
- p_bulletin->req_loopback = params->loopback_mode;
-
- p_bulletin->link_up = link->link_up;
- p_bulletin->speed = link->speed;
- p_bulletin->full_duplex = link->full_duplex;
- p_bulletin->autoneg = link->an;
- p_bulletin->autoneg_complete = link->an_complete;
- p_bulletin->parallel_detection = link->parallel_detection;
- p_bulletin->pfc_enabled = link->pfc_enabled;
- p_bulletin->partner_adv_speed = link->partner_adv_speed;
- p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
- p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
- p_bulletin->partner_adv_pause = link->partner_adv_pause;
- p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
-
- p_bulletin->capability_speed = p_caps->speed_capabilities;
-}
-
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 rel_vf_id)
@@ -1326,7 +1506,7 @@ struct ecore_public_vf_info
static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf)
{
- u32 i;
+ u32 i, j;
p_vf->vf_bulletin = 0;
p_vf->vport_instance = 0;
p_vf->configured_features = 0;
@@ -1337,8 +1517,18 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
p_vf->num_active_rxqs = 0;
- for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++)
- p_vf->vf_queues[i].rxq_active = 0;
+ for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
+ struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+
+ for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+ if (!p_queue->cids[j].p_cid)
+ continue;
+
+ ecore_eth_queue_cid_release(p_hwfn,
+ p_queue->cids[j].p_cid);
+ p_queue->cids[j].p_cid = OSAL_NULL;
+ }
+ }
OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
@@ -1351,7 +1541,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
{
- int i;
+ u8 i;
/* Queue related information */
p_resp->num_rxqs = p_vf->num_rxqs;
@@ -1372,7 +1562,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
for (i = 0; i < p_resp->num_rxqs; i++) {
ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
(u16 *)&p_resp->hw_qid[i]);
- p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+ p_resp->cid[i] = i;
}
/* Filter related information */
@@ -1460,6 +1650,18 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+ /* TODO - not doing anything is bad since we'll assert, but this isn't
+ * necessarily the right behavior - perhaps we should have allowed some
+ * versatility here.
+ */
+ if (vf->state != VF_FREE &&
+ vf->state != VF_STOPPED) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
+ vf->abs_vf_id, vf->state);
+ goto out;
+ }
+
/* Validate FW compatibility */
if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
if (req->vfdev_info.capabilities &
@@ -1575,12 +1777,12 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
"VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
" db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
"resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
- " n_vlans-%d, n_mcs-%d\n",
+ " n_vlans-%d\n",
vf->abs_vf_id, resp->pfdev_info.chip_num,
resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
(unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
- resc->num_vlan_filters, resc->num_mc_filters);
+ resc->num_vlan_filters);
vf->state = VF_ACQUIRED;
@@ -1650,11 +1852,9 @@ ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
filter.vlan, p_vf->relative_vf_id);
- rc = ecore_sp_eth_filter_ucast(p_hwfn,
- p_vf->opaque_fid,
- &filter,
- ECORE_SPQ_MODE_CB,
- OSAL_NULL);
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, ECORE_SPQ_MODE_CB,
+ OSAL_NULL);
if (rc) {
DP_NOTICE(p_hwfn, true,
"Failed to configure VLAN [%04x]"
@@ -1682,9 +1882,10 @@ ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
return rc;
}
-static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
- u64 events)
+static enum _ecore_status_t
+ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u64 events)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_filter_ucast filter;
@@ -1764,14 +1965,17 @@ static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
/* Update all the Rx queues */
for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
- u16 qid;
+ struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+ struct ecore_queue_cid *p_cid = OSAL_NULL;
- if (!p_vf->vf_queues[i].rxq_active)
+ /* There can be at most 1 Rx queue on qzone. Find it */
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
+ p_queue);
+ if (p_cid == OSAL_NULL)
continue;
- qid = p_vf->vf_queues[i].fw_rx_qid;
-
- rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn,
+ (void **)&p_cid,
1, 0, 1,
ECORE_SPQ_MODE_EBLOCK,
OSAL_NULL);
@@ -1779,7 +1983,7 @@ static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, true,
"Failed to send Rx update"
" fo queue[0x%04x]\n",
- qid);
+ p_cid->rel.queue_id);
return rc;
}
}
@@ -1823,6 +2027,8 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
vf->state = VF_ENABLED;
start = &mbx->req_virt->start_vport;
+ ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
/* Initialize Status block in CAU */
for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
if (!start->sb_addr[sb_id]) {
@@ -1837,7 +2043,6 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
vf->igu_sbs[sb_id],
vf->abs_vf_id, 1);
}
- ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
vf->mtu = start->mtu;
vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
@@ -1904,6 +2109,15 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
vf->vport_instance--;
vf->spoof_chk = false;
+ if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
+ (ecore_iov_validate_active_txq(p_hwfn, vf))) {
+ vf->b_malicious = true;
+ DP_NOTICE(p_hwfn, false,
+ "VF [%02x] - considered malicious;"
+ " Unable to stop RX/TX queuess\n",
+ vf->abs_vf_id);
+ }
+
rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn,
@@ -1961,63 +2175,242 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_queue_start_common_params p_params;
+ struct ecore_queue_start_common_params params;
+ struct ecore_queue_cid_vf_params vf_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
+ struct ecore_vf_queue *p_queue;
struct vfpf_start_rxq_tlv *req;
+ struct ecore_queue_cid *p_cid;
bool b_legacy_vf = false;
+ u8 qid_usage_idx;
enum _ecore_status_t rc;
req = &mbx->req_virt->start_rxq;
- OSAL_MEMSET(&p_params, 0, sizeof(p_params));
- p_params.queue_id = (u8)vf->vf_queues[req->rx_qid].fw_rx_qid;
- p_params.vf_qid = req->rx_qid;
- p_params.vport_id = vf->vport_id;
- p_params.stats_id = vf->abs_vf_id + 0x10,
- p_params.sb = req->hw_sb;
- p_params.sb_idx = req->sb_index;
-
- if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+ ECORE_IOV_VALIDATE_Q_DISABLE) ||
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- /* Legacy VFs have their Producers in a different location, which they
- * calculate on their own and clean the producer prior to this.
+ /* Legacy VFs made assumptions on the CID their queues connected to,
+ * assuming queue X used CID X.
+ * TODO - need to validate that there was no official release post
+ * the current legacy scheme that still made that assumption.
*/
if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
ETH_HSI_VER_NO_PKT_LEN_TUNN)
b_legacy_vf = true;
- else
+
+ /* Acquire a new queue-cid */
+ p_queue = &vf->vf_queues[req->rx_qid];
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.queue_id = (u8)p_queue->fw_rx_qid;
+ params.vport_id = vf->vport_id;
+ params.stats_id = vf->abs_vf_id + 0x10;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ /* TODO - set qid_usage_idx according to extended TLV. For now, use
+ * '0' for Rx.
+ */
+ qid_usage_idx = 0;
+
+ OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->rx_qid;
+ vf_params.b_legacy = b_legacy_vf;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, &vf_params);
+ if (p_cid == OSAL_NULL)
+ goto out;
+
+ /* Legacy VFs have their Producers in a different location, which they
+ * calculate on their own and clean the producer prior to this.
+ */
+ if (!b_legacy_vf)
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
0);
- rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
- vf->vf_queues[req->rx_qid].fw_cid,
- &p_params,
- req->bd_max_bytes,
- req->rxq_addr,
- req->cqe_pbl_addr,
- req->cqe_pbl_size,
- b_legacy_vf);
-
- if (rc) {
+ rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr,
+ req->cqe_pbl_size);
+ if (rc != ECORE_SUCCESS) {
status = PFVF_STATUS_FAILURE;
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
} else {
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = false;
status = PFVF_STATUS_SUCCESS;
- vf->vf_queues[req->rx_qid].rxq_active = true;
vf->num_active_rxqs++;
}
out:
- ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf,
- status, b_legacy_vf);
+ ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
+ b_legacy_vf);
+}
+
+static void
+ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
+ struct ecore_tunnel_info *p_tun,
+ u16 tunn_feature_mask)
+{
+ p_resp->tunn_feature_mask = tunn_feature_mask;
+ p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
+ p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
+ p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
+ p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
+ p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
+ p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
+ p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
+ p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
+ p_resp->geneve_udp_port = p_tun->geneve_port.port;
+ p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
+}
+
+static void
+__ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_tun,
+ enum ecore_tunn_mode mask, u8 tun_cls)
+{
+ if (p_req->tun_mode_update_mask & (1 << mask)) {
+ p_tun->b_update_mode = true;
+
+ if (p_req->tunn_mode & (1 << mask))
+ p_tun->b_mode_enabled = true;
+ }
+
+ p_tun->tun_cls = tun_cls;
+}
+
+static void
+ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_tun,
+ struct ecore_tunn_update_udp_port *p_port,
+ enum ecore_tunn_mode mask,
+ u8 tun_cls, u8 update_port, u16 port)
+{
+ if (update_port) {
+ p_port->b_update_port = true;
+ p_port->port = port;
+ }
+
+ __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
+}
+
+static bool
+ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
+{
+ bool b_update_requested = false;
+
+ if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
+ p_req->update_geneve_port || p_req->update_vxlan_port)
+ b_update_requested = true;
+
+ return b_update_requested;
+}
+
+static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 status = PFVF_STATUS_SUCCESS;
+ bool b_update_required = false;
+ struct ecore_tunnel_info tunn;
+ u16 tunn_feature_mask = 0;
+ int i;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ OSAL_MEM_ZERO(&tunn, sizeof(tunn));
+ p_req = &mbx->req_virt->tunn_param_update;
+
+ if (!ecore_iov_pf_validate_tunn_param(p_req)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No tunnel update requested by VF\n");
+ status = PFVF_STATUS_FAILURE;
+ goto send_resp;
+ }
+
+ tunn.b_update_rx_cls = p_req->update_tun_cls;
+ tunn.b_update_tx_cls = p_req->update_tun_cls;
+
+ ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
+ ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
+ p_req->update_vxlan_port,
+ p_req->vxlan_port);
+ ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
+ ECORE_MODE_L2GENEVE_TUNN,
+ p_req->l2geneve_clss,
+ p_req->update_geneve_port,
+ p_req->geneve_port);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
+ ECORE_MODE_IPGENEVE_TUNN,
+ p_req->ipgeneve_clss);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
+ ECORE_MODE_L2GRE_TUNN,
+ p_req->l2gre_clss);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
+ ECORE_MODE_IPGRE_TUNN,
+ p_req->ipgre_clss);
+
+ /* If PF modifies VF's req then it should
+ * still return an error in case of partial configuration
+ * or modified configuration as opposed to requested one.
+ */
+ rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
+ &b_update_required, &tunn);
+
+ if (rc != ECORE_SUCCESS)
+ status = PFVF_STATUS_FAILURE;
+
+ /* If ECORE client is willing to update anything ? */
+ if (b_update_required) {
+ u16 geneve_port;
+
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ status = PFVF_STATUS_FAILURE;
+
+ geneve_port = p_tun->geneve_port.port;
+ ecore_for_each_vf(p_hwfn, i) {
+ ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
+ p_tun->vxlan_port.port,
+ geneve_port);
+ }
+ }
+
+send_resp:
+ p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
+
+ ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
+ ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}
static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
+ u32 cid,
u8 status)
{
struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
@@ -2046,12 +2439,8 @@ static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
- if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
- u16 qid = mbx->req_virt->start_txq.tx_qid;
-
- p_tlv->offset = DB_ADDR_VF(p_vf->vf_queues[qid].fw_cid,
- DQ_DEMS_LEGACY);
- }
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
+ p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
@@ -2060,48 +2449,80 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_queue_start_common_params p_params;
+ struct ecore_queue_start_common_params params;
+ struct ecore_queue_cid_vf_params vf_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
- union ecore_qm_pq_params pq_params;
+ struct ecore_vf_queue *p_queue;
struct vfpf_start_txq_tlv *req;
+ struct ecore_queue_cid *p_cid;
+ bool b_legacy_vf = false;
+ u8 qid_usage_idx;
+ u32 cid = 0;
enum _ecore_status_t rc;
+ u16 pq;
- /* Prepare the parameters which would choose the right PQ */
- OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
- pq_params.eth.is_vf = 1;
- pq_params.eth.vf_id = vf->relative_vf_id;
-
+ OSAL_MEMSET(&params, 0, sizeof(params));
req = &mbx->req_virt->start_txq;
- OSAL_MEMSET(&p_params, 0, sizeof(p_params));
- p_params.queue_id = (u8)vf->vf_queues[req->tx_qid].fw_tx_qid;
- p_params.vport_id = vf->vport_id;
- p_params.stats_id = vf->abs_vf_id + 0x10,
- p_params.sb = req->hw_sb;
- p_params.sb_idx = req->sb_index;
-
- if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+ ECORE_IOV_VALIDATE_Q_NA) ||
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- rc = ecore_sp_eth_txq_start_ramrod(
- p_hwfn,
- vf->opaque_fid,
- vf->vf_queues[req->tx_qid].fw_cid,
- &p_params,
- req->pbl_addr,
- req->pbl_size,
- &pq_params);
+ /* In case this is a legacy VF - need to know to use the right cids.
+ * TODO - need to validate that there was no official release post
+ * the current legacy scheme that still made that assumption.
+ */
+ if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy_vf = true;
- if (rc)
+ /* Acquire a new queue-cid */
+ p_queue = &vf->vf_queues[req->tx_qid];
+
+ params.queue_id = p_queue->fw_tx_qid;
+ params.vport_id = vf->vport_id;
+ params.stats_id = vf->abs_vf_id + 0x10;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ /* TODO - set qid_usage_idx according to extended TLV. For now, use
+ * '1' for Tx.
+ */
+ qid_usage_idx = 1;
+
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->tx_qid;
+ vf_params.b_legacy = b_legacy_vf;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, &vf_params);
+ if (p_cid == OSAL_NULL)
+ goto out;
+
+ pq = ecore_get_cm_pq_idx_vf(p_hwfn,
+ vf->relative_vf_id);
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
+ req->pbl_addr, req->pbl_size, pq);
+ if (rc != ECORE_SUCCESS) {
status = PFVF_STATUS_FAILURE;
- else {
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ } else {
status = PFVF_STATUS_SUCCESS;
- vf->vf_queues[req->tx_qid].txq_active = true;
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = true;
+ cid = p_cid->cid;
}
out:
- ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
+ ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
+ cid, status);
}
static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
@@ -2111,22 +2532,37 @@ static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
bool cqe_completion)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- int qid;
+ int qid, i;
+ /* TODO - improve validation [wrap around] */
if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
return ECORE_INVAL;
for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
- if (vf->vf_queues[qid].rxq_active) {
- rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
- vf->vf_queues[qid].
- fw_rx_qid, false,
- cqe_completion);
-
- if (rc)
- return rc;
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+ struct ecore_queue_cid **pp_cid = OSAL_NULL;
+
+ /* There can be at most a single Rx per qzone. Find it */
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid &&
+ !p_queue->cids[i].b_is_tx) {
+ pp_cid = &p_queue->cids[i].p_cid;
+ break;
+ }
}
- vf->vf_queues[qid].rxq_active = false;
+ if (pp_cid == OSAL_NULL) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Ignoring VF[%02x] request of closing Rx queue %04x - closed\n",
+ vf->relative_vf_id, qid);
+ continue;
+ }
+
+ rc = ecore_eth_rx_queue_stop(p_hwfn, *pp_cid,
+ false, cqe_completion);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *pp_cid = OSAL_NULL;
vf->num_active_rxqs--;
}
@@ -2138,22 +2574,33 @@ static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
u16 txq_id, u8 num_txqs)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- int qid;
+ struct ecore_vf_queue *p_queue;
+ int qid, j;
- if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+ if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
+ ECORE_IOV_VALIDATE_Q_NA) ||
+ !ecore_iov_validate_txq(p_hwfn, vf, txq_id + num_txqs,
+ ECORE_IOV_VALIDATE_Q_NA))
return ECORE_INVAL;
for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
- if (vf->vf_queues[qid].txq_active) {
- rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
- vf->vf_queues[qid].
- fw_tx_qid);
+ p_queue = &vf->vf_queues[qid];
+ for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+ if (p_queue->cids[j].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[j].b_is_tx)
+ continue;
- if (rc)
+ rc = ecore_eth_tx_queue_stop(p_hwfn,
+ p_queue->cids[j].p_cid);
+ if (rc != ECORE_SUCCESS)
return rc;
+
+ p_queue->cids[j].p_cid = OSAL_NULL;
}
- vf->vf_queues[qid].txq_active = false;
}
+
return rc;
}
@@ -2208,44 +2655,52 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
struct vfpf_update_rxq_tlv *req;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
u8 complete_event_flg;
u8 complete_cqe_flg;
- u16 qid;
enum _ecore_status_t rc;
- u8 i;
+ u16 i;
req = &mbx->req_virt->update_rxq;
complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
- for (i = 0; i < req->num_rxqs; i++) {
- qid = req->rx_qid + i;
-
- if (!vf->vf_queues[qid].rxq_active) {
- DP_NOTICE(p_hwfn, true,
- "VF rx_qid = %d isn`t active!\n", qid);
- status = PFVF_STATUS_FAILURE;
- break;
+ /* Validate inputs */
+ for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid,
+ req->num_rxqs);
+ goto out;
}
+ }
- rc = ecore_sp_eth_rx_queues_update(p_hwfn,
- vf->vf_queues[qid].fw_rx_qid,
- 1,
- complete_cqe_flg,
- complete_event_flg,
- ECORE_SPQ_MODE_EBLOCK,
- OSAL_NULL);
+ for (i = 0; i < req->num_rxqs; i++) {
+ struct ecore_vf_queue *p_queue;
+ u16 qid = req->rx_qid + i;
- if (rc) {
- status = PFVF_STATUS_FAILURE;
- break;
- }
+ p_queue = &vf->vf_queues[qid];
+ handlers[i] = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+ p_queue);
}
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+ req->num_rxqs,
+ complete_cqe_flg,
+ complete_event_flg,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc)
+ goto out;
+
+ status = PFVF_STATUS_SUCCESS;
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
length, status);
}
@@ -2422,12 +2877,14 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf,
struct ecore_sp_vport_update_params *p_data,
struct ecore_rss_params *p_rss,
- struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask, u16 *tlvs_accepted)
{
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
- u16 i, q_idx, max_q_idx;
+ bool b_reject = false;
u16 table_size;
+ u16 i, q_idx;
p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
@@ -2452,39 +2909,38 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
VFPF_UPDATE_RSS_KEY_FLAG);
p_rss->rss_enable = p_rss_tlv->rss_enable;
- p_rss->rss_eng_id = vf->relative_vf_id + 1;
+ p_rss->rss_eng_id = vf->rss_eng_id;
p_rss->rss_caps = p_rss_tlv->rss_caps;
p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
- OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
- sizeof(p_rss->rss_ind_table));
OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
sizeof(p_rss->rss_key));
table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
(1 << p_rss_tlv->rss_table_size_log));
- max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);
-
for (i = 0; i < table_size; i++) {
- u16 index = vf->vf_queues[0].fw_rx_qid;
+ struct ecore_queue_cid *p_cid;
- q_idx = p_rss->rss_ind_table[i];
- if (q_idx >= max_q_idx)
- DP_NOTICE(p_hwfn, true,
- "rss_ind_table[%d] = %d,"
- " rxq is out of range\n",
- i, q_idx);
- else if (!vf->vf_queues[q_idx].rxq_active)
- DP_NOTICE(p_hwfn, true,
- "rss_ind_table[%d] = %d, rxq is not active\n",
- i, q_idx);
- else
- index = vf->vf_queues[q_idx].fw_rx_qid;
- p_rss->rss_ind_table[i] = index;
+ q_idx = p_rss_tlv->rss_ind_table[i];
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+ vf->relative_vf_id, q_idx);
+ b_reject = true;
+ goto out;
+ }
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+ &vf->vf_queues[q_idx]);
+ p_rss->rss_ind_table[i] = p_cid;
}
p_data->rss_params = p_rss;
+out:
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
+ if (!b_reject)
+ *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
}
static void
@@ -2540,11 +2996,11 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_rss_params *p_rss_params = OSAL_NULL;
struct ecore_sp_vport_update_params params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
struct ecore_sge_tpa_params sge_tpa_params;
u16 tlvs_mask = 0, tlvs_accepted = 0;
- struct ecore_rss_params rss_params;
u8 status = PFVF_STATUS_SUCCESS;
u16 length;
enum _ecore_status_t rc;
@@ -2559,6 +3015,12 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
goto out;
}
+ p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
+ if (p_rss_params == OSAL_NULL) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
OSAL_MEMSET(&params, 0, sizeof(params));
params.opaque_fid = vf->opaque_fid;
params.vport_id = vf->vport_id;
@@ -2572,20 +3034,24 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
- ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
- mbx, &tlvs_mask);
ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
&sge_tpa_params, mbx, &tlvs_mask);
+ tlvs_accepted = tlvs_mask;
+
+ /* Some of the extended TLVs need to be validated first; In that case,
+ * they can update the mask without updating the accepted [so that
+ * PF could communicate to VF it has rejected request].
+ */
+ ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+ mbx, &tlvs_mask, &tlvs_accepted);
+
/* Just log a message if there is no single extended tlv in buffer.
* When all features of vport update ramrod would be requested by VF
* as extended TLVs in buffer then an error can be returned in response
* if there is no extended TLV present in buffer.
*/
- tlvs_accepted = tlvs_mask;
-
-#ifndef LINUX_REMOVE
if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
&params, &tlvs_accepted) !=
ECORE_SUCCESS) {
@@ -2593,7 +3059,6 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
-#endif
if (!tlvs_accepted) {
if (tlvs_mask)
@@ -2614,6 +3079,7 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
status = PFVF_STATUS_FAILURE;
out:
+ OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
tlvs_mask, tlvs_accepted);
ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
@@ -2916,6 +3382,88 @@ static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
length, status);
}
+static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct vfpf_update_coalesce *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct ecore_queue_cid *p_cid;
+ u16 rx_coal, tx_coal;
+ u16 qid;
+ int i;
+
+ req = &mbx->req_virt->update_coalesce;
+
+ rx_coal = req->rx_coal;
+ tx_coal = req->tx_coal;
+ qid = req->qid;
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ rx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ tx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+ vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+ if (rx_coal) {
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+ &vf->vf_queues[qid]);
+
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set rx queue = %d coalesce\n",
+ vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+ goto out;
+ }
+ }
+
+ /* TODO - in future, it might be possible to pass this in a per-cid
+ * granularity. For now, do this for all Tx queues.
+ */
+ if (tx_coal) {
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[i].b_is_tx)
+ continue;
+
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+ p_queue->cids[i].p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set tx queue coalesce\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+ }
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
static enum _ecore_status_t
ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
@@ -3049,6 +3597,13 @@ ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
return rc;
}
+ /* Workaround to make VF-PF channel ready, as FW
+ * doesn't do that as a part of FLR.
+ */
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
+
/* VF_STOPPED has to be set only after final cleanup
* but prior to re-enabling the VF.
*/
@@ -3115,9 +3670,10 @@ ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
return rc;
}
-int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
+bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
- u16 i, found = 0;
+ bool found = false;
+ u16 i;
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
@@ -3127,7 +3683,7 @@ int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
if (!p_hwfn->p_dev->p_iov_info) {
DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
- return 0;
+ return false;
}
/* Mark VFs */
@@ -3156,7 +3712,7 @@ int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
* VF flr until ACKs, we're safe.
*/
p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
- found = 1;
+ found = true;
}
}
@@ -3215,7 +3771,8 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
p_vf, mbx->first_tlv.tl.type);
/* check if tlv type is known */
- if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
+ !p_vf->b_malicious) {
/* switch on the opcode */
switch (mbx->first_tlv.tl.type) {
case CHANNEL_TLV_ACQUIRE:
@@ -3257,7 +3814,34 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
case CHANNEL_TLV_RELEASE:
ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+ ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_COALESCE_UPDATE:
+ ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
}
+ } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ /* If we've received a message from a VF we consider malicious
+ * we ignore the messasge unless it's one for RELEASE, in which
+ * case we'll let it have the benefit of doubt, allowing the
+ * next loaded driver to start again.
+ */
+ if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
+ /* TODO - initiate FLR, remove malicious indication */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
+ p_vf->abs_vf_id);
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
+ p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+ }
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_MALICIOUS);
} else {
/* unknown TLV - this may belong to a VF driver from the future
* - a version written after this PF driver was written, which
@@ -3322,21 +3906,31 @@ void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
}
-static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
- u16 abs_vfid,
- struct regpair *vf_msg)
+static struct ecore_vf_info *
+ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
{
u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
- struct ecore_vf_info *p_vf;
- if (!ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
+ if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Got a message from VF [abs 0x%08x] that cannot be"
+ "Got indication for VF [abs 0x%08x] that cannot be"
" handled by PF\n",
abs_vfid);
- return ECORE_SUCCESS;
+ return OSAL_NULL;
}
- p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+
+ return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+}
+
+static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
+ u16 abs_vfid,
+ struct regpair *vf_msg)
+{
+ struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
+ abs_vfid);
+
+ if (!p_vf)
+ return ECORE_SUCCESS;
/* List the physical address of the request so that handler
* could later on copy the message from it.
@@ -3346,6 +3940,25 @@ static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}
+static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
+ struct malicious_vf_eqe_data *p_data)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
+
+ if (!p_vf)
+ return;
+
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->errId);
+
+ p_vf->b_malicious = true;
+
+ OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
+}
+
enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
u8 opcode,
__le16 echo,
@@ -3359,6 +3972,9 @@ enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF-FLR is still not supported\n");
return ECORE_SUCCESS;
+ case COMMON_EVENT_MALICIOUS_VF:
+ ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
+ return ECORE_SUCCESS;
default:
DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
opcode);
@@ -3381,11 +3997,11 @@ u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
goto out;
for (i = rel_vf_id; i < p_iov->total_vfs; i++)
- if (ecore_iov_is_valid_vfid(p_hwfn, rel_vf_id, true))
+ if (ecore_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false))
return i;
out:
- return MAX_NUM_VFS;
+ return E4_MAX_NUM_VFS;
}
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
@@ -3427,6 +4043,12 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
"Can not set forced MAC, invalid vfid [%d]\n", vfid);
return;
}
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set forced MAC to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
feature = 1 << MAC_ADDR_FORCED;
OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
@@ -3451,6 +4073,12 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
"Can not set MAC, invalid vfid [%d]\n", vfid);
return ECORE_INVAL;
}
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set MAC to malicious VF [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
@@ -3476,7 +4104,14 @@ ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->p_dev, true,
- "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ "Can not set untagged default, invalid vfid [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set untagged default to malicious VF [%d]\n",
+ vfid);
return ECORE_INVAL;
}
@@ -3516,18 +4151,6 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
*opaque_fid = vf_info->opaque_fid;
}
-void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
- u8 *p_vort_id)
-{
- struct ecore_vf_info *vf_info;
-
- vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
- if (!vf_info)
- return;
-
- *p_vort_id = vf_info->vport_id;
-}
-
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
u16 pvid, int vfid)
{
@@ -3541,6 +4164,12 @@ void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
vfid);
return;
}
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set forced vlan to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
feature = 1 << VLAN_ADDR_FORCED;
vf_info->bulletin.p_virt->pvid = pvid;
@@ -3552,6 +4181,29 @@ void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
+void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set udp ports, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Can not set udp ports to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
+ vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
+}
+
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
struct ecore_vf_info *p_vf_info;
@@ -3734,30 +4386,6 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
-enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
- int vfid, u32 rate)
-{
- struct ecore_vf_info *vf;
- u8 vport_id;
- int i;
-
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
- if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
- DP_NOTICE(p_hwfn, true,
- "SR-IOV sanity check failed,"
- " can't set min rate\n");
- return ECORE_INVAL;
- }
- }
-
- vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
- vport_id = vf->vport_id;
-
- return ecore_configure_vport_wfq(p_dev, vport_id, rate);
-}
-
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid,
@@ -3856,7 +4484,20 @@ bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
return (p_vf->state == VF_ENABLED);
}
-int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
+}
+
+enum _ecore_status_t
+ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
struct ecore_wfq_data *vf_vp_wfq;
struct ecore_vf_info *vf_info;
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index ed6ddc49..3c2f58bd 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -13,9 +13,10 @@
#include "ecore_vfpf_if.h"
#include "ecore_iov_api.h"
#include "ecore_hsi_common.h"
+#include "ecore_l2.h"
#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
- (MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
+ (E4_MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
/* Represents a full message. Both the request filled by VF
* and the response filled by the PF. The VF needs one copy
@@ -62,12 +63,18 @@ struct ecore_iov_vf_mbx {
*/
};
-struct ecore_vf_q_info {
+struct ecore_vf_queue_cid {
+ bool b_is_tx;
+ struct ecore_queue_cid *p_cid;
+};
+
+/* Describes a qzone associated with the VF */
+struct ecore_vf_queue {
+ /* Input from upper-layer, mapping relateive queue to queue-zone */
u16 fw_rx_qid;
u16 fw_tx_qid;
- u8 fw_cid;
- u8 rxq_active;
- u8 txq_active;
+
+ struct ecore_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};
enum vf_state {
@@ -97,6 +104,7 @@ struct ecore_vf_info {
struct ecore_iov_vf_mbx vf_mbx;
enum vf_state state;
bool b_init;
+ bool b_malicious;
u8 to_disable;
struct ecore_bulletin bulletin;
@@ -110,6 +118,7 @@ struct ecore_vf_info {
u16 mtu;
u8 vport_id;
+ u8 rss_eng_id;
u8 relative_vf_id;
u8 abs_vf_id;
#define ECORE_VF_ABS_ID(p_hwfn, p_vf) (ECORE_PATH_ID(p_hwfn) ? \
@@ -125,7 +134,7 @@ struct ecore_vf_info {
u8 num_mac_filters;
u8 num_vlan_filters;
- struct ecore_vf_q_info vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
+ struct ecore_vf_queue vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];
/* TODO - Only windows is using it - should be removed */
@@ -151,10 +160,9 @@ struct ecore_vf_info {
* capability enabled.
*/
struct ecore_pf_iov {
- struct ecore_vf_info vfs_array[MAX_NUM_VFS];
+ struct ecore_vf_info vfs_array[E4_MAX_NUM_VFS];
u64 pending_events[ECORE_VF_ARRAY_LENGTH];
u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
- u16 base_vport_id;
#ifndef REMOVE_DBG
/* This doesn't serve anything functionally, but it makes windows
@@ -276,8 +284,8 @@ u32 ecore_crc32(u32 crc,
*
* @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
*/
-int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
- u32 *disabled_vfs);
+bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
+ u32 *disabled_vfs);
/**
* @brief Search extended TLVs in request/reply buffer.
diff --git a/drivers/net/qede/base/ecore_status.h b/drivers/net/qede/base/ecore_status.h
index 6277bc80..c77ec260 100644
--- a/drivers/net/qede/base/ecore_status.h
+++ b/drivers/net/qede/base/ecore_status.h
@@ -10,6 +10,7 @@
#define __ECORE_STATUS_H__
enum _ecore_status_t {
+ ECORE_CONN_RESET = -13,
ECORE_UNKNOWN_ERROR = -12,
ECORE_NORESOURCES = -11,
ECORE_NODEV = -10,
diff --git a/drivers/net/qede/base/ecore_utils.h b/drivers/net/qede/base/ecore_utils.h
index 616b44c2..034cf1eb 100644
--- a/drivers/net/qede/base/ecore_utils.h
+++ b/drivers/net/qede/base/ecore_utils.h
@@ -10,6 +10,12 @@
#define __ECORE_UTILS_H__
/* dma_addr_t manip */
+/* Suppress "right shift count >= width of type" warning when that quantity is
+ * 32-bits rquires the >> 16) >> 16)
+ */
+#define PTR_LO(x) ((u32)(((osal_uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x) ((u32)((((osal_uintptr_t)(x)) >> 16) >> 16))
+
#define DMA_LO(x) ((u32)(((dma_addr_t)(x)) & 0xffffffff))
#define DMA_HI(x) ((u32)(((dma_addr_t)(x)) >> 32))
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index be8b1ec4..f4d331cf 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -65,13 +65,15 @@ static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
}
-static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
- u8 *done, u32 resp_size)
+static enum _ecore_status_t
+ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
+ u8 *done, u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
struct ustorm_trigger_vf_zone trigger;
struct ustorm_vf_zone *zone_data;
- int rc = ECORE_SUCCESS, time = 100;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int time = 100;
zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
@@ -81,16 +83,6 @@ static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
/* need to add the END TLV to the message size */
resp_size += sizeof(struct channel_list_end_tlv);
- if (!p_hwfn->p_dev->b_hw_channel) {
- rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev,
- done,
- p_req,
- p_hwfn->vf_iov_info->pf2vf_reply,
- sizeof(union vfpf_tlvs), resp_size);
- /* TODO - no prints about message ? */
- return rc;
- }
-
/* Send TLVs over HW channel */
OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
trigger.vf_pf_msg_valid = 1;
@@ -134,7 +126,6 @@ static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
"VF <-- PF Timeout [Type %d]\n",
p_req->first_tlv.tl.type);
rc = ECORE_TIMEOUT;
- return rc;
} else {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"PF response: %d [Type %d]\n",
@@ -294,8 +285,17 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
" override\n");
req->vfdev_info.capabilities |=
VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ continue;
}
}
+
+ /* If PF/VF are using same Major, PF must have had
+ * it's reasons. Simply fail.
+ */
+ DP_NOTICE(p_hwfn, false,
+ "PF rejected acquisition by VF\n");
+ rc = ECORE_INVAL;
+ goto exit;
} else {
DP_ERR(p_hwfn,
"PF returned err %d to VF acquisition request\n",
@@ -386,8 +386,6 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
return ECORE_NOMEM;
}
- OSAL_MEMSET(p_iov, 0, sizeof(*p_iov));
-
/* Allocate vf2pf msg */
p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
&p_iov->
@@ -453,20 +451,174 @@ free_p_iov:
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
-enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
- u8 rx_qid,
- u16 sb,
- u8 sb_index,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void OSAL_IOMEM **pp_prod)
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
+static void
+__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_src,
+ enum ecore_tunn_mode mask, u8 *p_cls)
+{
+ if (p_src->b_update_mode) {
+ p_req->tun_mode_update_mask |= (1 << mask);
+
+ if (p_src->b_mode_enabled)
+ p_req->tunn_mode |= (1 << mask);
+ }
+
+ *p_cls = p_src->tun_cls;
+}
+
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
+static void
+ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_src,
+ enum ecore_tunn_mode mask, u8 *p_cls,
+ struct ecore_tunn_update_udp_port *p_port,
+ u8 *p_update_port, u16 *p_udp_port)
+{
+ if (p_port->b_update_port) {
+ *p_update_port = 1;
+ *p_udp_port = p_port->port;
+ }
+
+ __ecore_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
+}
+
+void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun)
+{
+ if (p_tun->vxlan.b_mode_enabled)
+ p_tun->vxlan.b_update_mode = true;
+ if (p_tun->l2_geneve.b_mode_enabled)
+ p_tun->l2_geneve.b_update_mode = true;
+ if (p_tun->ip_geneve.b_mode_enabled)
+ p_tun->ip_geneve.b_update_mode = true;
+ if (p_tun->l2_gre.b_mode_enabled)
+ p_tun->l2_gre.b_update_mode = true;
+ if (p_tun->ip_gre.b_mode_enabled)
+ p_tun->ip_gre.b_update_mode = true;
+
+ p_tun->b_update_rx_cls = true;
+ p_tun->b_update_tx_cls = true;
+}
+
+static void
+__ecore_vf_update_tunn_param(struct ecore_tunn_update_type *p_tun,
+ u16 feature_mask, u8 tunn_mode, u8 tunn_cls,
+ enum ecore_tunn_mode val)
+{
+ if (feature_mask & (1 << val)) {
+ p_tun->b_mode_enabled = tunn_mode;
+ p_tun->tun_cls = tunn_cls;
+ } else {
+ p_tun->b_mode_enabled = false;
+ }
+}
+
+static void
+ecore_vf_update_tunn_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tun,
+ struct pfvf_update_tunn_param_tlv *p_resp)
+{
+ /* Update mode and classes provided by PF */
+ u16 feat_mask = p_resp->tunn_feature_mask;
+
+ __ecore_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
+ p_resp->vxlan_mode, p_resp->vxlan_clss,
+ ECORE_MODE_VXLAN_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
+ p_resp->l2geneve_mode,
+ p_resp->l2geneve_clss,
+ ECORE_MODE_L2GENEVE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
+ p_resp->ipgeneve_mode,
+ p_resp->ipgeneve_clss,
+ ECORE_MODE_IPGENEVE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
+ p_resp->l2gre_mode, p_resp->l2gre_clss,
+ ECORE_MODE_L2GRE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
+ p_resp->ipgre_mode, p_resp->ipgre_clss,
+ ECORE_MODE_IPGRE_TUNN);
+ p_tun->geneve_port.port = p_resp->geneve_udp_port;
+ p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
+ p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled,
+ p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+}
+
+enum _ecore_status_t
+ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_src)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ enum _ecore_status_t rc;
+
+ p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ sizeof(*p_req));
+
+ if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
+ p_req->update_tun_cls = 1;
+
+ ecore_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, ECORE_MODE_VXLAN_TUNN,
+ &p_req->vxlan_clss, &p_src->vxlan_port,
+ &p_req->update_vxlan_port,
+ &p_req->vxlan_port);
+ ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
+ ECORE_MODE_L2GENEVE_TUNN,
+ &p_req->l2geneve_clss, &p_src->geneve_port,
+ &p_req->update_geneve_port,
+ &p_req->geneve_port);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
+ ECORE_MODE_IPGENEVE_TUNN,
+ &p_req->ipgeneve_clss);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
+ ECORE_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
+ ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+
+ if (rc)
+ goto exit;
+
+ if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Failed to update tunnel parameters\n");
+ rc = ECORE_INVAL;
+ }
+
+ ecore_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM **pp_prod)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_rxq_tlv *req;
- int rc;
+ u16 rx_qid = p_cid->rel.queue_id;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
@@ -475,19 +627,20 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
req->cqe_pbl_addr = cqe_pbl_addr;
req->cqe_pbl_size = cqe_pbl_size;
req->rxq_addr = bd_chain_phys_addr;
- req->hw_sb = sb;
- req->sb_index = sb_index;
+ req->hw_sb = p_cid->rel.sb;
+ req->sb_index = p_cid->rel.sb_idx;
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1; /* Keep initialized, for future compatibility */
/* If PF is legacy, we'll need to calculate producers ourselves
* as well as clean them.
*/
- if (pp_prod && p_iov->b_pre_fp_hsi) {
+ if (p_iov->b_pre_fp_hsi) {
u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
u32 init_prod_val = 0;
- *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ *pp_prod = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
MSTORM_QZONE_START(p_hwfn->p_dev) +
(hw_qid) * MSTORM_QZONE_SIZE;
@@ -512,7 +665,7 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
}
/* Learn the address of the producer from the response */
- if (pp_prod && !p_iov->b_pre_fp_hsi) {
+ if (!p_iov->b_pre_fp_hsi) {
u32 init_prod_val = 0;
*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
@@ -536,17 +689,18 @@ exit:
}
enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_qid, bool cqe_completion)
+ struct ecore_queue_cid *p_cid,
+ bool cqe_completion)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_rxqs_tlv *req;
struct pfvf_def_resp_tlv *resp;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
- req->rx_qid = rx_qid;
+ req->rx_qid = p_cid->rel.queue_id;
req->num_rxqs = 1;
req->cqe_completion = cqe_completion;
@@ -571,29 +725,28 @@ exit:
return rc;
}
-enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id,
- u16 sb,
- u8 sb_index,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM **pp_doorbell)
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM **pp_doorbell)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_txq_tlv *req;
- int rc;
+ u16 qid = p_cid->rel.queue_id;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
- req->tx_qid = tx_queue_id;
+ req->tx_qid = qid;
/* Tx */
req->pbl_addr = pbl_addr;
req->pbl_size = pbl_size;
- req->hw_sb = sb;
- req->sb_index = sb_index;
+ req->hw_sb = p_cid->rel.sb;
+ req->sb_index = p_cid->rel.sb_idx;
/* add list termination tlv */
ecore_add_tlv(p_hwfn, &p_iov->offset,
@@ -610,42 +763,40 @@ enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
goto exit;
}
- if (pp_doorbell) {
- /* Modern PFs provide the actual offsets, while legacy
- * provided only the queue id.
- */
- if (!p_iov->b_pre_fp_hsi) {
- *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
- resp->offset;
- } else {
- u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
-
+ /* Modern PFs provide the actual offsets, while legacy
+ * provided only the queue id.
+ */
+ if (!p_iov->b_pre_fp_hsi) {
*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
- DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
- }
+ resp->offset;
+ } else {
+ u8 cid = p_iov->acquire_resp.resc.cid[qid];
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
- tx_queue_id, *pp_doorbell, resp->offset);
+ *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
+ DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
}
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+ qid, *pp_doorbell, resp->offset);
exit:
ecore_vf_pf_req_end(p_hwfn, rc);
return rc;
}
-enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_txqs_tlv *req;
struct pfvf_def_resp_tlv *resp;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
- req->tx_qid = tx_qid;
+ req->tx_qid = p_cid->rel.queue_id;
req->num_txqs = 1;
/* add list termination tlv */
@@ -670,20 +821,36 @@ exit:
}
enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
+ struct ecore_queue_cid **pp_cid,
u8 num_rxqs,
- u8 comp_cqe_flg, u8 comp_event_flg)
+ u8 comp_cqe_flg,
+ u8 comp_event_flg)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
struct vfpf_update_rxq_tlv *req;
- int rc;
+ enum _ecore_status_t rc;
+
+ /* TODO - API is limited to assuming continuous regions of queues,
+ * but VF queues might not fullfil this requirement.
+ * Need to consider whether we need new TLVs for this, or whether
+ * simply doing it iteratively is good enough.
+ */
+ if (!num_rxqs)
+ return ECORE_INVAL;
+again:
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));
- req->rx_qid = rx_queue_id;
- req->num_rxqs = num_rxqs;
+ /* Find the length of the current contagious range of queues beginning
+ * at first queue's index.
+ */
+ req->rx_qid = (*pp_cid)->rel.queue_id;
+ for (req->num_rxqs = 1; req->num_rxqs < num_rxqs; req->num_rxqs++)
+ if (pp_cid[req->num_rxqs]->rel.queue_id !=
+ req->rx_qid + req->num_rxqs)
+ break;
if (comp_cqe_flg)
req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
@@ -704,9 +871,17 @@ enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
goto exit;
}
+ /* Make sure we're done with all the queues */
+ if (req->num_rxqs < num_rxqs) {
+ num_rxqs -= req->num_rxqs;
+ pp_cid += req->num_rxqs;
+ /* TODO - should we give a non-locked variant instead? */
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ goto again;
+ }
+
exit:
ecore_vf_pf_req_end(p_hwfn, rc);
-
return rc;
}
@@ -719,7 +894,8 @@ ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_vport_start_tlv *req;
struct pfvf_def_resp_tlv *resp;
- int rc, i;
+ enum _ecore_status_t rc;
+ int i;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
@@ -761,7 +937,7 @@ enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
@@ -859,7 +1035,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
u8 update_rx, update_tx;
u32 resp_size = 0;
u16 size, tlv;
- int rc;
+ enum _ecore_status_t rc;
resp = &p_iov->pf2vf_reply->default_resp;
resp_size = sizeof(*resp);
@@ -956,6 +1132,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
if (p_params->rss_params) {
struct ecore_rss_params *rss_params = p_params->rss_params;
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ int i, table_size;
size = sizeof(struct vfpf_vport_update_rss_tlv);
p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
@@ -977,8 +1154,16 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
p_rss_tlv->rss_enable = rss_params->rss_enable;
p_rss_tlv->rss_caps = rss_params->rss_caps;
p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
- OSAL_MEMCPY(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
- sizeof(rss_params->rss_ind_table));
+
+ table_size = OSAL_MIN_T(int, T_ETH_INDIRECTION_TABLE_SIZE,
+ 1 << p_rss_tlv->rss_table_size_log);
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_queue;
+
+ p_queue = rss_params->rss_ind_table[i];
+ p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
+ }
+
OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
sizeof(rss_params->rss_key));
}
@@ -1067,7 +1252,7 @@ enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp;
struct vfpf_first_tlv *req;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
@@ -1101,7 +1286,7 @@ enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
struct pfvf_def_resp_tlv *resp;
struct vfpf_first_tlv *req;
u32 size;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
@@ -1140,7 +1325,6 @@ enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
}
OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
- p_hwfn->vf_iov_info = OSAL_NULL;
return rc;
}
@@ -1173,7 +1357,7 @@ enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_ucast_filter_tlv *req;
struct pfvf_def_resp_tlv *resp;
- int rc;
+ enum _ecore_status_t rc;
/* Sanitize */
if (p_ucast->opcode == ECORE_FILTER_MOVE) {
@@ -1214,7 +1398,7 @@ enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
@@ -1240,6 +1424,48 @@ exit:
return rc;
}
+enum _ecore_status_t
+ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_update_coalesce *req;
+ struct pfvf_def_resp_tlv *resp;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep header tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE,
+ sizeof(*req));
+
+ req->rx_coal = rx_coal;
+ req->tx_coal = tx_coal;
+ req->qid = p_cid->rel.queue_id;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
+ rx_coal, tx_coal, req->qid);
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (rc != ECORE_SUCCESS)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ goto exit;
+
+ p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
+ p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
u16 sb_id)
{
@@ -1356,6 +1582,12 @@ void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}
+void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_txqs)
+{
+ *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
+}
+
void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
{
OSAL_MEMCPY(port_mac,
@@ -1372,23 +1604,21 @@ void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}
-/* @DPDK */
-void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
- u32 *num_mac)
+void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ u32 *num_sbs)
{
struct ecore_vf_iov *p_vf;
p_vf = p_hwfn->vf_iov_info;
- *num_mac = p_vf->acquire_resp.resc.num_mac_filters;
+ *num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
}
-void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
- u32 *num_sbs)
+void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
+ u32 *num_mac_filters)
{
- struct ecore_vf_iov *p_vf;
+ struct ecore_vf_iov *p_vf = p_hwfn->vf_iov_info;
- p_vf = p_hwfn->vf_iov_info;
- *num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
+ *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}
bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
@@ -1428,6 +1658,18 @@ bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
return true;
}
+void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
+ u16 *p_vxlan_port,
+ u16 *p_geneve_port)
+{
+ struct ecore_bulletin_content *p_bulletin;
+
+ p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+
+ *p_vxlan_port = p_bulletin->vxlan_udp_port;
+ *p_geneve_port = p_bulletin->geneve_udp_port;
+}
+
bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
{
struct ecore_bulletin_content *bulletin;
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index 6077d600..f4713884 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -38,10 +38,33 @@ struct ecore_vf_iov {
bool b_pre_fp_hsi;
};
+
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+/**
+ * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
+ * Coalesce value '0' will omit the configuration.
+ *
+ * @param p_hwfn
+ * @param rx_coal - coalesce value in micro second for rx queue
+ * @param tx_coal - coalesce value in micro second for tx queue
+ * @param queue_cid
+ *
+ **/
+enum _ecore_status_t ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ struct ecore_queue_cid *p_cid);
+
#ifdef CONFIG_ECORE_SRIOV
/**
* @brief hw preparation for VF
- * sends ACQUIRE message
+ * sends ACQUIRE message
*
* @param p_hwfn
*
@@ -53,10 +76,7 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn);
* @brief VF - start the RX Queue by sending a message to the PF
*
* @param p_hwfn
- * @param cid - zero based within the VF
- * @param rx_queue_id - zero based within the VF
- * @param sb - VF status block for this queue
- * @param sb_index - Index within the status block
+ * @param p_cid - Only relative fields are relevant
* @param bd_max_bytes - maximum number of bytes per bd
* @param bd_chain_phys_addr - physical address of bd chain
* @param cqe_pbl_addr - physical address of pbl
@@ -67,9 +87,7 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn);
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
- u8 rx_queue_id,
- u16 sb,
- u8 sb_index,
+ struct ecore_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
@@ -81,46 +99,44 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
* PF.
*
* @param p_hwfn
- * @param tx_queue_id - zero based within the VF
- * @param sb - status block for this queue
- * @param sb_index - index within the status block
+ * @param p_cid
* @param bd_chain_phys_addr - physical address of tx chain
* @param pp_doorbell - pointer to address to which to
* write the doorbell too..
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id,
- u16 sb,
- u8 sb_index,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM **pp_doorbell);
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM **pp_doorbell);
/**
* @brief VF - stop the RX queue by sending a message to the PF
*
* @param p_hwfn
- * @param rx_qid
+ * @param p_cid
* @param cqe_completion
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_qid,
- bool cqe_completion);
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool cqe_completion);
/**
* @brief VF - stop the TX queue by sending a message to the PF
*
* @param p_hwfn
- * @param tx_qid
+ * @param p_cid
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
- u16 tx_qid);
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid);
+
+/* TODO - fix all the !SRIOV prototypes */
#ifndef LINUX_REMOVE
/**
@@ -128,20 +144,18 @@ enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
* PF
*
* @param p_hwfn
- * @param rx_queue_id
+ * @param pp_cid - list of queue-cids which we want to update
* @param num_rxqs
- * @param init_sge_ring
* @param comp_cqe_flg
* @param comp_event_flg
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_rxqs_update(
- struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
- u8 num_rxqs,
- u8 comp_cqe_flg,
- u8 comp_event_flg);
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid **pp_cid,
+ u8 num_rxqs,
+ u8 comp_cqe_flg,
+ u8 comp_event_flg);
#endif
/**
@@ -267,5 +281,10 @@ void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin);
+enum _ecore_status_t
+ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn);
+
+void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
#endif
#endif /* __ECORE_VF_H__ */
diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h
index 571fd374..be3a326b 100644
--- a/drivers/net/qede/base/ecore_vf_api.h
+++ b/drivers/net/qede/base/ecore_vf_api.h
@@ -61,6 +61,15 @@ void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
u8 *num_rxqs);
/**
+ * @brief Get number of Rx queues allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_txqs - allocated RX queues
+ */
+void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_txqs);
+
+/**
* @brief Get port mac address for VF
*
* @param p_hwfn
@@ -78,18 +87,18 @@ void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,
void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
u8 *num_vlan_filters);
+void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ u32 *num_sbs);
+
/**
* @brief Get number of MAC filters allocated for VF by ecore
*
- * @param p_hwfn
- * @param num_mac_filters - allocated MAC filters
+ * @param p_hwfn
+ * @param num_rxqs - allocated MAC filters
*/
void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
u32 *num_mac_filters);
-void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
- u32 *num_sbs);
-
/**
* @brief Check if VF can set a MAC address
*
@@ -152,5 +161,7 @@ void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
u16 *fw_minor,
u16 *fw_rev,
u16 *fw_eng);
+void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
+ u16 *p_vxlan_port, u16 *p_geneve_port);
#endif
#endif
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index 149d092b..66184421 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -416,10 +416,55 @@ struct vfpf_ucast_filter_tlv {
u16 padding[3];
};
+/* tunnel update param tlv */
+struct vfpf_update_tunn_param_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 tun_mode_update_mask;
+ u8 tunn_mode;
+ u8 update_tun_cls;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u8 update_geneve_port;
+ u8 update_vxlan_port;
+ u16 geneve_port;
+ u16 vxlan_port;
+ u8 padding[2];
+};
+
+struct pfvf_update_tunn_param_tlv {
+ struct pfvf_tlv hdr;
+
+ u16 tunn_feature_mask;
+ u8 vxlan_mode;
+ u8 l2geneve_mode;
+ u8 ipgeneve_mode;
+ u8 l2gre_mode;
+ u8 ipgre_mode;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+};
+
struct tlv_buffer_size {
u8 tlv_buffer[TLV_BUFFER_SIZE];
};
+struct vfpf_update_coalesce {
+ struct vfpf_first_tlv first_tlv;
+ u16 rx_coal;
+ u16 tx_coal;
+ u16 qid;
+ u8 padding[2];
+};
+
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -431,6 +476,8 @@ union vfpf_tlvs {
struct vfpf_vport_start_tlv start_vport;
struct vfpf_vport_update_tlv vport_update;
struct vfpf_ucast_filter_tlv ucast_filter;
+ struct vfpf_update_tunn_param_tlv tunn_param_update;
+ struct vfpf_update_coalesce update_coalesce;
struct tlv_buffer_size tlv_buf_size;
};
@@ -439,6 +486,7 @@ union pfvf_tlvs {
struct pfvf_acquire_resp_tlv acquire_resp;
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
+ struct pfvf_update_tunn_param_tlv tunn_param_resp;
};
/* This is a structure which is allocated in the VF, which the PF may update
@@ -506,9 +554,12 @@ struct ecore_bulletin_content {
u8 pfc_enabled;
u8 partner_tx_flow_ctrl_en;
u8 partner_rx_flow_ctrl_en;
+
u8 partner_adv_pause;
u8 sfp_tx_fault;
- u8 padding4[6];
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 padding4[2];
u32 speed;
u32 partner_adv_speed;
@@ -552,6 +603,8 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_RSS,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index 32130709..6dc969b0 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -34,6 +34,14 @@
#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+/* Limitation for Tunneled LSO Packets on the offset (in bytes) of the inner IP
+ * header (relevant to LSO for tunneled packet):
+ */
+/* Offset is limited to 253 bytes (inclusive). */
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
+/* Offset is limited to 251 bytes (inclusive). */
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
+
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
@@ -141,16 +149,23 @@ struct eth_tx_1st_bd_flags {
/* Do not allow additional VLAN manipulations on this packet. */
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
-/* IP checksum recalculation in needed */
+/* Recalculate IP checksum. For tunneled packet - relevant to inner header. */
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
-/* TCP/UDP checksum recalculation in needed */
+/* Recalculate TCP/UDP checksum.
+ * For tunneled packet - relevant to inner header.
+ */
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
-/* If set, need to add the VLAN in vlan field to the packet. */
+/* If set, insert VLAN tag from vlan field to the packet.
+ * For tunneled packet - relevant to outer header.
+ */
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
-/* If set, this is an LSO packet. */
+/* If set, this is an LSO packet. Note: For Tunneled LSO packets, the offset of
+ * the inner IPV4 (and IPV6) header is limited to 253 (and 251 respectively)
+ * bytes, inclusive.
+ */
#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
/* Recalculate Tunnel IP Checksum (if Tunnel IP Header is IPv4) */
@@ -165,8 +180,9 @@ struct eth_tx_1st_bd_flags {
* The parsing information data for the first tx bd of a given packet.
*/
struct eth_tx_data_1st_bd {
- __le16 vlan /* VLAN tag to insert to packet (if needed). */;
-/* Number of BDs in packet. Should be at least 2 in non-LSO packet and at least
+/* VLAN tag to insert to packet (if enabled by vlan_insertion flag). */
+ __le16 vlan;
+/* Number of BDs in packet. Should be at least 1 in non-LSO packet and at least
* 3 in LSO (or Tunnel with IPv6+ext) packet.
*/
u8 nbds;
@@ -209,10 +225,14 @@ struct eth_tx_data_2nd_bd {
/* For LSO / Tunnel header with IPv6+ext - Set if inner header is IPv6 */
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
-/* For LSO / Tunnel header with IPv6+ext - Set if outer header has IPv6+ext */
+/* In tunneling mode - Set to 1 when the Inner header is IPv6 with extension.
+ * Otherwise set to 1 if the header is IPv6 with extension.
+ */
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
-/* Set if Tunnel header has IPv6 ext. (3rd BD is required) */
+/* Set to 1 if Tunnel (outer = encapsulating) header has IPv6 ext. (Note: 3rd BD
+ * is required, hence EDPM does not support Tunnel [outer] header with Ipv6Ext)
+ */
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
/* Set if (inner) L4 protocol is UDP. (Required when IPv6+ext (or tunnel with
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 96efc3c8..fcf98477 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -84,9 +84,32 @@ struct eth_phy_cfg {
/* Remote Serdes Loopback (RX to TX) */
#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY (9)
- /* features */
- u32 feature_config_flags;
-#define ETH_EEE_MODE_ADV_LPI (1 << 0)
+ /* Used to configure the EEE Tx LPI timer, has several modes of
+ * operation, according to bits 29:28
+ * 2'b00: Timer will be configured by nvram, output will be the value
+ * from nvram.
+ * 2'b01: Timer will be configured by nvram, output will be in
+ * 16xmicroseconds.
+ * 2'b10: bits 1:0 contain an nvram value which will be used instead
+ * of the one located in the nvram. Output will be that value.
+ * 2'b11: bits 19:0 contain the idle timer in microseconds; output
+ * will be in 16xmicroseconds.
+ * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
+ */
+ u32 eee_mode;
+#define EEE_MODE_TIMER_USEC_MASK (0x000fffff)
+#define EEE_MODE_TIMER_USEC_OFFSET (0)
+#define EEE_MODE_TIMER_USEC_BALANCED_TIME (0xa00)
+#define EEE_MODE_TIMER_USEC_AGGRESSIVE_TIME (0x100)
+#define EEE_MODE_TIMER_USEC_LATENCY_TIME (0x6000)
+/* Set by the driver to request status timer will be in microseconds and and not
+ * in EEE policy definition
+ */
+#define EEE_MODE_OUTPUT_TIME (1 << 28)
+/* Set by the driver to override default nvm timer */
+#define EEE_MODE_OVERRIDE_NVRAM (1 << 29)
+#define EEE_MODE_ENABLE_LPI (1 << 30) /* Set when */
+#define EEE_MODE_ADV_LPI (1 << 31) /* Set when EEE is enabled */
};
struct port_mf_cfg {
@@ -271,16 +294,20 @@ struct dcbx_ets_feature {
#define DCBX_ETS_CBS_SHIFT 3
#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
#define DCBX_ETS_MAX_TCS_SHIFT 4
-#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
-#define DCBX_ISCSI_OOO_TC_SHIFT 8
+#define DCBX_OOO_TC_MASK 0x00000f00
+#define DCBX_OOO_TC_SHIFT 8
/* Entries in tc table are orginized that the left most is pri 0, right most is
* prio 7
*/
u32 pri_tc_tbl[1];
-#define DCBX_ISCSI_OOO_TC (4)
+/* Fixed TCP OOO TC usage is deprecated and used only for driver backward
+ * compatibility
+ */
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
#define DCBX_CEE_STRICT_PRIORITY 0xf
/* Entries in tc table are orginized that the left most is pri 0, right most is
* prio 7
@@ -447,6 +474,14 @@ struct public_global {
#define MDUMP_REASON_INTERNAL_ERROR (1 << 0)
#define MDUMP_REASON_EXTERNAL_TRIGGER (1 << 1)
#define MDUMP_REASON_DUMP_AGED (1 << 2)
+ u32 ext_phy_upgrade_fw;
+#define EXT_PHY_FW_UPGRADE_STATUS_MASK (0x0000ffff)
+#define EXT_PHY_FW_UPGRADE_STATUS_SHIFT (0)
+#define EXT_PHY_FW_UPGRADE_STATUS_IN_PROGRESS (1)
+#define EXT_PHY_FW_UPGRADE_STATUS_FAILED (2)
+#define EXT_PHY_FW_UPGRADE_STATUS_SUCCESS (3)
+#define EXT_PHY_FW_UPGRADE_TYPE_MASK (0xffff0000)
+#define EXT_PHY_FW_UPGRADE_TYPE_SHIFT (16)
};
/**************************************/
@@ -553,23 +588,20 @@ struct public_port {
#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
u32 link_status;
-#define LINK_STATUS_LINK_UP 0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
-
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
-
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
-
-#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+#define LINK_STATUS_PFC_ENABLED 0x00000100
#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
@@ -578,25 +610,23 @@ struct public_port {
#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
-
#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18)
#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
-#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
-
-#define LINK_STATUS_SFP_TX_FAULT 0x00100000
-#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
-#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
-#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
-#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
-#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
-#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
-
-#define LINK_STATUS_FEC_MODE_MASK 0x38000000
-#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
-#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27)
-#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+#define LINK_STATUS_FEC_MODE_MASK 0x38000000
+#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
+#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27)
+#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
+#define LINK_STATUS_EXT_PHY_LINK_UP 0x40000000
u32 link_status1;
u32 ext_phy_fw_version;
@@ -654,45 +684,47 @@ struct public_port {
u32 fc_npiv_nvram_tbl_addr;
u32 fc_npiv_nvram_tbl_size;
u32 transceiver_data;
-#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
-#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
-#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
-#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
-#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
-#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
-#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
-#define ETH_TRANSCEIVER_TYPE_SHIFT 0x00000008
-#define ETH_TRANSCEIVER_TYPE_NONE 0x00000000
-#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF
+#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
+#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
+#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
+#define ETH_TRANSCEIVER_TYPE_SHIFT 0x00000008
+#define ETH_TRANSCEIVER_TYPE_NONE 0x00000000
+#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF
/* 1G Passive copper cable */
-#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
+#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
/* 1G Active copper cable */
-#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
-#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
-#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
-#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
-#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
-#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
-#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
+#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
+#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
+#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
+#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
+#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
+#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
+#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
/* 10G Passive copper cable */
-#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
+#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
/* 10G Active copper cable */
-#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
-#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
-#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
-#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
-#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
-#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f /* Active optical cable */
-#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
-#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
-#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
-#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 /* Active copper cable */
-#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
-#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
+#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
+#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
+#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
+#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
+#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
+/* Active optical cable */
+#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
+#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
+#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
+#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
+/* Active copper cable */
+#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
+#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
+#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
/* 25G Passive copper cable - short */
-#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
+#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
/* 25G Active copper cable - short */
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
/* 25G Passive copper cable - medium */
#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
/* 25G Active copper cable - medium */
@@ -718,6 +750,39 @@ struct public_port {
u32 wol_pkt_len;
u32 wol_pkt_details;
struct dcb_dscp_map dcb_dscp_map;
+
+ /* the status of EEE auto-negotiation
+ * bits 19:0 the configured tx-lpi entry timer value. Depends on bit 31.
+ * bits 23:20 the speeds advertised for EEE.
+ * bits 27:24 the speeds the Link partner advertised for EEE.
+ * The supported/adv. modes in bits 27:19 originate from the
+ * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
+ * bit 28 when 1'b1 EEE was requested.
+ * bit 29 when 1'b1 tx lpi was requested.
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted if 30:29
+ * are 2'b11.
+ * bit 31 - When 1'b0 bits 15:0 contain
+ * NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_XXX define as value.
+ * When 1'b1 those bits contains a value times 16 microseconds.
+ */
+ u32 eee_status;
+#define EEE_TIMER_MASK 0x000fffff
+#define EEE_ADV_STATUS_MASK 0x00f00000
+#define EEE_1G_ADV (1 << 1)
+#define EEE_10G_ADV (1 << 2)
+#define EEE_ADV_STATUS_SHIFT 20
+#define EEE_LP_ADV_STATUS_MASK 0x0f000000
+#define EEE_LP_ADV_STATUS_SHIFT 24
+#define EEE_REQUESTED_BIT 0x10000000
+#define EEE_LPI_REQUESTED_BIT 0x20000000
+#define EEE_ACTIVE_BIT 0x40000000
+#define EEE_TIME_OUTPUT_BIT 0x80000000
+
+ u32 eee_remote; /* Used for EEE in LLDP */
+#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
+#define EEE_REMOTE_TW_TX_SHIFT 0
+#define EEE_REMOTE_TW_RX_MASK 0xffff0000
+#define EEE_REMOTE_TW_RX_SHIFT 16
};
/**************************************/
@@ -813,9 +878,11 @@ struct public_func {
#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
#define DRV_ID_PDA_COMP_VER_SHIFT 0
+#define LOAD_REQ_HSI_VERSION 2
#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
#define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
+ DRV_ID_MCP_HSI_VER_SHIFT)
#define DRV_ID_DRV_TYPE_MASK 0x7f000000
#define DRV_ID_DRV_TYPE_SHIFT 24
@@ -958,6 +1025,7 @@ enum resource_id_enum {
RESOURCE_NUM_RSS_ENGINES_E = 14,
RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_BDQ_E = 17,
RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF
};
@@ -975,8 +1043,47 @@ struct resource_info {
#define RESOURCE_ELEMENT_STRICT (1 << 0)
};
+#define DRV_ROLE_NONE 0
+#define DRV_ROLE_PREBOOT 1
+#define DRV_ROLE_OS 2
+#define DRV_ROLE_KDUMP 3
+
+struct load_req_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_REQ_ROLE_MASK 0x000000FF
+#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT 8
+#define LOAD_REQ_LOCK_TO_DEFAULT 0
+#define LOAD_REQ_LOCK_TO_NONE 255
+#define LOAD_REQ_FORCE_MASK 0x000F0000
+#define LOAD_REQ_FORCE_SHIFT 16
+#define LOAD_REQ_FORCE_NONE 0
+#define LOAD_REQ_FORCE_PF 1
+#define LOAD_REQ_FORCE_ALL 2
+#define LOAD_REQ_FLAGS0_MASK 0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT 20
+#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
+};
+
+struct load_rsp_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_RSP_ROLE_MASK 0x000000FF
+#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_HSI_MASK 0x0000FF00
+#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_FLAGS0_MASK 0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
+};
+
union drv_union_data {
- u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; /* LOAD_REQ */
struct mcp_mac wol_mac; /* UNLOAD_DONE */
/* This configuration should be set by the driver for the LINK_SET command. */
@@ -995,13 +1102,17 @@ union drv_union_data {
struct lan_stats_stc lan_stats;
struct fcoe_stats_stc fcoe_stats;
- struct iscsi_stats_stc icsci_stats;
+ struct iscsi_stats_stc iscsi_stats;
struct rdma_stats_stc rdma_stats;
struct ocbb_data_stc ocbb_info;
struct temperature_status_stc temp_info;
struct resource_info resource;
struct bist_nvm_image_att nvm_image_att;
struct mdump_config_stc mdump_config;
+ u32 dword;
+
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
/* ... */
};
@@ -1011,6 +1122,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_LOAD_REQ 0x10000000
#define DRV_MSG_CODE_LOAD_DONE 0x11000000
#define DRV_MSG_CODE_INIT_HW 0x12000000
+#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
#define DRV_MSG_CODE_INIT_PHY 0x22000000
@@ -1026,16 +1138,15 @@ struct public_drv_mb {
#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000
+#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
-
-#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-
/* DRV_MB Param: driver version supp, FW_MB param: MFW version supp,
* data: struct resource_info
*/
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
+#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
/*deprecated don't use*/
#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000
@@ -1087,19 +1198,6 @@ struct public_drv_mb {
* MCP_REG_CPU_STATE/MCP_REG_CPU_MODE registers.
*/
#define DRV_MSG_CODE_MCP_HALT 0x00100000
-/* Host shall provide buffer and size for MFW */
-#define DRV_MSG_CODE_PMD_DIAG_DUMP 0x00140000
-/* Host shall provide buffer and size for MFW */
-#define DRV_MSG_CODE_PMD_DIAG_EYE 0x00150000
-/* Param: [0:1] - Port, [2:7] - read size, [8:15] - I2C address,
- * [16:31] - offset
- */
-#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
-/* Param: [0:1] - Port, [2:7] - write size, [8:15] - I2C address,
- * [16:31] - offset
- */
-#define DRV_MSG_CODE_TRANSCEIVER_WRITE 0x00170000
-
/* Set virtual mac address, params [31:6] - reserved, [5:4] - type,
* [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
*/
@@ -1108,20 +1206,31 @@ struct public_drv_mb {
* [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
*/
#define DRV_MSG_CODE_GET_VMAC 0x00120000
+#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
-
/* Get statistics from pf, params [31:4] - reserved, [3:0] - stats type */
#define DRV_MSG_CODE_GET_STATS 0x00130000
#define DRV_MSG_CODE_STATS_TYPE_LAN 1
#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
-#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
-
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+/* Host shall provide buffer and size for MFW */
+#define DRV_MSG_CODE_PMD_DIAG_DUMP 0x00140000
+/* Host shall provide buffer and size for MFW */
+#define DRV_MSG_CODE_PMD_DIAG_EYE 0x00150000
+/* Param: [0:1] - Port, [2:7] - read size, [8:15] - I2C address,
+ * [16:31] - offset
+ */
+#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
+/* Param: [0:1] - Port, [2:7] - write size, [8:15] - I2C address,
+ * [16:31] - offset
+ */
+#define DRV_MSG_CODE_TRANSCEIVER_WRITE 0x00170000
/* indicate OCBB related information */
#define DRV_MSG_CODE_OCBB_DATA 0x00180000
-
/* Set function BW, params[15:8] - min, params[7:0] - max */
#define DRV_MSG_CODE_SET_BW 0x00190000
#define BW_MAX_MASK 0x000000ff
@@ -1137,14 +1246,10 @@ struct public_drv_mb {
#define DRV_MSG_CODE_INDUCE_FAILURE 0x001b0000
#define DRV_MSG_FAN_FAILURE_TYPE (1 << 0)
#define DRV_MSG_TEMPERATURE_FAILURE_TYPE (1 << 1)
-
/* Param: [0:15] - gpio number */
#define DRV_MSG_CODE_GPIO_READ 0x001c0000
/* Param: [0:15] - gpio number, [16:31] - gpio value */
#define DRV_MSG_CODE_GPIO_WRITE 0x001d0000
-/* Param: [0:15] - gpio number */
-#define DRV_MSG_CODE_GPIO_INFO 0x00270000
-
/* Param: [0:7] - test enum, [8:15] - image index, [16:31] - reserved */
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_GET_TEMPERATURE 0x001f0000
@@ -1157,11 +1262,16 @@ struct public_drv_mb {
#define DRV_MSG_CODE_TIMESTAMP 0x00210000
/* This is an empty mailbox just return OK*/
#define DRV_MSG_CODE_EMPTY_MB 0x00220000
+
/* Param[0:4] - resource number (0-31), Param[5:7] - opcode,
* param[15:8] - age
*/
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
+#define RESOURCE_CMD_REQ_RESC_SHIFT 0
+#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
/* request resource ownership with default aging */
#define RESOURCE_OPCODE_REQ 1
/* request resource ownership without aging */
@@ -1169,8 +1279,15 @@ struct public_drv_mb {
/* request resource ownership with specific aging timer (in seconds) */
#define RESOURCE_OPCODE_REQ_W_AGING 3
#define RESOURCE_OPCODE_RELEASE 4 /* release resource */
-#define RESOURCE_OPCODE_FORCE_RELEASE 5 /* force resource release */
-
+/* force resource release */
+#define RESOURCE_OPCODE_FORCE_RELEASE 5
+#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT 8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
/* resource is free and granted to requester */
#define RESOURCE_OPCODE_GNT 1
/* resource is busy, param[7:0] indicates owner as follow 0-15 = PF0-15,
@@ -1184,11 +1301,11 @@ struct public_drv_mb {
/* indicate wrong owner during release */
#define RESOURCE_OPCODE_WRONG_OWNER 5
#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+
/* dedicate resource 0 for dump */
-#define RESOURCE_DUMP (1 << 0)
+#define RESOURCE_DUMP 0
#define DRV_MSG_CODE_GET_MBA_VERSION 0x00240000 /* Get MBA version */
-
/* Send crash dump commands with param[3:0] - opcode */
#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
@@ -1202,14 +1319,26 @@ struct public_drv_mb {
#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
/* Request valid logs and config words */
#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
-/* Set triggers mask. drv_mb_param should indicate (bitwise) which trigger
- * enabled
+/* Set triggers mask. drv_mb_param should indicate (bitwise) which
+ * trigger enabled
*/
#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
-#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06 /* Clear all logs */
-
-
+/* Clear all logs */
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
#define DRV_MSG_CODE_MEM_ECC_EVENTS 0x00260000 /* Param: None */
+/* Param: [0:15] - gpio number */
+#define DRV_MSG_CODE_GPIO_INFO 0x00270000
+/* Value will be placed in union */
+#define DRV_MSG_CODE_EXT_PHY_READ 0x00280000
+/* Value should be placed in union */
+#define DRV_MSG_CODE_EXT_PHY_WRITE 0x00290000
+#define DRV_MB_PARAM_ADDR_SHIFT 0
+#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_DEVAD_SHIFT 16
+#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000
+#define DRV_MB_PARAM_PORT_SHIFT 21
+#define DRV_MB_PARAM_PORT_MASK 0x00600000
+#define DRV_MSG_CODE_EXT_PHY_FW_UPGRADE 0x002a0000
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -1360,12 +1489,16 @@ struct public_drv_mb {
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_UNSUPPORTED 0x00000000
#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
@@ -1418,6 +1551,10 @@ struct public_drv_mb {
#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
/* MFW reject "mcp reset" command if one of the drivers is up */
#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
+#define FW_MSG_CODE_NVM_FAILED_CALC_HASH 0x00310000
+#define FW_MSG_CODE_NVM_PUBLIC_KEY_MISSING 0x00320000
+#define FW_MSG_CODE_NVM_INVALID_PUBLIC_KEY 0x00330000
+
#define FW_MSG_CODE_PHY_OK 0x00110000
#define FW_MSG_CODE_PHY_ERROR 0x00120000
#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
@@ -1425,25 +1562,30 @@ struct public_drv_mb {
#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_CODE_LED_MODE_INVALID 0x00170000
-#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000
-#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000
+#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000
#define FW_MSG_CODE_INIT_HW_FAILED_TO_ALLOCATE_PAGE 0x00040000
#define FW_MSG_CODE_INIT_HW_FAILED_BAD_STATE 0x00170000
#define FW_MSG_CODE_INIT_HW_FAILED_TO_SET_WINDOW 0x000d0000
#define FW_MSG_CODE_INIT_HW_FAILED_NO_IMAGE 0x000c0000
#define FW_MSG_CODE_INIT_HW_FAILED_VERSION_MISMATCH 0x00100000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
-#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE 0x000f0000
-#define FW_MSG_CODE_GPIO_OK 0x00160000
-#define FW_MSG_CODE_GPIO_DIRECTION_ERR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE 0x000f0000
+#define FW_MSG_CODE_GPIO_OK 0x00160000
+#define FW_MSG_CODE_GPIO_DIRECTION_ERR 0x00170000
#define FW_MSG_CODE_GPIO_CTRL_ERR 0x00020000
#define FW_MSG_CODE_GPIO_INVALID 0x000f0000
-#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000
+#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000
#define FW_MSG_CODE_BIST_TEST_INVALID 0x000f0000
+#define FW_MSG_CODE_EXTPHY_INVALID_IMAGE_HEADER 0x00700000
+#define FW_MSG_CODE_EXTPHY_INVALID_PHY_TYPE 0x00710000
+#define FW_MSG_CODE_EXTPHY_OPERATION_FAILED 0x00720000
+#define FW_MSG_CODE_EXTPHY_NO_PHY_DETECTED 0x00730000
+#define FW_MSG_CODE_RECOVERY_MODE 0x00740000
-/* mdump related response codes */
+ /* mdump related response codes */
#define FW_MSG_CODE_MDUMP_NO_IMAGE_FOUND 0x00010000
#define FW_MSG_CODE_MDUMP_ALLOC_FAILED 0x00020000
#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
@@ -1454,7 +1596,7 @@ struct public_drv_mb {
u32 fw_mb_param;
- /* Resource Allocation params - MFW version support*/
+/* Resource Allocation params - MFW version support */
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
@@ -1523,6 +1665,7 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_FAILURE_DETECTED,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
+ MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE,
MFW_DRV_MSG_MAX
};
diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h
index 8e9c08a7..4e588350 100644
--- a/drivers/net/qede/base/nvm_cfg.h
+++ b/drivers/net/qede/base/nvm_cfg.h
@@ -13,13 +13,21 @@
* Description: NVM config file - Generated file from nvm cfg excel.
* DO NOT MODIFY !!!
*
- * Created: 5/9/2016
+ * Created: 12/15/2016
*
****************************************************************************/
#ifndef NVM_CFG_H
#define NVM_CFG_H
+#define NVM_CFG_version 0x81805
+
+#define NVM_CFG_new_option_seq 15
+
+#define NVM_CFG_removed_option_seq 0
+
+#define NVM_CFG_updated_value_seq 1
+
struct nvm_cfg_mac_address {
u32 mac_addr_hi;
#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
@@ -64,10 +72,12 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
- #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
- #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
- #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
- #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_MASK \
+ 0x80000000
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_OFFSET 31
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_DISABLED \
+ 0x0
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_ENABLED 0x1
u32 engineering_change[3]; /* 0x4 */
u32 manufacturing_id; /* 0x10 */
u32 serial_number[4]; /* 0x14 */
@@ -144,6 +154,7 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xF
#define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_MASK 0x00000100
#define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_OFFSET 8
#define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_DISABLED 0x0
@@ -241,6 +252,11 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_INTERNAL 0x0
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_EXTERNAL 0x1
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_BOTH 0x2
+ /* ROL enable */
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_MASK 0x80000000
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_OFFSET 31
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_DISABLED 0x0
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_ENABLED 0x1
u32 f_lane_cfg1; /* 0x38 */
#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
@@ -469,6 +485,15 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
+ /* Select package id method */
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_MASK 0x40000000
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_OFFSET 30
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_NVRAM 0x0
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_IO_PINS 0x1
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_MASK 0x80000000
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_OFFSET 31
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_ENABLED 0x1
u32 manufacture_time; /* 0x70 */
#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
@@ -476,6 +501,14 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
+ /* Max MSIX for Ethernet in default mode */
+ #define NVM_CFG1_GLOB_MAX_MSIX_MASK 0x03FC0000
+ #define NVM_CFG1_GLOB_MAX_MSIX_OFFSET 18
+ /* PF Mapping */
+ #define NVM_CFG1_GLOB_PF_MAPPING_MASK 0x0C000000
+ #define NVM_CFG1_GLOB_PF_MAPPING_OFFSET 26
+ #define NVM_CFG1_GLOB_PF_MAPPING_CONTINUOUS 0x0
+ #define NVM_CFG1_GLOB_PF_MAPPING_FIXED 0x1
u32 led_global_settings; /* 0x74 */
#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
@@ -485,6 +518,47 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
+ /* Max. continues operating temperature */
+ #define NVM_CFG1_GLOB_MAX_CONT_OPERATING_TEMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_MAX_CONT_OPERATING_TEMP_OFFSET 16
+ /* GPIO which triggers run-time port swap according to the map
+ * specified in option 205
+ */
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO31 0x20
u32 generic_cont1; /* 0x78 */
#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
@@ -496,6 +570,25 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_LANE2_SWAP_OFFSET 14
#define NVM_CFG1_GLOB_LANE3_SWAP_MASK 0x00030000
#define NVM_CFG1_GLOB_LANE3_SWAP_OFFSET 16
+ /* Enable option 195 - Overriding the PCIe Preset value */
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_MASK 0x00040000
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_OFFSET 18
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_DISABLED 0x0
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_ENABLED 0x1
+ /* PCIe Preset value - applies only if option 194 is enabled */
+ #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_MASK 0x00780000
+ #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_OFFSET 19
+ /* Port mapping to be used when the run-time GPIO for port-swap is
+ * defined and set.
+ */
+ #define NVM_CFG1_GLOB_RUNTIME_PORT0_SWAP_MAP_MASK 0x01800000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT0_SWAP_MAP_OFFSET 23
+ #define NVM_CFG1_GLOB_RUNTIME_PORT1_SWAP_MAP_MASK 0x06000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT1_SWAP_MAP_OFFSET 25
+ #define NVM_CFG1_GLOB_RUNTIME_PORT2_SWAP_MAP_MASK 0x18000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT2_SWAP_MAP_OFFSET 27
+ #define NVM_CFG1_GLOB_RUNTIME_PORT3_SWAP_MAP_MASK 0x60000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT3_SWAP_MAP_OFFSET 29
u32 mbi_version; /* 0x7C */
#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
@@ -503,6 +596,44 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+ /* If set to other than NA, 0 - Normal operation, 1 - Thermal event
+ * occurred
+ */
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO31 0x20
u32 mbi_date; /* 0x80 */
u32 misc_sig; /* 0x84 */
/* Define the GPIO mapping to switch i2c mux */
@@ -543,6 +674,81 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
+ /* Interrupt signal used for SMBus/I2C management interface
+ * 0 = Interrupt event occurred
+ * 1 = Normal
+ */
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_OFFSET 16
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO31 0x20
+ /* Set aLOM FAN on GPIO */
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO31 0x20
u32 device_capabilities; /* 0x88 */
#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
@@ -578,11 +784,263 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X50G 0x40
#define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_BB_1X100G \
0x80
- u32 reserved[41]; /* 0x9C */
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X10G 0x100
+ /* @DPDK */
+ u32 reserved1[12]; /* 0x9C */
+ u32 oem1_number[8]; /* 0xCC */
+ u32 oem2_number[8]; /* 0xEC */
+ u32 mps25_active_txfir_pre; /* 0x10C */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_PRE_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_PRE_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_PRE_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_PRE_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_PRE_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_PRE_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_PRE_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_PRE_OFFSET 24
+ u32 mps25_active_txfir_main; /* 0x110 */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_MAIN_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_MAIN_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_MAIN_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_MAIN_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_MAIN_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_MAIN_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_MAIN_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_MAIN_OFFSET 24
+ u32 mps25_active_txfir_post; /* 0x114 */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_POST_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_POST_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_POST_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_POST_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_POST_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_POST_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_POST_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_POST_OFFSET 24
+ u32 features; /* 0x118 */
+ /* Set the Aux Fan on temperature */
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_VALUE_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_VALUE_OFFSET 0
+ /* Set NC-SI package ID */
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_OFFSET 8
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO31 0x20
+ /* PMBUS Clock GPIO */
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_OFFSET 16
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO31 0x20
+ /* PMBUS Data GPIO */
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO31 0x20
+ u32 tx_rx_eq_25g_hlpc; /* 0x11C */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_HLPC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_HLPC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_HLPC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_HLPC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_HLPC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_HLPC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_HLPC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_HLPC_OFFSET 24
+ u32 tx_rx_eq_25g_llpc; /* 0x120 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_LLPC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_LLPC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_LLPC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_LLPC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_LLPC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_LLPC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_LLPC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_LLPC_OFFSET 24
+ u32 tx_rx_eq_25g_ac; /* 0x124 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_AC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_AC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_AC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_AC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_AC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_AC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_AC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_AC_OFFSET 24
+ u32 tx_rx_eq_10g_pc; /* 0x128 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_PC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_PC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_PC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_PC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_PC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_PC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_PC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_PC_OFFSET 24
+ u32 tx_rx_eq_10g_ac; /* 0x12C */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_AC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_AC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_AC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_AC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_AC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_AC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_AC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_AC_OFFSET 24
+ u32 tx_rx_eq_1g; /* 0x130 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_1G_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_1G_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_1G_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_1G_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_1G_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_1G_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_1G_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_1G_OFFSET 24
+ u32 tx_rx_eq_25g_bt; /* 0x134 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_BT_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_BT_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_BT_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_BT_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_BT_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_BT_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_BT_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_BT_OFFSET 24
+ u32 tx_rx_eq_10g_bt; /* 0x138 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_BT_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_BT_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_BT_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_BT_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_BT_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_BT_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_BT_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_BT_OFFSET 24
+ u32 generic_cont4; /* 0x13C */
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_OFFSET 0
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO31 0x20
+ u32 reserved[58]; /* 0x140 */
};
struct nvm_cfg1_path {
- u32 reserved[30]; /* 0x0 */
+ u32 reserved[1]; /* 0x0 */
};
struct nvm_cfg1_port {
@@ -621,6 +1079,44 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+ /* GPIO for HW reset the PHY. In case it is the same for all ports,
+ * need to set same value for all ports
+ */
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_MASK 0xFF000000
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_OFFSET 24
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_NA 0x0
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO0 0x1
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO1 0x2
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO2 0x3
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO3 0x4
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO4 0x5
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO5 0x6
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO6 0x7
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO7 0x8
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO8 0x9
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO9 0xA
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO10 0xB
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO11 0xC
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO12 0xD
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO13 0xE
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO14 0xF
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO15 0x10
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO16 0x11
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO17 0x12
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO18 0x13
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO19 0x14
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO20 0x15
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO21 0x16
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO22 0x17
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO23 0x18
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO24 0x19
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO25 0x1A
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO26 0x1B
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO27 0x1C
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO28 0x1D
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO29 0x1E
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO30 0x1F
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO31 0x20
u32 pcie_cfg; /* 0xC */
#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
@@ -697,6 +1193,16 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
+ #define NVM_CFG1_PORT_FEC_AN_MODE_MASK 0x00700000
+ #define NVM_CFG1_PORT_FEC_AN_MODE_OFFSET 20
+ #define NVM_CFG1_PORT_FEC_AN_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_FEC_AN_MODE_10G_FIRECODE 0x1
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE 0x2
+ #define NVM_CFG1_PORT_FEC_AN_MODE_10G_AND_25G_FIRECODE 0x3
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_RS 0x4
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE_AND_RS 0x5
+ #define NVM_CFG1_PORT_FEC_AN_MODE_ALL 0x6
u32 phy_cfg; /* 0x1C */
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
@@ -736,9 +1242,16 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
- #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM8485X 0x1
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
+ /* EEE power saving mode */
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
u32 mba_cfg1; /* 0x28 */
#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
@@ -970,6 +1483,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_AUTO 0x7
u32 mnm_25g_cap; /* 0x58 */
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_MASK \
0x0000FFFF
@@ -1047,6 +1561,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_AUTO 0x7
u32 mnm_40g_cap; /* 0x64 */
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_MASK \
0x0000FFFF
@@ -1124,6 +1639,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_AUTO 0x7
u32 mnm_50g_cap; /* 0x70 */
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_MASK \
0x0000FFFF
@@ -1203,6 +1719,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_AUTO 0x7
u32 mnm_100g_cap; /* 0x7C */
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_MASK \
0x0000FFFF
@@ -1277,6 +1794,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_AUTO 0x7
u32 reserved[116]; /* 0x88 */
};
@@ -1387,12 +1905,17 @@ struct nvm_cfg1_func {
#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_OFFSET 0
#define NVM_CFG1_FUNC_PREBOOT_VLAN_MASK 0x00010000
#define NVM_CFG1_FUNC_PREBOOT_VLAN_OFFSET 16
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_MASK 0x001E0000
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_OFFSET 17
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ETHERNET 0x1
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_FCOE 0x2
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ISCSI 0x4
u32 reserved[8]; /* 0x30 */
};
struct nvm_cfg1 {
struct nvm_cfg1_glob glob; /* 0x0 */
- struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x228 */
struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
};
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index 3c369aa5..f9920f37 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1141,3 +1141,62 @@
#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR 0x50196cUL
#define PRS_REG_MSG_INFO 0x1f0a1cUL
#define BAR0_MAP_REG_XSDM_RAM 0x1e00000UL
+
+/* 8.18.7.0 FW */
+#define BRB_REG_INT_MASK_10 0x3401b8UL
+
+#define IGU_REG_PRODUCER_MEMORY 0x182000UL
+#define IGU_REG_CONSUMER_MEM 0x183000UL
+
+#define CDU_REG_CCFC_CTX_VALID0 0x580400UL
+#define CDU_REG_CCFC_CTX_VALID1 0x580404UL
+#define CDU_REG_TCFC_CTX_VALID0 0x580408UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL
+#define MISCS_REG_RESET_PL_HV_2_K2_E5 0x009150UL
+#define CNIG_REG_NW_PORT_MODE_BB 0x218200UL
+#define CNIG_REG_PMEG_IF_CMD_BB 0x21821cUL
+#define CNIG_REG_PMEG_IF_ADDR_BB 0x218224UL
+#define CNIG_REG_PMEG_IF_WRDATA_BB 0x218228UL
+#define NWM_REG_MAC0_K2_E5 0x800400UL
+#define CNIG_REG_NIG_PORT0_CONF_K2_E5 0x218200UL
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT 0
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT 1
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT 3
+#define ETH_MAC_REG_XIF_MODE_K2_E5 0x000080UL
+#define ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT 0
+#define ETH_MAC_REG_FRM_LENGTH_K2_E5 0x000014UL
+#define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT 0
+#define ETH_MAC_REG_TX_IPG_LENGTH_K2_E5 0x000044UL
+#define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT 0
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5 0x00001cUL
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT 0
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5 0x000020UL
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT 16
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT 0
+#define ETH_MAC_REG_COMMAND_CONFIG_K2_E5 0x000008UL
+#define MISC_REG_XMAC_CORE_PORT_MODE_BB 0x008c08UL
+#define MISC_REG_XMAC_PHY_PORT_MODE_BB 0x008c04UL
+#define XMAC_REG_MODE_BB 0x210008UL
+#define XMAC_REG_RX_MAX_SIZE_BB 0x210040UL
+#define XMAC_REG_TX_CTRL_LO_BB 0x210020UL
+#define XMAC_REG_CTRL_BB 0x210000UL
+#define XMAC_REG_CTRL_TX_EN_BB (0x1 << 0)
+#define XMAC_REG_CTRL_RX_EN_BB (0x1 << 1)
+#define XMAC_REG_RX_CTRL_BB 0x210030UL
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB (0x1 << 12)
+
+#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5 0x2aaf98UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5 0x2aafa0UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5 0x2aafa4UL
+#define PGLUE_B_REG_PGL_ADDR_88_F0_BB 0x2aa404UL
+#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB 0x2aa408UL
+#define PGLUE_B_REG_PGL_ADDR_90_F0_BB 0x2aa40cUL
+#define PGLUE_B_REG_PGL_ADDR_94_F0_BB 0x2aa410UL
+#define MISCS_REG_FUNCTION_HIDE_BB_K2 0x0096f0UL
+#define PCIE_REG_PRTY_MASK_K2_E5 0x0547b4UL
+#define PGLUE_B_REG_VF_BAR0_SIZE_K2_E5 0x2aaeb4UL
+
+#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c
index 30fded0f..a3c0b137 100644
--- a/drivers/net/qede/qede_eth_if.c
+++ b/drivers/net/qede/qede_eth_if.c
@@ -18,8 +18,8 @@ qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
u8 tx_switching = 0;
struct ecore_sp_vport_start_params start = { 0 };
- start.tpa_mode = p_params->gro_enable ? ECORE_TPA_MODE_GRO :
- ECORE_TPA_MODE_NONE;
+ start.tpa_mode = p_params->enable_lro ? ECORE_TPA_MODE_RSC :
+ ECORE_TPA_MODE_NONE;
start.remove_inner_vlan = p_params->remove_inner_vlan;
start.tx_switching = tx_switching;
start.only_untagged = false; /* untagged only */
@@ -29,7 +29,6 @@ qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
start.concrete_fid = p_hwfn->hw_info.concrete_fid;
start.handle_ptp_pkts = p_params->handle_ptp_pkts;
start.vport_id = p_params->vport_id;
- start.max_buffers_per_cqe = 16; /* TODO-is this right */
start.mtu = p_params->mtu;
/* @DPDK - Disable FW placement */
start.zero_placement_offset = 1;
@@ -93,58 +92,7 @@ qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
sp_params.update_accept_any_vlan_flg =
params->update_accept_any_vlan_flg;
sp_params.mtu = params->mtu;
-
- /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
- * We need to re-fix the rss values per engine for CMT.
- */
-
- if (edev->num_hwfns > 1 && params->update_rss_flg) {
- struct qed_update_vport_rss_params *rss = &params->rss_params;
- int k, max = 0;
-
- /* Find largest entry, since it's possible RSS needs to
- * be disabled [in case only 1 queue per-hwfn]
- */
- for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
- max = (max > rss->rss_ind_table[k]) ?
- max : rss->rss_ind_table[k];
-
- /* Either fix RSS values or disable RSS */
- if (edev->num_hwfns < max + 1) {
- int divisor = (max + edev->num_hwfns - 1) /
- edev->num_hwfns;
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "CMT - fixing RSS values (modulo %02x)\n",
- divisor);
-
- for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
- rss->rss_ind_table[k] =
- rss->rss_ind_table[k] % divisor;
- } else {
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "CMT - 1 queue per-hwfn; Disabling RSS\n");
- params->update_rss_flg = 0;
- }
- }
-
- /* Now, update the RSS configuration for actual configuration */
- if (params->update_rss_flg) {
- sp_rss_params.update_rss_config = 1;
- sp_rss_params.rss_enable = 1;
- sp_rss_params.update_rss_capabilities = 1;
- sp_rss_params.update_rss_ind_table = 1;
- sp_rss_params.update_rss_key = 1;
- sp_rss_params.rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
- ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
- sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
- rte_memcpy(sp_rss_params.rss_ind_table,
- params->rss_params.rss_ind_table,
- ECORE_RSS_IND_TABLE_SIZE * sizeof(uint16_t));
- rte_memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
- ECORE_RSS_KEY_SIZE * sizeof(uint32_t));
- sp_params.rss_params = &sp_rss_params;
- }
+ sp_params.sge_tpa_params = params->sge_tpa_params;
for_each_hwfn(edev, i) {
struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
@@ -173,7 +121,8 @@ qed_start_rxq(struct ecore_dev *edev,
uint16_t bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
- uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod)
+ uint16_t cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *ret_params)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
@@ -184,12 +133,14 @@ qed_start_rxq(struct ecore_dev *edev,
p_params->queue_id = p_params->queue_id / edev->num_hwfns;
p_params->stats_id = p_params->vport_id;
- rc = ecore_sp_eth_rx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- p_params,
- bd_max_bytes,
- bd_chain_phys_addr,
- cqe_pbl_addr, cqe_pbl_size, pp_prod);
+ rc = ecore_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size,
+ ret_params);
if (rc) {
DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
@@ -205,19 +156,17 @@ qed_start_rxq(struct ecore_dev *edev,
}
static int
-qed_stop_rxq(struct ecore_dev *edev, struct qed_stop_rxq_params *params)
+qed_stop_rxq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
{
int rc, hwfn_index;
struct ecore_hwfn *p_hwfn;
- hwfn_index = params->rss_id % edev->num_hwfns;
+ hwfn_index = rss_id % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
- rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
- params->rx_queue_id / edev->num_hwfns,
- params->eq_completion_only, false);
+ rc = ecore_eth_rx_queue_stop(p_hwfn, handle, true, false);
if (rc) {
- DP_ERR(edev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+ DP_ERR(edev, "Failed to stop RXQ#%02x\n", rss_id);
return rc;
}
@@ -229,7 +178,8 @@ qed_start_txq(struct ecore_dev *edev,
uint8_t rss_num,
struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
- uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell)
+ uint16_t pbl_size,
+ struct ecore_txq_start_ret_params *ret_params)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
@@ -240,11 +190,11 @@ qed_start_txq(struct ecore_dev *edev,
p_params->queue_id = p_params->queue_id / edev->num_hwfns;
p_params->stats_id = p_params->vport_id;
- rc = ecore_sp_eth_tx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- p_params,
- 0 /* tc */,
- pbl_addr, pbl_size, pp_doorbell);
+ rc = ecore_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params, 0 /* tc */,
+ pbl_addr, pbl_size,
+ ret_params);
if (rc) {
DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
@@ -260,18 +210,17 @@ qed_start_txq(struct ecore_dev *edev,
}
static int
-qed_stop_txq(struct ecore_dev *edev, struct qed_stop_txq_params *params)
+qed_stop_txq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
- hwfn_index = params->rss_id % edev->num_hwfns;
+ hwfn_index = rss_id % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
- rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
- params->tx_queue_id / edev->num_hwfns);
+ rc = ecore_eth_tx_queue_stop(p_hwfn, handle);
if (rc) {
- DP_ERR(edev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+ DP_ERR(edev, "Failed to stop TXQ#%02x\n", rss_id);
return rc;
}
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
index ef4a4b55..097e0257 100644
--- a/drivers/net/qede/qede_eth_if.h
+++ b/drivers/net/qede/qede_eth_if.h
@@ -41,20 +41,10 @@ struct qed_dev_eth_info {
struct ether_addr port_mac;
uint16_t num_vlan_filters;
- uint32_t num_mac_addrs;
-};
-
-struct qed_update_vport_rss_params {
- uint16_t rss_ind_table[128];
- uint32_t rss_key[10];
- u8 rss_caps;
-};
+ uint32_t num_mac_filters;
-struct qed_stop_rxq_params {
- uint8_t rss_id;
- uint8_t rx_queue_id;
- uint8_t vport_id;
- bool eq_completion_only;
+ /* Legacy VF - this affects the datapath */
+ bool is_legacy;
};
struct qed_update_vport_params {
@@ -68,25 +58,20 @@ struct qed_update_vport_params {
uint8_t update_accept_any_vlan_flg;
uint8_t accept_any_vlan;
uint8_t update_rss_flg;
- struct qed_update_vport_rss_params rss_params;
uint16_t mtu;
+ struct ecore_sge_tpa_params *sge_tpa_params;
};
struct qed_start_vport_params {
bool remove_inner_vlan;
bool handle_ptp_pkts;
- bool gro_enable;
+ bool enable_lro;
bool drop_ttl0;
uint8_t vport_id;
uint16_t mtu;
bool clear_stats;
};
-struct qed_stop_txq_params {
- uint8_t rss_id;
- uint8_t tx_queue_id;
-};
-
struct qed_eth_ops {
const struct qed_common_ops *common;
@@ -107,19 +92,21 @@ struct qed_eth_ops {
uint16_t bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
- uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod);
+ uint16_t cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *ret_params);
int (*q_rx_stop)(struct ecore_dev *edev,
- struct qed_stop_rxq_params *params);
+ uint8_t rss_id, void *handle);
int (*q_tx_start)(struct ecore_dev *edev,
uint8_t rss_num,
struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
- uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell);
+ uint16_t pbl_size,
+ struct ecore_txq_start_ret_params *ret_params);
int (*q_tx_stop)(struct ecore_dev *edev,
- struct qed_stop_txq_params *params);
+ uint8_t rss_id, void *handle);
int (*eth_cqe_completion)(struct ecore_dev *edev,
uint8_t rss_id,
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 6d6fb9de..7501eb20 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -12,9 +12,113 @@
/* Globals */
static const struct qed_eth_ops *qed_ops;
-static const char *drivername = "qede pmd";
static int64_t timer_period = 1;
+/* VXLAN tunnel classification mapping */
+const struct _qede_vxlan_tunn_types {
+ uint16_t rte_filter_type;
+ enum ecore_filter_ucast_type qede_type;
+ enum ecore_tunn_clss qede_tunn_clss;
+ const char *string;
+} qede_tunn_types[] = {
+ {
+ ETH_TUNNEL_FILTER_OMAC,
+ ECORE_FILTER_MAC,
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ "outer-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_VNI,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_VLAN,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "outer-mac and vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VNI,
+ "vni and inner-mac",
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "vni and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_OIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-IP"
+ },
+ {
+ ETH_TUNNEL_FILTER_IIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "inner-IP"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "OMAC_TENID_IMAC"
+ },
+};
+
struct rte_qede_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint64_t offset;
@@ -175,14 +279,14 @@ static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
}
static void
-qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
+qede_interrupt_handler(void *param)
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
qede_interrupt_action(ECORE_LEADING_HWFN(edev));
- if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
+ if (rte_intr_enable(eth_dev->intr_handle))
DP_ERR(edev, "rte_intr_enable failed\n");
}
@@ -194,6 +298,7 @@ qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
qdev->ops = qed_ops;
}
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
static void qede_print_adapter_info(struct qede_dev *qdev)
{
struct ecore_dev *edev = &qdev->edev;
@@ -222,6 +327,7 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
DP_INFO(edev, " Firmware file : %s\n", fw_file);
DP_INFO(edev, "*********************************\n");
}
+#endif
static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
@@ -231,6 +337,17 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
/* ucast->assert_on_error = true; - For debug */
}
+static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
+ uint8_t clss, bool mode, bool mask)
+{
+ memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
+ p_tunn->vxlan.b_update_mode = mode;
+ p_tunn->vxlan.b_mode_enabled = mask;
+ p_tunn->b_update_rx_cls = true;
+ p_tunn->b_update_tx_cls = true;
+ p_tunn->vxlan.tun_cls = clss;
+}
+
static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
bool add)
@@ -261,13 +378,15 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
}
ether_addr_copy(mac_addr, &u->mac);
u->vlan = ucast->vlan;
+ u->vni = ucast->vni;
SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
qdev->num_uc_addr++;
} else {
SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
if ((memcmp(mac_addr, &tmp->mac,
ETHER_ADDR_LEN) == 0) &&
- ucast->vlan == tmp->vlan)
+ ucast->vlan == tmp->vlan &&
+ ucast->vni == tmp->vni)
break;
}
if (tmp == NULL) {
@@ -368,7 +487,8 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
}
} else { /* Unicast */
if (add) {
- if (qdev->num_uc_addr >= qdev->dev_info.num_mac_addrs) {
+ if (qdev->num_uc_addr >=
+ qdev->dev_info.num_mac_filters) {
DP_ERR(edev,
"Ucast filter table limit exceeded,"
" Please enable promisc mode\n");
@@ -388,16 +508,18 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
return rc;
}
-static void
+static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
- uint32_t index, __rte_unused uint32_t pool)
+ __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
struct ecore_filter_ucast ucast;
+ int re;
qede_set_ucast_cmn_params(&ucast);
ucast.type = ECORE_FILTER_MAC;
ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
- (void)qede_mac_int_ops(eth_dev, &ucast, 1);
+ re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
+ return re;
}
static void
@@ -405,15 +527,13 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- struct ether_addr mac_addr;
struct ecore_filter_ucast ucast;
- int rc;
PMD_INIT_FUNC_TRACE(edev);
- if (index >= qdev->dev_info.num_mac_addrs) {
+ if (index >= qdev->dev_info.num_mac_filters) {
DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
- index, qdev->dev_info.num_mac_addrs);
+ index, qdev->dev_info.num_mac_filters);
return;
}
@@ -433,8 +553,6 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_filter_ucast ucast;
- int rc;
if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
mac_addr->addr_bytes)) {
@@ -444,29 +562,7 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
return;
}
- /* First remove the primary mac */
- qede_set_ucast_cmn_params(&ucast);
- ucast.opcode = ECORE_FILTER_REMOVE;
- ucast.type = ECORE_FILTER_MAC;
- ether_addr_copy(&qdev->primary_mac,
- (struct ether_addr *)&ucast.mac);
- rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
- if (rc != 0) {
- DP_ERR(edev, "Unable to remove current macaddr"
- " Reverting to previous default mac\n");
- ether_addr_copy(&qdev->primary_mac,
- &eth_dev->data->mac_addrs[0]);
- return;
- }
-
- /* Add new MAC */
- ucast.opcode = ECORE_FILTER_ADD;
- ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
- rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
- if (rc != 0)
- DP_ERR(edev, "Unable to add new default mac\n");
- else
- ether_addr_copy(mac_addr, &qdev->primary_mac);
+ qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
@@ -510,50 +606,11 @@ static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
DP_ERR(edev, "Update V-PORT failed %d\n", rc);
return rc;
}
+ qdev->vlan_strip_flg = set_stripping;
return 0;
}
-static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
-
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->hw_vlan_strip)
- (void)qede_vlan_stripping(eth_dev, 1);
- else
- (void)qede_vlan_stripping(eth_dev, 0);
- }
-
- if (mask & ETH_VLAN_FILTER_MASK) {
- /* VLAN filtering kicks in when a VLAN is added */
- if (rxmode->hw_vlan_filter) {
- qede_vlan_filter_set(eth_dev, 0, 1);
- } else {
- if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
- DP_NOTICE(edev, false,
- " Please remove existing VLAN filters"
- " before disabling VLAN filtering\n");
- /* Signal app that VLAN filtering is still
- * enabled
- */
- rxmode->hw_vlan_filter = true;
- } else {
- qede_vlan_filter_set(eth_dev, 0, 0);
- }
- }
- }
-
- if (mask & ETH_VLAN_EXTEND_MASK)
- DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
- " and classification is based on outer tag only\n");
-
- DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
- mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
-}
-
static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
uint16_t vlan_id, int on)
{
@@ -567,7 +624,7 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
if (on) {
if (qdev->configured_vlans == dev_info->num_vlan_filters) {
- DP_INFO(edev, "Reached max VLAN filter limit"
+ DP_ERR(edev, "Reached max VLAN filter limit"
" enabling accept_any_vlan\n");
qede_config_accept_any_vlan(qdev, true);
return 0;
@@ -644,6 +701,46 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
return rc;
}
+static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rxmode->hw_vlan_strip)
+ (void)qede_vlan_stripping(eth_dev, 1);
+ else
+ (void)qede_vlan_stripping(eth_dev, 0);
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ /* VLAN filtering kicks in when a VLAN is added */
+ if (rxmode->hw_vlan_filter) {
+ qede_vlan_filter_set(eth_dev, 0, 1);
+ } else {
+ if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
+ DP_ERR(edev,
+ " Please remove existing VLAN filters"
+ " before disabling VLAN filtering\n");
+ /* Signal app that VLAN filtering is still
+ * enabled
+ */
+ rxmode->hw_vlan_filter = true;
+ } else {
+ qede_vlan_filter_set(eth_dev, 0, 0);
+ }
+ }
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK)
+ DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
+ " and classification is based on outer tag only\n");
+
+ DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
+ mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
+}
+
static int qede_init_vport(struct qede_dev *qdev)
{
struct ecore_dev *edev = &qdev->edev;
@@ -651,7 +748,7 @@ static int qede_init_vport(struct qede_dev *qdev)
int rc;
start.remove_inner_vlan = 1;
- start.gro_enable = 0;
+ start.enable_lro = qdev->enable_lro;
start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
start.vport_id = 0;
start.drop_ttl0 = false;
@@ -671,12 +768,62 @@ static int qede_init_vport(struct qede_dev *qdev)
return 0;
}
+static void qede_prandom_bytes(uint32_t *buff)
+{
+ uint8_t i;
+
+ srand((unsigned int)time(NULL));
+ for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
+ buff[i] = rand();
+}
+
+int qede_config_rss(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+#endif
+ uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
+ struct rte_eth_rss_reta_entry64 reta_conf[2];
+ struct rte_eth_rss_conf rss_conf;
+ uint32_t i, id, pos, q;
+
+ rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if (!rss_conf.rss_key) {
+ DP_INFO(edev, "Applying driver default key\n");
+ rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
+ qede_prandom_bytes(&def_rss_key[0]);
+ rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
+ }
+
+ /* Configure RSS hash */
+ if (qede_rss_hash_update(eth_dev, &rss_conf))
+ return -EINVAL;
+
+ /* Configure default RETA */
+ memset(reta_conf, 0, sizeof(reta_conf));
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
+ reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ id = i / RTE_RETA_GROUP_SIZE;
+ pos = i % RTE_RETA_GROUP_SIZE;
+ q = i % QEDE_RSS_COUNT(qdev);
+ reta_conf[id].reta[pos] = q;
+ }
+ if (qede_rss_reta_update(eth_dev, &reta_conf[0],
+ ECORE_RSS_IND_TABLE_SIZE))
+ return -EINVAL;
+
+ return 0;
+}
+
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
- int rc, i, j;
+ int rc;
PMD_INIT_FUNC_TRACE(edev);
@@ -684,14 +831,13 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (edev->num_hwfns > 1) {
if (eth_dev->data->nb_rx_queues < 2 ||
eth_dev->data->nb_tx_queues < 2) {
- DP_NOTICE(edev, false,
- "100G mode needs min. 2 RX/TX queues\n");
+ DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
return -EINVAL;
}
if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
(eth_dev->data->nb_tx_queues % 2 != 0)) {
- DP_NOTICE(edev, false,
+ DP_ERR(edev,
"100G mode needs even no. of RX/TX queues\n");
return -EINVAL;
}
@@ -701,11 +847,6 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (rxmode->enable_scatter == 1)
eth_dev->data->scattered_rx = 1;
- if (rxmode->enable_lro == 1) {
- DP_INFO(edev, "LRO is not supported\n");
- return -EINVAL;
- }
-
if (!rxmode->hw_strip_crc)
DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
@@ -713,6 +854,13 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
"in hw\n");
+ if (rxmode->enable_lro) {
+ qdev->enable_lro = true;
+ /* Enable scatter mode for LRO */
+ if (!rxmode->enable_scatter)
+ eth_dev->data->scattered_rx = 1;
+ }
+
/* Check for the port restart case */
if (qdev->state != QEDE_DEV_INIT) {
rc = qdev->ops->vport_stop(edev, 0);
@@ -739,11 +887,24 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (rc != 0)
return rc;
- SLIST_INIT(&qdev->vlan_list_head);
+ if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
+ rxmode->mq_mode == ETH_MQ_RX_NONE)) {
+ DP_ERR(edev, "Unsupported RSS mode\n");
+ qdev->ops->vport_stop(edev, 0);
+ qede_dealloc_fp_resc(eth_dev);
+ return -EINVAL;
+ }
- /* Add primary mac for PF */
- if (IS_PF(edev))
- qede_mac_addr_set(eth_dev, &qdev->primary_mac);
+ /* Flow director mode check */
+ rc = qede_check_fdir_support(eth_dev);
+ if (rc) {
+ qdev->ops->vport_stop(edev, 0);
+ qede_dealloc_fp_resc(eth_dev);
+ return -EINVAL;
+ }
+ SLIST_INIT(&qdev->fdir_info.fdir_list_head);
+
+ SLIST_INIT(&qdev->vlan_list_head);
/* Enable VLAN offloads by default */
qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
@@ -763,13 +924,15 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
.nb_max = NUM_RX_BDS_MAX,
.nb_min = 128,
- .nb_align = 128 /* lowest common multiple */
+ .nb_align = 128 /* lowest common multiple */
};
static const struct rte_eth_desc_lim qede_tx_desc_lim = {
.nb_max = NUM_TX_BDS_MAX,
.nb_min = 256,
- .nb_align = 256
+ .nb_align = 256,
+ .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
+ .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};
static void
@@ -783,34 +946,44 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
PMD_INIT_FUNC_TRACE(edev);
- dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
- QEDE_ETH_OVERHEAD);
+ dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
dev_info->tx_desc_lim = qede_tx_desc_lim;
- dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
- dev_info->max_tx_queues = dev_info->max_rx_queues;
- dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
- if (IS_VF(edev))
- dev_info->max_vfs = 0;
+
+ if (IS_PF(edev))
+ dev_info->max_rx_queues = (uint16_t)RTE_MIN(
+ QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
else
- dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
- dev_info->driver_name = qdev->drv_ver;
+ dev_info->max_rx_queues = (uint16_t)RTE_MIN(
+ QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
+ dev_info->max_tx_queues = dev_info->max_rx_queues;
+
+ dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
+ dev_info->max_vfs = 0;
dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
+ dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
dev_info->default_txconf = (struct rte_eth_txconf) {
.txq_flags = QEDE_TXQ_FLAGS,
};
- dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
- dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
+ dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO);
+
+ dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
memset(&link, 0, sizeof(struct qed_link_output));
qdev->ops->common->get_link(edev, &link);
@@ -876,10 +1049,12 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
PMD_INIT_FUNC_TRACE(edev);
+#endif
enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
@@ -891,10 +1066,12 @@ static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
PMD_INIT_FUNC_TRACE(edev);
+#endif
if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
qed_configure_filter_rx_mode(eth_dev,
@@ -926,12 +1103,15 @@ static void qede_poll_sp_sb_cb(void *param)
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
int rc;
PMD_INIT_FUNC_TRACE(edev);
+ qede_fdir_dealloc_resc(eth_dev);
+
/* dev_stop() shall cleanup fp resources in hw but without releasing
* dma memories and sw structures so that dev_start() can be called
* by the app without reconfiguration. However, in dev_close() we
@@ -952,9 +1132,9 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
qdev->ops->common->remove(edev);
- rte_intr_disable(&eth_dev->pci_dev->intr_handle);
+ rte_intr_disable(&pci_dev->intr_handle);
- rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
if (edev->num_hwfns > 1)
@@ -1008,8 +1188,8 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
RTE_ETHDEV_QUEUE_STAT_CNTRS);
txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
- if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) ||
- (txq_stat_cntrs != QEDE_TSS_COUNT(qdev)))
+ if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
+ (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
DP_VERBOSE(edev, ECORE_MSG_DEBUG,
"Not all the queue stats will be displayed. Set"
" RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
@@ -1062,7 +1242,8 @@ qede_get_xstats_count(struct qede_dev *qdev) {
static int
qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, unsigned limit)
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
{
struct qede_dev *qdev = dev->data->dev_private;
const unsigned int stat_cnt = qede_get_xstats_count(qdev);
@@ -1277,7 +1458,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
return NULL;
}
-void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
+static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
*rss_caps = 0;
*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
@@ -1286,87 +1467,182 @@ void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}
-static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_conf *rss_conf)
+int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
{
- struct qed_update_vport_params vport_update_params;
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params vport_update_params;
+ struct ecore_rss_params rss_params;
+ struct ecore_hwfn *p_hwfn;
uint32_t *key = (uint32_t *)rss_conf->rss_key;
uint64_t hf = rss_conf->rss_hf;
- int i;
+ uint8_t len = rss_conf->rss_key_len;
+ uint8_t idx;
+ uint8_t i;
+ int rc;
memset(&vport_update_params, 0, sizeof(vport_update_params));
+ memset(&rss_params, 0, sizeof(rss_params));
+
+ DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
+ (unsigned long)hf, len, key);
if (hf != 0) {
- /* Enable RSS */
- qede_init_rss_caps(&qdev->rss_params.rss_caps, hf);
- memcpy(&vport_update_params.rss_params, &qdev->rss_params,
- sizeof(vport_update_params.rss_params));
- if (key)
- memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
- rss_conf->rss_key_len);
- vport_update_params.update_rss_flg = 1;
- qdev->rss_enabled = 1;
- } else {
- /* Disable RSS */
- qdev->rss_enabled = 0;
+ /* Enabling RSS */
+ DP_INFO(edev, "Enabling rss\n");
+
+ /* RSS caps */
+ qede_init_rss_caps(&rss_params.rss_caps, hf);
+ rss_params.update_rss_capabilities = 1;
+
+ /* RSS hash key */
+ if (key) {
+ if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
+ DP_ERR(edev, "RSS key length exceeds limit\n");
+ return -EINVAL;
+ }
+ DP_INFO(edev, "Applying user supplied hash key\n");
+ rss_params.update_rss_key = 1;
+ memcpy(&rss_params.rss_key, key, len);
+ }
+ rss_params.rss_enable = 1;
}
- /* If the mapping doesn't fit any supported, return */
- if (qdev->rss_params.rss_caps == 0 && hf != 0)
- return -EINVAL;
-
- DP_INFO(edev, "%s\n", (vport_update_params.update_rss_flg) ?
- "Enabling RSS" : "Disabling RSS");
-
+ rss_params.update_rss_config = 1;
+ /* tbl_size has to be set with capabilities */
+ rss_params.rss_table_size_log = 7;
vport_update_params.vport_id = 0;
+ /* pass the L2 handles instead of qids */
+ for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
+ idx = qdev->rss_ind_table[i];
+ rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
+ }
+ vport_update_params.rss_params = &rss_params;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_ERR(edev, "vport-update for RSS failed\n");
+ return rc;
+ }
+ }
+ qdev->rss_enable = rss_params.rss_enable;
+
+ /* Update local structure for hash query */
+ qdev->rss_conf.rss_hf = hf;
+ qdev->rss_conf.rss_key_len = len;
+ if (qdev->rss_enable) {
+ if (qdev->rss_conf.rss_key == NULL) {
+ qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
+ if (qdev->rss_conf.rss_key == NULL) {
+ DP_ERR(edev, "No memory to store RSS key\n");
+ return -ENOMEM;
+ }
+ }
+ if (key && len) {
+ DP_INFO(edev, "Storing RSS key\n");
+ memcpy(qdev->rss_conf.rss_key, key, len);
+ }
+ } else if (!qdev->rss_enable && len == 0) {
+ if (qdev->rss_conf.rss_key) {
+ free(qdev->rss_conf.rss_key);
+ qdev->rss_conf.rss_key = NULL;
+ DP_INFO(edev, "Free RSS key\n");
+ }
+ }
- return qdev->ops->vport_update(edev, &vport_update_params);
+ return 0;
}
-int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- uint64_t hf;
-
- if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
- return -EINVAL;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- if (rss_conf->rss_key)
- memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
- sizeof(qdev->rss_params.rss_key));
-
- hf = 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4) ?
- ETH_RSS_IPV4 : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
- ETH_RSS_IPV6 : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
- ETH_RSS_IPV6_EX : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
- ETH_RSS_NONFRAG_IPV4_TCP : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
- ETH_RSS_NONFRAG_IPV6_TCP : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
- ETH_RSS_IPV6_TCP_EX : 0;
-
- rss_conf->rss_hf = hf;
+ rss_conf->rss_hf = qdev->rss_conf.rss_hf;
+ rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
+ if (rss_conf->rss_key && qdev->rss_conf.rss_key)
+ memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
+ rss_conf->rss_key_len);
return 0;
}
-static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
+static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
+ struct ecore_rss_params *rss)
{
- struct qed_update_vport_params vport_update_params;
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ int i, fn;
+ bool rss_mode = 1; /* enable */
+ struct ecore_queue_cid *cid;
+ struct ecore_rss_params *t_rss;
+
+ /* In regular scenario, we'd simply need to take input handlers.
+ * But in CMT, we'd have to split the handlers according to the
+ * engine they were configured on. We'd then have to understand
+ * whether RSS is really required, since 2-queues on CMT doesn't
+ * require RSS.
+ */
+
+ /* CMT should be round-robin */
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ cid = rss->rss_ind_table[i];
+
+ if (cid->p_owner == ECORE_LEADING_HWFN(edev))
+ t_rss = &rss[0];
+ else
+ t_rss = &rss[1];
+
+ t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
+ }
+
+ t_rss = &rss[1];
+ t_rss->update_rss_ind_table = 1;
+ t_rss->rss_table_size_log = 7;
+ t_rss->update_rss_config = 1;
+
+ /* Make sure RSS is actually required */
+ for_each_hwfn(edev, fn) {
+ for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
+ i++) {
+ if (rss[fn].rss_ind_table[i] !=
+ rss[fn].rss_ind_table[0])
+ break;
+ }
+
+ if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
+ DP_INFO(edev,
+ "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ rss_mode = 0;
+ goto out;
+ }
+ }
+
+out:
+ t_rss->rss_enable = rss_mode;
+
+ return rss_mode;
+}
+
+int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params vport_update_params;
+ struct ecore_rss_params *params;
+ struct ecore_hwfn *p_hwfn;
uint16_t i, idx, shift;
+ uint8_t entry;
+ int rc = 0;
if (reta_size > ETH_RSS_RETA_SIZE_128) {
DP_ERR(edev, "reta_size %d is not supported by hardware\n",
@@ -1375,42 +1651,71 @@ static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
}
memset(&vport_update_params, 0, sizeof(vport_update_params));
- memcpy(&vport_update_params.rss_params, &qdev->rss_params,
- sizeof(vport_update_params.rss_params));
+ params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
+ RTE_CACHE_LINE_SIZE);
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift)) {
- uint8_t entry = reta_conf[idx].reta[shift];
- qdev->rss_params.rss_ind_table[i] = entry;
+ entry = reta_conf[idx].reta[shift];
+ /* Pass rxq handles to ecore */
+ params->rss_ind_table[i] =
+ qdev->fp_array[entry].rxq->handle;
+ /* Update the local copy for RETA query command */
+ qdev->rss_ind_table[i] = entry;
}
}
- vport_update_params.update_rss_flg = 1;
+ params->update_rss_ind_table = 1;
+ params->rss_table_size_log = 7;
+ params->update_rss_config = 1;
+
+ /* Fix up RETA for CMT mode device */
+ if (edev->num_hwfns > 1)
+ qdev->rss_enable = qede_update_rss_parm_cmt(edev,
+ params);
vport_update_params.vport_id = 0;
+ /* Use the current value of rss_enable */
+ params->rss_enable = qdev->rss_enable;
+ vport_update_params.rss_params = params;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_ERR(edev, "vport-update for RSS failed\n");
+ goto out;
+ }
+ }
- return qdev->ops->vport_update(edev, &vport_update_params);
+out:
+ rte_free(params);
+ return rc;
}
-int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
+static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
uint16_t i, idx, shift;
+ uint8_t entry;
if (reta_size > ETH_RSS_RETA_SIZE_128) {
- struct ecore_dev *edev = &qdev->edev;
DP_ERR(edev, "reta_size %d is not supported\n",
reta_size);
+ return -EINVAL;
}
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift)) {
- uint8_t entry = qdev->rss_params.rss_ind_table[i];
+ entry = qdev->rss_ind_table[i];
reta_conf[idx].reta[shift] = entry;
}
}
@@ -1418,34 +1723,339 @@ int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
return 0;
}
-int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
- uint32_t frame_size;
- struct qede_dev *qdev = dev->data->dev_private;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_dev_info dev_info = {0};
+ struct qede_fastpath *fp;
+ uint32_t frame_size;
+ uint16_t rx_buf_size;
+ uint16_t bufsz;
+ int i;
+ PMD_INIT_FUNC_TRACE(edev);
qede_dev_info_get(dev, &dev_info);
-
- /* VLAN_TAG = 4 */
- frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
-
- if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ frame_size = mtu + QEDE_ETH_OVERHEAD;
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+ DP_ERR(edev, "MTU %u out of range\n", mtu);
return -EINVAL;
-
+ }
if (!dev->data->scattered_rx &&
- frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+ DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
+ dev->data->min_rx_buf_size);
return -EINVAL;
-
+ }
+ /* Temporarily replace I/O functions with dummy ones. It cannot
+ * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+ */
+ dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+ dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+ qede_dev_stop(dev);
+ rte_delay_ms(1000);
+ qdev->mtu = mtu;
+ /* Fix up RX buf size for all queues of the port */
+ for_each_queue(i) {
+ fp = &qdev->fp_array[i];
+ if (fp->type & QEDE_FASTPATH_RX) {
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (dev->data->scattered_rx)
+ rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+ else
+ rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
+ rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+ fp->rxq->rx_buf_size = rx_buf_size;
+ DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+ }
+ }
+ qede_dev_start(dev);
if (frame_size > ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.jumbo_frame = 1;
else
dev->data->dev_conf.rxmode.jumbo_frame = 0;
-
/* update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
- qdev->mtu = mtu;
- qede_dev_stop(dev);
- qede_dev_start(dev);
+ /* Reassign back */
+ dev->rx_pkt_burst = qede_recv_pkts;
+ dev->tx_pkt_burst = qede_xmit_pkts;
+
+ return 0;
+}
+
+static int
+qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn; /* @DPDK */
+ struct ecore_hwfn *p_hwfn;
+ int rc, i;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ memset(&tunn, 0, sizeof(tunn));
+ if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
+ QEDE_VXLAN_DEF_PORT;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+ tunn.vxlan_port.port);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
+}
+
+static int
+qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
+}
+
+static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
+ uint32_t *clss, char *str)
+{
+ uint16_t j;
+ *clss = MAX_ECORE_TUNN_CLSS;
+
+ for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
+ if (filter == qede_tunn_types[j].rte_filter_type) {
+ *type = qede_tunn_types[j].qede_type;
+ *clss = qede_tunn_types[j].qede_tunn_clss;
+ strcpy(str, qede_tunn_types[j].string);
+ return;
+ }
+ }
+}
+
+static int
+qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
+ const struct rte_eth_tunnel_filter_conf *conf,
+ uint32_t type)
+{
+ /* Init commmon ucast params first */
+ qede_set_ucast_cmn_params(ucast);
+
+ /* Copy out the required fields based on classification type */
+ ucast->type = type;
+
+ switch (type) {
+ case ECORE_FILTER_VNI:
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_VLAN:
+ ucast->vlan = conf->inner_vlan;
+ break;
+ case ECORE_FILTER_MAC:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_INNER_MAC:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vlan = conf->inner_vlan;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ const struct rte_eth_tunnel_filter_conf *conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn;
+ struct ecore_hwfn *p_hwfn;
+ enum ecore_filter_ucast_type type;
+ enum ecore_tunn_clss clss;
+ struct ecore_filter_ucast ucast;
+ char str[80];
+ uint16_t filter_type;
+ int rc, i;
+
+ filter_type = conf->filter_type | qdev->vxlan_filter_type;
+ /* First determine if the given filter classification is supported */
+ qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
+ if (clss == MAX_ECORE_TUNN_CLSS) {
+ DP_ERR(edev, "Wrong filter type\n");
+ return -EINVAL;
+ }
+ /* Init tunnel ucast params */
+ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
+ conf->filter_type);
+ return rc;
+ }
+ DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+ str, filter_op, ucast.type);
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ucast.opcode = ECORE_FILTER_ADD;
+
+ /* Skip MAC/VLAN if filter is based on VNI */
+ if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+ rc = qede_mac_int_ops(eth_dev, &ucast, 1);
+ if (rc == 0) {
+ /* Enable accept anyvlan */
+ qede_config_accept_any_vlan(qdev, true);
+ }
+ } else {
+ rc = qede_ucast_filter(eth_dev, &ucast, 1);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, &ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ qdev->vxlan_filter_type = filter_type;
+
+ DP_INFO(edev, "Enabling VXLAN tunneling\n");
+ qede_set_cmn_tunn_param(&tunn, clss, true, true);
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
+ &tunn, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ }
+ }
+ qdev->num_tunn_filters++; /* Filter added successfully */
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ucast.opcode = ECORE_FILTER_REMOVE;
+
+ if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+ rc = qede_mac_int_ops(eth_dev, &ucast, 0);
+ } else {
+ rc = qede_ucast_filter(eth_dev, &ucast, 0);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, &ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ qdev->vxlan_filter_type = filter_type;
+ qdev->num_tunn_filters--;
+
+ /* Disable VXLAN if VXLAN filters become 0 */
+ if (qdev->num_tunn_filters == 0) {
+ DP_INFO(edev, "Disabling VXLAN tunneling\n");
+
+ /* Use 0 as tunnel mode */
+ qede_set_cmn_tunn_param(&tunn, clss, false, true);
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev,
+ "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ break;
+ }
+ }
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported operation %d\n", filter_op);
+ return -EINVAL;
+ }
+ DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);
+
+ return 0;
+}
+
+int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_tunnel_filter_conf *filter_conf =
+ (struct rte_eth_tunnel_filter_conf *)arg;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_TUNNEL:
+ switch (filter_conf->tunnel_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ DP_INFO(edev,
+ "Packet steering to the specified Rx queue"
+ " is not supported with VXLAN tunneling");
+ return(qede_vxlan_tunn_config(eth_dev, filter_op,
+ filter_conf));
+ /* Place holders for future tunneling support */
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_TUNNEL_TYPE_NVGRE:
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ DP_ERR(edev, "Unsupported tunnel type %d\n",
+ filter_conf->tunnel_type);
+ return -EINVAL;
+ case RTE_TUNNEL_TYPE_NONE:
+ default:
+ return 0;
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ return qede_fdir_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_NTUPLE:
+ return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_MACVLAN:
+ case RTE_ETH_FILTER_ETHERTYPE:
+ case RTE_ETH_FILTER_FLEXIBLE:
+ case RTE_ETH_FILTER_SYN:
+ case RTE_ETH_FILTER_HASH:
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ case RTE_ETH_FILTER_MAX:
+ default:
+ DP_ERR(edev, "Unsupported filter type %d\n",
+ filter_type);
+ return -EINVAL;
+ }
return 0;
}
@@ -1485,6 +2095,9 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
.reta_update = qede_rss_reta_update,
.reta_query = qede_rss_reta_query,
.mtu_set = qede_set_mtu,
+ .filter_ctrl = qede_dev_filter_ctrl,
+ .udp_tunnel_port_add = qede_udp_dst_port_add,
+ .udp_tunnel_port_del = qede_udp_dst_port_del,
};
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
@@ -1522,9 +2135,10 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
static void qede_update_pf_params(struct ecore_dev *edev)
{
struct ecore_pf_params pf_params;
- /* 32 rx + 32 tx */
+
memset(&pf_params, 0, sizeof(struct ecore_pf_params));
- pf_params.eth_pf_params.num_cons = 64;
+ pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
+ pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
qed_ops->common->update_pf_params(edev, &pf_params);
}
@@ -1544,13 +2158,13 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Fix up ecore debug level */
uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
uint8_t dp_level = ECORE_LEVEL_VERBOSE;
- uint32_t max_mac_addrs;
int rc;
/* Extract key data structures */
adapter = eth_dev->data->dev_private;
edev = &adapter->edev;
- pci_addr = eth_dev->pci_dev->addr;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_addr = pci_dev->addr;
PMD_INIT_FUNC_TRACE(edev);
@@ -1560,6 +2174,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
eth_dev->rx_pkt_burst = qede_recv_pkts;
eth_dev->tx_pkt_burst = qede_xmit_pkts;
+ eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
DP_NOTICE(edev, false,
@@ -1567,8 +2182,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
return 0;
}
- pci_dev = eth_dev->pci_dev;
-
rte_eth_copy_pci_info(eth_dev, pci_dev);
/* @DPDK */
@@ -1593,10 +2206,10 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
qede_update_pf_params(edev);
- rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
+ rte_intr_callback_register(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
- if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
+ if (rte_intr_enable(&pci_dev->intr_handle)) {
DP_ERR(edev, "rte_intr_enable() failed\n");
return -ENODEV;
}
@@ -1646,20 +2259,20 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
qede_alloc_etherdev(adapter, &dev_info);
- adapter->ops->common->set_id(edev, edev->name, QEDE_PMD_VERSION);
+ adapter->ops->common->set_name(edev, edev->name);
if (!is_vf)
- adapter->dev_info.num_mac_addrs =
+ adapter->dev_info.num_mac_filters =
(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
ECORE_MAC);
else
ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
- &adapter->dev_info.num_mac_addrs);
+ (uint32_t *)&adapter->dev_info.num_mac_filters);
/* Allocate memory for storing MAC addr */
eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
(ETHER_ADDR_LEN *
- adapter->dev_info.num_mac_addrs),
+ adapter->dev_info.num_mac_filters),
RTE_CACHE_LINE_SIZE);
if (eth_dev->data->mac_addrs == NULL) {
@@ -1702,7 +2315,9 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
if (do_once) {
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
qede_print_adapter_info(adapter);
+#endif
do_once = false;
}
@@ -1760,64 +2375,96 @@ static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
return qede_dev_common_uninit(eth_dev);
}
-static struct rte_pci_id pci_id_qedevf_map[] = {
+static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
{
- QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
+ },
+ {
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
},
{
- QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
},
{.vendor_id = 0,}
};
-static struct rte_pci_id pci_id_qede_map[] = {
+static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
},
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
},
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
},
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
},
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
},
{.vendor_id = 0,}
};
-static struct eth_driver rte_qedevf_pmd = {
- .pci_drv = {
- .id_table = pci_id_qedevf_map,
- .drv_flags =
- RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = qedevf_eth_dev_init,
- .eth_dev_uninit = qedevf_eth_dev_uninit,
- .dev_private_size = sizeof(struct qede_dev),
+static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct qede_dev), qedevf_eth_dev_init);
+}
+
+static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qedevf_pmd = {
+ .id_table = pci_id_qedevf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = qedevf_eth_dev_pci_probe,
+ .remove = qedevf_eth_dev_pci_remove,
};
-static struct eth_driver rte_qede_pmd = {
- .pci_drv = {
- .id_table = pci_id_qede_map,
- .drv_flags =
- RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = qede_eth_dev_init,
- .eth_dev_uninit = qede_eth_dev_uninit,
- .dev_private_size = sizeof(struct qede_dev),
+static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct qede_dev), qede_eth_dev_init);
+}
+
+static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qede_pmd = {
+ .id_table = pci_id_qede_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = qede_eth_dev_pci_probe,
+ .remove = qede_eth_dev_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
-RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio");
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index a35ea8bd..e4323a0d 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -14,7 +14,9 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_dev.h>
+#include <rte_ip.h>
/* ecore includes */
#include "base/bcm_osal.h"
@@ -31,6 +33,11 @@
#include "base/ecore_iov_api.h"
#include "base/ecore_cxt.h"
#include "base/nvm_cfg.h"
+#include "base/ecore_iov_api.h"
+#include "base/ecore_sp_commands.h"
+#include "base/ecore_l2.h"
+#include "base/ecore_dev_api.h"
+#include "base/ecore_l2.h"
#include "qede_logs.h"
#include "qede_if.h"
@@ -43,8 +50,8 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
-#define QEDE_PMD_VERSION_MAJOR 1
-#define QEDE_PMD_VERSION_MINOR 2
+#define QEDE_PMD_VERSION_MAJOR 2
+#define QEDE_PMD_VERSION_MINOR 4
#define QEDE_PMD_VERSION_REVISION 0
#define QEDE_PMD_VERSION_PATCH 1
@@ -89,27 +96,48 @@
struct ecore_dev *edev = &qdev->edev; \
}
-/************* QLogic 25G/40G/100G vendor/devices ids *************/
-#define PCI_VENDOR_ID_QLOGIC 0x1077
-
-#define CHIP_NUM_57980E 0x1634
-#define CHIP_NUM_57980S 0x1629
-#define CHIP_NUM_VF 0x1630
-#define CHIP_NUM_57980S_40 0x1634
-#define CHIP_NUM_57980S_25 0x1656
-#define CHIP_NUM_57980S_IOV 0x1664
-#define CHIP_NUM_57980S_100 0x1644
-
-#define PCI_DEVICE_ID_NX2_57980E CHIP_NUM_57980E
-#define PCI_DEVICE_ID_NX2_57980S CHIP_NUM_57980S
-#define PCI_DEVICE_ID_NX2_VF CHIP_NUM_VF
-#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
-#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
-#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
-#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
+/************* QLogic 10G/25G/40G/50G/100G vendor/devices ids *************/
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+
+#define CHIP_NUM_57980E 0x1634
+#define CHIP_NUM_57980S 0x1629
+#define CHIP_NUM_VF 0x1630
+#define CHIP_NUM_57980S_40 0x1634
+#define CHIP_NUM_57980S_25 0x1656
+#define CHIP_NUM_57980S_IOV 0x1664
+#define CHIP_NUM_57980S_100 0x1644
+#define CHIP_NUM_57980S_50 0x1654
+#define CHIP_NUM_AH_50G 0x8070
+#define CHIP_NUM_AH_10G 0x8071
+#define CHIP_NUM_AH_40G 0x8072
+#define CHIP_NUM_AH_25G 0x8073
+#define CHIP_NUM_AH_IOV 0x8090
+
+#define PCI_DEVICE_ID_QLOGIC_NX2_57980E CHIP_NUM_57980E
+#define PCI_DEVICE_ID_QLOGIC_NX2_57980S CHIP_NUM_57980S
+#define PCI_DEVICE_ID_QLOGIC_NX2_VF CHIP_NUM_VF
+#define PCI_DEVICE_ID_QLOGIC_57980S_40 CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_QLOGIC_57980S_25 CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_QLOGIC_57980S_IOV CHIP_NUM_57980S_IOV
+#define PCI_DEVICE_ID_QLOGIC_57980S_100 CHIP_NUM_57980S_100
+#define PCI_DEVICE_ID_QLOGIC_57980S_50 CHIP_NUM_57980S_50
+#define PCI_DEVICE_ID_QLOGIC_AH_50G CHIP_NUM_AH_50G
+#define PCI_DEVICE_ID_QLOGIC_AH_10G CHIP_NUM_AH_10G
+#define PCI_DEVICE_ID_QLOGIC_AH_40G CHIP_NUM_AH_40G
+#define PCI_DEVICE_ID_QLOGIC_AH_25G CHIP_NUM_AH_25G
+#define PCI_DEVICE_ID_QLOGIC_AH_IOV CHIP_NUM_AH_IOV
+
+
+#define QEDE_VXLAN_DEF_PORT 8472
extern char fw_file[];
+/* Number of PF connections - 32 RX + 32 TX */
+#define QEDE_PF_NUM_CONNS (64)
+
+/* Maximum number of flowdir filters */
+#define QEDE_RFS_MAX_FLTR (256)
+
/* Port/function states */
enum qede_dev_state {
QEDE_DEV_INIT, /* Init the chip and Slowpath */
@@ -131,9 +159,25 @@ struct qede_mcast_entry {
struct qede_ucast_entry {
struct ether_addr mac;
uint16_t vlan;
+ uint16_t vni;
SLIST_ENTRY(qede_ucast_entry) list;
};
+struct qede_fdir_entry {
+ uint32_t soft_id; /* unused for now */
+ uint16_t pkt_len; /* actual packet length to match */
+ uint16_t rx_queue; /* queue to be steered to */
+ const struct rte_memzone *mz; /* mz used to hold L2 frame */
+ SLIST_ENTRY(qede_fdir_entry) list;
+};
+
+struct qede_fdir_info {
+ struct ecore_arfs_config_params arfs;
+ uint16_t filter_count;
+ SLIST_HEAD(fdir_list_head, qede_fdir_entry)fdir_list_head;
+};
+
+
/*
* Structure to store private data for each port.
*/
@@ -146,10 +190,12 @@ struct qede_dev {
struct qede_fastpath *fp_array;
uint8_t num_tc;
uint16_t mtu;
- bool rss_enabled;
- struct qed_update_vport_rss_params rss_params;
- uint32_t flags;
- bool gro_disable;
+ bool rss_enable;
+ struct rte_eth_rss_conf rss_conf;
+ uint16_t rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+ uint64_t rss_hf;
+ uint8_t rss_key_len;
+ bool enable_lro;
uint16_t num_queues;
uint8_t fp_num_tx;
uint8_t fp_num_rx;
@@ -163,25 +209,43 @@ struct qede_dev {
SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
uint16_t num_uc_addr;
bool handle_hw_err;
+ uint16_t num_tunn_filters;
+ uint16_t vxlan_filter_type;
+ struct qede_fdir_info fdir_info;
+ bool vlan_strip_flg;
char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
};
-/* Static functions */
-static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
- uint16_t vlan_id, int on);
-
-static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_conf *rss_conf);
+/* Non-static functions */
+int qede_config_rss(struct rte_eth_dev *eth_dev);
-static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size);
+int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf);
-/* Non-static functions */
-void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf);
+int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
int qed_fill_eth_dev_info(struct ecore_dev *edev,
struct qed_dev_eth_info *info);
int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up);
+int qede_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type type,
+ enum rte_filter_op op, void *arg);
+
+int qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op, void *arg);
+
+int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op, void *arg);
+
+int qede_check_fdir_support(struct rte_eth_dev *eth_dev);
+
+uint16_t qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ void *buff,
+ struct ecore_arfs_config_params *params);
+
+void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev);
+
#endif /* _QEDE_ETHDEV_H_ */
diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c
new file mode 100644
index 00000000..7bd5c5d6
--- /dev/null
+++ b/drivers/net/qede/qede_fdir.c
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2017 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_errno.h>
+
+#include "qede_ethdev.h"
+
+#define IP_VERSION (0x40)
+#define IP_HDRLEN (0x5)
+#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
+#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
+#define QEDE_FDIR_IPV4_DEF_TTL (64)
+
+/* Sum of length of header types of L2, L3, L4.
+ * L2 : ether_hdr + vlan_hdr + vxlan_hdr
+ * L3 : ipv6_hdr
+ * L4 : tcp_hdr
+ */
+#define QEDE_MAX_FDIR_PKT_LEN (86)
+
+#ifndef IPV6_ADDR_LEN
+#define IPV6_ADDR_LEN (16)
+#endif
+
+#define QEDE_VALID_FLOW(flow_type) \
+ ((flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
+
+/* Note: Flowdir support is only partial.
+ * For ex: drop_queue, FDIR masks, flex_conf are not supported.
+ * Parameters like pballoc/status fields are irrelevant here.
+ */
+int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+
+ /* check FDIR modes */
+ switch (fdir->mode) {
+ case RTE_FDIR_MODE_NONE:
+ qdev->fdir_info.arfs.arfs_enable = false;
+ DP_INFO(edev, "flowdir is disabled\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT:
+ if (edev->num_hwfns > 1) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ qdev->fdir_info.arfs.arfs_enable = false;
+ return -ENOTSUP;
+ }
+ qdev->fdir_info.arfs.arfs_enable = true;
+ DP_INFO(edev, "flowdir is enabled\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT_TUNNEL:
+ case RTE_FDIR_MODE_SIGNATURE:
+ case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
+ DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct qede_fdir_entry *tmp = NULL;
+
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (tmp) {
+ if (tmp->mz)
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
+ qede_fdir_entry, list);
+ rte_free(tmp);
+ }
+ }
+}
+
+static int
+qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir_filter,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
+ struct qede_fdir_entry *tmp = NULL;
+ struct qede_fdir_entry *fdir = NULL;
+ const struct rte_memzone *mz;
+ struct ecore_hwfn *p_hwfn;
+ enum _ecore_status_t rc;
+ uint16_t pkt_len;
+ void *pkt;
+
+ if (add) {
+ if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
+ DP_ERR(edev, "Reached max flowdir filter limit\n");
+ return -EINVAL;
+ }
+ fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!fdir) {
+ DP_ERR(edev, "Did not allocate memory for fdir\n");
+ return -ENOMEM;
+ }
+ }
+ /* soft_id could have been used as memzone string, but soft_id is
+ * not currently used so it has no significance.
+ */
+ snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
+ SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+ if (!mz) {
+ DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
+ rte_strerror(rte_errno));
+ rc = -rte_errno;
+ goto err1;
+ }
+
+ pkt = mz->addr;
+ memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
+ pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
+ &qdev->fdir_info.arfs);
+ if (pkt_len == 0) {
+ rc = -EINVAL;
+ goto err2;
+ }
+ DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
+ if (add) {
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
+ DP_ERR(edev, "flowdir filter exist\n");
+ rc = -EEXIST;
+ goto err2;
+ }
+ }
+ } else {
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
+ break;
+ }
+ if (!tmp) {
+ DP_ERR(edev, "flowdir filter does not exist\n");
+ rc = -EEXIST;
+ goto err2;
+ }
+ }
+ p_hwfn = ECORE_LEADING_HWFN(edev);
+ if (add) {
+ if (!qdev->fdir_info.arfs.arfs_enable) {
+ /* Force update */
+ eth_dev->data->dev_conf.fdir_conf.mode =
+ RTE_FDIR_MODE_PERFECT;
+ qdev->fdir_info.arfs.arfs_enable = true;
+ DP_INFO(edev, "Force enable flowdir in perfect mode\n");
+ }
+ /* Enable ARFS searcher with updated flow_types */
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->fdir_info.arfs);
+ }
+ /* configure filter with ECORE_SPQ_MODE_EBLOCK */
+ rc = ecore_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt, NULL,
+ (dma_addr_t)mz->phys_addr,
+ pkt_len,
+ fdir_filter->action.rx_queue,
+ 0, add);
+ if (rc == ECORE_SUCCESS) {
+ if (add) {
+ fdir->rx_queue = fdir_filter->action.rx_queue;
+ fdir->pkt_len = pkt_len;
+ fdir->mz = mz;
+ SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
+ fdir, list);
+ qdev->fdir_info.filter_count++;
+ DP_INFO(edev, "flowdir filter added, count = %d\n",
+ qdev->fdir_info.filter_count);
+ } else {
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
+ qede_fdir_entry, list);
+ rte_free(tmp); /* the node deleted */
+ rte_memzone_free(mz); /* temp node allocated */
+ qdev->fdir_info.filter_count--;
+ DP_INFO(edev, "Fdir filter deleted, count = %d\n",
+ qdev->fdir_info.filter_count);
+ }
+ } else {
+ DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
+ rc, qdev->fdir_info.filter_count);
+ }
+
+ /* Disable ARFS searcher if there are no more filters */
+ if (qdev->fdir_info.filter_count == 0) {
+ memset(&qdev->fdir_info.arfs, 0,
+ sizeof(struct ecore_arfs_config_params));
+ DP_INFO(edev, "Disabling flowdir\n");
+ qdev->fdir_info.arfs.arfs_enable = false;
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->fdir_info.arfs);
+ }
+ return 0;
+
+err2:
+ rte_memzone_free(mz);
+err1:
+ if (add)
+ rte_free(fdir);
+ return rc;
+}
+
+static int
+qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ if (!QEDE_VALID_FLOW(fdir->input.flow_type)) {
+ DP_ERR(edev, "invalid flow_type input\n");
+ return -EINVAL;
+ }
+
+ if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
+ DP_ERR(edev, "invalid queue number %u\n",
+ fdir->action.rx_queue);
+ return -EINVAL;
+ }
+
+ if (fdir->input.flow_ext.is_vf) {
+ DP_ERR(edev, "flowdir is not supported over VF\n");
+ return -EINVAL;
+ }
+
+ return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
+}
+
+/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
+uint16_t
+qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ void *buff,
+ struct ecore_arfs_config_params *params)
+
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ uint16_t *ether_type;
+ uint8_t *raw_pkt;
+ struct rte_eth_fdir_input *input;
+ static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+ struct ipv4_hdr *ip;
+ struct ipv6_hdr *ip6;
+ struct udp_hdr *udp;
+ struct tcp_hdr *tcp;
+ uint16_t len;
+ static const uint8_t next_proto[] = {
+ [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
+ };
+ raw_pkt = (uint8_t *)buff;
+ input = &fdir->input;
+ DP_INFO(edev, "flow_type %d\n", input->flow_type);
+
+ len = 2 * sizeof(struct ether_addr);
+ raw_pkt += 2 * sizeof(struct ether_addr);
+ if (input->flow_ext.vlan_tci) {
+ DP_INFO(edev, "adding VLAN header\n");
+ rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+ rte_memcpy(raw_pkt + sizeof(uint16_t),
+ &input->flow_ext.vlan_tci,
+ sizeof(uint16_t));
+ raw_pkt += sizeof(vlan_frame);
+ len += sizeof(vlan_frame);
+ }
+ ether_type = (uint16_t *)raw_pkt;
+ raw_pkt += sizeof(uint16_t);
+ len += sizeof(uint16_t);
+
+ switch (input->flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ /* fill the common ip header */
+ ip = (struct ipv4_hdr *)raw_pkt;
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
+ ip->total_length = sizeof(struct ipv4_hdr);
+ ip->next_proto_id = input->flow.ip4_flow.proto ?
+ input->flow.ip4_flow.proto :
+ next_proto[input->flow_type];
+ ip->time_to_live = input->flow.ip4_flow.ttl ?
+ input->flow.ip4_flow.ttl :
+ QEDE_FDIR_IPV4_DEF_TTL;
+ ip->type_of_service = input->flow.ip4_flow.tos;
+ ip->dst_addr = input->flow.ip4_flow.dst_ip;
+ ip->src_addr = input->flow.ip4_flow.src_ip;
+ len += sizeof(struct ipv4_hdr);
+ params->ipv4 = true;
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->dst_port = input->flow.udp4_flow.dst_port;
+ udp->src_port = input->flow.udp4_flow.src_port;
+ udp->dgram_len = sizeof(struct udp_hdr);
+ len += sizeof(struct udp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp->src_port = input->flow.tcp4_flow.src_port;
+ tcp->dst_port = input->flow.tcp4_flow.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ ip6 = (struct ipv6_hdr *)raw_pkt;
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ ip6->proto = input->flow.ipv6_flow.proto ?
+ input->flow.ipv6_flow.proto :
+ next_proto[input->flow_type];
+ rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.dst_ip,
+ IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.src_ip,
+ IPV6_ADDR_LEN);
+ len += sizeof(struct ipv6_hdr);
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->src_port = input->flow.udp6_flow.dst_port;
+ udp->dst_port = input->flow.udp6_flow.src_port;
+ len += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp->src_port = input->flow.tcp4_flow.src_port;
+ tcp->dst_port = input->flow.tcp4_flow.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported flow_type %u\n",
+ input->flow_type);
+ return 0;
+ }
+
+ return len;
+}
+
+int
+qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_fdir_filter *fdir;
+ int ret;
+
+ fdir = (struct rte_eth_fdir_filter *)arg;
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query flowdir support */
+ if (edev->num_hwfns > 1) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 1);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 0);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_INFO:
+ return -ENOTSUP;
+ break;
+ default:
+ DP_ERR(edev, "unknown operation %u", filter_op);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_ntuple_filter *ntuple;
+ struct rte_eth_fdir_filter fdir_entry;
+ struct rte_eth_tcpv4_flow *tcpv4_flow;
+ struct rte_eth_udpv4_flow *udpv4_flow;
+ bool add = false;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query fdir support */
+ if (edev->num_hwfns > 1) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ add = true;
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ break;
+ case RTE_ETH_FILTER_INFO:
+ case RTE_ETH_FILTER_GET:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_SET:
+ case RTE_ETH_FILTER_STATS:
+ case RTE_ETH_FILTER_OP_MAX:
+ DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
+ return -ENOTSUP;
+ }
+ ntuple = (struct rte_eth_ntuple_filter *)arg;
+ /* Internally convert ntuple to fdir entry */
+ memset(&fdir_entry, 0, sizeof(fdir_entry));
+ if (ntuple->proto == IPPROTO_TCP) {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
+ tcpv4_flow->ip.src_ip = ntuple->src_ip;
+ tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
+ tcpv4_flow->ip.proto = IPPROTO_TCP;
+ tcpv4_flow->src_port = ntuple->src_port;
+ tcpv4_flow->dst_port = ntuple->dst_port;
+ } else {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ udpv4_flow = &fdir_entry.input.flow.udp4_flow;
+ udpv4_flow->ip.src_ip = ntuple->src_ip;
+ udpv4_flow->ip.dst_ip = ntuple->dst_ip;
+ udpv4_flow->ip.proto = IPPROTO_TCP;
+ udpv4_flow->src_port = ntuple->src_port;
+ udpv4_flow->dst_port = ntuple->dst_port;
+ }
+ return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
+}
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 2131fe2a..405c525e 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -30,11 +30,24 @@ struct qed_dev_info {
/* MFW version */
uint32_t mfw_rev;
+#define QED_MFW_VERSION_0_MASK 0x000000FF
+#define QED_MFW_VERSION_0_OFFSET 0
+#define QED_MFW_VERSION_1_MASK 0x0000FF00
+#define QED_MFW_VERSION_1_OFFSET 8
+#define QED_MFW_VERSION_2_MASK 0x00FF0000
+#define QED_MFW_VERSION_2_OFFSET 16
+#define QED_MFW_VERSION_3_MASK 0xFF000000
+#define QED_MFW_VERSION_3_OFFSET 24
uint32_t flash_size;
uint8_t mf_mode;
bool tx_switching;
- /* To be added... */
+ u16 mtu;
+
+ /* Out param for qede */
+ bool vxlan_enable;
+ bool gre_enable;
+ bool geneve_enable;
};
enum qed_sb_type {
@@ -88,8 +101,45 @@ struct qed_slowpath_params {
#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
+struct qed_eth_tlvs {
+ u16 feat_flags;
+ u8 mac[3][ETH_ALEN];
+ u16 lso_maxoff;
+ u16 lso_minseg;
+ bool prom_mode;
+ u16 num_txqs;
+ u16 num_rxqs;
+ u16 num_netqs;
+ u16 flex_vlan;
+ u32 tcp4_offloads;
+ u32 tcp6_offloads;
+ u16 tx_avg_qdepth;
+ u16 rx_avg_qdepth;
+ u8 txqs_empty;
+ u8 rxqs_empty;
+ u8 num_txqs_full;
+ u8 num_rxqs_full;
+};
+
+struct qed_tunn_update_params {
+ unsigned long tunn_mode_update_mask;
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
struct qed_common_cb_ops {
void (*link_update)(void *dev, struct qed_link_output *link);
+ void (*get_tlv_data)(void *dev, struct qed_eth_tlvs *data);
};
struct qed_selftest_ops {
@@ -108,16 +158,17 @@ struct qed_common_ops {
struct rte_pci_device *pci_dev,
enum qed_protocol protocol,
uint32_t dp_module, uint8_t dp_level, bool is_vf);
- void (*set_id)(struct ecore_dev *edev,
- char name[], const char ver_str[]);
- enum _ecore_status_t (*chain_alloc)(struct ecore_dev *edev,
- enum ecore_chain_use_mode
- intended_use,
- enum ecore_chain_mode mode,
- enum ecore_chain_cnt_type cnt_type,
- uint32_t num_elems,
- osal_size_t elem_size,
- struct ecore_chain *p_chain);
+ void (*set_name)(struct ecore_dev *edev, char name[]);
+ enum _ecore_status_t
+ (*chain_alloc)(struct ecore_dev *edev,
+ enum ecore_chain_use_mode
+ intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type,
+ uint32_t num_elems,
+ osal_size_t elem_size,
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl);
void (*chain_free)(struct ecore_dev *edev,
struct ecore_chain *p_chain);
@@ -147,9 +198,16 @@ struct qed_common_ops {
dma_addr_t sb_phy_addr,
uint16_t sb_id, enum qed_sb_type type);
+ int (*get_sb_info)(struct ecore_dev *edev,
+ struct ecore_sb_info *sb, u16 qid,
+ struct ecore_sb_info_dbg *sb_dbg);
+
bool (*can_link_change)(struct ecore_dev *edev);
+
void (*update_msglvl)(struct ecore_dev *edev,
uint32_t dp_module, uint8_t dp_level);
+
+ int (*send_drv_state)(struct ecore_dev *edev, bool active);
};
#endif /* _QEDE_IF_H */
diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h
index 45c4af09..25c14d8b 100644
--- a/drivers/net/qede/qede_logs.h
+++ b/drivers/net/qede/qede_logs.h
@@ -16,15 +16,18 @@
(p_dev)->name ? (p_dev)->name : "", \
##__VA_ARGS__)
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
#define DP_NOTICE(p_dev, is_assert, fmt, ...) \
rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\
"[QEDE PMD: (%s)]%s:" fmt, \
(p_dev)->name ? (p_dev)->name : "", \
__func__, \
##__VA_ARGS__)
+#else
+#define DP_NOTICE(p_dev, fmt, ...) do { } while (0)
+#endif
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
-
#define DP_INFO(p_dev, fmt, ...) \
rte_log(RTE_LOG_INFO, RTE_LOGTYPE_PMD, \
"[%s:%d(%s)]" fmt, \
@@ -33,7 +36,6 @@
##__VA_ARGS__)
#else
#define DP_INFO(p_dev, fmt, ...) do { } while (0)
-
#endif
#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
@@ -77,14 +79,4 @@ do { \
#define PMD_RX_LOG(level, q, fmt, args...) do { } while (0)
#endif
-#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
-
#endif /* _QEDE_LOGS_H_ */
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index b666e1c7..712c03fd 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -12,8 +12,6 @@
#include "qede_ethdev.h"
-static uint8_t npar_tx_switching = 1;
-
/* Alarm timeout. */
#define QEDE_ALARM_TIMEOUT_US 100000
@@ -21,7 +19,7 @@ static uint8_t npar_tx_switching = 1;
char fw_file[PATH_MAX];
const char *QEDE_DEFAULT_FIRMWARE =
- "/lib/firmware/qed/qed_init_values-8.10.9.0.bin";
+ "/lib/firmware/qed/qed_init_values-8.18.9.0.bin";
static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -50,11 +48,12 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
int rc;
ecore_init_struct(edev);
+ edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
qdev->protocol = protocol;
- if (is_vf) {
+
+ if (is_vf)
edev->b_is_vf = true;
- edev->b_hw_channel = true; /* @DPDK */
- }
+
ecore_init_dp(edev, dp_module, dp_level, NULL);
qed_init_pci(edev, pci_dev);
@@ -62,6 +61,8 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
hw_prepare_params.personality = ECORE_PCI_ETH;
hw_prepare_params.drv_resc_alloc = false;
hw_prepare_params.chk_reg_fifo = false;
+ hw_prepare_params.initiate_pf_flr = true;
+ hw_prepare_params.epoch = (u32)time(NULL);
rc = ecore_hw_prepare(edev, &hw_prepare_params);
if (rc) {
DP_ERR(edev, "hw prepare failed\n");
@@ -73,7 +74,7 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
static int qed_nic_setup(struct ecore_dev *edev)
{
- int rc, i;
+ int rc;
rc = ecore_resc_alloc(edev);
if (rc)
@@ -221,27 +222,33 @@ static void qed_stop_iov_task(struct ecore_dev *edev)
static int qed_slowpath_start(struct ecore_dev *edev,
struct qed_slowpath_params *params)
{
- bool allow_npar_tx_switching;
const uint8_t *data = NULL;
struct ecore_hwfn *hwfn;
struct ecore_mcp_drv_version drv_version;
struct ecore_hw_init_params hw_init_params;
- struct qede_dev *qdev = (struct qede_dev *)edev;
+ struct ecore_ptt *p_ptt;
int rc;
-#ifdef QED_ENC_SUPPORTED
- struct ecore_tunn_start_params tunn_info;
-#endif
-#ifdef CONFIG_ECORE_BINARY_FW
if (IS_PF(edev)) {
+#ifdef CONFIG_ECORE_BINARY_FW
rc = qed_load_firmware_data(edev);
if (rc) {
- DP_NOTICE(edev, true,
- "Failed to find fw file %s\n", fw_file);
+ DP_ERR(edev, "Failed to find fw file %s\n", fw_file);
goto err;
}
- }
#endif
+ hwfn = ECORE_LEADING_HWFN(edev);
+ if (edev->num_hwfns == 1) { /* skip aRFS for 100G device */
+ p_ptt = ecore_ptt_acquire(hwfn);
+ if (p_ptt) {
+ ECORE_LEADING_HWFN(edev)->p_arfs_ptt = p_ptt;
+ } else {
+ DP_ERR(edev, "Failed to acquire PTT for flowdir\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+ }
rc = qed_nic_setup(edev);
if (rc)
@@ -257,38 +264,26 @@ static int qed_slowpath_start(struct ecore_dev *edev,
if (rc) {
DP_NOTICE(edev, true,
"Failed to allocate stream memory\n");
- goto err2;
+ goto err1;
}
}
+#endif
qed_start_iov_task(edev);
-#endif
#ifdef CONFIG_ECORE_BINARY_FW
if (IS_PF(edev))
data = (const uint8_t *)edev->firmware + sizeof(u32);
#endif
- allow_npar_tx_switching = npar_tx_switching ? true : false;
-
/* Start the slowpath */
memset(&hw_init_params, 0, sizeof(hw_init_params));
-#ifdef QED_ENC_SUPPORTED
- memset(&tunn_info, 0, sizeof(tunn_info));
- tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
- 1 << QED_MODE_L2GRE_TUNN |
- 1 << QED_MODE_IPGRE_TUNN |
- 1 << QED_MODE_L2GENEVE_TUNN | 1 << QED_MODE_IPGENEVE_TUNN;
- tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
- hw_init_params.p_tunn = &tunn_info;
-#endif
hw_init_params.b_hw_start = true;
hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
- hw_init_params.allow_npar_tx_switch = allow_npar_tx_switching;
+ hw_init_params.allow_npar_tx_switch = true;
hw_init_params.bin_fw_data = data;
- hw_init_params.epoch = (u32)time(NULL);
+ hw_init_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
+ hw_init_params.avoid_eng_reset = false;
rc = ecore_hw_init(edev, &hw_init_params);
if (rc) {
DP_ERR(edev, "ecore_hw_init failed\n");
@@ -310,7 +305,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
if (rc) {
DP_NOTICE(edev, true,
"Failed sending drv version command\n");
- return rc;
+ goto err3;
}
}
@@ -318,8 +313,14 @@ static int qed_slowpath_start(struct ecore_dev *edev,
return 0;
+err3:
ecore_hw_stop(edev);
err2:
+ qed_stop_iov_task(edev);
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ qed_free_stream_mem(edev);
+err1:
+#endif
ecore_resc_free(edev);
err:
#ifdef CONFIG_ECORE_BINARY_FW
@@ -338,27 +339,40 @@ static int
qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
{
struct ecore_ptt *ptt = NULL;
+ struct ecore_tunnel_info *tun = &edev->tunnel;
memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+ if (tun->vxlan.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+ tun->vxlan.b_mode_enabled)
+ dev_info->vxlan_enable = true;
+
+ if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+ tun->l2_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
+ dev_info->gre_enable = true;
+
+ if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+ tun->l2_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
+ dev_info->geneve_enable = true;
+
dev_info->num_hwfns = edev->num_hwfns;
dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]);
+ dev_info->mtu = ECORE_LEADING_HWFN(edev)->hw_info.mtu;
+
rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
ETHER_ADDR_LEN);
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+
if (IS_PF(edev)) {
- dev_info->fw_major = FW_MAJOR_VERSION;
- dev_info->fw_minor = FW_MINOR_VERSION;
- dev_info->fw_rev = FW_REVISION_VERSION;
- dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->mf_mode = edev->mf_mode;
dev_info->tx_switching = false;
- } else {
- ecore_vf_get_fw_version(&edev->hwfns[0], &dev_info->fw_major,
- &dev_info->fw_minor, &dev_info->fw_rev,
- &dev_info->fw_eng);
- }
- if (IS_PF(edev)) {
ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
if (ptt) {
ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
@@ -386,7 +400,6 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
int
qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
{
- struct qede_dev *qdev = (struct qede_dev *)edev;
uint8_t queues = 0;
int i;
@@ -416,11 +429,6 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
if (edev->num_hwfns > 1) {
ecore_vf_get_num_rxqs(&edev->hwfns[1], &queues);
info->num_queues += queues;
- /* Restrict 100G VF to advertise 16 queues till the
- * required support is available to go beyond 16.
- */
- info->num_queues = RTE_MIN(info->num_queues,
- ECORE_MAX_VF_CHAINS_PER_PF);
}
ecore_vf_get_num_vlan_filters(&edev->hwfns[0],
@@ -428,6 +436,8 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
ecore_vf_get_port_mac(&edev->hwfns[0],
(uint8_t *)&info->port_mac);
+
+ info->is_legacy = ecore_vf_get_pre_fp_hsi(&edev->hwfns[0]);
}
qed_fill_dev_info(edev, &info->common);
@@ -438,9 +448,7 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
return 0;
}
-static void
-qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
- const char ver_str[NAME_SIZE])
+static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE])
{
int i;
@@ -448,8 +456,6 @@ qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
for_each_hwfn(edev, i) {
snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}
- memcpy(edev->ver_str, ver_str, NAME_SIZE);
- edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}
static uint32_t
@@ -490,7 +496,6 @@ static void qed_fill_link(struct ecore_hwfn *hwfn,
struct ecore_mcp_link_params params;
struct ecore_mcp_link_state link;
struct ecore_mcp_link_capabilities link_caps;
- uint32_t media_type;
uint8_t change = 0;
memset(if_link, 0, sizeof(*if_link));
@@ -638,19 +643,6 @@ static int qed_nic_stop(struct ecore_dev *edev)
return rc;
}
-static int qed_nic_reset(struct ecore_dev *edev)
-{
- int rc;
-
- rc = ecore_hw_reset(edev);
- if (rc)
- return rc;
-
- ecore_resc_free(edev);
-
- return 0;
-}
-
static int qed_slowpath_stop(struct ecore_dev *edev)
{
#ifdef CONFIG_QED_SRIOV
@@ -669,10 +661,11 @@ static int qed_slowpath_stop(struct ecore_dev *edev)
if (IS_QED_ETH_IF(edev))
qed_sriov_disable(edev, true);
#endif
- qed_nic_stop(edev);
}
- qed_nic_reset(edev);
+ qed_nic_stop(edev);
+
+ ecore_resc_free(edev);
qed_stop_iov_task(edev);
return 0;
@@ -686,17 +679,61 @@ static void qed_remove(struct ecore_dev *edev)
ecore_hw_remove(edev);
}
+static int qed_send_drv_state(struct ecore_dev *edev, bool active)
+{
+ struct ecore_hwfn *hwfn = ECORE_LEADING_HWFN(edev);
+ struct ecore_ptt *ptt;
+ int status = 0;
+
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = ecore_mcp_ov_update_driver_state(hwfn, ptt, active ?
+ ECORE_OV_DRIVER_STATE_ACTIVE :
+ ECORE_OV_DRIVER_STATE_DISABLED);
+
+ ecore_ptt_release(hwfn, ptt);
+
+ return status;
+}
+
+static int qed_get_sb_info(struct ecore_dev *edev, struct ecore_sb_info *sb,
+ u16 qid, struct ecore_sb_info_dbg *sb_dbg)
+{
+ struct ecore_hwfn *hwfn = &edev->hwfns[qid % edev->num_hwfns];
+ struct ecore_ptt *ptt;
+ int rc;
+
+ if (IS_VF(edev))
+ return -EINVAL;
+
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, true, "Can't acquire PTT\n");
+ return -EAGAIN;
+ }
+
+ memset(sb_dbg, 0, sizeof(*sb_dbg));
+ rc = ecore_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);
+
+ ecore_ptt_release(hwfn, ptt);
+ return rc;
+}
+
const struct qed_common_ops qed_common_ops_pass = {
INIT_STRUCT_FIELD(probe, &qed_probe),
INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params),
INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start),
- INIT_STRUCT_FIELD(set_id, &qed_set_id),
+ INIT_STRUCT_FIELD(set_name, &qed_set_name),
INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc),
INIT_STRUCT_FIELD(chain_free, &ecore_chain_free),
INIT_STRUCT_FIELD(sb_init, &qed_sb_init),
+ INIT_STRUCT_FIELD(get_sb_info, &qed_get_sb_info),
INIT_STRUCT_FIELD(get_link, &qed_get_current_link),
INIT_STRUCT_FIELD(set_link, &qed_set_link),
INIT_STRUCT_FIELD(drain, &qed_drain),
INIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop),
INIT_STRUCT_FIELD(remove, &qed_remove),
+ INIT_STRUCT_FIELD(send_drv_state, &qed_send_drv_state),
};
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index a34b6659..baea1bb0 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -6,10 +6,9 @@
* See LICENSE.qede_pmd for copyright and licensing details.
*/
+#include <rte_net.h>
#include "qede_rxtx.h"
-static bool gro_disable = 1; /* mod_param */
-
static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
{
struct rte_mbuf *new_mb = NULL;
@@ -69,7 +68,7 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
{
unsigned int i;
- PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);
+ PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs", txq->nb_tx_desc);
if (txq->sw_tx_ring) {
for (i = 0; i < txq->nb_tx_desc; i++) {
@@ -84,16 +83,16 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct qede_dev *qdev = dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- struct rte_eth_dev_data *eth_data = dev->data;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct qede_rx_queue *rxq;
- uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint16_t max_rx_pkt_len;
+ uint16_t bufsz;
size_t size;
- uint16_t data_size;
int rc;
int i;
@@ -127,34 +126,27 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->nb_rx_desc = nb_desc;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
-
- /* Sanity check */
- data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
- RTE_PKTMBUF_HEADROOM;
-
- if (pkt_len > data_size && !dev->data->scattered_rx) {
- DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",
- pkt_len, data_size);
- rte_free(rxq);
- return -EINVAL;
+ max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+ qdev->mtu = max_rx_pkt_len;
+
+ /* Fix up RX buffer size */
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+ if ((rxmode->enable_scatter) ||
+ (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+ if (!dev->data->scattered_rx) {
+ DP_INFO(edev, "Forcing scatter-gather mode\n");
+ dev->data->scattered_rx = 1;
+ }
}
-
if (dev->data->scattered_rx)
- rxq->rx_buf_size = data_size;
+ rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
else
- rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
+ rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
+ /* Align to cache-line size if needed */
+ rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
- qdev->mtu = pkt_len;
-
- DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
- qdev->mtu, rxq->rx_buf_size);
-
- if (pkt_len > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
- DP_NOTICE(edev, false, "jumbo frame enabled\n");
- } else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
- }
+ DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
+ qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
/* Allocate the parallel driver ring for Rx buffers */
size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
@@ -176,7 +168,8 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
ECORE_CHAIN_CNT_TYPE_U16,
rxq->nb_rx_desc,
sizeof(struct eth_rx_bd),
- &rxq->rx_bd_ring);
+ &rxq->rx_bd_ring,
+ NULL);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(edev, false,
@@ -196,7 +189,8 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
ECORE_CHAIN_CNT_TYPE_U16,
rxq->nb_rx_desc,
sizeof(union eth_rx_cqe),
- &rxq->rx_comp_ring);
+ &rxq->rx_comp_ring,
+ NULL);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(edev, false,
@@ -291,7 +285,8 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
ECORE_CHAIN_CNT_TYPE_U16,
txq->nb_tx_desc,
sizeof(union eth_tx_bd_types),
- &txq->tx_pbl);
+ &txq->tx_pbl,
+ NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev,
"Unable to allocate memory for txbd ring on socket %u",
@@ -335,8 +330,8 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
static void qede_init_fp(struct qede_dev *qdev)
{
struct qede_fastpath *fp;
- uint8_t i, rss_id, tc;
- int fp_rx = qdev->fp_num_rx, rxq = 0, txq = 0;
+ uint8_t i;
+ int fp_rx = qdev->fp_num_rx;
memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
sizeof(*qdev->fp_array)));
@@ -356,7 +351,6 @@ static void qede_init_fp(struct qede_dev *qdev)
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
}
- qdev->gro_disable = gro_disable;
}
void qede_free_fp_arrays(struct qede_dev *qdev)
@@ -373,11 +367,9 @@ void qede_free_fp_arrays(struct qede_dev *qdev)
}
}
-int qede_alloc_fp_array(struct qede_dev *qdev)
+static int qede_alloc_fp_array(struct qede_dev *qdev)
{
- struct qede_fastpath *fp;
struct ecore_dev *edev = &qdev->edev;
- int i;
qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
sizeof(*qdev->fp_array),
@@ -483,7 +475,8 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
}
static inline void
-qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
+qede_update_rx_prod(__rte_unused struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
{
uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
@@ -510,104 +503,63 @@ qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
*/
rte_wmb();
- PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u\n", bd_prod, cqe_prod);
-}
-
-static inline uint32_t
-qede_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
-{
- return index % n_rx_rings;
-}
-
-static void qede_prandom_bytes(uint32_t *buff, size_t bytes)
-{
- unsigned int i;
-
- srand((unsigned int)time(NULL));
-
- for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
- buff[i] = rand();
+ PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
}
-static bool
-qede_check_vport_rss_enable(struct rte_eth_dev *eth_dev,
- struct qed_update_vport_rss_params *rss_params)
+static void
+qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
+ uint16_t mtu, bool enable)
{
- struct rte_eth_rss_conf rss_conf;
- enum rte_eth_rx_mq_mode mode = eth_dev->data->dev_conf.rxmode.mq_mode;
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
- uint8_t rss_caps;
- unsigned int i;
- uint64_t hf;
- uint32_t *key;
-
- PMD_INIT_FUNC_TRACE(edev);
-
- rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
- key = (uint32_t *)rss_conf.rss_key;
- hf = rss_conf.rss_hf;
-
- /* Check if RSS conditions are met.
- * Note: Even though its meaningless to enable RSS with one queue, it
- * could be used to produce RSS Hash, so skipping that check.
+ /* Enable LRO in split mode */
+ sge_tpa_params->tpa_ipv4_en_flg = enable;
+ sge_tpa_params->tpa_ipv6_en_flg = enable;
+ sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
+ sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+ /* set if tpa enable changes */
+ sge_tpa_params->update_tpa_en_flg = 1;
+ /* set if tpa parameters should be handled */
+ sge_tpa_params->update_tpa_param_flg = enable;
+
+ sge_tpa_params->max_buffers_per_cqe = 20;
+ /* Enable TPA in split mode. In this mode each TPA segment
+ * starts on the new BD, so there is one BD per segment.
*/
- if (!(mode & ETH_MQ_RX_RSS)) {
- DP_INFO(edev, "RSS flag is not set\n");
- return false;
- }
-
- if (hf == 0) {
- DP_INFO(edev, "Request to disable RSS\n");
- return false;
- }
-
- memset(rss_params, 0, sizeof(*rss_params));
-
- for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
- rss_params->rss_ind_table[i] = qede_rxfh_indir_default(i,
- QEDE_RSS_COUNT(qdev));
-
- if (!key)
- qede_prandom_bytes(rss_params->rss_key,
- sizeof(rss_params->rss_key));
- else
- memcpy(rss_params->rss_key, rss_conf.rss_key,
- rss_conf.rss_key_len);
-
- qede_init_rss_caps(&rss_caps, hf);
-
- rss_params->rss_caps = rss_caps;
-
- DP_INFO(edev, "RSS conditions are met\n");
-
- return true;
+ sge_tpa_params->tpa_pkt_split_flg = 1;
+ sge_tpa_params->tpa_hdr_data_split_flg = 0;
+ sge_tpa_params->tpa_gro_consistent_flg = 0;
+ sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ sge_tpa_params->tpa_max_size = 0x7FFF;
+ sge_tpa_params->tpa_min_size_to_start = mtu / 2;
+ sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}
-static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
+static int qede_start_queues(struct rte_eth_dev *eth_dev,
+ __rte_unused bool clear_stats)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct ecore_queue_start_common_params q_params;
- struct qed_update_vport_rss_params *rss_params = &qdev->rss_params;
struct qed_dev_info *qed_info = &qdev->dev_info.common;
struct qed_update_vport_params vport_update_params;
+ struct ecore_sge_tpa_params tpa_params;
struct qede_tx_queue *txq;
struct qede_fastpath *fp;
dma_addr_t p_phys_table;
int txq_index;
uint16_t page_cnt;
- int vlan_removal_en = 1;
int rc, tc, i;
for_each_queue(i) {
fp = &qdev->fp_array[i];
if (fp->type & QEDE_FASTPATH_RX) {
- p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->
- rx_comp_ring);
- page_cnt = ecore_chain_get_page_cnt(&fp->rxq->
- rx_comp_ring);
+ struct ecore_rxq_start_ret_params ret_params;
+
+ p_phys_table =
+ ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
+ page_cnt =
+ ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
+ memset(&ret_params, 0, sizeof(ret_params));
memset(&q_params, 0, sizeof(q_params));
q_params.queue_id = i;
q_params.vport_id = 0;
@@ -621,13 +573,17 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
fp->rxq->rx_bd_ring.p_phys_addr,
p_phys_table,
page_cnt,
- &fp->rxq->hw_rxq_prod_addr);
+ &ret_params);
if (rc) {
DP_ERR(edev, "Start rxq #%d failed %d\n",
fp->rxq->queue_id, rc);
return rc;
}
+ /* Use the return parameters */
+ fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
+ fp->rxq->handle = ret_params.p_handle;
+
fp->rxq->hw_cons_ptr =
&fp->sb_info->sb_virt->pi_array[RX_PI];
@@ -637,6 +593,8 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
if (!(fp->type & QEDE_FASTPATH_TX))
continue;
for (tc = 0; tc < qdev->num_tc; tc++) {
+ struct ecore_txq_start_ret_params ret_params;
+
txq = fp->txqs[tc];
txq_index = tc * QEDE_RSS_COUNT(qdev) + i;
@@ -644,6 +602,7 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
memset(&q_params, 0, sizeof(q_params));
+ memset(&ret_params, 0, sizeof(ret_params));
q_params.queue_id = txq->queue_id;
q_params.vport_id = 0;
q_params.sb = fp->sb_info->igu_sb_id;
@@ -652,13 +611,16 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
rc = qdev->ops->q_tx_start(edev, i, &q_params,
p_phys_table,
page_cnt, /* **pp_doorbell */
- &txq->doorbell_addr);
+ &ret_params);
if (rc) {
DP_ERR(edev, "Start txq %u failed %d\n",
txq_index, rc);
return rc;
}
+ txq->doorbell_addr = ret_params.p_doorbell;
+ txq->handle = ret_params.p_handle;
+
txq->hw_cons_ptr =
&fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
SET_FIELD(txq->tx_db.data.params,
@@ -688,16 +650,14 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
vport_update_params.tx_switching_flg = 1;
}
- if (qede_check_vport_rss_enable(eth_dev, rss_params)) {
- vport_update_params.update_rss_flg = 1;
- qdev->rss_enabled = 1;
- } else {
- qdev->rss_enabled = 0;
+ /* TPA */
+ if (qdev->enable_lro) {
+ DP_INFO(edev, "Enabling LRO\n");
+ memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+ qede_update_sge_tpa_params(&tpa_params, qdev->mtu, true);
+ vport_update_params.sge_tpa_params = &tpa_params;
}
- rte_memcpy(&vport_update_params.rss_params, rss_params,
- sizeof(*rss_params));
-
rc = qdev->ops->vport_update(edev, &vport_update_params);
if (rc) {
DP_ERR(edev, "Update V-PORT failed %d\n", rc);
@@ -707,79 +667,91 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
return 0;
}
-#ifdef ENC_SUPPORTED
static bool qede_tunn_exist(uint16_t flag)
{
return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
}
-static inline uint8_t qede_check_tunn_csum(uint16_t flag)
+/*
+ * qede_check_tunn_csum_l4:
+ * Returns:
+ * 1 : If L4 csum is enabled AND if the validation has failed.
+ * 0 : Otherwise
+ */
+static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
{
- uint8_t tcsum = 0;
- uint16_t csum_flag = 0;
-
if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
- csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
- PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
-
- if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
- csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
- tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
- }
-
- csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
- PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
- PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
- PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
-
- if (csum_flag & flag)
- return QEDE_CSUM_ERROR;
+ return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
- return QEDE_CSUM_UNNECESSARY | tcsum;
-}
-#else
-static inline uint8_t qede_tunn_exist(uint16_t flag)
-{
return 0;
}
-static inline uint8_t qede_check_tunn_csum(uint16_t flag)
+static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
{
+ if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
+ return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
+
return 0;
}
-#endif
-static inline uint8_t qede_check_notunn_csum(uint16_t flag)
+static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
{
- uint8_t csum = 0;
- uint16_t csum_flag = 0;
-
- if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
- csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
- csum = QEDE_CSUM_UNNECESSARY;
- }
-
- csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
- PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
-
- if (csum_flag & flag)
- return QEDE_CSUM_ERROR;
-
- return csum;
+ uint16_t val;
+
+ /* Lookup table */
+ static const uint32_t
+ ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
+ [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ };
+
+ /* Bits (0..3) provides L3/L4 protocol type */
+ val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
+ PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
+ PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;
+
+ if (val < QEDE_PKT_TYPE_MAX)
+ return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;
+ else
+ return RTE_PTYPE_UNKNOWN;
}
-static inline uint8_t qede_check_csum(uint16_t flag)
+static inline uint8_t
+qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
{
- if (likely(!qede_tunn_exist(flag)))
- return qede_check_notunn_csum(flag);
- else
- return qede_check_tunn_csum(flag);
+ struct ipv4_hdr *ip;
+ uint16_t pkt_csum;
+ uint16_t calc_csum;
+ uint16_t val;
+
+ val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
+
+ if (unlikely(val)) {
+ m->packet_type = qede_rx_cqe_to_pkt_type(flag);
+ if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
+ ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ pkt_csum = ip->hdr_checksum;
+ ip->hdr_checksum = 0;
+ calc_csum = rte_ipv4_cksum(ip);
+ ip->hdr_checksum = pkt_csum;
+ return (calc_csum != pkt_csum);
+ } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
+ return 1;
+ }
+ }
+ return 0;
}
static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
@@ -789,7 +761,7 @@ static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
}
static inline void
-qede_reuse_page(struct qede_dev *qdev,
+qede_reuse_page(__rte_unused struct qede_dev *qdev,
struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
{
struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
@@ -822,56 +794,153 @@ qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
}
}
-static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
+static inline void
+qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ uint8_t agg_index, uint16_t len)
+{
+ struct qede_agg_info *tpa_info;
+ struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
+ uint16_t cons_idx;
+
+ /* Under certain conditions it is possible that FW may not consume
+ * additional or new BD. So decision to consume the BD must be made
+ * based on len_list[0].
+ */
+ if (rte_le_to_cpu_16(len)) {
+ tpa_info = &rxq->tpa_info[agg_index];
+ cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
+ assert(curr_frag);
+ curr_frag->nb_segs = 1;
+ curr_frag->pkt_len = rte_le_to_cpu_16(len);
+ curr_frag->data_len = curr_frag->pkt_len;
+ tpa_info->tpa_tail->next = curr_frag;
+ tpa_info->tpa_tail = curr_frag;
+ qede_rx_bd_ring_consume(rxq);
+ if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
+ PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ }
+ }
+}
+
+static inline void
+qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
- uint32_t p_type;
- /* TBD - L4 indications needed ? */
- uint16_t protocol = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
- PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & flags);
-
- /* protocol = 3 means LLC/SNAP over Ethernet */
- if (unlikely(protocol == 0 || protocol == 3))
- p_type = RTE_PTYPE_UNKNOWN;
- else if (protocol == 1)
- p_type = RTE_PTYPE_L3_IPV4;
- else if (protocol == 2)
- p_type = RTE_PTYPE_L3_IPV6;
-
- return RTE_PTYPE_L2_ETHER | p_type;
+ PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
+ cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
+ /* only len_list[0] will have value */
+ qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+ cqe->len_list[0]);
}
-int qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
- int num_segs, uint16_t pkt_len)
+static inline void
+qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+ struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
+
+ qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+ cqe->len_list[0]);
+ /* Update total length and frags based on end TPA */
+ rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
+ /* TODO: Add Sanity Checks */
+ rx_mb->nb_segs = cqe->num_of_bds;
+ rx_mb->pkt_len = cqe->total_packet_len;
+
+ PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
+ " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
+ rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
+ rx_mb->pkt_len);
+}
+
+static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
+{
+ uint32_t val;
+
+ /* Lookup table */
+ static const uint32_t
+ ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
+ [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
+ [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
+ };
+
+ /* Cover bits[4-0] to include tunn_type and next protocol */
+ val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
+ (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
+
+ if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
+ return ptype_tunn_lkup_tbl[val];
+ else
+ return RTE_PTYPE_UNKNOWN;
+}
+
+static inline int
+qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
+ uint8_t num_segs, uint16_t pkt_len)
{
struct qede_rx_queue *rxq = p_rxq;
struct qede_dev *qdev = rxq->qdev;
- struct ecore_dev *edev = &qdev->edev;
- uint16_t sw_rx_index, cur_size;
-
register struct rte_mbuf *seg1 = NULL;
register struct rte_mbuf *seg2 = NULL;
+ uint16_t sw_rx_index;
+ uint16_t cur_size;
seg1 = rx_mb;
while (num_segs) {
- cur_size = pkt_len > rxq->rx_buf_size ?
- rxq->rx_buf_size : pkt_len;
- if (!cur_size) {
- PMD_RX_LOG(DEBUG, rxq,
- "SG packet, len and num BD mismatch\n");
+ cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
+ pkt_len;
+ if (unlikely(!cur_size)) {
+ PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
+ " left for mapping jumbo", num_segs);
qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
return -EINVAL;
}
-
- if (qede_alloc_rx_buffer(rxq)) {
- uint8_t index;
-
- PMD_RX_LOG(DEBUG, rxq, "Buffer allocation failed\n");
- index = rxq->port_id;
- rte_eth_devices[index].data->rx_mbuf_alloc_failed++;
- rxq->rx_alloc_errors++;
- return -ENOMEM;
- }
-
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
qede_rx_bd_ring_consume(rxq);
@@ -881,16 +950,9 @@ int qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
seg1 = seg1->next;
num_segs--;
rxq->rx_segs++;
- continue;
}
- seg1 = NULL;
-
- if (pkt_len)
- PMD_RX_LOG(DEBUG, rxq,
- "Mapped all BDs of jumbo, but still have %d bytes\n",
- pkt_len);
- return ECORE_SUCCESS;
+ return 0;
}
uint16_t
@@ -903,14 +965,29 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
uint16_t rx_pkt = 0;
union eth_rx_cqe *cqe;
- struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
register struct rte_mbuf *rx_mb = NULL;
register struct rte_mbuf *seg1 = NULL;
enum eth_rx_cqe_type cqe_type;
- uint16_t len, pad, preload_idx, pkt_len, parse_flag;
- uint8_t csum_flag, num_segs;
+ uint16_t pkt_len = 0; /* Sum of all BD segments */
+ uint16_t len; /* Length of first BD */
+ uint8_t num_segs = 1;
+ uint16_t preload_idx;
+ uint16_t parse_flag;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ uint8_t bitfield_val;
enum rss_hash_type htype;
- int ret;
+#endif
+ uint8_t tunn_parse_flag;
+ uint8_t j;
+ struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
+ uint64_t ol_flags;
+ uint32_t packet_type;
+ uint16_t vlan_tci;
+ bool tpa_start_flg;
+ uint8_t offset, tpa_agg_idx, flags;
+ struct qede_agg_info *tpa_info = NULL;
+ uint32_t rss_hash;
hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -921,16 +998,59 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return 0;
while (sw_comp_cons != hw_comp_cons) {
+ ol_flags = 0;
+ packet_type = RTE_PTYPE_UNKNOWN;
+ vlan_tci = 0;
+ tpa_start_flg = false;
+ rss_hash = 0;
+
/* Get the CQE from the completion ring */
cqe =
(union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
cqe_type = cqe->fast_path_regular.type;
-
- if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
- PMD_RX_LOG(DEBUG, rxq, "Got a slowath CQE\n");
-
+ PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
+
+ switch (cqe_type) {
+ case ETH_RX_CQE_TYPE_REGULAR:
+ fp_cqe = &cqe->fast_path_regular;
+ break;
+ case ETH_RX_CQE_TYPE_TPA_START:
+ cqe_start_tpa = &cqe->fast_path_tpa_start;
+ tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
+ tpa_start_flg = true;
+ /* Mark it as LRO packet */
+ ol_flags |= PKT_RX_LRO;
+ /* In split mode, seg_len is same as len_on_first_bd
+ * and ext_bd_len_list will be empty since there are
+ * no additional buffers
+ */
+ PMD_RX_LOG(INFO, rxq,
+ "TPA start[%d] - len_on_first_bd %d header %d"
+ " [bd_list[0] %d], [seg_len %d]\n",
+ cqe_start_tpa->tpa_agg_index,
+ rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
+ cqe_start_tpa->header_len,
+ rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
+ rte_le_to_cpu_16(cqe_start_tpa->seg_len));
+
+ break;
+ case ETH_RX_CQE_TYPE_TPA_CONT:
+ qede_rx_process_tpa_cont_cqe(qdev, rxq,
+ &cqe->fast_path_tpa_cont);
+ goto next_cqe;
+ case ETH_RX_CQE_TYPE_TPA_END:
+ qede_rx_process_tpa_end_cqe(qdev, rxq,
+ &cqe->fast_path_tpa_end);
+ tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
+ tpa_info = &rxq->tpa_info[tpa_agg_idx];
+ rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
+ goto tpa_end;
+ case ETH_RX_CQE_TYPE_SLOW_PATH:
+ PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
qdev->ops->eth_cqe_completion(edev, fp->id,
(struct eth_slow_path_rx_cqe *)cqe);
+ /* fall-thru */
+ default:
goto next_cqe;
}
@@ -939,32 +1059,96 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
assert(rx_mb != NULL);
- /* non GRO */
- fp_cqe = &cqe->fast_path_regular;
-
- len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
- pad = fp_cqe->placement_offset;
- assert((len + pad) <= rx_mb->buf_len);
-
- PMD_RX_LOG(DEBUG, rxq,
- "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
- " len = %u, parsing_flags = %d\n",
- cqe_type, fp_cqe->bitfields,
- rte_le_to_cpu_16(fp_cqe->vlan_tag),
- len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));
-
- /* If this is an error packet then drop it */
- parse_flag =
- rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);
- csum_flag = qede_check_csum(parse_flag);
- if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
- PMD_RX_LOG(ERR, rxq,
- "CQE in CONS = %u has error, flags = 0x%x "
- "dropping incoming packet\n",
- sw_comp_cons, parse_flag);
- rxq->rx_hw_errors++;
- qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
- goto next_cqe;
+ /* Handle regular CQE or TPA start CQE */
+ if (!tpa_start_flg) {
+ parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
+ offset = fp_cqe->placement_offset;
+ len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
+ pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+ vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+ rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ bitfield_val = fp_cqe->bitfields;
+ htype = (uint8_t)GET_FIELD(bitfield_val,
+ ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+#endif
+ } else {
+ parse_flag =
+ rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
+ offset = cqe_start_tpa->placement_offset;
+ /* seg_len = len_on_first_bd */
+ len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
+ vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ bitfield_val = cqe_start_tpa->bitfields;
+ htype = (uint8_t)GET_FIELD(bitfield_val,
+ ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE);
+#endif
+ rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
+ }
+ if (qede_tunn_exist(parse_flag)) {
+ PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
+ if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "L4 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (tpa_start_flg)
+ flags =
+ cqe_start_tpa->tunnel_pars_flags.flags;
+ else
+ flags = fp_cqe->tunnel_pars_flags.flags;
+ tunn_parse_flag = flags;
+ packet_type =
+ qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
+ }
+ } else {
+ PMD_RX_LOG(INFO, rxq, "Rx non-tunneled packet\n");
+ if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "L4 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+ if (unlikely(qede_check_notunn_csum_l3(rx_mb,
+ parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "IP csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ packet_type =
+ qede_rx_cqe_to_pkt_type(parse_flag);
+ }
+ }
+
+ if (CQE_HAS_VLAN(parse_flag)) {
+ ol_flags |= PKT_RX_VLAN_PKT;
+ if (qdev->vlan_strip_flg) {
+ ol_flags |= PKT_RX_VLAN_STRIPPED;
+ rx_mb->vlan_tci = vlan_tci;
+ }
+ }
+ if (CQE_HAS_OUTER_VLAN(parse_flag)) {
+ ol_flags |= PKT_RX_QINQ_PKT;
+ if (qdev->vlan_strip_flg) {
+ rx_mb->vlan_tci = vlan_tci;
+ ol_flags |= PKT_RX_QINQ_STRIPPED;
+ }
+ rx_mb->vlan_tci_outer = 0;
+ }
+ /* RSS Hash */
+ if (qdev->rss_enable) {
+ ol_flags |= PKT_RX_RSS_HASH;
+ rx_mb->hash.rss = rss_hash;
}
if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
@@ -977,73 +1161,66 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxq->rx_alloc_errors++;
break;
}
-
qede_rx_bd_ring_consume(rxq);
- if (fp_cqe->bd_num > 1) {
- pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+ if (!tpa_start_flg && fp_cqe->bd_num > 1) {
+ PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
+ " len on first: %04x Total Len: %04x",
+ fp_cqe->bd_num, len, pkt_len);
num_segs = fp_cqe->bd_num - 1;
-
- rxq->rx_segs++;
-
- pkt_len -= len;
seg1 = rx_mb;
- ret = qede_process_sg_pkts(p_rxq, seg1, num_segs,
- pkt_len);
- if (ret != ECORE_SUCCESS) {
- qede_recycle_rx_bd_ring(rxq, qdev,
- fp_cqe->bd_num);
+ if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
+ pkt_len - len))
goto next_cqe;
+ for (j = 0; j < num_segs; j++) {
+ if (qede_alloc_rx_buffer(rxq)) {
+ PMD_RX_LOG(ERR, rxq,
+ "Buffer allocation failed");
+ rte_eth_devices[rxq->port_id].
+ data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ break;
+ }
+ rxq->rx_segs++;
}
}
+ rxq->rx_segs++; /* for the first segment */
/* Prefetch next mbuf while processing current one. */
preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
- /* Update MBUF fields */
- rx_mb->ol_flags = 0;
- rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
- rx_mb->nb_segs = fp_cqe->bd_num;
- rx_mb->data_len = len;
- rx_mb->pkt_len = fp_cqe->pkt_len;
+ /* Update rest of the MBUF fields */
+ rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
rx_mb->port = rxq->port_id;
- rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag);
-
- htype = (uint8_t)GET_FIELD(fp_cqe->bitfields,
- ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
- if (qdev->rss_enabled && htype) {
- rx_mb->ol_flags |= PKT_RX_RSS_HASH;
- rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
- PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x\n",
- rx_mb->hash.rss);
+ rx_mb->ol_flags = ol_flags;
+ rx_mb->data_len = len;
+ rx_mb->packet_type = packet_type;
+ PMD_RX_LOG(INFO, rxq,
+ "pkt_type 0x%04x len %u hash_type %d hash_val 0x%x"
+ " ol_flags 0x%04lx\n",
+ packet_type, len, htype, rx_mb->hash.rss,
+ (unsigned long)ol_flags);
+ if (!tpa_start_flg) {
+ rx_mb->nb_segs = fp_cqe->bd_num;
+ rx_mb->pkt_len = pkt_len;
+ } else {
+ /* store ref to the updated mbuf */
+ tpa_info->tpa_head = rx_mb;
+ tpa_info->tpa_tail = tpa_info->tpa_head;
}
-
rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
-
- if (CQE_HAS_VLAN(parse_flag)) {
- rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
- rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
+tpa_end:
+ if (!tpa_start_flg) {
+ rx_pkts[rx_pkt] = rx_mb;
+ rx_pkt++;
}
-
- if (CQE_HAS_OUTER_VLAN(parse_flag)) {
- /* FW does not provide indication of Outer VLAN tag,
- * which is always stripped, so vlan_tci_outer is set
- * to 0. Here vlan_tag represents inner VLAN tag.
- */
- rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
- rx_mb->ol_flags |= PKT_RX_QINQ_PKT;
- rx_mb->vlan_tci_outer = 0;
- }
-
- rx_pkts[rx_pkt] = rx_mb;
- rx_pkt++;
next_cqe:
ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
if (rx_pkt == nb_pkts) {
PMD_RX_LOG(DEBUG, rxq,
- "Budget reached nb_pkts=%u received=%u\n",
+ "Budget reached nb_pkts=%u received=%u",
rx_pkt, nb_pkts);
break;
}
@@ -1053,108 +1230,100 @@ next_cqe:
rxq->rcv_pkts += rx_pkt;
- PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());
+ PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
return rx_pkt;
}
-static inline int
-qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
+static inline void
+qede_free_tx_pkt(struct qede_tx_queue *txq)
{
- uint16_t nb_segs, idx = TX_CONS(txq);
- struct eth_tx_bd *tx_data_bd;
- struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;
-
- if (unlikely(!mbuf)) {
- PMD_TX_LOG(ERR, txq, "null mbuf\n");
- PMD_TX_LOG(ERR, txq,
- "tx_desc %u tx_avail %u tx_cons %u tx_prod %u\n",
- txq->nb_tx_desc, txq->nb_tx_avail, idx,
- TX_PROD(txq));
- return -1;
- }
+ struct rte_mbuf *mbuf;
+ uint16_t nb_segs;
+ uint16_t idx;
- nb_segs = mbuf->nb_segs;
- while (nb_segs) {
- /* It's like consuming rxbuf in recv() */
+ idx = TX_CONS(txq);
+ mbuf = txq->sw_tx_ring[idx].mbuf;
+ if (mbuf) {
+ nb_segs = mbuf->nb_segs;
+ PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
+ while (nb_segs) {
+ /* It's like consuming rxbuf in recv() */
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+ nb_segs--;
+ }
+ rte_pktmbuf_free(mbuf);
+ txq->sw_tx_ring[idx].mbuf = NULL;
+ txq->sw_tx_cons++;
+ PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
+ } else {
ecore_chain_consume(&txq->tx_pbl);
txq->nb_tx_avail++;
- nb_segs--;
}
- rte_pktmbuf_free(mbuf);
- txq->sw_tx_ring[idx].mbuf = NULL;
-
- return 0;
}
-static inline uint16_t
-qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
+static inline void
+qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
+ struct qede_tx_queue *txq)
{
- uint16_t tx_compl = 0;
uint16_t hw_bd_cons;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ uint16_t sw_tx_cons;
+#endif
- hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
rte_compiler_barrier();
-
- while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
- if (qede_free_tx_pkt(edev, txq)) {
- PMD_TX_LOG(ERR, txq,
- "hw_bd_cons = %u, chain_cons = %u\n",
- hw_bd_cons,
- ecore_chain_get_cons_idx(&txq->tx_pbl));
- break;
- }
- txq->sw_tx_cons++; /* Making TXD available */
- tx_compl++;
- }
-
- PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n",
- tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);
- return tx_compl;
+ hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
+ PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
+ abs(hw_bd_cons - sw_tx_cons));
+#endif
+ while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
+ qede_free_tx_pkt(txq);
}
/* Populate scatter gather buffer descriptor fields */
-static inline uint16_t qede_encode_sg_bd(struct qede_tx_queue *p_txq,
- struct rte_mbuf *m_seg,
- uint16_t count,
- struct eth_tx_1st_bd *bd1)
+static inline uint8_t
+qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
+ struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3)
{
struct qede_tx_queue *txq = p_txq;
- struct eth_tx_2nd_bd *bd2 = NULL;
- struct eth_tx_3rd_bd *bd3 = NULL;
struct eth_tx_bd *tx_bd = NULL;
- uint16_t nb_segs = count;
dma_addr_t mapping;
+ uint8_t nb_segs = 0;
/* Check for scattered buffers */
while (m_seg) {
- if (nb_segs == 1) {
- bd2 = (struct eth_tx_2nd_bd *)
- ecore_chain_produce(&txq->tx_pbl);
- memset(bd2, 0, sizeof(*bd2));
+ if (nb_segs == 0) {
+ if (!*bd2) {
+ *bd2 = (struct eth_tx_2nd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
+ nb_segs++;
+ }
mapping = rte_mbuf_data_dma_addr(m_seg);
- bd2->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
- bd2->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
- bd2->nbytes = rte_cpu_to_le_16(m_seg->data_len);
- } else if (nb_segs == 2) {
- bd3 = (struct eth_tx_3rd_bd *)
- ecore_chain_produce(&txq->tx_pbl);
- memset(bd3, 0, sizeof(*bd3));
+ QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
+ } else if (nb_segs == 1) {
+ if (!*bd3) {
+ *bd3 = (struct eth_tx_3rd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
+ nb_segs++;
+ }
mapping = rte_mbuf_data_dma_addr(m_seg);
- bd3->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
- bd3->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
- bd3->nbytes = rte_cpu_to_le_16(m_seg->data_len);
+ QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
} else {
tx_bd = (struct eth_tx_bd *)
ecore_chain_produce(&txq->tx_pbl);
memset(tx_bd, 0, sizeof(*tx_bd));
+ nb_segs++;
mapping = rte_mbuf_data_dma_addr(m_seg);
- tx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
- tx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
- tx_bd->nbytes = rte_cpu_to_le_16(m_seg->data_len);
+ QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
}
- nb_segs++;
- bd1->data.nbds = nb_segs;
m_seg = m_seg->next;
}
@@ -1162,88 +1331,314 @@ static inline uint16_t qede_encode_sg_bd(struct qede_tx_queue *p_txq,
return nb_segs;
}
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+static inline void
+print_tx_bd_info(struct qede_tx_queue *txq,
+ struct eth_tx_1st_bd *bd1,
+ struct eth_tx_2nd_bd *bd2,
+ struct eth_tx_3rd_bd *bd3,
+ uint64_t tx_ol_flags)
+{
+ char ol_buf[256] = { 0 }; /* for verbose prints */
+
+ if (bd1)
+ PMD_TX_LOG(INFO, txq,
+ "BD1: nbytes=%u nbds=%u bd_flags=04%x bf=%04x",
+ rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
+ bd1->data.bd_flags.bitfields,
+ rte_cpu_to_le_16(bd1->data.bitfields));
+ if (bd2)
+ PMD_TX_LOG(INFO, txq,
+ "BD2: nbytes=%u bf=%04x\n",
+ rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1);
+ if (bd3)
+ PMD_TX_LOG(INFO, txq,
+ "BD3: nbytes=%u bf=%04x mss=%u\n",
+ rte_cpu_to_le_16(bd3->nbytes),
+ rte_cpu_to_le_16(bd3->data.bitfields),
+ rte_cpu_to_le_16(bd3->data.lso_mss));
+
+ rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
+ PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
+}
+#endif
+
+/* TX prepare to check packets meets TX conditions */
+uint16_t
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct qede_tx_queue *txq = p_txq;
+#else
+qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+#endif
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+ uint16_t i;
+ int ret;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
+ rte_errno = -EINVAL;
+ break;
+ }
+ /* TBD: confirm its ~9700B for both ? */
+ if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
+ rte_errno = -EINVAL;
+ break;
+ }
+ } else {
+ if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
+ rte_errno = -EINVAL;
+ break;
+ }
+ }
+ if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ break;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ break;
+ }
+#endif
+ /* TBD: pseudo csum calcuation required iff
+ * ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE not set?
+ */
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ break;
+ }
+ }
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ if (unlikely(i != nb_pkts))
+ PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
+ nb_pkts - i);
+#endif
+ return i;
+}
+
uint16_t
qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct qede_tx_queue *txq = p_txq;
struct qede_dev *qdev = txq->qdev;
struct ecore_dev *edev = &qdev->edev;
- struct qede_fastpath *fp;
- struct eth_tx_1st_bd *bd1;
+ struct rte_mbuf *mbuf;
struct rte_mbuf *m_seg = NULL;
uint16_t nb_tx_pkts;
- uint16_t nb_pkt_sent = 0;
uint16_t bd_prod;
uint16_t idx;
- uint16_t tx_count;
- uint16_t nb_segs = 0;
-
- fp = &qdev->fp_array[QEDE_RSS_COUNT(qdev) + txq->queue_id];
+ uint16_t nb_frags;
+ uint16_t nb_pkt_sent = 0;
+ uint8_t nbds;
+ bool ipv6_ext_flg;
+ bool lso_flg;
+ bool tunn_flg;
+ struct eth_tx_1st_bd *bd1;
+ struct eth_tx_2nd_bd *bd2;
+ struct eth_tx_3rd_bd *bd3;
+ uint64_t tx_ol_flags;
+ uint16_t hdr_size;
if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
- PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
+ PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
- (void)qede_process_tx_compl(edev, txq);
- }
-
- nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail /
- ETH_TX_MAX_BDS_PER_NON_LSO_PACKET));
- if (unlikely(nb_tx_pkts == 0)) {
- PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
- nb_pkts, txq->nb_tx_avail);
- return 0;
+ qede_process_tx_compl(edev, txq);
}
- tx_count = nb_tx_pkts;
+ nb_tx_pkts = nb_pkts;
+ bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
while (nb_tx_pkts--) {
+ /* Init flags/values */
+ ipv6_ext_flg = false;
+ tunn_flg = false;
+ lso_flg = false;
+ nbds = 0;
+ bd1 = NULL;
+ bd2 = NULL;
+ bd3 = NULL;
+ hdr_size = 0;
+
+ mbuf = *tx_pkts++;
+ assert(mbuf);
+
+ /* Check minimum TX BDS availability against available BDs */
+ if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
+ break;
+
+ tx_ol_flags = mbuf->ol_flags;
+
+#define RTE_ETH_IS_IPV6_HDR_EXT(ptype) ((ptype) & RTE_PTYPE_L3_IPV6_EXT)
+ if (RTE_ETH_IS_IPV6_HDR_EXT(mbuf->packet_type))
+ ipv6_ext_flg = true;
+
+ if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type))
+ tunn_flg = true;
+
+ if (tx_ol_flags & PKT_TX_TCP_SEG)
+ lso_flg = true;
+
+ if (lso_flg) {
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_LSO_PKT))
+ break;
+ } else {
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
+ break;
+ }
+
+ if (tunn_flg && ipv6_ext_flg) {
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
+ break;
+ }
+ if (ipv6_ext_flg) {
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT))
+ break;
+ }
+
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = TX_PROD(txq);
- struct rte_mbuf *mbuf = *tx_pkts++;
-
txq->sw_tx_ring[idx].mbuf = mbuf;
+
+ /* BD1 */
bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
- /* Zero init struct fields */
- bd1->data.bd_flags.bitfields = 0;
- bd1->data.bitfields = 0;
+ memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
+ nbds++;
- bd1->data.bd_flags.bitfields =
+ bd1->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
- /* Map MBUF linear data for DMA and set in the first BD */
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
- mbuf->pkt_len);
+ /* FW 8.10.x specific change */
+ if (!lso_flg) {
+ bd1->data.bitfields |=
+ (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
+ << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ /* Map MBUF linear data for DMA and set in the BD1 */
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ mbuf->data_len);
+ } else {
+ /* For LSO, packet header and payload must reside on
+ * buffers pointed by different BDs. Using BD1 for HDR
+ * and BD2 onwards for data.
+ */
+ hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ hdr_size);
+ }
+
+ if (tunn_flg) {
+ /* First indicate its a tunnel pkt */
+ bd1->data.bd_flags.bitfields |=
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
+ /* Legacy FW had flipped behavior in regard to this bit
+ * i.e. it needed to set to prevent FW from touching
+ * encapsulated packets when it didn't need to.
+ */
+ if (unlikely(txq->is_legacy))
+ bd1->data.bitfields ^=
+ 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
+ /* Outer IP checksum offload */
+ if (tx_ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ bd1->data.bd_flags.bitfields |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+ }
+
+ /* Outer UDP checksum offload */
+ bd1->data.bd_flags.bitfields |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+ }
/* Descriptor based VLAN insertion */
- if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
bd1->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
}
+ if (lso_flg)
+ bd1->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
+
/* Offload the IP checksum in the hardware */
- if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
+ if ((lso_flg) || (tx_ol_flags & PKT_TX_IP_CKSUM))
bd1->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- }
/* L4 checksum offload (tcp or udp) */
- if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+ if ((lso_flg) || (tx_ol_flags & (PKT_TX_TCP_CKSUM |
+ PKT_TX_UDP_CKSUM)))
+ /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
bd1->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
- /* IPv6 + extn. -> later */
+
+ /* BD2 */
+ if (lso_flg || ipv6_ext_flg) {
+ bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
+ (&txq->tx_pbl);
+ memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
+ nbds++;
+ QEDE_BD_SET_ADDR_LEN(bd2,
+ (hdr_size +
+ rte_mbuf_data_dma_addr(mbuf)),
+ mbuf->data_len - hdr_size);
+ /* TBD: check pseudo csum iff tx_prepare not called? */
+ if (ipv6_ext_flg) {
+ bd2->data.bitfields1 |=
+ ETH_L4_PSEUDO_CSUM_ZERO_LENGTH <<
+ ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
+ }
+ }
+
+ /* BD3 */
+ if (lso_flg || ipv6_ext_flg) {
+ bd3 = (struct eth_tx_3rd_bd *)ecore_chain_produce
+ (&txq->tx_pbl);
+ memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
+ nbds++;
+ if (lso_flg) {
+ bd3->data.lso_mss =
+ rte_cpu_to_le_16(mbuf->tso_segsz);
+ /* Using one header BD */
+ bd3->data.bitfields |=
+ rte_cpu_to_le_16(1 <<
+ ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+ }
}
/* Handle fragmented MBUF */
m_seg = mbuf->next;
- nb_segs++;
- bd1->data.nbds = nb_segs;
/* Encode scatter gather buffer descriptors if required */
- nb_segs = qede_encode_sg_bd(txq, m_seg, nb_segs, bd1);
- txq->nb_tx_avail = txq->nb_tx_avail - nb_segs;
- nb_segs = 0;
+ nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3);
+ bd1->data.nbds = nbds + nb_frags;
+ txq->nb_tx_avail -= bd1->data.nbds;
txq->sw_tx_prod++;
rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
bd_prod =
rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
+ PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d ipv6_ext=%d\n",
+ lso_flg, tunn_flg, ipv6_ext_flg);
+#endif
nb_pkt_sent++;
txq->xmit_pkts++;
}
@@ -1252,14 +1647,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq->tx_db.data.bd_prod = bd_prod;
rte_wmb();
rte_compiler_barrier();
- DIRECT_REG_WR(edev, txq->doorbell_addr, txq->tx_db.raw);
+ DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
rte_wmb();
/* Check again for Tx completions */
- (void)qede_process_tx_compl(edev, txq);
+ qede_process_tx_compl(edev, txq);
- PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n",
- nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());
+ PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
+ nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
return nb_pkt_sent;
}
@@ -1268,7 +1663,7 @@ static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct qede_fastpath *fp;
- uint8_t i, rss_id, txq_index, tc;
+ uint8_t i, txq_index, tc;
int rxq = 0, txq = 0;
for_each_queue(i) {
@@ -1284,6 +1679,8 @@ static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
fp->txqs[tc] =
eth_dev->data->tx_queues[txq_index];
fp->txqs[tc]->queue_id = txq_index;
+ if (qdev->dev_info.is_legacy)
+ fp->txqs[tc]->is_legacy = true;
}
txq++;
}
@@ -1294,9 +1691,7 @@ int qede_dev_start(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- struct qed_link_output link_output;
- struct qede_fastpath *fp;
- int rc, i;
+ int rc;
DP_INFO(edev, "Device state is %d\n", qdev->state);
@@ -1315,13 +1710,18 @@ int qede_dev_start(struct rte_eth_dev *eth_dev)
return rc;
}
+ /* Newer SR-IOV PF driver expects RX/TX queues to be started before
+ * enabling RSS. Hence RSS configuration is deferred upto this point.
+ * Also, we would like to retain similar behavior in PF case, so we
+ * don't do PF/VF specific check here.
+ */
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (qede_config_rss(eth_dev))
+ return -1;
+
/* Bring-up the link */
qede_dev_set_link_state(eth_dev, true);
- /* Reset ring */
- if (qede_reset_fp_rings(qdev))
- return -ENOMEM;
-
/* Start/resume traffic */
qdev->ops->fastpath_start(edev);
@@ -1342,8 +1742,7 @@ static int qede_drain_txq(struct qede_dev *qdev,
qede_process_tx_compl(edev, txq);
if (!cnt) {
if (allow_drain) {
- DP_NOTICE(edev, false,
- "Tx queue[%u] is stuck,"
+ DP_ERR(edev, "Tx queue[%u] is stuck,"
"requesting MCP to drain\n",
txq->queue_id);
rc = qdev->ops->common->drain(edev);
@@ -1351,13 +1750,11 @@ static int qede_drain_txq(struct qede_dev *qdev,
return rc;
return qede_drain_txq(qdev, txq, false);
}
-
- DP_NOTICE(edev, false,
- "Timeout waiting for tx queue[%d]:"
+ DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
"PROD=%d, CONS=%d\n",
txq->queue_id, txq->sw_tx_prod,
txq->sw_tx_cons);
- return -ENODEV;
+ return -1;
}
cnt--;
DELAY(1000);
@@ -1374,6 +1771,8 @@ static int qede_stop_queues(struct qede_dev *qdev)
{
struct qed_update_vport_params vport_update_params;
struct ecore_dev *edev = &qdev->edev;
+ struct ecore_sge_tpa_params tpa_params;
+ struct qede_fastpath *fp;
int rc, tc, i;
/* Disable the vport */
@@ -1382,9 +1781,15 @@ static int qede_stop_queues(struct qede_dev *qdev)
vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 0;
vport_update_params.update_rss_flg = 0;
+ /* Disable TPA */
+ if (qdev->enable_lro) {
+ DP_INFO(edev, "Disabling LRO\n");
+ memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+ qede_update_sge_tpa_params(&tpa_params, qdev->mtu, false);
+ vport_update_params.sge_tpa_params = &tpa_params;
+ }
DP_INFO(edev, "Deactivate vport\n");
-
rc = qdev->ops->vport_update(edev, &vport_update_params);
if (rc) {
DP_ERR(edev, "Failed to update vport\n");
@@ -1395,7 +1800,7 @@ static int qede_stop_queues(struct qede_dev *qdev)
/* Flush Tx queues. If needed, request drain from MCP */
for_each_queue(i) {
- struct qede_fastpath *fp = &qdev->fp_array[i];
+ fp = &qdev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
for (tc = 0; tc < qdev->num_tc; tc++) {
@@ -1410,23 +1815,17 @@ static int qede_stop_queues(struct qede_dev *qdev)
/* Stop all Queues in reverse order */
for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
- struct qed_stop_rxq_params rx_params;
+ fp = &qdev->fp_array[i];
/* Stop the Tx Queue(s) */
if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
for (tc = 0; tc < qdev->num_tc; tc++) {
- struct qed_stop_txq_params tx_params;
- u8 val;
-
- tx_params.rss_id = i;
- val = qdev->fp_array[i].txqs[tc]->queue_id;
- tx_params.tx_queue_id = val;
-
+ struct qede_tx_queue *txq = fp->txqs[tc];
DP_INFO(edev, "Stopping tx queues\n");
- rc = qdev->ops->q_tx_stop(edev, &tx_params);
+ rc = qdev->ops->q_tx_stop(edev, i, txq->handle);
if (rc) {
DP_ERR(edev, "Failed to stop TXQ #%d\n",
- tx_params.tx_queue_id);
+ i);
return rc;
}
}
@@ -1434,20 +1833,15 @@ static int qede_stop_queues(struct qede_dev *qdev)
/* Stop the Rx Queue */
if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
- memset(&rx_params, 0, sizeof(rx_params));
- rx_params.rss_id = i;
- rx_params.rx_queue_id = qdev->fp_array[i].rxq->queue_id;
- rx_params.eq_completion_only = 1;
-
DP_INFO(edev, "Stopping rx queues\n");
-
- rc = qdev->ops->q_rx_stop(edev, &rx_params);
+ rc = qdev->ops->q_rx_stop(edev, i, fp->rxq->handle);
if (rc) {
DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
return rc;
}
}
}
+ qede_reset_fp_rings(qdev);
return 0;
}
@@ -1506,10 +1900,14 @@ void qede_free_mem_load(struct rte_eth_dev *eth_dev)
for_each_queue(id) {
fp = &qdev->fp_array[id];
if (fp->type & QEDE_FASTPATH_RX) {
+ if (!fp->rxq)
+ continue;
qede_rx_queue_release(fp->rxq);
eth_dev->data->rx_queues[id] = NULL;
} else {
for (tc = 0; tc < qdev->num_tc; tc++) {
+ if (!fp->txqs[tc])
+ continue;
txq_idx = fp->txqs[tc]->queue_id;
qede_tx_queue_release(fp->txqs[tc]);
eth_dev->data->tx_queues[txq_idx] = NULL;
@@ -1544,3 +1942,11 @@ void qede_dev_stop(struct rte_eth_dev *eth_dev)
DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
}
+
+uint16_t
+qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+ __rte_unused struct rte_mbuf **pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index ed9a529b..a1bbd256 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -41,10 +41,6 @@
(bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
(bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
(bd)->nbytes = rte_cpu_to_le_16(len); \
- /* FW 8.10.x specific change */ \
- (bd)->data.bitfields = ((len) & \
- ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) \
- << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; \
} while (0)
#define CQE_HAS_VLAN(flags) \
@@ -55,18 +51,29 @@
((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
+#define QEDE_MIN_RX_BUFF_SIZE (1024)
+#define QEDE_VLAN_TAG_SIZE (4)
+#define QEDE_LLC_SNAP_HDR_LEN (8)
+
/* Max supported alignment is 256 (8 shift)
* minimal alignment shift 6 is optimal for 57xxx HW performance
*/
#define QEDE_L1_CACHE_SHIFT 6
#define QEDE_RX_ALIGN_SHIFT (RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
-
-#define QEDE_ETH_OVERHEAD (ETHER_HDR_LEN + 8 + 8 + QEDE_FW_RX_ALIGN_END)
-
-/* TBD: Excluding IPV6 */
-#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP)
+#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
+ ~(QEDE_FW_RX_ALIGN_END - 1))
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
+#define QEDE_ETH_OVERHEAD ((ETHER_HDR_LEN) + ((2 * QEDE_VLAN_TAG_SIZE)) \
+ + (QEDE_LLC_SNAP_HDR_LEN))
+
+#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
+ ETH_RSS_NONFRAG_IPV4_TCP |\
+ ETH_RSS_NONFRAG_IPV4_UDP |\
+ ETH_RSS_IPV6 |\
+ ETH_RSS_NONFRAG_IPV6_TCP |\
+ ETH_RSS_NONFRAG_IPV6_UDP |\
+ ETH_RSS_VXLAN)
#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
@@ -74,6 +81,64 @@
#define for_each_queue(i) for (i = 0; i < qdev->num_queues; i++)
+
+/* Macros for non-tunnel packet types lkup table */
+#define QEDE_PKT_TYPE_UNKNOWN 0x0
+#define QEDE_PKT_TYPE_MAX 0xf
+#define QEDE_PKT_TYPE_IPV4 0x1
+#define QEDE_PKT_TYPE_IPV6 0x2
+#define QEDE_PKT_TYPE_IPV4_TCP 0x5
+#define QEDE_PKT_TYPE_IPV6_TCP 0x6
+#define QEDE_PKT_TYPE_IPV4_UDP 0x9
+#define QEDE_PKT_TYPE_IPV6_UDP 0xa
+
+/* Macros for tunneled packets with next protocol lkup table */
+#define QEDE_PKT_TYPE_TUNN_GENEVE 0x1
+#define QEDE_PKT_TYPE_TUNN_GRE 0x2
+#define QEDE_PKT_TYPE_TUNN_VXLAN 0x3
+
+/* Bit 2 is don't care bit */
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE 0x9
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE 0xa
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN 0xb
+
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE 0xd
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE 0xe
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN 0xf
+
+
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE 0x11
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE 0x12
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN 0x13
+
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE 0x15
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE 0x16
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN 0x17
+
+
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE 0x19
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE 0x1a
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN 0x1b
+
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE 0x1d
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE 0x1e
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN 0x1f
+
+#define QEDE_PKT_TYPE_TUNN_MAX_TYPE 0x20 /* 2^5 */
+
+#define QEDE_TX_CSUM_OFFLOAD_MASK (PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_CKSUM | \
+ PKT_TX_UDP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG)
+
+#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
+ PKT_TX_QINQ_PKT | \
+ PKT_TX_VLAN_PKT)
+
+#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
+
/*
* RX BD descriptor ring
*/
@@ -83,6 +148,12 @@ struct qede_rx_entry {
/* allows expansion .. */
};
+/* TPA related structures */
+struct qede_agg_info {
+ struct rte_mbuf *tpa_head; /* Pointer to first TPA segment */
+ struct rte_mbuf *tpa_tail; /* Pointer to last TPA segment */
+};
+
/*
* Structure associated with each RX queue.
*/
@@ -103,7 +174,9 @@ struct qede_rx_queue {
uint64_t rx_segs;
uint64_t rx_hw_errors;
uint64_t rx_alloc_errors;
+ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
struct qede_dev *qdev;
+ void *handle;
};
/*
@@ -133,7 +206,9 @@ struct qede_tx_queue {
volatile union db_prod tx_db;
uint16_t port_id;
uint64_t xmit_pkts;
+ bool is_legacy;
struct qede_dev *qdev;
+ void *handle;
};
struct qede_fastpath {
@@ -177,9 +252,16 @@ void qede_free_mem_load(struct rte_eth_dev *eth_dev);
uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+ __rte_unused struct rte_mbuf **pkts,
+ __rte_unused uint16_t nb_pkts);
+
/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);
diff --git a/drivers/net/ring/Makefile b/drivers/net/ring/Makefile
index ae835052..b7e1a378 100644
--- a/drivers/net/ring/Makefile
+++ b/drivers/net/ring/Makefile
@@ -53,9 +53,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += rte_eth_ring.c
#
SYMLINK-y-include += rte_eth_ring.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_eal lib/librte_ring
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_mbuf lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index c1767c48..87d22581 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -75,7 +75,6 @@ struct pmd_internals {
};
-static const char *drivername = "Rings PMD";
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
@@ -89,7 +88,7 @@ eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts.cnt += nb_rx;
else
@@ -103,7 +102,7 @@ eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SP_ENQ) {
r->tx_pkts.cnt += nb_tx;
r->err_pkts.cnt += nb_bufs - nb_tx;
@@ -173,13 +172,11 @@ eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
- dev_info->driver_name = drivername;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
}
static void
@@ -227,12 +224,13 @@ eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
{
}
-static void
+static int
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
struct ether_addr *mac_addr __rte_unused,
uint32_t index __rte_unused,
uint32_t vmdq __rte_unused)
{
+ return -ENOTSUP;
}
static void
@@ -259,6 +257,8 @@ static const struct eth_dev_ops ops = {
.mac_addr_add = eth_mac_addr_add,
};
+static struct rte_vdev_driver pmd_ring_drv;
+
static int
do_eth_dev_ring_create(const char *name,
struct rte_ring * const rx_queues[], const unsigned nb_rx_queues,
@@ -339,15 +339,12 @@ do_eth_dev_ring_create(const char *name,
data->mac_addrs = &internals->address;
eth_dev->data = data;
- eth_dev->driver = NULL;
eth_dev->dev_ops = &ops;
data->dev_flags = RTE_ETH_DEV_DETACHABLE;
data->kdrv = RTE_KDRV_NONE;
- data->drv_name = drivername;
+ data->drv_name = pmd_ring_drv.driver.name;
data->numa_node = numa_node;
- TAILQ_INIT(&(eth_dev->link_intr_cbs));
-
/* finally assign rx and tx ops */
eth_dev->rx_pkt_burst = eth_ring_rx;
eth_dev->tx_pkt_burst = eth_ring_tx;
@@ -505,12 +502,16 @@ out:
}
static int
-rte_pmd_ring_probe(const char *name, const char *params)
+rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
+ const char *name, *params;
struct rte_kvargs *kvlist = NULL;
int ret = 0;
struct node_action_list *info = NULL;
+ name = rte_vdev_device_name(dev);
+ params = rte_vdev_device_args(dev);
+
RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);
if (params == NULL || params[0] == '\0') {
@@ -580,8 +581,9 @@ out_free:
}
static int
-rte_pmd_ring_remove(const char *name)
+rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
+ const char *name = rte_vdev_device_name(dev);
struct rte_eth_dev *eth_dev = NULL;
struct pmd_internals *internals = NULL;
struct ring_queue *r = NULL;
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
new file mode 100644
index 00000000..57aa963b
--- /dev/null
+++ b/drivers/net/sfc/Makefile
@@ -0,0 +1,143 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2016-2017 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_sfc_efx.a
+
+CFLAGS += -I$(SRCDIR)/base/
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+# Strict-aliasing rules are violated by rte_eth_link to uint64_t casts
+CFLAGS += -Wno-strict-aliasing
+
+# Enable extra warnings
+CFLAGS += -Wextra
+
+# More warnings not enabled by above aggregators
+CFLAGS += -Wdisabled-optimization
+
+# Extra CFLAGS for base driver files
+CFLAGS_BASE_DRIVER += -Wno-sign-compare
+CFLAGS_BASE_DRIVER += -Wno-unused-parameter
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+
+# Compiler and version dependent flags
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS += -Waggregate-return
+CFLAGS += -Wnested-externs
+CFLAGS_BASE_DRIVER += -Wno-empty-body
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+CFLAGS += -Waggregate-return
+CFLAGS += -Wbad-function-cast
+CFLAGS_BASE_DRIVER += -Wno-empty-body
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+endif
+
+#
+# List of base driver object files for which
+# special CFLAGS above should be applied
+#
+BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+$(foreach obj, $(BASE_DRIVER_OBJS), \
+ $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+EXPORT_MAP := rte_pmd_sfc_efx_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_kvargs.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_port.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tso.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_dp.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_tx.c
+
+VPATH += $(SRCDIR)/base
+
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_bootcfg.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_crc32.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_ev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_hash.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_lic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mon.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_nvram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_port.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_sram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_vpd.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += mcdi_mon.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_nvram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_sram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_vpd.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_ev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nvram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_vpd.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += hunt_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford_nic.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/sfc/base/README b/drivers/net/sfc/base/README
new file mode 100644
index 00000000..9019e8ba
--- /dev/null
+++ b/drivers/net/sfc/base/README
@@ -0,0 +1,36 @@
+
+ Copyright (c) 2006-2016 Solarflare Communications Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Solarflare libefx driver library
+================================
+
+This directory contains source code of Solarflare Communications libefx
+driver library of version v4.10.0.1012.
+
+Updating
+========
+
+The source code in this directory should not be modified.
+Please contact the driver maintainers to request changes.
diff --git a/drivers/net/sfc/base/ef10_ev.c b/drivers/net/sfc/base/ef10_ev.c
new file mode 100644
index 00000000..35226749
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_ev.c
@@ -0,0 +1,1401 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_STATS
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_QSTATS
+#define EFX_EV_QSTAT_INCR(_eep, _stat) \
+ do { \
+ (_eep)->ee_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_EV_QSTAT_INCR(_eep, _stat)
+#endif
+
+/*
+ * Non-interrupting event queue requires interrrupting event queue to
+ * refer to for wake-up events even if wake ups are never used.
+ * It could be even non-allocated event queue.
+ */
+#define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0)
+
+static __checkReturn boolean_t
+ef10_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_set_evq_tmr(
+ __in efx_nic_t *enp,
+ __in uint32_t instance,
+ __in uint32_t mode,
+ __in uint32_t timer_ns)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
+ MC_CMD_SET_EVQ_TMR_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_EVQ_TMR;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_evq(
+ __in efx_nic_t *enp,
+ __in unsigned int instance,
+ __in efsys_mem_t *esmp,
+ __in size_t nevs,
+ __in uint32_t irq,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in boolean_t low_latency)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[
+ MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+ MC_CMD_INIT_EVQ_OUT_LEN)];
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ boolean_t interrupting;
+ int ev_cut_through;
+ efx_rc_t rc;
+
+ npages = EFX_EVQ_NBUFS(nevs);
+ if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);
+
+ interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+ /*
+ * On Huntington RX and TX event batching can only be requested together
+ * (even if the datapath firmware doesn't actually support RX
+ * batching). If event cut through is enabled no RX batching will occur.
+ *
+ * So always enable RX and TX event batching, and enable event cut
+ * through if we want low latency operation.
+ */
+ switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
+ case EFX_EVQ_FLAGS_TYPE_AUTO:
+ ev_cut_through = low_latency ? 1 : 0;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
+ ev_cut_through = 0;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
+ ev_cut_through = 1;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+ MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
+ INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
+ INIT_EVQ_IN_FLAG_INT_ARMD, 0,
+ INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1);
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail3;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
+ }
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail4;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail5;
+ }
+
+ /* NOTE: ignore the returned IRQ param as firmware does not set it. */
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_evq_v2(
+ __in efx_nic_t *enp,
+ __in unsigned int instance,
+ __in efsys_mem_t *esmp,
+ __in size_t nevs,
+ __in uint32_t irq,
+ __in uint32_t us,
+ __in uint32_t flags)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[
+ MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+ MC_CMD_INIT_EVQ_V2_OUT_LEN)];
+ boolean_t interrupting;
+ unsigned int evq_type;
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ efx_rc_t rc;
+
+ npages = EFX_EVQ_NBUFS(nevs);
+ if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);
+
+ interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+ switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
+ case EFX_EVQ_FLAGS_TYPE_AUTO:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+ MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
+ INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
+ INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
+ INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
+ INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail3;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
+ }
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail4;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail5;
+ }
+
+ /* NOTE: ignore the returned IRQ param as firmware does not set it. */
+
+ EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
+ MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_evq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
+ MC_CMD_FINI_EVQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+ __checkReturn efx_rc_t
+ef10_ev_init(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+}
+
+ void
+ef10_ev_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t irq;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
+
+ if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (index >= encp->enc_evq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ /* Set up the handler table */
+ eep->ee_rx = ef10_ev_rx;
+ eep->ee_tx = ef10_ev_tx;
+ eep->ee_driver = ef10_ev_driver;
+ eep->ee_drv_gen = ef10_ev_drv_gen;
+ eep->ee_mcdi = ef10_ev_mcdi;
+
+ /* Set up the event queue */
+ /* INIT_EVQ expects function-relative vector number */
+ if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
+ irq = index;
+ } else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
+ irq = index;
+ flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
+ } else {
+ irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
+ }
+
+ /*
+ * Interrupts may be raised for events immediately after the queue is
+ * created. See bug58606.
+ */
+
+ if (encp->enc_init_evq_v2_supported) {
+ /*
+ * On Medford the low latency license is required to enable RX
+ * and event cut through and to disable RX batching. If event
+ * queue type in flags is auto, we let the firmware decide the
+ * settings to use. If the adapter has a low latency license,
+ * it will choose the best settings for low latency, otherwise
+ * it will choose the best settings for throughput.
+ */
+ rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
+ if (rc != 0)
+ goto fail4;
+ } else {
+ /*
+ * On Huntington we need to specify the settings to use.
+ * If event queue type in flags is auto, we favour throughput
+ * if the adapter is running virtualization supporting firmware
+ * (i.e. the full featured firmware variant)
+ * and latency otherwise. The Ethernet Virtual Bridging
+ * capability is used to make this decision. (Note though that
+ * the low latency firmware variant is also best for
+ * throughput and corresponding type should be specified
+ * to choose it.)
+ */
+ boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
+ rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
+ low_latency);
+ if (rc != 0)
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t rptr;
+ efx_dword_t dword;
+
+ rptr = count & eep->ee_mask;
+
+ if (enp->en_nic_cfg.enc_bug35388_workaround) {
+ EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
+ (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
+ EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
+ (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
+
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
+ ERF_DD_EVQ_IND_RPTR,
+ (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
+ EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ &dword, B_FALSE);
+
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
+ ERF_DD_EVQ_IND_RPTR,
+ rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+ EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ &dword, B_FALSE);
+ } else {
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
+ &dword, B_FALSE);
+ }
+
+ return (0);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_driver_event(
+ __in efx_nic_t *enp,
+ __in uint32_t evq,
+ __in efx_qword_t data)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
+ MC_CMD_DRIVER_EVENT_OUT_LEN)];
+ efx_rc_t rc;
+
+ req.emr_cmd = MC_CMD_DRIVER_EVENT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);
+
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
+ EFX_QWORD_FIELD(data, EFX_DWORD_0));
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
+ EFX_QWORD_FIELD(data, EFX_DWORD_1));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_3(event,
+ ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
+ ESF_DZ_DRV_SUB_CODE, 0,
+ ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);
+
+ (void) efx_mcdi_driver_event(enp, eep->ee_index, event);
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_dword_t dword;
+ uint32_t mode;
+ efx_rc_t rc;
+
+ /* Check that hardware and MCDI use the same timer MODE values */
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ mode = FFE_CZ_TIMER_MODE_DIS;
+ } else {
+ mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
+ }
+
+ if (encp->enc_bug61265_workaround) {
+ uint32_t ns = us * 1000;
+
+ rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
+ if (rc != 0)
+ goto fail2;
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail3;
+
+ if (encp->enc_bug35388_workaround) {
+ EFX_POPULATE_DWORD_3(dword,
+ ERF_DD_EVQ_IND_TIMER_FLAGS,
+ EFE_DD_EVQ_IND_TIMER_FLAGS,
+ ERF_DD_EVQ_IND_TIMER_MODE, mode,
+ ERF_DD_EVQ_IND_TIMER_VAL, ticks);
+ EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
+ eep->ee_index, &dword, 0);
+ } else {
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DZ_TC_TIMER_MODE, mode,
+ ERF_DZ_TC_TIMER_VAL, ticks);
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
+ eep->ee_index, &dword, 0);
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < EV_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
+ eep->ee_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+static __checkReturn boolean_t
+ef10_ev_rx_packed_stream(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t label;
+ uint32_t next_read_lbits;
+ uint16_t flags;
+ boolean_t should_abort;
+ efx_evq_rxq_state_t *eersp;
+ unsigned int pkt_count;
+ unsigned int current_id;
+ boolean_t new_buffer;
+
+ next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
+ new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);
+
+ flags = 0;
+
+ eersp = &eep->ee_rxq_state[label];
+ pkt_count = (EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS) + 1 +
+ next_read_lbits - eersp->eers_rx_stream_npackets) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ eersp->eers_rx_stream_npackets += pkt_count;
+
+ if (new_buffer) {
+ flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
+ if (eersp->eers_rx_packed_stream_credits <
+ EFX_RX_PACKED_STREAM_MAX_CREDITS)
+ eersp->eers_rx_packed_stream_credits++;
+ eersp->eers_rx_read_ptr++;
+ }
+ current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;
+
+ /* Check for errors that invalidate checksum and L3/L4 fields */
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
+ /* RX frame truncated (error flag is misnamed) */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
+ /* Bad Ethernet frame CRC */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
+ flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
+ goto deliver;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+
+deliver:
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
+ should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
+ flags);
+
+ return (should_abort);
+}
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+static __checkReturn boolean_t
+ef10_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t size;
+ uint32_t label;
+ uint32_t mac_class;
+ uint32_t eth_tag_class;
+ uint32_t l3_class;
+ uint32_t l4_class;
+ uint32_t next_read_lbits;
+ uint16_t flags;
+ boolean_t cont;
+ boolean_t should_abort;
+ efx_evq_rxq_state_t *eersp;
+ unsigned int desc_count;
+ unsigned int last_used_id;
+
+ EFX_EV_QSTAT_INCR(eep, EV_RX);
+
+ /* Discard events after RXQ/TXQ errors */
+ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+ return (B_FALSE);
+
+ /* Basic packet information */
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
+ eersp = &eep->ee_rxq_state[label];
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+ /*
+ * Packed stream events are very different,
+ * so handle them separately
+ */
+ if (eersp->eers_rx_packed_stream)
+ return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
+#endif
+
+ size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
+ next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
+ eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
+ mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
+ l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
+ l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
+ cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
+ /* Drop this event */
+ return (B_FALSE);
+ }
+ flags = 0;
+
+ if (cont != 0) {
+ /*
+ * This may be part of a scattered frame, or it may be a
+ * truncated frame if scatter is disabled on this RXQ.
+ * Overlength frames can be received if e.g. a VF is configured
+ * for 1500 MTU but connected to a port set to 9000 MTU
+ * (see bug56567).
+ * FIXME: There is not yet any driver that supports scatter on
+ * Huntington. Scatter support is required for OSX.
+ */
+ flags |= EFX_PKT_CONT;
+ }
+
+ if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
+ flags |= EFX_PKT_UNICAST;
+
+ /* Increment the count of descriptors read */
+ desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ eersp->eers_rx_read_ptr += desc_count;
+
+ /*
+ * FIXME: add error checking to make sure this a batched event.
+ * This could also be an aborted scatter, see Bug36629.
+ */
+ if (desc_count > 1) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
+ flags |= EFX_PKT_PREFIX_LEN;
+ }
+
+ /* Calculate the index of the last descriptor consumed */
+ last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;
+
+ /* Check for errors that invalidate checksum and L3/L4 fields */
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
+ /* RX frame truncated (error flag is misnamed) */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
+ /* Bad Ethernet frame CRC */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
+ /*
+ * Hardware parse failed, due to malformed headers
+ * or headers that are too long for the parser.
+ * Headers and checksums must be validated by the host.
+ */
+ /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
+ goto deliver;
+ }
+
+ if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
+ (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
+ flags |= EFX_PKT_VLAN_TAGGED;
+ }
+
+ switch (l3_class) {
+ case ESE_DZ_L3_CLASS_IP4:
+ case ESE_DZ_L3_CLASS_IP4_FRAG:
+ flags |= EFX_PKT_IPV4;
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+ } else {
+ flags |= EFX_CKSUM_IPV4;
+ }
+
+ if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
+ flags |= EFX_PKT_TCP;
+ } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
+ flags |= EFX_PKT_UDP;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
+ }
+ break;
+
+ case ESE_DZ_L3_CLASS_IP6:
+ case ESE_DZ_L3_CLASS_IP6_FRAG:
+ flags |= EFX_PKT_IPV6;
+
+ if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
+ flags |= EFX_PKT_TCP;
+ } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
+ flags |= EFX_PKT_UDP;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
+ }
+ break;
+
+ default:
+ EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
+ break;
+ }
+
+ if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+ } else {
+ flags |= EFX_CKSUM_TCPUDP;
+ }
+ }
+
+deliver:
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ EFSYS_ASSERT(eecp->eec_rx != NULL);
+ should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t id;
+ uint32_t label;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX);
+
+ /* Discard events after RXQ/TXQ errors */
+ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+ return (B_FALSE);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
+ /* Drop this event */
+ return (B_FALSE);
+ }
+
+ /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
+
+ EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
+
+ EFSYS_ASSERT(eecp->eec_tx != NULL);
+ should_abort = eecp->eec_tx(arg, label, id);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ unsigned int code;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
+ should_abort = B_FALSE;
+
+ code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
+ switch (code) {
+ case ESE_DZ_DRV_TIMER_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);
+
+ EFSYS_ASSERT(eecp->eec_timer != NULL);
+ should_abort = eecp->eec_timer(arg, id);
+ break;
+ }
+
+ case ESE_DZ_DRV_WAKE_UP_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);
+
+ EFSYS_ASSERT(eecp->eec_wake_up != NULL);
+ should_abort = eecp->eec_wake_up(arg, id);
+ break;
+ }
+
+ case ESE_DZ_DRV_START_UP_EV:
+ EFSYS_ASSERT(eecp->eec_initialized != NULL);
+ should_abort = eecp->eec_initialized(arg);
+ break;
+
+ default:
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ break;
+ }
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t data;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
+ should_abort = B_FALSE;
+
+ data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
+ if (data >= ((uint32_t)1 << 16)) {
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ return (B_TRUE);
+ }
+
+ EFSYS_ASSERT(eecp->eec_software != NULL);
+ should_abort = eecp->eec_software(arg, (uint16_t)data);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ unsigned int code;
+ boolean_t should_abort = B_FALSE;
+
+ EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
+
+ code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ efx_mcdi_ev_death(enp, EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(enp,
+ MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
+ MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
+ MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
+ break;
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ case MCDI_EVENT_CODE_PROXY_RESPONSE:
+ /*
+ * This event notifies a function that an authorization request
+ * has been processed. If the request was authorized then the
+ * function can now re-send the original MCDI request.
+ * See SF-113652-SW "SR-IOV Proxied Network Access Control".
+ */
+ efx_mcdi_ev_proxy_response(enp,
+ MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
+ MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
+ break;
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+ case MCDI_EVENT_CODE_LINKCHANGE: {
+ efx_link_mode_t link_mode;
+
+ ef10_phy_link_ev(enp, eqp, &link_mode);
+ should_abort = eecp->eec_link_change(arg, link_mode);
+ break;
+ }
+
+ case MCDI_EVENT_CODE_SENSOREVT: {
+#if EFSYS_OPT_MON_STATS
+ efx_mon_stat_t id;
+ efx_mon_stat_value_t value;
+ efx_rc_t rc;
+
+ /* Decode monitor stat for MCDI sensor (if supported) */
+ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
+ /* Report monitor stat change */
+ should_abort = eecp->eec_monitor(arg, id, value);
+ } else if (rc == ENOTSUP) {
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_SENSOREVT,
+ MCDI_EV_FIELD(eqp, DATA));
+ } else {
+ EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
+ }
+#endif
+ break;
+ }
+
+ case MCDI_EVENT_CODE_SCHEDERR:
+ /* Informational only */
+ break;
+
+ case MCDI_EVENT_CODE_REBOOT:
+ /* Falcon/Siena only (should not been seen with Huntington). */
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MC_REBOOT:
+ /* MC_REBOOT event is used for Huntington (EF10) and later. */
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+#if EFSYS_OPT_MAC_STATS
+ if (eecp->eec_mac_stats != NULL) {
+ eecp->eec_mac_stats(arg,
+ MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
+ }
+#endif
+ break;
+
+ case MCDI_EVENT_CODE_FWALERT: {
+ uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
+
+ if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_FWALERT_SRAM,
+ MCDI_EV_FIELD(eqp, FWALERT_DATA));
+ else
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_FWALERT,
+ MCDI_EV_FIELD(eqp, DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_TX_ERR: {
+ /*
+ * After a TXQ error is detected, firmware sends a TX_ERR event.
+ * This may be followed by TX completions (which we discard),
+ * and then finally by a TX_FLUSH event. Firmware destroys the
+ * TXQ automatically after sending the TX_FLUSH event.
+ */
+ enp->en_reset_flags |= EFX_RESET_TXQ_ERR;
+
+ EFSYS_PROBE2(tx_descq_err,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ /* Inform the driver that a reset is required. */
+ eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
+ MCDI_EV_FIELD(eqp, TX_ERR_DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_TX_FLUSH: {
+ uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);
+
+ /*
+ * EF10 firmware sends two TX_FLUSH events: one to the txq's
+ * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
+ * We want to wait for all completions, so ignore the events
+ * with TX_FLUSH_TO_DRIVER.
+ */
+ if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
+ should_abort = B_FALSE;
+ break;
+ }
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
+
+ EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
+ should_abort = eecp->eec_txq_flush_done(arg, txq_index);
+ break;
+ }
+
+ case MCDI_EVENT_CODE_RX_ERR: {
+ /*
+ * After an RXQ error is detected, firmware sends an RX_ERR
+ * event. This may be followed by RX events (which we discard),
+ * and then finally by an RX_FLUSH event. Firmware destroys the
+ * RXQ automatically after sending the RX_FLUSH event.
+ */
+ enp->en_reset_flags |= EFX_RESET_RXQ_ERR;
+
+ EFSYS_PROBE2(rx_descq_err,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ /* Inform the driver that a reset is required. */
+ eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
+ MCDI_EV_FIELD(eqp, RX_ERR_DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_RX_FLUSH: {
+ uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);
+
+ /*
+ * EF10 firmware sends two RX_FLUSH events: one to the rxq's
+ * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
+ * We want to wait for all completions, so ignore the events
+ * with RX_FLUSH_TO_DRIVER.
+ */
+ if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
+ should_abort = B_FALSE;
+ break;
+ }
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
+
+ EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
+ should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
+ break;
+ }
+
+ default:
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ break;
+ }
+
+ return (should_abort);
+}
+
+ void
+ef10_ev_rxlabel_init(
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp,
+ __in unsigned int label,
+ __in boolean_t packed_stream)
+{
+ efx_evq_rxq_state_t *eersp;
+
+ EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+ eersp = &eep->ee_rxq_state[label];
+
+ EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+ /*
+ * For packed stream modes, the very first event will
+ * have a new buffer flag set, so it will be incremented,
+ * yielding the correct pointer. That results in a simpler
+ * code than trying to detect start-of-the-world condition
+ * in the event handler.
+ */
+ eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
+#else
+ eersp->eers_rx_read_ptr = 0;
+#endif
+ eersp->eers_rx_mask = erp->er_mask;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ eersp->eers_rx_stream_npackets = 0;
+ eersp->eers_rx_packed_stream = packed_stream;
+ if (packed_stream) {
+ eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
+ (EFX_RX_PACKED_STREAM_MEM_PER_CREDIT /
+ EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
+ EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
+ /*
+ * A single credit is allocated to the queue when it is started.
+ * It is immediately spent by the first packet which has NEW
+ * BUFFER flag set, though, but still we shall take into
+ * account, as to not wrap around the maximum number of credits
+ * accidentally
+ */
+ eersp->eers_rx_packed_stream_credits--;
+ EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
+ EFX_RX_PACKED_STREAM_MAX_CREDITS);
+ }
+#else
+ EFSYS_ASSERT(!packed_stream);
+#endif
+}
+
+ void
+ef10_ev_rxlabel_fini(
+ __in efx_evq_t *eep,
+ __in unsigned int label)
+{
+ efx_evq_rxq_state_t *eersp;
+
+ EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+ eersp = &eep->ee_rxq_state[label];
+
+ EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
+
+ eersp->eers_rx_read_ptr = 0;
+ eersp->eers_rx_mask = 0;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ eersp->eers_rx_stream_npackets = 0;
+ eersp->eers_rx_packed_stream = B_FALSE;
+ eersp->eers_rx_packed_stream_credits = 0;
+#endif
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_filter.c b/drivers/net/sfc/base/ef10_filter.c
new file mode 100644
index 00000000..695bb847
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_filter.c
@@ -0,0 +1,1501 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_FILTER
+
+#define EFE_SPEC(eftp, index) ((eftp)->eft_entry[(index)].efe_spec)
+
+static efx_filter_spec_t *
+ef10_filter_entry_spec(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ return ((efx_filter_spec_t *)(EFE_SPEC(eftp, index) &
+ ~(uintptr_t)EFX_EF10_FILTER_FLAGS));
+}
+
+static boolean_t
+ef10_filter_entry_is_busy(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_BUSY)
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+static boolean_t
+ef10_filter_entry_is_auto_old(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_AUTO_OLD)
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+static void
+ef10_filter_set_entry(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index,
+ __in_opt const efx_filter_spec_t *efsp)
+{
+ EFE_SPEC(eftp, index) = (uintptr_t)efsp;
+}
+
+static void
+ef10_filter_set_entry_busy(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
+}
+
+static void
+ef10_filter_set_entry_not_busy(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
+}
+
+static void
+ef10_filter_set_entry_auto_old(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
+ EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
+}
+
+static void
+ef10_filter_set_entry_not_auto_old(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_init(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *eftp;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+#define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match))
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_IP));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_IP));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_MAC));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_PORT));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_UNKNOWN_MCAST_DST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST));
+ EFX_STATIC_ASSERT((uint32_t)EFX_FILTER_MATCH_UNKNOWN_UCAST_DST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST));
+#undef MATCH_MASK
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp);
+
+ if (!eftp) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_filter.ef_ef10_filter_table = eftp;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_filter_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_filter.ef_ef10_filter_table != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t),
+ enp->en_filter.ef_ef10_filter_table);
+ }
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_filter_op_add(
+ __in efx_nic_t *enp,
+ __in efx_filter_spec_t *spec,
+ __in unsigned int filter_op,
+ __inout ef10_filter_handle_t *handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,
+ MC_CMD_FILTER_OP_OUT_LEN)];
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FILTER_OP;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;
+
+ switch (filter_op) {
+ case MC_CMD_FILTER_OP_IN_OP_REPLACE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO,
+ handle->efh_lo);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI,
+ handle->efh_hi);
+ /* Fall through */
+ case MC_CMD_FILTER_OP_IN_OP_INSERT:
+ case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, filter_op);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_MATCH_FIELDS,
+ spec->efs_match_flags);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_DEST,
+ MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_QUEUE,
+ spec->efs_dmaq_id);
+ if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_CONTEXT,
+ spec->efs_rss_context);
+ }
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_MODE,
+ spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ?
+ MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_TX_DEST,
+ MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+
+ if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) {
+ /*
+ * NOTE: Unlike most MCDI requests, the filter fields
+ * are presented in network (big endian) byte order.
+ */
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_MAC),
+ spec->efs_rem_mac, EFX_MAC_ADDR_LEN);
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_MAC),
+ spec->efs_loc_mac, EFX_MAC_ADDR_LEN);
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_SRC_PORT,
+ __CPU_TO_BE_16(spec->efs_rem_port));
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_DST_PORT,
+ __CPU_TO_BE_16(spec->efs_loc_port));
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_ETHER_TYPE,
+ __CPU_TO_BE_16(spec->efs_ether_type));
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_INNER_VLAN,
+ __CPU_TO_BE_16(spec->efs_inner_vid));
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_OUTER_VLAN,
+ __CPU_TO_BE_16(spec->efs_outer_vid));
+
+ /* IP protocol (in low byte, high byte is zero) */
+ MCDI_IN_SET_BYTE(req, FILTER_OP_IN_IP_PROTO,
+ spec->efs_ip_proto);
+
+ EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) ==
+ MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
+ EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) ==
+ MC_CMD_FILTER_OP_IN_DST_IP_LEN);
+
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_IP),
+ &spec->efs_rem_host.eo_byte[0],
+ MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_IP),
+ &spec->efs_loc_host.eo_byte[0],
+ MC_CMD_FILTER_OP_IN_DST_IP_LEN);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_LO);
+ handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_HI);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_filter_op_delete(
+ __in efx_nic_t *enp,
+ __in unsigned int filter_op,
+ __inout ef10_filter_handle_t *handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,
+ MC_CMD_FILTER_OP_OUT_LEN)];
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FILTER_OP;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;
+
+ switch (filter_op) {
+ case MC_CMD_FILTER_OP_IN_OP_REMOVE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE);
+ break;
+ case MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO, handle->efh_lo);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI, handle->efh_hi);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn boolean_t
+ef10_filter_equal(
+ __in const efx_filter_spec_t *left,
+ __in const efx_filter_spec_t *right)
+{
+ /* FIXME: Consider rx vs tx filters (look at efs_flags) */
+ if (left->efs_match_flags != right->efs_match_flags)
+ return (B_FALSE);
+ if (!EFX_OWORD_IS_EQUAL(left->efs_rem_host, right->efs_rem_host))
+ return (B_FALSE);
+ if (!EFX_OWORD_IS_EQUAL(left->efs_loc_host, right->efs_loc_host))
+ return (B_FALSE);
+ if (memcmp(left->efs_rem_mac, right->efs_rem_mac, EFX_MAC_ADDR_LEN))
+ return (B_FALSE);
+ if (memcmp(left->efs_loc_mac, right->efs_loc_mac, EFX_MAC_ADDR_LEN))
+ return (B_FALSE);
+ if (left->efs_rem_port != right->efs_rem_port)
+ return (B_FALSE);
+ if (left->efs_loc_port != right->efs_loc_port)
+ return (B_FALSE);
+ if (left->efs_inner_vid != right->efs_inner_vid)
+ return (B_FALSE);
+ if (left->efs_outer_vid != right->efs_outer_vid)
+ return (B_FALSE);
+ if (left->efs_ether_type != right->efs_ether_type)
+ return (B_FALSE);
+ if (left->efs_ip_proto != right->efs_ip_proto)
+ return (B_FALSE);
+
+ return (B_TRUE);
+
+}
+
+static __checkReturn boolean_t
+ef10_filter_same_dest(
+ __in const efx_filter_spec_t *left,
+ __in const efx_filter_spec_t *right)
+{
+ if ((left->efs_flags & EFX_FILTER_FLAG_RX_RSS) &&
+ (right->efs_flags & EFX_FILTER_FLAG_RX_RSS)) {
+ if (left->efs_rss_context == right->efs_rss_context)
+ return (B_TRUE);
+ } else if ((~(left->efs_flags) & EFX_FILTER_FLAG_RX_RSS) &&
+ (~(right->efs_flags) & EFX_FILTER_FLAG_RX_RSS)) {
+ if (left->efs_dmaq_id == right->efs_dmaq_id)
+ return (B_TRUE);
+ }
+ return (B_FALSE);
+}
+
+static __checkReturn uint32_t
+ef10_filter_hash(
+ __in efx_filter_spec_t *spec)
+{
+ EFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t))
+ == 0);
+ EFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) %
+ sizeof (uint32_t)) == 0);
+
+ /*
+ * As the area of the efx_filter_spec_t we need to hash is DWORD
+ * aligned and an exact number of DWORDs in size we can use the
+ * optimised efx_hash_dwords() rather than efx_hash_bytes()
+ */
+ return (efx_hash_dwords((const uint32_t *)&spec->efs_outer_vid,
+ (sizeof (efx_filter_spec_t) -
+ EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) /
+ sizeof (uint32_t), 0));
+}
+
+/*
+ * Decide whether a filter should be exclusive or else should allow
+ * delivery to additional recipients. Currently we decide that
+ * filters for specific local unicast MAC and IP addresses are
+ * exclusive.
+ */
+static __checkReturn boolean_t
+ef10_filter_is_exclusive(
+ __in efx_filter_spec_t *spec)
+{
+ if ((spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC) &&
+ !EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac))
+ return (B_TRUE);
+
+ if ((spec->efs_match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV4) &&
+ ((spec->efs_loc_host.eo_u8[0] & 0xf) != 0xe))
+ return (B_TRUE);
+ if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV6) &&
+ (spec->efs_loc_host.eo_u8[0] != 0xff))
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_restore(
+ __in efx_nic_t *enp)
+{
+ int tbl_id;
+ efx_filter_spec_t *spec;
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ boolean_t restoring;
+ efsys_lock_state_t state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) {
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ spec = ef10_filter_entry_spec(eftp, tbl_id);
+ if (spec == NULL) {
+ restoring = B_FALSE;
+ } else if (ef10_filter_entry_is_busy(eftp, tbl_id)) {
+ /* Ignore busy entries. */
+ restoring = B_FALSE;
+ } else {
+ ef10_filter_set_entry_busy(eftp, tbl_id);
+ restoring = B_TRUE;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (restoring == B_FALSE)
+ continue;
+
+ if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_INSERT,
+ &eftp->eft_entry[tbl_id].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,
+ &eftp->eft_entry[tbl_id].efe_handle);
+ }
+
+ if (rc != 0)
+ goto fail1;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ ef10_filter_set_entry_not_busy(eftp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * An arbitrary search limit for the software hash table. As per the linux net
+ * driver.
+ */
+#define EF10_FILTER_SEARCH_LIMIT 200
+
+static __checkReturn efx_rc_t
+ef10_filter_add_internal(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace,
+ __out_opt uint32_t *filter_id)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *saved_spec;
+ uint32_t hash;
+ unsigned int depth;
+ int ins_index;
+ boolean_t replacing = B_FALSE;
+ unsigned int i;
+ efsys_lock_state_t state;
+ boolean_t locked = B_FALSE;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+#if EFSYS_OPT_RX_SCALE
+ spec->efs_rss_context = enp->en_rss_context;
+#endif
+
+ hash = ef10_filter_hash(spec);
+
+ /*
+ * FIXME: Add support for inserting filters of different priorities
+ * and removing lower priority multicast filters (bug 42378)
+ */
+
+ /*
+ * Find any existing filters with the same match tuple or
+ * else a free slot to insert at. If any of them are busy,
+ * we have to wait and retry.
+ */
+ for (;;) {
+ ins_index = -1;
+ depth = 1;
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ for (;;) {
+ i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
+ saved_spec = ef10_filter_entry_spec(eftp, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0) {
+ ins_index = i;
+ }
+ } else if (ef10_filter_equal(spec, saved_spec)) {
+ if (ef10_filter_entry_is_busy(eftp, i))
+ break;
+ if (saved_spec->efs_priority
+ == EFX_FILTER_PRI_AUTO) {
+ ins_index = i;
+ goto found;
+ } else if (ef10_filter_is_exclusive(spec)) {
+ if (may_replace) {
+ ins_index = i;
+ goto found;
+ } else {
+ rc = EEXIST;
+ goto fail1;
+ }
+ }
+
+ /* Leave existing */
+ }
+
+ /*
+ * Once we reach the maximum search depth, use
+ * the first suitable slot or return EBUSY if
+ * there was none.
+ */
+ if (depth == EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = EBUSY;
+ goto fail2;
+ }
+ goto found;
+ }
+ depth++;
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+ }
+
+found:
+ /*
+ * Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = ef10_filter_entry_spec(eftp, ins_index);
+ if (saved_spec) {
+ if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) {
+ /* This is a filter we are refreshing */
+ ef10_filter_set_entry_not_auto_old(eftp, ins_index);
+ goto out_unlock;
+
+ }
+ replacing = B_TRUE;
+ } else {
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), saved_spec);
+ if (!saved_spec) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+ *saved_spec = *spec;
+ ef10_filter_set_entry(eftp, ins_index, saved_spec);
+ }
+ ef10_filter_set_entry_busy(eftp, ins_index);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ /*
+ * On replacing the filter handle may change after after a successful
+ * replace operation.
+ */
+ if (replacing) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_REPLACE,
+ &eftp->eft_entry[ins_index].efe_handle);
+ } else if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_INSERT,
+ &eftp->eft_entry[ins_index].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,
+ &eftp->eft_entry[ins_index].efe_handle);
+ }
+
+ if (rc != 0)
+ goto fail4;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ if (replacing) {
+ /* Update the fields that may differ */
+ saved_spec->efs_priority = spec->efs_priority;
+ saved_spec->efs_flags = spec->efs_flags;
+ saved_spec->efs_rss_context = spec->efs_rss_context;
+ saved_spec->efs_dmaq_id = spec->efs_dmaq_id;
+ }
+
+ ef10_filter_set_entry_not_busy(eftp, ins_index);
+
+out_unlock:
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ if (filter_id)
+ *filter_id = ins_index;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+ if (!replacing) {
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), saved_spec);
+ saved_spec = NULL;
+ }
+ ef10_filter_set_entry_not_busy(eftp, ins_index);
+ ef10_filter_set_entry(eftp, ins_index, NULL);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ if (locked)
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace)
+{
+ efx_rc_t rc;
+
+ rc = ef10_filter_add_internal(enp, spec, may_replace, NULL);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+ef10_filter_delete_internal(
+ __in efx_nic_t *enp,
+ __in uint32_t filter_id)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *spec;
+ efsys_lock_state_t state;
+ uint32_t filter_idx = filter_id % EFX_EF10_FILTER_TBL_ROWS;
+
+ /*
+ * Find the software table entry and mark it busy. Don't
+ * remove it yet; any attempt to update while we're waiting
+ * for the firmware must find the busy entry.
+ *
+ * FIXME: What if the busy flag is never cleared?
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ while (ef10_filter_entry_is_busy(table, filter_idx)) {
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_SPIN(1);
+ EFSYS_LOCK(enp->en_eslp, state);
+ }
+ if ((spec = ef10_filter_entry_spec(table, filter_idx)) != NULL) {
+ ef10_filter_set_entry_busy(table, filter_idx);
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (spec == NULL) {
+ rc = ENOENT;
+ goto fail1;
+ }
+
+ /*
+ * Try to remove the hardware filter. This may fail if the MC has
+ * rebooted (which frees all hardware filter resources).
+ */
+ if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_delete(enp,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE,
+ &table->eft_entry[filter_idx].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_delete(enp,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE,
+ &table->eft_entry[filter_idx].efe_handle);
+ }
+
+ /* Free the software table entry */
+ EFSYS_LOCK(enp->en_eslp, state);
+ ef10_filter_set_entry_not_busy(table, filter_idx);
+ ef10_filter_set_entry(table, filter_idx, NULL);
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec);
+
+ /* Check result of hardware filter removal */
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *saved_spec;
+ unsigned int hash;
+ unsigned int depth;
+ unsigned int i;
+ efsys_lock_state_t state;
+ boolean_t locked = B_FALSE;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ hash = ef10_filter_hash(spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ depth = 1;
+ for (;;) {
+ i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
+ saved_spec = ef10_filter_entry_spec(table, i);
+ if (saved_spec && ef10_filter_equal(spec, saved_spec) &&
+ ef10_filter_same_dest(spec, saved_spec)) {
+ break;
+ }
+ if (depth == EF10_FILTER_SEARCH_LIMIT) {
+ rc = ENOENT;
+ goto fail1;
+ }
+ depth++;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ rc = ef10_filter_delete_internal(enp, i);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ if (locked)
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_parser_disp_info(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN,
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)];
+ size_t matches_count;
+ size_t list_size;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP,
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ matches_count = MCDI_OUT_DWORD(req,
+ GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES);
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(matches_count)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *list_lengthp = matches_count;
+
+ if (buffer_length < matches_count) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ /*
+ * Check that the elements in the list in the MCDI response are the size
+ * we expect, so we can just copy them directly. Any conversion of the
+ * flags is handled by the caller.
+ */
+ EFX_STATIC_ASSERT(sizeof (uint32_t) ==
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN);
+
+ list_size = matches_count *
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN;
+ memcpy(buffer,
+ MCDI_OUT2(req, uint32_t,
+ GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES),
+ list_size);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+
+ size_t mcdi_list_length;
+ size_t list_length;
+ uint32_t i;
+ efx_rc_t rc;
+ uint32_t all_filter_flags =
+ (EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST |
+ EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_REM_PORT |
+ EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_INNER_VID |
+ EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST |
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST);
+
+ rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length,
+ &mcdi_list_length);
+ if (rc != 0) {
+ if (rc == ENOSPC) {
+ /* Pass through mcdi_list_length for the list length */
+ *list_lengthp = mcdi_list_length;
+ }
+ goto fail1;
+ }
+
+ /*
+ * The static assertions in ef10_filter_init() ensure that the values of
+ * the EFX_FILTER_MATCH flags match those used by MCDI, so they don't
+ * need to be converted.
+ *
+ * In case support is added to MCDI for additional flags, remove any
+ * matches from the list which include flags we don't support. The order
+ * of the matches is preserved as they are ordered from highest to
+ * lowest priority.
+ */
+ EFSYS_ASSERT(mcdi_list_length <= buffer_length);
+ list_length = 0;
+ for (i = 0; i < mcdi_list_length; i++) {
+ if ((buffer[i] & ~all_filter_flags) == 0) {
+ buffer[list_length] = buffer[i];
+ list_length++;
+ }
+ }
+
+ *list_lengthp = list_length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_unicast(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *addr,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the filter for the local station address */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_unicst_filter_count++;
+ EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_all_unicast(
+ __in efx_nic_t *enp,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the unknown unicast filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ efx_filter_spec_set_uc_def(&spec);
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_unicst_filter_count++;
+ EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_multicast_list(
+ __in efx_nic_t *enp,
+ __in boolean_t mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count,
+ __in efx_filter_flags_t filter_flags,
+ __in boolean_t rollback)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ uint8_t addr[6];
+ uint32_t i;
+ uint32_t filter_index;
+ uint32_t filter_count;
+ efx_rc_t rc;
+
+ if (mulcst == B_FALSE)
+ count = 0;
+
+ if (count + (brdcst ? 1 : 0) >
+ EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) {
+ /* Too many MAC addresses */
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Insert/renew multicast address list filters */
+ filter_count = 0;
+ for (i = 0; i < count; i++) {
+ efx_filter_spec_init_rx(&spec,
+ EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+
+ efx_filter_spec_set_eth_local(&spec,
+ EFX_FILTER_SPEC_VID_UNSPEC,
+ &addrs[i * EFX_MAC_ADDR_LEN]);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &filter_index);
+
+ if (rc == 0) {
+ eftp->eft_mulcst_filter_indexes[filter_count] =
+ filter_index;
+ filter_count++;
+ } else if (rollback == B_TRUE) {
+ /* Only stop upon failure if told to rollback */
+ goto rollback;
+ }
+
+ }
+
+ if (brdcst == B_TRUE) {
+ /* Insert/renew broadcast address filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+
+ EFX_MAC_BROADCAST_ADDR_SET(addr);
+ efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC,
+ addr);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &filter_index);
+
+ if (rc == 0) {
+ eftp->eft_mulcst_filter_indexes[filter_count] =
+ filter_index;
+ filter_count++;
+ } else if (rollback == B_TRUE) {
+ /* Only stop upon failure if told to rollback */
+ goto rollback;
+ }
+ }
+
+ eftp->eft_mulcst_filter_count = filter_count;
+ eftp->eft_using_all_mulcst = B_FALSE;
+
+ return (0);
+
+rollback:
+ /* Remove any filters we have inserted */
+ i = filter_count;
+ while (i--) {
+ (void) ef10_filter_delete_internal(enp,
+ eftp->eft_mulcst_filter_indexes[i]);
+ }
+ eftp->eft_mulcst_filter_count = 0;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_all_multicast(
+ __in efx_nic_t *enp,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the unknown multicast filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ efx_filter_spec_set_mc_def(&spec);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_mulcst_filter_indexes[0]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_mulcst_filter_count = 1;
+ eftp->eft_using_all_mulcst = B_TRUE;
+
+ /*
+ * FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic.
+ */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+ef10_filter_remove_old(
+ __in efx_nic_t *enp)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ uint32_t i;
+
+ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ if (ef10_filter_entry_is_auto_old(table, i)) {
+ (void) ef10_filter_delete_internal(enp, i);
+ }
+ }
+}
+
+
+static __checkReturn efx_rc_t
+ef10_filter_get_workarounds(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ uint32_t implemented = 0;
+ uint32_t enabled = 0;
+ efx_rc_t rc;
+
+ rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled);
+ if (rc == 0) {
+ /* Check if chained multicast filter support is enabled */
+ if (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807)
+ encp->enc_bug26807_workaround = B_TRUE;
+ else
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else if (rc == ENOTSUP) {
+ /*
+ * Firmware is too old to support GET_WORKAROUNDS, and support
+ * for this workaround was implemented later.
+ */
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+
+/*
+ * Reconfigure all filters.
+ * If all_unicst and/or all mulcst filters cannot be applied then
+ * return ENOTSUP (Note the filters for the specified addresses are
+ * still applied in this case).
+ */
+ __checkReturn efx_rc_t
+ef10_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_flags_t filter_flags;
+ unsigned int i;
+ efx_rc_t all_unicst_rc = 0;
+ efx_rc_t all_mulcst_rc = 0;
+ efx_rc_t rc;
+
+ if (table->eft_default_rxq == NULL) {
+ /*
+ * Filters direct traffic to the default RXQ, and so cannot be
+ * inserted until it is available. Any currently configured
+ * filters must be removed (ignore errors in case the MC
+ * has rebooted, which removes hardware filters).
+ */
+ for (i = 0; i < table->eft_unicst_filter_count; i++) {
+ (void) ef10_filter_delete_internal(enp,
+ table->eft_unicst_filter_indexes[i]);
+ }
+ table->eft_unicst_filter_count = 0;
+
+ for (i = 0; i < table->eft_mulcst_filter_count; i++) {
+ (void) ef10_filter_delete_internal(enp,
+ table->eft_mulcst_filter_indexes[i]);
+ }
+ table->eft_mulcst_filter_count = 0;
+
+ return (0);
+ }
+
+ if (table->eft_using_rss)
+ filter_flags = EFX_FILTER_FLAG_RX_RSS;
+ else
+ filter_flags = 0;
+
+ /* Mark old filters which may need to be removed */
+ for (i = 0; i < table->eft_unicst_filter_count; i++) {
+ ef10_filter_set_entry_auto_old(table,
+ table->eft_unicst_filter_indexes[i]);
+ }
+ for (i = 0; i < table->eft_mulcst_filter_count; i++) {
+ ef10_filter_set_entry_auto_old(table,
+ table->eft_mulcst_filter_indexes[i]);
+ }
+
+ /*
+ * Insert or renew unicast filters.
+ *
+ * Frimware does not perform chaining on unicast filters. As traffic is
+ * therefore only delivered to the first matching filter, we should
+ * always insert the specific filter for our MAC address, to try and
+ * ensure we get that traffic.
+ *
+ * (If the filter for our MAC address has already been inserted by
+ * another function, we won't receive traffic sent to us, even if we
+ * insert a unicast mismatch filter. To prevent traffic stealing, this
+ * therefore relies on the privilege model only allowing functions to
+ * insert filters for their own MAC address unless explicitly given
+ * additional privileges by the user. This also means that, even on a
+ * priviliged function, inserting a unicast mismatch filter may not
+ * catch all traffic in multi PCI function scenarios.)
+ */
+ table->eft_unicst_filter_count = 0;
+ rc = ef10_filter_insert_unicast(enp, mac_addr, filter_flags);
+ if (all_unicst || (rc != 0)) {
+ all_unicst_rc = ef10_filter_insert_all_unicast(enp,
+ filter_flags);
+ if ((rc != 0) && (all_unicst_rc != 0))
+ goto fail1;
+ }
+
+ /*
+ * WORKAROUND_BUG26807 controls firmware support for chained multicast
+ * filters, and can only be enabled or disabled when the hardware filter
+ * table is empty.
+ *
+ * Chained multicast filters require support from the datapath firmware,
+ * and may not be available (e.g. low-latency variants or old Huntington
+ * firmware).
+ *
+ * Firmware will reset (FLR) functions which have inserted filters in
+ * the hardware filter table when the workaround is enabled/disabled.
+ * Functions without any hardware filters are not reset.
+ *
+ * Re-check if the workaround is enabled after adding unicast hardware
+ * filters. This ensures that encp->enc_bug26807_workaround matches the
+ * firmware state, and that later changes to enable/disable the
+ * workaround will result in this function seeing a reset (FLR).
+ *
+ * In common-code drivers, we only support multiple PCI function
+ * scenarios with firmware that supports multicast chaining, so we can
+ * assume it is enabled for such cases and hence simplify the filter
+ * insertion logic. Firmware that does not support multicast chaining
+ * does not support multiple PCI function configurations either, so
+ * filter insertion is much simpler and the same strategies can still be
+ * used.
+ */
+ if ((rc = ef10_filter_get_workarounds(enp)) != 0)
+ goto fail2;
+
+ if ((table->eft_using_all_mulcst != all_mulcst) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is enabled, so traffic that matches
+ * more than one multicast filter will be replicated and
+ * delivered to multiple recipients. To avoid this duplicate
+ * delivery, remove old multicast filters before inserting new
+ * multicast filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
+
+ /* Insert or renew multicast filters */
+ if (all_mulcst == B_TRUE) {
+ /*
+ * Insert the all multicast filter. If that fails, try to insert
+ * all of our multicast filters (but without rollback on
+ * failure).
+ */
+ all_mulcst_rc = ef10_filter_insert_all_multicast(enp,
+ filter_flags);
+ if (all_mulcst_rc != 0) {
+ rc = ef10_filter_insert_multicast_list(enp, B_TRUE,
+ brdcst, addrs, count, filter_flags, B_FALSE);
+ if (rc != 0)
+ goto fail3;
+ }
+ } else {
+ /*
+ * Insert filters for multicast addresses.
+ * If any insertion fails, then rollback and try to insert the
+ * all multicast filter instead.
+ * If that also fails, try to insert all of the multicast
+ * filters (but without rollback on failure).
+ */
+ rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst,
+ addrs, count, filter_flags, B_TRUE);
+ if (rc != 0) {
+ if ((table->eft_using_all_mulcst == B_FALSE) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is on, so remove
+ * old filters before inserting the multicast
+ * all filter to avoid duplicate delivery caused
+ * by packets matching multiple filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
+
+ rc = ef10_filter_insert_all_multicast(enp,
+ filter_flags);
+ if (rc != 0) {
+ rc = ef10_filter_insert_multicast_list(enp,
+ mulcst, brdcst,
+ addrs, count, filter_flags, B_FALSE);
+ if (rc != 0)
+ goto fail4;
+ }
+ }
+ }
+
+ /* Remove old filters which were not renewed */
+ ef10_filter_remove_old(enp);
+
+ /* report if any optional flags were rejected */
+ if (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) ||
+ ((all_mulcst != B_FALSE) && (all_mulcst_rc != 0))) {
+ rc = ENOTSUP;
+ }
+
+ return (rc);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Clear auto old flags */
+ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ if (ef10_filter_entry_is_auto_old(table, i)) {
+ ef10_filter_set_entry_not_auto_old(table, i);
+ }
+ }
+
+ return (rc);
+}
+
+ void
+ef10_filter_get_default_rxq(
+ __in efx_nic_t *enp,
+ __out efx_rxq_t **erpp,
+ __out boolean_t *using_rss)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+ *erpp = table->eft_default_rxq;
+ *using_rss = table->eft_using_rss;
+}
+
+
+ void
+ef10_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+#if EFSYS_OPT_RX_SCALE
+ EFSYS_ASSERT((using_rss == B_FALSE) ||
+ (enp->en_rss_context != EF10_RSS_CONTEXT_INVALID));
+ table->eft_using_rss = using_rss;
+#else
+ EFSYS_ASSERT(using_rss == B_FALSE);
+ table->eft_using_rss = B_FALSE;
+#endif
+ table->eft_default_rxq = erp;
+}
+
+ void
+ef10_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+ table->eft_default_rxq = NULL;
+ table->eft_using_rss = B_FALSE;
+}
+
+
+#endif /* EFSYS_OPT_FILTER */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_impl.h b/drivers/net/sfc/base/ef10_impl.h
new file mode 100644
index 00000000..8c3dffee
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_impl.h
@@ -0,0 +1,1183 @@
+/*
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EF10_IMPL_H
+#define _SYS_EF10_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (EFSYS_OPT_HUNTINGTON && EFSYS_OPT_MEDFORD)
+#define EF10_MAX_PIOBUF_NBUFS MAX(HUNT_PIOBUF_NBUFS, MEDFORD_PIOBUF_NBUFS)
+#elif EFSYS_OPT_HUNTINGTON
+#define EF10_MAX_PIOBUF_NBUFS HUNT_PIOBUF_NBUFS
+#elif EFSYS_OPT_MEDFORD
+#define EF10_MAX_PIOBUF_NBUFS MEDFORD_PIOBUF_NBUFS
+#endif
+
+/*
+ * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could
+ * possibly be increased, or the write size reported by newer firmware used
+ * instead.
+ */
+#define EF10_NVRAM_CHUNK 0x80
+
+/* Alignment requirement for value written to RX WPTR:
+ * the WPTR must be aligned to an 8 descriptor boundary
+ */
+#define EF10_RX_WPTR_ALIGN 8
+
+/*
+ * Max byte offset into the packet the TCP header must start for the hardware
+ * to be able to parse the packet correctly.
+ */
+#define EF10_TCP_HEADER_OFFSET_LIMIT 208
+
+/* Invalid RSS context handle */
+#define EF10_RSS_CONTEXT_INVALID (0xffffffff)
+
+
+/* EV */
+
+ __checkReturn efx_rc_t
+ef10_ev_init(
+ __in efx_nic_t *enp);
+
+ void
+ef10_ev_fini(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep);
+
+ void
+ef10_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+ __checkReturn efx_rc_t
+ef10_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+ void
+ef10_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+ __checkReturn efx_rc_t
+ef10_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+#endif /* EFSYS_OPT_QSTATS */
+
+ void
+ef10_ev_rxlabel_init(
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp,
+ __in unsigned int label,
+ __in boolean_t packed_stream);
+
+ void
+ef10_ev_rxlabel_fini(
+ __in efx_evq_t *eep,
+ __in unsigned int label);
+
+/* INTR */
+
+ __checkReturn efx_rc_t
+ef10_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+ void
+ef10_intr_enable(
+ __in efx_nic_t *enp);
+
+ void
+ef10_intr_disable(
+ __in efx_nic_t *enp);
+
+ void
+ef10_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+ void
+ef10_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp);
+
+ void
+ef10_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+ void
+ef10_intr_fatal(
+ __in efx_nic_t *enp);
+ void
+ef10_intr_fini(
+ __in efx_nic_t *enp);
+
+/* NIC */
+
+extern __checkReturn efx_rc_t
+ef10_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *vi_countp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_reset(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+ef10_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+ef10_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_nic_unprobe(
+ __in efx_nic_t *enp);
+
+
+/* MAC */
+
+extern __checkReturn efx_rc_t
+ef10_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+ef10_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_addr_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_pdu_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+extern __checkReturn efx_rc_t
+ef10_mac_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_multicast_list_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+ef10_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+extern __checkReturn efx_rc_t
+ef10_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type);
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+extern __checkReturn efx_rc_t
+ef10_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size);
+
+extern __checkReturn efx_rc_t
+ef10_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+
+/* MCDI */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+ef10_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern void
+ef10_mcdi_fini(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len);
+
+extern __checkReturn boolean_t
+ef10_mcdi_poll_response(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length);
+
+extern efx_rc_t
+ef10_mcdi_poll_reboot(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp);
+
+extern void
+ef10_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+/* NVRAM */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buf_read_tlv(
+ __in efx_nic_t *enp,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buf_write_tlv(
+ __inout_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size,
+ __in uint32_t tag,
+ __in_bcount(tag_size) caddr_t tag_data,
+ __in size_t tag_size,
+ __out size_t *total_lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write_segment_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t all_segments);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *resultp);
+
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+ef10_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+ef10_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_validate(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_create(
+ __in efx_nic_t *enp,
+ __in uint16_t partn_type,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_find_item_start(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_find_end(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+ef10_nvram_buffer_find_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_get_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(item_max_size, *lengthp)
+ caddr_t itemp,
+ __in size_t item_max_size,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_insert_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_delete_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_finish(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_NVRAM */
+
+
+/* PHY */
+
+typedef struct ef10_link_state_s {
+ uint32_t els_adv_cap_mask;
+ uint32_t els_lp_cap_mask;
+ unsigned int els_fcntl;
+ efx_link_mode_t els_link_mode;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t els_loopback;
+#endif
+ boolean_t els_mac_up;
+} ef10_link_state_t;
+
+extern void
+ef10_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+ef10_phy_get_link(
+ __in efx_nic_t *enp,
+ __out ef10_link_state_t *elsp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t on);
+
+extern __checkReturn efx_rc_t
+ef10_phy_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_verify(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+#if EFSYS_OPT_PHY_STATS
+
+extern __checkReturn efx_rc_t
+ef10_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+extern __checkReturn efx_rc_t
+ef10_bist_enable_offline(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+ef10_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+ef10_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+/* TX */
+
+extern __checkReturn efx_rc_t
+ef10_tx_init(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_tx_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp);
+
+extern void
+ef10_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+ef10_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+extern void
+ef10_rx_qps_update_credits(
+ __in efx_rxq_t *erp);
+
+extern __checkReturn uint8_t *
+ef10_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp);
+#endif
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qflush(
+ __in efx_txq_t *etp);
+
+extern void
+ef10_tx_qenable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_enable(
+ __in efx_txq_t *etp);
+
+extern void
+ef10_tx_qpio_disable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+ef10_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+extern void
+ef10_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp);
+
+extern void
+ef10_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count);
+
+extern void
+ef10_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t vlan_tci,
+ __out efx_desc_t *edp);
+
+
+#if EFSYS_OPT_QSTATS
+
+extern void
+ef10_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+typedef uint32_t efx_piobuf_handle_t;
+
+#define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t) -1)
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_alloc(
+ __inout efx_nic_t *enp,
+ __out uint32_t *bufnump,
+ __out efx_piobuf_handle_t *handlep,
+ __out uint32_t *blknump,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_free(
+ __inout efx_nic_t *enp,
+ __in uint32_t bufnum,
+ __in uint32_t blknum);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_link(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_unlink(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index);
+
+
+/* VPD */
+
+#if EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+ef10_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+ef10_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+
+/* RX */
+
+extern __checkReturn efx_rc_t
+ef10_rx_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+extern __checkReturn efx_rc_t
+ef10_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+
+#if EFSYS_OPT_RX_SCALE
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+extern __checkReturn uint32_t
+ef10_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+extern __checkReturn efx_rc_t
+ef10_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp);
+
+extern void
+ef10_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+extern void
+ef10_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+extern __checkReturn efx_rc_t
+ef10_rx_qflush(
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_qenable(
+ __in efx_rxq_t *erp);
+
+extern __checkReturn efx_rc_t
+ef10_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_FILTER
+
+typedef struct ef10_filter_handle_s {
+ uint32_t efh_lo;
+ uint32_t efh_hi;
+} ef10_filter_handle_t;
+
+typedef struct ef10_filter_entry_s {
+ uintptr_t efe_spec; /* pointer to filter spec plus busy bit */
+ ef10_filter_handle_t efe_handle;
+} ef10_filter_entry_t;
+
+/*
+ * BUSY flag indicates that an update is in progress.
+ * AUTO_OLD flag is used to mark and sweep MAC packet filters.
+ */
+#define EFX_EF10_FILTER_FLAG_BUSY 1U
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2U
+#define EFX_EF10_FILTER_FLAGS 3U
+
+/*
+ * Size of the hash table used by the driver. Doesn't need to be the
+ * same size as the hardware's table.
+ */
+#define EFX_EF10_FILTER_TBL_ROWS 8192
+
+/* Only need to allow for one directed and one unknown unicast filter */
+#define EFX_EF10_FILTER_UNICAST_FILTERS_MAX 2
+
+/* Allow for the broadcast address to be added to the multicast list */
+#define EFX_EF10_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1)
+
+typedef struct ef10_filter_table_s {
+ ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS];
+ efx_rxq_t *eft_default_rxq;
+ boolean_t eft_using_rss;
+ uint32_t eft_unicst_filter_indexes[
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX];
+ uint32_t eft_unicst_filter_count;
+ uint32_t eft_mulcst_filter_indexes[
+ EFX_EF10_FILTER_MULTICAST_FILTERS_MAX];
+ uint32_t eft_mulcst_filter_count;
+ boolean_t eft_using_all_mulcst;
+} ef10_filter_table_t;
+
+ __checkReturn efx_rc_t
+ef10_filter_init(
+ __in efx_nic_t *enp);
+
+ void
+ef10_filter_fini(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_filter_restore(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace);
+
+ __checkReturn efx_rc_t
+ef10_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+ef10_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count);
+
+extern void
+ef10_filter_get_default_rxq(
+ __in efx_nic_t *enp,
+ __out efx_rxq_t **erpp,
+ __out boolean_t *using_rss);
+
+extern void
+ef10_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+ef10_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+
+#endif /* EFSYS_OPT_FILTER */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_function_info(
+ __in efx_nic_t *enp,
+ __out uint32_t *pfp,
+ __out_opt uint32_t *vfp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_privilege_mask(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __in uint32_t vf,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_port_assignment(
+ __in efx_nic_t *enp,
+ __out uint32_t *portp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_port_modes(
+ __in efx_nic_t *enp,
+ __out uint32_t *modesp,
+ __out_opt uint32_t *current_modep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_port_mode_bandwidth(
+ __in uint32_t port_mode,
+ __out uint32_t *bandwidth_mbpsp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_pf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_vf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_clock(
+ __in efx_nic_t *enp,
+ __out uint32_t *sys_freqp,
+ __out uint32_t *dpcpu_freqp);
+
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_vector_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *vec_basep,
+ __out_opt uint32_t *pf_nvecp,
+ __out_opt uint32_t *vf_nvecp);
+
+extern __checkReturn efx_rc_t
+ef10_get_datapath_caps(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_get_privilege_mask(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+ef10_external_port_mapping(
+ __in efx_nic_t *enp,
+ __in uint32_t port,
+ __out uint8_t *external_portp);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+/* Data space per credit in packed stream mode */
+#define EFX_RX_PACKED_STREAM_MEM_PER_CREDIT (1 << 16)
+
+/*
+ * Received packets are always aligned at this boundary. Also there always
+ * exists a gap of this size between packets.
+ * (see SF-112241-TC, 4.5)
+ */
+#define EFX_RX_PACKED_STREAM_ALIGNMENT 64
+
+/*
+ * Size of a pseudo-header prepended to received packets
+ * in packed stream mode
+ */
+#define EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE 8
+
+/* Minimum space for packet in packed stream mode */
+#define EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE \
+ P2ROUNDUP(EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE + \
+ EFX_MAC_PDU_MIN + \
+ EFX_RX_PACKED_STREAM_ALIGNMENT, \
+ EFX_RX_PACKED_STREAM_ALIGNMENT)
+
+/* Maximum number of credits */
+#define EFX_RX_PACKED_STREAM_MAX_CREDITS 127
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EF10_IMPL_H */
diff --git a/drivers/net/sfc/base/ef10_intr.c b/drivers/net/sfc/base/ef10_intr.c
new file mode 100644
index 00000000..16be3d8c
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_intr.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+ __checkReturn efx_rc_t
+ef10_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ _NOTE(ARGUNUSED(enp, type, esmp))
+ return (0);
+}
+
+
+ void
+ef10_intr_enable(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+ void
+ef10_intr_disable(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+ void
+ef10_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_trigger_interrupt(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_TRIGGER_INTERRUPT_IN_LEN,
+ MC_CMD_TRIGGER_INTERRUPT_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (level >= enp->en_nic_cfg.enc_intr_limit) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_TRIGGER_INTERRUPT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_TRIGGER_INTERRUPT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_TRIGGER_INTERRUPT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, TRIGGER_INTERRUPT_IN_INTR_LEVEL, level);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ if (encp->enc_bug41750_workaround) {
+ /*
+ * bug 41750: Test interrupts don't work on Greenport
+ * bug 50084: Test interrupts don't work on VFs
+ */
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_trigger_interrupt(enp, level)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_dword_t dword;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Read the queue mask and implicitly acknowledge the interrupt. */
+ EFX_BAR_READD(enp, ER_DZ_BIU_INT_ISR_REG, &dword, B_FALSE);
+ *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
+
+ *fatalp = B_FALSE;
+}
+
+ void
+ef10_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ _NOTE(ARGUNUSED(enp, message))
+
+ /* EF10 fatal errors are reported via events */
+ *fatalp = B_FALSE;
+}
+
+ void
+ef10_intr_fatal(
+ __in efx_nic_t *enp)
+{
+ /* EF10 fatal errors are reported via events */
+ _NOTE(ARGUNUSED(enp))
+}
+
+ void
+ef10_intr_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_mac.c b/drivers/net/sfc/base/ef10_mac.c
new file mode 100644
index 00000000..488633f5
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_mac.c
@@ -0,0 +1,897 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+ __checkReturn efx_rc_t
+ef10_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ ef10_link_state_t els;
+ efx_rc_t rc;
+
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail1;
+
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_fcntl = els.els_fcntl;
+
+ *link_modep = els.els_link_mode;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ ef10_link_state_t els;
+ efx_rc_t rc;
+
+ /*
+ * Because EF10 doesn't *require* polling, we can't rely on
+ * ef10_mac_poll() being executed to populate epp->ep_mac_up.
+ */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail1;
+
+ *mac_upp = els.els_mac_up;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * EF10 adapters use MC_CMD_VADAPTOR_SET_MAC to set the
+ * MAC address; the address field in MC_CMD_SET_MAC has no
+ * effect.
+ * MC_CMD_VADAPTOR_SET_MAC requires mac-spoofing privilege and
+ * the port to have no filters or queues active.
+ */
+static __checkReturn efx_rc_t
+efx_mcdi_vadapter_set_mac(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_SET_MAC_IN_LEN,
+ MC_CMD_VADAPTOR_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
+ enp->en_vport_id);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, VADAPTOR_SET_MAC_IN_MACADDR),
+ epp->ep_mac_addr);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_addr_set(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_vadapter_set_mac(enp)) != 0) {
+ if (rc != ENOTSUP)
+ goto fail1;
+
+ /*
+ * Fallback for older Huntington firmware without Vadapter
+ * support.
+ */
+ if ((rc = ef10_mac_reconfigure(enp)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_mtu_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mtu)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ /* Only configure the MTU in this call to MC_CMD_SET_MAC */
+ MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_MTU, mtu);
+ MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_EXT_IN_CONTROL,
+ SET_MAC_EXT_IN_CFG_MTU, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_mtu_get(
+ __in efx_nic_t *enp,
+ __out size_t *mtu)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_V2_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_V2_OUT_LEN;
+
+ /*
+ * With MC_CMD_SET_MAC_EXT_IN_CONTROL set to 0, this just queries the
+ * MTU. This should always be supported on Medford, but it is not
+ * supported on older Huntington firmware.
+ */
+ MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_CONTROL, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ if (req.emr_out_length_used < MC_CMD_SET_MAC_V2_OUT_MTU_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *mtu = MCDI_OUT_DWORD(req, SET_MAC_V2_OUT_MTU);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_pdu_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ if (encp->enc_enhanced_set_mac_supported) {
+ if ((rc = efx_mcdi_mtu_set(enp, epp->ep_mac_pdu)) != 0)
+ goto fail1;
+ } else {
+ /*
+ * Fallback for older Huntington firmware, which always
+ * configure all of the parameters to MC_CMD_SET_MAC. This isn't
+ * suitable for setting the MTU on unpriviliged functions.
+ */
+ if ((rc = ef10_mac_reconfigure(enp)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_mtu_get(enp, pdu)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+__checkReturn efx_rc_t
+ef10_mac_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
+ epp->ep_mac_addr);
+
+ /*
+ * Note: The Huntington MAC does not support REJECT_BRDCST.
+ * The REJECT_UNCST flag will also prevent multicast traffic
+ * from reaching the filters. As Huntington filters drop any
+ * traffic that does not match a filter it is ok to leave the
+ * MAC running in promiscuous mode. See bug41141.
+ *
+ * FIXME: Does REJECT_UNCST behave the same way on Medford?
+ */
+ MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, 0,
+ SET_MAC_IN_REJECT_BRDCST, 0);
+
+ /*
+ * Flow control, whether it is auto-negotiated or not,
+ * is set via the PHY advertised capabilities. When set to
+ * automatic the MAC will use the PHY settings to determine
+ * the flow control settings.
+ */
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, MC_CMD_FCNTL_AUTO);
+
+ /* Do not include the Ethernet frame checksum in RX packets */
+ MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_IN_FLAGS,
+ SET_MAC_IN_FLAG_INCLUDE_FCS, 0);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ /*
+ * Unprivileged functions cannot control link state,
+ * but still need to configure filters.
+ */
+ if (req.emr_rc != EACCES) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ }
+
+ /*
+ * Apply the filters for the MAC configuration.
+ * If the NIC isn't ready to accept filters this may
+ * return success without setting anything.
+ */
+ rc = efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_multicast_list_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_rxq_t *old_rxq;
+ boolean_t old_using_rss;
+ efx_rc_t rc;
+
+ ef10_filter_get_default_rxq(enp, &old_rxq, &old_using_rss);
+
+ ef10_filter_default_rxq_set(enp, erp, using_rss);
+
+ rc = efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ ef10_filter_default_rxq_set(enp, old_rxq, old_using_rss);
+
+ return (rc);
+}
+
+ void
+ef10_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ ef10_filter_default_rxq_clear(enp);
+
+ efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+}
+
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+ef10_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_loopback_type_t old_loopback_type;
+ efx_link_mode_t old_loopback_link_mode;
+ efx_rc_t rc;
+
+ /* The PHY object handles this on EF10 */
+ old_loopback_type = epp->ep_loopback_type;
+ old_loopback_link_mode = epp->ep_loopback_link_mode;
+ epp->ep_loopback_type = loopback_type;
+ epp->ep_loopback_link_mode = link_mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_loopback_type = old_loopback_type;
+ epp->ep_loopback_link_mode = old_loopback_link_mode;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+ __checkReturn efx_rc_t
+ef10_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size)
+{
+ const struct efx_mac_stats_range ef10_common[] = {
+ { EFX_MAC_RX_OCTETS, EFX_MAC_RX_GE_15XX_PKTS },
+ { EFX_MAC_RX_FCS_ERRORS, EFX_MAC_RX_DROP_EVENTS },
+ { EFX_MAC_RX_JABBER_PKTS, EFX_MAC_RX_JABBER_PKTS },
+ { EFX_MAC_RX_NODESC_DROP_CNT, EFX_MAC_TX_PAUSE_PKTS },
+ };
+ const struct efx_mac_stats_range ef10_tx_size_bins[] = {
+ { EFX_MAC_TX_LE_64_PKTS, EFX_MAC_TX_GE_15XX_PKTS },
+ };
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ efx_rc_t rc;
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_common, EFX_ARRAY_SIZE(ef10_common))) != 0)
+ goto fail1;
+
+ if (epp->ep_phy_cap_mask & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
+ const struct efx_mac_stats_range ef10_40g_extra[] = {
+ { EFX_MAC_RX_ALIGN_ERRORS, EFX_MAC_RX_ALIGN_ERRORS },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_40g_extra, EFX_ARRAY_SIZE(ef10_40g_extra))) != 0)
+ goto fail2;
+
+ if (encp->enc_mac_stats_40g_tx_size_bins) {
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp,
+ mask_size, ef10_tx_size_bins,
+ EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0)
+ goto fail3;
+ }
+ } else {
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_tx_size_bins, EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0)
+ goto fail4;
+ }
+
+ if (encp->enc_pm_and_rxdp_counters) {
+ const struct efx_mac_stats_range ef10_pm_and_rxdp[] = {
+ { EFX_MAC_PM_TRUNC_BB_OVERFLOW, EFX_MAC_RXDP_HLB_WAIT },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_pm_and_rxdp, EFX_ARRAY_SIZE(ef10_pm_and_rxdp))) != 0)
+ goto fail5;
+ }
+
+ if (encp->enc_datapath_cap_evb) {
+ const struct efx_mac_stats_range ef10_vadaptor[] = {
+ { EFX_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_OVERFLOW },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_vadaptor, EFX_ARRAY_SIZE(ef10_vadaptor))) != 0)
+ goto fail6;
+ }
+
+ return (0);
+
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#define EF10_MAC_STAT_READ(_esmp, _field, _eqp) \
+ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp)
+
+
+ __checkReturn efx_rc_t
+ef10_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp)
+{
+ efx_qword_t value;
+ efx_qword_t generation_start;
+ efx_qword_t generation_end;
+
+ _NOTE(ARGUNUSED(enp))
+
+ /* Read END first so we don't race with the MC */
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
+ &generation_end);
+ EFSYS_MEM_READ_BARRIER();
+
+ /* TX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value);
+ EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value);
+
+ /* RX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);
+
+ /* Packet memory (EF10 only) */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_BB_OVERFLOW]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_BB_OVERFLOW]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_VFIFO_FULL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_VFIFO_FULL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_VFIFO_FULL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_VFIFO_FULL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_QBB, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_QBB]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_QBB, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_QBB]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_MAPPING, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_MAPPING]), &value);
+
+ /* RX datapath */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_Q_DISABLED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_Q_DISABLED_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_DI_DROPPED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_DI_DROPPED_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_STREAMING_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_STREAMING_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_FETCH]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_WAIT]), &value);
+
+
+ /* VADAPTER RX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_BYTES]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_OVERFLOW]), &value);
+
+ /* VADAPTER TX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_PACKETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_BYTES]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_OVERFLOW]), &value);
+
+
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EFSYS_MEM_READ_BARRIER();
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
+ &generation_start);
+
+ /* Check that we didn't read the stats in the middle of a DMA */
+ /* Not a good enough check ? */
+ if (memcmp(&generation_start, &generation_end,
+ sizeof (generation_start)))
+ return (EAGAIN);
+
+ if (generationp)
+ *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_mcdi.c b/drivers/net/sfc/base/ef10_mcdi.c
new file mode 100644
index 00000000..5a26bda2
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_mcdi.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_MCDI
+
+#ifndef WITH_MCDI_V2
+#error "WITH_MCDI_V2 required for EF10 MCDIv2 commands."
+#endif
+
+
+ __checkReturn efx_rc_t
+ef10_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *emtp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t dword;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA);
+
+ /*
+ * All EF10 firmware supports MCDIv2 and MCDIv1.
+ * Medford BootROM supports MCDIv2 and MCDIv1.
+ * Huntington BootROM supports MCDIv1 only.
+ */
+ emip->emi_max_version = 2;
+
+ /* A host DMA buffer is required for EF10 MCDI */
+ if (esmp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Ensure that the MC doorbell is in a known state before issuing MCDI
+ * commands. The recovery algorithm requires that the MC command buffer
+ * must be 256 byte aligned. See bug24769.
+ */
+ if ((EFSYS_MEM_ADDR(esmp) & 0xFF) != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 1);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
+
+ /* Save initial MC reboot status */
+ (void) ef10_mcdi_poll_reboot(enp);
+
+ /* Start a new epoch (allow fresh MCDI requests to succeed) */
+ efx_mcdi_new_epoch(enp);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+
+ emip->emi_new_epoch = B_FALSE;
+}
+
+/*
+ * In older firmware all commands are processed in a single thread, so a long
+ * running command for one PCIe function can block processing for another
+ * function (see bug 61269).
+ *
+ * In newer firmware that supports multithreaded MCDI processing, we can extend
+ * the timeout for long-running requests which we know firmware may choose to
+ * process in a background thread.
+ */
+#define EF10_MCDI_CMD_TIMEOUT_US (10 * 1000 * 1000)
+#define EF10_MCDI_CMD_LONG_TIMEOUT_US (60 * 1000 * 1000)
+
+ void
+ef10_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ switch (emrp->emr_cmd) {
+ case MC_CMD_POLL_BIST:
+ case MC_CMD_NVRAM_ERASE:
+ case MC_CMD_LICENSING_V3:
+ case MC_CMD_NVRAM_UPDATE_FINISH:
+ if (encp->enc_fw_verified_nvram_update_required != B_FALSE) {
+ /*
+ * Potentially longer running commands, which firmware
+ * may choose to process in a background thread.
+ */
+ *timeoutp = EF10_MCDI_CMD_LONG_TIMEOUT_US;
+ break;
+ }
+ /* FALLTHRU */
+ default:
+ *timeoutp = EF10_MCDI_CMD_TIMEOUT_US;
+ break;
+ }
+}
+
+ void
+ef10_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t dword;
+ unsigned int pos;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Write the header */
+ for (pos = 0; pos < hdr_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)hdrp + pos);
+ EFSYS_MEM_WRITED(esmp, pos, &dword);
+ }
+
+ /* Write the payload */
+ for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)sdup + pos);
+ EFSYS_MEM_WRITED(esmp, hdr_len + pos, &dword);
+ }
+
+ /* Guarantee ordering of memory (MCDI request) and PIO (MC doorbell) */
+ EFSYS_DMA_SYNC_FOR_DEVICE(esmp, 0, hdr_len + sdu_len);
+ EFSYS_PIO_WRITE_BARRIER();
+
+ /* Ring the doorbell to post the command DMA address to the MC */
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_LWRD_REG, &dword, B_FALSE);
+
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
+}
+
+ __checkReturn boolean_t
+ef10_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t hdr;
+
+ EFSYS_MEM_READD(esmp, 0, &hdr);
+ EFSYS_MEM_READ_BARRIER();
+
+ return (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE);
+}
+
+ void
+ef10_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ unsigned int pos;
+ efx_dword_t data;
+
+ for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {
+ EFSYS_MEM_READD(esmp, offset + pos, &data);
+ memcpy((uint8_t *)bufferp + pos, &data,
+ MIN(sizeof (data), length - pos));
+ }
+}
+
+ efx_rc_t
+ef10_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t dword;
+ uint32_t old_status;
+ uint32_t new_status;
+ efx_rc_t rc;
+
+ old_status = emip->emi_mc_reboot_status;
+
+ /* Update MC reboot status word */
+ EFX_BAR_TBL_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, 0, &dword, B_FALSE);
+ new_status = dword.ed_u32[0];
+
+ /* MC has rebooted if the value has changed */
+ if (new_status != old_status) {
+ emip->emi_mc_reboot_status = new_status;
+
+ /*
+ * FIXME: Ignore detected MC REBOOT for now.
+ *
+ * The Siena support for checking for MC reboot from status
+ * flags is broken - see comments in siena_mcdi_poll_reboot().
+ * As the generic MCDI code is shared the EF10 reboot
+ * detection suffers similar problems.
+ *
+ * Do not report an error when the boot status changes until
+ * this can be handled by common code drivers (and reworked to
+ * support Siena too).
+ */
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = EIO;
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t privilege_mask = encp->enc_privilege_mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Use privilege mask state at MCDI attach.
+ */
+
+ switch (id) {
+ case EFX_MCDI_FEATURE_FW_UPDATE:
+ /*
+ * Admin privilege must be used prior to introduction of
+ * specific flag.
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_LINK_CONTROL:
+ /*
+ * Admin privilege used prior to introduction of
+ * specific flag.
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, LINK) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_MACADDR_CHANGE:
+ /*
+ * Admin privilege must be used prior to introduction of
+ * mac spoofing privilege (at v4.6), which is used up to
+ * introduction of change mac spoofing privilege (at v4.7)
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, CHANGE_MAC) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_MAC_SPOOFING:
+ /*
+ * Admin privilege must be used prior to introduction of
+ * mac spoofing privilege (at v4.6), which is used up to
+ * introduction of mac spoofing TX privilege (at v4.7)
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING_TX) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MCDI */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_nic.c b/drivers/net/sfc/base/ef10_nic.c
new file mode 100644
index 00000000..aac2679c
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_nic.c
@@ -0,0 +1,1780 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#include "ef10_tlv_layout.h"
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_assignment(
+ __in efx_nic_t *enp,
+ __out uint32_t *portp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
+ MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_modes(
+ __in efx_nic_t *enp,
+ __out uint32_t *modesp,
+ __out_opt uint32_t *current_modep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
+ MC_CMD_GET_PORT_MODES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_MODES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /*
+ * Require only Modes and DefaultMode fields, unless the current mode
+ * was requested (CurrentMode field was added for Medford).
+ */
+ if (req.emr_out_length_used <
+ MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ if ((current_modep != NULL) && (req.emr_out_length_used <
+ MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
+
+ if (current_modep != NULL) {
+ *current_modep = MCDI_OUT_DWORD(req,
+ GET_PORT_MODES_OUT_CURRENT_MODE);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_port_mode_bandwidth(
+ __in uint32_t port_mode,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ switch (port_mode) {
+ case TLV_PORT_MODE_10G:
+ bandwidth = 10000;
+ break;
+ case TLV_PORT_MODE_10G_10G:
+ bandwidth = 10000 * 2;
+ break;
+ case TLV_PORT_MODE_10G_10G_10G_10G:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q2:
+ bandwidth = 10000 * 4;
+ break;
+ case TLV_PORT_MODE_40G:
+ bandwidth = 40000;
+ break;
+ case TLV_PORT_MODE_40G_40G:
+ bandwidth = 40000 * 2;
+ break;
+ case TLV_PORT_MODE_40G_10G_10G:
+ case TLV_PORT_MODE_10G_10G_40G:
+ bandwidth = 40000 + (10000 * 2);
+ break;
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_alloc(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
+ MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
+ VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
+ enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_free(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
+ MC_CMD_VADAPTOR_FREE_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_FREE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_pf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_vf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_clock(
+ __in efx_nic_t *enp,
+ __out uint32_t *sys_freqp,
+ __out uint32_t *dpcpu_freqp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
+ MC_CMD_GET_CLOCK_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CLOCK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
+ if (*sys_freqp == 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
+ if (*dpcpu_freqp == 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_vector_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *vec_basep,
+ __out_opt uint32_t *pf_nvecp,
+ __out_opt uint32_t *vf_nvecp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
+ MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (vec_basep != NULL)
+ *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
+ if (pf_nvecp != NULL)
+ *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
+ if (vf_nvecp != NULL)
+ *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_vis(
+ __in efx_nic_t *enp,
+ __in uint32_t min_vi_count,
+ __in uint32_t max_vi_count,
+ __out uint32_t *vi_basep,
+ __out uint32_t *vi_countp,
+ __out uint32_t *vi_shiftp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
+ MC_CMD_ALLOC_VIS_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (vi_countp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_VIS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
+ *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
+
+ /* Report VI_SHIFT if available (always zero for Huntington) */
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
+ *vi_shiftp = 0;
+ else
+ *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_vis(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_FREE_VIS;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ /* Ignore ELREADY (no allocated VIs, so nothing to free) */
+ if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_piobuf(
+ __in efx_nic_t *enp,
+ __out efx_piobuf_handle_t *handlep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
+ MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (handlep == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_piobuf(
+ __in efx_nic_t *enp,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
+ MC_CMD_FREE_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FREE_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_link_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_LINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_unlink_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+ef10_nic_alloc_piobufs(
+ __in efx_nic_t *enp,
+ __in uint32_t max_piobuf_count)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+
+ EFSYS_ASSERT3U(max_piobuf_count, <=,
+ EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
+
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+
+ for (i = 0; i < max_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
+ goto fail1;
+
+ enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
+ enp->en_arch.ef10.ena_piobuf_count++;
+ }
+
+ return;
+
+fail1:
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+
+static void
+ef10_nic_free_piobufs(
+ __in efx_nic_t *enp)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+/* Sub-allocate a block from a piobuf */
+ __checkReturn efx_rc_t
+ef10_nic_pio_alloc(
+ __inout efx_nic_t *enp,
+ __out uint32_t *bufnump,
+ __out efx_piobuf_handle_t *handlep,
+ __out uint32_t *blknump,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
+ uint32_t blk_per_buf;
+ uint32_t buf, blk;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT(bufnump);
+ EFSYS_ASSERT(handlep);
+ EFSYS_ASSERT(blknump);
+ EFSYS_ASSERT(offsetp);
+ EFSYS_ASSERT(sizep);
+
+ if ((edcp->edc_pio_alloc_size == 0) ||
+ (enp->en_arch.ef10.ena_piobuf_count == 0)) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+ blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
+
+ for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
+ uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
+
+ if (~(*map) == 0)
+ continue;
+
+ EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
+ for (blk = 0; blk < blk_per_buf; blk++) {
+ if ((*map & (1u << blk)) == 0) {
+ *map |= (1u << blk);
+ goto done;
+ }
+ }
+ }
+ rc = ENOMEM;
+ goto fail2;
+
+done:
+ *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
+ *bufnump = buf;
+ *blknump = blk;
+ *sizep = edcp->edc_pio_alloc_size;
+ *offsetp = blk * (*sizep);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Free a piobuf sub-allocated block */
+ __checkReturn efx_rc_t
+ef10_nic_pio_free(
+ __inout efx_nic_t *enp,
+ __in uint32_t bufnum,
+ __in uint32_t blknum)
+{
+ uint32_t *map;
+ efx_rc_t rc;
+
+ if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
+ (blknum >= (8 * sizeof (*map)))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
+ if ((*map & (1u << blknum)) == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+ *map &= ~(1u << blknum);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_link(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ return (efx_mcdi_link_piobuf(enp, vi_index, handle));
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_unlink(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ return (efx_mcdi_unlink_piobuf(enp, vi_index));
+}
+
+static __checkReturn efx_rc_t
+ef10_mcdi_get_pf_count(
+ __in efx_nic_t *enp,
+ __out uint32_t *pf_countp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PF_COUNT_IN_LEN,
+ MC_CMD_GET_PF_COUNT_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PF_COUNT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *pf_countp = *MCDI_OUT(req, uint8_t,
+ MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
+
+ EFSYS_ASSERT(*pf_countp != 0);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_get_datapath_caps(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t flags;
+ uint32_t flags2;
+ uint32_t tso2nc;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_capabilities(enp, &flags, NULL, NULL,
+ &flags2, &tso2nc)) != 0)
+ goto fail1;
+
+ if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
+ goto fail1;
+
+#define CAP_FLAG(flags1, field) \
+ ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+#define CAP_FLAG2(flags2, field) \
+ ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+ /*
+ * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
+ * We only support the 14 byte prefix here.
+ */
+ if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ encp->enc_rx_prefix_size = 14;
+
+ /* Check if the firmware supports TSO */
+ encp->enc_fw_assisted_tso_enabled =
+ CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports FATSOv2 */
+ encp->enc_fw_assisted_tso_v2_enabled =
+ CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE;
+
+ /* Get the number of TSO contexts (FATSOv2) */
+ encp->enc_fw_assisted_tso_v2_n_contexts =
+ CAP_FLAG2(flags2, TX_TSO_V2) ? tso2nc : 0;
+
+ /* Check if the firmware has vadapter/vport/vswitch support */
+ encp->enc_datapath_cap_evb =
+ CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports VLAN insertion */
+ encp->enc_hw_tx_insert_vlan_enabled =
+ CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports RX event batching */
+ encp->enc_rx_batching_enabled =
+ CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE;
+
+ /*
+ * Even if batching isn't reported as supported, we may still get
+ * batched events (see bug61153).
+ */
+ encp->enc_rx_batch_max = 16;
+
+ /* Check if the firmware supports disabling scatter on RXQs */
+ encp->enc_rx_disable_scatter_supported =
+ CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports packed stream mode */
+ encp->enc_rx_packed_stream_supported =
+ CAP_FLAG(flags, RX_PACKED_STREAM) ? B_TRUE : B_FALSE;
+
+ /*
+ * Check if the firmware supports configurable buffer sizes
+ * for packed stream mode (otherwise buffer size is 1Mbyte)
+ */
+ encp->enc_rx_var_packed_stream_supported =
+ CAP_FLAG(flags, RX_PACKED_STREAM_VAR_BUFFERS) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports set mac with running filters */
+ encp->enc_allow_set_mac_with_installed_filters =
+ CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ?
+ B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
+ * specifying which parameters to configure.
+ */
+ encp->enc_enhanced_set_mac_supported =
+ CAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
+ * us to let the firmware choose the settings to use on an EVQ.
+ */
+ encp->enc_init_evq_v2_supported =
+ CAP_FLAG2(flags2, INIT_EVQ_V2) ? B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware-verified NVRAM updates must be used.
+ *
+ * The firmware trusted installer requires all NVRAM updates to use
+ * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
+ * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
+ * partition and report the result).
+ */
+ encp->enc_fw_verified_nvram_update_required =
+ CAP_FLAG2(flags2, NVRAM_UPDATE_REPORT_VERIFY_RESULT) ?
+ B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware provides packet memory and Rx datapath
+ * counters.
+ */
+ encp->enc_pm_and_rxdp_counters =
+ CAP_FLAG(flags, PM_AND_RXDP_COUNTERS) ? B_TRUE : B_FALSE;
+
+ /*
+ * Check if the 40G MAC hardware is capable of reporting
+ * statistics for Tx size bins.
+ */
+ encp->enc_mac_stats_40g_tx_size_bins =
+ CAP_FLAG2(flags2, MAC_STATS_40G_TX_SIZE_BINS) ? B_TRUE : B_FALSE;
+
+#undef CAP_FLAG
+#undef CAP_FLAG2
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#define EF10_LEGACY_PF_PRIVILEGE_MASK \
+ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
+
+#define EF10_LEGACY_VF_PRIVILEGE_MASK 0
+
+
+ __checkReturn efx_rc_t
+ef10_get_privilege_mask(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t mask;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
+ &mask)) != 0) {
+ if (rc != ENOTSUP)
+ goto fail1;
+
+ /* Fallback for old firmware without privilege mask support */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ /* Assume PF has admin privilege */
+ mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
+ } else {
+ /* VF is always unprivileged by default */
+ mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
+ }
+ }
+
+ *maskp = mask;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Table of mapping schemes from port number to the number of the external
+ * connector on the board. The external numbering does not distinguish
+ * off-board separated outputs such as from multi-headed cables.
+ *
+ * The count of adjacent port numbers that map to each external port
+ * and the offset in the numbering, is determined by the chip family and
+ * current port mode.
+ *
+ * For the Huntington family, the current port mode cannot be discovered,
+ * so the mapping used is instead the last match in the table to the full
+ * set of port modes to which the NIC can be configured. Therefore the
+ * ordering of entries in the the mapping table is significant.
+ */
+static struct {
+ efx_family_t family;
+ uint32_t modes_mask;
+ int32_t count;
+ int32_t offset;
+} __ef10_external_port_mappings[] = {
+ /* Supported modes with 1 output per external port */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1 << TLV_PORT_MODE_10G) |
+ (1 << TLV_PORT_MODE_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G),
+ 1,
+ 1
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G) |
+ (1 << TLV_PORT_MODE_10G_10G),
+ 1,
+ 1
+ },
+ /* Supported modes with 2 outputs per external port */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1 << TLV_PORT_MODE_40G) |
+ (1 << TLV_PORT_MODE_40G_40G) |
+ (1 << TLV_PORT_MODE_40G_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_40G),
+ 2,
+ 1
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_40G) |
+ (1 << TLV_PORT_MODE_40G_40G) |
+ (1 << TLV_PORT_MODE_40G_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_40G) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2),
+ 2,
+ 1
+ },
+ /* Supported modes with 4 outputs per external port */
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1),
+ 4,
+ 1,
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2),
+ 4,
+ 2
+ },
+};
+
+ __checkReturn efx_rc_t
+ef10_external_port_mapping(
+ __in efx_nic_t *enp,
+ __in uint32_t port,
+ __out uint8_t *external_portp)
+{
+ efx_rc_t rc;
+ int i;
+ uint32_t port_modes;
+ uint32_t matches;
+ uint32_t current;
+ int32_t count = 1; /* Default 1-1 mapping */
+ int32_t offset = 1; /* Default starting external port number */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current)) != 0) {
+ /*
+ * No current port mode information
+ * - infer mapping from available modes
+ */
+ if ((rc = efx_mcdi_get_port_modes(enp,
+ &port_modes, NULL)) != 0) {
+ /*
+ * No port mode information available
+ * - use default mapping
+ */
+ goto out;
+ }
+ } else {
+ /* Only need to scan the current mode */
+ port_modes = 1 << current;
+ }
+
+ /*
+ * Infer the internal port -> external port mapping from
+ * the possible port modes for this NIC.
+ */
+ for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
+ if (__ef10_external_port_mappings[i].family !=
+ enp->en_family)
+ continue;
+ matches = (__ef10_external_port_mappings[i].modes_mask &
+ port_modes);
+ if (matches != 0) {
+ count = __ef10_external_port_mappings[i].count;
+ offset = __ef10_external_port_mappings[i].offset;
+ port_modes &= ~matches;
+ }
+ }
+
+ if (port_modes != 0) {
+ /* Some advertised modes are not supported */
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+out:
+ /*
+ * Scale as required by last matched mode and then convert to
+ * correctly offset numbering
+ */
+ *external_portp = (uint8_t)((port / count) + offset);
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_nic_probe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Read and clear any assertion state */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+
+ /* Exit the assertion handler */
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ if (rc != EACCES)
+ goto fail2;
+
+ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
+ goto fail3;
+
+ if ((rc = enop->eno_board_cfg(enp)) != 0)
+ if (rc != EACCES)
+ goto fail4;
+
+ /*
+ * Set default driver config limits (based on board config).
+ *
+ * FIXME: For now allocate a fixed number of VIs which is likely to be
+ * sufficient and small enough to allow multiple functions on the same
+ * port.
+ */
+ edcp->edc_min_vi_count = edcp->edc_max_vi_count =
+ MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
+
+ /* The client driver must configure and enable PIO buffer support */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+
+#if EFSYS_OPT_MAC_STATS
+ /* Wipe the MAC statistics */
+ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
+ goto fail5;
+#endif
+
+#if EFSYS_OPT_LOOPBACK
+ if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
+ goto fail6;
+#endif
+
+#if EFSYS_OPT_MON_STATS
+ if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
+ /* Unprivileged functions do not have access to sensors */
+ if (rc != EACCES)
+ goto fail7;
+ }
+#endif
+
+ encp->enc_features = enp->en_features;
+
+ return (0);
+
+#if EFSYS_OPT_MON_STATS
+fail7:
+ EFSYS_PROBE(fail7);
+#endif
+#if EFSYS_OPT_LOOPBACK
+fail6:
+ EFSYS_PROBE(fail6);
+#endif
+#if EFSYS_OPT_MAC_STATS
+fail5:
+ EFSYS_PROBE(fail5);
+#endif
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_evq_count, max_evq_count;
+ uint32_t min_rxq_count, max_rxq_count;
+ uint32_t min_txq_count, max_txq_count;
+ efx_rc_t rc;
+
+ if (edlp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Get minimum required and maximum usable VI limits */
+ min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
+ min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
+ min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_min_vi_count =
+ MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
+
+ max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
+ max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
+ max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_max_vi_count =
+ MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
+
+ /*
+ * Check limits for sub-allocated piobuf blocks.
+ * PIO is optional, so don't fail if the limits are incorrect.
+ */
+ if ((encp->enc_piobuf_size == 0) ||
+ (encp->enc_piobuf_limit == 0) ||
+ (edlp->edl_min_pio_alloc_size == 0) ||
+ (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
+ /* Disable PIO */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+ } else {
+ uint32_t blk_size, blk_count, blks_per_piobuf;
+
+ blk_size =
+ MAX(edlp->edl_min_pio_alloc_size,
+ encp->enc_piobuf_min_alloc_size);
+
+ blks_per_piobuf = encp->enc_piobuf_size / blk_size;
+ EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
+
+ blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
+
+ /* A zero max pio alloc count means unlimited */
+ if ((edlp->edl_max_pio_alloc_count > 0) &&
+ (edlp->edl_max_pio_alloc_count < blk_count)) {
+ blk_count = edlp->edl_max_pio_alloc_count;
+ }
+
+ edcp->edc_pio_alloc_size = blk_size;
+ edcp->edc_max_piobuf_count =
+ (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
+ MC_CMD_ENTITY_RESET_OUT_LEN)];
+ efx_rc_t rc;
+
+ /* ef10_nic_reset() is called to recover from BADASSERT failures. */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ENTITY_RESET;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
+
+ MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
+ ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ /* Clear RX/TX DMA queue errors */
+ enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_init(
+ __in efx_nic_t *enp)
+{
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_vi_count, max_vi_count;
+ uint32_t vi_count, vi_base, vi_shift;
+ uint32_t i;
+ uint32_t retry;
+ uint32_t delay_us;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Enable reporting of some events (e.g. link change) */
+ if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
+ goto fail1;
+
+ /* Allocate (optional) on-chip PIO buffers */
+ ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
+
+ /*
+ * For best performance, PIO writes should use a write-combined
+ * (WC) memory mapping. Using a separate WC mapping for the PIO
+ * aperture of each VI would be a burden to drivers (and not
+ * possible if the host page size is >4Kbyte).
+ *
+ * To avoid this we use a single uncached (UC) mapping for VI
+ * register access, and a single WC mapping for extra VIs used
+ * for PIO writes.
+ *
+ * Each piobuf must be linked to a VI in the WC mapping, and to
+ * each VI that is using a sub-allocated block from the piobuf.
+ */
+ min_vi_count = edcp->edc_min_vi_count;
+ max_vi_count =
+ edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
+
+ /* Ensure that the previously attached driver's VIs are freed */
+ if ((rc = efx_mcdi_free_vis(enp)) != 0)
+ goto fail2;
+
+ /*
+ * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
+ * fails then retrying the request for fewer VI resources may succeed.
+ */
+ vi_count = 0;
+ if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
+ &vi_base, &vi_count, &vi_shift)) != 0)
+ goto fail3;
+
+ EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
+
+ if (vi_count < min_vi_count) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+
+ enp->en_arch.ef10.ena_vi_base = vi_base;
+ enp->en_arch.ef10.ena_vi_count = vi_count;
+ enp->en_arch.ef10.ena_vi_shift = vi_shift;
+
+ if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
+ /* Not enough extra VIs to map piobufs */
+ ef10_nic_free_piobufs(enp);
+ }
+
+ enp->en_arch.ef10.ena_pio_write_vi_base =
+ vi_count - enp->en_arch.ef10.ena_piobuf_count;
+
+ /* Save UC memory mapping details */
+ enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_pio_write_vi_base);
+ } else {
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_vi_count);
+ }
+
+ /* Save WC memory mapping details */
+ enp->en_arch.ef10.ena_wc_mem_map_offset =
+ enp->en_arch.ef10.ena_uc_mem_map_offset +
+ enp->en_arch.ef10.ena_uc_mem_map_size;
+
+ enp->en_arch.ef10.ena_wc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_piobuf_count);
+
+ /* Link piobufs to extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_link_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i,
+ enp->en_arch.ef10.ena_piobuf_handle[i]);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ /*
+ * Allocate a vAdaptor attached to our upstream vPort/pPort.
+ *
+ * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
+ * driver has yet to bring up the EVB port. See bug 56147. In this case,
+ * retry the request several times after waiting a while. The wait time
+ * between retries starts small (10ms) and exponentially increases.
+ * Total wait time is a little over two seconds. Retry logic in the
+ * client driver may mean this whole loop is repeated if it continues to
+ * fail.
+ */
+ retry = 0;
+ delay_us = 10000;
+ while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
+ (rc != ENOENT)) {
+ /*
+ * Do not retry alloc for PF, or for other errors on
+ * a VF.
+ */
+ goto fail5;
+ }
+
+ /* VF startup before PF is ready. Retry allocation. */
+ if (retry > 5) {
+ /* Too many attempts */
+ rc = EINVAL;
+ goto fail6;
+ }
+ EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
+ EFSYS_SLEEP(delay_us);
+ retry++;
+ if (delay_us < 500000)
+ delay_us <<= 2;
+ }
+
+ enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
+ enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
+
+ return (0);
+
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+
+ ef10_nic_free_piobufs(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *vi_countp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Report VIs that the client driver can use.
+ * Do not include VIs used for PIO buffer writes.
+ */
+ *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
+
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * TODO: Specify host memory mapping alignment and granularity
+ * in efx_drv_limits_t so that they can be taken into account
+ * when allocating extra VIs for PIO writes.
+ */
+ switch (region) {
+ case EFX_REGION_VI:
+ /* UC mapped memory BAR region for VI registers */
+ *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
+ break;
+
+ case EFX_REGION_PIO_WRITE_VI:
+ /* WC mapped memory BAR region for piobuf writes */
+ *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_nic_fini(
+ __in efx_nic_t *enp)
+{
+ uint32_t i;
+ efx_rc_t rc;
+
+ (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
+ enp->en_vport_id = 0;
+
+ /* Unlink piobufs from extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_unlink_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ ef10_nic_free_piobufs(enp);
+
+ (void) efx_mcdi_free_vis(enp);
+ enp->en_arch.ef10.ena_vi_count = 0;
+}
+
+ void
+ef10_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_cfg_free(enp);
+#endif /* EFSYS_OPT_MON_STATS */
+ (void) efx_mcdi_drv_attach(enp, B_FALSE);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+ef10_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ /* FIXME */
+ _NOTE(ARGUNUSED(enp))
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ /* FIXME */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_nvram.c b/drivers/net/sfc/base/ef10_nvram.c
new file mode 100644
index 00000000..3f9d3750
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_nvram.c
@@ -0,0 +1,2385 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+#include "ef10_tlv_layout.h"
+
+/* Cursor for TLV partition format */
+typedef struct tlv_cursor_s {
+ uint32_t *block; /* Base of data block */
+ uint32_t *current; /* Cursor position */
+ uint32_t *end; /* End tag position */
+ uint32_t *limit; /* Last dword of data block */
+} tlv_cursor_t;
+
+typedef struct nvram_partition_s {
+ uint16_t type;
+ uint8_t chip_select;
+ uint8_t flags;
+ /*
+ * The full length of the NVRAM partition.
+ * This is different from tlv_partition_header.total_length,
+ * which can be smaller.
+ */
+ uint32_t length;
+ uint32_t erase_size;
+ uint32_t *data;
+ tlv_cursor_t tlv_cursor;
+} nvram_partition_t;
+
+
+static __checkReturn efx_rc_t
+tlv_validate_state(
+ __inout tlv_cursor_t *cursor);
+
+
+static void
+tlv_init_block(
+ __out uint32_t *block)
+{
+ *block = __CPU_TO_LE_32(TLV_TAG_END);
+}
+
+static uint32_t
+tlv_tag(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t dword, tag;
+
+ dword = cursor->current[0];
+ tag = __LE_TO_CPU_32(dword);
+
+ return (tag);
+}
+
+static size_t
+tlv_length(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t dword, length;
+
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (0);
+
+ dword = cursor->current[1];
+ length = __LE_TO_CPU_32(dword);
+
+ return ((size_t)length);
+}
+
+static uint8_t *
+tlv_value(
+ __in tlv_cursor_t *cursor)
+{
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (NULL);
+
+ return ((uint8_t *)(&cursor->current[2]));
+}
+
+static uint8_t *
+tlv_item(
+ __in tlv_cursor_t *cursor)
+{
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (NULL);
+
+ return ((uint8_t *)cursor->current);
+}
+
+/*
+ * TLV item DWORD length is tag + length + value (rounded up to DWORD)
+ * equivalent to tlv_n_words_for_len in mc-comms tlv.c
+ */
+#define TLV_DWORD_COUNT(length) \
+ (1 + 1 + (((length) + sizeof (uint32_t) - 1) / sizeof (uint32_t)))
+
+
+static uint32_t *
+tlv_next_item_ptr(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t length;
+
+ length = tlv_length(cursor);
+
+ return (cursor->current + TLV_DWORD_COUNT(length));
+}
+
+static __checkReturn efx_rc_t
+tlv_advance(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (cursor->current == cursor->end) {
+ /* No more tags after END tag */
+ cursor->current = NULL;
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ /* Advance to next item and validate */
+ cursor->current = tlv_next_item_ptr(cursor);
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_rewind(
+ __in tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ cursor->current = cursor->block;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_find(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag)
+{
+ efx_rc_t rc;
+
+ rc = tlv_rewind(cursor);
+ while (rc == 0) {
+ if (tlv_tag(cursor) == tag)
+ break;
+
+ rc = tlv_advance(cursor);
+ }
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_validate_state(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ /* Check cursor position */
+ if (cursor->current < cursor->block) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (cursor->current > cursor->limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (tlv_tag(cursor) != TLV_TAG_END) {
+ /* Check current item has space for tag and length */
+ if (cursor->current > (cursor->limit - 2)) {
+ cursor->current = NULL;
+ rc = EFAULT;
+ goto fail3;
+ }
+
+ /* Check we have value data for current item and another tag */
+ if (tlv_next_item_ptr(cursor) > (cursor->limit - 1)) {
+ cursor->current = NULL;
+ rc = EFAULT;
+ goto fail4;
+ }
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_init_cursor(
+ __out tlv_cursor_t *cursor,
+ __in uint32_t *block,
+ __in uint32_t *limit,
+ __in uint32_t *current)
+{
+ cursor->block = block;
+ cursor->limit = limit;
+
+ cursor->current = current;
+ cursor->end = NULL;
+
+ return (tlv_validate_state(cursor));
+}
+
+static __checkReturn efx_rc_t
+tlv_init_cursor_from_size(
+ __out tlv_cursor_t *cursor,
+ __in_bcount(size)
+ uint8_t *block,
+ __in size_t size)
+{
+ uint32_t *limit;
+ limit = (uint32_t *)(block + size - sizeof (uint32_t));
+ return (tlv_init_cursor(cursor, (uint32_t *)block,
+ limit, (uint32_t *)block));
+}
+
+static __checkReturn efx_rc_t
+tlv_init_cursor_at_offset(
+ __out tlv_cursor_t *cursor,
+ __in_bcount(size)
+ uint8_t *block,
+ __in size_t size,
+ __in size_t offset)
+{
+ uint32_t *limit;
+ uint32_t *current;
+ limit = (uint32_t *)(block + size - sizeof (uint32_t));
+ current = (uint32_t *)(block + offset);
+ return (tlv_init_cursor(cursor, (uint32_t *)block, limit, current));
+}
+
+static __checkReturn efx_rc_t
+tlv_require_end(
+ __inout tlv_cursor_t *cursor)
+{
+ uint32_t *pos;
+ efx_rc_t rc;
+
+ if (cursor->end == NULL) {
+ pos = cursor->current;
+ if ((rc = tlv_find(cursor, TLV_TAG_END)) != 0)
+ goto fail1;
+
+ cursor->end = cursor->current;
+ cursor->current = pos;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static size_t
+tlv_block_length_used(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail2;
+
+ /* Return space used (including the END tag) */
+ return (cursor->end + 1 - cursor->block) * sizeof (uint32_t);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (0);
+}
+
+static uint32_t *
+tlv_last_segment_end(
+ __in tlv_cursor_t *cursor)
+{
+ tlv_cursor_t segment_cursor;
+ uint32_t *last_segment_end = cursor->block;
+ uint32_t *segment_start = cursor->block;
+
+ /*
+ * Go through each segment and check that it has an end tag. If there
+ * is no end tag then the previous segment was the last valid one,
+ * so return the pointer to its end tag.
+ */
+ for (;;) {
+ if (tlv_init_cursor(&segment_cursor, segment_start,
+ cursor->limit, segment_start) != 0)
+ break;
+ if (tlv_require_end(&segment_cursor) != 0)
+ break;
+ last_segment_end = segment_cursor.end;
+ segment_start = segment_cursor.end + 1;
+ }
+
+ return (last_segment_end);
+}
+
+
+static uint32_t *
+tlv_write(
+ __in tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size) uint8_t *data,
+ __in size_t size)
+{
+ uint32_t len = size;
+ uint32_t *ptr;
+
+ ptr = cursor->current;
+
+ *ptr++ = __CPU_TO_LE_32(tag);
+ *ptr++ = __CPU_TO_LE_32(len);
+
+ if (len > 0) {
+ ptr[(len - 1) / sizeof (uint32_t)] = 0;
+ memcpy(ptr, data, len);
+ ptr += P2ROUNDUP(len, sizeof (uint32_t)) / sizeof (*ptr);
+ }
+
+ return (ptr);
+}
+
+static __checkReturn efx_rc_t
+tlv_insert(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size)
+ uint8_t *data,
+ __in size_t size)
+{
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail2;
+
+ if (tag == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ delta = TLV_DWORD_COUNT(size);
+ if (last_segment_end + 1 + delta > cursor->limit) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ /* Move data up: new space at cursor->current */
+ memmove(cursor->current + delta, cursor->current,
+ (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end += delta;
+
+ /* Write new TLV item */
+ tlv_write(cursor, tag, data, size);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_delete(
+ __inout tlv_cursor_t *cursor)
+{
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (tlv_tag(cursor) == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ delta = TLV_DWORD_COUNT(tlv_length(cursor));
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail3;
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ /* Shuffle things down, destroying the item at cursor->current */
+ memmove(cursor->current, cursor->current + delta,
+ (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
+ /* Zero the new space at the end of the TLV chain */
+ memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t));
+ /* Adjust the end pointer */
+ cursor->end -= delta;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_modify(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size)
+ uint8_t *data,
+ __in size_t size)
+{
+ uint32_t *pos;
+ unsigned int old_ndwords;
+ unsigned int new_ndwords;
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (tlv_tag(cursor) == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ if (tlv_tag(cursor) != tag) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ old_ndwords = TLV_DWORD_COUNT(tlv_length(cursor));
+ new_ndwords = TLV_DWORD_COUNT(size);
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail4;
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ if (new_ndwords > old_ndwords) {
+ /* Expand space used for TLV item */
+ delta = new_ndwords - old_ndwords;
+ pos = cursor->current + old_ndwords;
+
+ if (last_segment_end + 1 + delta > cursor->limit) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Move up: new space at (cursor->current + old_ndwords) */
+ memmove(pos + delta, pos,
+ (last_segment_end + 1 - pos) * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end += delta;
+
+ } else if (new_ndwords < old_ndwords) {
+ /* Shrink space used for TLV item */
+ delta = old_ndwords - new_ndwords;
+ pos = cursor->current + new_ndwords;
+
+ /* Move down: remove words at (cursor->current + new_ndwords) */
+ memmove(pos, pos + delta,
+ (last_segment_end + 1 - pos) * sizeof (uint32_t));
+
+ /* Zero the new space at the end of the TLV chain */
+ memset(last_segment_end + 1 - delta, 0,
+ delta * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end -= delta;
+ }
+
+ /* Write new data */
+ tlv_write(cursor, tag, data, size);
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint32_t checksum_tlv_partition(
+ __in nvram_partition_t *partition)
+{
+ tlv_cursor_t *cursor;
+ uint32_t *ptr;
+ uint32_t *end;
+ uint32_t csum;
+ size_t len;
+
+ cursor = &partition->tlv_cursor;
+ len = tlv_block_length_used(cursor);
+ EFSYS_ASSERT3U((len & 3), ==, 0);
+
+ csum = 0;
+ ptr = partition->data;
+ end = &ptr[len >> 2];
+
+ while (ptr < end)
+ csum += __LE_TO_CPU_32(*ptr++);
+
+ return (csum);
+}
+
+static __checkReturn efx_rc_t
+tlv_update_partition_len_and_cks(
+ __in tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+ nvram_partition_t partition;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t new_len;
+
+ /*
+ * We just modified the partition, so the total length may not be
+ * valid. Don't use tlv_find(), which performs some sanity checks
+ * that may fail here.
+ */
+ partition.data = cursor->block;
+ memcpy(&partition.tlv_cursor, cursor, sizeof (*cursor));
+ header = (struct tlv_partition_header *)partition.data;
+ /* Sanity check. */
+ if (__LE_TO_CPU_32(header->tag) != TLV_TAG_PARTITION_HEADER) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ new_len = tlv_block_length_used(&partition.tlv_cursor);
+ if (new_len == 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ header->total_length = __CPU_TO_LE_32(new_len);
+ /* Ensure the modified partition always has a new generation count. */
+ header->generation = __CPU_TO_LE_32(
+ __LE_TO_CPU_32(header->generation) + 1);
+
+ trailer = (struct tlv_partition_trailer *)((uint8_t *)header +
+ new_len - sizeof (*trailer) - sizeof (uint32_t));
+ trailer->generation = header->generation;
+ trailer->checksum = __CPU_TO_LE_32(
+ __LE_TO_CPU_32(trailer->checksum) -
+ checksum_tlv_partition(&partition));
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Validate buffer contents (before writing to flash) */
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_validate(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t total_length;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
+
+ if ((partn_data == NULL) || (partn_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* The partition header must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)partn_data,
+ partn_size)) != 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Check TLV partition length (includes the END tag) */
+ total_length = __LE_TO_CPU_32(header->total_length);
+ if (total_length > partn_size) {
+ rc = EFBIG;
+ goto fail4;
+ }
+
+ /* Check partition ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ /* Check generation counts are consistent */
+ if (trailer->generation != header->generation) {
+ rc = EINVAL;
+ goto fail8;
+ }
+
+ /* Verify partition checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(partn_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_create(
+ __in efx_nic_t *enp,
+ __in uint16_t partn_type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ uint32_t *buf = (uint32_t *)partn_data;
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ struct tlv_partition_header header;
+ struct tlv_partition_trailer trailer;
+
+ unsigned int min_buf_size = sizeof (struct tlv_partition_header) +
+ sizeof (struct tlv_partition_trailer);
+ if (partn_size < min_buf_size) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ memset(buf, 0xff, partn_size);
+
+ tlv_init_block(buf);
+ if ((rc = tlv_init_cursor(&cursor, buf,
+ (uint32_t *)((uint8_t *)buf + partn_size),
+ buf)) != 0) {
+ goto fail2;
+ }
+
+ header.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_HEADER);
+ header.length = __CPU_TO_LE_32(sizeof (header) - 8);
+ header.type_id = __CPU_TO_LE_16(partn_type);
+ header.preset = 0;
+ header.generation = __CPU_TO_LE_32(1);
+ header.total_length = 0; /* This will be fixed below. */
+ if ((rc = tlv_insert(
+ &cursor, TLV_TAG_PARTITION_HEADER,
+ (uint8_t *)&header.type_id, sizeof (header) - 8)) != 0)
+ goto fail3;
+ if ((rc = tlv_advance(&cursor)) != 0)
+ goto fail4;
+
+ trailer.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_TRAILER);
+ trailer.length = __CPU_TO_LE_32(sizeof (trailer) - 8);
+ trailer.generation = header.generation;
+ trailer.checksum = 0; /* This will be fixed below. */
+ if ((rc = tlv_insert(&cursor, TLV_TAG_PARTITION_TRAILER,
+ (uint8_t *)&trailer.generation, sizeof (trailer) - 8)) != 0)
+ goto fail5;
+
+ if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
+ goto fail6;
+
+ /* Check that the partition is valid. */
+ if ((rc = ef10_nvram_buffer_validate(enp, partn_type,
+ partn_data, partn_size)) != 0)
+ goto fail7;
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint32_t
+byte_offset(
+ __in uint32_t *position,
+ __in uint32_t *base)
+{
+ return (uint32_t)((uint8_t *)position - (uint8_t *)base);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_find_item_start(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp)
+{
+ /* Read past partition header to find start address of the first key */
+ tlv_cursor_t cursor;
+ efx_rc_t rc;
+
+ /* A PARTITION_HEADER tag must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ *startp = byte_offset(cursor.current, cursor.block);
+
+ if ((rc = tlv_require_end(&cursor)) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_find_end(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp)
+{
+ /* Read to end of partition */
+ tlv_cursor_t cursor;
+ efx_rc_t rc;
+ uint32_t *segment_used;
+
+ _NOTE(ARGUNUSED(offset))
+
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ segment_used = cursor.block;
+
+ /*
+ * Go through each segment and check that it has an end tag. If there
+ * is no end tag then the previous segment was the last valid one,
+ * so return the used space including that end tag.
+ */
+ while (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) {
+ if (tlv_require_end(&cursor) != 0) {
+ if (segment_used == cursor.block) {
+ /*
+ * First segment is corrupt, so there is
+ * no valid data in partition.
+ */
+ rc = EINVAL;
+ goto fail2;
+ }
+ break;
+ }
+ segment_used = cursor.end + 1;
+
+ cursor.current = segment_used;
+ }
+ /* Return space used (including the END tag) */
+ *endp = (segment_used - cursor.block) * sizeof (uint32_t);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+ef10_nvram_buffer_find_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp)
+{
+ /* Find TLV at offset and return key start and length */
+ tlv_cursor_t cursor;
+ uint8_t *key;
+ uint32_t tag;
+
+ if (tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset) != 0) {
+ return (B_FALSE);
+ }
+
+ while ((key = tlv_item(&cursor)) != NULL) {
+ tag = tlv_tag(&cursor);
+ if (tag == TLV_TAG_PARTITION_HEADER ||
+ tag == TLV_TAG_PARTITION_TRAILER) {
+ if (tlv_advance(&cursor) != 0) {
+ break;
+ }
+ continue;
+ }
+ *startp = byte_offset(cursor.current, cursor.block);
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_get_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(item_max_size, *lengthp)
+ caddr_t itemp,
+ __in size_t item_max_size,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ uint32_t item_length;
+
+ if (item_max_size < length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail2;
+ }
+
+ item_length = tlv_length(&cursor);
+ if (length < item_length) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+ memcpy(itemp, tlv_value(&cursor), item_length);
+
+ *lengthp = item_length;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_insert_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ rc = tlv_insert(&cursor, TLV_TAG_LICENSE, (uint8_t *)keyp, length);
+
+ if (rc != 0) {
+ goto fail2;
+ }
+
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_delete_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ _NOTE(ARGUNUSED(length, end))
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ if ((rc = tlv_delete(&cursor)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_finish(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ if ((rc = tlv_require_end(&cursor)) != 0)
+ goto fail2;
+
+ if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+/*
+ * Read and validate a segment from a partition. A segment is a complete
+ * tlv chain between PARTITION_HEADER and PARTITION_END tags. There may
+ * be multiple segments in a partition, so seg_offset allows segments
+ * beyond the first to be read.
+ */
+static __checkReturn efx_rc_t
+ef10_nvram_read_tlv_segment(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in size_t seg_offset,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t total_length;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
+
+ if ((seg_data == NULL) || (max_seg_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Read initial chunk of the segment, starting at offset */
+ if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset, seg_data,
+ EF10_NVRAM_CHUNK,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) {
+ goto fail2;
+ }
+
+ /* A PARTITION_HEADER tag must be the first item at the given offset */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Check TLV segment length (includes the END tag) */
+ total_length = __LE_TO_CPU_32(header->total_length);
+ if (total_length > max_seg_size) {
+ rc = EFBIG;
+ goto fail5;
+ }
+
+ /* Read the remaining segment content */
+ if (total_length > EF10_NVRAM_CHUNK) {
+ if ((rc = ef10_nvram_partn_read_mode(enp, partn,
+ seg_offset + EF10_NVRAM_CHUNK,
+ seg_data + EF10_NVRAM_CHUNK,
+ total_length - EF10_NVRAM_CHUNK,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0)
+ goto fail6;
+ }
+
+ /* Check segment ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+ trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail8;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ /* Check data read from segment is consistent */
+ if (trailer->generation != header->generation) {
+ /*
+ * The partition data may have been modified between successive
+ * MCDI NVRAM_READ requests by the MC or another PCI function.
+ *
+ * The caller must retry to obtain consistent partition data.
+ */
+ rc = EAGAIN;
+ goto fail10;
+ }
+
+ /* Verify segment checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(seg_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail11;
+ }
+
+ return (0);
+
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Read a single TLV item from a host memory
+ * buffer containing a TLV formatted segment.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buf_read_tlv(
+ __in efx_nic_t *enp,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep)
+{
+ tlv_cursor_t cursor;
+ caddr_t data;
+ size_t length;
+ caddr_t value;
+ efx_rc_t rc;
+
+ if ((seg_data == NULL) || (max_seg_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Find requested TLV tag in segment data */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ if ((rc = tlv_find(&cursor, tag)) != 0) {
+ rc = ENOENT;
+ goto fail3;
+ }
+ value = (caddr_t)tlv_value(&cursor);
+ length = tlv_length(&cursor);
+
+ if (length == 0)
+ data = NULL;
+ else {
+ /* Copy out data from TLV item */
+ EFSYS_KMEM_ALLOC(enp->en_esip, length, data);
+ if (data == NULL) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+ memcpy(data, value, length);
+ }
+
+ *datap = data;
+ *sizep = length;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Read a single TLV item from the first segment in a TLV formatted partition */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*seg_sizep) caddr_t *seg_datap,
+ __out size_t *seg_sizep)
+{
+ caddr_t seg_data = NULL;
+ size_t partn_size = 0;
+ size_t length;
+ caddr_t data;
+ int retry;
+ efx_rc_t rc;
+
+ /* Allocate sufficient memory for the entire partition */
+ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0)
+ goto fail1;
+
+ if (partn_size == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, seg_data);
+ if (seg_data == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ /*
+ * Read the first segment in a TLV partition. Retry until consistent
+ * segment contents are returned. Inconsistent data may be read if:
+ * a) the segment contents are invalid
+ * b) the MC has rebooted while we were reading the partition
+ * c) the partition has been modified while we were reading it
+ * Limit retry attempts to ensure forward progress.
+ */
+ retry = 10;
+ do {
+ rc = ef10_nvram_read_tlv_segment(enp, partn, 0,
+ seg_data, partn_size);
+ } while ((rc == EAGAIN) && (--retry > 0));
+
+ if (rc != 0) {
+ /* Failed to obtain consistent segment data */
+ goto fail4;
+ }
+
+ if ((rc = ef10_nvram_buf_read_tlv(enp, seg_data, partn_size,
+ tag, &data, &length)) != 0)
+ goto fail5;
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data);
+
+ *seg_datap = data;
+ *seg_sizep = length;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Compute the size of a segment. */
+ static __checkReturn efx_rc_t
+ef10_nvram_buf_segment_size(
+ __in caddr_t seg_data,
+ __in size_t max_seg_size,
+ __out size_t *seg_sizep)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ uint32_t cksum;
+ int pos;
+ uint32_t *end_tag_position;
+ uint32_t segment_length;
+
+ /* A PARTITION_HEADER tag must be the first item at the given offset */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Check TLV segment length (includes the END tag) */
+ *seg_sizep = __LE_TO_CPU_32(header->total_length);
+ if (*seg_sizep > max_seg_size) {
+ rc = EFBIG;
+ goto fail3;
+ }
+
+ /* Check segment ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ end_tag_position = cursor.current;
+
+ /* Verify segment checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < *seg_sizep; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(seg_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ /*
+ * Calculate total length from HEADER to END tags and compare to
+ * max_seg_size and the total_length field in the HEADER tag.
+ */
+ segment_length = tlv_block_length_used(&cursor);
+
+ if (segment_length > max_seg_size) {
+ rc = EINVAL;
+ goto fail8;
+ }
+
+ if (segment_length != *seg_sizep) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ /* Skip over the first HEADER tag. */
+ rc = tlv_rewind(&cursor);
+ rc = tlv_advance(&cursor);
+
+ while (rc == 0) {
+ if (tlv_tag(&cursor) == TLV_TAG_END) {
+ /* Check that the END tag is the one found earlier. */
+ if (cursor.current != end_tag_position)
+ goto fail10;
+ break;
+ }
+ /* Check for duplicate HEADER tags before the END tag. */
+ if (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail11;
+ }
+
+ rc = tlv_advance(&cursor);
+ }
+ if (rc != 0)
+ goto fail12;
+
+ return (0);
+
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in a host memory buffer containing a TLV
+ * formatted segment. Historically partitions consisted of only one segment.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buf_write_tlv(
+ __inout_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __in_bcount(tag_size) caddr_t tag_data,
+ __in size_t tag_size,
+ __out size_t *total_lengthp)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ uint32_t generation;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ /* A PARTITION_HEADER tag must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Update the TLV chain to contain the new data */
+ if ((rc = tlv_find(&cursor, tag)) == 0) {
+ /* Modify existing TLV item */
+ if ((rc = tlv_modify(&cursor, tag,
+ (uint8_t *)tag_data, tag_size)) != 0)
+ goto fail3;
+ } else {
+ /* Insert a new TLV item before the PARTITION_TRAILER */
+ rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER);
+ if (rc != 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ if ((rc = tlv_insert(&cursor, tag,
+ (uint8_t *)tag_data, tag_size)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ }
+
+ /* Find the trailer tag */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
+
+ /* Update PARTITION_HEADER and PARTITION_TRAILER fields */
+ *total_lengthp = tlv_block_length_used(&cursor);
+ if (*total_lengthp > max_seg_size) {
+ rc = ENOSPC;
+ goto fail7;
+ }
+ generation = __LE_TO_CPU_32(header->generation) + 1;
+
+ header->total_length = __CPU_TO_LE_32(*total_lengthp);
+ header->generation = __CPU_TO_LE_32(generation);
+ trailer->generation = __CPU_TO_LE_32(generation);
+
+ /* Recompute PARTITION_TRAILER checksum */
+ trailer->checksum = 0;
+ cksum = 0;
+ for (pos = 0; (size_t)pos < *total_lengthp; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(seg_data + pos));
+ }
+ trailer->checksum = ~cksum + 1;
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in the first segment of a TLV formatted
+ * dynamic config partition. The first segment is the current active
+ * configuration.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ return ef10_nvram_partn_write_segment_tlv(enp, partn, tag, data,
+ size, B_FALSE);
+}
+
+/*
+ * Read a segment from nvram at the given offset into a buffer (segment_data)
+ * and optionally write a new tag to it.
+ */
+static __checkReturn efx_rc_t
+ef10_nvram_segment_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout caddr_t *seg_datap,
+ __inout size_t *partn_offsetp,
+ __inout size_t *src_remain_lenp,
+ __inout size_t *dest_remain_lenp,
+ __in boolean_t write)
+{
+ efx_rc_t rc;
+ efx_rc_t status;
+ size_t original_segment_size;
+ size_t modified_segment_size;
+
+ /*
+ * Read the segment from NVRAM into the segment_data buffer and validate
+ * it, returning if it does not validate. This is not a failure unless
+ * this is the first segment in a partition. In this case the caller
+ * must propagate the error.
+ */
+ status = ef10_nvram_read_tlv_segment(enp, partn, *partn_offsetp,
+ *seg_datap, *src_remain_lenp);
+ if (status != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ status = ef10_nvram_buf_segment_size(*seg_datap,
+ *src_remain_lenp, &original_segment_size);
+ if (status != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (write) {
+ /* Update the contents of the segment in the buffer */
+ if ((rc = ef10_nvram_buf_write_tlv(*seg_datap,
+ *dest_remain_lenp, tag, data, size,
+ &modified_segment_size)) != 0) {
+ goto fail3;
+ }
+ *dest_remain_lenp -= modified_segment_size;
+ *seg_datap += modified_segment_size;
+ } else {
+ /*
+ * We won't modify this segment, but still need to update the
+ * remaining lengths and pointers.
+ */
+ *dest_remain_lenp -= original_segment_size;
+ *seg_datap += original_segment_size;
+ }
+
+ *partn_offsetp += original_segment_size;
+ *src_remain_lenp -= original_segment_size;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in either the first segment or in all
+ * segments in a TLV formatted dynamic config partition. Dynamic config
+ * partitions on boards that support RFID are divided into a number of segments,
+ * each formatted like a partition, with header, trailer and end tags. The first
+ * segment is the current active configuration.
+ *
+ * The segments are initialised by manftest and each contain a different
+ * configuration e.g. firmware variant. The firmware can be instructed
+ * via RFID to copy a segment to replace the first segment, hence changing the
+ * active configuration. This allows ops to change the configuration of a board
+ * prior to shipment using RFID.
+ *
+ * Changes to the dynamic config may need to be written to all segments (e.g.
+ * firmware versions) or just the first segment (changes to the active
+ * configuration). See SF-111324-SW "The use of RFID in Solarflare Products".
+ * If only the first segment is written the code still needs to be aware of the
+ * possible presence of subsequent segments as writing to a segment may cause
+ * its size to increase, which would overwrite the subsequent segments and
+ * invalidate them.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write_segment_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t all_segments)
+{
+ size_t partn_size = 0;
+ caddr_t partn_data;
+ size_t total_length = 0;
+ efx_rc_t rc;
+ size_t current_offset = 0;
+ size_t remaining_original_length;
+ size_t remaining_modified_length;
+ caddr_t segment_data;
+
+ EFSYS_ASSERT3U(partn, ==, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG);
+
+ /* Allocate sufficient memory for the entire partition */
+ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, partn_data);
+ if (partn_data == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ remaining_original_length = partn_size;
+ remaining_modified_length = partn_size;
+ segment_data = partn_data;
+
+ /* Lock the partition */
+ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
+ goto fail3;
+
+ /* Iterate over each (potential) segment to update it. */
+ do {
+ boolean_t write = all_segments || current_offset == 0;
+
+ rc = ef10_nvram_segment_write_tlv(enp, partn, tag, data, size,
+ &segment_data, &current_offset, &remaining_original_length,
+ &remaining_modified_length, write);
+ if (rc != 0) {
+ if (current_offset == 0) {
+ /*
+ * If no data has been read then the first
+ * segment is invalid, which is an error.
+ */
+ goto fail4;
+ }
+ break;
+ }
+ } while (current_offset < partn_size);
+
+ total_length = segment_data - partn_data;
+
+ /*
+ * We've run out of space. This should actually be dealt with by
+ * ef10_nvram_buf_write_tlv returning ENOSPC.
+ */
+ if (total_length > partn_size) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Erase the whole partition in NVRAM */
+ if ((rc = ef10_nvram_partn_erase(enp, partn, 0, partn_size)) != 0)
+ goto fail6;
+
+ /* Write new partition contents from the buffer to NVRAM */
+ if ((rc = ef10_nvram_partn_write(enp, partn, 0, partn_data,
+ total_length)) != 0)
+ goto fail7;
+
+ /* Unlock the partition */
+ ef10_nvram_partn_unlock(enp, partn, NULL);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ ef10_nvram_partn_unlock(enp, partn, NULL);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Get the size of a NVRAM partition. This is the total size allocated in nvram,
+ * not the data used by the segments in the partition.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, sizep,
+ NULL, NULL, NULL)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
+ while (size > 0) {
+ chunk = MIN(size, EF10_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_read(enp, partn, offset,
+ data, chunk, mode)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ /*
+ * Read requests which come in through the EFX API expect to
+ * read the current, active partition.
+ */
+ return ef10_nvram_partn_read_mode(enp, partn, offset, data, size,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size)
+{
+ efx_rc_t rc;
+ uint32_t erase_size;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
+ &erase_size, NULL)) != 0)
+ goto fail1;
+
+ if (erase_size == 0) {
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0)
+ goto fail2;
+ } else {
+ if (size % erase_size != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ while (size > 0) {
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset,
+ erase_size)) != 0)
+ goto fail4;
+ offset += erase_size;
+ size -= erase_size;
+ }
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ uint32_t write_size;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
+ NULL, &write_size)) != 0)
+ goto fail1;
+
+ if (write_size != 0) {
+ /*
+ * Check that the size is a multiple of the write chunk size if
+ * the write chunk size is available.
+ */
+ if (size % write_size != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ } else {
+ write_size = EF10_NVRAM_CHUNK;
+ }
+
+ while (size > 0) {
+ chunk = MIN(size, write_size);
+
+ if ((rc = efx_mcdi_nvram_write(enp, partn, offset,
+ data, chunk)) != 0) {
+ goto fail3;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *resultp)
+{
+ boolean_t reboot = B_FALSE;
+ efx_rc_t rc;
+
+ if (resultp != NULL)
+ *resultp = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN;
+
+ rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, resultp);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4])
+{
+ struct tlv_partition_version partn_version;
+ size_t size;
+ efx_rc_t rc;
+
+ /* Add or modify partition version TLV item */
+ partn_version.version_w = __CPU_TO_LE_16(version[0]);
+ partn_version.version_x = __CPU_TO_LE_16(version[1]);
+ partn_version.version_y = __CPU_TO_LE_16(version[2]);
+ partn_version.version_z = __CPU_TO_LE_16(version[3]);
+
+ size = sizeof (partn_version) - (2 * sizeof (uint32_t));
+
+ /* Write the version number to all segments in the partition */
+ if ((rc = ef10_nvram_partn_write_segment_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ TLV_TAG_PARTITION_VERSION(partn),
+ (caddr_t)&partn_version.version_w, size, B_TRUE)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef struct ef10_parttbl_entry_s {
+ unsigned int partn;
+ unsigned int port;
+ efx_nvram_type_t nvtype;
+} ef10_parttbl_entry_t;
+
+/* Translate EFX NVRAM types to firmware partition types */
+static ef10_parttbl_entry_t hunt_parttbl[] = {
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 3, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 4, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE}
+};
+
+static ef10_parttbl_entry_t medford_parttbl[] = {
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 2, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 3, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 4, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 1, EFX_NVRAM_UEFIROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 2, EFX_NVRAM_UEFIROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 3, EFX_NVRAM_UEFIROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 4, EFX_NVRAM_UEFIROM}
+};
+
+static __checkReturn efx_rc_t
+ef10_parttbl_get(
+ __in efx_nic_t *enp,
+ __out ef10_parttbl_entry_t **parttblp,
+ __out size_t *parttbl_rowsp)
+{
+ switch (enp->en_family) {
+ case EFX_FAMILY_HUNTINGTON:
+ *parttblp = hunt_parttbl;
+ *parttbl_rowsp = EFX_ARRAY_SIZE(hunt_parttbl);
+ break;
+
+ case EFX_FAMILY_MEDFORD:
+ *parttblp = medford_parttbl;
+ *parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl);
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ return (EINVAL);
+ }
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ ef10_parttbl_entry_t *parttbl = NULL;
+ size_t parttbl_rows = 0;
+ unsigned int i;
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT(partnp != NULL);
+
+ if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) {
+ for (i = 0; i < parttbl_rows; i++) {
+ ef10_parttbl_entry_t *entry = &parttbl[i];
+
+ if (entry->nvtype == type &&
+ entry->port == emip->emi_port) {
+ *partnp = entry->partn;
+ return (0);
+ }
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_DIAG
+
+static __checkReturn efx_rc_t
+ef10_nvram_partn_to_type(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out efx_nvram_type_t *typep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ ef10_parttbl_entry_t *parttbl = NULL;
+ size_t parttbl_rows = 0;
+ unsigned int i;
+
+ EFSYS_ASSERT(typep != NULL);
+
+ if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) {
+ for (i = 0; i < parttbl_rows; i++) {
+ ef10_parttbl_entry_t *entry = &parttbl[i];
+
+ if (entry->partn == partn &&
+ entry->port == emip->emi_port) {
+ *typep = entry->nvtype;
+ return (0);
+ }
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_test(
+ __in efx_nic_t *enp)
+{
+ efx_nvram_type_t type;
+ unsigned int npartns = 0;
+ uint32_t *partns = NULL;
+ size_t size;
+ unsigned int i;
+ efx_rc_t rc;
+
+ /* Read available partitions from NVRAM partition map */
+ size = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM * sizeof (uint32_t);
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, partns);
+ if (partns == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_nvram_partitions(enp, (caddr_t)partns, size,
+ &npartns)) != 0) {
+ goto fail2;
+ }
+
+ for (i = 0; i < npartns; i++) {
+ /* Check if the partition is supported for this port */
+ if ((rc = ef10_nvram_partn_to_type(enp, partns[i], &type)) != 0)
+ continue;
+
+ if ((rc = efx_mcdi_nvram_test(enp, partns[i])) != 0)
+ goto fail3;
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, partns);
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+ EFSYS_KMEM_FREE(enp->en_esip, size, partns);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ efx_rc_t rc;
+
+ /* FIXME: get highest partn version from all ports */
+ /* FIXME: return partn description if available */
+
+ if ((rc = efx_mcdi_nvram_metadata(enp, partn, subtypep,
+ version, NULL, 0)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
+ goto fail1;
+
+ if (chunk_sizep != NULL)
+ *chunk_sizep = EF10_NVRAM_CHUNK;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_partn_unlock(enp, partn, NULL)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_phy.c b/drivers/net/sfc/base/ef10_phy.c
new file mode 100644
index 00000000..81309f29
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_phy.c
@@ -0,0 +1,631 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static void
+mcdi_phy_decode_cap(
+ __in uint32_t mcdi_cap,
+ __out uint32_t *maskp)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_40000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ mask |= (1 << EFX_PHY_CAP_PAUSE);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ mask |= (1 << EFX_PHY_CAP_ASYM);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mask |= (1 << EFX_PHY_CAP_AN);
+
+ *maskp = mask;
+}
+
+static void
+mcdi_phy_decode_link_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t link_flags,
+ __in unsigned int speed,
+ __in unsigned int fcntl,
+ __out efx_link_mode_t *link_modep,
+ __out unsigned int *fcntlp)
+{
+ boolean_t fd = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ boolean_t up = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (!up)
+ *link_modep = EFX_LINK_DOWN;
+ else if (speed == 40000 && fd)
+ *link_modep = EFX_LINK_40000FDX;
+ else if (speed == 10000 && fd)
+ *link_modep = EFX_LINK_10000FDX;
+ else if (speed == 1000)
+ *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+ else if (speed == 100)
+ *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+ else if (speed == 10)
+ *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+ else
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ if (fcntl == MC_CMD_FCNTL_OFF)
+ *fcntlp = 0;
+ else if (fcntl == MC_CMD_FCNTL_RESPOND)
+ *fcntlp = EFX_FCNTL_RESPOND;
+ else if (fcntl == MC_CMD_FCNTL_GENERATE)
+ *fcntlp = EFX_FCNTL_GENERATE;
+ else if (fcntl == MC_CMD_FCNTL_BIDIR)
+ *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ else {
+ EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+ *fcntlp = 0;
+ }
+}
+
+
+ void
+ef10_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int link_flags;
+ unsigned int speed;
+ unsigned int fcntl;
+ efx_link_mode_t link_mode;
+ uint32_t lp_cap_mask;
+
+ /*
+ * Convert the LINKCHANGE speed enumeration into mbit/s, in the
+ * same way as GET_LINK encodes the speed
+ */
+ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
+ case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+ speed = 100;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+ speed = 1000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+ speed = 10000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_40G:
+ speed = 40000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
+ mcdi_phy_decode_link_mode(enp, link_flags, speed,
+ MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
+ &link_mode, &fcntl);
+ mcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
+ &lp_cap_mask);
+
+ /*
+ * It's safe to update ep_lp_cap_mask without the driver's port lock
+ * because presumably any concurrently running efx_port_poll() is
+ * only going to arrive at the same value.
+ *
+ * ep_fcntl has two meanings. It's either the link common fcntl
+ * (if the PHY supports AN), or it's the forced link state. If
+ * the former, it's safe to update the value for the same reason as
+ * for ep_lp_cap_mask. If the latter, then just ignore the value,
+ * because we can race with efx_mac_fcntl_set().
+ */
+ epp->ep_lp_cap_mask = lp_cap_mask;
+ epp->ep_fcntl = fcntl;
+
+ *link_modep = link_mode;
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t power)
+{
+ efx_rc_t rc;
+
+ if (!power)
+ return (0);
+
+ /* Check if the PHY is a zombie */
+ if ((rc = ef10_phy_verify(enp)) != 0)
+ goto fail1;
+
+ enp->en_reset_flags |= EFX_RESET_PHY;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_get_link(
+ __in efx_nic_t *enp,
+ __out ef10_link_state_t *elsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+ &elsp->els_adv_cap_mask);
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+ &elsp->els_lp_cap_mask);
+
+ mcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+ &elsp->els_link_mode, &elsp->els_fcntl);
+
+#if EFSYS_OPT_LOOPBACK
+ /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+
+ elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ elsp->els_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN)];
+ uint32_t cap_mask;
+ unsigned int led_mode;
+ unsigned int speed;
+ boolean_t supported;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_link_control_supported(enp, &supported)) != 0)
+ goto fail1;
+ if (supported == B_FALSE)
+ goto out;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
+
+ cap_mask = epp->ep_adv_cap_mask;
+ MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+ PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+ PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+ PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+ PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+ PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+ PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+ PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+ PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+ PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+ PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+ /* Too many fields for for POPULATE macros, so insert this afterwards */
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1);
+
+#if EFSYS_OPT_LOOPBACK
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
+ epp->ep_loopback_type);
+ switch (epp->ep_loopback_link_mode) {
+ case EFX_LINK_100FDX:
+ speed = 100;
+ break;
+ case EFX_LINK_1000FDX:
+ speed = 1000;
+ break;
+ case EFX_LINK_10000FDX:
+ speed = 10000;
+ break;
+ case EFX_LINK_40000FDX:
+ speed = 40000;
+ break;
+ default:
+ speed = 0;
+ }
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+ speed = 0;
+#endif /* EFSYS_OPT_LOOPBACK */
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+#if EFSYS_OPT_PHY_FLAGS
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ /* And set the blink mode */
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_ID_LED;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+ switch (epp->ep_phy_led_mode) {
+ case EFX_PHY_LED_DEFAULT:
+ led_mode = MC_CMD_LED_DEFAULT;
+ break;
+ case EFX_PHY_LED_OFF:
+ led_mode = MC_CMD_LED_OFF;
+ break;
+ case EFX_PHY_LED_ON:
+ led_mode = MC_CMD_LED_ON;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ led_mode = MC_CMD_LED_DEFAULT;
+ }
+
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
+#else
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+out:
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ uint32_t state;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+ if (state != MC_CMD_PHY_STATE_OK) {
+ if (state != MC_CMD_PHY_STATE_ZOMBIE)
+ EFSYS_PROBE1(mc_pcol_error, int, state);
+ rc = ENOTACTIVE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ _NOTE(ARGUNUSED(enp, ouip))
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+ __checkReturn efx_rc_t
+ef10_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ /* TBD: no stats support in firmware yet */
+ _NOTE(ARGUNUSED(enp, esmp))
+ memset(stat, 0, EFX_PHY_NSTATS * sizeof (*stat));
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+ef10_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_enable_offline(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_start(enp, type)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX)];
+ uint32_t value_mask = 0;
+ uint32_t result;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(type))
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_POLL_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MCDI_CTL_SDU_LEN_MAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (count > 0)
+ (void) memset(valuesp, '\0', count * sizeof (unsigned long));
+
+ result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT);
+
+ if (result == MC_CMD_POLL_BIST_FAILED &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MEM_LEN &&
+ count > EFX_BIST_MEM_ECC_FATAL) {
+ if (valuesp != NULL) {
+ valuesp[EFX_BIST_MEM_TEST] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_TEST);
+ valuesp[EFX_BIST_MEM_ADDR] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ADDR);
+ valuesp[EFX_BIST_MEM_BUS] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_BUS);
+ valuesp[EFX_BIST_MEM_EXPECT] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_EXPECT);
+ valuesp[EFX_BIST_MEM_ACTUAL] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ACTUAL);
+ valuesp[EFX_BIST_MEM_ECC] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC);
+ valuesp[EFX_BIST_MEM_ECC_PARITY] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_PARITY);
+ valuesp[EFX_BIST_MEM_ECC_FATAL] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_FATAL);
+ }
+ value_mask |= (1 << EFX_BIST_MEM_TEST) |
+ (1 << EFX_BIST_MEM_ADDR) |
+ (1 << EFX_BIST_MEM_BUS) |
+ (1 << EFX_BIST_MEM_EXPECT) |
+ (1 << EFX_BIST_MEM_ACTUAL) |
+ (1 << EFX_BIST_MEM_ECC) |
+ (1 << EFX_BIST_MEM_ECC_PARITY) |
+ (1 << EFX_BIST_MEM_ECC_FATAL);
+ } else if (result == MC_CMD_POLL_BIST_FAILED &&
+ encp->enc_phy_type == EFX_PHY_XFI_FARMI &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN &&
+ count > EFX_BIST_FAULT_CODE) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_FAULT_CODE] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST);
+ value_mask |= 1 << EFX_BIST_FAULT_CODE;
+ }
+
+ if (value_maskp != NULL)
+ *value_maskp = value_mask;
+
+ EFSYS_ASSERT(resultp != NULL);
+ if (result == MC_CMD_POLL_BIST_RUNNING)
+ *resultp = EFX_BIST_RESULT_RUNNING;
+ else if (result == MC_CMD_POLL_BIST_PASSED)
+ *resultp = EFX_BIST_RESULT_PASSED;
+ else
+ *resultp = EFX_BIST_RESULT_FAILED;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ /* There is no way to stop BIST on EF10. */
+ _NOTE(ARGUNUSED(enp, type))
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c
new file mode 100644
index 00000000..b65faedd
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_rx.c
@@ -0,0 +1,965 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_rxq(
+ __in efx_nic_t *enp,
+ __in uint32_t size,
+ __in uint32_t target_evq,
+ __in uint32_t label,
+ __in uint32_t instance,
+ __in efsys_mem_t *esmp,
+ __in boolean_t disable_scatter,
+ __in uint32_t ps_bufsize)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_INIT_RXQ_EXT_IN_LEN,
+ MC_CMD_INIT_RXQ_EXT_OUT_LEN)];
+ int npages = EFX_RXQ_NBUFS(size);
+ int i;
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ efx_rc_t rc;
+ uint32_t dma_mode;
+
+ /* If this changes, then the payload size might need to change. */
+ EFSYS_ASSERT3U(MC_CMD_INIT_RXQ_OUT_LEN, ==, 0);
+ EFSYS_ASSERT3U(size, <=, EFX_RXQ_MAXNDESCS);
+
+ if (ps_bufsize > 0)
+ dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
+ else
+ dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_RXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_RXQ_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_RXQ_EXT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, size);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
+ MCDI_IN_POPULATE_DWORD_8(req, INIT_RXQ_EXT_IN_FLAGS,
+ INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
+ INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
+ INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
+ INIT_RXQ_EXT_IN_CRC_MODE, 0,
+ INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter,
+ INIT_RXQ_EXT_IN_DMA_MODE,
+ dma_mode,
+ INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_rxq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN,
+ MC_CMD_FINI_RXQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_RXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+efx_mcdi_rss_context_alloc(
+ __in efx_nic_t *enp,
+ __in efx_rx_scale_support_t scale_support,
+ __in uint32_t num_queues,
+ __out uint32_t *rss_contextp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN,
+ MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)];
+ uint32_t rss_context;
+ uint32_t context_type;
+ efx_rc_t rc;
+
+ if (num_queues > EFX_MAXRSS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (scale_support) {
+ case EFX_RX_SCALE_EXCLUSIVE:
+ context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE;
+ break;
+ case EFX_RX_SCALE_SHARED:
+ context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, context_type);
+ /* NUM_QUEUES is only used to validate indirection table offsets */
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail4;
+ }
+
+ rss_context = MCDI_OUT_DWORD(req, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = ENOENT;
+ goto fail5;
+ }
+
+ *rss_contextp = rss_context;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_free(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_FREE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_FREE_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_FREE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_flags(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in efx_rx_hash_type_t type)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ MCDI_IN_POPULATE_DWORD_4(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN,
+ (type & EFX_RX_HASH_IPV4) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN,
+ (type & EFX_RX_HASH_TCPIPV4) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN,
+ (type & EFX_RX_HASH_IPV6) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN,
+ (type & EFX_RX_HASH_TCPIPV6) ? 1 : 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_key(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ EFSYS_ASSERT3U(n, ==, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ if (n != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ memcpy(MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY),
+ key, n);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_table(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN)];
+ uint8_t *req_table;
+ int i, rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ req_table =
+ MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE);
+
+ for (i = 0;
+ i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN;
+ i++) {
+ req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+
+ __checkReturn efx_rc_t
+ef10_rx_init(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_RX_SCALE
+
+ if (efx_mcdi_rss_context_alloc(enp, EFX_RX_SCALE_EXCLUSIVE, EFX_MAXRSS,
+ &enp->en_rss_context) == 0) {
+ /*
+ * Allocated an exclusive RSS context, which allows both the
+ * indirection table and key to be modified.
+ */
+ enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE;
+ enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
+ } else {
+ /*
+ * Failed to allocate an exclusive RSS context. Continue
+ * operation without support for RSS. The pseudo-header in
+ * received packets will not contain a Toeplitz hash value.
+ */
+ enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE;
+ enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE;
+ }
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+ef10_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ _NOTE(ARGUNUSED(enp, buf_size))
+ return (0);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(alg, ==, EFX_RX_HASHALG_TOEPLITZ);
+ EFSYS_ASSERT3U(insert, ==, B_TRUE);
+
+ if ((alg != EFX_RX_HASHALG_TOEPLITZ) || (insert == B_FALSE)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_flags(enp,
+ enp->en_rss_context, type)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_rc_t rc;
+
+ if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_key(enp,
+ enp->en_rss_context, key, n)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_rc_t rc;
+
+ if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_table(enp,
+ enp->en_rss_context, table, n)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+
+/*
+ * EF10 RX pseudo-header
+ * ---------------------
+ *
+ * Receive packets are prefixed by an (optional) 14 byte pseudo-header:
+ *
+ * +00: Toeplitz hash value.
+ * (32bit little-endian)
+ * +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag.
+ * (16bit big-endian)
+ * +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag.
+ * (16bit big-endian)
+ * +08: Packet Length. Zero if the RX datapath was in cut-through mode.
+ * (16bit little-endian)
+ * +10: MAC timestamp. Zero if timestamping is not enabled.
+ * (32bit little-endian)
+ *
+ * See "The RX Pseudo-header" in SF-109306-TC.
+ */
+
+ __checkReturn efx_rc_t
+ef10_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ /*
+ * The RX pseudo-header contains the packet length, excluding the
+ * pseudo-header. If the hardware receive datapath was operating in
+ * cut-through mode then the length in the RX pseudo-header will be
+ * zero, and the packet length must be obtained from the DMA length
+ * reported in the RX event.
+ */
+ *lengthp = buffer[8] | (buffer[9] << 8);
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn uint32_t
+ef10_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ switch (func) {
+ case EFX_RX_HASHALG_TOEPLITZ:
+ return (buffer[0] |
+ (buffer[1] << 8) |
+ (buffer[2] << 16) |
+ (buffer[3] << 24));
+
+ default:
+ EFSYS_ASSERT(0);
+ return (0);
+ }
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ void
+ef10_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_qword_t qword;
+ unsigned int i;
+ unsigned int offset;
+ unsigned int id;
+
+ /* The client driver must not overfill the queue */
+ EFSYS_ASSERT3U(added - completed + n, <=,
+ EFX_RXQ_LIMIT(erp->er_mask + 1));
+
+ id = added & (erp->er_mask);
+ for (i = 0; i < n; i++) {
+ EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
+ unsigned int, id, efsys_dma_addr_t, addrp[i],
+ size_t, size);
+
+ EFX_POPULATE_QWORD_3(qword,
+ ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_RX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addrp[i] & 0xffffffff),
+ ESF_DZ_RX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addrp[i] >> 32));
+
+ offset = id * sizeof (efx_qword_t);
+ EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
+
+ id = (id + 1) & (erp->er_mask);
+ }
+}
+
+ void
+ef10_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ unsigned int pushed = *pushedp;
+ uint32_t wptr;
+ efx_dword_t dword;
+
+ /* Hardware has alignment restriction for WPTR */
+ wptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN);
+ if (pushed == wptr)
+ return;
+
+ *pushedp = wptr;
+
+ /* Push the populated descriptors out */
+ wptr &= erp->er_mask;
+
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr);
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
+ wptr, pushed & erp->er_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+ erp->er_index, &dword, B_FALSE);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+ void
+ef10_rx_qps_update_credits(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_dword_t dword;
+ efx_evq_rxq_state_t *rxq_state =
+ &erp->er_eep->ee_rxq_state[erp->er_label];
+
+ EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
+
+ if (rxq_state->eers_rx_packed_stream_credits == 0)
+ return;
+
+ EFX_POPULATE_DWORD_3(dword,
+ ERF_DZ_RX_DESC_MAGIC_DOORBELL, 1,
+ ERF_DZ_RX_DESC_MAGIC_CMD,
+ ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS,
+ ERF_DZ_RX_DESC_MAGIC_DATA,
+ rxq_state->eers_rx_packed_stream_credits);
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+ erp->er_index, &dword, B_FALSE);
+
+ rxq_state->eers_rx_packed_stream_credits = 0;
+}
+
+ __checkReturn uint8_t *
+ef10_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp)
+{
+ uint16_t buf_len;
+ uint8_t *pkt_start;
+ efx_qword_t *qwordp;
+ efx_evq_rxq_state_t *rxq_state =
+ &erp->er_eep->ee_rxq_state[erp->er_label];
+
+ EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
+
+ buffer += current_offset;
+ pkt_start = buffer + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE;
+
+ qwordp = (efx_qword_t *)buffer;
+ *timestamp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_TSTAMP);
+ *lengthp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_ORIG_LEN);
+ buf_len = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_CAP_LEN);
+
+ buf_len = P2ROUNDUP(buf_len + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE,
+ EFX_RX_PACKED_STREAM_ALIGNMENT);
+ *next_offsetp =
+ current_offset + buf_len + EFX_RX_PACKED_STREAM_ALIGNMENT;
+
+ EFSYS_ASSERT3U(*next_offsetp, <=, buffer_length);
+ EFSYS_ASSERT3U(current_offset + *lengthp, <, *next_offsetp);
+
+ if ((*next_offsetp ^ current_offset) &
+ EFX_RX_PACKED_STREAM_MEM_PER_CREDIT) {
+ if (rxq_state->eers_rx_packed_stream_credits <
+ EFX_RX_PACKED_STREAM_MAX_CREDITS)
+ rxq_state->eers_rx_packed_stream_credits++;
+ }
+
+ return (pkt_start);
+}
+
+
+#endif
+
+ __checkReturn efx_rc_t
+ef10_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(erp))
+ /* FIXME */
+}
+
+ __checkReturn efx_rc_t
+ef10_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+ boolean_t disable_scatter;
+ unsigned int ps_buf_size;
+
+ _NOTE(ARGUNUSED(id, erp))
+
+ EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
+ EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
+
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
+
+ if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_rxq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ case EFX_RXQ_TYPE_SCATTER:
+ ps_buf_size = 0;
+ break;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ case EFX_RXQ_TYPE_PACKED_STREAM_1M:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M;
+ break;
+ case EFX_RXQ_TYPE_PACKED_STREAM_512K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K;
+ break;
+ case EFX_RXQ_TYPE_PACKED_STREAM_256K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K;
+ break;
+ case EFX_RXQ_TYPE_PACKED_STREAM_128K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K;
+ break;
+ case EFX_RXQ_TYPE_PACKED_STREAM_64K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K;
+ break;
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+ default:
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+ if (ps_buf_size != 0) {
+ /* Check if datapath firmware supports packed stream mode */
+ if (encp->enc_rx_packed_stream_supported == B_FALSE) {
+ rc = ENOTSUP;
+ goto fail4;
+ }
+ /* Check if packed stream allows configurable buffer sizes */
+ if ((type != EFX_RXQ_TYPE_PACKED_STREAM_1M) &&
+ (encp->enc_rx_var_packed_stream_supported == B_FALSE)) {
+ rc = ENOTSUP;
+ goto fail5;
+ }
+ }
+#else /* EFSYS_OPT_RX_PACKED_STREAM */
+ EFSYS_ASSERT(ps_buf_size == 0);
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+ /* Scatter can only be disabled if the firmware supports doing so */
+ if (type == EFX_RXQ_TYPE_SCATTER)
+ disable_scatter = B_FALSE;
+ else
+ disable_scatter = encp->enc_rx_disable_scatter_supported;
+
+ if ((rc = efx_mcdi_init_rxq(enp, n, eep->ee_index, label, index,
+ esmp, disable_scatter, ps_buf_size)) != 0)
+ goto fail6;
+
+ erp->er_eep = eep;
+ erp->er_label = label;
+
+ ef10_ev_rxlabel_init(eep, erp, label, ps_buf_size != 0);
+
+ return (0);
+
+fail6:
+ EFSYS_PROBE(fail6);
+#if EFSYS_OPT_RX_PACKED_STREAM
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_evq_t *eep = erp->er_eep;
+ unsigned int label = erp->er_label;
+
+ ef10_ev_rxlabel_fini(eep, label);
+
+ EFSYS_ASSERT(enp->en_rx_qcount != 0);
+ --enp->en_rx_qcount;
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+}
+
+ void
+ef10_rx_fini(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_RX_SCALE
+ if (enp->en_rss_support != EFX_RX_SCALE_UNAVAILABLE) {
+ (void) efx_mcdi_rss_context_free(enp, enp->en_rss_context);
+ }
+ enp->en_rss_context = 0;
+ enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE;
+#else
+ _NOTE(ARGUNUSED(enp))
+#endif /* EFSYS_OPT_RX_SCALE */
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_tlv_layout.h b/drivers/net/sfc/base/ef10_tlv_layout.h
new file mode 100644
index 00000000..7d099b81
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_tlv_layout.h
@@ -0,0 +1,941 @@
+/**************************************************************************\
+*//*! \file
+** <L5_PRIVATE L5_SOURCE>
+** \author mjs
+** \brief TLV item layouts for EF10 static and dynamic config in NVRAM
+** \date 2012/11/20
+** \cop (c) Solarflare Communications Inc.
+** </L5_PRIVATE>
+*//*
+\**************************************************************************/
+
+/* These structures define the layouts for the TLV items stored in static and
+ * dynamic configuration partitions in NVRAM for EF10 (Huntington etc.).
+ *
+ * They contain the same sort of information that was kept in the
+ * siena_mc_static_config_hdr_t and siena_mc_dynamic_config_hdr_t structures
+ * (defined in <ci/mgmt/mc_flash_layout.h> and <ci/mgmt/mc_dynamic_cfg.h>) for
+ * Siena.
+ *
+ * These are used directly by the MC and should also be usable directly on host
+ * systems which are little-endian and do not do strange things with structure
+ * padding. (Big-endian host systems will require some byte-swapping.)
+ *
+ * -----
+ *
+ * Please refer to SF-108797-SW for a general overview of the TLV partition
+ * format.
+ *
+ * -----
+ *
+ * The current tag IDs have a general structure: with the exception of the
+ * special values defined in the document, they are of the form 0xLTTTNNNN,
+ * where:
+ *
+ * - L is a location, indicating where this tag is expected to be found:
+ * 0: static configuration
+ * 1: dynamic configuration
+ * 2: firmware internal use
+ * 3: license partition
+ *
+ * - TTT is a type, which is just a unique value. The same type value
+ * might appear in both locations, indicating a relationship between
+ * the items (e.g. static and dynamic VPD below).
+ *
+ * - NNNN is an index of some form. Some item types are per-port, some
+ * are per-PF, some are per-partition-type.
+ *
+ * -----
+ *
+ * As with the previous Siena structures, each structure here is laid out
+ * carefully: values are aligned to their natural boundary, with explicit
+ * padding fields added where necessary. (No, technically this does not
+ * absolutely guarantee portability. But, in practice, compilers are generally
+ * sensible enough not to introduce completely pointless padding, and it works
+ * well enough.)
+ */
+
+
+#ifndef CI_MGMT_TLV_LAYOUT_H
+#define CI_MGMT_TLV_LAYOUT_H
+
+
+/* ----------------------------------------------------------------------------
+ * General structure (defined by SF-108797-SW)
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* The "end" tag.
+ *
+ * (Note that this is *not* followed by length or value fields: anything after
+ * the tag itself is irrelevant.)
+ */
+
+#define TLV_TAG_END (0xEEEEEEEE)
+
+
+/* Other special reserved tag values.
+ */
+
+#define TLV_TAG_SKIP (0x00000000)
+#define TLV_TAG_INVALID (0xFFFFFFFF)
+
+
+/* TLV partition header.
+ *
+ * In a TLV partition, this must be the first item in the sequence, at offset
+ * 0.
+ */
+
+#define TLV_TAG_PARTITION_HEADER (0xEF10DA7A)
+
+struct tlv_partition_header {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t type_id;
+/* 0 indicates the default segment (always located at offset 0), while other values
+ * are for RFID-selectable presets that should immediately follow the default segment.
+ * The default segment may also have preset > 0, which means that it is a preset
+ * selected through an RFID command and copied by FW to the location at offset 0. */
+ uint16_t preset;
+ uint32_t generation;
+ uint32_t total_length;
+};
+
+
+/* TLV partition trailer.
+ *
+ * In a TLV partition, this must be the last item in the sequence, immediately
+ * preceding the TLV_TAG_END word.
+ */
+
+#define TLV_TAG_PARTITION_TRAILER (0xEF101A57)
+
+struct tlv_partition_trailer {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t generation;
+ uint32_t checksum;
+};
+
+
+/* Appendable TLV partition header.
+ *
+ * In an appendable TLV partition, this must be the first item in the sequence,
+ * at offset 0. (Note that, unlike the configuration partitions, there is no
+ * trailer before the TLV_TAG_END word.)
+ */
+
+#define TLV_TAG_APPENDABLE_PARTITION_HEADER (0xEF10ADA7)
+
+struct tlv_appendable_partition_header {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t type_id;
+ uint16_t reserved;
+};
+
+
+/* ----------------------------------------------------------------------------
+ * Configuration items
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* NIC global capabilities.
+ */
+
+#define TLV_TAG_GLOBAL_CAPABILITIES (0x00010000)
+
+struct tlv_global_capabilities {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t flags;
+};
+
+
+/* Siena-style per-port MAC address allocation.
+ *
+ * There are <count> addresses, starting at <base_address> and incrementing
+ * by adding <stride> to the low-order byte(s).
+ *
+ * (See also TLV_TAG_GLOBAL_MAC for an alternative, specifying a global pool
+ * of contiguous MAC addresses for the firmware to allocate as it sees fit.)
+ */
+
+#define TLV_TAG_PORT_MAC(port) (0x00020000 + (port))
+
+struct tlv_port_mac {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t base_address[6];
+ uint16_t reserved;
+ uint16_t count;
+ uint16_t stride;
+};
+
+
+/* Static VPD.
+ *
+ * This is the portion of VPD which is set at manufacturing time and not
+ * expected to change. It is formatted as a standard PCI VPD block. There are
+ * global and per-pf TLVs for this, the global TLV is new for Medford and is
+ * used in preference to the per-pf TLV.
+ */
+
+#define TLV_TAG_PF_STATIC_VPD(pf) (0x00030000 + (pf))
+
+struct tlv_pf_static_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+#define TLV_TAG_GLOBAL_STATIC_VPD (0x001f0000)
+
+struct tlv_global_static_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+
+/* Dynamic VPD.
+ *
+ * This is the portion of VPD which may be changed (e.g. by firmware updates).
+ * It is formatted as a standard PCI VPD block. There are global and per-pf TLVs
+ * for this, the global TLV is new for Medford and is used in preference to the
+ * per-pf TLV.
+ */
+
+#define TLV_TAG_PF_DYNAMIC_VPD(pf) (0x10030000 + (pf))
+
+struct tlv_pf_dynamic_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+#define TLV_TAG_GLOBAL_DYNAMIC_VPD (0x10200000)
+
+struct tlv_global_dynamic_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+
+/* "DBI" PCI config space changes.
+ *
+ * This is a set of edits made to the default PCI config space values before
+ * the device is allowed to enumerate. There are global and per-pf TLVs for
+ * this, the global TLV is new for Medford and is used in preference to the
+ * per-pf TLV.
+ */
+
+#define TLV_TAG_PF_DBI(pf) (0x00040000 + (pf))
+
+struct tlv_pf_dbi {
+ uint32_t tag;
+ uint32_t length;
+ struct {
+ uint16_t addr;
+ uint16_t byte_enables;
+ uint32_t value;
+ } items[];
+};
+
+
+#define TLV_TAG_GLOBAL_DBI (0x00210000)
+
+struct tlv_global_dbi {
+ uint32_t tag;
+ uint32_t length;
+ struct {
+ uint16_t addr;
+ uint16_t byte_enables;
+ uint32_t value;
+ } items[];
+};
+
+
+/* Partition subtype codes.
+ *
+ * A subtype may optionally be stored for each type of partition present in
+ * the NVRAM. For example, this may be used to allow a generic firmware update
+ * utility to select a specific variant of firmware for a specific variant of
+ * board.
+ *
+ * The description[] field is an optional string which is returned in the
+ * MC_CMD_NVRAM_METADATA response if present.
+ */
+
+#define TLV_TAG_PARTITION_SUBTYPE(type) (0x00050000 + (type))
+
+struct tlv_partition_subtype {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t subtype;
+ uint8_t description[];
+};
+
+
+/* Partition version codes.
+ *
+ * A version may optionally be stored for each type of partition present in
+ * the NVRAM. This provides a standard way of tracking the currently stored
+ * version of each of the various component images.
+ */
+
+#define TLV_TAG_PARTITION_VERSION(type) (0x10060000 + (type))
+
+struct tlv_partition_version {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t version_w;
+ uint16_t version_x;
+ uint16_t version_y;
+ uint16_t version_z;
+};
+
+/* Global PCIe configuration */
+
+#define TLV_TAG_GLOBAL_PCIE_CONFIG (0x10070000)
+
+struct tlv_pcie_config {
+ uint32_t tag;
+ uint32_t length;
+ int16_t max_pf_number; /**< Largest PF RID (lower PFs may be hidden) */
+ uint16_t pf_aper; /**< BIU aperture for PF BAR2 */
+ uint16_t vf_aper; /**< BIU aperture for VF BAR0 */
+ uint16_t int_aper; /**< BIU aperture for PF BAR4 and VF BAR2 */
+#define TLV_MAX_PF_DEFAULT (-1) /* Use FW default for largest PF RID */
+#define TLV_APER_DEFAULT (0xFFFF) /* Use FW default for a given aperture */
+};
+
+/* Per-PF configuration. Note that not all these fields are necessarily useful
+ * as the apertures are constrained by the BIU settings (the one case we do
+ * use is to make BAR2 bigger than the BIU thinks to reserve space), but we can
+ * tidy things up later */
+
+#define TLV_TAG_PF_PCIE_CONFIG(pf) (0x10080000 + (pf))
+
+struct tlv_per_pf_pcie_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t vfs_total;
+ uint8_t port_allocation;
+ uint16_t vectors_per_pf;
+ uint16_t vectors_per_vf;
+ uint8_t pf_bar0_aperture;
+ uint8_t pf_bar2_aperture;
+ uint8_t vf_bar0_aperture;
+ uint8_t vf_base;
+ uint16_t supp_pagesz;
+ uint16_t msix_vec_base;
+};
+
+
+/* Development ONLY. This is a single TLV tag for all the gubbins
+ * that can be set through the MC command-line other than the PCIe
+ * settings. This is a temporary measure. */
+#define TLV_TAG_TMP_GUBBINS (0x10090000) /* legacy symbol - do not use */
+#define TLV_TAG_TMP_GUBBINS_HUNT TLV_TAG_TMP_GUBBINS
+
+struct tlv_tmp_gubbins {
+ uint32_t tag;
+ uint32_t length;
+ /* Consumed by dpcpu.c */
+ uint64_t tx0_tags; /* Bitmap */
+ uint64_t tx1_tags; /* Bitmap */
+ uint64_t dl_tags; /* Bitmap */
+ uint32_t flags;
+#define TLV_DPCPU_TX_STRIPE (1) /* No longer used, has no effect */
+#define TLV_DPCPU_BIU_TAGS (2) /* Use BIU tag manager */
+#define TLV_DPCPU_TX0_TAGS (4) /* tx0_tags is valid */
+#define TLV_DPCPU_TX1_TAGS (8) /* tx1_tags is valid */
+#define TLV_DPCPU_DL_TAGS (16) /* dl_tags is valid */
+ /* Consumed by features.c */
+ uint32_t dut_features; /* All 1s -> leave alone */
+ int8_t with_rmon; /* 0 -> off, 1 -> on, -1 -> leave alone */
+ /* Consumed by clocks_hunt.c */
+ int8_t clk_mode; /* 0 -> off, 1 -> on, -1 -> leave alone */
+ /* No longer used, superseded by TLV_TAG_DESCRIPTOR_CACHE_CONFIG. */
+ int8_t rx_dc_size; /* -1 -> leave alone */
+ int8_t tx_dc_size;
+ int16_t num_q_allocs;
+};
+
+/* Global port configuration
+ *
+ * This is now deprecated in favour of a platform-provided default
+ * and dynamic config override via tlv_global_port_options.
+ */
+#define TLV_TAG_GLOBAL_PORT_CONFIG (0x000a0000)
+
+struct tlv_global_port_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t ports_per_core;
+ uint32_t max_port_speed;
+};
+
+
+/* Firmware options.
+ *
+ * This is intended for user-configurable selection of optional firmware
+ * features and variants.
+ *
+ * Initially, this consists only of the satellite CPU firmware variant
+ * selection, but this tag could be extended in the future (using the
+ * tag length to determine whether additional fields are present).
+ */
+
+#define TLV_TAG_FIRMWARE_OPTIONS (0x100b0000)
+
+struct tlv_firmware_options {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t firmware_variant;
+#define TLV_FIRMWARE_VARIANT_DRIVER_SELECTED (0xffffffff)
+
+/* These are the values for overriding the driver's choice; the definitions
+ * are taken from MCDI so that they don't get out of step. Include
+ * <ci/mgmt/mc_driver_pcol.h> or the equivalent from your driver's tree if
+ * you need to use these constants.
+ */
+#define TLV_FIRMWARE_VARIANT_FULL_FEATURED MC_CMD_FW_FULL_FEATURED
+#define TLV_FIRMWARE_VARIANT_LOW_LATENCY MC_CMD_FW_LOW_LATENCY
+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM MC_CMD_FW_PACKED_STREAM
+#define TLV_FIRMWARE_VARIANT_HIGH_TX_RATE MC_CMD_FW_HIGH_TX_RATE
+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM_HASH_MODE_1 \
+ MC_CMD_FW_PACKED_STREAM_HASH_MODE_1
+#define TLV_FIRMWARE_VARIANT_RULES_ENGINE MC_CMD_FW_RULES_ENGINE
+};
+
+/* Voltage settings
+ *
+ * Intended for boards with A0 silicon where the core voltage may
+ * need tweaking. Most likely set once when the pass voltage is
+ * determined. */
+
+#define TLV_TAG_0V9_SETTINGS (0x000c0000)
+
+struct tlv_0v9_settings {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t flags; /* Boards with high 0v9 settings may need active cooling */
+#define TLV_TAG_0V9_REQUIRES_FAN (1)
+ uint16_t target_voltage; /* In millivolts */
+ /* Since the limits are meant to be centred to the target (and must at least
+ * contain it) they need setting as well. */
+ uint16_t warn_low; /* In millivolts */
+ uint16_t warn_high; /* In millivolts */
+ uint16_t panic_low; /* In millivolts */
+ uint16_t panic_high; /* In millivolts */
+};
+
+
+/* Clock configuration */
+
+#define TLV_TAG_CLOCK_CONFIG (0x000d0000) /* legacy symbol - do not use */
+#define TLV_TAG_CLOCK_CONFIG_HUNT TLV_TAG_CLOCK_CONFIG
+
+struct tlv_clock_config {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t clk_sys; /* MHz */
+ uint16_t clk_dpcpu; /* MHz */
+ uint16_t clk_icore; /* MHz */
+ uint16_t clk_pcs; /* MHz */
+};
+
+#define TLV_TAG_CLOCK_CONFIG_MEDFORD (0x00100000)
+
+struct tlv_clock_config_medford {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t clk_sys; /* MHz */
+ uint16_t clk_mc; /* MHz */
+ uint16_t clk_rmon; /* MHz */
+ uint16_t clk_vswitch; /* MHz */
+ uint16_t clk_dpcpu; /* MHz */
+ uint16_t clk_pcs; /* MHz */
+};
+
+
+/* EF10-style global pool of MAC addresses.
+ *
+ * There are <count> addresses, starting at <base_address>, which are
+ * contiguous. Firmware is responsible for allocating addresses from this
+ * pool to ports / PFs as appropriate.
+ */
+
+#define TLV_TAG_GLOBAL_MAC (0x000e0000)
+
+struct tlv_global_mac {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t base_address[6];
+ uint16_t reserved1;
+ uint16_t count;
+ uint16_t reserved2;
+};
+
+#define TLV_TAG_ATB_0V9_TARGET (0x000f0000) /* legacy symbol - do not use */
+#define TLV_TAG_ATB_0V9_TARGET_HUNT TLV_TAG_ATB_0V9_TARGET
+
+/* The target value for the 0v9 power rail measured on-chip at the
+ * analogue test bus */
+struct tlv_0v9_atb_target {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t millivolts;
+ uint16_t reserved;
+};
+
+/* Factory settings for amplitude calibration of the PCIE TX serdes */
+#define TLV_TAG_TX_PCIE_AMP_CONFIG (0x00220000)
+struct tlv_pcie_tx_amp_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t quad_tx_imp2k[4];
+ uint8_t quad_tx_imp50[4];
+ uint8_t lane_amp[16];
+};
+
+
+/* Global PCIe configuration, second revision. This represents the visible PFs
+ * by a bitmap rather than having the number of the highest visible one. As such
+ * it can (for a 16-PF chip) represent a superset of what TLV_TAG_GLOBAL_PCIE_CONFIG
+ * can and it should be used in place of that tag in future (but compatibility with
+ * the old tag will be left in the firmware indefinitely). */
+
+#define TLV_TAG_GLOBAL_PCIE_CONFIG_R2 (0x10100000)
+
+struct tlv_pcie_config_r2 {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t visible_pfs; /**< Bitmap of visible PFs */
+ uint16_t pf_aper; /**< BIU aperture for PF BAR2 */
+ uint16_t vf_aper; /**< BIU aperture for VF BAR0 */
+ uint16_t int_aper; /**< BIU aperture for PF BAR4 and VF BAR2 */
+};
+
+/* Dynamic port mode.
+ *
+ * Allows selecting alternate port configuration for platforms that support it
+ * (e.g. 1x40G vs 2x10G on Milano, 1x40G vs 4x10G on Medford). This affects the
+ * number of externally visible ports (and, hence, PF to port mapping), so must
+ * be done at boot time.
+ *
+ * This tag supercedes tlv_global_port_config.
+ */
+
+#define TLV_TAG_GLOBAL_PORT_MODE (0x10110000)
+
+struct tlv_global_port_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t port_mode;
+#define TLV_PORT_MODE_DEFAULT (0xffffffff) /* Default for given platform */
+#define TLV_PORT_MODE_10G (0) /* 10G, single SFP/10G-KR */
+#define TLV_PORT_MODE_40G (1) /* 40G, single QSFP/40G-KR */
+#define TLV_PORT_MODE_10G_10G (2) /* 2x10G, dual SFP/10G-KR or single QSFP */
+#define TLV_PORT_MODE_40G_40G (3) /* 40G + 40G, dual QSFP/40G-KR (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G (4) /* 2x10G + 2x10G, quad SFP/10G-KR or dual QSFP (Greenport) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1 (4) /* 4x10G, single QSFP, cage 0 (Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5) /* 4x10G, single QSFP, cage 0 (Medford) OBSOLETE DO NOT USE */
+#define TLV_PORT_MODE_40G_10G_10G (6) /* 1x40G + 2x10G, dual QSFP (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_40G (7) /* 2x10G + 1x40G, dual QSFP (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8) /* 4x10G, single QSFP, cage 1 (Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 (9) /* 2x10G + 2x10G, dual QSFP (Medford) */
+#define TLV_PORT_MODE_MAX TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2
+};
+
+/* Type of the v-switch created implicitly by the firmware */
+
+#define TLV_TAG_VSWITCH_TYPE(port) (0x10120000 + (port))
+
+struct tlv_vswitch_type {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t vswitch_type;
+#define TLV_VSWITCH_TYPE_DEFAULT (0xffffffff) /* Firmware default; equivalent to no TLV present for a given port */
+#define TLV_VSWITCH_TYPE_NONE (0)
+#define TLV_VSWITCH_TYPE_VLAN (1)
+#define TLV_VSWITCH_TYPE_VEB (2)
+#define TLV_VSWITCH_TYPE_VEPA (3)
+#define TLV_VSWITCH_TYPE_MUX (4)
+#define TLV_VSWITCH_TYPE_TEST (5)
+};
+
+/* A VLAN tag for the v-port created implicitly by the firmware */
+
+#define TLV_TAG_VPORT_VLAN_TAG(pf) (0x10130000 + (pf))
+
+struct tlv_vport_vlan_tag {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t vlan_tag;
+#define TLV_VPORT_NO_VLAN_TAG (0xFFFFFFFF) /* Default in the absence of TLV for a given PF */
+};
+
+/* Offset to be applied to the 0v9 setting, wherever it came from */
+
+#define TLV_TAG_ATB_0V9_OFFSET (0x10140000)
+
+struct tlv_0v9_atb_offset {
+ uint32_t tag;
+ uint32_t length;
+ int16_t offset_millivolts;
+ uint16_t reserved;
+};
+
+/* A privilege mask given on reset to all non-admin PCIe functions (that is other than first-PF-per-port).
+ * The meaning of particular bits is defined in mcdi_ef10.yml under MC_CMD_PRIVILEGE_MASK, see also bug 44583.
+ * TLV_TAG_PRIVILEGE_MASK_ADD specifies bits that should be added (ORed) to firmware default while
+ * TLV_TAG_PRIVILEGE_MASK_REM specifies bits that should be removed (ANDed) from firmware default:
+ * Initial_privilege_mask = (firmware_default_mask | privilege_mask_add) & ~privilege_mask_rem */
+
+#define TLV_TAG_PRIVILEGE_MASK (0x10150000) /* legacy symbol - do not use */
+
+struct tlv_privilege_mask { /* legacy structure - do not use */
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask;
+};
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD (0x10150000)
+
+struct tlv_privilege_mask_add {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+#define TLV_TAG_PRIVILEGE_MASK_REM (0x10160000)
+
+struct tlv_privilege_mask_rem {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_rem;
+};
+
+/* Additional privileges given to all PFs.
+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD_ALL_PFS (0x10190000)
+
+struct tlv_privilege_mask_add_all_pfs {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+/* Additional privileges given to a selected PF.
+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD_SINGLE_PF(pf) (0x101A0000 + (pf))
+
+struct tlv_privilege_mask_add_single_pf {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+/* Turning on/off the PFIOV mode.
+ * This tag only takes effect if TLV_TAG_VSWITCH_TYPE is missing or set to DEFAULT. */
+
+#define TLV_TAG_PFIOV(port) (0x10170000 + (port))
+
+struct tlv_pfiov {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t pfiov;
+#define TLV_PFIOV_OFF (0) /* Default */
+#define TLV_PFIOV_ON (1)
+};
+
+/* Multicast filter chaining mode selection.
+ *
+ * When enabled, multicast packets are delivered to all recipients of all
+ * matching multicast filters, with the exception that IP multicast filters
+ * will steal traffic from MAC multicast filters on a per-function basis.
+ * (New behaviour.)
+ *
+ * When disabled, multicast packets will always be delivered only to the
+ * recipients of the highest priority matching multicast filter.
+ * (Legacy behaviour.)
+ *
+ * The DEFAULT mode (which is the same as the tag not being present at all)
+ * is equivalent to ENABLED in production builds, and DISABLED in eftest
+ * builds.
+ *
+ * This option is intended to provide run-time control over this feature
+ * while it is being stabilised and may be withdrawn at some point in the
+ * future; the new behaviour is intended to become the standard behaviour.
+ */
+
+#define TLV_TAG_MCAST_FILTER_CHAINING (0x10180000)
+
+struct tlv_mcast_filter_chaining {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+#define TLV_MCAST_FILTER_CHAINING_DEFAULT (0xffffffff)
+#define TLV_MCAST_FILTER_CHAINING_DISABLED (0)
+#define TLV_MCAST_FILTER_CHAINING_ENABLED (1)
+};
+
+/* Pacer rate limit per PF */
+#define TLV_TAG_RATE_LIMIT(pf) (0x101b0000 + (pf))
+
+struct tlv_rate_limit {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t rate_mbps;
+};
+
+/* OCSD Enable/Disable
+ *
+ * This setting allows OCSD to be disabled. This is a requirement for HP
+ * servers to support PCI passthrough for virtualization.
+ *
+ * The DEFAULT mode (which is the same as the tag not being present) is
+ * equivalent to ENABLED.
+ *
+ * This option is not used by the MCFW, and is entirely handled by the various
+ * drivers that support OCSD, by reading the setting before they attempt
+ * to enable OCSD.
+ *
+ * bit0: OCSD Disabled/Enabled
+ */
+
+#define TLV_TAG_OCSD (0x101C0000)
+
+struct tlv_ocsd {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+#define TLV_OCSD_DISABLED 0
+#define TLV_OCSD_ENABLED 1 /* Default */
+};
+
+/* Descriptor cache config.
+ *
+ * Sets the sizes of the TX and RX descriptor caches as a power of 2. It also
+ * sets the total number of VIs. When the number of VIs is reduced VIs are taken
+ * away from the highest numbered port first, so a vi_count of 1024 means 1024
+ * VIs on the first port and 0 on the second (on a Torino).
+ */
+
+#define TLV_TAG_DESCRIPTOR_CACHE_CONFIG (0x101d0000)
+
+struct tlv_descriptor_cache_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t rx_desc_cache_size;
+ uint8_t tx_desc_cache_size;
+ uint16_t vi_count;
+};
+#define TLV_DESC_CACHE_DEFAULT (0xff)
+#define TLV_VI_COUNT_DEFAULT (0xffff)
+
+/* RX event merging config (read batching).
+ *
+ * Sets the global maximum number of events for the merging bins, and the
+ * global timeout configuration for the bins.
+ */
+
+#define TLV_TAG_RX_EVENT_MERGING_CONFIG (0x101e0000)
+
+struct tlv_rx_event_merging_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t max_events;
+#define TLV_RX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)
+ uint32_t timeout_ns;
+};
+#define TLV_RX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff)
+#define TLV_RX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff)
+
+#define TLV_TAG_PCIE_LINK_SETTINGS (0x101f0000)
+struct tlv_pcie_link_settings {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t gen; /* Target PCIe generation: 1, 2, 3 */
+ uint16_t width; /* Number of lanes */
+};
+
+/* TX event merging config.
+ *
+ * Sets the global maximum number of events for the merging bins, and the
+ * global timeout configuration for the bins, and the global timeout for
+ * empty queues.
+ */
+#define TLV_TAG_TX_EVENT_MERGING_CONFIG (0x10210000)
+struct tlv_tx_event_merging_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t max_events;
+#define TLV_TX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)
+ uint32_t timeout_ns;
+ uint32_t qempty_timeout_ns; /* Medford only */
+};
+#define TLV_TX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff)
+#define TLV_TX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff)
+#define TLV_TX_EVENT_MERGING_QEMPTY_TIMEOUT_NS_DEFAULT (0xffffffff)
+
+#define TLV_TAG_LICENSE (0x30800000)
+
+typedef struct tlv_license {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t data[];
+} tlv_license_t;
+
+/* TSA NIC IP address configuration
+ *
+ * Sets the TSA NIC IP address statically via configuration tool or dynamically
+ * via DHCP via snooping based on the mode selection (0=Static, 1=DHCP, 2=Snoop)
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000)
+
+#define TLV_TSAN_IP_MODE_STATIC (0)
+#define TLV_TSAN_IP_MODE_DHCP (1)
+#define TLV_TSAN_IP_MODE_SNOOP (2)
+typedef struct tlv_tsan_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+ uint32_t ip;
+ uint32_t netmask;
+ uint32_t gateway;
+ uint32_t port;
+ uint32_t bind_retry; /* DEPRECATED */
+ uint32_t bind_bkout; /* DEPRECATED */
+} tlv_tsan_config_t;
+
+/* TSA Controller IP address configuration
+ *
+ * Sets the TSA Controller IP address statically via configuration tool
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000)
+
+#define TLV_MAX_TSACS (4)
+typedef struct tlv_tsac_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t num_tsacs;
+ uint32_t ip[TLV_MAX_TSACS];
+ uint32_t port[TLV_MAX_TSACS];
+} tlv_tsac_config_t;
+
+/* Binding ticket
+ *
+ * Sets the TSA NIC binding ticket used for binding process between the TSA NIC
+ * and the TSA Controller
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_BINDING_TICKET (0x10240000)
+
+typedef struct tlv_binding_ticket {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_binding_ticket_t;
+
+/* Solarflare private key (DEPRECATED)
+ *
+ * Sets the Solareflare private key used for signing during the binding process
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_PIK_SF (0x10250000) /* DEPRECATED */
+
+typedef struct tlv_pik_sf {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_pik_sf_t;
+
+/* CA root certificate
+ *
+ * Sets the CA root certificate used for TSA Controller verfication during
+ * TLS connection setup between the TSA NIC and the TSA Controller
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000)
+
+typedef struct tlv_ca_root_cert {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_ca_root_cert_t;
+
+/* Tx vFIFO Low latency configuration
+ *
+ * To keep the desired booting behaviour for the switch, it just requires to
+ * know if the low latency mode is enabled.
+ */
+
+#define TLV_TAG_TX_VFIFO_ULL_MODE (0x10270000)
+struct tlv_tx_vfifo_ull_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t mode;
+#define TLV_TX_VFIFO_ULL_MODE_DEFAULT 0
+};
+
+/* BIU mode
+ *
+ * Medford2 tag for selecting VI window decode (see values below)
+ */
+#define TLV_TAG_BIU_VI_WINDOW_MODE (0x10280000)
+struct tlv_biu_vi_window_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t mode;
+#define TLV_BIU_VI_WINDOW_MODE_8K 0 /* 8k per VI, CTPIO not mapped, medford/hunt compatible */
+#define TLV_BIU_VI_WINDOW_MODE_16K 1 /* 16k per VI, CTPIO mapped */
+#define TLV_BIU_VI_WINDOW_MODE_64K 2 /* 64k per VI, CTPIO mapped, POWER-friendly */
+};
+
+/* FastPD mode
+ *
+ * Medford2 tag for configuring the FastPD mode (see values below)
+ */
+#define TLV_TAG_FASTPD_MODE(port) (0x10290000 + (port))
+struct tlv_fastpd_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t mode;
+#define TLV_FASTPD_MODE_SOFT_ALL 0 /* All packets to the SoftPD */
+#define TLV_FASTPD_MODE_FAST_ALL 1 /* All packets to the FastPD */
+#define TLV_FASTPD_MODE_FAST_SUPPORTED 2 /* Supported packet types to the FastPD; everything else to the SoftPD */
+};
+
+#endif /* CI_MGMT_TLV_LAYOUT_H */
diff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c
new file mode 100644
index 00000000..0f8e9b1b
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_tx.c
@@ -0,0 +1,710 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_QSTATS
+#define EFX_TX_QSTAT_INCR(_etp, _stat) \
+ do { \
+ (_etp)->et_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_TX_QSTAT_INCR(_etp, _stat)
+#endif
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_txq(
+ __in efx_nic_t *enp,
+ __in uint32_t size,
+ __in uint32_t target_evq,
+ __in uint32_t label,
+ __in uint32_t instance,
+ __in uint16_t flags,
+ __in efsys_mem_t *esmp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
+ MC_CMD_INIT_TXQ_OUT_LEN)];
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
+ EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));
+
+ npages = EFX_TXQ_NBUFS(size);
+ if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_TXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
+
+ MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
+ INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
+ INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
+ INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
+ INIT_TXQ_IN_CRC_MODE, 0,
+ INIT_TXQ_IN_FLAG_TIMESTAMP, 0);
+
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_txq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
+ MC_CMD_FINI_TXQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_TXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_init(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+}
+
+ void
+ef10_tx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp)
+{
+ efx_qword_t desc;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(id))
+
+ if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
+ esmp)) != 0)
+ goto fail1;
+
+ /*
+ * A previous user of this TX queue may have written a descriptor to the
+ * TX push collector, but not pushed the doorbell (e.g. after a crash).
+ * The next doorbell write would then push the stale descriptor.
+ *
+ * Ensure the (per network port) TX push collector is cleared by writing
+ * a no-op TX option descriptor. See bug29981 for details.
+ */
+ *addedp = 1;
+ EFX_POPULATE_QWORD_4(desc,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
+ ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
+ ESF_DZ_TX_OPTION_IP_CSUM,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
+ ef10_tx_qpush(etp, *addedp, 0);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp))
+ /* FIXME */
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_enable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_piobuf_handle_t handle;
+ efx_rc_t rc;
+
+ if (etp->et_pio_size != 0) {
+ rc = EALREADY;
+ goto fail1;
+ }
+
+ /* Sub-allocate a PIO block from a piobuf */
+ if ((rc = ef10_nic_pio_alloc(enp,
+ &etp->et_pio_bufnum,
+ &handle,
+ &etp->et_pio_blknum,
+ &etp->et_pio_offset,
+ &etp->et_pio_size)) != 0) {
+ goto fail2;
+ }
+ EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);
+
+ /* Link the piobuf to this TXQ */
+ if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
+ goto fail3;
+ }
+
+ /*
+ * et_pio_offset is the offset of the sub-allocated block within the
+ * hardware PIO buffer. It is used as the buffer address in the PIO
+ * option descriptor.
+ *
+ * et_pio_write_offset is the offset of the sub-allocated block from the
+ * start of the write-combined memory mapping, and is used for writing
+ * data into the PIO buffer.
+ */
+ etp->et_pio_write_offset =
+ (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
+ ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+ ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+fail2:
+ EFSYS_PROBE(fail2);
+ etp->et_pio_size = 0;
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qpio_disable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+
+ if (etp->et_pio_size != 0) {
+ /* Unlink the piobuf from this TXQ */
+ ef10_nic_pio_unlink(enp, etp->et_index);
+
+ /* Free the sub-allocated PIO block */
+ ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+ etp->et_pio_size = 0;
+ etp->et_pio_write_offset = 0;
+ }
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(length) uint8_t *buffer,
+ __in size_t length,
+ __in size_t offset)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efsys_bar_t *esbp = enp->en_esbp;
+ uint32_t write_offset;
+ uint32_t write_offset_limit;
+ efx_qword_t *eqp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);
+
+ if (etp->et_pio_size == 0) {
+ rc = ENOENT;
+ goto fail1;
+ }
+ if (offset + length > etp->et_pio_size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ /*
+ * Writes to PIO buffers must be 64 bit aligned, and multiples of
+ * 64 bits.
+ */
+ write_offset = etp->et_pio_write_offset + offset;
+ write_offset_limit = write_offset + length;
+ eqp = (efx_qword_t *)buffer;
+ while (write_offset < write_offset_limit) {
+ EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
+ eqp++;
+ write_offset += sizeof (efx_qword_t);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_qword_t pio_desc;
+ unsigned int id;
+ size_t offset;
+ unsigned int added = *addedp;
+ efx_rc_t rc;
+
+
+ if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if (etp->et_pio_size == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
+ unsigned int, id, uint32_t, etp->et_pio_offset,
+ size_t, pkt_length);
+
+ EFX_POPULATE_QWORD_5(pio_desc,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, 1,
+ ESF_DZ_TX_PIO_CONT, 0,
+ ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
+ ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);
+
+ *addedp = added;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < n; i++) {
+ efx_buffer_t *ebp = &eb[i];
+ efsys_dma_addr_t addr = ebp->eb_addr;
+ size_t size = ebp->eb_size;
+ boolean_t eop = ebp->eb_eop;
+ unsigned int id;
+ size_t offset;
+ efx_qword_t qword;
+
+ /* No limitations on boundary crossing */
+ EFSYS_ASSERT(size <=
+ etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
+ unsigned int, id, efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_5(qword,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
+ ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
+ ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
+ }
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * This improves performance by pushing a TX descriptor at the same time as the
+ * doorbell. The descriptor must be added to the TXQ, so that can be used if the
+ * hardware decides not to use the pushed descriptor.
+ */
+ void
+ef10_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ unsigned int wptr;
+ unsigned int id;
+ size_t offset;
+ efx_qword_t desc;
+ efx_oword_t oword;
+
+ wptr = added & etp->et_mask;
+ id = pushed & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
+ EFX_POPULATE_OWORD_3(oword,
+ ERF_DZ_TX_DESC_WPTR, wptr,
+ ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
+ ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index,
+ &oword);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < n; i++) {
+ efx_desc_t *edp = &ed[i];
+ unsigned int id;
+ size_t offset;
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_desc_t);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
+ }
+
+ EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
+ unsigned int, added, unsigned int, n);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ /* No limitations on boundary crossing */
+ EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
+
+ EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
+ efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_5(edp->ed_eq,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
+ ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
+ ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
+}
+
+ void
+ef10_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp)
+{
+ EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
+ uint16_t, ipv4_id, uint32_t, tcp_seq,
+ uint8_t, tcp_flags);
+
+ EFX_POPULATE_QWORD_5(edp->ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+ ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
+}
+
+ void
+ef10_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count)
+{
+ EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
+ uint16_t, ipv4_id, uint32_t, tcp_seq,
+ uint16_t, tcp_mss);
+
+ EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
+
+ EFX_POPULATE_QWORD_5(edp[0].ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
+ ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
+ EFX_POPULATE_QWORD_4(edp[1].ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
+ ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
+}
+
+ void
+ef10_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp)
+{
+ EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
+ uint16_t, tci);
+
+ EFX_POPULATE_QWORD_4(edp->ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_VLAN,
+ ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
+ ESF_DZ_TX_VLAN_TAG1, tci);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_rc_t rc;
+
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp, ns))
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ /* FIXME */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp))
+ /* FIXME */
+}
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < TX_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, etp->et_stat[id]);
+ etp->et_stat[id] = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_QSTATS */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_vpd.c b/drivers/net/sfc/base/ef10_vpd.c
new file mode 100644
index 00000000..71123a90
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_vpd.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_VPD
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#include "ef10_tlv_layout.h"
+
+ __checkReturn efx_rc_t
+ef10_vpd_init(
+ __in efx_nic_t *enp)
+{
+ caddr_t svpd;
+ size_t svpd_size;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_STATIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_STATIC_VPD(pci_pf);
+ }
+
+ /*
+ * The VPD interface exposes VPD resources from the combined static and
+ * dynamic VPD storage. As the static VPD configuration should *never*
+ * change, we can cache it.
+ */
+ svpd = NULL;
+ svpd_size = 0;
+ rc = ef10_nvram_partn_read_tlv(enp,
+ NVRAM_PARTITION_TYPE_STATIC_CONFIG,
+ tag, &svpd, &svpd_size);
+ if (rc != 0) {
+ if (rc == EACCES) {
+ /* Unprivileged functions cannot access VPD */
+ goto out;
+ }
+ goto fail1;
+ }
+
+ if (svpd != NULL && svpd_size > 0) {
+ if ((rc = efx_vpd_hunk_verify(svpd, svpd_size, NULL)) != 0)
+ goto fail2;
+ }
+
+ enp->en_arch.ef10.ena_svpd = svpd;
+ enp->en_arch.ef10.ena_svpd_length = svpd_size;
+
+out:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, svpd_size, svpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * This function returns the total size the user should allocate
+ * for all VPD operations. We've already cached the static vpd,
+ * so we just need to return an upper bound on the dynamic vpd,
+ * which is the size of the DYNAMIC_CONFIG partition.
+ */
+ if ((rc = efx_mcdi_nvram_info(enp, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ sizep, NULL, NULL, NULL)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ caddr_t dvpd;
+ size_t dvpd_size;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_DYNAMIC_VPD(pci_pf);
+ }
+
+ if ((rc = ef10_nvram_partn_read_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ tag, &dvpd, &dvpd_size)) != 0)
+ goto fail1;
+
+ if (dvpd_size > size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+ memcpy(data, dvpd, dvpd_size);
+
+ /* Pad data with all-1s, consistent with update operations */
+ memset(data + dvpd_size, 0xff, size - dvpd_size);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_tag_t stag;
+ efx_vpd_tag_t dtag;
+ efx_vpd_keyword_t skey;
+ efx_vpd_keyword_t dkey;
+ unsigned int scont;
+ unsigned int dcont;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Strictly you could take the view that dynamic vpd is optional.
+ * Instead, to conform more closely to the read/verify/reinit()
+ * paradigm, we require dynamic vpd. ef10_vpd_reinit() will
+ * reinitialize it as required.
+ */
+ if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0)
+ goto fail1;
+
+ /*
+ * Verify that there is no duplication between the static and
+ * dynamic cfg sectors.
+ */
+ if (enp->en_arch.ef10.ena_svpd_length == 0)
+ goto done;
+
+ dcont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(data, size, &dtag,
+ &dkey, NULL, NULL, &dcont)) != 0)
+ goto fail2;
+ if (dcont == 0)
+ break;
+
+ /*
+ * Skip the RV keyword. It should be present in both the static
+ * and dynamic cfg sectors.
+ */
+ if (dtag == EFX_VPD_RO && dkey == EFX_VPD_KEYWORD('R', 'V'))
+ continue;
+
+ scont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(
+ enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, &stag, &skey,
+ NULL, NULL, &scont)) != 0)
+ goto fail3;
+ if (scont == 0)
+ break;
+
+ if (stag == dtag && skey == dkey) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ boolean_t wantpid;
+ efx_rc_t rc;
+
+ /*
+ * Only create an ID string if the dynamic cfg doesn't have one
+ */
+ if (enp->en_arch.ef10.ena_svpd_length == 0)
+ wantpid = B_TRUE;
+ else {
+ unsigned int offset;
+ uint8_t length;
+
+ rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length,
+ EFX_VPD_ID, 0, &offset, &length);
+ if (rc == 0)
+ wantpid = B_FALSE;
+ else if (rc == ENOENT)
+ wantpid = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ unsigned int offset;
+ uint8_t length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Attempt to satisfy the request from svpd first */
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ if ((rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value,
+ enp->en_arch.ef10.ena_svpd + offset, length);
+ return (0);
+ } else if (rc != ENOENT)
+ goto fail1;
+ }
+
+ /* And then from the provided data buffer */
+ if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+ goto fail2;
+ }
+
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value, data + offset, length);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* If the provided (tag,keyword) exists in svpd, then it is readonly */
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ unsigned int offset;
+ uint8_t length;
+
+ if ((rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ rc = EACCES;
+ goto fail1;
+ }
+ }
+
+ if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ _NOTE(ARGUNUSED(enp, data, size, evvp, contp))
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t vpd_length;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_DYNAMIC_VPD(pci_pf);
+ }
+
+ /* Determine total length of new dynamic VPD */
+ if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0)
+ goto fail1;
+
+ /* Store new dynamic VPD in all segments in DYNAMIC_CONFIG partition */
+ if ((rc = ef10_nvram_partn_write_segment_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ tag, data, vpd_length, B_TRUE)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip, enp->en_arch.ef10.ena_svpd_length,
+ enp->en_arch.ef10.ena_svpd);
+
+ enp->en_arch.ef10.ena_svpd = NULL;
+ enp->en_arch.ef10.ena_svpd_length = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/drivers/net/sfc/base/efx.h b/drivers/net/sfc/base/efx.h
new file mode 100644
index 00000000..7eabc370
--- /dev/null
+++ b/drivers/net/sfc/base/efx.h
@@ -0,0 +1,2535 @@
+/*
+ * Copyright (c) 2006-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_H
+#define _SYS_EFX_H
+
+#include "efsys.h"
+#include "efx_check.h"
+#include "efx_phy_ids.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFX_STATIC_ASSERT(_cond) \
+ ((void)sizeof(char[(_cond) ? 1 : -1]))
+
+#define EFX_ARRAY_SIZE(_array) \
+ (sizeof(_array) / sizeof((_array)[0]))
+
+#define EFX_FIELD_OFFSET(_type, _field) \
+ ((size_t) &(((_type *)0)->_field))
+
+/* Return codes */
+
+typedef __success(return == 0) int efx_rc_t;
+
+
+/* Chip families */
+
+typedef enum efx_family_e {
+ EFX_FAMILY_INVALID,
+ EFX_FAMILY_FALCON, /* Obsolete and not supported */
+ EFX_FAMILY_SIENA,
+ EFX_FAMILY_HUNTINGTON,
+ EFX_FAMILY_MEDFORD,
+ EFX_FAMILY_NTYPES
+} efx_family_t;
+
+extern __checkReturn efx_rc_t
+efx_family(
+ __in uint16_t venid,
+ __in uint16_t devid,
+ __out efx_family_t *efp);
+
+
+#define EFX_PCI_VENID_SFC 0x1924
+
+#define EFX_PCI_DEVID_FALCON 0x0710 /* SFC4000 */
+
+#define EFX_PCI_DEVID_BETHPAGE 0x0803 /* SFC9020 */
+#define EFX_PCI_DEVID_SIENA 0x0813 /* SFL9021 */
+#define EFX_PCI_DEVID_SIENA_F1_UNINIT 0x0810
+
+#define EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT 0x0901
+#define EFX_PCI_DEVID_FARMINGDALE 0x0903 /* SFC9120 PF */
+#define EFX_PCI_DEVID_GREENPORT 0x0923 /* SFC9140 PF */
+
+#define EFX_PCI_DEVID_FARMINGDALE_VF 0x1903 /* SFC9120 VF */
+#define EFX_PCI_DEVID_GREENPORT_VF 0x1923 /* SFC9140 VF */
+
+#define EFX_PCI_DEVID_MEDFORD_PF_UNINIT 0x0913
+#define EFX_PCI_DEVID_MEDFORD 0x0A03 /* SFC9240 PF */
+#define EFX_PCI_DEVID_MEDFORD_VF 0x1A03 /* SFC9240 VF */
+
+#define EFX_MEM_BAR 2
+
+/* Error codes */
+
+enum {
+ EFX_ERR_INVALID,
+ EFX_ERR_SRAM_OOB,
+ EFX_ERR_BUFID_DC_OOB,
+ EFX_ERR_MEM_PERR,
+ EFX_ERR_RBUF_OWN,
+ EFX_ERR_TBUF_OWN,
+ EFX_ERR_RDESQ_OWN,
+ EFX_ERR_TDESQ_OWN,
+ EFX_ERR_EVQ_OWN,
+ EFX_ERR_EVFF_OFLO,
+ EFX_ERR_ILL_ADDR,
+ EFX_ERR_SRAM_PERR,
+ EFX_ERR_NCODES
+};
+
+/* Calculate the IEEE 802.3 CRC32 of a MAC addr */
+extern __checkReturn uint32_t
+efx_crc32_calculate(
+ __in uint32_t crc_init,
+ __in_ecount(length) uint8_t const *input,
+ __in int length);
+
+
+/* Type prototypes */
+
+typedef struct efx_rxq_s efx_rxq_t;
+
+/* NIC */
+
+typedef struct efx_nic_s efx_nic_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_create(
+ __in efx_family_t family,
+ __in efsys_identifier_t *esip,
+ __in efsys_bar_t *esbp,
+ __in efsys_lock_t *eslp,
+ __deref_out efx_nic_t **enpp);
+
+extern __checkReturn efx_rc_t
+efx_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_nic_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_nic_reset(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+efx_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+efx_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+efx_nic_unprobe(
+ __in efx_nic_t *enp);
+
+extern void
+efx_nic_destroy(
+ __in efx_nic_t *enp);
+
+#define EFX_PCIE_LINK_SPEED_GEN1 1
+#define EFX_PCIE_LINK_SPEED_GEN2 2
+#define EFX_PCIE_LINK_SPEED_GEN3 3
+
+typedef enum efx_pcie_link_performance_e {
+ EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH,
+ EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH,
+ EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY,
+ EFX_PCIE_LINK_PERFORMANCE_OPTIMAL
+} efx_pcie_link_performance_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_calculate_pcie_link_bandwidth(
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out uint32_t *bandwidth_mbpsp);
+
+extern __checkReturn efx_rc_t
+efx_nic_check_pcie_link_speed(
+ __in efx_nic_t *enp,
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out efx_pcie_link_performance_t *resultp);
+
+#if EFSYS_OPT_MCDI
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+/* Huntington and Medford require MCDIv2 commands */
+#define WITH_MCDI_V2 1
+#endif
+
+typedef struct efx_mcdi_req_s efx_mcdi_req_t;
+
+typedef enum efx_mcdi_exception_e {
+ EFX_MCDI_EXCEPTION_MC_REBOOT,
+ EFX_MCDI_EXCEPTION_MC_BADASSERT,
+} efx_mcdi_exception_t;
+
+#if EFSYS_OPT_MCDI_LOGGING
+typedef enum efx_log_msg_e {
+ EFX_LOG_INVALID,
+ EFX_LOG_MCDI_REQUEST,
+ EFX_LOG_MCDI_RESPONSE,
+} efx_log_msg_t;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+typedef struct efx_mcdi_transport_s {
+ void *emt_context;
+ efsys_mem_t *emt_dma_mem;
+ void (*emt_execute)(void *, efx_mcdi_req_t *);
+ void (*emt_ev_cpl)(void *);
+ void (*emt_exception)(void *, efx_mcdi_exception_t);
+#if EFSYS_OPT_MCDI_LOGGING
+ void (*emt_logger)(void *, efx_log_msg_t,
+ void *, size_t, void *, size_t);
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ void (*emt_ev_proxy_response)(void *, uint32_t, efx_rc_t);
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+} efx_mcdi_transport_t;
+
+extern __checkReturn efx_rc_t
+efx_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_reboot(
+ __in efx_nic_t *enp);
+
+ void
+efx_mcdi_new_epoch(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *usec_timeoutp);
+
+extern void
+efx_mcdi_request_start(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __in boolean_t ev_cpl);
+
+extern __checkReturn boolean_t
+efx_mcdi_request_poll(
+ __in efx_nic_t *enp);
+
+extern __checkReturn boolean_t
+efx_mcdi_request_abort(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mcdi_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+/* INTR */
+
+#define EFX_NINTR_SIENA 1024
+
+typedef enum efx_intr_type_e {
+ EFX_INTR_INVALID = 0,
+ EFX_INTR_LINE,
+ EFX_INTR_MESSAGE,
+ EFX_INTR_NTYPES
+} efx_intr_type_t;
+
+#define EFX_INTR_SIZE (sizeof (efx_oword_t))
+
+extern __checkReturn efx_rc_t
+efx_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+extern void
+efx_intr_enable(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_disable(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+#define EFX_INTR_NEVQS 32
+
+extern __checkReturn efx_rc_t
+efx_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+extern void
+efx_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *maskp);
+
+extern void
+efx_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+extern void
+efx_intr_fatal(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_fini(
+ __in efx_nic_t *enp);
+
+/* MAC */
+
+#if EFSYS_OPT_MAC_STATS
+
+/* START MKCONFIG GENERATED EfxHeaderMacBlock e323546097fd7c65 */
+typedef enum efx_mac_stat_e {
+ EFX_MAC_RX_OCTETS,
+ EFX_MAC_RX_PKTS,
+ EFX_MAC_RX_UNICST_PKTS,
+ EFX_MAC_RX_MULTICST_PKTS,
+ EFX_MAC_RX_BRDCST_PKTS,
+ EFX_MAC_RX_PAUSE_PKTS,
+ EFX_MAC_RX_LE_64_PKTS,
+ EFX_MAC_RX_65_TO_127_PKTS,
+ EFX_MAC_RX_128_TO_255_PKTS,
+ EFX_MAC_RX_256_TO_511_PKTS,
+ EFX_MAC_RX_512_TO_1023_PKTS,
+ EFX_MAC_RX_1024_TO_15XX_PKTS,
+ EFX_MAC_RX_GE_15XX_PKTS,
+ EFX_MAC_RX_ERRORS,
+ EFX_MAC_RX_FCS_ERRORS,
+ EFX_MAC_RX_DROP_EVENTS,
+ EFX_MAC_RX_FALSE_CARRIER_ERRORS,
+ EFX_MAC_RX_SYMBOL_ERRORS,
+ EFX_MAC_RX_ALIGN_ERRORS,
+ EFX_MAC_RX_INTERNAL_ERRORS,
+ EFX_MAC_RX_JABBER_PKTS,
+ EFX_MAC_RX_LANE0_CHAR_ERR,
+ EFX_MAC_RX_LANE1_CHAR_ERR,
+ EFX_MAC_RX_LANE2_CHAR_ERR,
+ EFX_MAC_RX_LANE3_CHAR_ERR,
+ EFX_MAC_RX_LANE0_DISP_ERR,
+ EFX_MAC_RX_LANE1_DISP_ERR,
+ EFX_MAC_RX_LANE2_DISP_ERR,
+ EFX_MAC_RX_LANE3_DISP_ERR,
+ EFX_MAC_RX_MATCH_FAULT,
+ EFX_MAC_RX_NODESC_DROP_CNT,
+ EFX_MAC_TX_OCTETS,
+ EFX_MAC_TX_PKTS,
+ EFX_MAC_TX_UNICST_PKTS,
+ EFX_MAC_TX_MULTICST_PKTS,
+ EFX_MAC_TX_BRDCST_PKTS,
+ EFX_MAC_TX_PAUSE_PKTS,
+ EFX_MAC_TX_LE_64_PKTS,
+ EFX_MAC_TX_65_TO_127_PKTS,
+ EFX_MAC_TX_128_TO_255_PKTS,
+ EFX_MAC_TX_256_TO_511_PKTS,
+ EFX_MAC_TX_512_TO_1023_PKTS,
+ EFX_MAC_TX_1024_TO_15XX_PKTS,
+ EFX_MAC_TX_GE_15XX_PKTS,
+ EFX_MAC_TX_ERRORS,
+ EFX_MAC_TX_SGL_COL_PKTS,
+ EFX_MAC_TX_MULT_COL_PKTS,
+ EFX_MAC_TX_EX_COL_PKTS,
+ EFX_MAC_TX_LATE_COL_PKTS,
+ EFX_MAC_TX_DEF_PKTS,
+ EFX_MAC_TX_EX_DEF_PKTS,
+ EFX_MAC_PM_TRUNC_BB_OVERFLOW,
+ EFX_MAC_PM_DISCARD_BB_OVERFLOW,
+ EFX_MAC_PM_TRUNC_VFIFO_FULL,
+ EFX_MAC_PM_DISCARD_VFIFO_FULL,
+ EFX_MAC_PM_TRUNC_QBB,
+ EFX_MAC_PM_DISCARD_QBB,
+ EFX_MAC_PM_DISCARD_MAPPING,
+ EFX_MAC_RXDP_Q_DISABLED_PKTS,
+ EFX_MAC_RXDP_DI_DROPPED_PKTS,
+ EFX_MAC_RXDP_STREAMING_PKTS,
+ EFX_MAC_RXDP_HLB_FETCH,
+ EFX_MAC_RXDP_HLB_WAIT,
+ EFX_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_UNICAST_BYTES,
+ EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_MULTICAST_BYTES,
+ EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_BROADCAST_BYTES,
+ EFX_MAC_VADAPTER_RX_BAD_PACKETS,
+ EFX_MAC_VADAPTER_RX_BAD_BYTES,
+ EFX_MAC_VADAPTER_RX_OVERFLOW,
+ EFX_MAC_VADAPTER_TX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_UNICAST_BYTES,
+ EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_MULTICAST_BYTES,
+ EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_BROADCAST_BYTES,
+ EFX_MAC_VADAPTER_TX_BAD_PACKETS,
+ EFX_MAC_VADAPTER_TX_BAD_BYTES,
+ EFX_MAC_VADAPTER_TX_OVERFLOW,
+ EFX_MAC_NSTATS
+} efx_mac_stat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderMacBlock */
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+typedef enum efx_link_mode_e {
+ EFX_LINK_UNKNOWN = 0,
+ EFX_LINK_DOWN,
+ EFX_LINK_10HDX,
+ EFX_LINK_10FDX,
+ EFX_LINK_100HDX,
+ EFX_LINK_100FDX,
+ EFX_LINK_1000HDX,
+ EFX_LINK_1000FDX,
+ EFX_LINK_10000FDX,
+ EFX_LINK_40000FDX,
+ EFX_LINK_NMODES
+} efx_link_mode_t;
+
+#define EFX_MAC_ADDR_LEN 6
+
+#define EFX_MAC_ADDR_IS_MULTICAST(_address) (((uint8_t *)_address)[0] & 0x01)
+
+#define EFX_MAC_MULTICAST_LIST_MAX 256
+
+#define EFX_MAC_SDU_MAX 9202
+
+#define EFX_MAC_PDU_ADJUSTMENT \
+ (/* EtherII */ 14 \
+ + /* VLAN */ 4 \
+ + /* CRC */ 4 \
+ + /* bug16011 */ 16) \
+
+#define EFX_MAC_PDU(_sdu) \
+ P2ROUNDUP((_sdu) + EFX_MAC_PDU_ADJUSTMENT, 8)
+
+/*
+ * Due to the P2ROUNDUP in EFX_MAC_PDU(), EFX_MAC_SDU_FROM_PDU() may give
+ * the SDU rounded up slightly.
+ */
+#define EFX_MAC_SDU_FROM_PDU(_pdu) ((_pdu) - EFX_MAC_PDU_ADJUSTMENT)
+
+#define EFX_MAC_PDU_MIN 60
+#define EFX_MAC_PDU_MAX EFX_MAC_PDU(EFX_MAC_SDU_MAX)
+
+extern __checkReturn efx_rc_t
+efx_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+extern __checkReturn efx_rc_t
+efx_mac_pdu_set(
+ __in efx_nic_t *enp,
+ __in size_t pdu);
+
+extern __checkReturn efx_rc_t
+efx_mac_addr_set(
+ __in efx_nic_t *enp,
+ __in uint8_t *addr);
+
+extern __checkReturn efx_rc_t
+efx_mac_filter_set(
+ __in efx_nic_t *enp,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst);
+
+extern __checkReturn efx_rc_t
+efx_mac_multicast_list_set(
+ __in efx_nic_t *enp,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count);
+
+extern __checkReturn efx_rc_t
+efx_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+efx_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mac_drain(
+ __in efx_nic_t *enp,
+ __in boolean_t enabled);
+
+extern __checkReturn efx_rc_t
+efx_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+#define EFX_FCNTL_RESPOND 0x00000001
+#define EFX_FCNTL_GENERATE 0x00000002
+
+extern __checkReturn efx_rc_t
+efx_mac_fcntl_set(
+ __in efx_nic_t *enp,
+ __in unsigned int fcntl,
+ __in boolean_t autoneg);
+
+extern void
+efx_mac_fcntl_get(
+ __in efx_nic_t *enp,
+ __out unsigned int *fcntl_wantedp,
+ __out unsigned int *fcntl_linkp);
+
+
+#if EFSYS_OPT_MAC_STATS
+
+#if EFSYS_OPT_NAMES
+
+extern __checkReturn const char *
+efx_mac_stat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#define EFX_MAC_STATS_MASK_BITS_PER_PAGE (8 * sizeof (uint32_t))
+
+#define EFX_MAC_STATS_MASK_NPAGES \
+ (P2ROUNDUP(EFX_MAC_NSTATS, EFX_MAC_STATS_MASK_BITS_PER_PAGE) / \
+ EFX_MAC_STATS_MASK_BITS_PER_PAGE)
+
+/*
+ * Get mask of MAC statistics supported by the hardware.
+ *
+ * If mask_size is insufficient to return the mask, EINVAL error is
+ * returned. EFX_MAC_STATS_MASK_NPAGES multiplied by size of the page
+ * (which is sizeof (uint32_t)) is sufficient.
+ */
+extern __checkReturn efx_rc_t
+efx_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __out_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size);
+
+#define EFX_MAC_STAT_SUPPORTED(_mask, _stat) \
+ ((_mask)[(_stat) / EFX_MAC_STATS_MASK_BITS_PER_PAGE] & \
+ (1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1))))
+
+#define EFX_MAC_STATS_SIZE 0x400
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_clear(
+ __in efx_nic_t *enp);
+
+/*
+ * Upload mac statistics supported by the hardware into the given buffer.
+ *
+ * The reference buffer must be at least %EFX_MAC_STATS_SIZE bytes,
+ * and page aligned.
+ *
+ * The hardware will only DMA statistics that it understands (of course).
+ * Drivers should not make any assumptions about which statistics are
+ * supported, especially when the statistics are generated by firmware.
+ *
+ * Thus, drivers should zero this buffer before use, so that not-understood
+ * statistics read back as zero.
+ */
+extern __checkReturn efx_rc_t
+efx_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp);
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events);
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+/* MON */
+
+typedef enum efx_mon_type_e {
+ EFX_MON_INVALID = 0,
+ EFX_MON_SFC90X0,
+ EFX_MON_SFC91X0,
+ EFX_MON_SFC92X0,
+ EFX_MON_NTYPES
+} efx_mon_type_t;
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_mon_name(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn efx_rc_t
+efx_mon_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_MON_STATS
+
+#define EFX_MON_STATS_PAGE_SIZE 0x100
+#define EFX_MON_MASK_ELEMENT_SIZE 32
+
+/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 5d4ee5185e419abe */
+typedef enum efx_mon_stat_e {
+ EFX_MON_STAT_2_5V,
+ EFX_MON_STAT_VCCP1,
+ EFX_MON_STAT_VCC,
+ EFX_MON_STAT_5V,
+ EFX_MON_STAT_12V,
+ EFX_MON_STAT_VCCP2,
+ EFX_MON_STAT_EXT_TEMP,
+ EFX_MON_STAT_INT_TEMP,
+ EFX_MON_STAT_AIN1,
+ EFX_MON_STAT_AIN2,
+ EFX_MON_STAT_INT_COOLING,
+ EFX_MON_STAT_EXT_COOLING,
+ EFX_MON_STAT_1V,
+ EFX_MON_STAT_1_2V,
+ EFX_MON_STAT_1_8V,
+ EFX_MON_STAT_3_3V,
+ EFX_MON_STAT_1_2VA,
+ EFX_MON_STAT_VREF,
+ EFX_MON_STAT_VAOE,
+ EFX_MON_STAT_AOE_TEMP,
+ EFX_MON_STAT_PSU_AOE_TEMP,
+ EFX_MON_STAT_PSU_TEMP,
+ EFX_MON_STAT_FAN0,
+ EFX_MON_STAT_FAN1,
+ EFX_MON_STAT_FAN2,
+ EFX_MON_STAT_FAN3,
+ EFX_MON_STAT_FAN4,
+ EFX_MON_STAT_VAOE_IN,
+ EFX_MON_STAT_IAOE,
+ EFX_MON_STAT_IAOE_IN,
+ EFX_MON_STAT_NIC_POWER,
+ EFX_MON_STAT_0_9V,
+ EFX_MON_STAT_I0_9V,
+ EFX_MON_STAT_I1_2V,
+ EFX_MON_STAT_0_9V_ADC,
+ EFX_MON_STAT_INT_TEMP2,
+ EFX_MON_STAT_VREG_TEMP,
+ EFX_MON_STAT_VREG_0_9V_TEMP,
+ EFX_MON_STAT_VREG_1_2V_TEMP,
+ EFX_MON_STAT_INT_VPTAT,
+ EFX_MON_STAT_INT_ADC_TEMP,
+ EFX_MON_STAT_EXT_VPTAT,
+ EFX_MON_STAT_EXT_ADC_TEMP,
+ EFX_MON_STAT_AMBIENT_TEMP,
+ EFX_MON_STAT_AIRFLOW,
+ EFX_MON_STAT_VDD08D_VSS08D_CSR,
+ EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC,
+ EFX_MON_STAT_HOTPOINT_TEMP,
+ EFX_MON_STAT_PHY_POWER_SWITCH_PORT0,
+ EFX_MON_STAT_PHY_POWER_SWITCH_PORT1,
+ EFX_MON_STAT_MUM_VCC,
+ EFX_MON_STAT_0V9_A,
+ EFX_MON_STAT_I0V9_A,
+ EFX_MON_STAT_0V9_A_TEMP,
+ EFX_MON_STAT_0V9_B,
+ EFX_MON_STAT_I0V9_B,
+ EFX_MON_STAT_0V9_B_TEMP,
+ EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY,
+ EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXT_ADC,
+ EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY,
+ EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_MASTER_VPTAT,
+ EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP,
+ EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT,
+ EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP,
+ EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC,
+ EFX_MON_STAT_SODIMM_VOUT,
+ EFX_MON_STAT_SODIMM_0_TEMP,
+ EFX_MON_STAT_SODIMM_1_TEMP,
+ EFX_MON_STAT_PHY0_VCC,
+ EFX_MON_STAT_PHY1_VCC,
+ EFX_MON_STAT_CONTROLLER_TDIODE_TEMP,
+ EFX_MON_STAT_BOARD_FRONT_TEMP,
+ EFX_MON_STAT_BOARD_BACK_TEMP,
+ EFX_MON_NSTATS
+} efx_mon_stat_t;
+
+/* END MKCONFIG GENERATED MonitorHeaderStatsBlock */
+
+typedef enum efx_mon_stat_state_e {
+ EFX_MON_STAT_STATE_OK = 0,
+ EFX_MON_STAT_STATE_WARNING = 1,
+ EFX_MON_STAT_STATE_FATAL = 2,
+ EFX_MON_STAT_STATE_BROKEN = 3,
+ EFX_MON_STAT_STATE_NO_READING = 4,
+} efx_mon_stat_state_t;
+
+typedef struct efx_mon_stat_value_s {
+ uint16_t emsv_value;
+ uint16_t emsv_state;
+} efx_mon_stat_value_t;
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_mon_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn efx_rc_t
+efx_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+extern void
+efx_mon_fini(
+ __in efx_nic_t *enp);
+
+/* PHY */
+
+extern __checkReturn efx_rc_t
+efx_phy_verify(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+
+typedef enum efx_phy_led_mode_e {
+ EFX_PHY_LED_DEFAULT = 0,
+ EFX_PHY_LED_OFF,
+ EFX_PHY_LED_ON,
+ EFX_PHY_LED_FLASH,
+ EFX_PHY_LED_NMODES
+} efx_phy_led_mode_t;
+
+extern __checkReturn efx_rc_t
+efx_phy_led_set(
+ __in efx_nic_t *enp,
+ __in efx_phy_led_mode_t mode);
+
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+extern __checkReturn efx_rc_t
+efx_port_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+typedef enum efx_loopback_type_e {
+ EFX_LOOPBACK_OFF = 0,
+ EFX_LOOPBACK_DATA = 1,
+ EFX_LOOPBACK_GMAC = 2,
+ EFX_LOOPBACK_XGMII = 3,
+ EFX_LOOPBACK_XGXS = 4,
+ EFX_LOOPBACK_XAUI = 5,
+ EFX_LOOPBACK_GMII = 6,
+ EFX_LOOPBACK_SGMII = 7,
+ EFX_LOOPBACK_XGBR = 8,
+ EFX_LOOPBACK_XFI = 9,
+ EFX_LOOPBACK_XAUI_FAR = 10,
+ EFX_LOOPBACK_GMII_FAR = 11,
+ EFX_LOOPBACK_SGMII_FAR = 12,
+ EFX_LOOPBACK_XFI_FAR = 13,
+ EFX_LOOPBACK_GPHY = 14,
+ EFX_LOOPBACK_PHY_XS = 15,
+ EFX_LOOPBACK_PCS = 16,
+ EFX_LOOPBACK_PMA_PMD = 17,
+ EFX_LOOPBACK_XPORT = 18,
+ EFX_LOOPBACK_XGMII_WS = 19,
+ EFX_LOOPBACK_XAUI_WS = 20,
+ EFX_LOOPBACK_XAUI_WS_FAR = 21,
+ EFX_LOOPBACK_XAUI_WS_NEAR = 22,
+ EFX_LOOPBACK_GMII_WS = 23,
+ EFX_LOOPBACK_XFI_WS = 24,
+ EFX_LOOPBACK_XFI_WS_FAR = 25,
+ EFX_LOOPBACK_PHYXS_WS = 26,
+ EFX_LOOPBACK_PMA_INT = 27,
+ EFX_LOOPBACK_SD_NEAR = 28,
+ EFX_LOOPBACK_SD_FAR = 29,
+ EFX_LOOPBACK_PMA_INT_WS = 30,
+ EFX_LOOPBACK_SD_FEP2_WS = 31,
+ EFX_LOOPBACK_SD_FEP1_5_WS = 32,
+ EFX_LOOPBACK_SD_FEP_WS = 33,
+ EFX_LOOPBACK_SD_FES_WS = 34,
+ EFX_LOOPBACK_NTYPES
+} efx_loopback_type_t;
+
+typedef enum efx_loopback_kind_e {
+ EFX_LOOPBACK_KIND_OFF = 0,
+ EFX_LOOPBACK_KIND_ALL,
+ EFX_LOOPBACK_KIND_MAC,
+ EFX_LOOPBACK_KIND_PHY,
+ EFX_LOOPBACK_NKINDS
+} efx_loopback_kind_t;
+
+extern void
+efx_loopback_mask(
+ __in efx_loopback_kind_t loopback_kind,
+ __out efx_qword_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_port_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t type);
+
+#if EFSYS_OPT_NAMES
+
+extern __checkReturn const char *
+efx_loopback_type_name(
+ __in efx_nic_t *enp,
+ __in efx_loopback_type_t type);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+extern __checkReturn efx_rc_t
+efx_port_poll(
+ __in efx_nic_t *enp,
+ __out_opt efx_link_mode_t *link_modep);
+
+extern void
+efx_port_fini(
+ __in efx_nic_t *enp);
+
+typedef enum efx_phy_cap_type_e {
+ EFX_PHY_CAP_INVALID = 0,
+ EFX_PHY_CAP_10HDX,
+ EFX_PHY_CAP_10FDX,
+ EFX_PHY_CAP_100HDX,
+ EFX_PHY_CAP_100FDX,
+ EFX_PHY_CAP_1000HDX,
+ EFX_PHY_CAP_1000FDX,
+ EFX_PHY_CAP_10000FDX,
+ EFX_PHY_CAP_PAUSE,
+ EFX_PHY_CAP_ASYM,
+ EFX_PHY_CAP_AN,
+ EFX_PHY_CAP_40000FDX,
+ EFX_PHY_CAP_NTYPES
+} efx_phy_cap_type_t;
+
+
+#define EFX_PHY_CAP_CURRENT 0x00000000
+#define EFX_PHY_CAP_DEFAULT 0x00000001
+#define EFX_PHY_CAP_PERM 0x00000002
+
+extern void
+efx_phy_adv_cap_get(
+ __in efx_nic_t *enp,
+ __in uint32_t flag,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_phy_adv_cap_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mask);
+
+extern void
+efx_phy_lp_cap_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+typedef enum efx_phy_media_type_e {
+ EFX_PHY_MEDIA_INVALID = 0,
+ EFX_PHY_MEDIA_XAUI,
+ EFX_PHY_MEDIA_CX4,
+ EFX_PHY_MEDIA_KX4,
+ EFX_PHY_MEDIA_XFP,
+ EFX_PHY_MEDIA_SFP_PLUS,
+ EFX_PHY_MEDIA_BASE_T,
+ EFX_PHY_MEDIA_QSFP_PLUS,
+ EFX_PHY_MEDIA_NTYPES
+} efx_phy_media_type_t;
+
+/* Get the type of medium currently used. If the board has ports for
+ * modules, a module is present, and we recognise the media type of
+ * the module, then this will be the media type of the module.
+ * Otherwise it will be the media type of the port.
+ */
+extern void
+efx_phy_media_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_media_type_t *typep);
+
+extern efx_rc_t
+efx_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data);
+
+#if EFSYS_OPT_PHY_STATS
+
+/* START MKCONFIG GENERATED PhyHeaderStatsBlock 30ed56ad501f8e36 */
+typedef enum efx_phy_stat_e {
+ EFX_PHY_STAT_OUI,
+ EFX_PHY_STAT_PMA_PMD_LINK_UP,
+ EFX_PHY_STAT_PMA_PMD_RX_FAULT,
+ EFX_PHY_STAT_PMA_PMD_TX_FAULT,
+ EFX_PHY_STAT_PMA_PMD_REV_A,
+ EFX_PHY_STAT_PMA_PMD_REV_B,
+ EFX_PHY_STAT_PMA_PMD_REV_C,
+ EFX_PHY_STAT_PMA_PMD_REV_D,
+ EFX_PHY_STAT_PCS_LINK_UP,
+ EFX_PHY_STAT_PCS_RX_FAULT,
+ EFX_PHY_STAT_PCS_TX_FAULT,
+ EFX_PHY_STAT_PCS_BER,
+ EFX_PHY_STAT_PCS_BLOCK_ERRORS,
+ EFX_PHY_STAT_PHY_XS_LINK_UP,
+ EFX_PHY_STAT_PHY_XS_RX_FAULT,
+ EFX_PHY_STAT_PHY_XS_TX_FAULT,
+ EFX_PHY_STAT_PHY_XS_ALIGN,
+ EFX_PHY_STAT_PHY_XS_SYNC_A,
+ EFX_PHY_STAT_PHY_XS_SYNC_B,
+ EFX_PHY_STAT_PHY_XS_SYNC_C,
+ EFX_PHY_STAT_PHY_XS_SYNC_D,
+ EFX_PHY_STAT_AN_LINK_UP,
+ EFX_PHY_STAT_AN_MASTER,
+ EFX_PHY_STAT_AN_LOCAL_RX_OK,
+ EFX_PHY_STAT_AN_REMOTE_RX_OK,
+ EFX_PHY_STAT_CL22EXT_LINK_UP,
+ EFX_PHY_STAT_SNR_A,
+ EFX_PHY_STAT_SNR_B,
+ EFX_PHY_STAT_SNR_C,
+ EFX_PHY_STAT_SNR_D,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_A,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_B,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_C,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_D,
+ EFX_PHY_STAT_AN_COMPLETE,
+ EFX_PHY_STAT_PMA_PMD_REV_MAJOR,
+ EFX_PHY_STAT_PMA_PMD_REV_MINOR,
+ EFX_PHY_STAT_PMA_PMD_REV_MICRO,
+ EFX_PHY_STAT_PCS_FW_VERSION_0,
+ EFX_PHY_STAT_PCS_FW_VERSION_1,
+ EFX_PHY_STAT_PCS_FW_VERSION_2,
+ EFX_PHY_STAT_PCS_FW_VERSION_3,
+ EFX_PHY_STAT_PCS_FW_BUILD_YY,
+ EFX_PHY_STAT_PCS_FW_BUILD_MM,
+ EFX_PHY_STAT_PCS_FW_BUILD_DD,
+ EFX_PHY_STAT_PCS_OP_MODE,
+ EFX_PHY_NSTATS
+} efx_phy_stat_t;
+
+/* END MKCONFIG GENERATED PhyHeaderStatsBlock */
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_phy_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_phy_stat_t stat);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#define EFX_PHY_STATS_SIZE 0x100
+
+extern __checkReturn efx_rc_t
+efx_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+
+#if EFSYS_OPT_BIST
+
+typedef enum efx_bist_type_e {
+ EFX_BIST_TYPE_UNKNOWN,
+ EFX_BIST_TYPE_PHY_NORMAL,
+ EFX_BIST_TYPE_PHY_CABLE_SHORT,
+ EFX_BIST_TYPE_PHY_CABLE_LONG,
+ EFX_BIST_TYPE_MC_MEM, /* Test the MC DMEM and IMEM */
+ EFX_BIST_TYPE_SAT_MEM, /* Test the DMEM and IMEM of satellite cpus*/
+ EFX_BIST_TYPE_REG, /* Test the register memories */
+ EFX_BIST_TYPE_NTYPES,
+} efx_bist_type_t;
+
+typedef enum efx_bist_result_e {
+ EFX_BIST_RESULT_UNKNOWN,
+ EFX_BIST_RESULT_RUNNING,
+ EFX_BIST_RESULT_PASSED,
+ EFX_BIST_RESULT_FAILED,
+} efx_bist_result_t;
+
+typedef enum efx_phy_cable_status_e {
+ EFX_PHY_CABLE_STATUS_OK,
+ EFX_PHY_CABLE_STATUS_INVALID,
+ EFX_PHY_CABLE_STATUS_OPEN,
+ EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT,
+ EFX_PHY_CABLE_STATUS_INTERPAIRSHORT,
+ EFX_PHY_CABLE_STATUS_BUSY,
+} efx_phy_cable_status_t;
+
+typedef enum efx_bist_value_e {
+ EFX_BIST_PHY_CABLE_LENGTH_A,
+ EFX_BIST_PHY_CABLE_LENGTH_B,
+ EFX_BIST_PHY_CABLE_LENGTH_C,
+ EFX_BIST_PHY_CABLE_LENGTH_D,
+ EFX_BIST_PHY_CABLE_STATUS_A,
+ EFX_BIST_PHY_CABLE_STATUS_B,
+ EFX_BIST_PHY_CABLE_STATUS_C,
+ EFX_BIST_PHY_CABLE_STATUS_D,
+ EFX_BIST_FAULT_CODE,
+ /* Memory BIST specific values. These match to the MC_CMD_BIST_POLL
+ * response. */
+ EFX_BIST_MEM_TEST,
+ EFX_BIST_MEM_ADDR,
+ EFX_BIST_MEM_BUS,
+ EFX_BIST_MEM_EXPECT,
+ EFX_BIST_MEM_ACTUAL,
+ EFX_BIST_MEM_ECC,
+ EFX_BIST_MEM_ECC_PARITY,
+ EFX_BIST_MEM_ECC_FATAL,
+ EFX_BIST_NVALUES,
+} efx_bist_value_t;
+
+extern __checkReturn efx_rc_t
+efx_bist_enable_offline(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt uint32_t *value_maskp,
+ __out_ecount_opt(count) unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+efx_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+#define EFX_FEATURE_IPV6 0x00000001
+#define EFX_FEATURE_LFSR_HASH_INSERT 0x00000002
+#define EFX_FEATURE_LINK_EVENTS 0x00000004
+#define EFX_FEATURE_PERIODIC_MAC_STATS 0x00000008
+#define EFX_FEATURE_MCDI 0x00000020
+#define EFX_FEATURE_LOOKAHEAD_SPLIT 0x00000040
+#define EFX_FEATURE_MAC_HEADER_FILTERS 0x00000080
+#define EFX_FEATURE_TURBO 0x00000100
+#define EFX_FEATURE_MCDI_DMA 0x00000200
+#define EFX_FEATURE_TX_SRC_FILTERS 0x00000400
+#define EFX_FEATURE_PIO_BUFFERS 0x00000800
+#define EFX_FEATURE_FW_ASSISTED_TSO 0x00001000
+#define EFX_FEATURE_FW_ASSISTED_TSO_V2 0x00002000
+#define EFX_FEATURE_PACKED_STREAM 0x00004000
+
+typedef struct efx_nic_cfg_s {
+ uint32_t enc_board_type;
+ uint32_t enc_phy_type;
+#if EFSYS_OPT_NAMES
+ char enc_phy_name[21];
+#endif
+ char enc_phy_revision[21];
+ efx_mon_type_t enc_mon_type;
+#if EFSYS_OPT_MON_STATS
+ uint32_t enc_mon_stat_dma_buf_size;
+ uint32_t enc_mon_stat_mask[(EFX_MON_NSTATS + 31) / 32];
+#endif
+ unsigned int enc_features;
+ uint8_t enc_mac_addr[6];
+ uint8_t enc_port; /* PHY port number */
+ uint32_t enc_intr_vec_base;
+ uint32_t enc_intr_limit;
+ uint32_t enc_evq_limit;
+ uint32_t enc_txq_limit;
+ uint32_t enc_rxq_limit;
+ uint32_t enc_txq_max_ndescs;
+ uint32_t enc_buftbl_limit;
+ uint32_t enc_piobuf_limit;
+ uint32_t enc_piobuf_size;
+ uint32_t enc_piobuf_min_alloc_size;
+ uint32_t enc_evq_timer_quantum_ns;
+ uint32_t enc_evq_timer_max_us;
+ uint32_t enc_clk_mult;
+ uint32_t enc_rx_prefix_size;
+ uint32_t enc_rx_buf_align_start;
+ uint32_t enc_rx_buf_align_end;
+#if EFSYS_OPT_LOOPBACK
+ efx_qword_t enc_loopback_types[EFX_LINK_NMODES];
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_PHY_FLAGS
+ uint32_t enc_phy_flags_mask;
+#endif /* EFSYS_OPT_PHY_FLAGS */
+#if EFSYS_OPT_PHY_LED_CONTROL
+ uint32_t enc_led_mask;
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+#if EFSYS_OPT_PHY_STATS
+ uint64_t enc_phy_stat_mask;
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_MCDI
+ uint8_t enc_mcdi_mdio_channel;
+#if EFSYS_OPT_PHY_STATS
+ uint32_t enc_mcdi_phy_stat_mask;
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_MON_STATS
+ uint32_t *enc_mcdi_sensor_maskp;
+ uint32_t enc_mcdi_sensor_mask_size;
+#endif /* EFSYS_OPT_MON_STATS */
+#endif /* EFSYS_OPT_MCDI */
+#if EFSYS_OPT_BIST
+ uint32_t enc_bist_mask;
+#endif /* EFSYS_OPT_BIST */
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ uint32_t enc_pf;
+ uint32_t enc_vf;
+ uint32_t enc_privilege_mask;
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+ boolean_t enc_bug26807_workaround;
+ boolean_t enc_bug35388_workaround;
+ boolean_t enc_bug41750_workaround;
+ boolean_t enc_bug61265_workaround;
+ boolean_t enc_rx_batching_enabled;
+ /* Maximum number of descriptors completed in an rx event. */
+ uint32_t enc_rx_batch_max;
+ /* Number of rx descriptors the hardware requires for a push. */
+ uint32_t enc_rx_push_align;
+ /* Maximum amount of data in DMA descriptor */
+ uint32_t enc_tx_dma_desc_size_max;
+ /*
+ * Boundary which DMA descriptor data must not cross or 0 if no
+ * limitation.
+ */
+ uint32_t enc_tx_dma_desc_boundary;
+ /*
+ * Maximum number of bytes into the packet the TCP header can start for
+ * the hardware to apply TSO packet edits.
+ */
+ uint32_t enc_tx_tso_tcp_header_offset_limit;
+ boolean_t enc_fw_assisted_tso_enabled;
+ boolean_t enc_fw_assisted_tso_v2_enabled;
+ /* Number of TSO contexts on the NIC (FATSOv2) */
+ uint32_t enc_fw_assisted_tso_v2_n_contexts;
+ boolean_t enc_hw_tx_insert_vlan_enabled;
+ /* Number of PFs on the NIC */
+ uint32_t enc_hw_pf_count;
+ /* Datapath firmware vadapter/vport/vswitch support */
+ boolean_t enc_datapath_cap_evb;
+ boolean_t enc_rx_disable_scatter_supported;
+ boolean_t enc_allow_set_mac_with_installed_filters;
+ boolean_t enc_enhanced_set_mac_supported;
+ boolean_t enc_init_evq_v2_supported;
+ boolean_t enc_rx_packed_stream_supported;
+ boolean_t enc_rx_var_packed_stream_supported;
+ boolean_t enc_pm_and_rxdp_counters;
+ boolean_t enc_mac_stats_40g_tx_size_bins;
+ /* External port identifier */
+ uint8_t enc_external_port;
+ uint32_t enc_mcdi_max_payload_length;
+ /* VPD may be per-PF or global */
+ boolean_t enc_vpd_is_global;
+ /* Minimum unidirectional bandwidth in Mb/s to max out all ports */
+ uint32_t enc_required_pcie_bandwidth_mbps;
+ uint32_t enc_max_pcie_link_gen;
+ /* Firmware verifies integrity of NVRAM updates */
+ uint32_t enc_fw_verified_nvram_update_required;
+} efx_nic_cfg_t;
+
+#define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff)
+#define EFX_PCI_FUNCTION_IS_VF(_encp) ((_encp)->enc_vf != 0xffff)
+
+#define EFX_PCI_FUNCTION(_encp) \
+ (EFX_PCI_FUNCTION_IS_PF(_encp) ? (_encp)->enc_pf : (_encp)->enc_vf)
+
+#define EFX_PCI_VF_PARENT(_encp) ((_encp)->enc_pf)
+
+extern const efx_nic_cfg_t *
+efx_nic_cfg_get(
+ __in efx_nic_t *enp);
+
+typedef struct efx_nic_fw_info_s {
+ /* Basic FW version information */
+ uint16_t enfi_mc_fw_version[4];
+ /*
+ * If datapath capabilities can be detected,
+ * additional FW information is to be shown
+ */
+ boolean_t enfi_dpcpu_fw_ids_valid;
+ /* Rx and Tx datapath CPU FW IDs */
+ uint16_t enfi_rx_dpcpu_fw_id;
+ uint16_t enfi_tx_dpcpu_fw_id;
+} efx_nic_fw_info_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_get_fw_version(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_info_t *enfip);
+
+/* Driver resource limits (minimum required/maximum usable). */
+typedef struct efx_drv_limits_s {
+ uint32_t edl_min_evq_count;
+ uint32_t edl_max_evq_count;
+
+ uint32_t edl_min_rxq_count;
+ uint32_t edl_max_rxq_count;
+
+ uint32_t edl_min_txq_count;
+ uint32_t edl_max_txq_count;
+
+ /* PIO blocks (sub-allocated from piobuf) */
+ uint32_t edl_min_pio_alloc_size;
+ uint32_t edl_max_pio_alloc_count;
+} efx_drv_limits_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp);
+
+typedef enum efx_nic_region_e {
+ EFX_REGION_VI, /* Memory BAR UC mapping */
+ EFX_REGION_PIO_WRITE_VI, /* Memory BAR WC mapping */
+} efx_nic_region_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *evq_countp,
+ __out uint32_t *rxq_countp,
+ __out uint32_t *txq_countp);
+
+
+#if EFSYS_OPT_VPD
+
+typedef enum efx_vpd_tag_e {
+ EFX_VPD_ID = 0x02,
+ EFX_VPD_END = 0x0f,
+ EFX_VPD_RO = 0x10,
+ EFX_VPD_RW = 0x11,
+} efx_vpd_tag_t;
+
+typedef uint16_t efx_vpd_keyword_t;
+
+typedef struct efx_vpd_value_s {
+ efx_vpd_tag_t evv_tag;
+ efx_vpd_keyword_t evv_keyword;
+ uint8_t evv_length;
+ uint8_t evv_value[0x100];
+} efx_vpd_value_t;
+
+
+#define EFX_VPD_KEYWORD(x, y) ((x) | ((y) << 8))
+
+extern __checkReturn efx_rc_t
+efx_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_set(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_next(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+efx_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+/* NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef enum efx_nvram_type_e {
+ EFX_NVRAM_INVALID = 0,
+ EFX_NVRAM_BOOTROM,
+ EFX_NVRAM_BOOTROM_CFG,
+ EFX_NVRAM_MC_FIRMWARE,
+ EFX_NVRAM_MC_GOLDEN,
+ EFX_NVRAM_PHY,
+ EFX_NVRAM_NULLPHY,
+ EFX_NVRAM_FPGA,
+ EFX_NVRAM_FCFW,
+ EFX_NVRAM_CPLD,
+ EFX_NVRAM_FPGA_BACKUP,
+ EFX_NVRAM_DYNAMIC_CFG,
+ EFX_NVRAM_LICENSE,
+ EFX_NVRAM_UEFIROM,
+ EFX_NVRAM_NTYPES,
+} efx_nvram_type_t;
+
+extern __checkReturn efx_rc_t
+efx_nvram_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+efx_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+efx_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt size_t *pref_chunkp);
+
+extern __checkReturn efx_rc_t
+efx_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+efx_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+efx_nvram_validate(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size);
+
+extern __checkReturn efx_rc_t
+efx_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+efx_nvram_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_BOOTCFG
+
+/* Report size and offset of bootcfg sector in NVRAM partition. */
+extern __checkReturn efx_rc_t
+efx_bootcfg_sector_info(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __out_opt uint32_t *sector_countp,
+ __out size_t *offsetp,
+ __out size_t *max_sizep);
+
+/*
+ * Copy bootcfg sector data to a target buffer which may differ in size.
+ * Optionally corrects format errors in source buffer.
+ */
+extern efx_rc_t
+efx_bootcfg_copy_sector(
+ __in efx_nic_t *enp,
+ __inout_bcount(sector_length)
+ uint8_t *sector,
+ __in size_t sector_length,
+ __out_bcount(data_size) uint8_t *data,
+ __in size_t data_size,
+ __in boolean_t handle_format_errors);
+
+extern efx_rc_t
+efx_bootcfg_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern efx_rc_t
+efx_bootcfg_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+#endif /* EFSYS_OPT_BOOTCFG */
+
+#if EFSYS_OPT_DIAG
+
+typedef enum efx_pattern_type_t {
+ EFX_PATTERN_BYTE_INCREMENT = 0,
+ EFX_PATTERN_ALL_THE_SAME,
+ EFX_PATTERN_BIT_ALTERNATE,
+ EFX_PATTERN_BYTE_ALTERNATE,
+ EFX_PATTERN_BYTE_CHANGING,
+ EFX_PATTERN_BIT_SWEEP,
+ EFX_PATTERN_NTYPES
+} efx_pattern_type_t;
+
+typedef void
+(*efx_sram_pattern_fn_t)(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp);
+
+extern __checkReturn efx_rc_t
+efx_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_pattern_type_t type);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+efx_sram_buf_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in efsys_mem_t *esmp,
+ __in size_t n);
+
+extern void
+efx_sram_buf_tbl_clear(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in size_t n);
+
+#define EFX_BUF_TBL_SIZE 0x20000
+
+#define EFX_BUF_SIZE 4096
+
+/* EV */
+
+typedef struct efx_evq_s efx_evq_t;
+
+#if EFSYS_OPT_QSTATS
+
+/* START MKCONFIG GENERATED EfxHeaderEventQueueBlock 6f3843f5fe7cc843 */
+typedef enum efx_ev_qstat_e {
+ EV_ALL,
+ EV_RX,
+ EV_RX_OK,
+ EV_RX_FRM_TRUNC,
+ EV_RX_TOBE_DISC,
+ EV_RX_PAUSE_FRM_ERR,
+ EV_RX_BUF_OWNER_ID_ERR,
+ EV_RX_IPV4_HDR_CHKSUM_ERR,
+ EV_RX_TCP_UDP_CHKSUM_ERR,
+ EV_RX_ETH_CRC_ERR,
+ EV_RX_IP_FRAG_ERR,
+ EV_RX_MCAST_PKT,
+ EV_RX_MCAST_HASH_MATCH,
+ EV_RX_TCP_IPV4,
+ EV_RX_TCP_IPV6,
+ EV_RX_UDP_IPV4,
+ EV_RX_UDP_IPV6,
+ EV_RX_OTHER_IPV4,
+ EV_RX_OTHER_IPV6,
+ EV_RX_NON_IP,
+ EV_RX_BATCH,
+ EV_TX,
+ EV_TX_WQ_FF_FULL,
+ EV_TX_PKT_ERR,
+ EV_TX_PKT_TOO_BIG,
+ EV_TX_UNEXPECTED,
+ EV_GLOBAL,
+ EV_GLOBAL_MNT,
+ EV_DRIVER,
+ EV_DRIVER_SRM_UPD_DONE,
+ EV_DRIVER_TX_DESCQ_FLS_DONE,
+ EV_DRIVER_RX_DESCQ_FLS_DONE,
+ EV_DRIVER_RX_DESCQ_FLS_FAILED,
+ EV_DRIVER_RX_DSC_ERROR,
+ EV_DRIVER_TX_DSC_ERROR,
+ EV_DRV_GEN,
+ EV_MCDI_RESPONSE,
+ EV_NQSTATS
+} efx_ev_qstat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderEventQueueBlock */
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern __checkReturn efx_rc_t
+efx_ev_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_ev_fini(
+ __in efx_nic_t *enp);
+
+#define EFX_EVQ_MAXNEVS 32768
+#define EFX_EVQ_MINNEVS 512
+
+#define EFX_EVQ_SIZE(_nevs) ((_nevs) * sizeof (efx_qword_t))
+#define EFX_EVQ_NBUFS(_nevs) (EFX_EVQ_SIZE(_nevs) / EFX_BUF_SIZE)
+
+#define EFX_EVQ_FLAGS_TYPE_MASK (0x3)
+#define EFX_EVQ_FLAGS_TYPE_AUTO (0x0)
+#define EFX_EVQ_FLAGS_TYPE_THROUGHPUT (0x1)
+#define EFX_EVQ_FLAGS_TYPE_LOW_LATENCY (0x2)
+
+#define EFX_EVQ_FLAGS_NOTIFY_MASK (0xC)
+#define EFX_EVQ_FLAGS_NOTIFY_INTERRUPT (0x0) /* Interrupting (default) */
+#define EFX_EVQ_FLAGS_NOTIFY_DISABLED (0x4) /* Non-interrupting */
+
+extern __checkReturn efx_rc_t
+efx_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __deref_out efx_evq_t **eepp);
+
+extern void
+efx_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+typedef __checkReturn boolean_t
+(*efx_initialized_ev_t)(
+ __in_opt void *arg);
+
+#define EFX_PKT_UNICAST 0x0004
+#define EFX_PKT_START 0x0008
+
+#define EFX_PKT_VLAN_TAGGED 0x0010
+#define EFX_CKSUM_TCPUDP 0x0020
+#define EFX_CKSUM_IPV4 0x0040
+#define EFX_PKT_CONT 0x0080
+
+#define EFX_CHECK_VLAN 0x0100
+#define EFX_PKT_TCP 0x0200
+#define EFX_PKT_UDP 0x0400
+#define EFX_PKT_IPV4 0x0800
+
+#define EFX_PKT_IPV6 0x1000
+#define EFX_PKT_PREFIX_LEN 0x2000
+#define EFX_ADDR_MISMATCH 0x4000
+#define EFX_DISCARD 0x8000
+
+/*
+ * The following flags are used only for packed stream
+ * mode. The values for the flags are reused to fit into 16 bit,
+ * since EFX_PKT_START and EFX_PKT_CONT are never used in
+ * packed stream mode
+ */
+#define EFX_PKT_PACKED_STREAM_NEW_BUFFER EFX_PKT_START
+#define EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE EFX_PKT_CONT
+
+
+#define EFX_EV_RX_NLABELS 32
+#define EFX_EV_TX_NLABELS 32
+
+typedef __checkReturn boolean_t
+(*efx_rx_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id,
+ __in uint32_t size,
+ __in uint16_t flags);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+/*
+ * Packed stream mode is documented in SF-112241-TC.
+ * The general idea is that, instead of putting each incoming
+ * packet into a separate buffer which is specified in a RX
+ * descriptor, a large buffer is provided to the hardware and
+ * packets are put there in a continuous stream.
+ * The main advantage of such an approach is that RX queue refilling
+ * happens much less frequently.
+ */
+
+typedef __checkReturn boolean_t
+(*efx_rx_ps_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id,
+ __in uint32_t pkt_count,
+ __in uint16_t flags);
+
+#endif
+
+typedef __checkReturn boolean_t
+(*efx_tx_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id);
+
+#define EFX_EXCEPTION_RX_RECOVERY 0x00000001
+#define EFX_EXCEPTION_RX_DSC_ERROR 0x00000002
+#define EFX_EXCEPTION_TX_DSC_ERROR 0x00000003
+#define EFX_EXCEPTION_UNKNOWN_SENSOREVT 0x00000004
+#define EFX_EXCEPTION_FWALERT_SRAM 0x00000005
+#define EFX_EXCEPTION_UNKNOWN_FWALERT 0x00000006
+#define EFX_EXCEPTION_RX_ERROR 0x00000007
+#define EFX_EXCEPTION_TX_ERROR 0x00000008
+#define EFX_EXCEPTION_EV_ERROR 0x00000009
+
+typedef __checkReturn boolean_t
+(*efx_exception_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t data);
+
+typedef __checkReturn boolean_t
+(*efx_rxq_flush_done_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t rxq_index);
+
+typedef __checkReturn boolean_t
+(*efx_rxq_flush_failed_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t rxq_index);
+
+typedef __checkReturn boolean_t
+(*efx_txq_flush_done_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t txq_index);
+
+typedef __checkReturn boolean_t
+(*efx_software_ev_t)(
+ __in_opt void *arg,
+ __in uint16_t magic);
+
+typedef __checkReturn boolean_t
+(*efx_sram_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t code);
+
+#define EFX_SRAM_CLEAR 0
+#define EFX_SRAM_UPDATE 1
+#define EFX_SRAM_ILLEGAL_CLEAR 2
+
+typedef __checkReturn boolean_t
+(*efx_wake_up_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_timer_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_link_change_ev_t)(
+ __in_opt void *arg,
+ __in efx_link_mode_t link_mode);
+
+#if EFSYS_OPT_MON_STATS
+
+typedef __checkReturn boolean_t
+(*efx_monitor_ev_t)(
+ __in_opt void *arg,
+ __in efx_mon_stat_t id,
+ __in efx_mon_stat_value_t value);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#if EFSYS_OPT_MAC_STATS
+
+typedef __checkReturn boolean_t
+(*efx_mac_stats_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t generation
+ );
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+typedef struct efx_ev_callbacks_s {
+ efx_initialized_ev_t eec_initialized;
+ efx_rx_ev_t eec_rx;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ efx_rx_ps_ev_t eec_rx_ps;
+#endif
+ efx_tx_ev_t eec_tx;
+ efx_exception_ev_t eec_exception;
+ efx_rxq_flush_done_ev_t eec_rxq_flush_done;
+ efx_rxq_flush_failed_ev_t eec_rxq_flush_failed;
+ efx_txq_flush_done_ev_t eec_txq_flush_done;
+ efx_software_ev_t eec_software;
+ efx_sram_ev_t eec_sram;
+ efx_wake_up_ev_t eec_wake_up;
+ efx_timer_ev_t eec_timer;
+ efx_link_change_ev_t eec_link_change;
+#if EFSYS_OPT_MON_STATS
+ efx_monitor_ev_t eec_monitor;
+#endif /* EFSYS_OPT_MON_STATS */
+#if EFSYS_OPT_MAC_STATS
+ efx_mac_stats_ev_t eec_mac_stats;
+#endif /* EFSYS_OPT_MAC_STATS */
+} efx_ev_callbacks_t;
+
+extern __checkReturn boolean_t
+efx_ev_qpending(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#if EFSYS_OPT_EV_PREFETCH
+
+extern void
+efx_ev_qprefetch(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+extern void
+efx_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+extern __checkReturn efx_rc_t
+efx_ev_usecs_to_ticks(
+ __in efx_nic_t *enp,
+ __in unsigned int usecs,
+ __out unsigned int *ticksp);
+
+extern __checkReturn efx_rc_t
+efx_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+extern __checkReturn efx_rc_t
+efx_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#if EFSYS_OPT_QSTATS
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_ev_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern void
+efx_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern void
+efx_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+/* RX */
+
+extern __checkReturn efx_rc_t
+efx_rx_init(
+ __inout efx_nic_t *enp);
+
+extern void
+efx_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+efx_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+
+typedef enum efx_rx_hash_alg_e {
+ EFX_RX_HASHALG_LFSR = 0,
+ EFX_RX_HASHALG_TOEPLITZ
+} efx_rx_hash_alg_t;
+
+#define EFX_RX_HASH_IPV4 (1U << 0)
+#define EFX_RX_HASH_TCPIPV4 (1U << 1)
+#define EFX_RX_HASH_IPV6 (1U << 2)
+#define EFX_RX_HASH_TCPIPV6 (1U << 3)
+
+typedef unsigned int efx_rx_hash_type_t;
+
+typedef enum efx_rx_hash_support_e {
+ EFX_RX_HASH_UNAVAILABLE = 0, /* Hardware hash not inserted */
+ EFX_RX_HASH_AVAILABLE /* Insert hash with/without RSS */
+} efx_rx_hash_support_t;
+
+#define EFX_RSS_TBL_SIZE 128 /* Rows in RX indirection table */
+#define EFX_MAXRSS 64 /* RX indirection entry range */
+#define EFX_MAXRSS_LEGACY 16 /* See bug16611 and bug17213 */
+
+typedef enum efx_rx_scale_support_e {
+ EFX_RX_SCALE_UNAVAILABLE = 0, /* Not supported */
+ EFX_RX_SCALE_EXCLUSIVE, /* Writable key/indirection table */
+ EFX_RX_SCALE_SHARED /* Read-only key/indirection table */
+} efx_rx_scale_support_t;
+
+extern __checkReturn efx_rc_t
+efx_rx_hash_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_hash_support_t *supportp);
+
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_scale_support_t *supportp);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+extern __checkReturn uint32_t
+efx_pseudo_hdr_hash_get(
+ __in efx_rxq_t *erp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+extern __checkReturn efx_rc_t
+efx_pseudo_hdr_pkt_length_get(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __out uint16_t *pkt_lengthp);
+
+#define EFX_RXQ_MAXNDESCS 4096
+#define EFX_RXQ_MINNDESCS 512
+
+#define EFX_RXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t))
+#define EFX_RXQ_NBUFS(_ndescs) (EFX_RXQ_SIZE(_ndescs) / EFX_BUF_SIZE)
+#define EFX_RXQ_LIMIT(_ndescs) ((_ndescs) - 16)
+#define EFX_RXQ_DC_NDESCS(_dcsize) (8 << _dcsize)
+
+typedef enum efx_rxq_type_e {
+ EFX_RXQ_TYPE_DEFAULT,
+ EFX_RXQ_TYPE_SCATTER,
+ EFX_RXQ_TYPE_PACKED_STREAM_1M,
+ EFX_RXQ_TYPE_PACKED_STREAM_512K,
+ EFX_RXQ_TYPE_PACKED_STREAM_256K,
+ EFX_RXQ_TYPE_PACKED_STREAM_128K,
+ EFX_RXQ_TYPE_PACKED_STREAM_64K,
+ EFX_RXQ_NTYPES
+} efx_rxq_type_t;
+
+extern __checkReturn efx_rc_t
+efx_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp);
+
+typedef struct efx_buffer_s {
+ efsys_dma_addr_t eb_addr;
+ size_t eb_size;
+ boolean_t eb_eop;
+} efx_buffer_t;
+
+typedef struct efx_desc_s {
+ efx_qword_t ed_eq;
+} efx_desc_t;
+
+extern void
+efx_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+extern void
+efx_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+/*
+ * Fake length for RXQ descriptors in packed stream mode
+ * to make hardware happy
+ */
+#define EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE 32
+
+extern void
+efx_rx_qps_update_credits(
+ __in efx_rxq_t *erp);
+
+extern __checkReturn uint8_t *
+efx_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp);
+#endif
+
+extern __checkReturn efx_rc_t
+efx_rx_qflush(
+ __in efx_rxq_t *erp);
+
+extern void
+efx_rx_qenable(
+ __in efx_rxq_t *erp);
+
+extern void
+efx_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+/* TX */
+
+typedef struct efx_txq_s efx_txq_t;
+
+#if EFSYS_OPT_QSTATS
+
+/* START MKCONFIG GENERATED EfxHeaderTransmitQueueBlock 12dff8778598b2db */
+typedef enum efx_tx_qstat_e {
+ TX_POST,
+ TX_POST_PIO,
+ TX_NQSTATS
+} efx_tx_qstat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderTransmitQueueBlock */
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern __checkReturn efx_rc_t
+efx_tx_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_tx_fini(
+ __in efx_nic_t *enp);
+
+#define EFX_TXQ_MINNDESCS 512
+
+#define EFX_TXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t))
+#define EFX_TXQ_NBUFS(_ndescs) (EFX_TXQ_SIZE(_ndescs) / EFX_BUF_SIZE)
+#define EFX_TXQ_LIMIT(_ndescs) ((_ndescs) - 16)
+#define EFX_TXQ_DC_NDESCS(_dcsize) (8 << _dcsize)
+
+#define EFX_TXQ_MAX_BUFS 8 /* Maximum independent of EFX_BUG35388_WORKAROUND. */
+
+#define EFX_TXQ_CKSUM_IPV4 0x0001
+#define EFX_TXQ_CKSUM_TCPUDP 0x0002
+#define EFX_TXQ_FATSOV2 0x0004
+
+extern __checkReturn efx_rc_t
+efx_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_txq_t **etpp,
+ __out unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+extern void
+efx_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+extern __checkReturn efx_rc_t
+efx_tx_qflush(
+ __in efx_txq_t *etp);
+
+extern void
+efx_tx_qenable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_enable(
+ __in efx_txq_t *etp);
+
+extern void
+efx_tx_qpio_disable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+efx_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+extern void
+efx_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp);
+
+/* Number of FATSOv2 option descriptors */
+#define EFX_TX_FATSOV2_OPT_NDESCS 2
+
+/* Maximum number of DMA segments per TSO packet (not superframe) */
+#define EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX 24
+
+extern void
+efx_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count);
+
+extern void
+efx_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp);
+
+#if EFSYS_OPT_QSTATS
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_tx_qstat_name(
+ __in efx_nic_t *etp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern void
+efx_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern void
+efx_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+
+/* FILTER */
+
+#if EFSYS_OPT_FILTER
+
+#define EFX_ETHER_TYPE_IPV4 0x0800
+#define EFX_ETHER_TYPE_IPV6 0x86DD
+
+#define EFX_IPPROTO_TCP 6
+#define EFX_IPPROTO_UDP 17
+
+/* Use RSS to spread across multiple queues */
+#define EFX_FILTER_FLAG_RX_RSS 0x01
+/* Enable RX scatter */
+#define EFX_FILTER_FLAG_RX_SCATTER 0x02
+/*
+ * Override an automatic filter (priority EFX_FILTER_PRI_AUTO).
+ * May only be set by the filter implementation for each type.
+ * A removal request will restore the automatic filter in its place.
+ */
+#define EFX_FILTER_FLAG_RX_OVER_AUTO 0x04
+/* Filter is for RX */
+#define EFX_FILTER_FLAG_RX 0x08
+/* Filter is for TX */
+#define EFX_FILTER_FLAG_TX 0x10
+
+typedef unsigned int efx_filter_flags_t;
+
+typedef enum efx_filter_match_flags_e {
+ EFX_FILTER_MATCH_REM_HOST = 0x0001, /* Match by remote IP host
+ * address */
+ EFX_FILTER_MATCH_LOC_HOST = 0x0002, /* Match by local IP host
+ * address */
+ EFX_FILTER_MATCH_REM_MAC = 0x0004, /* Match by remote MAC address */
+ EFX_FILTER_MATCH_REM_PORT = 0x0008, /* Match by remote TCP/UDP port */
+ EFX_FILTER_MATCH_LOC_MAC = 0x0010, /* Match by remote TCP/UDP port */
+ EFX_FILTER_MATCH_LOC_PORT = 0x0020, /* Match by local TCP/UDP port */
+ EFX_FILTER_MATCH_ETHER_TYPE = 0x0040, /* Match by Ether-type */
+ EFX_FILTER_MATCH_INNER_VID = 0x0080, /* Match by inner VLAN ID */
+ EFX_FILTER_MATCH_OUTER_VID = 0x0100, /* Match by outer VLAN ID */
+ EFX_FILTER_MATCH_IP_PROTO = 0x0200, /* Match by IP transport
+ * protocol */
+ /* Match otherwise-unmatched multicast and broadcast packets */
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST = 0x40000000,
+ /* Match otherwise-unmatched unicast packets */
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST = 0x80000000,
+} efx_filter_match_flags_t;
+
+typedef enum efx_filter_priority_s {
+ EFX_FILTER_PRI_HINT = 0, /* Performance hint */
+ EFX_FILTER_PRI_AUTO, /* Automatic filter based on device
+ * address list or hardware
+ * requirements. This may only be used
+ * by the filter implementation for
+ * each NIC type. */
+ EFX_FILTER_PRI_MANUAL, /* Manually configured filter */
+ EFX_FILTER_PRI_REQUIRED, /* Required for correct behaviour of the
+ * client (e.g. SR-IOV, HyperV VMQ etc.)
+ */
+} efx_filter_priority_t;
+
+/*
+ * FIXME: All these fields are assumed to be in little-endian byte order.
+ * It may be better for some to be big-endian. See bug42804.
+ */
+
+typedef struct efx_filter_spec_s {
+ uint32_t efs_match_flags;
+ uint32_t efs_priority:2;
+ uint32_t efs_flags:6;
+ uint32_t efs_dmaq_id:12;
+ uint32_t efs_rss_context;
+ uint16_t efs_outer_vid;
+ uint16_t efs_inner_vid;
+ uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN];
+ uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN];
+ uint16_t efs_ether_type;
+ uint8_t efs_ip_proto;
+ uint16_t efs_loc_port;
+ uint16_t efs_rem_port;
+ efx_oword_t efs_rem_host;
+ efx_oword_t efs_loc_host;
+} efx_filter_spec_t;
+
+
+/* Default values for use in filter specifications */
+#define EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT 0xffffffff
+#define EFX_FILTER_SPEC_RX_DMAQ_ID_DROP 0xfff
+#define EFX_FILTER_SPEC_VID_UNSPEC 0xffff
+
+extern __checkReturn efx_rc_t
+efx_filter_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_filter_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_filter_insert(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_remove(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_restore(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp);
+
+extern void
+efx_filter_spec_init_rx(
+ __out efx_filter_spec_t *spec,
+ __in efx_filter_priority_t priority,
+ __in efx_filter_flags_t flags,
+ __in efx_rxq_t *erp);
+
+extern void
+efx_filter_spec_init_tx(
+ __out efx_filter_spec_t *spec,
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t host,
+ __in uint16_t port);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t lhost,
+ __in uint16_t lport,
+ __in uint32_t rhost,
+ __in uint16_t rport);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_eth_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t vid,
+ __in const uint8_t *addr);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_uc_def(
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_mc_def(
+ __inout efx_filter_spec_t *spec);
+
+#endif /* EFSYS_OPT_FILTER */
+
+/* HASH */
+
+extern __checkReturn uint32_t
+efx_hash_dwords(
+ __in_ecount(count) uint32_t const *input,
+ __in size_t count,
+ __in uint32_t init);
+
+extern __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init);
+
+#if EFSYS_OPT_LICENSING
+
+/* LICENSING */
+
+typedef struct efx_key_stats_s {
+ uint32_t eks_valid;
+ uint32_t eks_invalid;
+ uint32_t eks_blacklisted;
+ uint32_t eks_unverifiable;
+ uint32_t eks_wrong_node;
+ uint32_t eks_licensed_apps_lo;
+ uint32_t eks_licensed_apps_hi;
+ uint32_t eks_licensed_features_lo;
+ uint32_t eks_licensed_features_hi;
+} efx_key_stats_t;
+
+extern __checkReturn efx_rc_t
+efx_lic_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_lic_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn boolean_t
+efx_lic_check_support(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_lic_update_licenses(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_lic_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *ksp);
+
+extern __checkReturn efx_rc_t
+efx_lic_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+extern __checkReturn efx_rc_t
+efx_lic_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_opt uint8_t *bufferp);
+
+
+extern __checkReturn efx_rc_t
+efx_lic_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_LICENSING */
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_H */
diff --git a/drivers/net/sfc/base/efx_bootcfg.c b/drivers/net/sfc/base/efx_bootcfg.c
new file mode 100644
index 00000000..d589c86a
--- /dev/null
+++ b/drivers/net/sfc/base/efx_bootcfg.c
@@ -0,0 +1,563 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_BOOTCFG
+
+/*
+ * Maximum size of BOOTCFG block across all nics as understood by SFCgPXE.
+ * NOTE: This is larger than the Medford per-PF bootcfg sector.
+ */
+#define BOOTCFG_MAX_SIZE 0x1000
+
+/* Medford per-PF bootcfg sector */
+#define BOOTCFG_PER_PF 0x800
+#define BOOTCFG_PF_COUNT 16
+
+#define DHCP_END ((uint8_t)0xff)
+#define DHCP_PAD ((uint8_t)0)
+
+
+/* Report the layout of bootcfg sectors in NVRAM partition. */
+ __checkReturn efx_rc_t
+efx_bootcfg_sector_info(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __out_opt uint32_t *sector_countp,
+ __out size_t *offsetp,
+ __out size_t *max_sizep)
+{
+ uint32_t count;
+ size_t max_size;
+ size_t offset;
+ int rc;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ max_size = BOOTCFG_MAX_SIZE;
+ offset = 0;
+ count = 1;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ max_size = BOOTCFG_MAX_SIZE;
+ offset = 0;
+ count = 1;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD: {
+ /* Shared partition (array indexed by PF) */
+ max_size = BOOTCFG_PER_PF;
+ count = BOOTCFG_PF_COUNT;
+ if (pf >= count) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ offset = max_size * pf;
+ break;
+ }
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ EFSYS_ASSERT3U(max_size, <=, BOOTCFG_MAX_SIZE);
+
+ if (sector_countp != NULL)
+ *sector_countp = count;
+ *offsetp = offset;
+ *max_sizep = max_size;
+
+ return (0);
+
+#if EFSYS_OPT_MEDFORD
+fail2:
+ EFSYS_PROBE(fail2);
+#endif
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+
+static __checkReturn uint8_t
+efx_bootcfg_csum(
+ __in efx_nic_t *enp,
+ __in_bcount(size) uint8_t const *data,
+ __in size_t size)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ unsigned int pos;
+ uint8_t checksum = 0;
+
+ for (pos = 0; pos < size; pos++)
+ checksum += data[pos];
+ return (checksum);
+}
+
+static __checkReturn efx_rc_t
+efx_bootcfg_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) uint8_t const *data,
+ __in size_t size,
+ __out_opt size_t *usedp)
+{
+ size_t offset = 0;
+ size_t used = 0;
+ efx_rc_t rc;
+
+ /* Start parsing tags immediately after the checksum */
+ for (offset = 1; offset < size; ) {
+ uint8_t tag;
+ uint8_t length;
+
+ /* Consume tag */
+ tag = data[offset];
+ if (tag == DHCP_END) {
+ offset++;
+ used = offset;
+ break;
+ }
+ if (tag == DHCP_PAD) {
+ offset++;
+ continue;
+ }
+
+ /* Consume length */
+ if (offset + 1 >= size) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ length = data[offset + 1];
+
+ /* Consume *length */
+ if (offset + 1 + length >= size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ offset += 2 + length;
+ used = offset;
+ }
+
+ /* Checksum the entire sector, including bytes after any DHCP_END */
+ if (efx_bootcfg_csum(enp, data, size) != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ if (usedp != NULL)
+ *usedp = used;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Copy bootcfg sector data to a target buffer which may differ in size.
+ * Optionally corrects format errors in source buffer.
+ */
+ efx_rc_t
+efx_bootcfg_copy_sector(
+ __in efx_nic_t *enp,
+ __inout_bcount(sector_length)
+ uint8_t *sector,
+ __in size_t sector_length,
+ __out_bcount(data_size) uint8_t *data,
+ __in size_t data_size,
+ __in boolean_t handle_format_errors)
+{
+ size_t used_bytes;
+ efx_rc_t rc;
+
+ /* Verify that the area is correctly formatted and checksummed */
+ rc = efx_bootcfg_verify(enp, sector, sector_length,
+ &used_bytes);
+
+ if (!handle_format_errors) {
+ if (rc != 0)
+ goto fail1;
+
+ if ((used_bytes < 2) ||
+ (sector[used_bytes - 1] != DHCP_END)) {
+ /* Block too short, or DHCP_END missing */
+ rc = ENOENT;
+ goto fail2;
+ }
+ }
+
+ /* Synthesize empty format on verification failure */
+ if (rc != 0 || used_bytes == 0) {
+ sector[0] = 0;
+ sector[1] = DHCP_END;
+ used_bytes = 2;
+ }
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ EFSYS_ASSERT(used_bytes <= sector_length);
+ EFSYS_ASSERT(sector_length >= 2);
+
+ /*
+ * Legacy bootcfg sectors don't terminate with a DHCP_END character.
+ * Modify the returned payload so it does.
+ * Reinitialise the sector if there isn't room for the character.
+ */
+ if (sector[used_bytes - 1] != DHCP_END) {
+ if (used_bytes >= sector_length) {
+ sector[0] = 0;
+ used_bytes = 1;
+ }
+ sector[used_bytes] = DHCP_END;
+ ++used_bytes;
+ }
+
+ /*
+ * Verify that the target buffer is large enough for the
+ * entire used bootcfg area, then copy into the target buffer.
+ */
+ if (used_bytes > data_size) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+ memcpy(data, sector, used_bytes);
+
+ /* Zero out the unused portion of the target buffer */
+ if (used_bytes < data_size)
+ (void) memset(data + used_bytes, 0, data_size - used_bytes);
+
+ /*
+ * The checksum includes trailing data after any DHCP_END character,
+ * which we've just modified (by truncation or appending DHCP_END).
+ */
+ data[0] -= efx_bootcfg_csum(enp, data, data_size);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ efx_rc_t
+efx_bootcfg_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ uint8_t *payload = NULL;
+ size_t used_bytes;
+ size_t partn_length;
+ size_t sector_length;
+ size_t sector_offset;
+ efx_rc_t rc;
+ uint32_t sector_number;
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ sector_number = enp->en_nic_cfg.enc_pf;
+#else
+ sector_number = 0;
+#endif
+ rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length);
+ if (rc != 0)
+ goto fail1;
+
+ /* The bootcfg sector may be stored in a (larger) shared partition */
+ rc = efx_bootcfg_sector_info(enp, sector_number,
+ NULL, &sector_offset, &sector_length);
+ if (rc != 0)
+ goto fail2;
+
+ if (sector_length > BOOTCFG_MAX_SIZE)
+ sector_length = BOOTCFG_MAX_SIZE;
+
+ if (sector_offset + sector_length > partn_length) {
+ /* Partition is too small */
+ rc = EFBIG;
+ goto fail3;
+ }
+
+ /*
+ * We need to read the entire BOOTCFG sector to ensure we read all the
+ * tags, because legacy bootcfg sectors are not guaranteed to end with
+ * a DHCP_END character. If the user hasn't supplied a sufficiently
+ * large buffer then use our own buffer.
+ */
+ if (sector_length > size) {
+ EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload);
+ if (payload == NULL) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+ } else
+ payload = (uint8_t *)data;
+
+ if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
+ goto fail5;
+
+ if ((rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
+ sector_offset, (caddr_t)payload, sector_length)) != 0) {
+ (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+ goto fail6;
+ }
+
+ if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ goto fail7;
+
+ /* Verify that the area is correctly formatted and checksummed */
+ rc = efx_bootcfg_verify(enp, (caddr_t)payload, sector_length,
+ &used_bytes);
+ if (rc != 0 || used_bytes == 0) {
+ payload[0] = (uint8_t)~DHCP_END;
+ payload[1] = DHCP_END;
+ used_bytes = 2;
+ }
+
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ EFSYS_ASSERT(used_bytes <= sector_length);
+
+ /*
+ * Legacy bootcfg sectors don't terminate with a DHCP_END character.
+ * Modify the returned payload so it does. BOOTCFG_MAX_SIZE is by
+ * definition large enough for any valid (per-port) bootcfg sector,
+ * so reinitialise the sector if there isn't room for the character.
+ */
+ if (payload[used_bytes - 1] != DHCP_END) {
+ if (used_bytes + 1 > sector_length) {
+ payload[0] = 0;
+ used_bytes = 1;
+ }
+
+ payload[used_bytes] = DHCP_END;
+ ++used_bytes;
+ }
+
+ /*
+ * Verify that the user supplied buffer is large enough for the
+ * entire used bootcfg area, then copy into the user supplied buffer.
+ */
+ if (used_bytes > size) {
+ rc = ENOSPC;
+ goto fail8;
+ }
+ if (sector_length > size) {
+ memcpy(data, payload, used_bytes);
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
+ }
+
+ /* Zero out the unused portion of the user buffer */
+ if (used_bytes < size)
+ (void) memset(data + used_bytes, 0, size - used_bytes);
+
+ /*
+ * The checksum includes trailing data after any DHCP_END character,
+ * which we've just modified (by truncation or appending DHCP_END).
+ */
+ data[0] -= efx_bootcfg_csum(enp, data, size);
+
+ return (0);
+
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+ if (sector_length > size)
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ efx_rc_t
+efx_bootcfg_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ uint8_t *partn_data;
+ uint8_t checksum;
+ size_t partn_length;
+ size_t sector_length;
+ size_t sector_offset;
+ size_t used_bytes;
+ efx_rc_t rc;
+ uint32_t sector_number;
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ sector_number = enp->en_nic_cfg.enc_pf;
+#else
+ sector_number = 0;
+#endif
+
+ rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length);
+ if (rc != 0)
+ goto fail1;
+
+ /* The bootcfg sector may be stored in a (larger) shared partition */
+ rc = efx_bootcfg_sector_info(enp, sector_number,
+ NULL, &sector_offset, &sector_length);
+ if (rc != 0)
+ goto fail2;
+
+ if (sector_length > BOOTCFG_MAX_SIZE)
+ sector_length = BOOTCFG_MAX_SIZE;
+
+ if (sector_offset + sector_length > partn_length) {
+ /* Partition is too small */
+ rc = EFBIG;
+ goto fail3;
+ }
+
+ if ((rc = efx_bootcfg_verify(enp, data, size, &used_bytes)) != 0)
+ goto fail4;
+
+ /* The caller *must* terminate their block with a DHCP_END character */
+ if ((used_bytes < 2) || ((uint8_t)data[used_bytes - 1] != DHCP_END)) {
+ /* Block too short or DHCP_END missing */
+ rc = ENOENT;
+ goto fail5;
+ }
+
+ /* Check that the hardware has support for this much data */
+ if (used_bytes > MIN(sector_length, BOOTCFG_MAX_SIZE)) {
+ rc = ENOSPC;
+ goto fail6;
+ }
+
+ /*
+ * If the BOOTCFG sector is stored in a shared partition, then we must
+ * read the whole partition and insert the updated bootcfg sector at the
+ * correct offset.
+ */
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_length, partn_data);
+ if (partn_data == NULL) {
+ rc = ENOMEM;
+ goto fail7;
+ }
+
+ rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL);
+ if (rc != 0)
+ goto fail8;
+
+ /* Read the entire partition */
+ rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, 0,
+ (caddr_t)partn_data, partn_length);
+ if (rc != 0)
+ goto fail9;
+
+ /*
+ * Insert the BOOTCFG sector into the partition, Zero out all data after
+ * the DHCP_END tag, and adjust the checksum.
+ */
+ (void) memset(partn_data + sector_offset, 0x0, sector_length);
+ (void) memcpy(partn_data + sector_offset, data, used_bytes);
+
+ checksum = efx_bootcfg_csum(enp, data, used_bytes);
+ partn_data[sector_offset] -= checksum;
+
+ if ((rc = efx_nvram_erase(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ goto fail10;
+
+ if ((rc = efx_nvram_write_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
+ 0, (caddr_t)partn_data, partn_length)) != 0)
+ goto fail11;
+
+ if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ goto fail12;
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_length, partn_data);
+
+ return (0);
+
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+
+ (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+fail8:
+ EFSYS_PROBE(fail8);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_length, partn_data);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_BOOTCFG */
diff --git a/drivers/net/sfc/base/efx_check.h b/drivers/net/sfc/base/efx_check.h
new file mode 100644
index 00000000..c8548c04
--- /dev/null
+++ b/drivers/net/sfc/base/efx_check.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_CHECK_H
+#define _SYS_EFX_CHECK_H
+
+#include "efsys.h"
+
+/*
+ * Check that the efsys.h header in client code has a valid combination of
+ * EFSYS_OPT_xxx options.
+ *
+ * NOTE: Keep checks for obsolete options here to ensure that they are removed
+ * from client code (and do not reappear in merges from other branches).
+ */
+
+#ifdef EFSYS_OPT_FALCON
+# error "FALCON is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_BOOTCFG
+/* Support NVRAM based boot config */
+# if !EFSYS_OPT_NVRAM
+# error "BOOTCFG requires NVRAM"
+# endif
+#endif /* EFSYS_OPT_BOOTCFG */
+
+#if EFSYS_OPT_CHECK_REG
+/* Verify chip implements accessed registers */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_CHECK_REG */
+
+#if EFSYS_OPT_DECODE_INTR_FATAL
+/* Decode fatal errors */
+# if !EFSYS_OPT_SIENA
+# error "INTR_FATAL requires SIENA"
+# endif
+#endif /* EFSYS_OPT_DECODE_INTR_FATAL */
+
+#if EFSYS_OPT_DIAG
+/* Support diagnostic hardware tests */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "DIAG requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_EV_PREFETCH
+/* Support optimized EVQ data access */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+#ifdef EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE
+# error "FALCON_NIC_CFG_OVERRIDE is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_FILTER
+/* Support hardware packet filters */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "FILTER requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_FILTER */
+
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# if !EFSYS_OPT_FILTER
+# error "HUNTINGTON or MEDFORD requires FILTER"
+# endif
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_LOOPBACK
+/* Support hardware loopback modes */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#ifdef EFSYS_OPT_MAC_FALCON_GMAC
+# error "MAC_FALCON_GMAC is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MAC_FALCON_XMAC
+# error "MAC_FALCON_XMAC is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_MAC_STATS
+/* Support MAC statistics */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#if EFSYS_OPT_MCDI
+/* Support management controller messages */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MCDI requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MCDI */
+
+#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# if !EFSYS_OPT_MCDI
+# error "SIENA or HUNTINGTON or MEDFORD requires MCDI"
+# endif
+#endif
+
+#if EFSYS_OPT_MCDI_LOGGING
+/* Support MCDI logging */
+# if !EFSYS_OPT_MCDI
+# error "MCDI_LOGGING requires MCDI"
+# endif
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+/* Support MCDI proxy authorization */
+# if !EFSYS_OPT_MCDI
+# error "MCDI_PROXY_AUTH requires MCDI"
+# endif
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+#ifdef EFSYS_OPT_MON_LM87
+# error "MON_LM87 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_MAX6647
+# error "MON_MAX6647 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_NULL
+# error "MON_NULL is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_SIENA
+# error "MON_SIENA is obsolete (replaced by MON_MCDI)."
+#endif
+
+#ifdef EFSYS_OPT_MON_HUNTINGTON
+# error "MON_HUNTINGTON is obsolete (replaced by MON_MCDI)."
+#endif
+
+#if EFSYS_OPT_MON_STATS
+/* Support monitor statistics (voltage/temperature) */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MON_STATS */
+
+#if EFSYS_OPT_MON_MCDI
+/* Support Monitor via mcdi */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MON_MCDI*/
+
+#if EFSYS_OPT_NAMES
+/* Support printable names for statistics */
+# if !(EFSYS_OPT_LOOPBACK || EFSYS_OPT_MAC_STATS || EFSYS_OPT_MCDI || \
+ EFSYS_MON_STATS || EFSYS_OPT_PHY_STATS || EFSYS_OPT_QSTATS)
+# error "NAMES requires LOOPBACK or xxxSTATS or MCDI"
+# endif
+#endif /* EFSYS_OPT_NAMES */
+
+#if EFSYS_OPT_NVRAM
+/* Support non volatile configuration */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_NVRAM */
+
+#ifdef EFSYS_OPT_NVRAM_FALCON_BOOTROM
+# error "NVRAM_FALCON_BOOTROM is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_NVRAM_SFT9001
+# error "NVRAM_SFT9001 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_NVRAM_SFX7101
+# error "NVRAM_SFX7101 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PCIE_TUNE
+# error "PCIE_TUNE is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_BIST
+# error "PHY_BIST is obsolete (replaced by BIST)."
+#endif
+
+#if EFSYS_OPT_PHY_FLAGS
+/* Support PHY flags */
+# if !EFSYS_OPT_SIENA
+# error "PHY_FLAGS requires SIENA"
+# endif
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+/* Support for PHY LED control */
+# if !EFSYS_OPT_SIENA
+# error "PHY_LED_CONTROL requires SIENA"
+# endif
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+#ifdef EFSYS_OPT_PHY_NULL
+# error "PHY_NULL is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_PM8358
+# error "PHY_PM8358 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_PROPS
+# error "PHY_PROPS is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_QT2022C2
+# error "PHY_QT2022C2 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_QT2025C
+# error "PHY_QT2025C is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_SFT9001
+# error "PHY_SFT9001 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_SFX7101
+# error "PHY_SFX7101 is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_PHY_STATS
+/* Support PHY statistics */
+# if !EFSYS_OPT_SIENA
+# error "PHY_STATS requires SIENA"
+# endif
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#ifdef EFSYS_OPT_PHY_TXC43128
+# error "PHY_TXC43128 is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_QSTATS
+/* Support EVQ/RXQ/TXQ statistics */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_QSTATS */
+
+#ifdef EFSYS_OPT_RX_HDR_SPLIT
+# error "RX_HDR_SPLIT is obsolete and is not supported"
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+/* Support receive scaling (RSS) */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCATTER
+/* Support receive scatter DMA */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#ifdef EFSYS_OPT_STAT_NAME
+# error "STAT_NAME is obsolete (replaced by NAMES)."
+#endif
+
+#if EFSYS_OPT_VPD
+/* Support PCI Vital Product Data (VPD) */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "VPD requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_VPD */
+
+#ifdef EFSYS_OPT_WOL
+# error "WOL is obsolete and is not supported"
+#endif /* EFSYS_OPT_WOL */
+
+#ifdef EFSYS_OPT_MCAST_FILTER_LIST
+# error "MCAST_FILTER_LIST is obsolete and is not supported"
+#endif
+
+#if EFSYS_OPT_BIST
+/* Support BIST */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "BIST requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_BIST */
+
+#if EFSYS_OPT_LICENSING
+/* Support MCDI licensing API */
+# if !EFSYS_OPT_MCDI
+# error "LICENSING requires MCDI"
+# endif
+# if !EFSYS_HAS_UINT64
+# error "LICENSING requires UINT64"
+# endif
+#endif /* EFSYS_OPT_LICENSING */
+
+#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
+/* Support adapters with missing static config (for factory use only) */
+# if !EFSYS_OPT_MEDFORD
+# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD"
+# endif
+#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+/* Support packed stream mode */
+# if !(EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "PACKED_STREAM requires HUNTINGTON or MEDFORD"
+# endif
+#endif
+
+#endif /* _SYS_EFX_CHECK_H */
diff --git a/drivers/net/sfc/base/efx_crc32.c b/drivers/net/sfc/base/efx_crc32.c
new file mode 100644
index 00000000..27e2708a
--- /dev/null
+++ b/drivers/net/sfc/base/efx_crc32.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2013-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+/*
+ * Precomputed table for computing IEEE 802.3 CRC32
+ * with polynomial 0x04c11db7 (bit-reversed 0xedb88320)
+ */
+
+static const uint32_t efx_crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+/* Calculate the IEEE 802.3 CRC32 of a MAC addr */
+ __checkReturn uint32_t
+efx_crc32_calculate(
+ __in uint32_t crc_init,
+ __in_ecount(length) uint8_t const *input,
+ __in int length)
+{
+ int index;
+ uint32_t crc = crc_init;
+
+ for (index = 0; index < length; index++) {
+ uint32_t data = *(input++);
+ crc = (crc >> 8) ^ efx_crc32_table[(crc ^ data) & 0xff];
+ }
+
+ return (crc);
+}
diff --git a/drivers/net/sfc/base/efx_ev.c b/drivers/net/sfc/base/efx_ev.c
new file mode 100644
index 00000000..42ded5aa
--- /dev/null
+++ b/drivers/net/sfc/base/efx_ev.c
@@ -0,0 +1,1470 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_QSTATS
+#define EFX_EV_QSTAT_INCR(_eep, _stat) \
+ do { \
+ (_eep)->ee_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_EV_QSTAT_INCR(_eep, _stat)
+#endif
+
+#define EFX_EV_PRESENT(_qword) \
+ (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
+ EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
+
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_ev_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_ev_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep);
+
+static void
+siena_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+static __checkReturn efx_rc_t
+siena_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+static void
+siena_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+static __checkReturn efx_rc_t
+siena_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+
+#endif
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_ev_ops_t __efx_ev_siena_ops = {
+ siena_ev_init, /* eevo_init */
+ siena_ev_fini, /* eevo_fini */
+ siena_ev_qcreate, /* eevo_qcreate */
+ siena_ev_qdestroy, /* eevo_qdestroy */
+ siena_ev_qprime, /* eevo_qprime */
+ siena_ev_qpost, /* eevo_qpost */
+ siena_ev_qmoderate, /* eevo_qmoderate */
+#if EFSYS_OPT_QSTATS
+ siena_ev_qstats_update, /* eevo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_ev_ops_t __efx_ev_ef10_ops = {
+ ef10_ev_init, /* eevo_init */
+ ef10_ev_fini, /* eevo_fini */
+ ef10_ev_qcreate, /* eevo_qcreate */
+ ef10_ev_qdestroy, /* eevo_qdestroy */
+ ef10_ev_qprime, /* eevo_qprime */
+ ef10_ev_qpost, /* eevo_qpost */
+ ef10_ev_qmoderate, /* eevo_qmoderate */
+#if EFSYS_OPT_QSTATS
+ ef10_ev_qstats_update, /* eevo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+
+ __checkReturn efx_rc_t
+efx_ev_init(
+ __in efx_nic_t *enp)
+{
+ const efx_ev_ops_t *eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ if (enp->en_mod_flags & EFX_MOD_EV) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ eevop = &__efx_ev_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ eevop = &__efx_ev_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ eevop = &__efx_ev_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
+
+ if ((rc = eevop->eevo_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_eevop = eevop;
+ enp->en_mod_flags |= EFX_MOD_EV;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_eevop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_EV;
+ return (rc);
+}
+
+ void
+efx_ev_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+ EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
+
+ eevop->eevo_fini(enp);
+
+ enp->en_eevop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_EV;
+}
+
+
+ __checkReturn efx_rc_t
+efx_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __deref_out efx_evq_t **eepp)
+{
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_evq_t *eep;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
+
+ EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit);
+
+ switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
+ case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
+ break;
+ case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
+ if (us != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /* Allocate an EVQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
+ if (eep == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ eep->ee_magic = EFX_EVQ_MAGIC;
+ eep->ee_enp = enp;
+ eep->ee_index = index;
+ eep->ee_mask = n - 1;
+ eep->ee_flags = flags;
+ eep->ee_esmp = esmp;
+
+ /*
+ * Set outputs before the queue is created because interrupts may be
+ * raised for events immediately after the queue is created, before the
+ * function call below returns. See bug58606.
+ *
+ * The eepp pointer passed in by the client must therefore point to data
+ * shared with the client's event processing context.
+ */
+ enp->en_ev_qcount++;
+ *eepp = eep;
+
+ if ((rc = eevop->eevo_qcreate(enp, index, esmp, n, id, us, flags,
+ eep)) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+ *eepp = NULL;
+ enp->en_ev_qcount--;
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ EFSYS_ASSERT(enp->en_ev_qcount != 0);
+ --enp->en_ev_qcount;
+
+ eevop->eevo_qdestroy(eep);
+
+ /* Free the EVQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
+}
+
+ __checkReturn efx_rc_t
+efx_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = eevop->eevo_qprime(eep, count)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn boolean_t
+efx_ev_qpending(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ size_t offset;
+ efx_qword_t qword;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
+
+ return (EFX_EV_PRESENT(qword));
+}
+
+#if EFSYS_OPT_EV_PREFETCH
+
+ void
+efx_ev_qprefetch(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ unsigned int offset;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+}
+
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+#define EFX_EV_BATCH 8
+
+ void
+efx_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_qword_t ev[EFX_EV_BATCH];
+ unsigned int batch;
+ unsigned int total;
+ unsigned int count;
+ unsigned int index;
+ size_t offset;
+
+ /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */
+ EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
+ EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
+
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
+ FSE_AZ_EV_CODE_DRV_GEN_EV);
+#if EFSYS_OPT_MCDI
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
+ FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
+#endif
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+ EFSYS_ASSERT(countp != NULL);
+ EFSYS_ASSERT(eecp != NULL);
+
+ count = *countp;
+ do {
+ /* Read up until the end of the batch period */
+ batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ for (total = 0; total < batch; ++total) {
+ EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
+
+ if (!EFX_EV_PRESENT(ev[total]))
+ break;
+
+ EFSYS_PROBE3(event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
+
+ offset += sizeof (efx_qword_t);
+ }
+
+#if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
+ /*
+ * Prefetch the next batch when we get within PREFETCH_PERIOD
+ * of a completed batch. If the batch is smaller, then prefetch
+ * immediately.
+ */
+ if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+ /* Process the batch of events */
+ for (index = 0; index < total; ++index) {
+ boolean_t should_abort;
+ uint32_t code;
+
+#if EFSYS_OPT_EV_PREFETCH
+ /* Prefetch if we've now reached the batch period */
+ if (total == batch &&
+ index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
+ offset = (count + batch) & eep->ee_mask;
+ offset *= sizeof (efx_qword_t);
+
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+ }
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+ EFX_EV_QSTAT_INCR(eep, EV_ALL);
+
+ code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
+ switch (code) {
+ case FSE_AZ_EV_CODE_RX_EV:
+ should_abort = eep->ee_rx(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_TX_EV:
+ should_abort = eep->ee_tx(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_DRIVER_EV:
+ should_abort = eep->ee_driver(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_DRV_GEN_EV:
+ should_abort = eep->ee_drv_gen(eep,
+ &(ev[index]), eecp, arg);
+ break;
+#if EFSYS_OPT_MCDI
+ case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
+ should_abort = eep->ee_mcdi(eep,
+ &(ev[index]), eecp, arg);
+ break;
+#endif
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (eep->ee_global) {
+ should_abort = eep->ee_global(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ }
+ /* else fallthrough */
+ default:
+ EFSYS_PROBE3(bad_event,
+ unsigned int, eep->ee_index,
+ uint32_t,
+ EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
+ uint32_t,
+ EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ (void) eecp->eec_exception(arg,
+ EFX_EXCEPTION_EV_ERROR, code);
+ should_abort = B_TRUE;
+ }
+ if (should_abort) {
+ /* Ignore subsequent events */
+ total = index + 1;
+ break;
+ }
+ }
+
+ /*
+ * Now that the hardware has most likely moved onto dma'ing
+ * into the next cache line, clear the processed events. Take
+ * care to only clear out events that we've processed
+ */
+ EFX_SET_QWORD(ev[0]);
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ for (index = 0; index < total; ++index) {
+ EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
+ offset += sizeof (efx_qword_t);
+ }
+
+ count += total;
+
+ } while (total == batch);
+
+ *countp = count;
+}
+
+ void
+efx_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ EFSYS_ASSERT(eevop != NULL &&
+ eevop->eevo_qpost != NULL);
+
+ eevop->eevo_qpost(eep, data);
+}
+
+ __checkReturn efx_rc_t
+efx_ev_usecs_to_ticks(
+ __in efx_nic_t *enp,
+ __in unsigned int us,
+ __out unsigned int *ticksp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ unsigned int ticks;
+
+ /* Convert microseconds to a timer tick count */
+ if (us == 0)
+ ticks = 0;
+ else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
+ ticks = 1; /* Never round down to zero */
+ else
+ ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
+
+ *ticksp = ticks;
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#if EFSYS_OPT_QSTATS
+ void
+efx_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+
+{ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ eevop->eevo_qstats_update(eep, stat);
+}
+
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_ev_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * Program the event queue for receive and transmit queue
+ * flush events.
+ */
+ EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
+
+ return (0);
+
+}
+
+static __checkReturn boolean_t
+siena_ev_rx_not_ok(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in uint32_t label,
+ __in uint32_t id,
+ __inout uint16_t *flagsp)
+{
+ boolean_t ignore = B_FALSE;
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
+ EFSYS_PROBE(tobe_disc);
+ /*
+ * Assume this is a unicast address mismatch, unless below
+ * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
+ * EV_RX_PAUSE_FRM_ERR is set.
+ */
+ (*flagsp) |= EFX_ADDR_MISMATCH;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
+ EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ (*flagsp) |= EFX_DISCARD;
+
+#if EFSYS_OPT_RX_SCATTER
+ /*
+ * Lookout for payload queue ran dry errors and ignore them.
+ *
+ * Sadly for the header/data split cases, the descriptor
+ * pointer in this event refers to the header queue and
+ * therefore cannot be easily detected as duplicate.
+ * So we drop these and rely on the receive processing seeing
+ * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
+ * the partially received packet.
+ */
+ if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
+ (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
+ (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
+ ignore = B_TRUE;
+#endif /* EFSYS_OPT_RX_SCATTER */
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ EFSYS_PROBE(crc_err);
+ (*flagsp) &= ~EFX_ADDR_MISMATCH;
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
+ EFSYS_PROBE(pause_frm_err);
+ (*flagsp) &= ~EFX_ADDR_MISMATCH;
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
+ EFSYS_PROBE(owner_id_err);
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+ EFSYS_PROBE(ipv4_err);
+ (*flagsp) &= ~EFX_CKSUM_IPV4;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+ EFSYS_PROBE(udp_chk_err);
+ (*flagsp) &= ~EFX_CKSUM_TCPUDP;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
+
+ /*
+ * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
+ * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
+ * condition.
+ */
+ (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
+ }
+
+ return (ignore);
+}
+
+static __checkReturn boolean_t
+siena_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t id;
+ uint32_t size;
+ uint32_t label;
+ boolean_t ok;
+#if EFSYS_OPT_RX_SCATTER
+ boolean_t sop;
+ boolean_t jumbo_cont;
+#endif /* EFSYS_OPT_RX_SCATTER */
+ uint32_t hdr_type;
+ boolean_t is_v6;
+ uint16_t flags;
+ boolean_t ignore;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_RX);
+
+ /* Basic packet information */
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
+ size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
+ ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
+
+#if EFSYS_OPT_RX_SCATTER
+ sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
+ jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
+
+ is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
+
+ /*
+ * If packet is marked as OK and packet type is TCP/IP or
+ * UDP/IP or other IP, then we can rely on the hardware checksums.
+ */
+ switch (hdr_type) {
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
+ flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
+ flags |= EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
+ flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
+ flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
+ flags |= EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
+ flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
+ flags = EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
+ flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
+ EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
+ flags = 0;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ flags = 0;
+ break;
+ }
+
+#if EFSYS_OPT_RX_SCATTER
+ /* Report scatter and header/lookahead split buffer flags */
+ if (sop)
+ flags |= EFX_PKT_START;
+ if (jumbo_cont)
+ flags |= EFX_PKT_CONT;
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
+ if (!ok) {
+ ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
+ if (ignore) {
+ EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
+ uint32_t, size, uint16_t, flags);
+
+ return (B_FALSE);
+ }
+ }
+
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ /* Detect multicast packets that didn't match the filter */
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
+ } else {
+ EFSYS_PROBE(mcast_mismatch);
+ flags |= EFX_ADDR_MISMATCH;
+ }
+ } else {
+ flags |= EFX_PKT_UNICAST;
+ }
+
+ /*
+ * The packet parser in Siena can abort parsing packets under
+ * certain error conditions, setting the PKT_NOT_PARSED bit
+ * (which clears PKT_OK). If this is set, then don't trust
+ * the PKT_TYPE field.
+ */
+ if (!ok) {
+ uint32_t parse_err;
+
+ parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
+ if (parse_err != 0)
+ flags |= EFX_CHECK_VLAN;
+ }
+
+ if (~flags & EFX_CHECK_VLAN) {
+ uint32_t pkt_type;
+
+ pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
+ if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
+ flags |= EFX_PKT_VLAN_TAGGED;
+ }
+
+ EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
+ uint32_t, size, uint16_t, flags);
+
+ EFSYS_ASSERT(eecp->eec_rx != NULL);
+ should_abort = eecp->eec_rx(arg, label, id, size, flags);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+siena_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t id;
+ uint32_t label;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
+
+ EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
+
+ EFSYS_ASSERT(eecp->eec_tx != NULL);
+ should_abort = eecp->eec_tx(arg, label, id);
+
+ return (should_abort);
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
+ return (B_FALSE);
+}
+
+static __checkReturn boolean_t
+siena_ev_global(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ _NOTE(ARGUNUSED(eqp, eecp, arg))
+
+ EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
+
+ return (B_FALSE);
+}
+
+static __checkReturn boolean_t
+siena_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
+ should_abort = B_FALSE;
+
+ switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
+ uint32_t txq_index;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
+
+ txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
+
+ EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
+ should_abort = eecp->eec_txq_flush_done(arg, txq_index);
+
+ break;
+ }
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
+ uint32_t rxq_index;
+ uint32_t failed;
+
+ rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+
+ EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
+ EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
+
+ if (failed) {
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
+
+ EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
+
+ should_abort = eecp->eec_rxq_flush_failed(arg,
+ rxq_index);
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
+
+ should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
+ }
+
+ break;
+ }
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ EFSYS_ASSERT(eecp->eec_initialized != NULL);
+ should_abort = eecp->eec_initialized(arg);
+
+ break;
+
+ case FSE_AZ_EVQ_NOT_EN_EV:
+ EFSYS_PROBE(evq_not_en);
+ break;
+
+ case FSE_AZ_SRM_UPD_DONE_EV: {
+ uint32_t code;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
+
+ code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_sram != NULL);
+ should_abort = eecp->eec_sram(arg, code);
+
+ break;
+ }
+ case FSE_AZ_WAKE_UP_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_wake_up != NULL);
+ should_abort = eecp->eec_wake_up(arg, id);
+
+ break;
+ }
+ case FSE_AZ_TX_PKT_NON_TCP_UDP:
+ EFSYS_PROBE(tx_pkt_non_tcp_udp);
+ break;
+
+ case FSE_AZ_TIMER_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_timer != NULL);
+ should_abort = eecp->eec_timer(arg, id);
+
+ break;
+ }
+ case FSE_AZ_RX_DSC_ERROR_EV:
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
+
+ EFSYS_PROBE(rx_dsc_error);
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_RX_DSC_ERROR, 0);
+
+ break;
+
+ case FSE_AZ_TX_DSC_ERROR_EV:
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
+
+ EFSYS_PROBE(tx_dsc_error);
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_TX_DSC_ERROR, 0);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+siena_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t data;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
+
+ data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
+ if (data >= ((uint32_t)1 << 16)) {
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ return (B_TRUE);
+ }
+
+ EFSYS_ASSERT(eecp->eec_software != NULL);
+ should_abort = eecp->eec_software(arg, (uint16_t)data);
+
+ return (should_abort);
+}
+
+#if EFSYS_OPT_MCDI
+
+static __checkReturn boolean_t
+siena_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ unsigned int code;
+ boolean_t should_abort = B_FALSE;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ if (enp->en_family != EFX_FAMILY_SIENA)
+ goto out;
+
+ EFSYS_ASSERT(eecp->eec_link_change != NULL);
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+#if EFSYS_OPT_MON_STATS
+ EFSYS_ASSERT(eecp->eec_monitor != NULL);
+#endif
+
+ EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
+
+ code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ efx_mcdi_ev_death(enp, EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(enp,
+ MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
+ MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
+ MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
+ break;
+
+ case MCDI_EVENT_CODE_LINKCHANGE: {
+ efx_link_mode_t link_mode;
+
+ siena_phy_link_ev(enp, eqp, &link_mode);
+ should_abort = eecp->eec_link_change(arg, link_mode);
+ break;
+ }
+ case MCDI_EVENT_CODE_SENSOREVT: {
+#if EFSYS_OPT_MON_STATS
+ efx_mon_stat_t id;
+ efx_mon_stat_value_t value;
+ efx_rc_t rc;
+
+ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
+ should_abort = eecp->eec_monitor(arg, id, value);
+ else if (rc == ENOTSUP) {
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_SENSOREVT,
+ MCDI_EV_FIELD(eqp, DATA));
+ } else
+ EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
+#else
+ should_abort = B_FALSE;
+#endif
+ break;
+ }
+ case MCDI_EVENT_CODE_SCHEDERR:
+ /* Informational only */
+ break;
+
+ case MCDI_EVENT_CODE_REBOOT:
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+#if EFSYS_OPT_MAC_STATS
+ if (eecp->eec_mac_stats != NULL) {
+ eecp->eec_mac_stats(arg,
+ MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
+ }
+#endif
+ break;
+
+ case MCDI_EVENT_CODE_FWALERT: {
+ uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
+
+ if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_FWALERT_SRAM,
+ MCDI_EV_FIELD(eqp, FWALERT_DATA));
+ else
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_FWALERT,
+ MCDI_EV_FIELD(eqp, DATA));
+ break;
+ }
+
+ default:
+ EFSYS_PROBE1(mc_pcol_error, int, code);
+ break;
+ }
+
+out:
+ return (should_abort);
+}
+
+#endif /* EFSYS_OPT_MCDI */
+
+static __checkReturn efx_rc_t
+siena_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t rptr;
+ efx_dword_t dword;
+
+ rptr = count & eep->ee_mask;
+
+ EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
+
+ EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
+ &dword, B_FALSE);
+
+ return (0);
+}
+
+static void
+siena_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_qword_t ev;
+ efx_oword_t oword;
+
+ EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_EV_DATA_DW0, (uint32_t)data);
+
+ EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
+ EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
+ EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
+
+ EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
+}
+
+static __checkReturn efx_rc_t
+siena_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ unsigned int locked;
+ efx_dword_t dword;
+ efx_rc_t rc;
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
+ FRF_CZ_TC_TIMER_VAL, 0);
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail2;
+
+ EFSYS_ASSERT(ticks > 0);
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
+ FRF_CZ_TC_TIMER_VAL, ticks - 1);
+ }
+
+ locked = (eep->ee_index == 0) ? 1 : 0;
+
+ EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
+ eep->ee_index, &dword, locked);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t size;
+ efx_oword_t oword;
+ efx_rc_t rc;
+ boolean_t notify_mode;
+
+ _NOTE(ARGUNUSED(esmp))
+
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
+
+ if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_evq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+#if EFSYS_OPT_RX_SCALE
+ if (enp->en_intr.ei_type == EFX_INTR_LINE &&
+ index >= EFX_MAXRSS_LEGACY) {
+ rc = EINVAL;
+ goto fail3;
+ }
+#endif
+ for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* Set up the handler table */
+ eep->ee_rx = siena_ev_rx;
+ eep->ee_tx = siena_ev_tx;
+ eep->ee_driver = siena_ev_driver;
+ eep->ee_global = siena_ev_global;
+ eep->ee_drv_gen = siena_ev_drv_gen;
+#if EFSYS_OPT_MCDI
+ eep->ee_mcdi = siena_ev_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+
+ notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+ /* Set up the new event queue */
+ EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
+ FRF_AZ_EVQ_BUF_BASE_ID, id);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
+
+ /* Set initial interrupt moderation */
+ siena_ev_qmoderate(eep, us);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+#if EFSYS_OPT_RX_SCALE
+fail3:
+ EFSYS_PROBE(fail3);
+#endif
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_QSTATS
+#if EFSYS_OPT_NAMES
+/* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock c0f3bc5083b40532 */
+static const char * const __efx_ev_qstat_name[] = {
+ "all",
+ "rx",
+ "rx_ok",
+ "rx_frm_trunc",
+ "rx_tobe_disc",
+ "rx_pause_frm_err",
+ "rx_buf_owner_id_err",
+ "rx_ipv4_hdr_chksum_err",
+ "rx_tcp_udp_chksum_err",
+ "rx_eth_crc_err",
+ "rx_ip_frag_err",
+ "rx_mcast_pkt",
+ "rx_mcast_hash_match",
+ "rx_tcp_ipv4",
+ "rx_tcp_ipv6",
+ "rx_udp_ipv4",
+ "rx_udp_ipv6",
+ "rx_other_ipv4",
+ "rx_other_ipv6",
+ "rx_non_ip",
+ "rx_batch",
+ "tx",
+ "tx_wq_ff_full",
+ "tx_pkt_err",
+ "tx_pkt_too_big",
+ "tx_unexpected",
+ "global",
+ "global_mnt",
+ "driver",
+ "driver_srm_upd_done",
+ "driver_tx_descq_fls_done",
+ "driver_rx_descq_fls_done",
+ "driver_rx_descq_fls_failed",
+ "driver_rx_dsc_error",
+ "driver_tx_dsc_error",
+ "drv_gen",
+ "mcdi_response",
+};
+/* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
+
+ const char *
+efx_ev_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(id, <, EV_NQSTATS);
+
+ return (__efx_ev_qstat_name[id]);
+}
+#endif /* EFSYS_OPT_NAMES */
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < EV_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
+ eep->ee_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+static void
+siena_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_oword_t oword;
+
+ /* Purge event queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
+ eep->ee_index, &oword, B_TRUE);
+
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
+}
+
+static void
+siena_ev_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/efx_filter.c b/drivers/net/sfc/base/efx_filter.c
new file mode 100644
index 00000000..ba310260
--- /dev/null
+++ b/drivers/net/sfc/base/efx_filter.c
@@ -0,0 +1,1424 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_FILTER
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_filter_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_filter_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_filter_restore(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace);
+
+static __checkReturn efx_rc_t
+siena_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+static __checkReturn efx_rc_t
+siena_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_filter_ops_t __efx_filter_siena_ops = {
+ siena_filter_init, /* efo_init */
+ siena_filter_fini, /* efo_fini */
+ siena_filter_restore, /* efo_restore */
+ siena_filter_add, /* efo_add */
+ siena_filter_delete, /* efo_delete */
+ siena_filter_supported_filters, /* efo_supported_filters */
+ NULL, /* efo_reconfigure */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_filter_ops_t __efx_filter_ef10_ops = {
+ ef10_filter_init, /* efo_init */
+ ef10_filter_fini, /* efo_fini */
+ ef10_filter_restore, /* efo_restore */
+ ef10_filter_add, /* efo_add */
+ ef10_filter_delete, /* efo_delete */
+ ef10_filter_supported_filters, /* efo_supported_filters */
+ ef10_filter_reconfigure, /* efo_reconfigure */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_filter_insert(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ const efx_filter_ops_t *efop = enp->en_efop;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+ return (efop->efo_add(enp, spec, B_FALSE));
+}
+
+ __checkReturn efx_rc_t
+efx_filter_remove(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ const efx_filter_ops_t *efop = enp->en_efop;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+#if EFSYS_OPT_RX_SCALE
+ spec->efs_rss_context = enp->en_rss_context;
+#endif
+
+ return (efop->efo_delete(enp, spec));
+}
+
+ __checkReturn efx_rc_t
+efx_filter_restore(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ if ((rc = enp->en_efop->efo_restore(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_filter_init(
+ __in efx_nic_t *enp)
+{
+ const efx_filter_ops_t *efop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_FILTER));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ efop = &__efx_filter_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ efop = &__efx_filter_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ efop = &__efx_filter_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efop->efo_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_efop = efop;
+ enp->en_mod_flags |= EFX_MOD_FILTER;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_efop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_FILTER;
+ return (rc);
+}
+
+ void
+efx_filter_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ enp->en_efop->efo_fini(enp);
+
+ enp->en_efop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_FILTER;
+}
+
+/*
+ * Query the possible combinations of match flags which can be filtered on.
+ * These are returned as a list, of which each 32 bit element is a bitmask
+ * formed of EFX_FILTER_MATCH flags.
+ *
+ * The combinations are ordered in priority from highest to lowest.
+ *
+ * If the provided buffer is too short to hold the list, the call with fail with
+ * ENOSPC and *list_lengthp will be set to the buffer length required.
+ */
+ __checkReturn efx_rc_t
+efx_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT(enp->en_efop->efo_supported_filters != NULL);
+
+ if (buffer == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = enp->en_efop->efo_supported_filters(enp, buffer, buffer_length,
+ list_lengthp);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ if (enp->en_efop->efo_reconfigure != NULL) {
+ if ((rc = enp->en_efop->efo_reconfigure(enp, mac_addr,
+ all_unicst, mulcst,
+ all_mulcst, brdcst,
+ addrs, count)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_filter_spec_init_rx(
+ __out efx_filter_spec_t *spec,
+ __in efx_filter_priority_t priority,
+ __in efx_filter_flags_t flags,
+ __in efx_rxq_t *erp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(erp, !=, NULL);
+ EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_SCATTER)) == 0);
+
+ memset(spec, 0, sizeof (*spec));
+ spec->efs_priority = priority;
+ spec->efs_flags = EFX_FILTER_FLAG_RX | flags;
+ spec->efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;
+ spec->efs_dmaq_id = (uint16_t)erp->er_index;
+}
+
+ void
+efx_filter_spec_init_tx(
+ __out efx_filter_spec_t *spec,
+ __in efx_txq_t *etp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(etp, !=, NULL);
+
+ memset(spec, 0, sizeof (*spec));
+ spec->efs_priority = EFX_FILTER_PRI_REQUIRED;
+ spec->efs_flags = EFX_FILTER_FLAG_TX;
+ spec->efs_dmaq_id = (uint16_t)etp->et_index;
+}
+
+
+/*
+ * Specify IPv4 host, transport protocol and port in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t host,
+ __in uint16_t port)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->efs_ether_type = EFX_ETHER_TYPE_IPV4;
+ spec->efs_ip_proto = proto;
+ spec->efs_loc_host.eo_u32[0] = host;
+ spec->efs_loc_port = port;
+ return (0);
+}
+
+/*
+ * Specify IPv4 hosts, transport protocol and ports in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t lhost,
+ __in uint16_t lport,
+ __in uint32_t rhost,
+ __in uint16_t rport)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec->efs_ether_type = EFX_ETHER_TYPE_IPV4;
+ spec->efs_ip_proto = proto;
+ spec->efs_loc_host.eo_u32[0] = lhost;
+ spec->efs_loc_port = lport;
+ spec->efs_rem_host.eo_u32[0] = rhost;
+ spec->efs_rem_port = rport;
+ return (0);
+}
+
+/*
+ * Specify local Ethernet address and/or VID in filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_eth_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t vid,
+ __in const uint8_t *addr)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(addr, !=, NULL);
+
+ if (vid == EFX_FILTER_SPEC_VID_UNSPEC && addr == NULL)
+ return (EINVAL);
+
+ if (vid != EFX_FILTER_SPEC_VID_UNSPEC) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec->efs_outer_vid = vid;
+ }
+ if (addr != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ memcpy(spec->efs_loc_mac, addr, EFX_MAC_ADDR_LEN);
+ }
+ return (0);
+}
+
+/*
+ * Specify matching otherwise-unmatched unicast in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_uc_def(
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
+ return (0);
+}
+
+/*
+ * Specify matching otherwise-unmatched multicast in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_mc_def(
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
+ return (0);
+}
+
+
+
+#if EFSYS_OPT_SIENA
+
+/*
+ * "Fudge factors" - difference between programmed value and actual depth.
+ * Due to pipelined implementation we need to program H/W with a value that
+ * is larger than the hop limit we want.
+ */
+#define FILTER_CTL_SRCH_FUDGE_WILD 3
+#define FILTER_CTL_SRCH_FUDGE_FULL 1
+
+/*
+ * Hard maximum hop limit. Hardware will time-out beyond 200-something.
+ * We also need to avoid infinite loops in efx_filter_search() when the
+ * table is full.
+ */
+#define FILTER_CTL_SRCH_MAX 200
+
+static __checkReturn efx_rc_t
+siena_filter_spec_from_gen_spec(
+ __out siena_filter_spec_t *sf_spec,
+ __in efx_filter_spec_t *gen_spec)
+{
+ efx_rc_t rc;
+ boolean_t is_full = B_FALSE;
+
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX)
+ EFSYS_ASSERT3U(gen_spec->efs_flags, ==, EFX_FILTER_FLAG_TX);
+ else
+ EFSYS_ASSERT3U(gen_spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+ /* Falconsiena only has one RSS context */
+ if ((gen_spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) &&
+ gen_spec->efs_rss_context != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ sf_spec->sfs_flags = gen_spec->efs_flags;
+ sf_spec->sfs_dmaq_id = gen_spec->efs_dmaq_id;
+
+ switch (gen_spec->efs_match_flags) {
+ case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT:
+ is_full = B_TRUE;
+ /* Fall through */
+ case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT: {
+ uint32_t rhost, host1, host2;
+ uint16_t rport, port1, port2;
+
+ if (gen_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ if (gen_spec->efs_loc_port == 0 ||
+ (is_full && gen_spec->efs_rem_port == 0)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ switch (gen_spec->efs_ip_proto) {
+ case EFX_IPPROTO_TCP:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_TCP_FULL :
+ EFX_SIENA_FILTER_TX_TCP_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_TCP_FULL :
+ EFX_SIENA_FILTER_RX_TCP_WILD);
+ }
+ break;
+ case EFX_IPPROTO_UDP:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_UDP_FULL :
+ EFX_SIENA_FILTER_TX_UDP_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_UDP_FULL :
+ EFX_SIENA_FILTER_RX_UDP_WILD);
+ }
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail4;
+ }
+ /*
+ * The filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * addresses (zero for a wildcard).
+ */
+ rhost = is_full ? gen_spec->efs_rem_host.eo_u32[0] : 0;
+ rport = is_full ? gen_spec->efs_rem_port : 0;
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ host1 = gen_spec->efs_loc_host.eo_u32[0];
+ host2 = rhost;
+ } else {
+ host1 = rhost;
+ host2 = gen_spec->efs_loc_host.eo_u32[0];
+ }
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ if (sf_spec->sfs_type ==
+ EFX_SIENA_FILTER_TX_UDP_WILD) {
+ port1 = rport;
+ port2 = gen_spec->efs_loc_port;
+ } else {
+ port1 = gen_spec->efs_loc_port;
+ port2 = rport;
+ }
+ } else {
+ if (sf_spec->sfs_type ==
+ EFX_SIENA_FILTER_RX_UDP_WILD) {
+ port1 = gen_spec->efs_loc_port;
+ port2 = rport;
+ } else {
+ port1 = rport;
+ port2 = gen_spec->efs_loc_port;
+ }
+ }
+ sf_spec->sfs_dword[0] = (host1 << 16) | port1;
+ sf_spec->sfs_dword[1] = (port2 << 16) | (host1 >> 16);
+ sf_spec->sfs_dword[2] = host2;
+ break;
+ }
+
+ case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
+ is_full = B_TRUE;
+ /* Fall through */
+ case EFX_FILTER_MATCH_LOC_MAC:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_MAC_FULL :
+ EFX_SIENA_FILTER_TX_MAC_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_MAC_FULL :
+ EFX_SIENA_FILTER_RX_MAC_WILD);
+ }
+ sf_spec->sfs_dword[0] = is_full ? gen_spec->efs_outer_vid : 0;
+ sf_spec->sfs_dword[1] =
+ gen_spec->efs_loc_mac[2] << 24 |
+ gen_spec->efs_loc_mac[3] << 16 |
+ gen_spec->efs_loc_mac[4] << 8 |
+ gen_spec->efs_loc_mac[5];
+ sf_spec->sfs_dword[2] =
+ gen_spec->efs_loc_mac[0] << 8 |
+ gen_spec->efs_loc_mac[1];
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple.
+ */
+static uint16_t
+siena_filter_tbl_hash(
+ __in uint32_t key)
+{
+ uint16_t tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ (uint16_t)(key >> 16);
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ (uint16_t)(key & 0xffff);
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+
+ return (tmp);
+}
+
+/*
+ * To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash.
+ */
+static uint16_t
+siena_filter_tbl_increment(
+ __in uint32_t key)
+{
+ return ((uint16_t)(key * 2 - 1));
+}
+
+static __checkReturn boolean_t
+siena_filter_test_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ return ((sftp->sft_bitmap[index / 32] & (1 << (index % 32))) != 0);
+}
+
+static void
+siena_filter_set_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ sftp->sft_bitmap[index / 32] |= (1 << (index % 32));
+ ++sftp->sft_used;
+}
+
+static void
+siena_filter_clear_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ sftp->sft_bitmap[index / 32] &= ~(1 << (index % 32));
+
+ --sftp->sft_used;
+ EFSYS_ASSERT3U(sftp->sft_used, >=, 0);
+}
+
+
+static siena_filter_tbl_id_t
+siena_filter_tbl_id(
+ __in siena_filter_type_t type)
+{
+ siena_filter_tbl_id_t tbl_id;
+
+ switch (type) {
+ case EFX_SIENA_FILTER_RX_TCP_FULL:
+ case EFX_SIENA_FILTER_RX_TCP_WILD:
+ case EFX_SIENA_FILTER_RX_UDP_FULL:
+ case EFX_SIENA_FILTER_RX_UDP_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_RX_IP;
+ break;
+
+ case EFX_SIENA_FILTER_RX_MAC_FULL:
+ case EFX_SIENA_FILTER_RX_MAC_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_RX_MAC;
+ break;
+
+ case EFX_SIENA_FILTER_TX_TCP_FULL:
+ case EFX_SIENA_FILTER_TX_TCP_WILD:
+ case EFX_SIENA_FILTER_TX_UDP_FULL:
+ case EFX_SIENA_FILTER_TX_UDP_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_TX_IP;
+ break;
+
+ case EFX_SIENA_FILTER_TX_MAC_FULL:
+ case EFX_SIENA_FILTER_TX_MAC_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_TX_MAC;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ tbl_id = EFX_SIENA_FILTER_NTBLS;
+ break;
+ }
+ return (tbl_id);
+}
+
+static void
+siena_filter_reset_search_depth(
+ __inout siena_filter_t *sfp,
+ __in siena_filter_tbl_id_t tbl_id)
+{
+ switch (tbl_id) {
+ case EFX_SIENA_FILTER_TBL_RX_IP:
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_RX_MAC:
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_TX_IP:
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_TX_MAC:
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] = 0;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ break;
+ }
+}
+
+static void
+siena_filter_push_rx_limits(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_FULL_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_WILD_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_FULL_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_WILD_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC].sft_size) {
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+}
+
+static void
+siena_filter_push_tx_limits(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP].sft_size != 0) {
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC].sft_size != 0) {
+ EFX_SET_OWORD_FIELD(
+ oword, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ oword, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);
+}
+
+/* Build a filter entry and return its n-tuple key. */
+static __checkReturn uint32_t
+siena_filter_build(
+ __out efx_oword_t *filter,
+ __in siena_filter_spec_t *spec)
+{
+ uint32_t dword3;
+ uint32_t key;
+ uint8_t type = spec->sfs_type;
+ uint32_t flags = spec->sfs_flags;
+
+ switch (siena_filter_tbl_id(type)) {
+ case EFX_SIENA_FILTER_TBL_RX_IP: {
+ boolean_t is_udp = (type == EFX_SIENA_FILTER_RX_UDP_FULL ||
+ type == EFX_SIENA_FILTER_RX_UDP_WILD);
+ EFX_POPULATE_OWORD_7(*filter,
+ FRF_BZ_RSS_EN,
+ (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
+ FRF_BZ_SCATTER_EN,
+ (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
+ FRF_AZ_TCP_UDP, is_udp,
+ FRF_AZ_RXQ_ID, spec->sfs_dmaq_id,
+ EFX_DWORD_2, spec->sfs_dword[2],
+ EFX_DWORD_1, spec->sfs_dword[1],
+ EFX_DWORD_0, spec->sfs_dword[0]);
+ dword3 = is_udp;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_RX_MAC: {
+ boolean_t is_wild = (type == EFX_SIENA_FILTER_RX_MAC_WILD);
+ EFX_POPULATE_OWORD_7(*filter,
+ FRF_CZ_RMFT_RSS_EN,
+ (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
+ FRF_CZ_RMFT_SCATTER_EN,
+ (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
+ FRF_CZ_RMFT_RXQ_ID, spec->sfs_dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_DW1, spec->sfs_dword[2],
+ FRF_CZ_RMFT_DEST_MAC_DW0, spec->sfs_dword[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->sfs_dword[0]);
+ dword3 = is_wild;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_TX_IP: {
+ boolean_t is_udp = (type == EFX_SIENA_FILTER_TX_UDP_FULL ||
+ type == EFX_SIENA_FILTER_TX_UDP_WILD);
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TIFT_TCP_UDP, is_udp,
+ FRF_CZ_TIFT_TXQ_ID, spec->sfs_dmaq_id,
+ EFX_DWORD_2, spec->sfs_dword[2],
+ EFX_DWORD_1, spec->sfs_dword[1],
+ EFX_DWORD_0, spec->sfs_dword[0]);
+ dword3 = is_udp | spec->sfs_dmaq_id << 1;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_TX_MAC: {
+ boolean_t is_wild = (type == EFX_SIENA_FILTER_TX_MAC_WILD);
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TMFT_TXQ_ID, spec->sfs_dmaq_id,
+ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_TMFT_SRC_MAC_DW1, spec->sfs_dword[2],
+ FRF_CZ_TMFT_SRC_MAC_DW0, spec->sfs_dword[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->sfs_dword[0]);
+ dword3 = is_wild | spec->sfs_dmaq_id << 1;
+ break;
+ }
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ return (0);
+ }
+
+ key =
+ spec->sfs_dword[0] ^
+ spec->sfs_dword[1] ^
+ spec->sfs_dword[2] ^
+ dword3;
+
+ return (key);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_push_entry(
+ __inout efx_nic_t *enp,
+ __in siena_filter_type_t type,
+ __in int index,
+ __in efx_oword_t *eop)
+{
+ efx_rc_t rc;
+
+ switch (type) {
+ case EFX_SIENA_FILTER_RX_TCP_FULL:
+ case EFX_SIENA_FILTER_RX_TCP_WILD:
+ case EFX_SIENA_FILTER_RX_UDP_FULL:
+ case EFX_SIENA_FILTER_RX_UDP_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_RX_MAC_FULL:
+ case EFX_SIENA_FILTER_RX_MAC_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_RX_MAC_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_TX_TCP_FULL:
+ case EFX_SIENA_FILTER_TX_TCP_WILD:
+ case EFX_SIENA_FILTER_TX_UDP_FULL:
+ case EFX_SIENA_FILTER_TX_UDP_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_TX_MAC_FULL:
+ case EFX_SIENA_FILTER_TX_MAC_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_MAC_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ return (0);
+
+fail1:
+ return (rc);
+}
+
+
+static __checkReturn boolean_t
+siena_filter_equal(
+ __in const siena_filter_spec_t *left,
+ __in const siena_filter_spec_t *right)
+{
+ siena_filter_tbl_id_t tbl_id;
+
+ tbl_id = siena_filter_tbl_id(left->sfs_type);
+
+
+ if (left->sfs_type != right->sfs_type)
+ return (B_FALSE);
+
+ if (memcmp(left->sfs_dword, right->sfs_dword,
+ sizeof (left->sfs_dword)))
+ return (B_FALSE);
+
+ if ((tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC) &&
+ left->sfs_dmaq_id != right->sfs_dmaq_id)
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_search(
+ __in siena_filter_tbl_t *sftp,
+ __in siena_filter_spec_t *spec,
+ __in uint32_t key,
+ __in boolean_t for_insert,
+ __out int *filter_index,
+ __out unsigned int *depth_required)
+{
+ unsigned int hash, incr, filter_idx, depth;
+
+ hash = siena_filter_tbl_hash(key);
+ incr = siena_filter_tbl_increment(key);
+
+ filter_idx = hash & (sftp->sft_size - 1);
+ depth = 1;
+
+ for (;;) {
+ /*
+ * Return success if entry is used and matches this spec
+ * or entry is unused and we are trying to insert.
+ */
+ if (siena_filter_test_used(sftp, filter_idx) ?
+ siena_filter_equal(spec,
+ &sftp->sft_spec[filter_idx]) :
+ for_insert) {
+ *filter_index = filter_idx;
+ *depth_required = depth;
+ return (0);
+ }
+
+ /* Return failure if we reached the maximum search depth */
+ if (depth == FILTER_CTL_SRCH_MAX)
+ return (for_insert ? EBUSY : ENOENT);
+
+ filter_idx = (filter_idx + incr) & (sftp->sft_size - 1);
+ ++depth;
+ }
+}
+
+static void
+siena_filter_clear_entry(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_t *sftp,
+ __in int index)
+{
+ efx_oword_t filter;
+
+ if (siena_filter_test_used(sftp, index)) {
+ siena_filter_clear_used(sftp, index);
+
+ EFX_ZERO_OWORD(filter);
+ siena_filter_push_entry(enp,
+ sftp->sft_spec[index].sfs_type,
+ index, &filter);
+
+ memset(&sftp->sft_spec[index],
+ 0, sizeof (sftp->sft_spec[0]));
+ }
+}
+
+ void
+siena_filter_tbl_clear(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_id_t tbl_id)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
+ int index;
+ efsys_lock_state_t state;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ for (index = 0; index < sftp->sft_size; ++index) {
+ siena_filter_clear_entry(enp, sftp, index);
+ }
+
+ if (sftp->sft_used == 0)
+ siena_filter_reset_search_depth(sfp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_init(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp;
+ siena_filter_tbl_t *sftp;
+ int tbl_id;
+ efx_rc_t rc;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (siena_filter_t), sfp);
+
+ if (!sfp) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_filter.ef_siena_filter = sfp;
+
+ switch (enp->en_family) {
+ case EFX_FAMILY_SIENA:
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_IP];
+ sftp->sft_size = FR_AZ_RX_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC];
+ sftp->sft_size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP];
+ sftp->sft_size = FR_CZ_TX_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC];
+ sftp->sft_size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ break;
+
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ unsigned int bitmap_size;
+
+ sftp = &sfp->sf_tbl[tbl_id];
+ if (sftp->sft_size == 0)
+ continue;
+
+ EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
+ sizeof (uint32_t));
+ bitmap_size =
+ (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, bitmap_size, sftp->sft_bitmap);
+ if (!sftp->sft_bitmap) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ sftp->sft_size * sizeof (*sftp->sft_spec),
+ sftp->sft_spec);
+ if (!sftp->sft_spec) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+ memset(sftp->sft_spec, 0,
+ sftp->sft_size * sizeof (*sftp->sft_spec));
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ siena_filter_fini(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static void
+siena_filter_fini(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (sfp == NULL)
+ return;
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
+ unsigned int bitmap_size;
+
+ EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
+ sizeof (uint32_t));
+ bitmap_size =
+ (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;
+
+ if (sftp->sft_bitmap != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, bitmap_size,
+ sftp->sft_bitmap);
+ sftp->sft_bitmap = NULL;
+ }
+
+ if (sftp->sft_spec != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, sftp->sft_size *
+ sizeof (*sftp->sft_spec), sftp->sft_spec);
+ sftp->sft_spec = NULL;
+ }
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (siena_filter_t),
+ enp->en_filter.ef_siena_filter);
+}
+
+/* Restore filter state after a reset */
+static __checkReturn efx_rc_t
+siena_filter_restore(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ siena_filter_spec_t *spec;
+ efx_oword_t filter;
+ int filter_idx;
+ efsys_lock_state_t state;
+ uint32_t key;
+ efx_rc_t rc;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ sftp = &sfp->sf_tbl[tbl_id];
+ for (filter_idx = 0;
+ filter_idx < sftp->sft_size;
+ filter_idx++) {
+ if (!siena_filter_test_used(sftp, filter_idx))
+ continue;
+
+ spec = &sftp->sft_spec[filter_idx];
+ if ((key = siena_filter_build(&filter, spec)) == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if ((rc = siena_filter_push_entry(enp,
+ spec->sfs_type, filter_idx, &filter)) != 0)
+ goto fail2;
+ }
+ }
+
+ siena_filter_push_rx_limits(enp);
+ siena_filter_push_tx_limits(enp);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace)
+{
+ efx_rc_t rc;
+ siena_filter_spec_t sf_spec;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ siena_filter_spec_t *saved_sf_spec;
+ efx_oword_t filter;
+ int filter_idx;
+ unsigned int depth;
+ efsys_lock_state_t state;
+ uint32_t key;
+
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
+ goto fail1;
+
+ tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
+ sftp = &sfp->sf_tbl[tbl_id];
+
+ if (sftp->sft_size == 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ key = siena_filter_build(&filter, &sf_spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = siena_filter_search(sftp, &sf_spec, key, B_TRUE,
+ &filter_idx, &depth);
+ if (rc != 0)
+ goto fail3;
+
+ EFSYS_ASSERT3U(filter_idx, <, sftp->sft_size);
+ saved_sf_spec = &sftp->sft_spec[filter_idx];
+
+ if (siena_filter_test_used(sftp, filter_idx)) {
+ if (may_replace == B_FALSE) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ siena_filter_set_used(sftp, filter_idx);
+ *saved_sf_spec = sf_spec;
+
+ if (sfp->sf_depth[sf_spec.sfs_type] < depth) {
+ sfp->sf_depth[sf_spec.sfs_type] = depth;
+ if (tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC)
+ siena_filter_push_tx_limits(enp);
+ else
+ siena_filter_push_rx_limits(enp);
+ }
+
+ siena_filter_push_entry(enp, sf_spec.sfs_type,
+ filter_idx, &filter);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+fail3:
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ efx_rc_t rc;
+ siena_filter_spec_t sf_spec;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ efx_oword_t filter;
+ int filter_idx;
+ unsigned int depth;
+ efsys_lock_state_t state;
+ uint32_t key;
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
+ goto fail1;
+
+ tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
+ sftp = &sfp->sf_tbl[tbl_id];
+
+ key = siena_filter_build(&filter, &sf_spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = siena_filter_search(sftp, &sf_spec, key, B_FALSE,
+ &filter_idx, &depth);
+ if (rc != 0)
+ goto fail2;
+
+ siena_filter_clear_entry(enp, sftp, filter_idx);
+ if (sftp->sft_used == 0)
+ siena_filter_reset_search_depth(sfp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (0);
+
+fail2:
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#define SIENA_MAX_SUPPORTED_MATCHES 4
+
+static __checkReturn efx_rc_t
+siena_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+ uint32_t index = 0;
+ uint32_t rx_matches[SIENA_MAX_SUPPORTED_MATCHES];
+ size_t list_length;
+ efx_rc_t rc;
+
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+
+ if (enp->en_features & EFX_FEATURE_MAC_HEADER_FILTERS) {
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC;
+
+ rx_matches[index++] = EFX_FILTER_MATCH_LOC_MAC;
+ }
+
+ EFSYS_ASSERT3U(index, <=, SIENA_MAX_SUPPORTED_MATCHES);
+ list_length = index;
+
+ *list_lengthp = list_length;
+
+ if (buffer_length < list_length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ memcpy(buffer, rx_matches, list_length * sizeof (rx_matches[0]));
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#undef MAX_SUPPORTED
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_FILTER */
diff --git a/drivers/net/sfc/base/efx_hash.c b/drivers/net/sfc/base/efx_hash.c
new file mode 100644
index 00000000..3cc0d200
--- /dev/null
+++ b/drivers/net/sfc/base/efx_hash.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2006 Bob Jenkins
+ *
+ * Derived from public domain source, see
+ * <http://burtleburtle.net/bob/c/lookup3.c>:
+ *
+ * "lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
+ * These are functions for producing 32-bit hashes for hash table lookup...
+ * ...You can use this free for any purpose. It's in the public domain.
+ * It has no warranty."
+ *
+ * Copyright (c) 2014-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+/* Hash initial value */
+#define EFX_HASH_INITIAL_VALUE 0xdeadbeef
+
+/*
+ * Rotate a 32-bit value left
+ *
+ * Allow platform to provide an intrinsic or optimised routine and
+ * fall-back to a simple shift based implementation.
+ */
+#if EFSYS_HAS_ROTL_DWORD
+
+#define EFX_HASH_ROTATE(_value, _shift) \
+ EFSYS_ROTL_DWORD(_value, _shift)
+
+#else
+
+#define EFX_HASH_ROTATE(_value, _shift) \
+ (((_value) << (_shift)) | ((_value) >> (32 - (_shift))))
+
+#endif
+
+/* Mix three 32-bit values reversibly */
+#define EFX_HASH_MIX(_a, _b, _c) \
+ do { \
+ _a -= _c; \
+ _a ^= EFX_HASH_ROTATE(_c, 4); \
+ _c += _b; \
+ _b -= _a; \
+ _b ^= EFX_HASH_ROTATE(_a, 6); \
+ _a += _c; \
+ _c -= _b; \
+ _c ^= EFX_HASH_ROTATE(_b, 8); \
+ _b += _a; \
+ _a -= _c; \
+ _a ^= EFX_HASH_ROTATE(_c, 16); \
+ _c += _b; \
+ _b -= _a; \
+ _b ^= EFX_HASH_ROTATE(_a, 19); \
+ _a += _c; \
+ _c -= _b; \
+ _c ^= EFX_HASH_ROTATE(_b, 4); \
+ _b += _a; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* Final mixing of three 32-bit values into one (_c) */
+#define EFX_HASH_FINALISE(_a, _b, _c) \
+ do { \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 14); \
+ _a ^= _c; \
+ _a -= EFX_HASH_ROTATE(_c, 11); \
+ _b ^= _a; \
+ _b -= EFX_HASH_ROTATE(_a, 25); \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 16); \
+ _a ^= _c; \
+ _a -= EFX_HASH_ROTATE(_c, 4); \
+ _b ^= _a; \
+ _b -= EFX_HASH_ROTATE(_a, 14); \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 24); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+
+/* Produce a 32-bit hash from 32-bit aligned input */
+ __checkReturn uint32_t
+efx_hash_dwords(
+ __in_ecount(count) uint32_t const *input,
+ __in size_t count,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE +
+ (((uint32_t)count) * sizeof (uint32_t)) + init;
+
+ /* Handle all but the last three dwords of the input */
+ while (count > 3) {
+ a += input[0];
+ b += input[1];
+ c += input[2];
+ EFX_HASH_MIX(a, b, c);
+
+ count -= 3;
+ input += 3;
+ }
+
+ /* Handle the left-overs */
+ switch (count) {
+ case 3:
+ c += input[2];
+ /* Fall-through */
+ case 2:
+ b += input[1];
+ /* Fall-through */
+ case 1:
+ a += input[0];
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if count parameter was zero */
+ break;
+ }
+
+ return (c);
+}
+
+#if EFSYS_IS_BIG_ENDIAN
+
+/* Produce a 32-bit hash from arbitrarily aligned input */
+ __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE + (uint32_t)length + init;
+
+ /* Handle all but the last twelve bytes of the input */
+ while (length > 12) {
+ a += ((uint32_t)input[0]) << 24;
+ a += ((uint32_t)input[1]) << 16;
+ a += ((uint32_t)input[2]) << 8;
+ a += ((uint32_t)input[3]);
+ b += ((uint32_t)input[4]) << 24;
+ b += ((uint32_t)input[5]) << 16;
+ b += ((uint32_t)input[6]) << 8;
+ b += ((uint32_t)input[7]);
+ c += ((uint32_t)input[8]) << 24;
+ c += ((uint32_t)input[9]) << 16;
+ c += ((uint32_t)input[10]) << 8;
+ c += ((uint32_t)input[11]);
+ EFX_HASH_MIX(a, b, c);
+ length -= 12;
+ input += 12;
+ }
+
+ /* Handle the left-overs */
+ switch (length) {
+ case 12:
+ c += ((uint32_t)input[11]);
+ /* Fall-through */
+ case 11:
+ c += ((uint32_t)input[10]) << 8;
+ /* Fall-through */
+ case 10:
+ c += ((uint32_t)input[9]) << 16;
+ /* Fall-through */
+ case 9:
+ c += ((uint32_t)input[8]) << 24;
+ /* Fall-through */
+ case 8:
+ b += ((uint32_t)input[7]);
+ /* Fall-through */
+ case 7:
+ b += ((uint32_t)input[6]) << 8;
+ /* Fall-through */
+ case 6:
+ b += ((uint32_t)input[5]) << 16;
+ /* Fall-through */
+ case 5:
+ b += ((uint32_t)input[4]) << 24;
+ /* Fall-through */
+ case 4:
+ a += ((uint32_t)input[3]);
+ /* Fall-through */
+ case 3:
+ a += ((uint32_t)input[2]) << 8;
+ /* Fall-through */
+ case 2:
+ a += ((uint32_t)input[1]) << 16;
+ /* Fall-through */
+ case 1:
+ a += ((uint32_t)input[0]) << 24;
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if length parameter was zero */
+ break;
+ }
+
+ return (c);
+}
+
+#elif EFSYS_IS_LITTLE_ENDIAN
+
+/* Produce a 32-bit hash from arbitrarily aligned input */
+ __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE + (uint32_t)length + init;
+
+ /* Handle all but the last twelve bytes of the input */
+ while (length > 12) {
+ a += ((uint32_t)input[0]);
+ a += ((uint32_t)input[1]) << 8;
+ a += ((uint32_t)input[2]) << 16;
+ a += ((uint32_t)input[3]) << 24;
+ b += ((uint32_t)input[4]);
+ b += ((uint32_t)input[5]) << 8;
+ b += ((uint32_t)input[6]) << 16;
+ b += ((uint32_t)input[7]) << 24;
+ c += ((uint32_t)input[8]);
+ c += ((uint32_t)input[9]) << 8;
+ c += ((uint32_t)input[10]) << 16;
+ c += ((uint32_t)input[11]) << 24;
+ EFX_HASH_MIX(a, b, c);
+ length -= 12;
+ input += 12;
+ }
+
+ /* Handle the left-overs */
+ switch (length) {
+ case 12:
+ c += ((uint32_t)input[11]) << 24;
+ /* Fall-through */
+ case 11:
+ c += ((uint32_t)input[10]) << 16;
+ /* Fall-through */
+ case 10:
+ c += ((uint32_t)input[9]) << 8;
+ /* Fall-through */
+ case 9:
+ c += ((uint32_t)input[8]);
+ /* Fall-through */
+ case 8:
+ b += ((uint32_t)input[7]) << 24;
+ /* Fall-through */
+ case 7:
+ b += ((uint32_t)input[6]) << 16;
+ /* Fall-through */
+ case 6:
+ b += ((uint32_t)input[5]) << 8;
+ /* Fall-through */
+ case 5:
+ b += ((uint32_t)input[4]);
+ /* Fall-through */
+ case 4:
+ a += ((uint32_t)input[3]) << 24;
+ /* Fall-through */
+ case 3:
+ a += ((uint32_t)input[2]) << 16;
+ /* Fall-through */
+ case 2:
+ a += ((uint32_t)input[1]) << 8;
+ /* Fall-through */
+ case 1:
+ a += ((uint32_t)input[0]);
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if length parameter was zero */
+ break;
+ }
+
+ return (c);
+}
+
+#else
+
+#error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set"
+
+#endif
diff --git a/drivers/net/sfc/base/efx_impl.h b/drivers/net/sfc/base/efx_impl.h
new file mode 100644
index 00000000..43add6d9
--- /dev/null
+++ b/drivers/net/sfc/base/efx_impl.h
@@ -0,0 +1,1208 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_IMPL_H
+#define _SYS_EFX_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+/* FIXME: Add definition for driver generated software events */
+#ifndef ESE_DZ_EV_CODE_DRV_GEN_EV
+#define ESE_DZ_EV_CODE_DRV_GEN_EV FSE_AZ_EV_CODE_DRV_GEN_EV
+#endif
+
+
+#if EFSYS_OPT_SIENA
+#include "siena_impl.h"
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+#include "hunt_impl.h"
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+#include "medford_impl.h"
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#include "ef10_impl.h"
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFX_MOD_MCDI 0x00000001
+#define EFX_MOD_PROBE 0x00000002
+#define EFX_MOD_NVRAM 0x00000004
+#define EFX_MOD_VPD 0x00000008
+#define EFX_MOD_NIC 0x00000010
+#define EFX_MOD_INTR 0x00000020
+#define EFX_MOD_EV 0x00000040
+#define EFX_MOD_RX 0x00000080
+#define EFX_MOD_TX 0x00000100
+#define EFX_MOD_PORT 0x00000200
+#define EFX_MOD_MON 0x00000400
+#define EFX_MOD_FILTER 0x00001000
+#define EFX_MOD_LIC 0x00002000
+
+#define EFX_RESET_PHY 0x00000001
+#define EFX_RESET_RXQ_ERR 0x00000002
+#define EFX_RESET_TXQ_ERR 0x00000004
+
+typedef enum efx_mac_type_e {
+ EFX_MAC_INVALID = 0,
+ EFX_MAC_SIENA,
+ EFX_MAC_HUNTINGTON,
+ EFX_MAC_MEDFORD,
+ EFX_MAC_NTYPES
+} efx_mac_type_t;
+
+typedef struct efx_ev_ops_s {
+ efx_rc_t (*eevo_init)(efx_nic_t *);
+ void (*eevo_fini)(efx_nic_t *);
+ efx_rc_t (*eevo_qcreate)(efx_nic_t *, unsigned int,
+ efsys_mem_t *, size_t, uint32_t,
+ uint32_t, uint32_t, efx_evq_t *);
+ void (*eevo_qdestroy)(efx_evq_t *);
+ efx_rc_t (*eevo_qprime)(efx_evq_t *, unsigned int);
+ void (*eevo_qpost)(efx_evq_t *, uint16_t);
+ efx_rc_t (*eevo_qmoderate)(efx_evq_t *, unsigned int);
+#if EFSYS_OPT_QSTATS
+ void (*eevo_qstats_update)(efx_evq_t *, efsys_stat_t *);
+#endif
+} efx_ev_ops_t;
+
+typedef struct efx_tx_ops_s {
+ efx_rc_t (*etxo_init)(efx_nic_t *);
+ void (*etxo_fini)(efx_nic_t *);
+ efx_rc_t (*etxo_qcreate)(efx_nic_t *,
+ unsigned int, unsigned int,
+ efsys_mem_t *, size_t,
+ uint32_t, uint16_t,
+ efx_evq_t *, efx_txq_t *,
+ unsigned int *);
+ void (*etxo_qdestroy)(efx_txq_t *);
+ efx_rc_t (*etxo_qpost)(efx_txq_t *, efx_buffer_t *,
+ unsigned int, unsigned int,
+ unsigned int *);
+ void (*etxo_qpush)(efx_txq_t *, unsigned int, unsigned int);
+ efx_rc_t (*etxo_qpace)(efx_txq_t *, unsigned int);
+ efx_rc_t (*etxo_qflush)(efx_txq_t *);
+ void (*etxo_qenable)(efx_txq_t *);
+ efx_rc_t (*etxo_qpio_enable)(efx_txq_t *);
+ void (*etxo_qpio_disable)(efx_txq_t *);
+ efx_rc_t (*etxo_qpio_write)(efx_txq_t *, uint8_t *, size_t,
+ size_t);
+ efx_rc_t (*etxo_qpio_post)(efx_txq_t *, size_t, unsigned int,
+ unsigned int *);
+ efx_rc_t (*etxo_qdesc_post)(efx_txq_t *, efx_desc_t *,
+ unsigned int, unsigned int,
+ unsigned int *);
+ void (*etxo_qdesc_dma_create)(efx_txq_t *, efsys_dma_addr_t,
+ size_t, boolean_t,
+ efx_desc_t *);
+ void (*etxo_qdesc_tso_create)(efx_txq_t *, uint16_t,
+ uint32_t, uint8_t,
+ efx_desc_t *);
+ void (*etxo_qdesc_tso2_create)(efx_txq_t *, uint16_t,
+ uint32_t, uint16_t,
+ efx_desc_t *, int);
+ void (*etxo_qdesc_vlantci_create)(efx_txq_t *, uint16_t,
+ efx_desc_t *);
+#if EFSYS_OPT_QSTATS
+ void (*etxo_qstats_update)(efx_txq_t *,
+ efsys_stat_t *);
+#endif
+} efx_tx_ops_t;
+
+typedef struct efx_rx_ops_s {
+ efx_rc_t (*erxo_init)(efx_nic_t *);
+ void (*erxo_fini)(efx_nic_t *);
+#if EFSYS_OPT_RX_SCATTER
+ efx_rc_t (*erxo_scatter_enable)(efx_nic_t *, unsigned int);
+#endif
+#if EFSYS_OPT_RX_SCALE
+ efx_rc_t (*erxo_scale_mode_set)(efx_nic_t *, efx_rx_hash_alg_t,
+ efx_rx_hash_type_t, boolean_t);
+ efx_rc_t (*erxo_scale_key_set)(efx_nic_t *, uint8_t *, size_t);
+ efx_rc_t (*erxo_scale_tbl_set)(efx_nic_t *, unsigned int *,
+ size_t);
+ uint32_t (*erxo_prefix_hash)(efx_nic_t *, efx_rx_hash_alg_t,
+ uint8_t *);
+#endif /* EFSYS_OPT_RX_SCALE */
+ efx_rc_t (*erxo_prefix_pktlen)(efx_nic_t *, uint8_t *,
+ uint16_t *);
+ void (*erxo_qpost)(efx_rxq_t *, efsys_dma_addr_t *, size_t,
+ unsigned int, unsigned int,
+ unsigned int);
+ void (*erxo_qpush)(efx_rxq_t *, unsigned int, unsigned int *);
+#if EFSYS_OPT_RX_PACKED_STREAM
+ void (*erxo_qps_update_credits)(efx_rxq_t *);
+ uint8_t * (*erxo_qps_packet_info)(efx_rxq_t *, uint8_t *,
+ uint32_t, uint32_t,
+ uint16_t *, uint32_t *, uint32_t *);
+#endif
+ efx_rc_t (*erxo_qflush)(efx_rxq_t *);
+ void (*erxo_qenable)(efx_rxq_t *);
+ efx_rc_t (*erxo_qcreate)(efx_nic_t *enp, unsigned int,
+ unsigned int, efx_rxq_type_t,
+ efsys_mem_t *, size_t, uint32_t,
+ efx_evq_t *, efx_rxq_t *);
+ void (*erxo_qdestroy)(efx_rxq_t *);
+} efx_rx_ops_t;
+
+typedef struct efx_mac_ops_s {
+ efx_rc_t (*emo_poll)(efx_nic_t *, efx_link_mode_t *);
+ efx_rc_t (*emo_up)(efx_nic_t *, boolean_t *);
+ efx_rc_t (*emo_addr_set)(efx_nic_t *);
+ efx_rc_t (*emo_pdu_set)(efx_nic_t *);
+ efx_rc_t (*emo_pdu_get)(efx_nic_t *, size_t *);
+ efx_rc_t (*emo_reconfigure)(efx_nic_t *);
+ efx_rc_t (*emo_multicast_list_set)(efx_nic_t *);
+ efx_rc_t (*emo_filter_default_rxq_set)(efx_nic_t *,
+ efx_rxq_t *, boolean_t);
+ void (*emo_filter_default_rxq_clear)(efx_nic_t *);
+#if EFSYS_OPT_LOOPBACK
+ efx_rc_t (*emo_loopback_set)(efx_nic_t *, efx_link_mode_t,
+ efx_loopback_type_t);
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ efx_rc_t (*emo_stats_get_mask)(efx_nic_t *, uint32_t *, size_t);
+ efx_rc_t (*emo_stats_clear)(efx_nic_t *);
+ efx_rc_t (*emo_stats_upload)(efx_nic_t *, efsys_mem_t *);
+ efx_rc_t (*emo_stats_periodic)(efx_nic_t *, efsys_mem_t *,
+ uint16_t, boolean_t);
+ efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ efsys_stat_t *, uint32_t *);
+#endif /* EFSYS_OPT_MAC_STATS */
+} efx_mac_ops_t;
+
+typedef struct efx_phy_ops_s {
+ efx_rc_t (*epo_power)(efx_nic_t *, boolean_t); /* optional */
+ efx_rc_t (*epo_reset)(efx_nic_t *);
+ efx_rc_t (*epo_reconfigure)(efx_nic_t *);
+ efx_rc_t (*epo_verify)(efx_nic_t *);
+ efx_rc_t (*epo_oui_get)(efx_nic_t *, uint32_t *);
+#if EFSYS_OPT_PHY_STATS
+ efx_rc_t (*epo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ uint32_t *);
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ efx_rc_t (*epo_bist_enable_offline)(efx_nic_t *);
+ efx_rc_t (*epo_bist_start)(efx_nic_t *, efx_bist_type_t);
+ efx_rc_t (*epo_bist_poll)(efx_nic_t *, efx_bist_type_t,
+ efx_bist_result_t *, uint32_t *,
+ unsigned long *, size_t);
+ void (*epo_bist_stop)(efx_nic_t *, efx_bist_type_t);
+#endif /* EFSYS_OPT_BIST */
+} efx_phy_ops_t;
+
+#if EFSYS_OPT_FILTER
+typedef struct efx_filter_ops_s {
+ efx_rc_t (*efo_init)(efx_nic_t *);
+ void (*efo_fini)(efx_nic_t *);
+ efx_rc_t (*efo_restore)(efx_nic_t *);
+ efx_rc_t (*efo_add)(efx_nic_t *, efx_filter_spec_t *,
+ boolean_t may_replace);
+ efx_rc_t (*efo_delete)(efx_nic_t *, efx_filter_spec_t *);
+ efx_rc_t (*efo_supported_filters)(efx_nic_t *, uint32_t *,
+ size_t, size_t *);
+ efx_rc_t (*efo_reconfigure)(efx_nic_t *, uint8_t const *, boolean_t,
+ boolean_t, boolean_t, boolean_t,
+ uint8_t const *, uint32_t);
+} efx_filter_ops_t;
+
+extern __checkReturn efx_rc_t
+efx_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count);
+
+#endif /* EFSYS_OPT_FILTER */
+
+
+typedef struct efx_port_s {
+ efx_mac_type_t ep_mac_type;
+ uint32_t ep_phy_type;
+ uint8_t ep_port;
+ uint32_t ep_mac_pdu;
+ uint8_t ep_mac_addr[6];
+ efx_link_mode_t ep_link_mode;
+ boolean_t ep_all_unicst;
+ boolean_t ep_mulcst;
+ boolean_t ep_all_mulcst;
+ boolean_t ep_brdcst;
+ unsigned int ep_fcntl;
+ boolean_t ep_fcntl_autoneg;
+ efx_oword_t ep_multicst_hash[2];
+ uint8_t ep_mulcst_addr_list[EFX_MAC_ADDR_LEN *
+ EFX_MAC_MULTICAST_LIST_MAX];
+ uint32_t ep_mulcst_addr_count;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t ep_loopback_type;
+ efx_link_mode_t ep_loopback_link_mode;
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_PHY_FLAGS
+ uint32_t ep_phy_flags;
+#endif /* EFSYS_OPT_PHY_FLAGS */
+#if EFSYS_OPT_PHY_LED_CONTROL
+ efx_phy_led_mode_t ep_phy_led_mode;
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+ efx_phy_media_type_t ep_fixed_port_type;
+ efx_phy_media_type_t ep_module_type;
+ uint32_t ep_adv_cap_mask;
+ uint32_t ep_lp_cap_mask;
+ uint32_t ep_default_adv_cap_mask;
+ uint32_t ep_phy_cap_mask;
+ boolean_t ep_mac_drain;
+ boolean_t ep_mac_stats_pending;
+#if EFSYS_OPT_BIST
+ efx_bist_type_t ep_current_bist;
+#endif
+ const efx_mac_ops_t *ep_emop;
+ const efx_phy_ops_t *ep_epop;
+} efx_port_t;
+
+typedef struct efx_mon_ops_s {
+#if EFSYS_OPT_MON_STATS
+ efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ efx_mon_stat_value_t *);
+#endif /* EFSYS_OPT_MON_STATS */
+} efx_mon_ops_t;
+
+typedef struct efx_mon_s {
+ efx_mon_type_t em_type;
+ const efx_mon_ops_t *em_emop;
+} efx_mon_t;
+
+typedef struct efx_intr_ops_s {
+ efx_rc_t (*eio_init)(efx_nic_t *, efx_intr_type_t, efsys_mem_t *);
+ void (*eio_enable)(efx_nic_t *);
+ void (*eio_disable)(efx_nic_t *);
+ void (*eio_disable_unlocked)(efx_nic_t *);
+ efx_rc_t (*eio_trigger)(efx_nic_t *, unsigned int);
+ void (*eio_status_line)(efx_nic_t *, boolean_t *, uint32_t *);
+ void (*eio_status_message)(efx_nic_t *, unsigned int,
+ boolean_t *);
+ void (*eio_fatal)(efx_nic_t *);
+ void (*eio_fini)(efx_nic_t *);
+} efx_intr_ops_t;
+
+typedef struct efx_intr_s {
+ const efx_intr_ops_t *ei_eiop;
+ efsys_mem_t *ei_esmp;
+ efx_intr_type_t ei_type;
+ unsigned int ei_level;
+} efx_intr_t;
+
+typedef struct efx_nic_ops_s {
+ efx_rc_t (*eno_probe)(efx_nic_t *);
+ efx_rc_t (*eno_board_cfg)(efx_nic_t *);
+ efx_rc_t (*eno_set_drv_limits)(efx_nic_t *, efx_drv_limits_t*);
+ efx_rc_t (*eno_reset)(efx_nic_t *);
+ efx_rc_t (*eno_init)(efx_nic_t *);
+ efx_rc_t (*eno_get_vi_pool)(efx_nic_t *, uint32_t *);
+ efx_rc_t (*eno_get_bar_region)(efx_nic_t *, efx_nic_region_t,
+ uint32_t *, size_t *);
+#if EFSYS_OPT_DIAG
+ efx_rc_t (*eno_register_test)(efx_nic_t *);
+#endif /* EFSYS_OPT_DIAG */
+ void (*eno_fini)(efx_nic_t *);
+ void (*eno_unprobe)(efx_nic_t *);
+} efx_nic_ops_t;
+
+#ifndef EFX_TXQ_LIMIT_TARGET
+#define EFX_TXQ_LIMIT_TARGET 259
+#endif
+#ifndef EFX_RXQ_LIMIT_TARGET
+#define EFX_RXQ_LIMIT_TARGET 512
+#endif
+#ifndef EFX_TXQ_DC_SIZE
+#define EFX_TXQ_DC_SIZE 1 /* 16 descriptors */
+#endif
+#ifndef EFX_RXQ_DC_SIZE
+#define EFX_RXQ_DC_SIZE 3 /* 64 descriptors */
+#endif
+
+#if EFSYS_OPT_FILTER
+
+#if EFSYS_OPT_SIENA
+
+typedef struct siena_filter_spec_s {
+ uint8_t sfs_type;
+ uint32_t sfs_flags;
+ uint32_t sfs_dmaq_id;
+ uint32_t sfs_dword[3];
+} siena_filter_spec_t;
+
+typedef enum siena_filter_type_e {
+ EFX_SIENA_FILTER_RX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_RX_TCP_WILD, /* TCP/IPv4 {dIP,dTCP, -, -} */
+ EFX_SIENA_FILTER_RX_UDP_FULL, /* UDP/IPv4 {dIP,dUDP,sIP,sUDP} */
+ EFX_SIENA_FILTER_RX_UDP_WILD, /* UDP/IPv4 {dIP,dUDP, -, -} */
+ EFX_SIENA_FILTER_RX_MAC_FULL, /* Ethernet {dMAC,VLAN} */
+ EFX_SIENA_FILTER_RX_MAC_WILD, /* Ethernet {dMAC, -} */
+
+ EFX_SIENA_FILTER_TX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_TCP_WILD, /* TCP/IPv4 { -, -,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_UDP_FULL, /* UDP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_UDP_WILD, /* UDP/IPv4 { -, -,sIP,sUDP} */
+ EFX_SIENA_FILTER_TX_MAC_FULL, /* Ethernet {sMAC,VLAN} */
+ EFX_SIENA_FILTER_TX_MAC_WILD, /* Ethernet {sMAC, -} */
+
+ EFX_SIENA_FILTER_NTYPES
+} siena_filter_type_t;
+
+typedef enum siena_filter_tbl_id_e {
+ EFX_SIENA_FILTER_TBL_RX_IP = 0,
+ EFX_SIENA_FILTER_TBL_RX_MAC,
+ EFX_SIENA_FILTER_TBL_TX_IP,
+ EFX_SIENA_FILTER_TBL_TX_MAC,
+ EFX_SIENA_FILTER_NTBLS
+} siena_filter_tbl_id_t;
+
+typedef struct siena_filter_tbl_s {
+ int sft_size; /* number of entries */
+ int sft_used; /* active count */
+ uint32_t *sft_bitmap; /* active bitmap */
+ siena_filter_spec_t *sft_spec; /* array of saved specs */
+} siena_filter_tbl_t;
+
+typedef struct siena_filter_s {
+ siena_filter_tbl_t sf_tbl[EFX_SIENA_FILTER_NTBLS];
+ unsigned int sf_depth[EFX_SIENA_FILTER_NTYPES];
+} siena_filter_t;
+
+#endif /* EFSYS_OPT_SIENA */
+
+typedef struct efx_filter_s {
+#if EFSYS_OPT_SIENA
+ siena_filter_t *ef_siena_filter;
+#endif /* EFSYS_OPT_SIENA */
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ ef10_filter_table_t *ef_ef10_filter_table;
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+} efx_filter_t;
+
+#if EFSYS_OPT_SIENA
+
+extern void
+siena_filter_tbl_clear(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_id_t tbl);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_FILTER */
+
+#if EFSYS_OPT_MCDI
+
+typedef struct efx_mcdi_ops_s {
+ efx_rc_t (*emco_init)(efx_nic_t *, const efx_mcdi_transport_t *);
+ void (*emco_send_request)(efx_nic_t *, void *, size_t,
+ void *, size_t);
+ efx_rc_t (*emco_poll_reboot)(efx_nic_t *);
+ boolean_t (*emco_poll_response)(efx_nic_t *);
+ void (*emco_read_response)(efx_nic_t *, void *, size_t, size_t);
+ void (*emco_fini)(efx_nic_t *);
+ efx_rc_t (*emco_feature_supported)(efx_nic_t *,
+ efx_mcdi_feature_id_t, boolean_t *);
+ void (*emco_get_timeout)(efx_nic_t *, efx_mcdi_req_t *,
+ uint32_t *);
+} efx_mcdi_ops_t;
+
+typedef struct efx_mcdi_s {
+ const efx_mcdi_ops_t *em_emcop;
+ const efx_mcdi_transport_t *em_emtp;
+ efx_mcdi_iface_t em_emip;
+} efx_mcdi_t;
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_NVRAM
+typedef struct efx_nvram_ops_s {
+#if EFSYS_OPT_DIAG
+ efx_rc_t (*envo_test)(efx_nic_t *);
+#endif /* EFSYS_OPT_DIAG */
+ efx_rc_t (*envo_type_to_partn)(efx_nic_t *, efx_nvram_type_t,
+ uint32_t *);
+ efx_rc_t (*envo_partn_size)(efx_nic_t *, uint32_t, size_t *);
+ efx_rc_t (*envo_partn_rw_start)(efx_nic_t *, uint32_t, size_t *);
+ efx_rc_t (*envo_partn_read)(efx_nic_t *, uint32_t,
+ unsigned int, caddr_t, size_t);
+ efx_rc_t (*envo_partn_erase)(efx_nic_t *, uint32_t,
+ unsigned int, size_t);
+ efx_rc_t (*envo_partn_write)(efx_nic_t *, uint32_t,
+ unsigned int, caddr_t, size_t);
+ efx_rc_t (*envo_partn_rw_finish)(efx_nic_t *, uint32_t);
+ efx_rc_t (*envo_partn_get_version)(efx_nic_t *, uint32_t,
+ uint32_t *, uint16_t *);
+ efx_rc_t (*envo_partn_set_version)(efx_nic_t *, uint32_t,
+ uint16_t *);
+ efx_rc_t (*envo_buffer_validate)(efx_nic_t *, uint32_t,
+ caddr_t, size_t);
+} efx_nvram_ops_t;
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_VPD
+typedef struct efx_vpd_ops_s {
+ efx_rc_t (*evpdo_init)(efx_nic_t *);
+ efx_rc_t (*evpdo_size)(efx_nic_t *, size_t *);
+ efx_rc_t (*evpdo_read)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_verify)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_reinit)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_get)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *);
+ efx_rc_t (*evpdo_set)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *);
+ efx_rc_t (*evpdo_next)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *, unsigned int *);
+ efx_rc_t (*evpdo_write)(efx_nic_t *, caddr_t, size_t);
+ void (*evpdo_fini)(efx_nic_t *);
+} efx_vpd_ops_t;
+#endif /* EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_partitions(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __out unsigned int *npartnp);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_metadata(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4],
+ __out_bcount_opt(size) char *descp,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_info(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt size_t *sizep,
+ __out_opt uint32_t *addressp,
+ __out_opt uint32_t *erase_sizep,
+ __out_opt uint32_t *write_sizep);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t reboot,
+ __out_opt uint32_t *resultp);
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_test(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_LICENSING
+
+typedef struct efx_lic_ops_s {
+ efx_rc_t (*elo_update_licenses)(efx_nic_t *);
+ efx_rc_t (*elo_get_key_stats)(efx_nic_t *, efx_key_stats_t *);
+ efx_rc_t (*elo_app_state)(efx_nic_t *, uint64_t, boolean_t *);
+ efx_rc_t (*elo_get_id)(efx_nic_t *, size_t, uint32_t *,
+ size_t *, uint8_t *);
+ efx_rc_t (*elo_find_start)
+ (efx_nic_t *, caddr_t, size_t, uint32_t *);
+ efx_rc_t (*elo_find_end)(efx_nic_t *, caddr_t, size_t,
+ uint32_t, uint32_t *);
+ boolean_t (*elo_find_key)(efx_nic_t *, caddr_t, size_t,
+ uint32_t, uint32_t *, uint32_t *);
+ boolean_t (*elo_validate_key)(efx_nic_t *,
+ caddr_t, uint32_t);
+ efx_rc_t (*elo_read_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t, uint32_t,
+ caddr_t, size_t, uint32_t *);
+ efx_rc_t (*elo_write_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t,
+ caddr_t, uint32_t, uint32_t *);
+ efx_rc_t (*elo_delete_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t,
+ uint32_t, uint32_t, uint32_t *);
+ efx_rc_t (*elo_create_partition)(efx_nic_t *,
+ caddr_t, size_t);
+ efx_rc_t (*elo_finish_partition)(efx_nic_t *,
+ caddr_t, size_t);
+} efx_lic_ops_t;
+
+#endif
+
+typedef struct efx_drv_cfg_s {
+ uint32_t edc_min_vi_count;
+ uint32_t edc_max_vi_count;
+
+ uint32_t edc_max_piobuf_count;
+ uint32_t edc_pio_alloc_size;
+} efx_drv_cfg_t;
+
+struct efx_nic_s {
+ uint32_t en_magic;
+ efx_family_t en_family;
+ uint32_t en_features;
+ efsys_identifier_t *en_esip;
+ efsys_lock_t *en_eslp;
+ efsys_bar_t *en_esbp;
+ unsigned int en_mod_flags;
+ unsigned int en_reset_flags;
+ efx_nic_cfg_t en_nic_cfg;
+ efx_drv_cfg_t en_drv_cfg;
+ efx_port_t en_port;
+ efx_mon_t en_mon;
+ efx_intr_t en_intr;
+ uint32_t en_ev_qcount;
+ uint32_t en_rx_qcount;
+ uint32_t en_tx_qcount;
+ const efx_nic_ops_t *en_enop;
+ const efx_ev_ops_t *en_eevop;
+ const efx_tx_ops_t *en_etxop;
+ const efx_rx_ops_t *en_erxop;
+#if EFSYS_OPT_FILTER
+ efx_filter_t en_filter;
+ const efx_filter_ops_t *en_efop;
+#endif /* EFSYS_OPT_FILTER */
+#if EFSYS_OPT_MCDI
+ efx_mcdi_t en_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+#if EFSYS_OPT_NVRAM
+ efx_nvram_type_t en_nvram_locked;
+ const efx_nvram_ops_t *en_envop;
+#endif /* EFSYS_OPT_NVRAM */
+#if EFSYS_OPT_VPD
+ const efx_vpd_ops_t *en_evpdop;
+#endif /* EFSYS_OPT_VPD */
+#if EFSYS_OPT_RX_SCALE
+ efx_rx_hash_support_t en_hash_support;
+ efx_rx_scale_support_t en_rss_support;
+ uint32_t en_rss_context;
+#endif /* EFSYS_OPT_RX_SCALE */
+ uint32_t en_vport_id;
+#if EFSYS_OPT_LICENSING
+ const efx_lic_ops_t *en_elop;
+ boolean_t en_licensing_supported;
+#endif
+ union {
+#if EFSYS_OPT_SIENA
+ struct {
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+ unsigned int enu_partn_mask;
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
+#if EFSYS_OPT_VPD
+ caddr_t enu_svpd;
+ size_t enu_svpd_length;
+#endif /* EFSYS_OPT_VPD */
+ int enu_unused;
+ } siena;
+#endif /* EFSYS_OPT_SIENA */
+ int enu_unused;
+ } en_u;
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+ union en_arch {
+ struct {
+ int ena_vi_base;
+ int ena_vi_count;
+ int ena_vi_shift;
+#if EFSYS_OPT_VPD
+ caddr_t ena_svpd;
+ size_t ena_svpd_length;
+#endif /* EFSYS_OPT_VPD */
+ efx_piobuf_handle_t ena_piobuf_handle[EF10_MAX_PIOBUF_NBUFS];
+ uint32_t ena_piobuf_count;
+ uint32_t ena_pio_alloc_map[EF10_MAX_PIOBUF_NBUFS];
+ uint32_t ena_pio_write_vi_base;
+ /* Memory BAR mapping regions */
+ uint32_t ena_uc_mem_map_offset;
+ size_t ena_uc_mem_map_size;
+ uint32_t ena_wc_mem_map_offset;
+ size_t ena_wc_mem_map_size;
+ } ef10;
+ } en_arch;
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+};
+
+
+#define EFX_NIC_MAGIC 0x02121996
+
+typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *,
+ const efx_ev_callbacks_t *, void *);
+
+typedef struct efx_evq_rxq_state_s {
+ unsigned int eers_rx_read_ptr;
+ unsigned int eers_rx_mask;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ unsigned int eers_rx_stream_npackets;
+ boolean_t eers_rx_packed_stream;
+ unsigned int eers_rx_packed_stream_credits;
+#endif
+} efx_evq_rxq_state_t;
+
+struct efx_evq_s {
+ uint32_t ee_magic;
+ efx_nic_t *ee_enp;
+ unsigned int ee_index;
+ unsigned int ee_mask;
+ efsys_mem_t *ee_esmp;
+#if EFSYS_OPT_QSTATS
+ uint32_t ee_stat[EV_NQSTATS];
+#endif /* EFSYS_OPT_QSTATS */
+
+ efx_ev_handler_t ee_rx;
+ efx_ev_handler_t ee_tx;
+ efx_ev_handler_t ee_driver;
+ efx_ev_handler_t ee_global;
+ efx_ev_handler_t ee_drv_gen;
+#if EFSYS_OPT_MCDI
+ efx_ev_handler_t ee_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+
+ efx_evq_rxq_state_t ee_rxq_state[EFX_EV_RX_NLABELS];
+
+ uint32_t ee_flags;
+};
+
+#define EFX_EVQ_MAGIC 0x08081997
+
+#define EFX_EVQ_SIENA_TIMER_QUANTUM_NS 6144 /* 768 cycles */
+
+struct efx_rxq_s {
+ uint32_t er_magic;
+ efx_nic_t *er_enp;
+ efx_evq_t *er_eep;
+ unsigned int er_index;
+ unsigned int er_label;
+ unsigned int er_mask;
+ efsys_mem_t *er_esmp;
+};
+
+#define EFX_RXQ_MAGIC 0x15022005
+
+struct efx_txq_s {
+ uint32_t et_magic;
+ efx_nic_t *et_enp;
+ unsigned int et_index;
+ unsigned int et_mask;
+ efsys_mem_t *et_esmp;
+#if EFSYS_OPT_HUNTINGTON
+ uint32_t et_pio_bufnum;
+ uint32_t et_pio_blknum;
+ uint32_t et_pio_write_offset;
+ uint32_t et_pio_offset;
+ size_t et_pio_size;
+#endif
+#if EFSYS_OPT_QSTATS
+ uint32_t et_stat[TX_NQSTATS];
+#endif /* EFSYS_OPT_QSTATS */
+};
+
+#define EFX_TXQ_MAGIC 0x05092005
+
+#define EFX_MAC_ADDR_COPY(_dst, _src) \
+ do { \
+ (_dst)[0] = (_src)[0]; \
+ (_dst)[1] = (_src)[1]; \
+ (_dst)[2] = (_src)[2]; \
+ (_dst)[3] = (_src)[3]; \
+ (_dst)[4] = (_src)[4]; \
+ (_dst)[5] = (_src)[5]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_MAC_BROADCAST_ADDR_SET(_dst) \
+ do { \
+ uint16_t *_d = (uint16_t *)(_dst); \
+ _d[0] = 0xffff; \
+ _d[1] = 0xffff; \
+ _d[2] = 0xffff; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#if EFSYS_OPT_CHECK_REG
+#define EFX_CHECK_REG(_enp, _reg) \
+ do { \
+ const char *name = #_reg; \
+ char min = name[4]; \
+ char max = name[5]; \
+ char rev; \
+ \
+ switch ((_enp)->en_family) { \
+ case EFX_FAMILY_SIENA: \
+ rev = 'C'; \
+ break; \
+ \
+ case EFX_FAMILY_HUNTINGTON: \
+ rev = 'D'; \
+ break; \
+ \
+ case EFX_FAMILY_MEDFORD: \
+ rev = 'E'; \
+ break; \
+ \
+ default: \
+ rev = '?'; \
+ break; \
+ } \
+ \
+ EFSYS_ASSERT3S(rev, >=, min); \
+ EFSYS_ASSERT3S(rev, <=, max); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_CHECK_REG(_enp, _reg) do { \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#endif
+
+#define EFX_BAR_READD(_enp, _reg, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, _reg ## _OFST, \
+ (_edp), (_lock)); \
+ EFSYS_PROBE3(efx_bar_readd, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITED(_enp, _reg, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE3(efx_bar_writed, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, _reg ## _OFST, \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_READQ(_enp, _reg, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READQ((_enp)->en_esbp, _reg ## _OFST, \
+ (_eqp)); \
+ EFSYS_PROBE4(efx_bar_readq, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITEQ(_enp, _reg, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_writeq, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ EFSYS_BAR_WRITEQ((_enp)->en_esbp, _reg ## _OFST, \
+ (_eqp)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_READO(_enp, _reg, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READO((_enp)->en_esbp, _reg ## _OFST, \
+ (_eop), B_TRUE); \
+ EFSYS_PROBE6(efx_bar_reado, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITEO(_enp, _reg, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE6(efx_bar_writeo, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_WRITEO((_enp)->en_esbp, _reg ## _OFST, \
+ (_eop), B_TRUE); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ EFSYS_PROBE4(efx_bar_tbl_readd, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED2(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + \
+ (2 * sizeof (efx_dword_t)) + \
+ ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + \
+ (3 * sizeof (efx_dword_t)) + \
+ ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READQ(_enp, _reg, _index, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READQ((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eqp)); \
+ EFSYS_PROBE5(efx_bar_tbl_readq, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITEQ(_enp, _reg, _index, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE5(efx_bar_tbl_writeq, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ EFSYS_BAR_WRITEQ((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eqp)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READO(_enp, _reg, _index, _eop, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop), (_lock)); \
+ EFSYS_PROBE7(efx_bar_tbl_reado, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITEO(_enp, _reg, _index, _eop, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE7(efx_bar_tbl_writeo, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_WRITEO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Allow drivers to perform optimised 128-bit doorbell writes.
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * special-cased in the BIU on the Falcon/Siena and EF10 architectures to avoid
+ * the need for locking in the host, and are the only ones known to be safe to
+ * use 128-bites write with.
+ */
+#define EFX_BAR_TBL_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE7(efx_bar_tbl_doorbell_writeo, \
+ const char *, \
+ #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_DOORBELL_WRITEO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_DMA_SYNC_QUEUE_FOR_DEVICE(_esmp, _entries, _wptr, _owptr) \
+ do { \
+ unsigned int _new = (_wptr); \
+ unsigned int _old = (_owptr); \
+ \
+ if ((_new) >= (_old)) \
+ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \
+ (_old) * sizeof (efx_desc_t), \
+ ((_new) - (_old)) * sizeof (efx_desc_t)); \
+ else \
+ /* \
+ * It is cheaper to sync entire map than sync \
+ * two parts especially when offset/size are \
+ * ignored and entire map is synced in any case.\
+ */ \
+ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \
+ 0, \
+ (_entries) * sizeof (efx_desc_t)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+extern __checkReturn efx_rc_t
+efx_nic_biu_test(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mac_select(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mac_multicast_hash_compute(
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count,
+ __out efx_oword_t *hash_low,
+ __out efx_oword_t *hash_high);
+
+extern __checkReturn efx_rc_t
+efx_phy_probe(
+ __in efx_nic_t *enp);
+
+extern void
+efx_phy_unprobe(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_VPD
+
+/* VPD utility functions */
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_length(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out size_t *lengthp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_verify(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out_opt boolean_t *cksummedp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_reinit(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t wantpid);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_get(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_tag_t tag,
+ __in efx_vpd_keyword_t keyword,
+ __out unsigned int *payloadp,
+ __out uint8_t *paylenp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_next(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_tag_t *tagp,
+ __out efx_vpd_keyword_t *keyword,
+ __out_opt unsigned int *payloadp,
+ __out_opt uint8_t *paylenp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_set(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+#endif /* EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_DIAG
+
+extern efx_sram_pattern_fn_t __efx_sram_pattern_fns[];
+
+typedef struct efx_register_set_s {
+ unsigned int address;
+ unsigned int step;
+ unsigned int rows;
+ efx_oword_t mask;
+} efx_register_set_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_test_registers(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in size_t count);
+
+extern __checkReturn efx_rc_t
+efx_nic_test_tables(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in efx_pattern_type_t pattern,
+ __in size_t count);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+efx_mcdi_set_workaround(
+ __in efx_nic_t *enp,
+ __in uint32_t type,
+ __in boolean_t enabled,
+ __out_opt uint32_t *flagsp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_workarounds(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *implementedp,
+ __out_opt uint32_t *enabledp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_MAC_STATS
+
+/*
+ * Closed range of stats (i.e. the first and the last are included).
+ * The last must be greater or equal (if the range is one item only) to
+ * the first.
+ */
+struct efx_mac_stats_range {
+ efx_mac_stat_t first;
+ efx_mac_stat_t last;
+};
+
+extern efx_rc_t
+efx_mac_stats_mask_add_ranges(
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size,
+ __in_ecount(rng_count) const struct efx_mac_stats_range *rngp,
+ __in unsigned int rng_count);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_IMPL_H */
diff --git a/drivers/net/sfc/base/efx_intr.c b/drivers/net/sfc/base/efx_intr.c
new file mode 100644
index 00000000..f0422d53
--- /dev/null
+++ b/drivers/net/sfc/base/efx_intr.c
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+static void
+siena_intr_enable(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_disable(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+static void
+siena_intr_fini(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp);
+
+static void
+siena_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+static void
+siena_intr_fatal(
+ __in efx_nic_t *enp);
+
+static __checkReturn boolean_t
+siena_intr_check_fatal(
+ __in efx_nic_t *enp);
+
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_intr_ops_t __efx_intr_siena_ops = {
+ siena_intr_init, /* eio_init */
+ siena_intr_enable, /* eio_enable */
+ siena_intr_disable, /* eio_disable */
+ siena_intr_disable_unlocked, /* eio_disable_unlocked */
+ siena_intr_trigger, /* eio_trigger */
+ siena_intr_status_line, /* eio_status_line */
+ siena_intr_status_message, /* eio_status_message */
+ siena_intr_fatal, /* eio_fatal */
+ siena_intr_fini, /* eio_fini */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_intr_ops_t __efx_intr_ef10_ops = {
+ ef10_intr_init, /* eio_init */
+ ef10_intr_enable, /* eio_enable */
+ ef10_intr_disable, /* eio_disable */
+ ef10_intr_disable_unlocked, /* eio_disable_unlocked */
+ ef10_intr_trigger, /* eio_trigger */
+ ef10_intr_status_line, /* eio_status_line */
+ ef10_intr_status_message, /* eio_status_message */
+ ef10_intr_fatal, /* eio_fatal */
+ ef10_intr_fini, /* eio_fini */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enp->en_mod_flags & EFX_MOD_INTR) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ eip->ei_esmp = esmp;
+ eip->ei_type = type;
+ eip->ei_level = 0;
+
+ enp->en_mod_flags |= EFX_MOD_INTR;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ eiop = &__efx_intr_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ eiop = &__efx_intr_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ eiop = &__efx_intr_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ if ((rc = eiop->eio_init(enp, type, esmp)) != 0)
+ goto fail3;
+
+ eip->ei_eiop = eiop;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_intr_fini(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_fini(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_INTR;
+}
+
+ void
+efx_intr_enable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_enable(enp);
+}
+
+ void
+efx_intr_disable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_disable(enp);
+}
+
+ void
+efx_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_disable_unlocked(enp);
+}
+
+
+ __checkReturn efx_rc_t
+efx_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ return (eiop->eio_trigger(enp, level));
+}
+
+ void
+efx_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_status_line(enp, fatalp, qmaskp);
+}
+
+ void
+efx_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_status_message(enp, message, fatalp);
+}
+
+ void
+efx_intr_fatal(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_fatal(enp);
+}
+
+
+/* ************************************************************************* */
+/* ************************************************************************* */
+/* ************************************************************************* */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+
+ /*
+ * bug17213 workaround.
+ *
+ * Under legacy interrupts, don't share a level between fatal
+ * interrupts and event queue interrupts. Under MSI-X, they
+ * must share, or we won't get an interrupt.
+ */
+ if (enp->en_family == EFX_FAMILY_SIENA &&
+ eip->ei_type == EFX_INTR_LINE)
+ eip->ei_level = 0x1f;
+ else
+ eip->ei_level = 0;
+
+ /* Enable all the genuinely fatal interrupts */
+ EFX_SET_OWORD(oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_ILL_ADR_INT_KER_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RBUF_OWN_INT_KER_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TBUF_OWN_INT_KER_EN, 0);
+ if (enp->en_family >= EFX_FAMILY_SIENA)
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_FATAL_INTR_REG_KER, &oword);
+
+ /* Set up the interrupt address register */
+ EFX_POPULATE_OWORD_3(oword,
+ FRF_AZ_NORM_INT_VEC_DIS_KER, (type == EFX_INTR_MESSAGE) ? 1 : 0,
+ FRF_AZ_INT_ADR_KER_DW0, EFSYS_MEM_ADDR(esmp) & 0xffffffff,
+ FRF_AZ_INT_ADR_KER_DW1, EFSYS_MEM_ADDR(esmp) >> 32);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
+
+ return (0);
+}
+
+static void
+siena_intr_enable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+}
+
+static void
+siena_intr_disable(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ EFSYS_SPIN(10);
+}
+
+static void
+siena_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFSYS_BAR_READO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
+ &oword, B_FALSE);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
+ EFSYS_BAR_WRITEO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
+ &oword, B_FALSE);
+}
+
+static __checkReturn efx_rc_t
+siena_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+ unsigned int count;
+ uint32_t sel;
+ efx_rc_t rc;
+
+ /* bug16757: No event queues can be initialized */
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+
+ if (level >= EFX_NINTR_SIENA) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (level > EFX_MASK32(FRF_AZ_KER_INT_LEVE_SEL))
+ return (ENOTSUP); /* avoid EFSYS_PROBE() */
+
+ sel = level;
+
+ /* Trigger a test interrupt */
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, sel);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ /*
+ * Wait up to 100ms for the interrupt to be raised before restoring
+ * KER_INT_LEVE_SEL. Ignore a failure to raise (the caller will
+ * observe this soon enough anyway), but always reset KER_INT_LEVE_SEL
+ */
+ count = 0;
+ do {
+ EFSYS_SPIN(100); /* 100us */
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ } while (EFX_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER) && ++count < 1000);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn boolean_t
+siena_intr_check_fatal(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efsys_mem_t *esmp = eip->ei_esmp;
+ efx_oword_t oword;
+
+ /* Read the syndrome */
+ EFSYS_MEM_READO(esmp, 0, &oword);
+
+ if (EFX_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT) != 0) {
+ EFSYS_PROBE(fatal);
+
+ /* Clear the fatal interrupt condition */
+ EFX_SET_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT, 0);
+ EFSYS_MEM_WRITEO(esmp, 0, &oword);
+
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+static void
+siena_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_dword_t dword;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ /*
+ * Read the queue mask and implicitly acknowledge the
+ * interrupt.
+ */
+ EFX_BAR_READD(enp, FR_BZ_INT_ISR0_REG, &dword, B_FALSE);
+ *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
+
+ if (*qmaskp & (1U << eip->ei_level))
+ *fatalp = siena_intr_check_fatal(enp);
+ else
+ *fatalp = B_FALSE;
+}
+
+static void
+siena_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ if (message == eip->ei_level)
+ *fatalp = siena_intr_check_fatal(enp);
+ else
+ *fatalp = B_FALSE;
+}
+
+
+static void
+siena_intr_fatal(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_DECODE_INTR_FATAL
+ efx_oword_t fatal;
+ efx_oword_t mem_per;
+
+ EFX_BAR_READO(enp, FR_AZ_FATAL_INTR_REG_KER, &fatal);
+ EFX_ZERO_OWORD(mem_per);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0 ||
+ EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
+ EFX_BAR_READO(enp, FR_AZ_MEM_STAT_REG, &mem_per);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRAM_OOB_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_OOB, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_BUFID_DC_OOB_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_BUFID_DC_OOB, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_MEM_PERR,
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_RBUF_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_RBUF_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_TBUF_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_TBUF_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_RDESCQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_RDESQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_TDESCQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_TDESQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_EVQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVF_OFLO_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_EVFF_OFLO, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_ILL_ADR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_ILL_ADDR, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_PERR,
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));
+#else
+ EFSYS_ASSERT(0);
+#endif
+}
+
+static void
+siena_intr_fini(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /* Clear the interrupt address register */
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/efx_lic.c b/drivers/net/sfc/base/efx_lic.c
new file mode 100644
index 00000000..2cd05cc8
--- /dev/null
+++ b/drivers/net/sfc/base/efx_lic.c
@@ -0,0 +1,1751 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_LICENSING
+
+#include "ef10_tlv_layout.h"
+
+#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_update_license(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static const efx_lic_ops_t __efx_lic_v1_ops = {
+ efx_mcdi_fc_license_update_license, /* elo_update_licenses */
+ efx_mcdi_fc_license_get_key_stats, /* elo_get_key_stats */
+ NULL, /* elo_app_state */
+ NULL, /* elo_get_id */
+ efx_lic_v1v2_find_start, /* elo_find_start */
+ efx_lic_v1v2_find_end, /* elo_find_end */
+ efx_lic_v1v2_find_key, /* elo_find_key */
+ efx_lic_v1v2_validate_key, /* elo_validate_key */
+ efx_lic_v1v2_read_key, /* elo_read_key */
+ efx_lic_v1v2_write_key, /* elo_write_key */
+ efx_lic_v1v2_delete_key, /* elo_delete_key */
+ efx_lic_v1v2_create_partition, /* elo_create_partition */
+ efx_lic_v1v2_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_update_licenses(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensed_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+static const efx_lic_ops_t __efx_lic_v2_ops = {
+ efx_mcdi_licensing_update_licenses, /* elo_update_licenses */
+ efx_mcdi_licensing_get_key_stats, /* elo_get_key_stats */
+ efx_mcdi_licensed_app_state, /* elo_app_state */
+ NULL, /* elo_get_id */
+ efx_lic_v1v2_find_start, /* elo_find_start */
+ efx_lic_v1v2_find_end, /* elo_find_end */
+ efx_lic_v1v2_find_key, /* elo_find_key */
+ efx_lic_v1v2_validate_key, /* elo_validate_key */
+ efx_lic_v1v2_read_key, /* elo_read_key */
+ efx_lic_v1v2_write_key, /* elo_write_key */
+ efx_lic_v1v2_delete_key, /* elo_delete_key */
+ efx_lic_v1v2_create_partition, /* elo_create_partition */
+ efx_lic_v1v2_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_update_licenses(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_report_license(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_bcount_part_opt(buffer_size, *lengthp)
+ uint8_t *bufferp);
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+static const efx_lic_ops_t __efx_lic_v3_ops = {
+ efx_mcdi_licensing_v3_update_licenses, /* elo_update_licenses */
+ efx_mcdi_licensing_v3_report_license, /* elo_get_key_stats */
+ efx_mcdi_licensing_v3_app_state, /* elo_app_state */
+ efx_mcdi_licensing_v3_get_id, /* elo_get_id */
+ efx_lic_v3_find_start, /* elo_find_start*/
+ efx_lic_v3_find_end, /* elo_find_end */
+ efx_lic_v3_find_key, /* elo_find_key */
+ efx_lic_v3_validate_key, /* elo_validate_key */
+ efx_lic_v3_read_key, /* elo_read_key */
+ efx_lic_v3_write_key, /* elo_write_key */
+ efx_lic_v3_delete_key, /* elo_delete_key */
+ efx_lic_v3_create_partition, /* elo_create_partition */
+ efx_lic_v3_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_MEDFORD */
+
+
+/* V1 Licensing - used in Siena Modena only */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_update_license(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_FC_IN_LICENSE_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, FC_IN_CMD,
+ MC_CMD_FC_OP_LICENSE);
+
+ MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
+ MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used != 0) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FC_IN_LICENSE_LEN,
+ MC_CMD_FC_OUT_LICENSE_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FC_OUT_LICENSE_LEN;
+
+ MCDI_IN_SET_DWORD(req, FC_IN_CMD,
+ MC_CMD_FC_OP_LICENSE);
+
+ MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
+ MC_CMD_FC_IN_LICENSE_GET_KEY_STATS);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FC_OUT_LICENSE_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_VALID_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_INVALID_KEYS);
+ eksp->eks_blacklisted =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_BLACKLISTED_KEYS);
+ eksp->eks_unverifiable = 0;
+ eksp->eks_wrong_node = 0;
+ eksp->eks_licensed_apps_lo = 0;
+ eksp->eks_licensed_apps_hi = 0;
+ eksp->eks_licensed_features_lo = 0;
+ eksp->eks_licensed_features_hi = 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+/* V1 and V2 Partition format - based on a 16-bit TLV format */
+
+#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
+
+/*
+ * V1/V2 format - defined in SF-108542-TC section 4.2:
+ * Type (T): 16bit - revision/HMAC algorithm
+ * Length (L): 16bit - value length in bytes
+ * Value (V): L bytes - payload
+ */
+#define EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX (256)
+#define EFX_LICENSE_V1V2_HEADER_LENGTH (2 * sizeof(uint16_t))
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ *startp = 0;
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ *endp = offset + EFX_LICENSE_V1V2_HEADER_LENGTH;
+ return (0);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ boolean_t found;
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((size_t)buffer_size - offset < EFX_LICENSE_V1V2_HEADER_LENGTH)
+ goto fail1;
+
+ tlv_type = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[0]);
+ tlv_length = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[1]);
+ if ((tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) ||
+ (tlv_type == 0 && tlv_length == 0)) {
+ found = B_FALSE;
+ } else {
+ *startp = offset;
+ *lengthp = tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH;
+ found = B_TRUE;
+ }
+ return (found);
+
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (length < EFX_LICENSE_V1V2_HEADER_LENGTH) {
+ goto fail1;
+ }
+
+ tlv_type = __LE_TO_CPU_16(((uint16_t *)keyp)[0]);
+ tlv_length = __LE_TO_CPU_16(((uint16_t *)keyp)[1]);
+
+ if (tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) {
+ goto fail2;
+ }
+ if (tlv_type == 0) {
+ goto fail3;
+ }
+ if ((tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH) != length) {
+ goto fail4;
+ }
+
+ return (B_TRUE);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
+ EFX_LICENSE_V1V2_HEADER_LENGTH));
+
+ if (key_max_size < length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ memcpy(keyp, &bufferp[offset], length);
+
+ *lengthp = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
+ EFX_LICENSE_V1V2_HEADER_LENGTH));
+
+ /* Ensure space for terminator remains */
+ if ((offset + length) >
+ (buffer_size - EFX_LICENSE_V1V2_HEADER_LENGTH)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ memcpy(bufferp + offset, keyp, length);
+
+ *lengthp = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ uint32_t move_start = offset + length;
+ uint32_t move_length = end - move_start;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(end <= buffer_size);
+
+ /* Shift everything after the key down */
+ memmove(bufferp + offset, bufferp + move_start, move_length);
+
+ *deltap = length;
+
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(EFX_LICENSE_V1V2_HEADER_LENGTH <= buffer_size);
+
+ /* Write terminator */
+ memset(bufferp, '\0', EFX_LICENSE_V1V2_HEADER_LENGTH);
+ return (0);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
+
+
+/* V2 Licensing - used by Huntington family only. See SF-113611-TC */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensed_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LICENSED_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)];
+ uint32_t app_state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ /* V2 licensing supports 32bit app id only */
+ if ((app_id >> 32) != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LICENSED_APP_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LICENSED_APP_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_APP_STATE_IN_APP_ID,
+ app_id & 0xffffffff);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_APP_STATE_OUT_STATE));
+ if (app_state != MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED) {
+ *licensedp = B_TRUE;
+ } else {
+ *licensedp = B_FALSE;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_update_licenses(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_LICENSING_IN_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_IN_OP,
+ MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used != 0) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_IN_LEN,
+ MC_CMD_LICENSING_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LICENSING_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_IN_OP,
+ MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_VALID_APP_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_INVALID_APP_KEYS);
+ eksp->eks_blacklisted =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_BLACKLISTED_APP_KEYS);
+ eksp->eks_unverifiable =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_UNVERIFIABLE_APP_KEYS);
+ eksp->eks_wrong_node =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_WRONG_NODE_APP_KEYS);
+ eksp->eks_licensed_apps_lo = 0;
+ eksp->eks_licensed_apps_hi = 0;
+ eksp->eks_licensed_features_lo = 0;
+ eksp->eks_licensed_features_hi = 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+/* V3 Licensing - used starting from Medford family. See SF-114884-SW */
+
+#if EFSYS_OPT_MEDFORD
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_update_licenses(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING_V3;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP,
+ MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_report_license(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_V3_IN_LEN,
+ MC_CMD_LICENSING_V3_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING_V3;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LICENSING_V3_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP,
+ MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_V3_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_VALID_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_INVALID_KEYS);
+ eksp->eks_blacklisted = 0;
+ eksp->eks_unverifiable =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_UNVERIFIABLE_KEYS);
+ eksp->eks_wrong_node =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_WRONG_NODE_KEYS);
+ eksp->eks_licensed_apps_lo =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_LO);
+ eksp->eks_licensed_apps_hi =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_HI);
+ eksp->eks_licensed_features_lo =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_LO);
+ eksp->eks_licensed_features_hi =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_HI);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN)];
+ uint32_t app_state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO,
+ app_id & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI,
+ app_id >> 32);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_V3_APP_STATE_OUT_STATE));
+ if (app_state != MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED) {
+ *licensedp = B_TRUE;
+ } else {
+ *licensedp = B_FALSE;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_bcount_part_opt(buffer_size, *lengthp)
+ uint8_t *bufferp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_GET_ID_V3_IN_LEN,
+ MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)];
+ efx_rc_t rc;
+
+ req.emr_cmd = MC_CMD_LICENSING_GET_ID_V3;
+
+ if (bufferp == NULL) {
+ /* Request id type and length only */
+ req.emr_in_buf = bufferp;
+ req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
+ req.emr_out_buf = bufferp;
+ req.emr_out_length = MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
+ (void) memset(payload, 0, sizeof (payload));
+ } else {
+ /* Request full buffer */
+ req.emr_in_buf = bufferp;
+ req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
+ req.emr_out_buf = bufferp;
+ req.emr_out_length = MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
+ (void) memset(bufferp, 0, req.emr_out_length);
+ }
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *typep = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_TYPE);
+ *lengthp = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH);
+
+ if (bufferp == NULL) {
+ /* modify length requirements to indicate to caller the extra buffering
+ ** needed to read the complete output.
+ */
+ *lengthp += MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
+ } else {
+ /* Shift ID down to start of buffer */
+ memmove(bufferp,
+ bufferp + MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST,
+ *lengthp);
+ memset(bufferp + (*lengthp), 0,
+ MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* V3 format uses Huntington TLV format partition. See SF-108797-SW */
+#define EFX_LICENSE_V3_KEY_LENGTH_MIN (64)
+#define EFX_LICENSE_V3_KEY_LENGTH_MAX (160)
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_item_start(bufferp, buffer_size, startp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_end(bufferp, buffer_size, offset, endp);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_item(bufferp, buffer_size,
+ offset, startp, lengthp);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ /* Check key is a valid V3 key */
+ uint8_t key_type;
+ uint8_t key_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (length < EFX_LICENSE_V3_KEY_LENGTH_MIN) {
+ goto fail1;
+ }
+
+ if (length > EFX_LICENSE_V3_KEY_LENGTH_MAX) {
+ goto fail2;
+ }
+
+ key_type = ((uint8_t *)keyp)[0];
+ key_length = ((uint8_t *)keyp)[1];
+
+ if (key_type < 3) {
+ goto fail3;
+ }
+ if (key_length > length) {
+ goto fail4;
+ }
+ return (B_TRUE);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_get_item(bufferp, buffer_size,
+ offset, length, keyp, key_max_size, lengthp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= EFX_LICENSE_V3_KEY_LENGTH_MAX);
+
+ return ef10_nvram_buffer_insert_item(bufferp, buffer_size,
+ offset, keyp, length, lengthp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((rc = ef10_nvram_buffer_delete_item(bufferp,
+ buffer_size, offset, length, end)) != 0) {
+ goto fail1;
+ }
+
+ *deltap = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ efx_rc_t rc;
+
+ /* Construct empty partition */
+ if ((rc = ef10_nvram_buffer_create(enp,
+ NVRAM_PARTITION_TYPE_LICENSE,
+ bufferp, buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_buffer_finish(bufferp,
+ buffer_size)) != 0) {
+ goto fail1;
+ }
+
+ /* Validate completed partition */
+ if ((rc = ef10_nvram_buffer_validate(enp, NVRAM_PARTITION_TYPE_LICENSE,
+ bufferp, buffer_size)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#endif /* EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_lic_init(
+ __in efx_nic_t *enp)
+{
+ const efx_lic_ops_t *elop;
+ efx_key_stats_t eks;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_LIC));
+
+ switch (enp->en_family) {
+
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ elop = &__efx_lic_v1_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ elop = &__efx_lic_v2_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ elop = &__efx_lic_v3_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ enp->en_elop = elop;
+ enp->en_mod_flags |= EFX_MOD_LIC;
+
+ /* Probe for support */
+ if (efx_lic_get_key_stats(enp, &eks) == 0) {
+ enp->en_licensing_supported = B_TRUE;
+ } else {
+ enp->en_licensing_supported = B_FALSE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+extern __checkReturn boolean_t
+efx_lic_check_support(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ return enp->en_licensing_supported;
+}
+
+ void
+efx_lic_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ enp->en_elop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_LIC;
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_update_licenses(
+ __in efx_nic_t *enp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_update_licenses(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_get_key_stats(enp, eksp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if (elop->elo_app_state == NULL)
+ return (ENOTSUP);
+
+ if ((rc = elop->elo_app_state(enp, app_id, licensedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_opt uint8_t *bufferp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if (elop->elo_get_id == NULL)
+ return (ENOTSUP);
+
+ if ((rc = elop->elo_get_id(enp, buffer_size, typep,
+ lengthp, bufferp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Buffer management API - abstracts varying TLV format used for License partition */
+
+ __checkReturn efx_rc_t
+efx_lic_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_find_start(enp, bufferp, buffer_size, startp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_find_end(enp, bufferp, buffer_size, offset, endp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ EFSYS_ASSERT(bufferp);
+ EFSYS_ASSERT(startp);
+ EFSYS_ASSERT(lengthp);
+
+ return (elop->elo_find_key(enp, bufferp, buffer_size, offset,
+ startp, lengthp));
+}
+
+
+/* Validate that the buffer contains a single key in a recognised format.
+** An empty or terminator buffer is not accepted as a valid key.
+*/
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ boolean_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_validate_key(enp, keyp, length)) == B_FALSE)
+ goto fail1;
+
+ return (B_TRUE);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_read_key(enp, bufferp, buffer_size, offset,
+ length, keyp, key_max_size, lengthp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_write_key(enp, bufferp, buffer_size, offset,
+ keyp, length, lengthp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_delete_key(enp, bufferp, buffer_size, offset,
+ length, end, deltap)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_create_partition(enp, bufferp, buffer_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_finish_partition(enp, bufferp, buffer_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LICENSING */
diff --git a/drivers/net/sfc/base/efx_mac.c b/drivers/net/sfc/base/efx_mac.c
new file mode 100644
index 00000000..752e7205
--- /dev/null
+++ b/drivers/net/sfc/base/efx_mac.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_mac_multicast_list_set(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_mac_ops_t __efx_siena_mac_ops = {
+ siena_mac_poll, /* emo_poll */
+ siena_mac_up, /* emo_up */
+ siena_mac_reconfigure, /* emo_addr_set */
+ siena_mac_reconfigure, /* emo_pdu_set */
+ siena_mac_pdu_get, /* emo_pdu_get */
+ siena_mac_reconfigure, /* emo_reconfigure */
+ siena_mac_multicast_list_set, /* emo_multicast_list_set */
+ NULL, /* emo_filter_set_default_rxq */
+ NULL, /* emo_filter_default_rxq_clear */
+#if EFSYS_OPT_LOOPBACK
+ siena_mac_loopback_set, /* emo_loopback_set */
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ siena_mac_stats_get_mask, /* emo_stats_get_mask */
+ efx_mcdi_mac_stats_clear, /* emo_stats_clear */
+ efx_mcdi_mac_stats_upload, /* emo_stats_upload */
+ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */
+ siena_mac_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MAC_STATS */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_mac_ops_t __efx_ef10_mac_ops = {
+ ef10_mac_poll, /* emo_poll */
+ ef10_mac_up, /* emo_up */
+ ef10_mac_addr_set, /* emo_addr_set */
+ ef10_mac_pdu_set, /* emo_pdu_set */
+ ef10_mac_pdu_get, /* emo_pdu_get */
+ ef10_mac_reconfigure, /* emo_reconfigure */
+ ef10_mac_multicast_list_set, /* emo_multicast_list_set */
+ ef10_mac_filter_default_rxq_set, /* emo_filter_default_rxq_set */
+ ef10_mac_filter_default_rxq_clear,
+ /* emo_filter_default_rxq_clear */
+#if EFSYS_OPT_LOOPBACK
+ ef10_mac_loopback_set, /* emo_loopback_set */
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ ef10_mac_stats_get_mask, /* emo_stats_get_mask */
+ efx_mcdi_mac_stats_clear, /* emo_stats_clear */
+ efx_mcdi_mac_stats_upload, /* emo_stats_upload */
+ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */
+ ef10_mac_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MAC_STATS */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_mac_pdu_set(
+ __in efx_nic_t *enp,
+ __in size_t pdu)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint32_t old_pdu;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if (pdu < EFX_MAC_PDU_MIN) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (pdu > EFX_MAC_PDU_MAX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ old_pdu = epp->ep_mac_pdu;
+ epp->ep_mac_pdu = (uint32_t)pdu;
+ if ((rc = emop->emo_pdu_set(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ epp->ep_mac_pdu = old_pdu;
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ if ((rc = emop->emo_pdu_get(enp, pdu)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_addr_set(
+ __in efx_nic_t *enp,
+ __in uint8_t *addr)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint8_t old_addr[6];
+ uint32_t oui;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (EFX_MAC_ADDR_IS_MULTICAST(addr)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ oui = addr[0] << 16 | addr[1] << 8 | addr[2];
+ if (oui == 0x000000) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ EFX_MAC_ADDR_COPY(old_addr, epp->ep_mac_addr);
+ EFX_MAC_ADDR_COPY(epp->ep_mac_addr, addr);
+ if ((rc = emop->emo_addr_set(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFX_MAC_ADDR_COPY(epp->ep_mac_addr, old_addr);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_filter_set(
+ __in efx_nic_t *enp,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ boolean_t old_all_unicst;
+ boolean_t old_mulcst;
+ boolean_t old_all_mulcst;
+ boolean_t old_brdcst;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ old_all_unicst = epp->ep_all_unicst;
+ old_mulcst = epp->ep_mulcst;
+ old_all_mulcst = epp->ep_all_mulcst;
+ old_brdcst = epp->ep_brdcst;
+
+ epp->ep_all_unicst = all_unicst;
+ epp->ep_mulcst = mulcst;
+ epp->ep_all_mulcst = all_mulcst;
+ epp->ep_brdcst = brdcst;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_all_unicst = old_all_unicst;
+ epp->ep_mulcst = old_mulcst;
+ epp->ep_all_mulcst = old_all_mulcst;
+ epp->ep_brdcst = old_brdcst;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_drain(
+ __in efx_nic_t *enp,
+ __in boolean_t enabled)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if (epp->ep_mac_drain == enabled)
+ return (0);
+
+ epp->ep_mac_drain = enabled;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((rc = emop->emo_up(enp, mac_upp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_fcntl_set(
+ __in efx_nic_t *enp,
+ __in unsigned int fcntl,
+ __in boolean_t autoneg)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ unsigned int old_fcntl;
+ boolean_t old_autoneg;
+ unsigned int old_adv_cap;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((fcntl & ~(EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE)) != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Ignore a request to set flow control auto-negotiation
+ * if the PHY doesn't support it.
+ */
+ if (~epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
+ autoneg = B_FALSE;
+
+ old_fcntl = epp->ep_fcntl;
+ old_autoneg = epp->ep_fcntl_autoneg;
+ old_adv_cap = epp->ep_adv_cap_mask;
+
+ epp->ep_fcntl = fcntl;
+ epp->ep_fcntl_autoneg = autoneg;
+
+ /*
+ * Always encode the flow control settings in the advertised
+ * capabilities even if we are not trying to auto-negotiate
+ * them and reconfigure both the PHY and the MAC.
+ */
+ if (fcntl & EFX_FCNTL_RESPOND)
+ epp->ep_adv_cap_mask |= (1 << EFX_PHY_CAP_PAUSE |
+ 1 << EFX_PHY_CAP_ASYM);
+ else
+ epp->ep_adv_cap_mask &= ~(1 << EFX_PHY_CAP_PAUSE |
+ 1 << EFX_PHY_CAP_ASYM);
+
+ if (fcntl & EFX_FCNTL_GENERATE)
+ epp->ep_adv_cap_mask ^= (1 << EFX_PHY_CAP_ASYM);
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ epp->ep_fcntl = old_fcntl;
+ epp->ep_fcntl_autoneg = old_autoneg;
+ epp->ep_adv_cap_mask = old_adv_cap;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_mac_fcntl_get(
+ __in efx_nic_t *enp,
+ __out unsigned int *fcntl_wantedp,
+ __out unsigned int *fcntl_linkp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int wanted = 0;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ /*
+ * Decode the requested flow control settings from the PHY
+ * advertised capabilities.
+ */
+ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_PAUSE))
+ wanted = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_ASYM))
+ wanted ^= EFX_FCNTL_GENERATE;
+
+ *fcntl_linkp = epp->ep_fcntl;
+ *fcntl_wantedp = wanted;
+}
+
+ __checkReturn efx_rc_t
+efx_mac_multicast_list_set(
+ __in efx_nic_t *enp,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint8_t *old_mulcst_addr_list = NULL;
+ uint32_t old_mulcst_addr_count;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (count > EFX_MAC_MULTICAST_LIST_MAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ old_mulcst_addr_count = epp->ep_mulcst_addr_count;
+ if (old_mulcst_addr_count > 0) {
+ /* Allocate memory to store old list (instead of using stack) */
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ if (old_mulcst_addr_list == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ /* Save the old list in case we need to rollback */
+ memcpy(old_mulcst_addr_list, epp->ep_mulcst_addr_list,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN);
+ }
+
+ /* Store the new list */
+ memcpy(epp->ep_mulcst_addr_list, addrs,
+ count * EFX_MAC_ADDR_LEN);
+ epp->ep_mulcst_addr_count = count;
+
+ if ((rc = emop->emo_multicast_list_set(enp)) != 0)
+ goto fail3;
+
+ if (old_mulcst_addr_count > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ /* Restore original list on failure */
+ epp->ep_mulcst_addr_count = old_mulcst_addr_count;
+ if (old_mulcst_addr_count > 0) {
+ memcpy(epp->ep_mulcst_addr_list, old_mulcst_addr_list,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN);
+
+ EFSYS_KMEM_FREE(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ }
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+ __checkReturn efx_rc_t
+efx_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (emop->emo_filter_default_rxq_set != NULL) {
+ rc = emop->emo_filter_default_rxq_set(enp, erp, using_rss);
+ if (rc != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (emop->emo_filter_default_rxq_clear != NULL)
+ emop->emo_filter_default_rxq_clear(enp);
+}
+
+
+#if EFSYS_OPT_MAC_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED EfxMacStatNamesBlock c11b91b42f922516 */
+static const char * const __efx_mac_stat_name[] = {
+ "rx_octets",
+ "rx_pkts",
+ "rx_unicst_pkts",
+ "rx_multicst_pkts",
+ "rx_brdcst_pkts",
+ "rx_pause_pkts",
+ "rx_le_64_pkts",
+ "rx_65_to_127_pkts",
+ "rx_128_to_255_pkts",
+ "rx_256_to_511_pkts",
+ "rx_512_to_1023_pkts",
+ "rx_1024_to_15xx_pkts",
+ "rx_ge_15xx_pkts",
+ "rx_errors",
+ "rx_fcs_errors",
+ "rx_drop_events",
+ "rx_false_carrier_errors",
+ "rx_symbol_errors",
+ "rx_align_errors",
+ "rx_internal_errors",
+ "rx_jabber_pkts",
+ "rx_lane0_char_err",
+ "rx_lane1_char_err",
+ "rx_lane2_char_err",
+ "rx_lane3_char_err",
+ "rx_lane0_disp_err",
+ "rx_lane1_disp_err",
+ "rx_lane2_disp_err",
+ "rx_lane3_disp_err",
+ "rx_match_fault",
+ "rx_nodesc_drop_cnt",
+ "tx_octets",
+ "tx_pkts",
+ "tx_unicst_pkts",
+ "tx_multicst_pkts",
+ "tx_brdcst_pkts",
+ "tx_pause_pkts",
+ "tx_le_64_pkts",
+ "tx_65_to_127_pkts",
+ "tx_128_to_255_pkts",
+ "tx_256_to_511_pkts",
+ "tx_512_to_1023_pkts",
+ "tx_1024_to_15xx_pkts",
+ "tx_ge_15xx_pkts",
+ "tx_errors",
+ "tx_sgl_col_pkts",
+ "tx_mult_col_pkts",
+ "tx_ex_col_pkts",
+ "tx_late_col_pkts",
+ "tx_def_pkts",
+ "tx_ex_def_pkts",
+ "pm_trunc_bb_overflow",
+ "pm_discard_bb_overflow",
+ "pm_trunc_vfifo_full",
+ "pm_discard_vfifo_full",
+ "pm_trunc_qbb",
+ "pm_discard_qbb",
+ "pm_discard_mapping",
+ "rxdp_q_disabled_pkts",
+ "rxdp_di_dropped_pkts",
+ "rxdp_streaming_pkts",
+ "rxdp_hlb_fetch",
+ "rxdp_hlb_wait",
+ "vadapter_rx_unicast_packets",
+ "vadapter_rx_unicast_bytes",
+ "vadapter_rx_multicast_packets",
+ "vadapter_rx_multicast_bytes",
+ "vadapter_rx_broadcast_packets",
+ "vadapter_rx_broadcast_bytes",
+ "vadapter_rx_bad_packets",
+ "vadapter_rx_bad_bytes",
+ "vadapter_rx_overflow",
+ "vadapter_tx_unicast_packets",
+ "vadapter_tx_unicast_bytes",
+ "vadapter_tx_multicast_packets",
+ "vadapter_tx_multicast_bytes",
+ "vadapter_tx_broadcast_packets",
+ "vadapter_tx_broadcast_bytes",
+ "vadapter_tx_bad_packets",
+ "vadapter_tx_bad_bytes",
+ "vadapter_tx_overflow",
+};
+/* END MKCONFIG GENERATED EfxMacStatNamesBlock */
+
+ __checkReturn const char *
+efx_mac_stat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(id, <, EFX_MAC_NSTATS);
+ return (__efx_mac_stat_name[id]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+static efx_rc_t
+efx_mac_stats_mask_add_range(
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size,
+ __in const struct efx_mac_stats_range *rngp)
+{
+ unsigned int mask_npages = mask_size / sizeof (*maskp);
+ unsigned int el;
+ unsigned int el_min;
+ unsigned int el_max;
+ unsigned int low;
+ unsigned int high;
+ unsigned int width;
+ efx_rc_t rc;
+
+ if ((mask_npages * EFX_MAC_STATS_MASK_BITS_PER_PAGE) <=
+ (unsigned int)rngp->last) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(rngp->first, <=, rngp->last);
+ EFSYS_ASSERT3U(rngp->last, <, EFX_MAC_NSTATS);
+
+ for (el = 0; el < mask_npages; ++el) {
+ el_min = el * EFX_MAC_STATS_MASK_BITS_PER_PAGE;
+ el_max =
+ el_min + (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1);
+ if ((unsigned int)rngp->first > el_max ||
+ (unsigned int)rngp->last < el_min)
+ continue;
+ low = MAX((unsigned int)rngp->first, el_min);
+ high = MIN((unsigned int)rngp->last, el_max);
+ width = high - low + 1;
+ maskp[el] |=
+ (width == EFX_MAC_STATS_MASK_BITS_PER_PAGE) ?
+ (~0ULL) : (((1ULL << width) - 1) << (low - el_min));
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ efx_rc_t
+efx_mac_stats_mask_add_ranges(
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size,
+ __in_ecount(rng_count) const struct efx_mac_stats_range *rngp,
+ __in unsigned int rng_count)
+{
+ unsigned int i;
+ efx_rc_t rc;
+
+ for (i = 0; i < rng_count; ++i) {
+ if ((rc = efx_mac_stats_mask_add_range(maskp, mask_size,
+ &rngp[i])) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __out_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(maskp != NULL);
+ EFSYS_ASSERT(mask_size % sizeof (maskp[0]) == 0);
+
+ (void) memset(maskp, 0, mask_size);
+
+ if ((rc = emop->emo_stats_get_mask(enp, maskp, mask_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if ((rc = emop->emo_stats_clear(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ /*
+ * Don't assert !ep_mac_stats_pending, because the client might
+ * have failed to finalise statistics when previously stopping
+ * the port.
+ */
+ if ((rc = emop->emo_stats_upload(enp, esmp)) != 0)
+ goto fail1;
+
+ epp->ep_mac_stats_pending = B_TRUE;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(emop != NULL);
+
+ if (emop->emo_stats_periodic == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = emop->emo_stats_periodic(enp, esmp, period_ms, events)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *essp,
+ __inout_opt uint32_t *generationp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ rc = emop->emo_stats_update(enp, esmp, essp, generationp);
+ if (rc == 0)
+ epp->ep_mac_stats_pending = B_FALSE;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+ __checkReturn efx_rc_t
+efx_mac_select(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_type_t type = EFX_MAC_INVALID;
+ const efx_mac_ops_t *emop;
+ int rc = EINVAL;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ emop = &__efx_siena_mac_ops;
+ type = EFX_MAC_SIENA;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ emop = &__efx_ef10_mac_ops;
+ type = EFX_MAC_HUNTINGTON;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ emop = &__efx_ef10_mac_ops;
+ type = EFX_MAC_MEDFORD;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT(type != EFX_MAC_INVALID);
+ EFSYS_ASSERT3U(type, <, EFX_MAC_NTYPES);
+ EFSYS_ASSERT(emop != NULL);
+
+ epp->ep_emop = emop;
+ epp->ep_mac_type = type;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_SIENA
+
+#define EFX_MAC_HASH_BITS (1 << 8)
+
+/* Compute the multicast hash as used on Falcon and Siena. */
+static void
+siena_mac_multicast_hash_compute(
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count,
+ __out efx_oword_t *hash_low,
+ __out efx_oword_t *hash_high)
+{
+ uint32_t crc, index;
+ int i;
+
+ EFSYS_ASSERT(hash_low != NULL);
+ EFSYS_ASSERT(hash_high != NULL);
+
+ EFX_ZERO_OWORD(*hash_low);
+ EFX_ZERO_OWORD(*hash_high);
+
+ for (i = 0; i < count; i++) {
+ /* Calculate hash bucket (IEEE 802.3 CRC32 of the MAC addr) */
+ crc = efx_crc32_calculate(0xffffffff, addrs, EFX_MAC_ADDR_LEN);
+ index = crc % EFX_MAC_HASH_BITS;
+ if (index < 128) {
+ EFX_SET_OWORD_BIT(*hash_low, index);
+ } else {
+ EFX_SET_OWORD_BIT(*hash_high, index - 128);
+ }
+
+ addrs += EFX_MAC_ADDR_LEN;
+ }
+}
+
+static __checkReturn efx_rc_t
+siena_mac_multicast_list_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_oword_t old_hash[2];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ memcpy(old_hash, epp->ep_multicst_hash, sizeof (old_hash));
+
+ siena_mac_multicast_hash_compute(
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count,
+ &epp->ep_multicst_hash[0],
+ &epp->ep_multicst_hash[1]);
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ memcpy(epp->ep_multicst_hash, old_hash, sizeof (old_hash));
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/efx_mcdi.c b/drivers/net/sfc/base/efx_mcdi.c
new file mode 100644
index 00000000..c61b943c
--- /dev/null
+++ b/drivers/net/sfc/base/efx_mcdi.c
@@ -0,0 +1,2346 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MCDI
+
+/*
+ * There are three versions of the MCDI interface:
+ * - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers.
+ * - MCDIv1: Siena firmware and Huntington BootROM.
+ * - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM.
+ * Transport uses MCDIv2 headers.
+ *
+ * MCDIv2 Header NOT_EPOCH flag
+ * ----------------------------
+ * A new epoch begins at initial startup or after an MC reboot, and defines when
+ * the MC should reject stale MCDI requests.
+ *
+ * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all
+ * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1.
+ *
+ * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a
+ * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0.
+ */
+
+
+
+#if EFSYS_OPT_SIENA
+
+static const efx_mcdi_ops_t __efx_mcdi_siena_ops = {
+ siena_mcdi_init, /* emco_init */
+ siena_mcdi_send_request, /* emco_send_request */
+ siena_mcdi_poll_reboot, /* emco_poll_reboot */
+ siena_mcdi_poll_response, /* emco_poll_response */
+ siena_mcdi_read_response, /* emco_read_response */
+ siena_mcdi_fini, /* emco_fini */
+ siena_mcdi_feature_supported, /* emco_feature_supported */
+ siena_mcdi_get_timeout, /* emco_get_timeout */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = {
+ ef10_mcdi_init, /* emco_init */
+ ef10_mcdi_send_request, /* emco_send_request */
+ ef10_mcdi_poll_reboot, /* emco_poll_reboot */
+ ef10_mcdi_poll_response, /* emco_poll_response */
+ ef10_mcdi_read_response, /* emco_read_response */
+ ef10_mcdi_fini, /* emco_fini */
+ ef10_mcdi_feature_supported, /* emco_feature_supported */
+ ef10_mcdi_get_timeout, /* emco_get_timeout */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+
+
+ __checkReturn efx_rc_t
+efx_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *emtp)
+{
+ const efx_mcdi_ops_t *emcop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ emcop = &__efx_mcdi_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ emcop = &__efx_mcdi_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ emcop = &__efx_mcdi_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (enp->en_features & EFX_FEATURE_MCDI_DMA) {
+ /* MCDI requires a DMA buffer in host memory */
+ if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ }
+ enp->en_mcdi.em_emtp = emtp;
+
+ if (emcop != NULL && emcop->emco_init != NULL) {
+ if ((rc = emcop->emco_init(enp, emtp)) != 0)
+ goto fail3;
+ }
+
+ enp->en_mcdi.em_emcop = emcop;
+ enp->en_mod_flags |= EFX_MOD_MCDI;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_mcdi.em_emcop = NULL;
+ enp->en_mcdi.em_emtp = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_MCDI;
+
+ return (rc);
+}
+
+ void
+efx_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI);
+
+ if (emcop != NULL && emcop->emco_fini != NULL)
+ emcop->emco_fini(enp);
+
+ emip->emi_port = 0;
+ emip->emi_aborted = 0;
+
+ enp->en_mcdi.em_emcop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_MCDI;
+}
+
+ void
+efx_mcdi_new_epoch(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efsys_lock_state_t state;
+
+ /* Start a new epoch (allow fresh MCDI requests to succeed) */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emip->emi_new_epoch = B_TRUE;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+static void
+efx_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in void *hdrp,
+ __in size_t hdr_len,
+ __in void *sdup,
+ __in size_t sdu_len)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len);
+}
+
+static efx_rc_t
+efx_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ rc = emcop->emco_poll_reboot(enp);
+ return (rc);
+}
+
+static boolean_t
+efx_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ boolean_t available;
+
+ available = emcop->emco_poll_response(enp);
+ return (available);
+}
+
+static void
+efx_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_read_response(enp, bufferp, offset, length);
+}
+
+ void
+efx_mcdi_request_start(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __in boolean_t ev_cpl)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr[2];
+ size_t hdr_len;
+ unsigned int max_version;
+ unsigned int seq;
+ unsigned int xflags;
+ boolean_t new_epoch;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * efx_mcdi_request_start() is naturally serialised against both
+ * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(),
+ * by virtue of there only being one outstanding MCDI request.
+ * Unfortunately, upper layers may also call efx_mcdi_request_abort()
+ * at any time, to timeout a pending mcdi request, That request may
+ * then subsequently complete, meaning efx_mcdi_ev_cpl() or
+ * efx_mcdi_ev_death() may end up running in parallel with
+ * efx_mcdi_request_start(). This race is handled by ensuring that
+ * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the
+ * en_eslp lock.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ EFSYS_ASSERT(emip->emi_pending_req == NULL);
+ emip->emi_pending_req = emrp;
+ emip->emi_ev_cpl = ev_cpl;
+ emip->emi_poll_cnt = 0;
+ seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ);
+ new_epoch = emip->emi_new_epoch;
+ max_version = emip->emi_max_version;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ xflags = 0;
+ if (ev_cpl)
+ xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+
+ /*
+ * Huntington firmware supports MCDIv2, but the Huntington BootROM only
+ * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where
+ * possible to support this.
+ */
+ if ((max_version >= 2) &&
+ ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) ||
+ (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1))) {
+ /* Construct MCDI v2 header */
+ hdr_len = sizeof (hdr);
+ EFX_POPULATE_DWORD_8(hdr[0],
+ MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_DATALEN, 0,
+ MCDI_HEADER_SEQ, seq,
+ MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
+ MCDI_HEADER_ERROR, 0,
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_XFLAGS, xflags);
+
+ EFX_POPULATE_DWORD_2(hdr[1],
+ MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd,
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length);
+ } else {
+ /* Construct MCDI v1 header */
+ hdr_len = sizeof (hdr[0]);
+ EFX_POPULATE_DWORD_8(hdr[0],
+ MCDI_HEADER_CODE, emrp->emr_cmd,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_DATALEN, emrp->emr_in_length,
+ MCDI_HEADER_SEQ, seq,
+ MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
+ MCDI_HEADER_ERROR, 0,
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_XFLAGS, xflags);
+ }
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST,
+ &hdr[0], hdr_len,
+ emrp->emr_in_buf, emrp->emr_in_length);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+ efx_mcdi_send_request(enp, &hdr[0], hdr_len,
+ emrp->emr_in_buf, emrp->emr_in_length);
+}
+
+
+static void
+efx_mcdi_read_response_header(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr[2];
+ unsigned int hdr_len;
+ unsigned int data_len;
+ unsigned int seq;
+ unsigned int cmd;
+ unsigned int error;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(emrp != NULL);
+
+ efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0]));
+ hdr_len = sizeof (hdr[0]);
+
+ cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE);
+ seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ);
+ error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR);
+
+ if (cmd != MC_CMD_V2_EXTN) {
+ data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN);
+ } else {
+ efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
+ hdr_len += sizeof (hdr[1]);
+
+ cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
+ data_len =
+ EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+ if (error && (data_len == 0)) {
+ /* The MC has rebooted since the request was sent. */
+ EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
+ efx_mcdi_poll_reboot(enp);
+ rc = EIO;
+ goto fail1;
+ }
+ if ((cmd != emrp->emr_cmd) ||
+ (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
+ /* Response is for a different request */
+ rc = EIO;
+ goto fail2;
+ }
+ if (error) {
+ efx_dword_t err[2];
+ unsigned int err_len = MIN(data_len, sizeof (err));
+ int err_code = MC_CMD_ERR_EPROTO;
+ int err_arg = 0;
+
+ /* Read error code (and arg num for MCDI v2 commands) */
+ efx_mcdi_read_response(enp, &err, hdr_len, err_len);
+
+ if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t)))
+ err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0);
+#ifdef WITH_MCDI_V2
+ if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t)))
+ err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0);
+#endif
+ emrp->emr_err_code = err_code;
+ emrp->emr_err_arg = err_arg;
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ if ((err_code == MC_CMD_ERR_PROXY_PENDING) &&
+ (err_len == sizeof (err))) {
+ /*
+ * The MCDI request would normally fail with EPERM, but
+ * firmware has forwarded it to an authorization agent
+ * attached to a privileged PF.
+ *
+ * Save the authorization request handle. The client
+ * must wait for a PROXY_RESPONSE event, or timeout.
+ */
+ emrp->emr_proxy_handle = err_arg;
+ }
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context,
+ EFX_LOG_MCDI_RESPONSE,
+ &hdr[0], hdr_len,
+ &err[0], err_len);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+ if (!emrp->emr_quiet) {
+ EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd,
+ int, err_code, int, err_arg);
+ }
+
+ rc = efx_mcdi_request_errcode(err_code);
+ goto fail3;
+ }
+
+ emrp->emr_rc = 0;
+ emrp->emr_out_length_used = data_len;
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ emrp->emr_proxy_handle = 0;
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+ return;
+
+fail3:
+fail2:
+fail1:
+ emrp->emr_rc = rc;
+ emrp->emr_out_length_used = 0;
+}
+
+static void
+efx_mcdi_finish_response(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+ efx_dword_t hdr[2];
+ unsigned int hdr_len;
+ size_t bytes;
+
+ if (emrp->emr_out_buf == NULL)
+ return;
+
+ /* Read the command header to detect MCDI response format */
+ hdr_len = sizeof (hdr[0]);
+ efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len);
+ if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) {
+ /*
+ * Read the actual payload length. The length given in the event
+ * is only correct for responses with the V1 format.
+ */
+ efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
+ hdr_len += sizeof (hdr[1]);
+
+ emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1],
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+ /* Copy payload out into caller supplied buffer */
+ bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length);
+ efx_mcdi_read_response(enp, emrp->emr_out_buf, hdr_len, bytes);
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context,
+ EFX_LOG_MCDI_RESPONSE,
+ &hdr[0], hdr_len,
+ emrp->emr_out_buf, bytes);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+}
+
+
+ __checkReturn boolean_t
+efx_mcdi_request_poll(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t *emrp;
+ efsys_lock_state_t state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /* Serialise against post-watchdog efx_mcdi_ev* */
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ EFSYS_ASSERT(emip->emi_pending_req != NULL);
+ EFSYS_ASSERT(!emip->emi_ev_cpl);
+ emrp = emip->emi_pending_req;
+
+ /* Check for reboot atomically w.r.t efx_mcdi_request_start */
+ if (emip->emi_poll_cnt++ == 0) {
+ if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
+ emip->emi_pending_req = NULL;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ /* Reboot/Assertion */
+ if (rc == EIO || rc == EINTR)
+ efx_mcdi_raise_exception(enp, emrp, rc);
+
+ goto fail1;
+ }
+ }
+
+ /* Check if a response is available */
+ if (efx_mcdi_poll_response(enp) == B_FALSE) {
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (B_FALSE);
+ }
+
+ /* Read the response header */
+ efx_mcdi_read_response_header(enp, emrp);
+
+ /* Request complete */
+ emip->emi_pending_req = NULL;
+
+ /* Ensure stale MCDI requests fail after an MC reboot. */
+ emip->emi_new_epoch = B_FALSE;
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if ((rc = emrp->emr_rc) != 0)
+ goto fail2;
+
+ efx_mcdi_finish_response(enp, emrp);
+ return (B_TRUE);
+
+fail2:
+ if (!emrp->emr_quiet)
+ EFSYS_PROBE(fail2);
+fail1:
+ if (!emrp->emr_quiet)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (B_TRUE);
+}
+
+ __checkReturn boolean_t
+efx_mcdi_request_abort(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t *emrp;
+ boolean_t aborted;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * efx_mcdi_ev_* may have already completed this event, and be
+ * spinning/blocked on the upper layer lock. So it *is* legitimate
+ * to for emi_pending_req to be NULL. If there is a pending event
+ * completed request, then provide a "credit" to allow
+ * efx_mcdi_ev_cpl() to accept a single spurious completion.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emrp = emip->emi_pending_req;
+ aborted = (emrp != NULL);
+ if (aborted) {
+ emip->emi_pending_req = NULL;
+
+ /* Error the request */
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = ETIMEDOUT;
+
+ /* Provide a credit for seqno/emr_pending_req mismatches */
+ if (emip->emi_ev_cpl)
+ ++emip->emi_aborted;
+
+ /*
+ * The upper layer has called us, so we don't
+ * need to complete the request.
+ */
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (aborted);
+}
+
+ void
+efx_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_get_timeout(enp, emrp, timeoutp);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_request_errcode(
+ __in unsigned int err)
+{
+
+ switch (err) {
+ /* MCDI v1 */
+ case MC_CMD_ERR_EPERM:
+ return (EACCES);
+ case MC_CMD_ERR_ENOENT:
+ return (ENOENT);
+ case MC_CMD_ERR_EINTR:
+ return (EINTR);
+ case MC_CMD_ERR_EACCES:
+ return (EACCES);
+ case MC_CMD_ERR_EBUSY:
+ return (EBUSY);
+ case MC_CMD_ERR_EINVAL:
+ return (EINVAL);
+ case MC_CMD_ERR_EDEADLK:
+ return (EDEADLK);
+ case MC_CMD_ERR_ENOSYS:
+ return (ENOTSUP);
+ case MC_CMD_ERR_ETIME:
+ return (ETIMEDOUT);
+ case MC_CMD_ERR_ENOTSUP:
+ return (ENOTSUP);
+ case MC_CMD_ERR_EALREADY:
+ return (EALREADY);
+
+ /* MCDI v2 */
+ case MC_CMD_ERR_EEXIST:
+ return (EEXIST);
+#ifdef MC_CMD_ERR_EAGAIN
+ case MC_CMD_ERR_EAGAIN:
+ return (EAGAIN);
+#endif
+#ifdef MC_CMD_ERR_ENOSPC
+ case MC_CMD_ERR_ENOSPC:
+ return (ENOSPC);
+#endif
+ case MC_CMD_ERR_ERANGE:
+ return (ERANGE);
+
+ case MC_CMD_ERR_ALLOC_FAIL:
+ return (ENOMEM);
+ case MC_CMD_ERR_NO_VADAPTOR:
+ return (ENOENT);
+ case MC_CMD_ERR_NO_EVB_PORT:
+ return (ENOENT);
+ case MC_CMD_ERR_NO_VSWITCH:
+ return (ENODEV);
+ case MC_CMD_ERR_VLAN_LIMIT:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_PCI_FUNC:
+ return (ENODEV);
+ case MC_CMD_ERR_BAD_VLAN_MODE:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_VSWITCH_TYPE:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_VPORT_TYPE:
+ return (EINVAL);
+ case MC_CMD_ERR_MAC_EXIST:
+ return (EEXIST);
+
+ case MC_CMD_ERR_PROXY_PENDING:
+ return (EAGAIN);
+
+ default:
+ EFSYS_PROBE1(mc_pcol_error, int, err);
+ return (EIO);
+ }
+}
+
+ void
+efx_mcdi_raise_exception(
+ __in efx_nic_t *enp,
+ __in_opt efx_mcdi_req_t *emrp,
+ __in int rc)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_exception_t exception;
+
+ /* Reboot or Assertion failure only */
+ EFSYS_ASSERT(rc == EIO || rc == EINTR);
+
+ /*
+ * If MC_CMD_REBOOT causes a reboot (dependent on parameters),
+ * then the EIO is not worthy of an exception.
+ */
+ if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO)
+ return;
+
+ exception = (rc == EIO)
+ ? EFX_MCDI_EXCEPTION_MC_REBOOT
+ : EFX_MCDI_EXCEPTION_MC_BADASSERT;
+
+ emtp->emt_exception(emtp->emt_context, exception);
+}
+
+ void
+efx_mcdi_execute(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ emrp->emr_quiet = B_FALSE;
+ emtp->emt_execute(emtp->emt_context, emrp);
+}
+
+ void
+efx_mcdi_execute_quiet(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ emrp->emr_quiet = B_TRUE;
+ emtp->emt_execute(emtp->emt_context, emrp);
+}
+
+ void
+efx_mcdi_ev_cpl(
+ __in efx_nic_t *enp,
+ __in unsigned int seq,
+ __in unsigned int outlen,
+ __in int errcode)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_req_t *emrp;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start()
+ * when we're completing an aborted request.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl ||
+ (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
+ EFSYS_ASSERT(emip->emi_aborted > 0);
+ if (emip->emi_aborted > 0)
+ --emip->emi_aborted;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return;
+ }
+
+ emrp = emip->emi_pending_req;
+ emip->emi_pending_req = NULL;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (emip->emi_max_version >= 2) {
+ /* MCDIv2 response details do not fit into an event. */
+ efx_mcdi_read_response_header(enp, emrp);
+ } else {
+ if (errcode != 0) {
+ if (!emrp->emr_quiet) {
+ EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd,
+ int, errcode);
+ }
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = efx_mcdi_request_errcode(errcode);
+ } else {
+ emrp->emr_out_length_used = outlen;
+ emrp->emr_rc = 0;
+ }
+ }
+ if (errcode == 0) {
+ efx_mcdi_finish_response(enp, emrp);
+ }
+
+ emtp->emt_ev_cpl(emtp->emt_context);
+}
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_proxy_handle(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *handlep)
+{
+ efx_rc_t rc;
+
+ /*
+ * Return proxy handle from MCDI request that returned with error
+ * MC_MCD_ERR_PROXY_PENDING. This handle is used to wait for a matching
+ * PROXY_RESPONSE event.
+ */
+ if ((emrp == NULL) || (handlep == NULL)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if ((emrp->emr_rc != 0) &&
+ (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) {
+ *handlep = emrp->emr_proxy_handle;
+ rc = 0;
+ } else {
+ *handlep = 0;
+ rc = ENOENT;
+ }
+ return (rc);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_mcdi_ev_proxy_response(
+ __in efx_nic_t *enp,
+ __in unsigned int handle,
+ __in unsigned int status)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_rc_t rc;
+
+ /*
+ * Handle results of an authorization request for a privileged MCDI
+ * command. If authorization was granted then we must re-issue the
+ * original MCDI request. If authorization failed or timed out,
+ * then the original MCDI request should be completed with the
+ * result code from this event.
+ */
+ rc = (status == 0) ? 0 : efx_mcdi_request_errcode(status);
+
+ emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc);
+}
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+ void
+efx_mcdi_ev_death(
+ __in efx_nic_t *enp,
+ __in int rc)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_req_t *emrp = NULL;
+ boolean_t ev_cpl;
+ efsys_lock_state_t state;
+
+ /*
+ * The MCDI request (if there is one) has been terminated, either
+ * by a BADASSERT or REBOOT event.
+ *
+ * If there is an outstanding event-completed MCDI operation, then we
+ * will never receive the completion event (because both MCDI
+ * completions and BADASSERT events are sent to the same evq). So
+ * complete this MCDI op.
+ *
+ * This function might run in parallel with efx_mcdi_request_poll()
+ * for poll completed mcdi requests, and also with
+ * efx_mcdi_request_start() for post-watchdog completions.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emrp = emip->emi_pending_req;
+ ev_cpl = emip->emi_ev_cpl;
+ if (emrp != NULL && emip->emi_ev_cpl) {
+ emip->emi_pending_req = NULL;
+
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = rc;
+ ++emip->emi_aborted;
+ }
+
+ /*
+ * Since we're running in parallel with a request, consume the
+ * status word before dropping the lock.
+ */
+ if (rc == EIO || rc == EINTR) {
+ EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
+ (void) efx_mcdi_poll_reboot(enp);
+ emip->emi_new_epoch = B_TRUE;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ efx_mcdi_raise_exception(enp, emrp, rc);
+
+ if (emrp != NULL && ev_cpl)
+ emtp->emt_ev_cpl(emtp->emt_context);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_version(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(4) uint16_t versionp[4],
+ __out_opt uint32_t *buildp,
+ __out_opt efx_mcdi_boot_t *statusp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_GET_VERSION_IN_LEN,
+ MC_CMD_GET_VERSION_OUT_LEN),
+ MAX(MC_CMD_GET_BOOT_STATUS_IN_LEN,
+ MC_CMD_GET_BOOT_STATUS_OUT_LEN))];
+ efx_word_t *ver_words;
+ uint16_t version[4];
+ uint32_t build;
+ efx_mcdi_boot_t status;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_VERSION;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* bootrom support */
+ if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) {
+ version[0] = version[1] = version[2] = version[3] = 0;
+ build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
+
+ goto version;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_VERSION_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION);
+ version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0);
+ version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0);
+ version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0);
+ version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0);
+ build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
+
+version:
+ /* The bootrom doesn't understand BOOT_STATUS */
+ if (MC_FW_VERSION_IS_BOOTLOADER(build)) {
+ status = EFX_MCDI_BOOT_ROM;
+ goto out;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOOT_STATUS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot access BOOT_STATUS */
+ status = EFX_MCDI_BOOT_PRIMARY;
+ version[0] = version[1] = version[2] = version[3] = 0;
+ build = 0;
+ goto out;
+ }
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail4;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS,
+ GET_BOOT_STATUS_OUT_FLAGS_PRIMARY))
+ status = EFX_MCDI_BOOT_PRIMARY;
+ else
+ status = EFX_MCDI_BOOT_SECONDARY;
+
+out:
+ if (versionp != NULL)
+ memcpy(versionp, version, sizeof (version));
+ if (buildp != NULL)
+ *buildp = build;
+ if (statusp != NULL)
+ *statusp = status;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_capabilities(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *flagsp,
+ __out_opt uint16_t *rx_dpcpu_fw_idp,
+ __out_opt uint16_t *tx_dpcpu_fw_idp,
+ __out_opt uint32_t *flags2p,
+ __out_opt uint32_t *tso2ncp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
+ MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)];
+ boolean_t v2_capable;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CAPABILITIES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (flagsp != NULL)
+ *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);
+
+ if (rx_dpcpu_fw_idp != NULL)
+ *rx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
+
+ if (tx_dpcpu_fw_idp != NULL)
+ *tx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ v2_capable = B_FALSE;
+ else
+ v2_capable = B_TRUE;
+
+ if (flags2p != NULL) {
+ *flags2p = (v2_capable) ?
+ MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2) :
+ 0;
+ }
+
+ if (tso2ncp != NULL) {
+ *tso2ncp = (v2_capable) ?
+ MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS) :
+ 0;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_do_reboot(
+ __in efx_nic_t *enp,
+ __in boolean_t after_assertion)
+{
+ uint8_t payload[MAX(MC_CMD_REBOOT_IN_LEN, MC_CMD_REBOOT_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ /*
+ * We could require the caller to have caused en_mod_flags=0 to
+ * call this function. This doesn't help the other port though,
+ * who's about to get the MC ripped out from underneath them.
+ * Since they have to cope with the subsequent fallout of MCDI
+ * failures, we should as well.
+ */
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_REBOOT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_REBOOT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS,
+ (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0));
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot reboot the MC. */
+ goto out;
+ }
+
+ /* A successful reboot request returns EIO. */
+ if (req.emr_rc != 0 && req.emr_rc != EIO) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+out:
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_reboot(
+ __in efx_nic_t *enp)
+{
+ return (efx_mcdi_do_reboot(enp, B_FALSE));
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_exit_assertion_handler(
+ __in efx_nic_t *enp)
+{
+ return (efx_mcdi_do_reboot(enp, B_TRUE));
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_read_assertion(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_ASSERTS_IN_LEN,
+ MC_CMD_GET_ASSERTS_OUT_LEN)];
+ const char *reason;
+ unsigned int flags;
+ unsigned int index;
+ unsigned int ofst;
+ int retry;
+ efx_rc_t rc;
+
+ /*
+ * Before we attempt to chat to the MC, we should verify that the MC
+ * isn't in it's assertion handler, either due to a previous reboot,
+ * or because we're reinitializing due to an eec_exception().
+ *
+ * Use GET_ASSERTS to read any assertion state that may be present.
+ * Retry this command twice. Once because a boot-time assertion failure
+ * might cause the 1st MCDI request to fail. And once again because
+ * we might race with efx_mcdi_exit_assertion_handler() running on
+ * partner port(s) on the same NIC.
+ */
+ retry = 2;
+ do {
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_ASSERTS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1);
+ efx_mcdi_execute_quiet(enp, &req);
+
+ } while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0);
+
+ if (req.emr_rc != 0) {
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot clear assertions. */
+ goto out;
+ }
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ /* Print out any assertion state recorded */
+ flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+ return (0);
+
+ reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
+ ? "system-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
+ ? "thread-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
+ ? "watchdog reset"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP)
+ ? "illegal address trap"
+ : "unknown assertion";
+ EFSYS_PROBE3(mcpu_assertion,
+ const char *, reason, unsigned int,
+ MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ unsigned int,
+ MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS));
+
+ /* Print out the registers (r1 ... r31) */
+ ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
+ for (index = 1;
+ index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
+ index++) {
+ EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int,
+ EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst),
+ EFX_DWORD_0));
+ ofst += sizeof (efx_dword_t);
+ }
+ EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN);
+
+out:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Internal routines for for specific MCDI requests.
+ */
+
+ __checkReturn efx_rc_t
+efx_mcdi_drv_attach(
+ __in efx_nic_t *enp,
+ __in boolean_t attach)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_DRV_ATTACH_IN_LEN,
+ MC_CMD_DRV_ATTACH_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_DRV_ATTACH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN;
+
+ /*
+ * Use DONT_CARE for the datapath firmware type to ensure that the
+ * driver can attach to an unprivileged function. The datapath firmware
+ * type to use is controlled by the 'sfboot' utility.
+ */
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0);
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1);
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_board_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *board_typep,
+ __out_opt efx_dword_t *capabilitiesp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMIN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOARD_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ if (emip->emi_port == 1) {
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0);
+ } else if (emip->emi_port == 2) {
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1);
+ } else {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ if (capabilitiesp != NULL) {
+ if (emip->emi_port == 1) {
+ *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
+ } else if (emip->emi_port == 2) {
+ *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
+ } else {
+ rc = EINVAL;
+ goto fail4;
+ }
+ }
+
+ if (board_typep != NULL) {
+ *board_typep = MCDI_OUT_DWORD(req,
+ GET_BOARD_CFG_OUT_BOARD_TYPE);
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_resource_limits(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *nevqp,
+ __out_opt uint32_t *nrxqp,
+ __out_opt uint32_t *ntxqp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_RESOURCE_LIMITS_IN_LEN,
+ MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (nevqp != NULL)
+ *nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ);
+ if (nrxqp != NULL)
+ *nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ);
+ if (ntxqp != NULL)
+ *ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_phy_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN,
+ MC_CMD_GET_PHY_CFG_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE);
+#if EFSYS_OPT_NAMES
+ (void) strncpy(encp->enc_phy_name,
+ MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME),
+ MIN(sizeof (encp->enc_phy_name) - 1,
+ MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
+#endif /* EFSYS_OPT_NAMES */
+ (void) memset(encp->enc_phy_revision, 0,
+ sizeof (encp->enc_phy_revision));
+ memcpy(encp->enc_phy_revision,
+ MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION),
+ MIN(sizeof (encp->enc_phy_revision) - 1,
+ MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN));
+#if EFSYS_OPT_PHY_LED_CONTROL
+ encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) |
+ (1 << EFX_PHY_LED_OFF) |
+ (1 << EFX_PHY_LED_ON));
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ /* Get the media type of the fixed port, if recognised. */
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS);
+ epp->ep_fixed_port_type =
+ (efx_phy_media_type_t) MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES)
+ epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID;
+
+ epp->ep_phy_cap_mask =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+#if EFSYS_OPT_PHY_FLAGS
+ encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT);
+
+ /* Populate internal state */
+ encp->enc_mcdi_mdio_channel =
+ (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL);
+
+#if EFSYS_OPT_PHY_STATS
+ encp->enc_mcdi_phy_stat_mask =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK);
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+ encp->enc_bist_mask = 0;
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST_CABLE_SHORT))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT);
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST_CABLE_LONG))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG);
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL);
+#endif /* EFSYS_OPT_BIST */
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_firmware_update_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported updates */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_macaddr_change_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported MAC changes */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_link_control_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported link control */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_spoofing_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported MAC spoofing */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_BIST
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+/*
+ * Enter bist offline mode. This is a fw mode which puts the NIC into a state
+ * where memory BIST tests can be run and not much else can interfere or happen.
+ * A reboot is required to exit this mode.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0);
+ EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_mcdi_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_START_BIST_IN_LEN,
+ MC_CMD_START_BIST_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_START_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_START_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_START_BIST_OUT_LEN;
+
+ switch (type) {
+ case EFX_BIST_TYPE_PHY_NORMAL:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST);
+ break;
+ case EFX_BIST_TYPE_PHY_CABLE_SHORT:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PHY_BIST_CABLE_SHORT);
+ break;
+ case EFX_BIST_TYPE_PHY_CABLE_LONG:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PHY_BIST_CABLE_LONG);
+ break;
+ case EFX_BIST_TYPE_MC_MEM:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_MC_MEM_BIST);
+ break;
+ case EFX_BIST_TYPE_SAT_MEM:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PORT_MEM_BIST);
+ break;
+ case EFX_BIST_TYPE_REG:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_REG_BIST);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+
+/* Enable logging of some events (e.g. link state changes) */
+ __checkReturn efx_rc_t
+efx_mcdi_log_ctrl(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LOG_CTRL_IN_LEN,
+ MC_CMD_LOG_CTRL_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LOG_CTRL;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST,
+ MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
+ MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_MAC_STATS
+
+typedef enum efx_stats_action_e {
+ EFX_STATS_CLEAR,
+ EFX_STATS_UPLOAD,
+ EFX_STATS_ENABLE_NOEVENTS,
+ EFX_STATS_ENABLE_EVENTS,
+ EFX_STATS_DISABLE,
+} efx_stats_action_t;
+
+static __checkReturn efx_rc_t
+efx_mcdi_mac_stats(
+ __in efx_nic_t *enp,
+ __in_opt efsys_mem_t *esmp,
+ __in efx_stats_action_t action,
+ __in uint16_t period_ms)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN,
+ MC_CMD_MAC_STATS_OUT_DMA_LEN)];
+ int clear = (action == EFX_STATS_CLEAR);
+ int upload = (action == EFX_STATS_UPLOAD);
+ int enable = (action == EFX_STATS_ENABLE_NOEVENTS);
+ int events = (action == EFX_STATS_ENABLE_EVENTS);
+ int disable = (action == EFX_STATS_DISABLE);
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_MAC_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_MAC_STATS_OUT_DMA_LEN;
+
+ MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, upload,
+ MAC_STATS_IN_CLEAR, clear,
+ MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable,
+ MAC_STATS_IN_PERIODIC_ENABLE, enable | events,
+ MAC_STATS_IN_PERIODIC_NOEVENT, !events,
+ MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0);
+
+ if (esmp != NULL) {
+ int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);
+
+ EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <=
+ EFX_MAC_STATS_SIZE);
+
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);
+ } else {
+ EFSYS_ASSERT(!upload && !enable && !events);
+ }
+
+ /*
+ * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats,
+ * as this may fail (and leave periodic DMA enabled) if the
+ * vadapter has already been deleted.
+ */
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID,
+ (disable ? EVB_PORT_ID_NULL : enp->en_vport_id));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ /* EF10: Expect ENOENT if no DMA queues are initialised */
+ if ((req.emr_rc != ENOENT) ||
+ (enp->en_rx_qcount + enp->en_tx_qcount != 0)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_clear(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR, 0)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp)
+{
+ efx_rc_t rc;
+
+ /*
+ * The MC DMAs aggregate statistics for our convenience, so we can
+ * avoid having to pull the statistics buffer into the cache to
+ * maintain cumulative statistics.
+ */
+ if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD, 0)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events)
+{
+ efx_rc_t rc;
+
+ /*
+ * The MC DMAs aggregate statistics for our convenience, so we can
+ * avoid having to pull the statistics buffer into the cache to
+ * maintain cumulative statistics.
+ * Huntington uses a fixed 1sec period.
+ * Medford uses a fixed 1sec period before v6.2.1.1033 firmware.
+ */
+ if (period_ms == 0)
+ rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE, 0);
+ else if (events)
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS,
+ period_ms);
+ else
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS,
+ period_ms);
+
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+/*
+ * This function returns the pf and vf number of a function. If it is a pf the
+ * vf number is 0xffff. The vf number is the index of the vf on that
+ * function. So if you have 3 vfs on pf 0 the 3 vfs will return (pf=0,vf=0),
+ * (pf=0,vf=1), (pf=0,vf=2) aand the pf will return (pf=0, vf=0xffff).
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_get_function_info(
+ __in efx_nic_t *enp,
+ __out uint32_t *pfp,
+ __out_opt uint32_t *vfp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_FUNCTION_INFO_IN_LEN,
+ MC_CMD_GET_FUNCTION_INFO_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_FUNCTION_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF);
+ if (vfp != NULL)
+ *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_privilege_mask(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __in uint32_t vf,
+ __out uint32_t *maskp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_PRIVILEGE_MASK_IN_LEN,
+ MC_CMD_PRIVILEGE_MASK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_PRIVILEGE_MASK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN;
+
+ MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION,
+ PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
+ PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_mcdi_set_workaround(
+ __in efx_nic_t *enp,
+ __in uint32_t type,
+ __in boolean_t enabled,
+ __out_opt uint32_t *flagsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_WORKAROUND_IN_LEN,
+ MC_CMD_WORKAROUND_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_WORKAROUND;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_WORKAROUND_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type);
+ MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ? 1 : 0);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (flagsp != NULL) {
+ if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+ *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS);
+ else
+ *flagsp = 0;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_workarounds(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *implementedp,
+ __out_opt uint32_t *enabledp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_GET_WORKAROUNDS_OUT_LEN];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_WORKAROUNDS;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (implementedp != NULL) {
+ *implementedp =
+ MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED);
+ }
+
+ if (enabledp != NULL) {
+ *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED);
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Size of media information page in accordance with SFF-8472 and SFF-8436.
+ * It is used in MCDI interface as well.
+ */
+#define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_phy_media_info(
+ __in efx_nic_t *enp,
+ __in uint32_t mcdi_page,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE))];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length =
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used !=
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) !=
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
+ rc = EIO;
+ goto fail3;
+ }
+
+ memcpy(data,
+ MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
+ len);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * 2-wire device address of the base information in accordance with SFF-8472
+ * Diagnostic Monitoring Interface for Optical Transceivers section
+ * 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE 0xA0
+
+/*
+ * 2-wire device address of the digital diagnostics monitoring interface
+ * in accordance with SFF-8472 Diagnostic Monitoring Interface for Optical
+ * Transceivers section 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM 0xA2
+
+/*
+ * Hard wired 2-wire device address for QSFP+ in accordance with SFF-8436
+ * QSFP+ 10 Gbs 4X PLUGGABLE TRANSCEIVER section 7.4 Device Addressing and
+ * Operation.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP 0xA0
+
+ __checkReturn efx_rc_t
+efx_mcdi_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_rc_t rc;
+ uint32_t mcdi_lower_page;
+ uint32_t mcdi_upper_page;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ /*
+ * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages.
+ * Offset plus length interface allows to access page 0 only.
+ * I.e. non-zero upper pages are not accessible.
+ * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6
+ * QSFP+ Memory Map for details on how information is structured
+ * and accessible.
+ */
+ switch (epp->ep_fixed_port_type) {
+ case EFX_PHY_MEDIA_SFP_PLUS:
+ /*
+ * In accordance with SFF-8472 Diagnostic Monitoring
+ * Interface for Optical Transceivers section 4 Memory
+ * Organization two 2-wire addresses are defined.
+ */
+ switch (dev_addr) {
+ /* Base information */
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE:
+ /*
+ * MCDI page 0 should be used to access lower
+ * page 0 (0x00 - 0x7f) at the device address 0xA0.
+ */
+ mcdi_lower_page = 0;
+ /*
+ * MCDI page 1 should be used to access upper
+ * page 0 (0x80 - 0xff) at the device address 0xA0.
+ */
+ mcdi_upper_page = 1;
+ break;
+ /* Diagnostics */
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM:
+ /*
+ * MCDI page 2 should be used to access lower
+ * page 0 (0x00 - 0x7f) at the device address 0xA2.
+ */
+ mcdi_lower_page = 2;
+ /*
+ * MCDI page 3 should be used to access upper
+ * page 0 (0x80 - 0xff) at the device address 0xA2.
+ */
+ mcdi_upper_page = 3;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+ case EFX_PHY_MEDIA_QSFP_PLUS:
+ switch (dev_addr) {
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP:
+ /*
+ * MCDI page -1 should be used to access lower page 0
+ * (0x00 - 0x7f).
+ */
+ mcdi_lower_page = (uint32_t)-1;
+ /*
+ * MCDI page 0 should be used to access upper page 0
+ * (0x80h - 0xff).
+ */
+ mcdi_upper_page = 0;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
+ uint8_t read_len =
+ MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset);
+
+ rc = efx_mcdi_get_phy_media_info(enp,
+ mcdi_lower_page, offset, read_len, data);
+ if (rc != 0)
+ goto fail2;
+
+ data += read_len;
+ len -= read_len;
+
+ offset = 0;
+ } else {
+ offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE;
+ }
+
+ if (len > 0) {
+ EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+ EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ rc = efx_mcdi_get_phy_media_info(enp,
+ mcdi_upper_page, offset, len, data);
+ if (rc != 0)
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MCDI */
diff --git a/drivers/net/sfc/base/efx_mcdi.h b/drivers/net/sfc/base/efx_mcdi.h
new file mode 100644
index 00000000..21727713
--- /dev/null
+++ b/drivers/net/sfc/base/efx_mcdi.h
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_MCDI_H
+#define _SYS_EFX_MCDI_H
+
+#include "efx.h"
+#include "efx_regs_mcdi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * A reboot/assertion causes the MCDI status word to be set after the
+ * command word is set or a REBOOT event is sent. If we notice a reboot
+ * via these mechanisms then wait 10ms for the status word to be set.
+ */
+#define EFX_MCDI_STATUS_SLEEP_US 10000
+
+struct efx_mcdi_req_s {
+ boolean_t emr_quiet;
+ /* Inputs: Command #, input buffer and length */
+ unsigned int emr_cmd;
+ uint8_t *emr_in_buf;
+ size_t emr_in_length;
+ /* Outputs: retcode, buffer, length, and length used*/
+ efx_rc_t emr_rc;
+ uint8_t *emr_out_buf;
+ size_t emr_out_length;
+ size_t emr_out_length_used;
+ /* Internals: low level transport details */
+ unsigned int emr_err_code;
+ unsigned int emr_err_arg;
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ uint32_t emr_proxy_handle;
+#endif
+};
+
+typedef struct efx_mcdi_iface_s {
+ unsigned int emi_port;
+ unsigned int emi_max_version;
+ unsigned int emi_seq;
+ efx_mcdi_req_t *emi_pending_req;
+ boolean_t emi_ev_cpl;
+ boolean_t emi_new_epoch;
+ int emi_aborted;
+ uint32_t emi_poll_cnt;
+ uint32_t emi_mc_reboot_status;
+} efx_mcdi_iface_t;
+
+extern void
+efx_mcdi_execute(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp);
+
+extern void
+efx_mcdi_execute_quiet(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp);
+
+extern void
+efx_mcdi_ev_cpl(
+ __in efx_nic_t *enp,
+ __in unsigned int seq,
+ __in unsigned int outlen,
+ __in int errcode);
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+extern __checkReturn efx_rc_t
+efx_mcdi_get_proxy_handle(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *handlep);
+
+extern void
+efx_mcdi_ev_proxy_response(
+ __in efx_nic_t *enp,
+ __in unsigned int handle,
+ __in unsigned int status);
+#endif
+
+extern void
+efx_mcdi_ev_death(
+ __in efx_nic_t *enp,
+ __in int rc);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_request_errcode(
+ __in unsigned int err);
+
+extern void
+efx_mcdi_raise_exception(
+ __in efx_nic_t *enp,
+ __in_opt efx_mcdi_req_t *emrp,
+ __in int rc);
+
+typedef enum efx_mcdi_boot_e {
+ EFX_MCDI_BOOT_PRIMARY,
+ EFX_MCDI_BOOT_SECONDARY,
+ EFX_MCDI_BOOT_ROM,
+} efx_mcdi_boot_t;
+
+extern __checkReturn efx_rc_t
+efx_mcdi_version(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(4) uint16_t versionp[4],
+ __out_opt uint32_t *buildp,
+ __out_opt efx_mcdi_boot_t *statusp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_capabilities(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *flagsp,
+ __out_opt uint16_t *rx_dpcpu_fw_idp,
+ __out_opt uint16_t *tx_dpcpu_fw_idp,
+ __out_opt uint32_t *flags2p,
+ __out_opt uint32_t *tso2ncp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_read_assertion(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_exit_assertion_handler(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_drv_attach(
+ __in efx_nic_t *enp,
+ __in boolean_t attach);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_board_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *board_typep,
+ __out_opt efx_dword_t *capabilitiesp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_phy_cfg(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_firmware_update_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_macaddr_change_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_link_control_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_spoofing_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+
+#if EFSYS_OPT_BIST
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+extern __checkReturn efx_rc_t
+efx_mcdi_bist_enable_offline(
+ __in efx_nic_t *enp);
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+extern __checkReturn efx_rc_t
+efx_mcdi_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+#endif /* EFSYS_OPT_BIST */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_resource_limits(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *nevqp,
+ __out_opt uint32_t *nrxqp,
+ __out_opt uint32_t *ntxqp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_log_ctrl(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_clear(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events);
+
+
+#if EFSYS_OPT_LOOPBACK
+extern __checkReturn efx_rc_t
+efx_mcdi_get_loopback_modes(
+ __in efx_nic_t *enp);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data);
+
+#define MCDI_IN(_emr, _type, _ofst) \
+ ((_type *)((_emr).emr_in_buf + (_ofst)))
+
+#define MCDI_IN2(_emr, _type, _ofst) \
+ MCDI_IN(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_IN_SET_BYTE(_emr, _ofst, _value) \
+ EFX_POPULATE_BYTE_1(*MCDI_IN2(_emr, efx_byte_t, _ofst), \
+ EFX_BYTE_0, _value)
+
+#define MCDI_IN_SET_WORD(_emr, _ofst, _value) \
+ EFX_POPULATE_WORD_1(*MCDI_IN2(_emr, efx_word_t, _ofst), \
+ EFX_WORD_0, _value)
+
+#define MCDI_IN_SET_DWORD(_emr, _ofst, _value) \
+ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ EFX_DWORD_0, _value)
+
+#define MCDI_IN_SET_DWORD_FIELD(_emr, _ofst, _field, _value) \
+ EFX_SET_DWORD_FIELD(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field, _value)
+
+#define MCDI_IN_POPULATE_DWORD_1(_emr, _ofst, _field1, _value1) \
+ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1)
+
+#define MCDI_IN_POPULATE_DWORD_2(_emr, _ofst, _field1, _value1, \
+ _field2, _value2) \
+ EFX_POPULATE_DWORD_2(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2)
+
+#define MCDI_IN_POPULATE_DWORD_3(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_DWORD_3(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3)
+
+#define MCDI_IN_POPULATE_DWORD_4(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4) \
+ EFX_POPULATE_DWORD_4(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4)
+
+#define MCDI_IN_POPULATE_DWORD_5(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5) \
+ EFX_POPULATE_DWORD_5(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5)
+
+#define MCDI_IN_POPULATE_DWORD_6(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_DWORD_6(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6)
+
+#define MCDI_IN_POPULATE_DWORD_7(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7) \
+ EFX_POPULATE_DWORD_7(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7)
+
+#define MCDI_IN_POPULATE_DWORD_8(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8) \
+ EFX_POPULATE_DWORD_8(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8)
+
+#define MCDI_IN_POPULATE_DWORD_9(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_DWORD_9(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8, \
+ MC_CMD_ ## _field9, _value9)
+
+#define MCDI_IN_POPULATE_DWORD_10(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8, _field9, _value9, _field10, _value10) \
+ EFX_POPULATE_DWORD_10(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8, \
+ MC_CMD_ ## _field9, _value9, \
+ MC_CMD_ ## _field10, _value10)
+
+#define MCDI_OUT(_emr, _type, _ofst) \
+ ((_type *)((_emr).emr_out_buf + (_ofst)))
+
+#define MCDI_OUT2(_emr, _type, _ofst) \
+ MCDI_OUT(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_OUT_BYTE(_emr, _ofst) \
+ EFX_BYTE_FIELD(*MCDI_OUT2(_emr, efx_byte_t, _ofst), \
+ EFX_BYTE_0)
+
+#define MCDI_OUT_WORD(_emr, _ofst) \
+ EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \
+ EFX_WORD_0)
+
+#define MCDI_OUT_DWORD(_emr, _ofst) \
+ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
+ EFX_DWORD_0)
+
+#define MCDI_OUT_DWORD_FIELD(_emr, _ofst, _field) \
+ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field)
+
+#define MCDI_EV_FIELD(_eqp, _field) \
+ EFX_QWORD_FIELD(*_eqp, MCDI_EVENT_ ## _field)
+
+#define MCDI_CMD_DWORD_FIELD(_edp, _field) \
+ EFX_DWORD_FIELD(*_edp, MC_CMD_ ## _field)
+
+#define EFX_MCDI_HAVE_PRIVILEGE(mask, priv) \
+ (((mask) & (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv)) == \
+ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv))
+
+typedef enum efx_mcdi_feature_id_e {
+ EFX_MCDI_FEATURE_FW_UPDATE = 0,
+ EFX_MCDI_FEATURE_LINK_CONTROL,
+ EFX_MCDI_FEATURE_MACADDR_CHANGE,
+ EFX_MCDI_FEATURE_MAC_SPOOFING,
+ EFX_MCDI_FEATURE_NIDS
+} efx_mcdi_feature_id_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_MCDI_H */
diff --git a/drivers/net/sfc/base/efx_mon.c b/drivers/net/sfc/base/efx_mon.c
new file mode 100644
index 00000000..c2f1e97e
--- /dev/null
+++ b/drivers/net/sfc/base/efx_mon.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_NAMES
+
+static const char * const __efx_mon_name[] = {
+ "",
+ "sfx90x0",
+ "sfx91x0",
+ "sfx92x0"
+};
+
+ const char *
+efx_mon_name(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
+ EFSYS_ASSERT3U(encp->enc_mon_type, <, EFX_MON_NTYPES);
+ return (__efx_mon_name[encp->enc_mon_type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+#if EFSYS_OPT_MON_MCDI
+static const efx_mon_ops_t __efx_mon_mcdi_ops = {
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MON_STATS */
+};
+#endif
+
+
+ __checkReturn efx_rc_t
+efx_mon_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mon_t *emp = &(enp->en_mon);
+ const efx_mon_ops_t *emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enp->en_mod_flags & EFX_MOD_MON) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ enp->en_mod_flags |= EFX_MOD_MON;
+
+ emp->em_type = encp->enc_mon_type;
+
+ EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
+ switch (emp->em_type) {
+#if EFSYS_OPT_MON_MCDI
+ case EFX_MON_SFC90X0:
+ case EFX_MON_SFC91X0:
+ case EFX_MON_SFC92X0:
+ emop = &__efx_mon_mcdi_ops;
+ break;
+#endif
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ emp->em_emop = emop;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ emp->em_type = EFX_MON_INVALID;
+
+ enp->en_mod_flags &= ~EFX_MOD_MON;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_MON_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED MonitorStatNamesBlock 5daa2a5725ba734b */
+static const char * const __mon_stat_name[] = {
+ "value_2_5v",
+ "value_vccp1",
+ "value_vcc",
+ "value_5v",
+ "value_12v",
+ "value_vccp2",
+ "value_ext_temp",
+ "value_int_temp",
+ "value_ain1",
+ "value_ain2",
+ "controller_cooling",
+ "ext_cooling",
+ "1v",
+ "1_2v",
+ "1_8v",
+ "3_3v",
+ "1_2va",
+ "vref",
+ "vaoe",
+ "aoe_temperature",
+ "psu_aoe_temperature",
+ "psu_temperature",
+ "fan0",
+ "fan1",
+ "fan2",
+ "fan3",
+ "fan4",
+ "vaoe_in",
+ "iaoe",
+ "iaoe_in",
+ "nic_power",
+ "0_9v",
+ "i0_9v",
+ "i1_2v",
+ "0_9v_adc",
+ "controller_temperature2",
+ "vreg_temperature",
+ "vreg_0_9v_temperature",
+ "vreg_1_2v_temperature",
+ "int_vptat",
+ "controller_internal_adc_temperature",
+ "ext_vptat",
+ "controller_external_adc_temperature",
+ "ambient_temperature",
+ "airflow",
+ "vdd08d_vss08d_csr",
+ "vdd08d_vss08d_csr_extadc",
+ "hotpoint_temperature",
+ "phy_power_switch_port0",
+ "phy_power_switch_port1",
+ "mum_vcc",
+ "0v9_a",
+ "i0v9_a",
+ "0v9_a_temp",
+ "0v9_b",
+ "i0v9_b",
+ "0v9_b_temp",
+ "ccom_avreg_1v2_supply",
+ "ccom_avreg_1v2_supply_ext_adc",
+ "ccom_avreg_1v8_supply",
+ "ccom_avreg_1v8_supply_ext_adc",
+ "controller_master_vptat",
+ "controller_master_internal_temp",
+ "controller_master_vptat_ext_adc",
+ "controller_master_internal_temp_ext_adc",
+ "controller_slave_vptat",
+ "controller_slave_internal_temp",
+ "controller_slave_vptat_ext_adc",
+ "controller_slave_internal_temp_ext_adc",
+ "sodimm_vout",
+ "sodimm_0_temp",
+ "sodimm_1_temp",
+ "phy0_vcc",
+ "phy1_vcc",
+ "controller_tdiode_temp",
+ "board_front_temp",
+ "board_back_temp",
+};
+
+/* END MKCONFIG GENERATED MonitorStatNamesBlock */
+
+extern const char *
+efx_mon_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(id, <, EFX_MON_NSTATS);
+ return (__mon_stat_name[id]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn efx_rc_t
+efx_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values)
+{
+ efx_mon_t *emp = &(enp->en_mon);
+ const efx_mon_ops_t *emop = emp->em_emop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
+
+ return (emop->emo_stats_update(enp, esmp, values));
+}
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+ void
+efx_mon_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mon_t *emp = &(enp->en_mon);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
+
+ emp->em_emop = NULL;
+
+ emp->em_type = EFX_MON_INVALID;
+
+ enp->en_mod_flags &= ~EFX_MOD_MON;
+}
diff --git a/drivers/net/sfc/base/efx_nic.c b/drivers/net/sfc/base/efx_nic.c
new file mode 100644
index 00000000..76caa744
--- /dev/null
+++ b/drivers/net/sfc/base/efx_nic.c
@@ -0,0 +1,1110 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_family(
+ __in uint16_t venid,
+ __in uint16_t devid,
+ __out efx_family_t *efp)
+{
+ if (venid == EFX_PCI_VENID_SFC) {
+ switch (devid) {
+#if EFSYS_OPT_SIENA
+ case EFX_PCI_DEVID_SIENA_F1_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Siena.
+ * manftest must be able to cope with this device id.
+ */
+ *efp = EFX_FAMILY_SIENA;
+ return (0);
+
+ case EFX_PCI_DEVID_BETHPAGE:
+ case EFX_PCI_DEVID_SIENA:
+ *efp = EFX_FAMILY_SIENA;
+ return (0);
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Huntington.
+ * manftest must be able to cope with this device id.
+ */
+ *efp = EFX_FAMILY_HUNTINGTON;
+ return (0);
+
+ case EFX_PCI_DEVID_FARMINGDALE:
+ case EFX_PCI_DEVID_GREENPORT:
+ *efp = EFX_FAMILY_HUNTINGTON;
+ return (0);
+
+ case EFX_PCI_DEVID_FARMINGDALE_VF:
+ case EFX_PCI_DEVID_GREENPORT_VF:
+ *efp = EFX_FAMILY_HUNTINGTON;
+ return (0);
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_PCI_DEVID_MEDFORD_PF_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Medford.
+ * manftest must be able to cope with this device id.
+ */
+ *efp = EFX_FAMILY_MEDFORD;
+ return (0);
+
+ case EFX_PCI_DEVID_MEDFORD:
+ *efp = EFX_FAMILY_MEDFORD;
+ return (0);
+
+ case EFX_PCI_DEVID_MEDFORD_VF:
+ *efp = EFX_FAMILY_MEDFORD;
+ return (0);
+#endif /* EFSYS_OPT_MEDFORD */
+
+ case EFX_PCI_DEVID_FALCON: /* Obsolete, not supported */
+ default:
+ break;
+ }
+ }
+
+ *efp = EFX_FAMILY_INVALID;
+ return (ENOTSUP);
+}
+
+
+#define EFX_BIU_MAGIC0 0x01234567
+#define EFX_BIU_MAGIC1 0xfedcba98
+
+ __checkReturn efx_rc_t
+efx_nic_biu_test(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ /*
+ * Write magic values to scratch registers 0 and 1, then
+ * verify that the values were written correctly. Interleave
+ * the accesses to ensure that the BIU is not just reading
+ * back the cached value that was last written.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ /*
+ * Perform the same test, with the values swapped. This
+ * ensures that subsequent tests don't start with the correct
+ * values already written into the scratch registers.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail3;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_SIENA
+
+static const efx_nic_ops_t __efx_nic_siena_ops = {
+ siena_nic_probe, /* eno_probe */
+ NULL, /* eno_board_cfg */
+ NULL, /* eno_set_drv_limits */
+ siena_nic_reset, /* eno_reset */
+ siena_nic_init, /* eno_init */
+ NULL, /* eno_get_vi_pool */
+ NULL, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ siena_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ siena_nic_fini, /* eno_fini */
+ siena_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static const efx_nic_ops_t __efx_nic_hunt_ops = {
+ ef10_nic_probe, /* eno_probe */
+ hunt_board_cfg, /* eno_board_cfg */
+ ef10_nic_set_drv_limits, /* eno_set_drv_limits */
+ ef10_nic_reset, /* eno_reset */
+ ef10_nic_init, /* eno_init */
+ ef10_nic_get_vi_pool, /* eno_get_vi_pool */
+ ef10_nic_get_bar_region, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ ef10_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nic_fini, /* eno_fini */
+ ef10_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+
+static const efx_nic_ops_t __efx_nic_medford_ops = {
+ ef10_nic_probe, /* eno_probe */
+ medford_board_cfg, /* eno_board_cfg */
+ ef10_nic_set_drv_limits, /* eno_set_drv_limits */
+ ef10_nic_reset, /* eno_reset */
+ ef10_nic_init, /* eno_init */
+ ef10_nic_get_vi_pool, /* eno_get_vi_pool */
+ ef10_nic_get_bar_region, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ ef10_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nic_fini, /* eno_fini */
+ ef10_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_MEDFORD */
+
+
+ __checkReturn efx_rc_t
+efx_nic_create(
+ __in efx_family_t family,
+ __in efsys_identifier_t *esip,
+ __in efsys_bar_t *esbp,
+ __in efsys_lock_t *eslp,
+ __deref_out efx_nic_t **enpp)
+{
+ efx_nic_t *enp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(family, >, EFX_FAMILY_INVALID);
+ EFSYS_ASSERT3U(family, <, EFX_FAMILY_NTYPES);
+
+ /* Allocate a NIC object */
+ EFSYS_KMEM_ALLOC(esip, sizeof (efx_nic_t), enp);
+
+ if (enp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_magic = EFX_NIC_MAGIC;
+
+ switch (family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ enp->en_enop = &__efx_nic_siena_ops;
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LFSR_HASH_INSERT |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_LOOKAHEAD_SPLIT |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_TX_SRC_FILTERS;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ enp->en_enop = &__efx_nic_hunt_ops;
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_MCDI_DMA |
+ EFX_FEATURE_PIO_BUFFERS |
+ EFX_FEATURE_FW_ASSISTED_TSO |
+ EFX_FEATURE_FW_ASSISTED_TSO_V2 |
+ EFX_FEATURE_PACKED_STREAM;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ enp->en_enop = &__efx_nic_medford_ops;
+ /*
+ * FW_ASSISTED_TSO omitted as Medford only supports firmware
+ * assisted TSO version 2, not the v1 scheme used on Huntington.
+ */
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_MCDI_DMA |
+ EFX_FEATURE_PIO_BUFFERS |
+ EFX_FEATURE_FW_ASSISTED_TSO_V2 |
+ EFX_FEATURE_PACKED_STREAM;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ enp->en_family = family;
+ enp->en_esip = esip;
+ enp->en_esbp = esbp;
+ enp->en_eslp = eslp;
+
+ *enpp = enp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ enp->en_magic = 0;
+
+ /* Free the NIC object */
+ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_probe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+#if EFSYS_OPT_MCDI
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+#endif /* EFSYS_OPT_MCDI */
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE));
+
+ enop = enp->en_enop;
+ if ((rc = enop->eno_probe(enp)) != 0)
+ goto fail1;
+
+ if ((rc = efx_phy_probe(enp)) != 0)
+ goto fail2;
+
+ enp->en_mod_flags |= EFX_MOD_PROBE;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ enop->eno_unprobe(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enop->eno_set_drv_limits != NULL) {
+ if ((rc = enop->eno_set_drv_limits(enp, edlp)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enop->eno_get_bar_region == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if ((rc = (enop->eno_get_bar_region)(enp,
+ region, offsetp, sizep)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *evq_countp,
+ __out uint32_t *rxq_countp,
+ __out uint32_t *txq_countp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enop->eno_get_vi_pool != NULL) {
+ uint32_t vi_count = 0;
+
+ if ((rc = (enop->eno_get_vi_pool)(enp, &vi_count)) != 0)
+ goto fail1;
+
+ *evq_countp = vi_count;
+ *rxq_countp = vi_count;
+ *txq_countp = vi_count;
+ } else {
+ /* Use NIC limits as default value */
+ *evq_countp = encp->enc_evq_limit;
+ *rxq_countp = encp->enc_rxq_limit;
+ *txq_countp = encp->enc_txq_limit;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_init(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enp->en_mod_flags & EFX_MOD_NIC) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = enop->eno_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_mod_flags |= EFX_MOD_NIC;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_nic_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_NIC);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+
+ enop->eno_fini(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_NIC;
+}
+
+ void
+efx_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+#if EFSYS_OPT_MCDI
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+#endif /* EFSYS_OPT_MCDI */
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+
+ efx_phy_unprobe(enp);
+
+ enop->eno_unprobe(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_PROBE;
+}
+
+ void
+efx_nic_destroy(
+ __in efx_nic_t *enp)
+{
+ efsys_identifier_t *esip = enp->en_esip;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
+
+ enp->en_family = EFX_FAMILY_INVALID;
+ enp->en_esip = NULL;
+ enp->en_esbp = NULL;
+ enp->en_eslp = NULL;
+
+ enp->en_enop = NULL;
+
+ enp->en_magic = 0;
+
+ /* Free the NIC object */
+ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_reset(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ unsigned int mod_flags;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
+ /*
+ * All modules except the MCDI, PROBE, NVRAM, VPD, MON
+ * (which we do not reset here) must have been shut down or never
+ * initialized.
+ *
+ * A rule of thumb here is: If the controller or MC reboots, is *any*
+ * state lost. If it's lost and needs reapplying, then the module
+ * *must* not be initialised during the reset.
+ */
+ mod_flags = enp->en_mod_flags;
+ mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM |
+ EFX_MOD_VPD | EFX_MOD_MON);
+ EFSYS_ASSERT3U(mod_flags, ==, 0);
+ if (mod_flags != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = enop->eno_reset(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ const efx_nic_cfg_t *
+efx_nic_cfg_get(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ return (&(enp->en_nic_cfg));
+}
+
+ __checkReturn efx_rc_t
+efx_nic_get_fw_version(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_info_t *enfip)
+{
+ uint16_t mc_fw_version[4];
+ efx_rc_t rc;
+
+ if (enfip == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ rc = efx_mcdi_version(enp, mc_fw_version, NULL, NULL);
+ if (rc != 0)
+ goto fail2;
+
+ rc = efx_mcdi_get_capabilities(enp, NULL,
+ &enfip->enfi_rx_dpcpu_fw_id,
+ &enfip->enfi_tx_dpcpu_fw_id,
+ NULL, NULL);
+ if (rc == 0) {
+ enfip->enfi_dpcpu_fw_ids_valid = B_TRUE;
+ } else if (rc == ENOTSUP) {
+ enfip->enfi_dpcpu_fw_ids_valid = B_FALSE;
+ enfip->enfi_rx_dpcpu_fw_id = 0;
+ enfip->enfi_tx_dpcpu_fw_id = 0;
+ } else {
+ goto fail3;
+ }
+
+ memcpy(enfip->enfi_mc_fw_version, mc_fw_version, sizeof(mc_fw_version));
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+
+ if ((rc = enop->eno_register_test(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_test_registers(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in size_t count)
+{
+ unsigned int bit;
+ efx_oword_t original;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ efx_rc_t rc;
+
+ while (count > 0) {
+ /* This function is only suitable for registers */
+ EFSYS_ASSERT(rsp->rows == 1);
+
+ /* bit sweep on and off */
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+ for (bit = 0; bit < 128; bit++) {
+ /* Is this bit in the mask? */
+ if (~(rsp->mask.eo_u32[bit >> 5]) & (1 << bit))
+ continue;
+
+ /* Test this bit can be set in isolation */
+ reg = original;
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFX_SET_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ /* Test this bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, rsp->mask);
+ EFX_CLEAR_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail2;
+ }
+ }
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+
+ --count;
+ ++rsp;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_test_tables(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in efx_pattern_type_t pattern,
+ __in size_t count)
+{
+ efx_sram_pattern_fn_t func;
+ unsigned int index;
+ unsigned int address;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(pattern < EFX_PATTERN_NTYPES);
+ func = __efx_sram_pattern_fns[pattern];
+
+ while (count > 0) {
+ /* Write */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_WRITEO(enp->en_esbp, address, &reg, B_TRUE);
+
+ address += rsp->step;
+ }
+
+ /* Read */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_READO(enp->en_esbp, address, &buf, B_TRUE);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ address += rsp->step;
+ }
+
+ ++rsp;
+ --count;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_LOOPBACK
+
+extern void
+efx_loopback_mask(
+ __in efx_loopback_kind_t loopback_kind,
+ __out efx_qword_t *maskp)
+{
+ efx_qword_t mask;
+
+ EFSYS_ASSERT3U(loopback_kind, <, EFX_LOOPBACK_NKINDS);
+ EFSYS_ASSERT(maskp != NULL);
+
+ /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XPORT == EFX_LOOPBACK_XPORT);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII_WS == EFX_LOOPBACK_XGMII_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS == EFX_LOOPBACK_XAUI_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_FAR ==
+ EFX_LOOPBACK_XAUI_WS_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_NEAR ==
+ EFX_LOOPBACK_XAUI_WS_NEAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_WS == EFX_LOOPBACK_GMII_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS == EFX_LOOPBACK_XFI_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS_FAR ==
+ EFX_LOOPBACK_XFI_WS_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS_WS == EFX_LOOPBACK_PHYXS_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT == EFX_LOOPBACK_PMA_INT);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_NEAR == EFX_LOOPBACK_SD_NEAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FAR == EFX_LOOPBACK_SD_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT_WS ==
+ EFX_LOOPBACK_PMA_INT_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP2_WS ==
+ EFX_LOOPBACK_SD_FEP2_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP1_5_WS ==
+ EFX_LOOPBACK_SD_FEP1_5_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP_WS == EFX_LOOPBACK_SD_FEP_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FES_WS == EFX_LOOPBACK_SD_FES_WS);
+
+ /* Build bitmask of possible loopback types */
+ EFX_ZERO_QWORD(mask);
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_OFF) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_OFF);
+ }
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_MAC) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ /*
+ * The "MAC" grouping has historically been used by drivers to
+ * mean loopbacks supported by on-chip hardware. Keep that
+ * meaning here, and include on-chip PHY layer loopbacks.
+ */
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_DATA);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMAC);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGXS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGBR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_INT);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_NEAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_FAR);
+ }
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_PHY) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ /*
+ * The "PHY" grouping has historically been used by drivers to
+ * mean loopbacks supported by off-chip hardware. Keep that
+ * meaning here.
+ */
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GPHY);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PHY_XS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PCS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_PMD);
+ }
+
+ *maskp = mask;
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_loopback_modes(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LOOPBACK_MODES_IN_LEN,
+ MC_CMD_GET_LOOPBACK_MODES_OUT_LEN)];
+ efx_qword_t mask;
+ efx_qword_t modes;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LOOPBACK_MODES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ /*
+ * We assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree
+ * in efx_loopback_mask() and in siena_phy.c:siena_phy_get_link().
+ */
+ efx_loopback_mask(EFX_LOOPBACK_KIND_ALL, &mask);
+
+ EFX_AND_QWORD(mask,
+ *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_SUGGESTED));
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_100M);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_100FDX] = modes;
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_1G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_1000FDX] = modes;
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_10G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_10000FDX] = modes;
+
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN) {
+ /* Response includes 40G loopback modes */
+ modes =
+ *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_40G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_40000FDX] = modes;
+ }
+
+ EFX_ZERO_QWORD(modes);
+ EFX_SET_QWORD_BIT(modes, EFX_LOOPBACK_OFF);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_1000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_10000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_40000FDX]);
+ encp->enc_loopback_types[EFX_LINK_UNKNOWN] = modes;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ __checkReturn efx_rc_t
+efx_nic_calculate_pcie_link_bandwidth(
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t lane_bandwidth;
+ uint32_t total_bandwidth;
+ efx_rc_t rc;
+
+ if ((pcie_link_width == 0) || (pcie_link_width > 16) ||
+ !ISP2(pcie_link_width)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (pcie_link_gen) {
+ case EFX_PCIE_LINK_SPEED_GEN1:
+ /* 2.5 Gb/s raw bandwidth with 8b/10b encoding */
+ lane_bandwidth = 2000;
+ break;
+ case EFX_PCIE_LINK_SPEED_GEN2:
+ /* 5.0 Gb/s raw bandwidth with 8b/10b encoding */
+ lane_bandwidth = 4000;
+ break;
+ case EFX_PCIE_LINK_SPEED_GEN3:
+ /* 8.0 Gb/s raw bandwidth with 128b/130b encoding */
+ lane_bandwidth = 7877;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ total_bandwidth = lane_bandwidth * pcie_link_width;
+ *bandwidth_mbpsp = total_bandwidth;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_check_pcie_link_speed(
+ __in efx_nic_t *enp,
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out efx_pcie_link_performance_t *resultp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t bandwidth;
+ efx_pcie_link_performance_t result;
+ efx_rc_t rc;
+
+ if ((encp->enc_required_pcie_bandwidth_mbps == 0) ||
+ (pcie_link_width == 0) || (pcie_link_width == 32) ||
+ (pcie_link_gen == 0)) {
+ /*
+ * No usable info on what is required and/or in use. In virtual
+ * machines, sometimes the PCIe link width is reported as 0 or
+ * 32, or the speed as 0.
+ */
+ result = EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH;
+ goto out;
+ }
+
+ /* Calculate the available bandwidth in megabits per second */
+ rc = efx_nic_calculate_pcie_link_bandwidth(pcie_link_width,
+ pcie_link_gen, &bandwidth);
+ if (rc != 0)
+ goto fail1;
+
+ if (bandwidth < encp->enc_required_pcie_bandwidth_mbps) {
+ result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH;
+ } else if (pcie_link_gen < encp->enc_max_pcie_link_gen) {
+ /* The link provides enough bandwidth but not optimal latency */
+ result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY;
+ } else {
+ result = EFX_PCIE_LINK_PERFORMANCE_OPTIMAL;
+ }
+
+out:
+ *resultp = result;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
diff --git a/drivers/net/sfc/base/efx_nvram.c b/drivers/net/sfc/base/efx_nvram.c
new file mode 100644
index 00000000..6ee2a71d
--- /dev/null
+++ b/drivers/net/sfc/base/efx_nvram.c
@@ -0,0 +1,1044 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_SIENA
+
+static const efx_nvram_ops_t __efx_nvram_siena_ops = {
+#if EFSYS_OPT_DIAG
+ siena_nvram_test, /* envo_test */
+#endif /* EFSYS_OPT_DIAG */
+ siena_nvram_type_to_partn, /* envo_type_to_partn */
+ siena_nvram_partn_size, /* envo_partn_size */
+ siena_nvram_partn_rw_start, /* envo_partn_rw_start */
+ siena_nvram_partn_read, /* envo_partn_read */
+ siena_nvram_partn_erase, /* envo_partn_erase */
+ siena_nvram_partn_write, /* envo_partn_write */
+ siena_nvram_partn_rw_finish, /* envo_partn_rw_finish */
+ siena_nvram_partn_get_version, /* envo_partn_get_version */
+ siena_nvram_partn_set_version, /* envo_partn_set_version */
+ NULL, /* envo_partn_validate */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static const efx_nvram_ops_t __efx_nvram_ef10_ops = {
+#if EFSYS_OPT_DIAG
+ ef10_nvram_test, /* envo_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nvram_type_to_partn, /* envo_type_to_partn */
+ ef10_nvram_partn_size, /* envo_partn_size */
+ ef10_nvram_partn_rw_start, /* envo_partn_rw_start */
+ ef10_nvram_partn_read, /* envo_partn_read */
+ ef10_nvram_partn_erase, /* envo_partn_erase */
+ ef10_nvram_partn_write, /* envo_partn_write */
+ ef10_nvram_partn_rw_finish, /* envo_partn_rw_finish */
+ ef10_nvram_partn_get_version, /* envo_partn_get_version */
+ ef10_nvram_partn_set_version, /* envo_partn_set_version */
+ ef10_nvram_buffer_validate, /* envo_buffer_validate */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_nvram_init(
+ __in efx_nic_t *enp)
+{
+ const efx_nvram_ops_t *envop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NVRAM));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ envop = &__efx_nvram_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ envop = &__efx_nvram_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ envop = &__efx_nvram_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ enp->en_envop = envop;
+ enp->en_mod_flags |= EFX_MOD_NVRAM;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_nvram_test(
+ __in efx_nic_t *enp)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_test(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+ __checkReturn efx_rc_t
+efx_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_size(enp, partn, sizep)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ *sizep = 0;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_get_version(enp, partn,
+ subtypep, version)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt size_t *chunk_sizep)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_rw_start(enp, partn, chunk_sizep)) != 0)
+ goto fail2;
+
+ enp->en_nvram_locked = type;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_read(enp, partn, offset, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ unsigned int offset = 0;
+ size_t size = 0;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_size(enp, partn, &size)) != 0)
+ goto fail2;
+
+ if ((rc = envop->envo_partn_erase(enp, partn, offset, size)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_write(enp, partn, offset, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_rw_finish(enp, partn)) != 0)
+ goto fail2;
+
+ enp->en_nvram_locked = EFX_NVRAM_INVALID;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ enp->en_nvram_locked = EFX_NVRAM_INVALID;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_ecount(4) uint16_t version[4])
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ /*
+ * The Siena implementation of envo_set_version() will attempt to
+ * acquire the NVRAM_UPDATE lock for the DYNAMIC_CONFIG sector.
+ * Therefore, you can't have already acquired the NVRAM_UPDATE lock.
+ */
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_set_version(enp, partn, version)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Validate buffer contents (before writing to flash) */
+ __checkReturn efx_rc_t
+efx_nvram_validate(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if (envop->envo_type_to_partn != NULL &&
+ ((rc = envop->envo_buffer_validate(enp, partn,
+ partn_data, partn_size)) != 0))
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+void
+efx_nvram_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ enp->en_envop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_NVRAM;
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+/*
+ * Internal MCDI request handling
+ */
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_partitions(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __out unsigned int *npartnp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_PARTITIONS_IN_LEN,
+ MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX)];
+ unsigned int npartn;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_PARTITIONS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_PARTITIONS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ npartn = MCDI_OUT_DWORD(req, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LEN(npartn)) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (size < npartn * sizeof (uint32_t)) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ *npartnp = npartn;
+
+ memcpy(data,
+ MCDI_OUT2(req, uint32_t, NVRAM_PARTITIONS_OUT_TYPE_ID),
+ (npartn * sizeof (uint32_t)));
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_metadata(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4],
+ __out_bcount_opt(size) char *descp,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_METADATA_IN_LEN,
+ MC_CMD_NVRAM_METADATA_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_METADATA;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_METADATA_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_METADATA_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_METADATA_IN_TYPE, partn);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_METADATA_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_SUBTYPE_VALID)) {
+ *subtypep = MCDI_OUT_DWORD(req, NVRAM_METADATA_OUT_SUBTYPE);
+ } else {
+ *subtypep = 0;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_VERSION_VALID)) {
+ version[0] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_W);
+ version[1] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_X);
+ version[2] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Y);
+ version[3] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Z);
+ } else {
+ version[0] = version[1] = version[2] = version[3] = 0;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_DESCRIPTION_VALID)) {
+ /* Return optional descrition string */
+ if ((descp != NULL) && (size > 0)) {
+ size_t desclen;
+
+ descp[0] = '\0';
+ desclen = (req.emr_out_length_used
+ - MC_CMD_NVRAM_METADATA_OUT_LEN(0));
+
+ EFSYS_ASSERT3U(desclen, <=,
+ MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM);
+
+ if (size < desclen) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ memcpy(descp, MCDI_OUT2(req, char,
+ NVRAM_METADATA_OUT_DESCRIPTION),
+ desclen);
+
+ /* Ensure string is NUL terminated */
+ descp[desclen] = '\0';
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_info(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt size_t *sizep,
+ __out_opt uint32_t *addressp,
+ __out_opt uint32_t *erase_sizep,
+ __out_opt uint32_t *write_sizep)
+{
+ uint8_t payload[MAX(MC_CMD_NVRAM_INFO_IN_LEN,
+ MC_CMD_NVRAM_INFO_V2_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_INFO_V2_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_INFO_IN_TYPE, partn);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (sizep)
+ *sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_SIZE);
+
+ if (addressp)
+ *addressp = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_PHYSADDR);
+
+ if (erase_sizep)
+ *erase_sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_ERASESIZE);
+
+ if (write_sizep) {
+ *write_sizep =
+ (req.emr_out_length_used <
+ MC_CMD_NVRAM_INFO_V2_OUT_LEN) ?
+ 0 : MCDI_OUT_DWORD(req, NVRAM_INFO_V2_OUT_WRITESIZE);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * MC_CMD_NVRAM_UPDATE_START_V2 must be used to support firmware-verified
+ * NVRAM updates. Older firmware will ignore the flags field in the request.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_START_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_UPDATE_START;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_UPDATE_START_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_START_V2_IN_TYPE, partn);
+
+ MCDI_IN_POPULATE_DWORD_1(req, NVRAM_UPDATE_START_V2_IN_FLAGS,
+ NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_READ_IN_V2_LEN,
+ MC_CMD_NVRAM_READ_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ if (size > MC_CMD_NVRAM_READ_OUT_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_READ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_READ_IN_V2_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_READ_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_LENGTH, size);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_MODE, mode);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_READ_OUT_LEN(size)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ memcpy(data,
+ MCDI_OUT2(req, uint8_t, NVRAM_READ_OUT_READ_BUFFER),
+ size);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_ERASE_IN_LEN,
+ MC_CMD_NVRAM_ERASE_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_ERASE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_ERASE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_ERASE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_LENGTH, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * The NVRAM_WRITE MCDI command is a V1 command and so is supported by both
+ * Sienna and EF10 based boards. However EF10 based boards support the use
+ * of this command with payloads up to the maximum MCDI V2 payload length.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MCDI_CTL_SDU_LEN_MAX_V1,
+ MCDI_CTL_SDU_LEN_MAX_V2)];
+ efx_rc_t rc;
+ size_t max_data_size;
+
+ max_data_size = enp->en_nic_cfg.enc_mcdi_max_payload_length
+ - MC_CMD_NVRAM_WRITE_IN_LEN(0);
+ EFSYS_ASSERT3U(enp->en_nic_cfg.enc_mcdi_max_payload_length, >, 0);
+ EFSYS_ASSERT3U(max_data_size, <,
+ enp->en_nic_cfg.enc_mcdi_max_payload_length);
+
+ if (size > max_data_size) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_WRITE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_WRITE_IN_LEN(size);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_WRITE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_LENGTH, size);
+
+ memcpy(MCDI_IN2(req, uint8_t, NVRAM_WRITE_IN_WRITE_BUFFER),
+ data, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2 must be used to support firmware-verified
+ * NVRAM updates. Older firmware will ignore the flags field in the request.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t reboot,
+ __out_opt uint32_t *resultp)
+{
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN)];
+ uint32_t result = 0; /* FIXME: use MC_CMD_NVRAM_VERIFY_RC_UNKNOWN */
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_UPDATE_FINISH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_V2_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_V2_IN_REBOOT, reboot);
+
+ MCDI_IN_POPULATE_DWORD_1(req, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (encp->enc_fw_verified_nvram_update_required == B_FALSE) {
+ /* Report success if verified updates are not supported. */
+ result = MC_CMD_NVRAM_VERIFY_RC_SUCCESS;
+ } else {
+ /* Firmware-verified NVRAM updates are required */
+ if (req.emr_out_length_used <
+ MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ result =
+ MCDI_OUT_DWORD(req, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
+
+ if (result != MC_CMD_NVRAM_VERIFY_RC_SUCCESS) {
+ /* Mandatory verification failed */
+ rc = EINVAL;
+ goto fail3;
+ }
+ }
+
+ if (resultp != NULL)
+ *resultp = result;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Always report verification result */
+ if (resultp != NULL)
+ *resultp = result;
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_test(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_TEST_IN_LEN,
+ MC_CMD_NVRAM_TEST_OUT_LEN)];
+ int result;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_TEST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_TEST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_TEST_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_TEST_IN_TYPE, partn);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_TEST_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ result = MCDI_OUT_DWORD(req, NVRAM_TEST_OUT_RESULT);
+ if (result == MC_CMD_NVRAM_TEST_FAIL) {
+
+ EFSYS_PROBE1(nvram_test_failure, int, partn);
+
+ rc = (EINVAL);
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
diff --git a/drivers/net/sfc/base/efx_phy.c b/drivers/net/sfc/base/efx_phy.c
new file mode 100644
index 00000000..752cd52e
--- /dev/null
+++ b/drivers/net/sfc/base/efx_phy.c
@@ -0,0 +1,561 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+static const efx_phy_ops_t __efx_phy_siena_ops = {
+ siena_phy_power, /* epo_power */
+ NULL, /* epo_reset */
+ siena_phy_reconfigure, /* epo_reconfigure */
+ siena_phy_verify, /* epo_verify */
+ siena_phy_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ siena_phy_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ NULL, /* epo_bist_enable_offline */
+ siena_phy_bist_start, /* epo_bist_start */
+ siena_phy_bist_poll, /* epo_bist_poll */
+ siena_phy_bist_stop, /* epo_bist_stop */
+#endif /* EFSYS_OPT_BIST */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_phy_ops_t __efx_phy_ef10_ops = {
+ ef10_phy_power, /* epo_power */
+ NULL, /* epo_reset */
+ ef10_phy_reconfigure, /* epo_reconfigure */
+ ef10_phy_verify, /* epo_verify */
+ ef10_phy_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ ef10_phy_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ ef10_bist_enable_offline, /* epo_bist_enable_offline */
+ ef10_bist_start, /* epo_bist_start */
+ ef10_bist_poll, /* epo_bist_poll */
+ ef10_bist_stop, /* epo_bist_stop */
+#endif /* EFSYS_OPT_BIST */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_phy_probe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ const efx_phy_ops_t *epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ epp->ep_port = encp->enc_port;
+ epp->ep_phy_type = encp->enc_phy_type;
+
+ /* Hook in operations structure */
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ epop = &__efx_phy_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ epop = &__efx_phy_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ epop = &__efx_phy_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ epp->ep_epop = epop;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_port = 0;
+ epp->ep_phy_type = 0;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_verify(enp));
+}
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+
+ __checkReturn efx_rc_t
+efx_phy_led_set(
+ __in efx_nic_t *enp,
+ __in efx_phy_led_mode_t mode)
+{
+ efx_nic_cfg_t *encp = (&enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ uint32_t mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (epp->ep_phy_led_mode == mode)
+ goto done;
+
+ mask = (1 << EFX_PHY_LED_DEFAULT);
+ mask |= encp->enc_led_mask;
+
+ if (!((1 << mode) & mask)) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(mode, <, EFX_PHY_LED_NMODES);
+ epp->ep_phy_led_mode = mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+done:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ void
+efx_phy_adv_cap_get(
+ __in efx_nic_t *enp,
+ __in uint32_t flag,
+ __out uint32_t *maskp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ switch (flag) {
+ case EFX_PHY_CAP_CURRENT:
+ *maskp = epp->ep_adv_cap_mask;
+ break;
+ case EFX_PHY_CAP_DEFAULT:
+ *maskp = epp->ep_default_adv_cap_mask;
+ break;
+ case EFX_PHY_CAP_PERM:
+ *maskp = epp->ep_phy_cap_mask;
+ break;
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ break;
+ }
+}
+
+ __checkReturn efx_rc_t
+efx_phy_adv_cap_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mask)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ uint32_t old_mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((mask & ~epp->ep_phy_cap_mask) != 0) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (epp->ep_adv_cap_mask == mask)
+ goto done;
+
+ old_mask = epp->ep_adv_cap_mask;
+ epp->ep_adv_cap_mask = mask;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+done:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ epp->ep_adv_cap_mask = old_mask;
+ /* Reconfigure for robustness */
+ if (epop->epo_reconfigure(enp) != 0) {
+ /*
+ * We may have an inconsistent view of our advertised speed
+ * capabilities.
+ */
+ EFSYS_ASSERT(0);
+ }
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_phy_lp_cap_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ *maskp = epp->ep_lp_cap_mask;
+}
+
+ __checkReturn efx_rc_t
+efx_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_oui_get(enp, ouip));
+}
+
+ void
+efx_phy_media_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_media_type_t *typep)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (epp->ep_module_type != EFX_PHY_MEDIA_INVALID)
+ *typep = epp->ep_module_type;
+ else
+ *typep = epp->ep_fixed_port_type;
+}
+
+ __checkReturn efx_rc_t
+efx_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(data != NULL);
+
+ if ((uint32_t)offset + len > 0xff) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_phy_module_get_info(enp, dev_addr,
+ offset, len, data)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED PhyStatNamesBlock af9ffa24da3bc100 */
+static const char * const __efx_phy_stat_name[] = {
+ "oui",
+ "pma_pmd_link_up",
+ "pma_pmd_rx_fault",
+ "pma_pmd_tx_fault",
+ "pma_pmd_rev_a",
+ "pma_pmd_rev_b",
+ "pma_pmd_rev_c",
+ "pma_pmd_rev_d",
+ "pcs_link_up",
+ "pcs_rx_fault",
+ "pcs_tx_fault",
+ "pcs_ber",
+ "pcs_block_errors",
+ "phy_xs_link_up",
+ "phy_xs_rx_fault",
+ "phy_xs_tx_fault",
+ "phy_xs_align",
+ "phy_xs_sync_a",
+ "phy_xs_sync_b",
+ "phy_xs_sync_c",
+ "phy_xs_sync_d",
+ "an_link_up",
+ "an_master",
+ "an_local_rx_ok",
+ "an_remote_rx_ok",
+ "cl22ext_link_up",
+ "snr_a",
+ "snr_b",
+ "snr_c",
+ "snr_d",
+ "pma_pmd_signal_a",
+ "pma_pmd_signal_b",
+ "pma_pmd_signal_c",
+ "pma_pmd_signal_d",
+ "an_complete",
+ "pma_pmd_rev_major",
+ "pma_pmd_rev_minor",
+ "pma_pmd_rev_micro",
+ "pcs_fw_version_0",
+ "pcs_fw_version_1",
+ "pcs_fw_version_2",
+ "pcs_fw_version_3",
+ "pcs_fw_build_yy",
+ "pcs_fw_build_mm",
+ "pcs_fw_build_dd",
+ "pcs_op_mode",
+};
+
+/* END MKCONFIG GENERATED PhyStatNamesBlock */
+
+ const char *
+efx_phy_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_phy_stat_t type)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(type, <, EFX_PHY_NSTATS);
+
+ return (__efx_phy_stat_name[type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn efx_rc_t
+efx_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_stats_update(enp, esmp, stat));
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+efx_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ if (epop->epo_bist_enable_offline == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_enable_offline(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+ __checkReturn efx_rc_t
+efx_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, EFX_BIST_TYPE_UNKNOWN);
+
+ if (epop->epo_bist_start == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_start(enp, type)) != 0)
+ goto fail2;
+
+ epp->ep_current_bist = type;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt uint32_t *value_maskp,
+ __out_ecount_opt(count) unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
+
+ EFSYS_ASSERT(epop->epo_bist_poll != NULL);
+ if (epop->epo_bist_poll == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_poll(enp, type, resultp, value_maskp,
+ valuesp, count)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
+
+ EFSYS_ASSERT(epop->epo_bist_stop != NULL);
+
+ if (epop->epo_bist_stop != NULL)
+ epop->epo_bist_stop(enp, type);
+
+ epp->ep_current_bist = EFX_BIST_TYPE_UNKNOWN;
+}
+
+#endif /* EFSYS_OPT_BIST */
+ void
+efx_phy_unprobe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ epp->ep_epop = NULL;
+
+ epp->ep_adv_cap_mask = 0;
+
+ epp->ep_port = 0;
+ epp->ep_phy_type = 0;
+}
diff --git a/drivers/net/sfc/base/efx_phy_ids.h b/drivers/net/sfc/base/efx_phy_ids.h
new file mode 100644
index 00000000..9d9a0f90
--- /dev/null
+++ b/drivers/net/sfc/base/efx_phy_ids.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_PHY_IDS_H
+#define _SYS_EFX_PHY_IDS_H
+
+#define EFX_PHY_NULL 0
+
+typedef enum efx_phy_type_e { /* GENERATED BY scripts/genfwdef */
+ EFX_PHY_TXC43128 = 1,
+ EFX_PHY_SFX7101 = 3,
+ EFX_PHY_QT2022C2 = 4,
+ EFX_PHY_PM8358 = 6,
+ EFX_PHY_SFT9001A = 8,
+ EFX_PHY_QT2025C = 9,
+ EFX_PHY_SFT9001B = 10,
+ EFX_PHY_QLX111V = 12,
+ EFX_PHY_QT2025_KR = 17,
+ EFX_PHY_AEL3020 = 18,
+ EFX_PHY_XFI_FARMI = 19,
+} efx_phy_type_t;
+
+
+#endif /* _SYS_EFX_PHY_IDS_H */
diff --git a/drivers/net/sfc/base/efx_port.c b/drivers/net/sfc/base/efx_port.c
new file mode 100644
index 00000000..518c2a22
--- /dev/null
+++ b/drivers/net/sfc/base/efx_port.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_port_init(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enp->en_mod_flags & EFX_MOD_PORT) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ enp->en_mod_flags |= EFX_MOD_PORT;
+
+ epp->ep_mac_type = EFX_MAC_INVALID;
+ epp->ep_link_mode = EFX_LINK_UNKNOWN;
+ epp->ep_mac_drain = B_TRUE;
+
+ /* Configure the MAC */
+ if ((rc = efx_mac_select(enp)) != 0)
+ goto fail1;
+
+ epp->ep_emop->emo_reconfigure(enp);
+
+ /* Pick up current phy capababilities */
+ efx_port_poll(enp, NULL);
+
+ /*
+ * Turn on the PHY if available, otherwise reset it, and
+ * reconfigure it with the current configuration.
+ */
+ if (epop->epo_power != NULL) {
+ if ((rc = epop->epo_power(enp, B_TRUE)) != 0)
+ goto fail2;
+ } else {
+ if ((rc = epop->epo_reset(enp)) != 0)
+ goto fail2;
+ }
+
+ EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_PHY);
+ enp->en_reset_flags &= ~EFX_RESET_PHY;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_mod_flags &= ~EFX_MOD_PORT;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_port_poll(
+ __in efx_nic_t *enp,
+ __out_opt efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_link_mode_t ignore_link_mode;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(emop != NULL);
+ EFSYS_ASSERT(!epp->ep_mac_stats_pending);
+
+ if (link_modep == NULL)
+ link_modep = &ignore_link_mode;
+
+ if ((rc = emop->emo_poll(enp, link_modep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+efx_port_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ EFSYS_ASSERT(link_mode < EFX_LINK_NMODES);
+
+ if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[link_mode],
+ loopback_type) == 0) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (epp->ep_loopback_type == loopback_type &&
+ epp->ep_loopback_link_mode == link_mode)
+ return (0);
+
+ if ((rc = emop->emo_loopback_set(enp, link_mode, loopback_type)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_NAMES
+
+static const char * const __efx_loopback_type_name[] = {
+ "OFF",
+ "DATA",
+ "GMAC",
+ "XGMII",
+ "XGXS",
+ "XAUI",
+ "GMII",
+ "SGMII",
+ "XGBR",
+ "XFI",
+ "XAUI_FAR",
+ "GMII_FAR",
+ "SGMII_FAR",
+ "XFI_FAR",
+ "GPHY",
+ "PHY_XS",
+ "PCS",
+ "PMA_PMD",
+ "XPORT",
+ "XGMII_WS",
+ "XAUI_WS",
+ "XAUI_WS_FAR",
+ "XAUI_WS_NEAR",
+ "GMII_WS",
+ "XFI_WS",
+ "XFI_WS_FAR",
+ "PHYXS_WS",
+ "PMA_INT",
+ "SD_NEAR",
+ "SD_FAR",
+ "PMA_INT_WS",
+ "SD_FEP2_WS",
+ "SD_FEP1_5_WS",
+ "SD_FEP_WS",
+ "SD_FES_WS",
+};
+
+ __checkReturn const char *
+efx_loopback_type_name(
+ __in efx_nic_t *enp,
+ __in efx_loopback_type_t type)
+{
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__efx_loopback_type_name) ==
+ EFX_LOOPBACK_NTYPES);
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(type, <, EFX_LOOPBACK_NTYPES);
+
+ return (__efx_loopback_type_name[type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ void
+efx_port_fini(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(epp->ep_mac_drain);
+
+ epp->ep_emop = NULL;
+ epp->ep_mac_type = EFX_MAC_INVALID;
+ epp->ep_mac_drain = B_FALSE;
+
+ /* Turn off the PHY */
+ if (epop->epo_power != NULL)
+ (void) epop->epo_power(enp, B_FALSE);
+
+ enp->en_mod_flags &= ~EFX_MOD_PORT;
+}
diff --git a/drivers/net/sfc/base/efx_regs.h b/drivers/net/sfc/base/efx_regs.h
new file mode 100644
index 00000000..a1a7f9da
--- /dev/null
+++ b/drivers/net/sfc/base/efx_regs.h
@@ -0,0 +1,3870 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_REGS_H
+#define _SYS_EFX_REGS_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**************************************************************************
+ *
+ * Falcon/Siena registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/*
+ * FR_AB_EE_VPD_CFG0_REG_SF(128bit):
+ * SPI/VPD configuration register 0
+ */
+#define FR_AB_EE_VPD_CFG0_REG_SF_OFST 0x00000300
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_EE_VPD_CFG0_REG(128bit):
+ * SPI/VPD configuration register 0
+ */
+#define FR_AB_EE_VPD_CFG0_REG_OFST 0x00000140
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
+#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
+#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
+#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
+#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
+#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
+#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
+#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
+#define FRF_AB_EE_VPDW_LENGTH_LBN 80
+#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPDW_BASE_LBN 64
+#define FRF_AB_EE_VPDW_BASE_WIDTH 15
+#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
+#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
+#define FRF_AB_EE_VPD_BASE_LBN 32
+#define FRF_AB_EE_VPD_BASE_WIDTH 24
+#define FRF_AB_EE_VPD_LENGTH_LBN 16
+#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
+#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
+#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
+#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
+#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
+#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
+#define FRF_AB_EE_VPD_EN_LBN 0
+#define FRF_AB_EE_VPD_EN_WIDTH 1
+
+
+/*
+ * FR_AB_PCIE_SD_CTL0123_REG_SF(128bit):
+ * PCIE SerDes control register 0 to 3
+ */
+#define FR_AB_PCIE_SD_CTL0123_REG_SF_OFST 0x00000320
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_SD_CTL0123_REG(128bit):
+ * PCIE SerDes control register 0 to 3
+ */
+#define FR_AB_PCIE_SD_CTL0123_REG_OFST 0x00000320
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define FRF_AB_PCIE_OFFSET_LBN 56
+#define FRF_AB_PCIE_OFFSET_WIDTH 8
+#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_H_LBN 51
+#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_L_LBN 50
+#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define FRF_AB_PCIE_LPBK_LBN 40
+#define FRF_AB_PCIE_LPBK_WIDTH 8
+#define FRF_AB_PCIE_PARLPBK_LBN 32
+#define FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define FFE_AB_PCIE_RXEQCTL_OFF 2
+#define FFE_AB_PCIE_RXEQCTL_MIN 1
+#define FFE_AB_PCIE_RXEQCTL_MAX 0
+#define FRF_AB_PCIE_HIDRV_LBN 8
+#define FRF_AB_PCIE_HIDRV_WIDTH 8
+#define FRF_AB_PCIE_LODRV_LBN 0
+#define FRF_AB_PCIE_LODRV_WIDTH 8
+
+
+/*
+ * FR_AB_PCIE_SD_CTL45_REG_SF(128bit):
+ * PCIE SerDes control register 4 and 5
+ */
+#define FR_AB_PCIE_SD_CTL45_REG_SF_OFST 0x00000330
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_SD_CTL45_REG(128bit):
+ * PCIE SerDes control register 4 and 5
+ */
+#define FR_AB_PCIE_SD_CTL45_REG_OFST 0x00000330
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_DTX7_LBN 60
+#define FRF_AB_PCIE_DTX7_WIDTH 4
+#define FRF_AB_PCIE_DTX6_LBN 56
+#define FRF_AB_PCIE_DTX6_WIDTH 4
+#define FRF_AB_PCIE_DTX5_LBN 52
+#define FRF_AB_PCIE_DTX5_WIDTH 4
+#define FRF_AB_PCIE_DTX4_LBN 48
+#define FRF_AB_PCIE_DTX4_WIDTH 4
+#define FRF_AB_PCIE_DTX3_LBN 44
+#define FRF_AB_PCIE_DTX3_WIDTH 4
+#define FRF_AB_PCIE_DTX2_LBN 40
+#define FRF_AB_PCIE_DTX2_WIDTH 4
+#define FRF_AB_PCIE_DTX1_LBN 36
+#define FRF_AB_PCIE_DTX1_WIDTH 4
+#define FRF_AB_PCIE_DTX0_LBN 32
+#define FRF_AB_PCIE_DTX0_WIDTH 4
+#define FRF_AB_PCIE_DEQ7_LBN 28
+#define FRF_AB_PCIE_DEQ7_WIDTH 4
+#define FRF_AB_PCIE_DEQ6_LBN 24
+#define FRF_AB_PCIE_DEQ6_WIDTH 4
+#define FRF_AB_PCIE_DEQ5_LBN 20
+#define FRF_AB_PCIE_DEQ5_WIDTH 4
+#define FRF_AB_PCIE_DEQ4_LBN 16
+#define FRF_AB_PCIE_DEQ4_WIDTH 4
+#define FRF_AB_PCIE_DEQ3_LBN 12
+#define FRF_AB_PCIE_DEQ3_WIDTH 4
+#define FRF_AB_PCIE_DEQ2_LBN 8
+#define FRF_AB_PCIE_DEQ2_WIDTH 4
+#define FRF_AB_PCIE_DEQ1_LBN 4
+#define FRF_AB_PCIE_DEQ1_WIDTH 4
+#define FRF_AB_PCIE_DEQ0_LBN 0
+#define FRF_AB_PCIE_DEQ0_WIDTH 4
+
+
+/*
+ * FR_AB_PCIE_PCS_CTL_STAT_REG_SF(128bit):
+ * PCIE PCS control and status register
+ */
+#define FR_AB_PCIE_PCS_CTL_STAT_REG_SF_OFST 0x00000340
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_PCS_CTL_STAT_REG(128bit):
+ * PCIE PCS control and status register
+ */
+#define FR_AB_PCIE_PCS_CTL_STAT_REG_OFST 0x00000340
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define FRF_AB_PCIE_PRBSERR_LBN 40
+#define FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSEL_LBN 0
+#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+
+/*
+ * FR_AB_HW_INIT_REG_SF(128bit):
+ * Hardware initialization register
+ */
+#define FR_AB_HW_INIT_REG_SF_OFST 0x00000350
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AZ_HW_INIT_REG(128bit):
+ * Hardware initialization register
+ */
+#define FR_AZ_HW_INIT_REG_OFST 0x000000c0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
+#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
+#define FRF_CZ_TX_MRG_TAGS_LBN 120
+#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
+#define FRF_AZ_TRGT_MASK_ALL_LBN 100
+#define FRF_AZ_TRGT_MASK_ALL_WIDTH 1
+#define FRF_AZ_DOORBELL_DROP_LBN 92
+#define FRF_AZ_DOORBELL_DROP_WIDTH 8
+#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
+#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
+#define FRF_AB_PE_EIDLE_DIS_LBN 75
+#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
+#define FRF_AZ_FC_BLOCKING_EN_LBN 45
+#define FRF_AZ_FC_BLOCKING_EN_WIDTH 1
+#define FRF_AZ_B2B_REQ_EN_LBN 44
+#define FRF_AZ_B2B_REQ_EN_WIDTH 1
+#define FRF_AZ_POST_WR_MASK_LBN 40
+#define FRF_AZ_POST_WR_MASK_WIDTH 4
+#define FRF_AZ_TLP_TC_LBN 34
+#define FRF_AZ_TLP_TC_WIDTH 3
+#define FRF_AZ_TLP_ATTR_LBN 32
+#define FRF_AZ_TLP_ATTR_WIDTH 2
+#define FRF_AB_INTB_VEC_LBN 24
+#define FRF_AB_INTB_VEC_WIDTH 5
+#define FRF_AB_INTA_VEC_LBN 16
+#define FRF_AB_INTA_VEC_WIDTH 5
+#define FRF_AZ_WD_TIMER_LBN 8
+#define FRF_AZ_WD_TIMER_WIDTH 8
+#define FRF_AZ_US_DISABLE_LBN 5
+#define FRF_AZ_US_DISABLE_WIDTH 1
+#define FRF_AZ_TLP_EP_LBN 4
+#define FRF_AZ_TLP_EP_WIDTH 1
+#define FRF_AZ_ATTR_SEL_LBN 3
+#define FRF_AZ_ATTR_SEL_WIDTH 1
+#define FRF_AZ_TD_SEL_LBN 1
+#define FRF_AZ_TD_SEL_WIDTH 1
+#define FRF_AZ_TLP_TD_LBN 0
+#define FRF_AZ_TLP_TD_WIDTH 1
+
+
+/*
+ * FR_AB_NIC_STAT_REG_SF(128bit):
+ * NIC status register
+ */
+#define FR_AB_NIC_STAT_REG_SF_OFST 0x00000360
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_NIC_STAT_REG(128bit):
+ * NIC status register
+ */
+#define FR_AB_NIC_STAT_REG_OFST 0x00000200
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_BB_AER_DIS_LBN 34
+#define FRF_BB_AER_DIS_WIDTH 1
+#define FRF_BB_EE_STRAP_EN_LBN 31
+#define FRF_BB_EE_STRAP_EN_WIDTH 1
+#define FRF_BB_EE_STRAP_LBN 24
+#define FRF_BB_EE_STRAP_WIDTH 4
+#define FRF_BB_REVISION_ID_LBN 17
+#define FRF_BB_REVISION_ID_WIDTH 7
+#define FRF_AB_ONCHIP_SRAM_LBN 16
+#define FRF_AB_ONCHIP_SRAM_WIDTH 1
+#define FRF_AB_SF_PRST_LBN 9
+#define FRF_AB_SF_PRST_WIDTH 1
+#define FRF_AB_EE_PRST_LBN 8
+#define FRF_AB_EE_PRST_WIDTH 1
+#define FRF_AB_ATE_MODE_LBN 3
+#define FRF_AB_ATE_MODE_WIDTH 1
+#define FRF_AB_STRAP_PINS_LBN 0
+#define FRF_AB_STRAP_PINS_WIDTH 3
+
+
+/*
+ * FR_AB_GLB_CTL_REG_SF(128bit):
+ * Global control register
+ */
+#define FR_AB_GLB_CTL_REG_SF_OFST 0x00000370
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_GLB_CTL_REG(128bit):
+ * Global control register
+ */
+#define FR_AB_GLB_CTL_REG_OFST 0x00000220
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define FRF_AA_PCIX_RST_CTL_LBN 60
+#define FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define FRF_BB_BIU_RST_CTL_LBN 60
+#define FRF_BB_BIU_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define FRF_AB_XGRX_RST_CTL_LBN 56
+#define FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define FRF_AB_XGTX_RST_CTL_LBN 55
+#define FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define FRF_AB_EM_RST_CTL_LBN 54
+#define FRF_AB_EM_RST_CTL_WIDTH 1
+#define FRF_AB_EV_RST_CTL_LBN 53
+#define FRF_AB_EV_RST_CTL_WIDTH 1
+#define FRF_AB_SR_RST_CTL_LBN 52
+#define FRF_AB_SR_RST_CTL_WIDTH 1
+#define FRF_AB_RX_RST_CTL_LBN 51
+#define FRF_AB_RX_RST_CTL_WIDTH 1
+#define FRF_AB_TX_RST_CTL_LBN 50
+#define FRF_AB_TX_RST_CTL_WIDTH 1
+#define FRF_AB_EE_RST_CTL_LBN 49
+#define FRF_AB_EE_RST_CTL_WIDTH 1
+#define FRF_AB_CS_RST_CTL_LBN 48
+#define FRF_AB_CS_RST_CTL_WIDTH 1
+#define FRF_AB_HOT_RST_CTL_LBN 40
+#define FRF_AB_HOT_RST_CTL_WIDTH 2
+#define FRF_AB_RST_EXT_PHY_LBN 31
+#define FRF_AB_RST_EXT_PHY_WIDTH 1
+#define FRF_AB_RST_XAUI_SD_LBN 30
+#define FRF_AB_RST_XAUI_SD_WIDTH 1
+#define FRF_AB_RST_PCIE_SD_LBN 29
+#define FRF_AB_RST_PCIE_SD_WIDTH 1
+#define FRF_AA_RST_PCIX_LBN 28
+#define FRF_AA_RST_PCIX_WIDTH 1
+#define FRF_BB_RST_BIU_LBN 28
+#define FRF_BB_RST_BIU_WIDTH 1
+#define FRF_AB_RST_PCIE_STKY_LBN 27
+#define FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define FRF_AB_RST_PCIE_CORE_LBN 25
+#define FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define FRF_AB_RST_XGRX_LBN 24
+#define FRF_AB_RST_XGRX_WIDTH 1
+#define FRF_AB_RST_XGTX_LBN 23
+#define FRF_AB_RST_XGTX_WIDTH 1
+#define FRF_AB_RST_EM_LBN 22
+#define FRF_AB_RST_EM_WIDTH 1
+#define FRF_AB_RST_EV_LBN 21
+#define FRF_AB_RST_EV_WIDTH 1
+#define FRF_AB_RST_SR_LBN 20
+#define FRF_AB_RST_SR_WIDTH 1
+#define FRF_AB_RST_RX_LBN 19
+#define FRF_AB_RST_RX_WIDTH 1
+#define FRF_AB_RST_TX_LBN 18
+#define FRF_AB_RST_TX_WIDTH 1
+#define FRF_AB_RST_SF_LBN 17
+#define FRF_AB_RST_SF_WIDTH 1
+#define FRF_AB_RST_CS_LBN 16
+#define FRF_AB_RST_CS_WIDTH 1
+#define FRF_AB_INT_RST_DUR_LBN 4
+#define FRF_AB_INT_RST_DUR_WIDTH 3
+#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define FRF_AB_SWRST_LBN 0
+#define FRF_AB_SWRST_WIDTH 1
+
+
+/*
+ * FR_AZ_IOM_IND_ADR_REG(32bit):
+ * IO-mapped indirect access address register
+ */
+#define FR_AZ_IOM_IND_ADR_REG_OFST 0x00000000
+/* falcona0,falconb0,sienaa0=net_func_bar0 */
+
+#define FRF_AZ_IOM_AUTO_ADR_INC_EN_LBN 24
+#define FRF_AZ_IOM_AUTO_ADR_INC_EN_WIDTH 1
+#define FRF_AZ_IOM_IND_ADR_LBN 0
+#define FRF_AZ_IOM_IND_ADR_WIDTH 24
+
+
+/*
+ * FR_AZ_IOM_IND_DAT_REG(32bit):
+ * IO-mapped indirect access data register
+ */
+#define FR_AZ_IOM_IND_DAT_REG_OFST 0x00000004
+/* falcona0,falconb0,sienaa0=net_func_bar0 */
+
+#define FRF_AZ_IOM_IND_DAT_LBN 0
+#define FRF_AZ_IOM_IND_DAT_WIDTH 32
+
+
+/*
+ * FR_AZ_ADR_REGION_REG(128bit):
+ * Address region register
+ */
+#define FR_AZ_ADR_REGION_REG_OFST 0x00000000
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_ADR_REGION3_LBN 96
+#define FRF_AZ_ADR_REGION3_WIDTH 18
+#define FRF_AZ_ADR_REGION2_LBN 64
+#define FRF_AZ_ADR_REGION2_WIDTH 18
+#define FRF_AZ_ADR_REGION1_LBN 32
+#define FRF_AZ_ADR_REGION1_WIDTH 18
+#define FRF_AZ_ADR_REGION0_LBN 0
+#define FRF_AZ_ADR_REGION0_WIDTH 18
+
+
+/*
+ * FR_AZ_INT_EN_REG_KER(128bit):
+ * Kernel driver Interrupt enable register
+ */
+#define FR_AZ_INT_EN_REG_KER_OFST 0x00000010
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_KER_INT_CHAR_LBN 4
+#define FRF_AZ_KER_INT_CHAR_WIDTH 1
+#define FRF_AZ_KER_INT_KER_LBN 3
+#define FRF_AZ_KER_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_KER_LBN 0
+#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
+
+
+/*
+ * FR_AZ_INT_EN_REG_CHAR(128bit):
+ * Char Driver interrupt enable register
+ */
+#define FR_AZ_INT_EN_REG_CHAR_OFST 0x00000020
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_CHAR_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_CHAR_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_CHAR_INT_CHAR_LBN 4
+#define FRF_AZ_CHAR_INT_CHAR_WIDTH 1
+#define FRF_AZ_CHAR_INT_KER_LBN 3
+#define FRF_AZ_CHAR_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_CHAR_LBN 0
+#define FRF_AZ_DRV_INT_EN_CHAR_WIDTH 1
+
+
+/*
+ * FR_AZ_INT_ADR_REG_KER(128bit):
+ * Interrupt host address for Kernel driver
+ */
+#define FR_AZ_INT_ADR_REG_KER_OFST 0x00000030
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
+#define FRF_AZ_INT_ADR_KER_LBN 0
+#define FRF_AZ_INT_ADR_KER_WIDTH 64
+#define FRF_AZ_INT_ADR_KER_DW0_LBN 0
+#define FRF_AZ_INT_ADR_KER_DW0_WIDTH 32
+#define FRF_AZ_INT_ADR_KER_DW1_LBN 32
+#define FRF_AZ_INT_ADR_KER_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_INT_ADR_REG_CHAR(128bit):
+ * Interrupt host address for Char driver
+ */
+#define FR_AZ_INT_ADR_REG_CHAR_OFST 0x00000040
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
+#define FRF_AZ_INT_ADR_CHAR_LBN 0
+#define FRF_AZ_INT_ADR_CHAR_WIDTH 64
+#define FRF_AZ_INT_ADR_CHAR_DW0_LBN 0
+#define FRF_AZ_INT_ADR_CHAR_DW0_WIDTH 32
+#define FRF_AZ_INT_ADR_CHAR_DW1_LBN 32
+#define FRF_AZ_INT_ADR_CHAR_DW1_WIDTH 32
+
+
+/*
+ * FR_AA_INT_ACK_KER(32bit):
+ * Kernel interrupt acknowledge register
+ */
+#define FR_AA_INT_ACK_KER_OFST 0x00000050
+/* falcona0=net_func_bar2 */
+
+#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
+#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
+
+
+/*
+ * FR_BZ_INT_ISR0_REG(128bit):
+ * Function 0 Interrupt Acknowlege Status register
+ */
+#define FR_BZ_INT_ISR0_REG_OFST 0x00000090
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_INT_ISR_REG_LBN 0
+#define FRF_BZ_INT_ISR_REG_WIDTH 64
+#define FRF_BZ_INT_ISR_REG_DW0_LBN 0
+#define FRF_BZ_INT_ISR_REG_DW0_WIDTH 32
+#define FRF_BZ_INT_ISR_REG_DW1_LBN 32
+#define FRF_BZ_INT_ISR_REG_DW1_WIDTH 32
+
+
+/*
+ * FR_AB_EE_SPI_HCMD_REG(128bit):
+ * SPI host command register
+ */
+#define FR_AB_EE_SPI_HCMD_REG_OFST 0x00000100
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
+#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
+#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
+#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
+#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
+#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
+
+
+/*
+ * FR_CZ_USR_EV_CFG(32bit):
+ * User Level Event Configuration register
+ */
+#define FR_CZ_USR_EV_CFG_OFST 0x00000100
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_USREV_DIS_LBN 16
+#define FRF_CZ_USREV_DIS_WIDTH 1
+#define FRF_CZ_DFLT_EVQ_LBN 0
+#define FRF_CZ_DFLT_EVQ_WIDTH 10
+
+
+/*
+ * FR_AB_EE_SPI_HADR_REG(128bit):
+ * SPI host address register
+ */
+#define FR_AB_EE_SPI_HADR_REG_OFST 0x00000110
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
+#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
+#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
+#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
+
+
+/*
+ * FR_AB_EE_SPI_HDATA_REG(128bit):
+ * SPI host data register
+ */
+#define FR_AB_EE_SPI_HDATA_REG_OFST 0x00000120
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HDATA3_LBN 96
+#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA2_LBN 64
+#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA1_LBN 32
+#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA0_LBN 0
+#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
+
+
+/*
+ * FR_AB_EE_BASE_PAGE_REG(128bit):
+ * Expansion ROM base mirror register
+ */
+#define FR_AB_EE_BASE_PAGE_REG_OFST 0x00000130
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_EXPROM_MASK_LBN 16
+#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
+
+
+/*
+ * FR_AB_EE_VPD_SW_CNTL_REG(128bit):
+ * VPD access SW control register
+ */
+#define FR_AB_EE_VPD_SW_CNTL_REG_OFST 0x00000150
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
+#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
+#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
+#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
+
+
+/*
+ * FR_AB_EE_VPD_SW_DATA_REG(128bit):
+ * VPD access SW data register
+ */
+#define FR_AB_EE_VPD_SW_DATA_REG_OFST 0x00000160
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
+#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
+
+
+/*
+ * FR_BB_PCIE_CORE_INDIRECT_REG(64bit):
+ * Indirect Access to PCIE Core registers
+ */
+#define FR_BB_PCIE_CORE_INDIRECT_REG_OFST 0x000001f0
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
+#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
+
+
+/*
+ * FR_AB_GPIO_CTL_REG(128bit):
+ * GPIO control register
+ */
+#define FR_AB_GPIO_CTL_REG_OFST 0x00000210
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GPIO15_OEN_LBN 63
+#define FRF_AB_GPIO15_OEN_WIDTH 1
+#define FRF_AB_GPIO14_OEN_LBN 62
+#define FRF_AB_GPIO14_OEN_WIDTH 1
+#define FRF_AB_GPIO13_OEN_LBN 61
+#define FRF_AB_GPIO13_OEN_WIDTH 1
+#define FRF_AB_GPIO12_OEN_LBN 60
+#define FRF_AB_GPIO12_OEN_WIDTH 1
+#define FRF_AB_GPIO11_OEN_LBN 59
+#define FRF_AB_GPIO11_OEN_WIDTH 1
+#define FRF_AB_GPIO10_OEN_LBN 58
+#define FRF_AB_GPIO10_OEN_WIDTH 1
+#define FRF_AB_GPIO9_OEN_LBN 57
+#define FRF_AB_GPIO9_OEN_WIDTH 1
+#define FRF_AB_GPIO8_OEN_LBN 56
+#define FRF_AB_GPIO8_OEN_WIDTH 1
+#define FRF_AB_GPIO15_OUT_LBN 55
+#define FRF_AB_GPIO15_OUT_WIDTH 1
+#define FRF_AB_GPIO14_OUT_LBN 54
+#define FRF_AB_GPIO14_OUT_WIDTH 1
+#define FRF_AB_GPIO13_OUT_LBN 53
+#define FRF_AB_GPIO13_OUT_WIDTH 1
+#define FRF_AB_GPIO12_OUT_LBN 52
+#define FRF_AB_GPIO12_OUT_WIDTH 1
+#define FRF_AB_GPIO11_OUT_LBN 51
+#define FRF_AB_GPIO11_OUT_WIDTH 1
+#define FRF_AB_GPIO10_OUT_LBN 50
+#define FRF_AB_GPIO10_OUT_WIDTH 1
+#define FRF_AB_GPIO9_OUT_LBN 49
+#define FRF_AB_GPIO9_OUT_WIDTH 1
+#define FRF_AB_GPIO8_OUT_LBN 48
+#define FRF_AB_GPIO8_OUT_WIDTH 1
+#define FRF_AB_GPIO15_IN_LBN 47
+#define FRF_AB_GPIO15_IN_WIDTH 1
+#define FRF_AB_GPIO14_IN_LBN 46
+#define FRF_AB_GPIO14_IN_WIDTH 1
+#define FRF_AB_GPIO13_IN_LBN 45
+#define FRF_AB_GPIO13_IN_WIDTH 1
+#define FRF_AB_GPIO12_IN_LBN 44
+#define FRF_AB_GPIO12_IN_WIDTH 1
+#define FRF_AB_GPIO11_IN_LBN 43
+#define FRF_AB_GPIO11_IN_WIDTH 1
+#define FRF_AB_GPIO10_IN_LBN 42
+#define FRF_AB_GPIO10_IN_WIDTH 1
+#define FRF_AB_GPIO9_IN_LBN 41
+#define FRF_AB_GPIO9_IN_WIDTH 1
+#define FRF_AB_GPIO8_IN_LBN 40
+#define FRF_AB_GPIO8_IN_WIDTH 1
+#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
+#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
+#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
+#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
+#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
+#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
+#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
+#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
+#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
+#define FRF_BB_CLK156_OUT_EN_LBN 31
+#define FRF_BB_CLK156_OUT_EN_WIDTH 1
+#define FRF_BB_USE_NIC_CLK_LBN 30
+#define FRF_BB_USE_NIC_CLK_WIDTH 1
+#define FRF_AB_GPIO5_OEN_LBN 29
+#define FRF_AB_GPIO5_OEN_WIDTH 1
+#define FRF_AB_GPIO4_OEN_LBN 28
+#define FRF_AB_GPIO4_OEN_WIDTH 1
+#define FRF_AB_GPIO3_OEN_LBN 27
+#define FRF_AB_GPIO3_OEN_WIDTH 1
+#define FRF_AB_GPIO2_OEN_LBN 26
+#define FRF_AB_GPIO2_OEN_WIDTH 1
+#define FRF_AB_GPIO1_OEN_LBN 25
+#define FRF_AB_GPIO1_OEN_WIDTH 1
+#define FRF_AB_GPIO0_OEN_LBN 24
+#define FRF_AB_GPIO0_OEN_WIDTH 1
+#define FRF_AB_GPIO5_OUT_LBN 21
+#define FRF_AB_GPIO5_OUT_WIDTH 1
+#define FRF_AB_GPIO4_OUT_LBN 20
+#define FRF_AB_GPIO4_OUT_WIDTH 1
+#define FRF_AB_GPIO3_OUT_LBN 19
+#define FRF_AB_GPIO3_OUT_WIDTH 1
+#define FRF_AB_GPIO2_OUT_LBN 18
+#define FRF_AB_GPIO2_OUT_WIDTH 1
+#define FRF_AB_GPIO1_OUT_LBN 17
+#define FRF_AB_GPIO1_OUT_WIDTH 1
+#define FRF_AB_GPIO0_OUT_LBN 16
+#define FRF_AB_GPIO0_OUT_WIDTH 1
+#define FRF_AB_GPIO5_IN_LBN 13
+#define FRF_AB_GPIO5_IN_WIDTH 1
+#define FRF_AB_GPIO4_IN_LBN 12
+#define FRF_AB_GPIO4_IN_WIDTH 1
+#define FRF_AB_GPIO3_IN_LBN 11
+#define FRF_AB_GPIO3_IN_WIDTH 1
+#define FRF_AB_GPIO2_IN_LBN 10
+#define FRF_AB_GPIO2_IN_WIDTH 1
+#define FRF_AB_GPIO1_IN_LBN 9
+#define FRF_AB_GPIO1_IN_WIDTH 1
+#define FRF_AB_GPIO0_IN_LBN 8
+#define FRF_AB_GPIO0_IN_WIDTH 1
+#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
+#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
+#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
+#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
+#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
+#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
+
+
+/*
+ * FR_AZ_FATAL_INTR_REG_KER(128bit):
+ * Fatal interrupt register for Kernel
+ */
+#define FR_AZ_FATAL_INTR_REG_KER_OFST 0x00000230
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+
+/*
+ * FR_AZ_FATAL_INTR_REG_CHAR(128bit):
+ * Fatal interrupt register for Char
+ */
+#define FR_AZ_FATAL_INTR_REG_CHAR_OFST 0x00000240
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_CHAR_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_CHAR_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_CHAR_LBN 8
+#define FRF_AZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_CHAR_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_CHAR_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_CHAR_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_CHAR_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_LBN 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_CHAR_LBN 0
+#define FRF_AZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+
+/*
+ * FR_AZ_DP_CTRL_REG(128bit):
+ * Datapath control register
+ */
+#define FR_AZ_DP_CTRL_REG_OFST 0x00000250
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_FLS_EVQ_ID_LBN 0
+#define FRF_AZ_FLS_EVQ_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_MEM_STAT_REG(128bit):
+ * Memory status register
+ */
+#define FR_AZ_MEM_STAT_REG_OFST 0x00000260
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MEM_PERR_VEC_LBN 53
+#define FRF_AB_MEM_PERR_VEC_WIDTH 40
+#define FRF_AB_MEM_PERR_VEC_DW0_LBN 53
+#define FRF_AB_MEM_PERR_VEC_DW0_WIDTH 32
+#define FRF_AB_MEM_PERR_VEC_DW1_LBN 85
+#define FRF_AB_MEM_PERR_VEC_DW1_WIDTH 6
+#define FRF_AB_MBIST_CORR_LBN 38
+#define FRF_AB_MBIST_CORR_WIDTH 15
+#define FRF_AB_MBIST_ERR_LBN 0
+#define FRF_AB_MBIST_ERR_WIDTH 40
+#define FRF_AB_MBIST_ERR_DW0_LBN 0
+#define FRF_AB_MBIST_ERR_DW0_WIDTH 32
+#define FRF_AB_MBIST_ERR_DW1_LBN 32
+#define FRF_AB_MBIST_ERR_DW1_WIDTH 6
+#define FRF_CZ_MEM_PERR_VEC_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
+#define FRF_CZ_MEM_PERR_VEC_DW0_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_DW0_WIDTH 32
+#define FRF_CZ_MEM_PERR_VEC_DW1_LBN 32
+#define FRF_CZ_MEM_PERR_VEC_DW1_WIDTH 3
+
+
+/*
+ * FR_PORT0_CS_DEBUG_REG(128bit):
+ * Debug register
+ */
+
+#define FR_AZ_CS_DEBUG_REG_OFST 0x00000270
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define FRF_CZ_CS_PORT_NUM_LBN 40
+#define FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_RESERVED_LBN 36
+#define FRF_CZ_CS_RESERVED_WIDTH 4
+#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_PORT_FPE_DW0_LBN 1
+#define FRF_CZ_CS_PORT_FPE_DW0_WIDTH 32
+#define FRF_CZ_CS_PORT_FPE_DW1_LBN 33
+#define FRF_CZ_CS_PORT_FPE_DW1_WIDTH 3
+#define FRF_CZ_CS_PORT_FPE_LBN 1
+#define FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define FRF_AZ_CS_DEBUG_EN_LBN 0
+#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_DRIVER_REG(128bit):
+ * Driver scratch register [0-7]
+ */
+#define FR_AZ_DRIVER_REG_OFST 0x00000280
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_DRIVER_REG_STEP 16
+#define FR_AZ_DRIVER_REG_ROWS 8
+
+#define FRF_AZ_DRIVER_DW0_LBN 0
+#define FRF_AZ_DRIVER_DW0_WIDTH 32
+
+
+/*
+ * FR_AZ_ALTERA_BUILD_REG(128bit):
+ * Altera build register
+ */
+#define FR_AZ_ALTERA_BUILD_REG_OFST 0x00000300
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+
+/*
+ * FR_AZ_CSR_SPARE_REG(128bit):
+ * Spare register
+ */
+#define FR_AZ_CSR_SPARE_REG_OFST 0x00000310
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_MEM_PERR_EN_TX_DATA_LBN 72
+#define FRF_AZ_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define FRF_AZ_MEM_PERR_EN_LBN 64
+#define FRF_AZ_MEM_PERR_EN_WIDTH 38
+#define FRF_AZ_MEM_PERR_EN_DW0_LBN 64
+#define FRF_AZ_MEM_PERR_EN_DW0_WIDTH 32
+#define FRF_AZ_MEM_PERR_EN_DW1_LBN 96
+#define FRF_AZ_MEM_PERR_EN_DW1_WIDTH 6
+#define FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+
+/*
+ * FR_BZ_DEBUG_DATA_OUT_REG(128bit):
+ * Live Debug and Debug 2 out ports
+ */
+#define FR_BZ_DEBUG_DATA_OUT_REG_OFST 0x00000350
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_DEBUG2_PORT_LBN 25
+#define FRF_BZ_DEBUG2_PORT_WIDTH 15
+#define FRF_BZ_DEBUG1_PORT_LBN 0
+#define FRF_BZ_DEBUG1_PORT_WIDTH 25
+
+
+/*
+ * FR_BZ_EVQ_RPTR_REGP0(32bit):
+ * Event queue read pointer register
+ */
+#define FR_BZ_EVQ_RPTR_REGP0_OFST 0x00000400
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_EVQ_RPTR_REGP0_STEP 8192
+#define FR_BZ_EVQ_RPTR_REGP0_ROWS 1024
+/*
+ * FR_AA_EVQ_RPTR_REG_KER(32bit):
+ * Event queue read pointer register
+ */
+#define FR_AA_EVQ_RPTR_REG_KER_OFST 0x00011b00
+/* falcona0=net_func_bar2 */
+#define FR_AA_EVQ_RPTR_REG_KER_STEP 4
+#define FR_AA_EVQ_RPTR_REG_KER_ROWS 4
+/*
+ * FR_AZ_EVQ_RPTR_REG(32bit):
+ * Event queue read pointer register
+ */
+#define FR_AZ_EVQ_RPTR_REG_OFST 0x00fa0000
+/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_EVQ_RPTR_REG_STEP 16
+#define FR_AB_EVQ_RPTR_REG_ROWS 4096
+#define FR_CZ_EVQ_RPTR_REG_ROWS 1024
+/*
+ * FR_BB_EVQ_RPTR_REGP123(32bit):
+ * Event queue read pointer register
+ */
+#define FR_BB_EVQ_RPTR_REGP123_OFST 0x01000400
+/* falconb0=net_func_bar2 */
+#define FR_BB_EVQ_RPTR_REGP123_STEP 8192
+#define FR_BB_EVQ_RPTR_REGP123_ROWS 3072
+
+#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define FRF_AZ_EVQ_RPTR_LBN 0
+#define FRF_AZ_EVQ_RPTR_WIDTH 15
+
+
+/*
+ * FR_BZ_TIMER_COMMAND_REGP0(128bit):
+ * Timer Command Registers
+ */
+#define FR_BZ_TIMER_COMMAND_REGP0_OFST 0x00000420
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_TIMER_COMMAND_REGP0_STEP 8192
+#define FR_BZ_TIMER_COMMAND_REGP0_ROWS 1024
+/*
+ * FR_AA_TIMER_COMMAND_REG_KER(128bit):
+ * Timer Command Registers
+ */
+#define FR_AA_TIMER_COMMAND_REG_KER_OFST 0x00000420
+/* falcona0=net_func_bar2 */
+#define FR_AA_TIMER_COMMAND_REG_KER_STEP 8192
+#define FR_AA_TIMER_COMMAND_REG_KER_ROWS 4
+/*
+ * FR_AB_TIMER_COMMAND_REGP123(128bit):
+ * Timer Command Registers
+ */
+#define FR_AB_TIMER_COMMAND_REGP123_OFST 0x01000420
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TIMER_COMMAND_REGP123_STEP 8192
+#define FR_AB_TIMER_COMMAND_REGP123_ROWS 3072
+/*
+ * FR_AA_TIMER_COMMAND_REGP0(128bit):
+ * Timer Command Registers
+ */
+#define FR_AA_TIMER_COMMAND_REGP0_OFST 0x00008420
+/* falcona0=char_func_bar0 */
+#define FR_AA_TIMER_COMMAND_REGP0_STEP 8192
+#define FR_AA_TIMER_COMMAND_REGP0_ROWS 1020
+
+#define FRF_CZ_TC_TIMER_MODE_LBN 14
+#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define FRF_AB_TC_TIMER_MODE_LBN 12
+#define FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define FRF_CZ_TC_TIMER_VAL_LBN 0
+#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define FRF_AB_TC_TIMER_VAL_LBN 0
+#define FRF_AB_TC_TIMER_VAL_WIDTH 12
+
+
+/*
+ * FR_AZ_DRV_EV_REG(128bit):
+ * Driver generated event register
+ */
+#define FR_AZ_DRV_EV_REG_OFST 0x00000440
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_DRV_EV_QID_LBN 64
+#define FRF_AZ_DRV_EV_QID_WIDTH 12
+#define FRF_AZ_DRV_EV_DATA_LBN 0
+#define FRF_AZ_DRV_EV_DATA_WIDTH 64
+#define FRF_AZ_DRV_EV_DATA_DW0_LBN 0
+#define FRF_AZ_DRV_EV_DATA_DW0_WIDTH 32
+#define FRF_AZ_DRV_EV_DATA_DW1_LBN 32
+#define FRF_AZ_DRV_EV_DATA_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_EVQ_CTL_REG(128bit):
+ * Event queue control register
+ */
+#define FR_AZ_EVQ_CTL_REG_OFST 0x00000450
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+
+/*
+ * FR_AZ_EVQ_CNT1_REG(128bit):
+ * Event counter 1 register
+ */
+#define FR_AZ_EVQ_CNT1_REG_OFST 0x00000460
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+
+/*
+ * FR_AZ_EVQ_CNT2_REG(128bit):
+ * Event counter 2 register
+ */
+#define FR_AZ_EVQ_CNT2_REG_OFST 0x00000470
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+
+/*
+ * FR_CZ_USR_EV_REG(32bit):
+ * Event mailbox register
+ */
+#define FR_CZ_USR_EV_REG_OFST 0x00000540
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_USR_EV_REG_STEP 8192
+#define FR_CZ_USR_EV_REG_ROWS 1024
+
+#define FRF_CZ_USR_EV_DATA_LBN 0
+#define FRF_CZ_USR_EV_DATA_WIDTH 32
+
+
+/*
+ * FR_AZ_BUF_TBL_CFG_REG(128bit):
+ * Buffer table configuration register
+ */
+#define FR_AZ_BUF_TBL_CFG_REG_OFST 0x00000600
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_BUF_TBL_MODE_LBN 3
+#define FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+
+/*
+ * FR_AZ_SRM_RX_DC_CFG_REG(128bit):
+ * SRAM receive descriptor cache configuration register
+ */
+#define FR_AZ_SRM_RX_DC_CFG_REG_OFST 0x00000610
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+
+/*
+ * FR_AZ_SRM_TX_DC_CFG_REG(128bit):
+ * SRAM transmit descriptor cache configuration register
+ */
+#define FR_AZ_SRM_TX_DC_CFG_REG_OFST 0x00000620
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+
+/*
+ * FR_AZ_SRM_CFG_REG(128bit):
+ * SRAM configuration register
+ */
+#define FR_AZ_SRM_CFG_REG_SF_OFST 0x00000380
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AZ_SRM_CFG_REG(128bit):
+ * SRAM configuration register
+ */
+#define FR_AZ_SRM_CFG_REG_OFST 0x00000630
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define FRF_AZ_SRM_INIT_EN_LBN 3
+#define FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define FRF_AZ_SRM_NUM_BANK_LBN 2
+#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+
+/*
+ * FR_AZ_BUF_TBL_UPD_REG(128bit):
+ * Buffer table update register
+ */
+#define FR_AZ_BUF_TBL_UPD_REG_OFST 0x00000650
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_BUF_UPD_CMD_LBN 63
+#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_CMD_LBN 62
+#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20
+
+
+/*
+ * FR_AZ_SRM_UPD_EVQ_REG(128bit):
+ * Buffer table update register
+ */
+#define FR_AZ_SRM_UPD_EVQ_REG_OFST 0x00000660
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_SRAM_PARITY_REG(128bit):
+ * SRAM parity register.
+ */
+#define FR_AZ_SRAM_PARITY_REG_OFST 0x00000670
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_BYPASS_ECC_LBN 3
+#define FRF_CZ_BYPASS_ECC_WIDTH 1
+#define FRF_CZ_SEC_INT_LBN 2
+#define FRF_CZ_SEC_INT_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+#define FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+
+
+/*
+ * FR_AZ_RX_CFG_REG(128bit):
+ * Receive configuration register
+ */
+#define FR_AZ_RX_CFG_REG_OFST 0x00000800
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define FRF_BZ_RX_TCP_SUP_LBN 48
+#define FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define FRF_BZ_RX_INGR_EN_LBN 47
+#define FRF_BZ_RX_INGR_EN_WIDTH 1
+#define FRF_BZ_RX_IP_HASH_LBN 46
+#define FRF_BZ_RX_IP_HASH_WIDTH 1
+#define FRF_BZ_RX_HASH_ALG_LBN 45
+#define FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define FRF_BZ_RX_XON_TX_TH_LBN 33
+#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_OWNERR_CTL_LBN 30
+#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_RX_XON_TX_TH_LBN 25
+#define FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XON_MAC_TH_LBN 6
+#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_RX_FILTER_CTL_REG(128bit):
+ * Receive filter control registers
+ */
+#define FR_AZ_RX_FILTER_CTL_REG_OFST 0x00000810
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define FRF_AZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define FRF_AZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_NUM_KER_LBN 24
+#define FRF_AZ_NUM_KER_WIDTH 2
+#define FRF_AZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define FRF_AZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define FRF_AZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define FRF_AZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+
+/*
+ * FR_AZ_RX_FLUSH_DESCQ_REG(128bit):
+ * Receive flush descriptor queue register
+ */
+#define FR_AZ_RX_FLUSH_DESCQ_REG_OFST 0x00000820
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+
+/*
+ * FR_BZ_RX_DESC_UPD_REGP0(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_BZ_RX_DESC_UPD_REGP0_OFST 0x00000830
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_RX_DESC_UPD_REGP0_STEP 8192
+#define FR_BZ_RX_DESC_UPD_REGP0_ROWS 1024
+/*
+ * FR_AA_RX_DESC_UPD_REG_KER(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AA_RX_DESC_UPD_REG_KER_OFST 0x00000830
+/* falcona0=net_func_bar2 */
+#define FR_AA_RX_DESC_UPD_REG_KER_STEP 8192
+#define FR_AA_RX_DESC_UPD_REG_KER_ROWS 4
+/*
+ * FR_AB_RX_DESC_UPD_REGP123(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AB_RX_DESC_UPD_REGP123_OFST 0x01000830
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_RX_DESC_UPD_REGP123_STEP 8192
+#define FR_AB_RX_DESC_UPD_REGP123_ROWS 3072
+/*
+ * FR_AA_RX_DESC_UPD_REGP0(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AA_RX_DESC_UPD_REGP0_OFST 0x00008830
+/* falcona0=char_func_bar0 */
+#define FR_AA_RX_DESC_UPD_REGP0_STEP 8192
+#define FR_AA_RX_DESC_UPD_REGP0_ROWS 1020
+
+#define FRF_AZ_RX_DESC_WPTR_LBN 96
+#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_RX_DESC_LBN 0
+#define FRF_AZ_RX_DESC_WIDTH 64
+#define FRF_AZ_RX_DESC_DW0_LBN 0
+#define FRF_AZ_RX_DESC_DW0_WIDTH 32
+#define FRF_AZ_RX_DESC_DW1_LBN 32
+#define FRF_AZ_RX_DESC_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_DC_CFG_REG(128bit):
+ * Receive descriptor cache configuration register
+ */
+#define FR_AZ_RX_DC_CFG_REG_OFST 0x00000840
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_MAX_PF_LBN 2
+#define FRF_AZ_RX_MAX_PF_WIDTH 2
+#define FRF_AZ_RX_DC_SIZE_LBN 0
+#define FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define FFE_AZ_RX_DC_SIZE_64 3
+#define FFE_AZ_RX_DC_SIZE_32 2
+#define FFE_AZ_RX_DC_SIZE_16 1
+#define FFE_AZ_RX_DC_SIZE_8 0
+
+
+/*
+ * FR_AZ_RX_DC_PF_WM_REG(128bit):
+ * Receive descriptor cache pre-fetch watermark register
+ */
+#define FR_AZ_RX_DC_PF_WM_REG_OFST 0x00000850
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+
+/*
+ * FR_BZ_RX_RSS_TKEY_REG(128bit):
+ * RSS Toeplitz hash key
+ */
+#define FR_BZ_RX_RSS_TKEY_REG_OFST 0x00000860
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_RX_RSS_TKEY_LBN 96
+#define FRF_BZ_RX_RSS_TKEY_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW3_LBN 96
+#define FRF_BZ_RX_RSS_TKEY_DW3_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW2_LBN 64
+#define FRF_BZ_RX_RSS_TKEY_DW2_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW1_LBN 32
+#define FRF_BZ_RX_RSS_TKEY_DW1_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW0_LBN 0
+#define FRF_BZ_RX_RSS_TKEY_DW0_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_NODESC_DROP_REG(128bit):
+ * Receive dropped packet counter register
+ */
+#define FR_AZ_RX_NODESC_DROP_REG_OFST 0x00000880
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_AZ_RX_NODESC_DROP_CNT_WIDTH 16
+
+
+/*
+ * FR_AZ_RX_SELF_RST_REG(128bit):
+ * Receive self reset register
+ */
+#define FR_AZ_RX_SELF_RST_REG_OFST 0x00000890
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_ISCSI_DIS_LBN 17
+#define FRF_AZ_RX_ISCSI_DIS_WIDTH 1
+#define FRF_AB_RX_SW_RST_REG_LBN 16
+#define FRF_AB_RX_SW_RST_REG_WIDTH 1
+#define FRF_AB_RX_SELF_RST_EN_LBN 8
+#define FRF_AB_RX_SELF_RST_EN_WIDTH 1
+#define FRF_AZ_RX_MAX_PF_LAT_LBN 4
+#define FRF_AZ_RX_MAX_PF_LAT_WIDTH 4
+#define FRF_AZ_RX_MAX_LU_LAT_LBN 0
+#define FRF_AZ_RX_MAX_LU_LAT_WIDTH 4
+
+
+/*
+ * FR_AZ_RX_DEBUG_REG(128bit):
+ * undocumented register
+ */
+#define FR_AZ_RX_DEBUG_REG_OFST 0x000008a0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_DEBUG_LBN 0
+#define FRF_AZ_RX_DEBUG_WIDTH 64
+#define FRF_AZ_RX_DEBUG_DW0_LBN 0
+#define FRF_AZ_RX_DEBUG_DW0_WIDTH 32
+#define FRF_AZ_RX_DEBUG_DW1_LBN 32
+#define FRF_AZ_RX_DEBUG_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_PUSH_DROP_REG(128bit):
+ * Receive descriptor push dropped counter register
+ */
+#define FR_AZ_RX_PUSH_DROP_REG_OFST 0x000008b0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG1(128bit):
+ * IPv6 RSS Toeplitz hash key low bytes
+ */
+#define FR_CZ_RX_RSS_IPV6_REG1_OFST 0x000008d0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_LBN 96
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG2(128bit):
+ * IPv6 RSS Toeplitz hash key middle bytes
+ */
+#define FR_CZ_RX_RSS_IPV6_REG2_OFST 0x000008e0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_LBN 96
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG3(128bit):
+ * IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings
+ */
+#define FR_CZ_RX_RSS_IPV6_REG3_OFST 0x000008f0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_FLUSH_DESCQ_REG(128bit):
+ * Transmit flush descriptor queue register
+ */
+#define FR_AZ_TX_FLUSH_DESCQ_REG_OFST 0x00000a00
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+
+/*
+ * FR_BZ_TX_DESC_UPD_REGP0(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_BZ_TX_DESC_UPD_REGP0_OFST 0x00000a10
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_TX_DESC_UPD_REGP0_STEP 8192
+#define FR_BZ_TX_DESC_UPD_REGP0_ROWS 1024
+/*
+ * FR_AA_TX_DESC_UPD_REG_KER(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AA_TX_DESC_UPD_REG_KER_OFST 0x00000a10
+/* falcona0=net_func_bar2 */
+#define FR_AA_TX_DESC_UPD_REG_KER_STEP 8192
+#define FR_AA_TX_DESC_UPD_REG_KER_ROWS 8
+/*
+ * FR_AB_TX_DESC_UPD_REGP123(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AB_TX_DESC_UPD_REGP123_OFST 0x01000a10
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TX_DESC_UPD_REGP123_STEP 8192
+#define FR_AB_TX_DESC_UPD_REGP123_ROWS 3072
+/*
+ * FR_AA_TX_DESC_UPD_REGP0(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AA_TX_DESC_UPD_REGP0_OFST 0x00008a10
+/* falcona0=char_func_bar0 */
+#define FR_AA_TX_DESC_UPD_REGP0_STEP 8192
+#define FR_AA_TX_DESC_UPD_REGP0_ROWS 1020
+
+#define FRF_AZ_TX_DESC_WPTR_LBN 96
+#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_TX_DESC_LBN 0
+#define FRF_AZ_TX_DESC_WIDTH 95
+#define FRF_AZ_TX_DESC_DW0_LBN 0
+#define FRF_AZ_TX_DESC_DW0_WIDTH 32
+#define FRF_AZ_TX_DESC_DW1_LBN 32
+#define FRF_AZ_TX_DESC_DW1_WIDTH 32
+#define FRF_AZ_TX_DESC_DW2_LBN 64
+#define FRF_AZ_TX_DESC_DW2_WIDTH 31
+
+
+/*
+ * FR_AZ_TX_DC_CFG_REG(128bit):
+ * Transmit descriptor cache configuration register
+ */
+#define FR_AZ_TX_DC_CFG_REG_OFST 0x00000a20
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_DC_SIZE_LBN 0
+#define FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define FFE_AZ_TX_DC_SIZE_32 2
+#define FFE_AZ_TX_DC_SIZE_16 1
+#define FFE_AZ_TX_DC_SIZE_8 0
+
+
+/*
+ * FR_AA_TX_CHKSM_CFG_REG(128bit):
+ * Transmit checksum configuration register
+ */
+#define FR_AA_TX_CHKSM_CFG_REG_OFST 0x00000a30
+/* falcona0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_CFG_REG(128bit):
+ * Transmit configuration register
+ */
+#define FR_AZ_TX_CFG_REG_OFST 0x00000a50
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_TX_PUSH_DROP_REG(128bit):
+ * Transmit push dropped register
+ */
+#define FR_AZ_TX_PUSH_DROP_REG_OFST 0x00000a60
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_RESERVED_REG(128bit):
+ * Transmit configuration register
+ */
+#define FR_AZ_TX_RESERVED_REG_OFST 0x00000a80
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_EVT_CNT_LBN 121
+#define FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define FRF_AZ_TX_PUSH_EN_LBN 89
+#define FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define FRF_AZ_TX_DMAQ_ST_LBN 78
+#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_LBN 64
+#define FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define FRF_AZ_TX_XP_TIMER_LBN 52
+#define FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define FRF_AZ_TX_PREF_SPACER_LBN 44
+#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define FRF_AZ_TX_ONLY1TAG_LBN 21
+#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define FRF_AA_TX_DMA_FF_THR_LBN 16
+#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define FRF_AZ_TX_DMA_SPACER_LBN 8
+#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define FRF_AA_TX_TCP_DIS_LBN 7
+#define FRF_AA_TX_TCP_DIS_WIDTH 1
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define FRF_AA_TX_IP_DIS_LBN 6
+#define FRF_AA_TX_IP_DIS_WIDTH 1
+#define FRF_AZ_TX_MAX_CPL_LBN 2
+#define FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define FFE_AZ_TX_MAX_CPL_16 3
+#define FFE_AZ_TX_MAX_CPL_8 2
+#define FFE_AZ_TX_MAX_CPL_4 1
+#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define FRF_AZ_TX_MAX_PREF_LBN 0
+#define FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define FFE_AZ_TX_MAX_PREF_32 3
+#define FFE_AZ_TX_MAX_PREF_16 2
+#define FFE_AZ_TX_MAX_PREF_8 1
+#define FFE_AZ_TX_MAX_PREF_OFF 0
+
+
+/*
+ * FR_BZ_TX_PACE_REG(128bit):
+ * Transmit pace control register
+ */
+#define FR_BZ_TX_PACE_REG_OFST 0x00000a90
+/* falconb0,sienaa0=net_func_bar2 */
+/*
+ * FR_AA_TX_PACE_REG(128bit):
+ * Transmit pace control register
+ */
+#define FR_AA_TX_PACE_REG_OFST 0x00f80000
+/* falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PACE_SB_NOT_AF_LBN 19
+#define FRF_AZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define FRF_AZ_TX_PACE_SB_AF_LBN 9
+#define FRF_AZ_TX_PACE_SB_AF_WIDTH 10
+#define FRF_AZ_TX_PACE_FB_BASE_LBN 5
+#define FRF_AZ_TX_PACE_FB_BASE_WIDTH 4
+#define FRF_AZ_TX_PACE_BIN_TH_LBN 0
+#define FRF_AZ_TX_PACE_BIN_TH_WIDTH 5
+
+
+/*
+ * FR_AZ_TX_PACE_DROP_QID_REG(128bit):
+ * PACE Drop QID Counter
+ */
+#define FR_AZ_TX_PACE_DROP_QID_REG_OFST 0x00000aa0
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define FRF_AZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+
+/*
+ * FR_AB_TX_VLAN_REG(128bit):
+ * Transmit VLAN tag register
+ */
+#define FR_AB_TX_VLAN_REG_OFST 0x00000ae0
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_TX_VLAN_EN_LBN 127
+#define FRF_AB_TX_VLAN_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_PORT1_EN_LBN 125
+#define FRF_AB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_PORT0_EN_LBN 124
+#define FRF_AB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_LBN 112
+#define FRF_AB_TX_VLAN7_WIDTH 12
+#define FRF_AB_TX_VLAN6_PORT1_EN_LBN 109
+#define FRF_AB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN6_PORT0_EN_LBN 108
+#define FRF_AB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN6_LBN 96
+#define FRF_AB_TX_VLAN6_WIDTH 12
+#define FRF_AB_TX_VLAN5_PORT1_EN_LBN 93
+#define FRF_AB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN5_PORT0_EN_LBN 92
+#define FRF_AB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN5_LBN 80
+#define FRF_AB_TX_VLAN5_WIDTH 12
+#define FRF_AB_TX_VLAN4_PORT1_EN_LBN 77
+#define FRF_AB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN4_PORT0_EN_LBN 76
+#define FRF_AB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN4_LBN 64
+#define FRF_AB_TX_VLAN4_WIDTH 12
+#define FRF_AB_TX_VLAN3_PORT1_EN_LBN 61
+#define FRF_AB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN3_PORT0_EN_LBN 60
+#define FRF_AB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN3_LBN 48
+#define FRF_AB_TX_VLAN3_WIDTH 12
+#define FRF_AB_TX_VLAN2_PORT1_EN_LBN 45
+#define FRF_AB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN2_PORT0_EN_LBN 44
+#define FRF_AB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN2_LBN 32
+#define FRF_AB_TX_VLAN2_WIDTH 12
+#define FRF_AB_TX_VLAN1_PORT1_EN_LBN 29
+#define FRF_AB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN1_PORT0_EN_LBN 28
+#define FRF_AB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN1_LBN 16
+#define FRF_AB_TX_VLAN1_WIDTH 12
+#define FRF_AB_TX_VLAN0_PORT1_EN_LBN 13
+#define FRF_AB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN0_PORT0_EN_LBN 12
+#define FRF_AB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN0_LBN 0
+#define FRF_AB_TX_VLAN0_WIDTH 12
+
+
+/*
+ * FR_AZ_TX_IPFIL_PORTEN_REG(128bit):
+ * Transmit filter control register
+ */
+#define FR_AZ_TX_IPFIL_PORTEN_REG_OFST 0x00000af0
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_MADR0_FIL_EN_LBN 64
+#define FRF_AZ_TX_MADR0_FIL_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL31_PORT_EN_LBN 62
+#define FRF_AB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL30_PORT_EN_LBN 60
+#define FRF_AB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL29_PORT_EN_LBN 58
+#define FRF_AB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL28_PORT_EN_LBN 56
+#define FRF_AB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL27_PORT_EN_LBN 54
+#define FRF_AB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL26_PORT_EN_LBN 52
+#define FRF_AB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL25_PORT_EN_LBN 50
+#define FRF_AB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL24_PORT_EN_LBN 48
+#define FRF_AB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL23_PORT_EN_LBN 46
+#define FRF_AB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL22_PORT_EN_LBN 44
+#define FRF_AB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL21_PORT_EN_LBN 42
+#define FRF_AB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL20_PORT_EN_LBN 40
+#define FRF_AB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL19_PORT_EN_LBN 38
+#define FRF_AB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL18_PORT_EN_LBN 36
+#define FRF_AB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL17_PORT_EN_LBN 34
+#define FRF_AB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL16_PORT_EN_LBN 32
+#define FRF_AB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL15_PORT_EN_LBN 30
+#define FRF_AB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL14_PORT_EN_LBN 28
+#define FRF_AB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL13_PORT_EN_LBN 26
+#define FRF_AB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL12_PORT_EN_LBN 24
+#define FRF_AB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL11_PORT_EN_LBN 22
+#define FRF_AB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL10_PORT_EN_LBN 20
+#define FRF_AB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL9_PORT_EN_LBN 18
+#define FRF_AB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL8_PORT_EN_LBN 16
+#define FRF_AB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL7_PORT_EN_LBN 14
+#define FRF_AB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL6_PORT_EN_LBN 12
+#define FRF_AB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL5_PORT_EN_LBN 10
+#define FRF_AB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL4_PORT_EN_LBN 8
+#define FRF_AB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL3_PORT_EN_LBN 6
+#define FRF_AB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL2_PORT_EN_LBN 4
+#define FRF_AB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL1_PORT_EN_LBN 2
+#define FRF_AB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL0_PORT_EN_LBN 0
+#define FRF_AB_TX_IPFIL0_PORT_EN_WIDTH 1
+
+
+/*
+ * FR_AB_TX_IPFIL_TBL(128bit):
+ * Transmit IP source address filter table
+ */
+#define FR_AB_TX_IPFIL_TBL_OFST 0x00000b00
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TX_IPFIL_TBL_STEP 16
+#define FR_AB_TX_IPFIL_TBL_ROWS 16
+
+#define FRF_AB_TX_IPFIL_MASK_1_LBN 96
+#define FRF_AB_TX_IPFIL_MASK_1_WIDTH 32
+#define FRF_AB_TX_IP_SRC_ADR_1_LBN 64
+#define FRF_AB_TX_IP_SRC_ADR_1_WIDTH 32
+#define FRF_AB_TX_IPFIL_MASK_0_LBN 32
+#define FRF_AB_TX_IPFIL_MASK_0_WIDTH 32
+#define FRF_AB_TX_IP_SRC_ADR_0_LBN 0
+#define FRF_AB_TX_IP_SRC_ADR_0_WIDTH 32
+
+
+/*
+ * FR_AB_MD_TXD_REG(128bit):
+ * PHY management transmit data register
+ */
+#define FR_AB_MD_TXD_REG_OFST 0x00000c00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_TXD_LBN 0
+#define FRF_AB_MD_TXD_WIDTH 16
+
+
+/*
+ * FR_AB_MD_RXD_REG(128bit):
+ * PHY management receive data register
+ */
+#define FR_AB_MD_RXD_REG_OFST 0x00000c10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_RXD_LBN 0
+#define FRF_AB_MD_RXD_WIDTH 16
+
+
+/*
+ * FR_AB_MD_CS_REG(128bit):
+ * PHY management configuration & status register
+ */
+#define FR_AB_MD_CS_REG_OFST 0x00000c20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_RD_EN_LBN 15
+#define FRF_AB_MD_RD_EN_WIDTH 1
+#define FRF_AB_MD_WR_EN_LBN 14
+#define FRF_AB_MD_WR_EN_WIDTH 1
+#define FRF_AB_MD_ADDR_CMD_LBN 13
+#define FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define FRF_AB_MD_PT_LBN 7
+#define FRF_AB_MD_PT_WIDTH 3
+#define FRF_AB_MD_PL_LBN 6
+#define FRF_AB_MD_PL_WIDTH 1
+#define FRF_AB_MD_INT_CLR_LBN 5
+#define FRF_AB_MD_INT_CLR_WIDTH 1
+#define FRF_AB_MD_GC_LBN 4
+#define FRF_AB_MD_GC_WIDTH 1
+#define FRF_AB_MD_PRSP_LBN 3
+#define FRF_AB_MD_PRSP_WIDTH 1
+#define FRF_AB_MD_RIC_LBN 2
+#define FRF_AB_MD_RIC_WIDTH 1
+#define FRF_AB_MD_RDC_LBN 1
+#define FRF_AB_MD_RDC_WIDTH 1
+#define FRF_AB_MD_WRC_LBN 0
+#define FRF_AB_MD_WRC_WIDTH 1
+
+
+/*
+ * FR_AB_MD_PHY_ADR_REG(128bit):
+ * PHY management PHY address register
+ */
+#define FR_AB_MD_PHY_ADR_REG_OFST 0x00000c30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PHY_ADR_LBN 0
+#define FRF_AB_MD_PHY_ADR_WIDTH 16
+
+
+/*
+ * FR_AB_MD_ID_REG(128bit):
+ * PHY management ID register
+ */
+#define FR_AB_MD_ID_REG_OFST 0x00000c40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PRT_ADR_LBN 11
+#define FRF_AB_MD_PRT_ADR_WIDTH 5
+#define FRF_AB_MD_DEV_ADR_LBN 6
+#define FRF_AB_MD_DEV_ADR_WIDTH 5
+
+
+/*
+ * FR_AB_MD_STAT_REG(128bit):
+ * PHY management status & mask register
+ */
+#define FR_AB_MD_STAT_REG_OFST 0x00000c50
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PINT_LBN 4
+#define FRF_AB_MD_PINT_WIDTH 1
+#define FRF_AB_MD_DONE_LBN 3
+#define FRF_AB_MD_DONE_WIDTH 1
+#define FRF_AB_MD_BSERR_LBN 2
+#define FRF_AB_MD_BSERR_WIDTH 1
+#define FRF_AB_MD_LNFL_LBN 1
+#define FRF_AB_MD_LNFL_WIDTH 1
+#define FRF_AB_MD_BSY_LBN 0
+#define FRF_AB_MD_BSY_WIDTH 1
+
+
+/*
+ * FR_AB_MAC_STAT_DMA_REG(128bit):
+ * Port MAC statistical counter DMA register
+ */
+#define FR_AB_MAC_STAT_DMA_REG_OFST 0x00000c60
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+#define FRF_AB_MAC_STAT_DMA_ADR_DW0_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_DW0_WIDTH 32
+#define FRF_AB_MAC_STAT_DMA_ADR_DW1_LBN 32
+#define FRF_AB_MAC_STAT_DMA_ADR_DW1_WIDTH 16
+
+
+/*
+ * FR_AB_MAC_CTRL_REG(128bit):
+ * Port MAC control register
+ */
+#define FR_AB_MAC_CTRL_REG_OFST 0x00000c80
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_XOFF_VAL_LBN 16
+#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define FRF_AB_MAC_UC_PROM_LBN 3
+#define FRF_AB_MAC_UC_PROM_WIDTH 1
+#define FRF_AB_MAC_LINK_STATUS_LBN 2
+#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define FRF_AB_MAC_SPEED_LBN 0
+#define FRF_AB_MAC_SPEED_WIDTH 2
+#define FRF_AB_MAC_SPEED_10M 0
+#define FRF_AB_MAC_SPEED_100M 1
+#define FRF_AB_MAC_SPEED_1G 2
+#define FRF_AB_MAC_SPEED_10G 3
+
+/*
+ * FR_BB_GEN_MODE_REG(128bit):
+ * General Purpose mode register (external interrupt mask)
+ */
+#define FR_BB_GEN_MODE_REG_OFST 0x00000c90
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+
+/*
+ * FR_AB_MAC_MC_HASH_REG0(128bit):
+ * Multicast address hash table
+ */
+#define FR_AB_MAC_MC_HASH0_REG_OFST 0x00000ca0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+#define FRF_AB_MAC_MCAST_HASH0_DW0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_DW0_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW1_LBN 32
+#define FRF_AB_MAC_MCAST_HASH0_DW1_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW2_LBN 64
+#define FRF_AB_MAC_MCAST_HASH0_DW2_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW3_LBN 96
+#define FRF_AB_MAC_MCAST_HASH0_DW3_WIDTH 32
+
+
+/*
+ * FR_AB_MAC_MC_HASH_REG1(128bit):
+ * Multicast address hash table
+ */
+#define FR_AB_MAC_MC_HASH1_REG_OFST 0x00000cb0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+#define FRF_AB_MAC_MCAST_HASH1_DW0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_DW0_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW1_LBN 32
+#define FRF_AB_MAC_MCAST_HASH1_DW1_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW2_LBN 64
+#define FRF_AB_MAC_MCAST_HASH1_DW2_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW3_LBN 96
+#define FRF_AB_MAC_MCAST_HASH1_DW3_WIDTH 32
+
+
+/*
+ * FR_AB_GM_CFG1_REG(32bit):
+ * GMAC configuration register 1
+ */
+#define FR_AB_GM_CFG1_REG_OFST 0x00000e00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_SW_RST_LBN 31
+#define FRF_AB_GM_SW_RST_WIDTH 1
+#define FRF_AB_GM_SIM_RST_LBN 30
+#define FRF_AB_GM_SIM_RST_WIDTH 1
+#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define FRF_AB_GM_LOOP_LBN 8
+#define FRF_AB_GM_LOOP_WIDTH 1
+#define FRF_AB_GM_RX_FC_EN_LBN 5
+#define FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define FRF_AB_GM_TX_FC_EN_LBN 4
+#define FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_RXEN_LBN 3
+#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define FRF_AB_GM_RX_EN_LBN 2
+#define FRF_AB_GM_RX_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_TXEN_LBN 1
+#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define FRF_AB_GM_TX_EN_LBN 0
+#define FRF_AB_GM_TX_EN_WIDTH 1
+
+
+/*
+ * FR_AB_GM_CFG2_REG(32bit):
+ * GMAC configuration register 2
+ */
+#define FR_AB_GM_CFG2_REG_OFST 0x00000e10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_PAMBL_LEN_LBN 12
+#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define FRF_AB_GM_IF_MODE_LBN 8
+#define FRF_AB_GM_IF_MODE_WIDTH 2
+#define FRF_AB_GM_IF_MODE_BYTE_MODE 2
+#define FRF_AB_GM_IF_MODE_NIBBLE_MODE 1
+#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define FRF_AB_GM_LEN_CHK_LBN 4
+#define FRF_AB_GM_LEN_CHK_WIDTH 1
+#define FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define FRF_AB_GM_CRC_EN_LBN 1
+#define FRF_AB_GM_CRC_EN_WIDTH 1
+#define FRF_AB_GM_FD_LBN 0
+#define FRF_AB_GM_FD_WIDTH 1
+
+
+/*
+ * FR_AB_GM_IPG_REG(32bit):
+ * GMAC IPG register
+ */
+#define FR_AB_GM_IPG_REG_OFST 0x00000e20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define FRF_AB_GM_B2B_IPG_LBN 0
+#define FRF_AB_GM_B2B_IPG_WIDTH 7
+
+
+/*
+ * FR_AB_GM_HD_REG(32bit):
+ * GMAC half duplex register
+ */
+#define FR_AB_GM_HD_REG_OFST 0x00000e30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define FRF_AB_GM_DIS_BOFF_LBN 17
+#define FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define FRF_AB_GM_COL_WIN_LBN 0
+#define FRF_AB_GM_COL_WIN_WIDTH 10
+
+
+/*
+ * FR_AB_GM_MAX_FLEN_REG(32bit):
+ * GMAC maximum frame length register
+ */
+#define FR_AB_GM_MAX_FLEN_REG_OFST 0x00000e40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_MAX_FLEN_LBN 0
+#define FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+
+/*
+ * FR_AB_GM_TEST_REG(32bit):
+ * GMAC test register
+ */
+#define FR_AB_GM_TEST_REG_OFST 0x00000e70
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_MAX_BOFF_LBN 3
+#define FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define FRF_AB_GM_TEST_PAUSE_LBN 1
+#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define FRF_AB_GM_SHORT_SLOT_LBN 0
+#define FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+
+/*
+ * FR_AB_GM_ADR1_REG(32bit):
+ * GMAC station address register 1
+ */
+#define FR_AB_GM_ADR1_REG_OFST 0x00000f00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ADR_B0_LBN 24
+#define FRF_AB_GM_ADR_B0_WIDTH 8
+#define FRF_AB_GM_ADR_B1_LBN 16
+#define FRF_AB_GM_ADR_B1_WIDTH 8
+#define FRF_AB_GM_ADR_B2_LBN 8
+#define FRF_AB_GM_ADR_B2_WIDTH 8
+#define FRF_AB_GM_ADR_B3_LBN 0
+#define FRF_AB_GM_ADR_B3_WIDTH 8
+
+
+/*
+ * FR_AB_GM_ADR2_REG(32bit):
+ * GMAC station address register 2
+ */
+#define FR_AB_GM_ADR2_REG_OFST 0x00000f10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ADR_B4_LBN 24
+#define FRF_AB_GM_ADR_B4_WIDTH 8
+#define FRF_AB_GM_ADR_B5_LBN 16
+#define FRF_AB_GM_ADR_B5_WIDTH 8
+
+
+/*
+ * FR_AB_GMF_CFG0_REG(32bit):
+ * GMAC FIFO configuration register 0
+ */
+#define FR_AB_GMF_CFG0_REG_OFST 0x00000f20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_FTFENRPLY_LBN 20
+#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define FRF_AB_GMF_STFENRPLY_LBN 19
+#define FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define FRF_AB_GMF_FRFENRPLY_LBN 18
+#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_SRFENRPLY_LBN 17
+#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_WTMENRPLY_LBN 16
+#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define FRF_AB_GMF_FTFENREQ_LBN 12
+#define FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define FRF_AB_GMF_STFENREQ_LBN 11
+#define FRF_AB_GMF_STFENREQ_WIDTH 1
+#define FRF_AB_GMF_FRFENREQ_LBN 10
+#define FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define FRF_AB_GMF_SRFENREQ_LBN 9
+#define FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define FRF_AB_GMF_WTMENREQ_LBN 8
+#define FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFT_LBN 4
+#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define FRF_AB_GMF_HSTRSTST_LBN 3
+#define FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFR_LBN 2
+#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTSR_LBN 1
+#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTWT_LBN 0
+#define FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+
+/*
+ * FR_AB_GMF_CFG1_REG(32bit):
+ * GMAC FIFO configuration register 1
+ */
+#define FR_AB_GMF_CFG1_REG_OFST 0x00000f30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGFRTH_LBN 16
+#define FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+
+/*
+ * FR_AB_GMF_CFG2_REG(32bit):
+ * GMAC FIFO configuration register 2
+ */
+#define FR_AB_GMF_CFG2_REG_OFST 0x00000f40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHWM_LBN 16
+#define FRF_AB_GMF_CFGHWM_WIDTH 6
+#define FRF_AB_GMF_CFGLWM_LBN 0
+#define FRF_AB_GMF_CFGLWM_WIDTH 6
+
+
+/*
+ * FR_AB_GMF_CFG3_REG(32bit):
+ * GMAC FIFO configuration register 3
+ */
+#define FR_AB_GMF_CFG3_REG_OFST 0x00000f50
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHWMFT_LBN 16
+#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define FRF_AB_GMF_CFGFTTH_LBN 0
+#define FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+
+/*
+ * FR_AB_GMF_CFG4_REG(32bit):
+ * GMAC FIFO configuration register 4
+ */
+#define FR_AB_GMF_CFG4_REG_OFST 0x00000f60
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+
+/*
+ * FR_AB_GMF_CFG5_REG(32bit):
+ * GMAC FIFO configuration register 5
+ */
+#define FR_AB_GMF_CFG5_REG_OFST 0x00000f70
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHDPLX_LBN 22
+#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define FRF_AB_GMF_SRFULL_LBN 21
+#define FRF_AB_GMF_SRFULL_WIDTH 1
+#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+
+/*
+ * FR_BB_TX_SRC_MAC_TBL(128bit):
+ * Transmit IP source address filter table
+ */
+#define FR_BB_TX_SRC_MAC_TBL_OFST 0x00001000
+/* falconb0=net_func_bar2 */
+#define FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
+
+#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_WIDTH 32
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_LBN 96
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_WIDTH 16
+#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_WIDTH 32
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_LBN 32
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_WIDTH 16
+
+
+/*
+ * FR_BB_TX_SRC_MAC_CTL_REG(128bit):
+ * Transmit MAC source address filter control
+ */
+#define FR_BB_TX_SRC_MAC_CTL_REG_OFST 0x00001100
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+
+/*
+ * FR_AB_XM_ADR_LO_REG(128bit):
+ * XGMAC address register low
+ */
+#define FR_AB_XM_ADR_LO_REG_OFST 0x00001200
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_ADR_LO_LBN 0
+#define FRF_AB_XM_ADR_LO_WIDTH 32
+
+
+/*
+ * FR_AB_XM_ADR_HI_REG(128bit):
+ * XGMAC address register high
+ */
+#define FR_AB_XM_ADR_HI_REG_OFST 0x00001210
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_ADR_HI_LBN 0
+#define FRF_AB_XM_ADR_HI_WIDTH 16
+
+
+/*
+ * FR_AB_XM_GLB_CFG_REG(128bit):
+ * XGMAC global configuration
+ */
+#define FR_AB_XM_GLB_CFG_REG_OFST 0x00001220
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define FRF_AB_XM_DEBUG_MODE_LBN 16
+#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define FRF_AB_XM_RX_STAT_EN_LBN 11
+#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_TX_STAT_EN_LBN 10
+#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_WAN_MODE_LBN 5
+#define FRF_AB_XM_WAN_MODE_WIDTH 1
+#define FRF_AB_XM_INTCLR_MODE_LBN 3
+#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define FRF_AB_XM_CORE_RST_LBN 0
+#define FRF_AB_XM_CORE_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_TX_CFG_REG(128bit):
+ * XGMAC transmit configuration
+ */
+#define FR_AB_XM_TX_CFG_REG_OFST 0x00001230
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_PROG_LBN 24
+#define FRF_AB_XM_TX_PROG_WIDTH 1
+#define FRF_AB_XM_IPG_LBN 16
+#define FRF_AB_XM_IPG_WIDTH 4
+#define FRF_AB_XM_FCNTL_LBN 10
+#define FRF_AB_XM_FCNTL_WIDTH 1
+#define FRF_AB_XM_TXCRC_LBN 8
+#define FRF_AB_XM_TXCRC_WIDTH 1
+#define FRF_AB_XM_EDRC_LBN 6
+#define FRF_AB_XM_EDRC_WIDTH 1
+#define FRF_AB_XM_AUTO_PAD_LBN 5
+#define FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define FRF_AB_XM_TX_PRMBL_LBN 2
+#define FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define FRF_AB_XM_TXEN_LBN 1
+#define FRF_AB_XM_TXEN_WIDTH 1
+#define FRF_AB_XM_TX_RST_LBN 0
+#define FRF_AB_XM_TX_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_RX_CFG_REG(128bit):
+ * XGMAC receive configuration
+ */
+#define FR_AB_XM_RX_CFG_REG_OFST 0x00001240
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_PASS_LENERR_LBN 26
+#define FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_REJ_BCAST_LBN 20
+#define FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define FRF_AB_XM_RXCRC_LBN 3
+#define FRF_AB_XM_RXCRC_WIDTH 1
+#define FRF_AB_XM_RX_PRMBL_LBN 2
+#define FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define FRF_AB_XM_RXEN_LBN 1
+#define FRF_AB_XM_RXEN_WIDTH 1
+#define FRF_AB_XM_RX_RST_LBN 0
+#define FRF_AB_XM_RX_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_MGT_INT_MASK(128bit):
+ * documentation to be written for sum_XM_MGT_INT_MASK
+ */
+#define FR_AB_XM_MGT_INT_MASK_OFST 0x00001250
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+
+/*
+ * FR_AB_XM_FC_REG(128bit):
+ * XGMAC flow control register
+ */
+#define FR_AB_XM_FC_REG_OFST 0x00001270
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_PAUSE_TIME_LBN 16
+#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_MCNTL_PASS_LBN 8
+#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define FRF_AB_XM_ZPAUSE_LBN 2
+#define FRF_AB_XM_ZPAUSE_WIDTH 1
+#define FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define FRF_AB_XM_DIS_FCNTL_LBN 0
+#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+
+/*
+ * FR_AB_XM_PAUSE_TIME_REG(128bit):
+ * XGMAC pause time register
+ */
+#define FR_AB_XM_PAUSE_TIME_REG_OFST 0x00001290
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+
+/*
+ * FR_AB_XM_TX_PARAM_REG(128bit):
+ * XGMAC transmit parameter register
+ */
+#define FR_AB_XM_TX_PARAM_REG_OFST 0x000012d0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define FRF_AB_XM_PAD_CHAR_LBN 0
+#define FRF_AB_XM_PAD_CHAR_WIDTH 8
+
+
+/*
+ * FR_AB_XM_RX_PARAM_REG(128bit):
+ * XGMAC receive parameter register
+ */
+#define FR_AB_XM_RX_PARAM_REG_OFST 0x000012e0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+
+/*
+ * FR_AB_XM_MGT_INT_MSK_REG(128bit):
+ * XGMAC management interrupt mask register
+ */
+#define FR_AB_XM_MGT_INT_REG_OFST 0x000012f0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_RMTFLT_LBN 1
+#define FRF_AB_XM_RMTFLT_WIDTH 1
+#define FRF_AB_XM_LCLFLT_LBN 0
+#define FRF_AB_XM_LCLFLT_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PWR_RST_REG(128bit):
+ * XGXS/XAUI powerdown/reset register
+ */
+#define FR_AB_XX_PWR_RST_REG_OFST 0x00001300
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_PWRDND_SIG_LBN 31
+#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define FRF_AB_XX_SIM_MODE_LBN 27
+#define FRF_AB_XX_SIM_MODE_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETD_SIG_LBN 23
+#define FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define FRF_AB_XX_RESETC_SIG_LBN 22
+#define FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define FRF_AB_XX_RESETB_SIG_LBN 21
+#define FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETA_SIG_LBN 20
+#define FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define FRF_AB_XX_SD_RST_ACT_LBN 16
+#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define FRF_AB_XX_PWRDND_EN_LBN 15
+#define FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNC_EN_LBN 14
+#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNB_EN_LBN 13
+#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNA_EN_LBN 12
+#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define FRF_AB_XX_RESETD_EN_LBN 7
+#define FRF_AB_XX_RESETD_EN_WIDTH 1
+#define FRF_AB_XX_RESETC_EN_LBN 6
+#define FRF_AB_XX_RESETC_EN_WIDTH 1
+#define FRF_AB_XX_RESETB_EN_LBN 5
+#define FRF_AB_XX_RESETB_EN_WIDTH 1
+#define FRF_AB_XX_RESETA_EN_LBN 4
+#define FRF_AB_XX_RESETA_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define FRF_AB_XX_RST_XX_EN_LBN 0
+#define FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+
+/*
+ * FR_AB_XX_SD_CTL_REG(128bit):
+ * XGXS/XAUI powerdown/reset control register
+ */
+#define FR_AB_XX_SD_CTL_REG_OFST 0x00001310
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_TERMADJ1_LBN 17
+#define FRF_AB_XX_TERMADJ1_WIDTH 1
+#define FRF_AB_XX_TERMADJ0_LBN 16
+#define FRF_AB_XX_TERMADJ0_WIDTH 1
+#define FRF_AB_XX_HIDRVD_LBN 15
+#define FRF_AB_XX_HIDRVD_WIDTH 1
+#define FRF_AB_XX_LODRVD_LBN 14
+#define FRF_AB_XX_LODRVD_WIDTH 1
+#define FRF_AB_XX_HIDRVC_LBN 13
+#define FRF_AB_XX_HIDRVC_WIDTH 1
+#define FRF_AB_XX_LODRVC_LBN 12
+#define FRF_AB_XX_LODRVC_WIDTH 1
+#define FRF_AB_XX_HIDRVB_LBN 11
+#define FRF_AB_XX_HIDRVB_WIDTH 1
+#define FRF_AB_XX_LODRVB_LBN 10
+#define FRF_AB_XX_LODRVB_WIDTH 1
+#define FRF_AB_XX_HIDRVA_LBN 9
+#define FRF_AB_XX_HIDRVA_WIDTH 1
+#define FRF_AB_XX_LODRVA_LBN 8
+#define FRF_AB_XX_LODRVA_WIDTH 1
+#define FRF_AB_XX_LPBKD_LBN 3
+#define FRF_AB_XX_LPBKD_WIDTH 1
+#define FRF_AB_XX_LPBKC_LBN 2
+#define FRF_AB_XX_LPBKC_WIDTH 1
+#define FRF_AB_XX_LPBKB_LBN 1
+#define FRF_AB_XX_LPBKB_WIDTH 1
+#define FRF_AB_XX_LPBKA_LBN 0
+#define FRF_AB_XX_LPBKA_WIDTH 1
+
+
+/*
+ * FR_AB_XX_TXDRV_CTL_REG(128bit):
+ * XAUI SerDes transmit drive control register
+ */
+#define FR_AB_XX_TXDRV_CTL_REG_OFST 0x00001320
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_DEQD_LBN 28
+#define FRF_AB_XX_DEQD_WIDTH 4
+#define FRF_AB_XX_DEQC_LBN 24
+#define FRF_AB_XX_DEQC_WIDTH 4
+#define FRF_AB_XX_DEQB_LBN 20
+#define FRF_AB_XX_DEQB_WIDTH 4
+#define FRF_AB_XX_DEQA_LBN 16
+#define FRF_AB_XX_DEQA_WIDTH 4
+#define FRF_AB_XX_DTXD_LBN 12
+#define FRF_AB_XX_DTXD_WIDTH 4
+#define FRF_AB_XX_DTXC_LBN 8
+#define FRF_AB_XX_DTXC_WIDTH 4
+#define FRF_AB_XX_DTXB_LBN 4
+#define FRF_AB_XX_DTXB_WIDTH 4
+#define FRF_AB_XX_DTXA_LBN 0
+#define FRF_AB_XX_DTXA_WIDTH 4
+
+
+/*
+ * FR_AB_XX_PRBS_CTL_REG(128bit):
+ * documentation to be written for sum_XX_PRBS_CTL_REG
+ */
+#define FR_AB_XX_PRBS_CTL_REG_OFST 0x00001330
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PRBS_CHK_REG(128bit):
+ * documentation to be written for sum_XX_PRBS_CHK_REG
+ */
+#define FR_AB_XX_PRBS_CHK_REG_OFST 0x00001340
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_REV_LB_EN_LBN 16
+#define FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PRBS_ERR_REG(128bit):
+ * documentation to be written for sum_XX_PRBS_ERR_REG
+ */
+#define FR_AB_XX_PRBS_ERR_REG_OFST 0x00001350
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+
+/*
+ * FR_AB_XX_CORE_STAT_REG(128bit):
+ * XAUI XGXS core status register
+ */
+#define FR_AB_XX_CORE_STAT_REG_OFST 0x00001360
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_FORCE_SIG3_LBN 31
+#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_LBN 29
+#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_LBN 27
+#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_LBN 25
+#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define FRF_AB_XX_MATCH_FAULT_LBN 21
+#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define FRF_AB_XX_ALIGN_DONE_LBN 20
+#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT3_LBN 19
+#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT2_LBN 18
+#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT1_LBN 17
+#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT0_LBN 16
+#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH3_LBN 3
+#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH2_LBN 2
+#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH1_LBN 1
+#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH0_LBN 0
+#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+
+/*
+ * FR_AA_RX_DESC_PTR_TBL_KER(128bit):
+ * Receive descriptor pointer table
+ */
+#define FR_AA_RX_DESC_PTR_TBL_KER_OFST 0x00011800
+/* falcona0=net_func_bar2 */
+#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/*
+ * FR_AZ_RX_DESC_PTR_TBL(128bit):
+ * Receive descriptor pointer table
+ */
+#define FR_AZ_RX_DESC_PTR_TBL_OFST 0x00f40000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_RX_DESC_PTR_TBL_STEP 16
+#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define FR_AB_RX_DESC_PTR_TBL_ROWS 4096
+
+#define FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define FRF_AZ_RX_RESET_LBN 89
+#define FRF_AZ_RX_RESET_WIDTH 1
+#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_RX_DESCQ_SIZE_4K 3
+#define FFE_AZ_RX_DESCQ_SIZE_2K 2
+#define FFE_AZ_RX_DESCQ_SIZE_1K 1
+#define FFE_AZ_RX_DESCQ_SIZE_512 0
+#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
+#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
+#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
+#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
+#define FRF_AZ_RX_DESCQ_EN_LBN 0
+#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
+
+
+/*
+ * FR_AA_TX_DESC_PTR_TBL_KER(128bit):
+ * Transmit descriptor pointer
+ */
+#define FR_AA_TX_DESC_PTR_TBL_KER_OFST 0x00011900
+/* falcona0=net_func_bar2 */
+#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
+/*
+ * FR_AZ_TX_DESC_PTR_TBL(128bit):
+ * Transmit descriptor pointer
+ */
+#define FR_AZ_TX_DESC_PTR_TBL_OFST 0x00f50000
+/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_TX_DESC_PTR_TBL_STEP 16
+#define FR_AB_TX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
+
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
+#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
+#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
+#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
+#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
+#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
+#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
+#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
+#define FRF_AZ_TX_DESCQ_EN_LBN 88
+#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
+#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
+#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_TX_DESCQ_SIZE_4K 3
+#define FFE_AZ_TX_DESCQ_SIZE_2K 2
+#define FFE_AZ_TX_DESCQ_SIZE_1K 1
+#define FFE_AZ_TX_DESCQ_SIZE_512 0
+#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
+#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
+#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
+#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
+
+
+/*
+ * FR_AA_EVQ_PTR_TBL_KER(128bit):
+ * Event queue pointer table
+ */
+#define FR_AA_EVQ_PTR_TBL_KER_OFST 0x00011a00
+/* falcona0=net_func_bar2 */
+#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
+#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
+/*
+ * FR_AZ_EVQ_PTR_TBL(128bit):
+ * Event queue pointer table
+ */
+#define FR_AZ_EVQ_PTR_TBL_OFST 0x00f60000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_EVQ_PTR_TBL_STEP 16
+#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
+#define FR_AB_EVQ_PTR_TBL_ROWS 4096
+
+#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
+#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
+#define FRF_AZ_EVQ_WKUP_OR_INT_EN_LBN 39
+#define FRF_AZ_EVQ_WKUP_OR_INT_EN_WIDTH 1
+#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
+#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
+#define FRF_AZ_EVQ_EN_LBN 23
+#define FRF_AZ_EVQ_EN_WIDTH 1
+#define FRF_AZ_EVQ_SIZE_LBN 20
+#define FRF_AZ_EVQ_SIZE_WIDTH 3
+#define FFE_AZ_EVQ_SIZE_32K 6
+#define FFE_AZ_EVQ_SIZE_16K 5
+#define FFE_AZ_EVQ_SIZE_8K 4
+#define FFE_AZ_EVQ_SIZE_4K 3
+#define FFE_AZ_EVQ_SIZE_2K 2
+#define FFE_AZ_EVQ_SIZE_1K 1
+#define FFE_AZ_EVQ_SIZE_512 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
+
+
+/*
+ * FR_AA_BUF_HALF_TBL_KER(64bit):
+ * Buffer table in half buffer table mode direct access by driver
+ */
+#define FR_AA_BUF_HALF_TBL_KER_OFST 0x00018000
+/* falcona0=net_func_bar2 */
+#define FR_AA_BUF_HALF_TBL_KER_STEP 8
+#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
+/*
+ * FR_AZ_BUF_HALF_TBL(64bit):
+ * Buffer table in half buffer table mode direct access by driver
+ */
+#define FR_AZ_BUF_HALF_TBL_OFST 0x00800000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_BUF_HALF_TBL_STEP 8
+#define FR_CZ_BUF_HALF_TBL_ROWS 147456
+#define FR_AB_BUF_HALF_TBL_ROWS 524288
+
+#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
+#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
+
+
+/*
+ * FR_AA_BUF_FULL_TBL_KER(64bit):
+ * Buffer table in full buffer table mode direct access by driver
+ */
+#define FR_AA_BUF_FULL_TBL_KER_OFST 0x00018000
+/* falcona0=net_func_bar2 */
+#define FR_AA_BUF_FULL_TBL_KER_STEP 8
+#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
+/*
+ * FR_AZ_BUF_FULL_TBL(64bit):
+ * Buffer table in full buffer table mode direct access by driver
+ */
+#define FR_AZ_BUF_FULL_TBL_OFST 0x00800000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_BUF_FULL_TBL_STEP 8
+
+#define FR_CZ_BUF_FULL_TBL_ROWS 147456
+#define FR_AB_BUF_FULL_TBL_ROWS 917504
+
+#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
+#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
+#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
+#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
+#define FRF_AZ_BUF_ADR_REGION_LBN 48
+#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
+#define FFE_AZ_BUF_ADR_REGN3 3
+#define FFE_AZ_BUF_ADR_REGN2 2
+#define FFE_AZ_BUF_ADR_REGN1 1
+#define FFE_AZ_BUF_ADR_REGN0 0
+#define FRF_AZ_BUF_ADR_FBUF_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
+#define FRF_AZ_BUF_ADR_FBUF_DW0_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_DW0_WIDTH 32
+#define FRF_AZ_BUF_ADR_FBUF_DW1_LBN 46
+#define FRF_AZ_BUF_ADR_FBUF_DW1_WIDTH 2
+#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
+
+
+/*
+ * FR_AZ_RX_FILTER_TBL0(128bit):
+ * TCP/IPv4 Receive filter table
+ */
+#define FR_AZ_RX_FILTER_TBL0_OFST 0x00f00000
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_RX_FILTER_TBL0_STEP 32
+#define FR_AZ_RX_FILTER_TBL0_ROWS 8192
+/*
+ * FR_AB_RX_FILTER_TBL1(128bit):
+ * TCP/IPv4 Receive filter table
+ */
+#define FR_AB_RX_FILTER_TBL1_OFST 0x00f00010
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_RX_FILTER_TBL1_STEP 32
+#define FR_AB_RX_FILTER_TBL1_ROWS 8192
+
+#define FRF_BZ_RSS_EN_LBN 110
+#define FRF_BZ_RSS_EN_WIDTH 1
+#define FRF_BZ_SCATTER_EN_LBN 109
+#define FRF_BZ_SCATTER_EN_WIDTH 1
+#define FRF_AZ_TCP_UDP_LBN 108
+#define FRF_AZ_TCP_UDP_WIDTH 1
+#define FRF_AZ_RXQ_ID_LBN 96
+#define FRF_AZ_RXQ_ID_WIDTH 12
+#define FRF_AZ_DEST_IP_LBN 64
+#define FRF_AZ_DEST_IP_WIDTH 32
+#define FRF_AZ_DEST_PORT_TCP_LBN 48
+#define FRF_AZ_DEST_PORT_TCP_WIDTH 16
+#define FRF_AZ_SRC_IP_LBN 16
+#define FRF_AZ_SRC_IP_WIDTH 32
+#define FRF_AZ_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_AZ_SRC_TCP_DEST_UDP_WIDTH 16
+
+
+/*
+ * FR_CZ_RX_MAC_FILTER_TBL0(128bit):
+ * Receive Ethernet filter table
+ */
+#define FR_CZ_RX_MAC_FILTER_TBL0_OFST 0x00f00010
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
+#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
+
+#define FRF_CZ_RMFT_RSS_EN_LBN 75
+#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
+#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
+#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
+#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
+#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_RMFT_RXQ_ID_LBN 61
+#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
+#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_RMFT_DEST_MAC_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48
+#define FRF_CZ_RMFT_DEST_MAC_DW0_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_DW0_WIDTH 32
+#define FRF_CZ_RMFT_DEST_MAC_DW1_LBN 44
+#define FRF_CZ_RMFT_DEST_MAC_DW1_WIDTH 16
+#define FRF_CZ_RMFT_VLAN_ID_LBN 0
+#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_TIMER_TBL(128bit):
+ * Timer table
+ */
+#define FR_AZ_TIMER_TBL_OFST 0x00f70000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_TIMER_TBL_STEP 16
+#define FR_CZ_TIMER_TBL_ROWS 1024
+#define FR_AB_TIMER_TBL_ROWS 4096
+
+#define FRF_CZ_TIMER_Q_EN_LBN 33
+#define FRF_CZ_TIMER_Q_EN_WIDTH 1
+#define FRF_CZ_INT_ARMD_LBN 32
+#define FRF_CZ_INT_ARMD_WIDTH 1
+#define FRF_CZ_INT_PEND_LBN 31
+#define FRF_CZ_INT_PEND_WIDTH 1
+#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
+#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
+#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
+#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
+#define FRF_CZ_TIMER_MODE_LBN 14
+#define FRF_CZ_TIMER_MODE_WIDTH 2
+#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
+#define FFE_CZ_TIMER_MODE_TRIG_START 2
+#define FFE_CZ_TIMER_MODE_IMMED_START 1
+#define FFE_CZ_TIMER_MODE_DIS 0
+#define FRF_AB_TIMER_MODE_LBN 12
+#define FRF_AB_TIMER_MODE_WIDTH 2
+#define FFE_AB_TIMER_MODE_INT_HLDOFF 2
+#define FFE_AB_TIMER_MODE_TRIG_START 2
+#define FFE_AB_TIMER_MODE_IMMED_START 1
+#define FFE_AB_TIMER_MODE_DIS 0
+#define FRF_CZ_TIMER_VAL_LBN 0
+#define FRF_CZ_TIMER_VAL_WIDTH 14
+#define FRF_AB_TIMER_VAL_LBN 0
+#define FRF_AB_TIMER_VAL_WIDTH 12
+
+
+/*
+ * FR_BZ_TX_PACE_TBL(128bit):
+ * Transmit pacing table
+ */
+#define FR_BZ_TX_PACE_TBL_OFST 0x00f80000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2 */
+#define FR_AZ_TX_PACE_TBL_STEP 16
+#define FR_CZ_TX_PACE_TBL_ROWS 1024
+#define FR_BB_TX_PACE_TBL_ROWS 4096
+/*
+ * FR_AA_TX_PACE_TBL(128bit):
+ * Transmit pacing table
+ */
+#define FR_AA_TX_PACE_TBL_OFST 0x00f80040
+/* falcona0=char_func_bar0 */
+/* FR_AZ_TX_PACE_TBL_STEP 16 */
+#define FR_AA_TX_PACE_TBL_ROWS 4092
+
+#define FRF_AZ_TX_PACE_LBN 0
+#define FRF_AZ_TX_PACE_WIDTH 5
+
+
+/*
+ * FR_BZ_RX_INDIRECTION_TBL(7bit):
+ * RX Indirection Table
+ */
+#define FR_BZ_RX_INDIRECTION_TBL_OFST 0x00fb0000
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
+#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
+
+#define FRF_BZ_IT_QUEUE_LBN 0
+#define FRF_BZ_IT_QUEUE_WIDTH 6
+
+
+/*
+ * FR_CZ_TX_FILTER_TBL0(128bit):
+ * TCP/IPv4 Transmit filter table
+ */
+#define FR_CZ_TX_FILTER_TBL0_OFST 0x00fc0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_TX_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
+
+#define FRF_CZ_TIFT_TCP_UDP_LBN 108
+#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
+#define FRF_CZ_TIFT_TXQ_ID_LBN 96
+#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TIFT_DEST_IP_LBN 64
+#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
+#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
+#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
+#define FRF_CZ_TIFT_SRC_IP_LBN 16
+#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
+
+
+/*
+ * FR_CZ_TX_MAC_FILTER_TBL0(128bit):
+ * Transmit Ethernet filter table
+ */
+#define FR_CZ_TX_MAC_FILTER_TBL0_OFST 0x00fe0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
+
+#define FRF_CZ_TMFT_TXQ_ID_LBN 61
+#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_TMFT_SRC_MAC_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48
+#define FRF_CZ_TMFT_SRC_MAC_DW0_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_DW0_WIDTH 32
+#define FRF_CZ_TMFT_SRC_MAC_DW1_LBN 44
+#define FRF_CZ_TMFT_SRC_MAC_DW1_WIDTH 16
+#define FRF_CZ_TMFT_VLAN_ID_LBN 0
+#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
+
+
+/*
+ * FR_CZ_MC_TREG_SMEM(32bit):
+ * MC Shared Memory
+ */
+#define FR_CZ_MC_TREG_SMEM_OFST 0x00ff0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_MC_TREG_SMEM_STEP 4
+#define FR_CZ_MC_TREG_SMEM_ROWS 512
+
+#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
+#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
+
+
+/*
+ * FR_BB_MSIX_VECTOR_TABLE(128bit):
+ * MSIX Vector Table
+ */
+#define FR_BB_MSIX_VECTOR_TABLE_OFST 0x00ff0000
+/* falconb0=net_func_bar2 */
+#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
+#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
+/*
+ * FR_CZ_MSIX_VECTOR_TABLE(128bit):
+ * MSIX Vector Table
+ */
+#define FR_CZ_MSIX_VECTOR_TABLE_OFST 0x00000000
+/* sienaa0=pci_f0_bar4 */
+/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
+#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
+
+#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
+#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
+#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
+#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
+#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
+#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
+
+
+/*
+ * FR_BB_MSIX_PBA_TABLE(32bit):
+ * MSIX Pending Bit Array
+ */
+#define FR_BB_MSIX_PBA_TABLE_OFST 0x00ff2000
+/* falconb0=net_func_bar2 */
+#define FR_BZ_MSIX_PBA_TABLE_STEP 4
+#define FR_BB_MSIX_PBA_TABLE_ROWS 2
+/*
+ * FR_CZ_MSIX_PBA_TABLE(32bit):
+ * MSIX Pending Bit Array
+ */
+#define FR_CZ_MSIX_PBA_TABLE_OFST 0x00008000
+/* sienaa0=pci_f0_bar4 */
+/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
+#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
+
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+
+/*
+ * FR_AZ_SRM_DBG_REG(64bit):
+ * SRAM debug access
+ */
+#define FR_AZ_SRM_DBG_REG_OFST 0x03000000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_SRM_DBG_REG_STEP 8
+
+#define FR_CZ_SRM_DBG_REG_ROWS 262144
+#define FR_AB_SRM_DBG_REG_ROWS 2097152
+
+#define FRF_AZ_SRM_DBG_LBN 0
+#define FRF_AZ_SRM_DBG_WIDTH 64
+#define FRF_AZ_SRM_DBG_DW0_LBN 0
+#define FRF_AZ_SRM_DBG_DW0_WIDTH 32
+#define FRF_AZ_SRM_DBG_DW1_LBN 32
+#define FRF_AZ_SRM_DBG_DW1_WIDTH 32
+
+
+/*
+ * FR_AA_INT_ACK_CHAR(32bit):
+ * CHAR interrupt acknowledge register
+ */
+#define FR_AA_INT_ACK_CHAR_OFST 0x00000060
+/* falcona0=char_func_bar0 */
+
+#define FRF_AA_INT_ACK_CHAR_FIELD_LBN 0
+#define FRF_AA_INT_ACK_CHAR_FIELD_WIDTH 32
+
+
+/* FS_DRIVER_EV */
+#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
+#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
+#define FSE_AZ_TX_DSC_ERROR_EV 15
+#define FSE_AZ_RX_DSC_ERROR_EV 14
+#define FSE_AZ_RX_RECOVER_EV 11
+#define FSE_AZ_TIMER_EV 10
+#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
+#define FSE_AZ_WAKE_UP_EV 6
+#define FSE_AZ_SRM_UPD_DONE_EV 5
+#define FSE_AZ_EVQ_NOT_EN_EV 3
+#define FSE_AZ_EVQ_INIT_DONE_EV 2
+#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
+#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
+
+
+/* FS_EVENT_ENTRY */
+#define FSF_AZ_EV_CODE_LBN 60
+#define FSF_AZ_EV_CODE_WIDTH 4
+#define FSE_AZ_EV_CODE_USER_EV 8
+#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
+#define FSE_AZ_EV_CODE_GLOBAL_EV 6
+#define FSE_AZ_EV_CODE_DRIVER_EV 5
+#define FSE_AZ_EV_CODE_TX_EV 2
+#define FSE_AZ_EV_CODE_RX_EV 0
+#define FSF_AZ_EV_DATA_LBN 0
+#define FSF_AZ_EV_DATA_WIDTH 60
+#define FSF_AZ_EV_DATA_DW0_LBN 0
+#define FSF_AZ_EV_DATA_DW0_WIDTH 32
+#define FSF_AZ_EV_DATA_DW1_LBN 32
+#define FSF_AZ_EV_DATA_DW1_WIDTH 28
+
+
+/* FS_GLOBAL_EV */
+#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 12
+#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_BZ_GLB_EV_XG_MNT_INTR_LBN 11
+#define FSF_BZ_GLB_EV_XG_MNT_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_LBN 10
+#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_XG_PHY0_INTR_LBN 9
+#define FSF_AZ_GLB_EV_XG_PHY0_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_G_PHY0_INTR_LBN 7
+#define FSF_AZ_GLB_EV_G_PHY0_INTR_WIDTH 1
+
+
+/* FS_RX_EV */
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
+#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
+#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_OK_LBN 56
+#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
+#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
+#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
+#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
+#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
+#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
+#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
+#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
+#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
+#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
+#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_OTHER 2
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_UDP 1
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_TCP 0
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
+#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
+#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
+#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
+#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
+#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
+#define FSF_AZ_RX_EV_PORT_LBN 30
+#define FSF_AZ_RX_EV_PORT_WIDTH 1
+#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
+#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
+#define FSF_AZ_RX_EV_SOP_LBN 15
+#define FSF_AZ_RX_EV_SOP_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
+
+
+/* FS_RX_KER_DESC */
+#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
+#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
+#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
+#define FSF_AZ_RX_KER_BUF_ADDR_DW0_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_DW0_WIDTH 32
+#define FSF_AZ_RX_KER_BUF_ADDR_DW1_LBN 32
+#define FSF_AZ_RX_KER_BUF_ADDR_DW1_WIDTH 14
+
+
+/* FS_RX_USER_DESC */
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
+#define FSF_AZ_RX_USER_BUF_ID_LBN 0
+#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
+
+
+/* FS_TX_EV */
+#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
+#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
+#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_TX_EV_PORT_LBN 16
+#define FSF_AZ_TX_EV_PORT_WIDTH 1
+#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
+#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_COMP_LBN 12
+#define FSF_AZ_TX_EV_COMP_WIDTH 1
+#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
+
+
+/* FS_TX_KER_DESC */
+#define FSF_AZ_TX_KER_CONT_LBN 62
+#define FSF_AZ_TX_KER_CONT_WIDTH 1
+#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
+#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
+#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
+#define FSF_AZ_TX_KER_BUF_ADDR_DW0_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_DW0_WIDTH 32
+#define FSF_AZ_TX_KER_BUF_ADDR_DW1_LBN 32
+#define FSF_AZ_TX_KER_BUF_ADDR_DW1_WIDTH 14
+
+
+/* FS_TX_USER_DESC */
+#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
+#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
+#define FSF_AZ_TX_USER_CONT_LBN 46
+#define FSF_AZ_TX_USER_CONT_WIDTH 1
+#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
+#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
+#define FSF_AZ_TX_USER_BUF_ID_LBN 13
+#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
+#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
+#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
+
+
+/* FS_USER_EV */
+#define FSF_CZ_USER_QID_LBN 32
+#define FSF_CZ_USER_QID_WIDTH 10
+#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
+#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
+
+
+/* FS_NET_IVEC */
+#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
+#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
+#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
+#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
+#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
+#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
+
+
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+
+
+/**************************************************************************
+ *
+ * Falcon non-volatile configuration
+ *
+ **************************************************************************
+ */
+
+
+#define FR_AZ_TX_PACE_TBL_OFST FR_BZ_TX_PACE_TBL_OFST
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+
+#endif /* _SYS_EFX_REGS_H */
diff --git a/drivers/net/sfc/base/efx_regs_ef10.h b/drivers/net/sfc/base/efx_regs_ef10.h
new file mode 100644
index 00000000..11a91848
--- /dev/null
+++ b/drivers/net/sfc/base/efx_regs_ef10.h
@@ -0,0 +1,571 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_EF10_REGS_H
+#define _SYS_EFX_EF10_REGS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**************************************************************************
+ * NOTE: the line below marks the start of the autogenerated section
+ * EF10 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/*
+ * BIU_HW_REV_ID_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_HW_REV_ID_REG_OFST 0x00000000
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_BIU_HW_REV_ID_REG_RESET 0xeb14face
+
+
+#define ERF_DZ_HW_REV_ID_LBN 0
+#define ERF_DZ_HW_REV_ID_WIDTH 32
+
+
+/*
+ * BIU_MC_SFT_STATUS_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_OFST 0x00000010
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_STEP 4
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_ROWS 8
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_RESET 0x1111face
+
+
+#define ERF_DZ_MC_SFT_STATUS_LBN 0
+#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
+
+
+/*
+ * BIU_INT_ISR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_INT_ISR_REG_OFST 0x00000090
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_BIU_INT_ISR_REG_RESET 0x0
+
+
+#define ERF_DZ_ISR_REG_LBN 0
+#define ERF_DZ_ISR_REG_WIDTH 32
+
+
+/*
+ * MC_DB_LWRD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_MC_DB_LWRD_REG_OFST 0x00000200
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_MC_DB_LWRD_REG_RESET 0x0
+
+
+#define ERF_DZ_MC_DOORBELL_L_LBN 0
+#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
+
+
+/*
+ * MC_DB_HWRD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_MC_DB_HWRD_REG_OFST 0x00000204
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_MC_DB_HWRD_REG_RESET 0x0
+
+
+#define ERF_DZ_MC_DOORBELL_H_LBN 0
+#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
+
+
+/*
+ * EVQ_RPTR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_EVQ_RPTR_REG_OFST 0x00000400
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_EVQ_RPTR_REG_STEP 8192
+#define ER_DZ_EVQ_RPTR_REG_ROWS 2048
+#define ER_DZ_EVQ_RPTR_REG_RESET 0x0
+
+
+#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_DZ_EVQ_RPTR_LBN 0
+#define ERF_DZ_EVQ_RPTR_WIDTH 15
+
+
+/*
+ * EVQ_TMR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_EVQ_TMR_REG_OFST 0x00000420
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_EVQ_TMR_REG_STEP 8192
+#define ER_DZ_EVQ_TMR_REG_ROWS 2048
+#define ER_DZ_EVQ_TMR_REG_RESET 0x0
+
+
+#define ERF_DZ_TC_TIMER_MODE_LBN 14
+#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_DZ_TC_TIMER_VAL_LBN 0
+#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
+
+
+/*
+ * RX_DESC_UPD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_RX_DESC_UPD_REG_OFST 0x00000830
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_RX_DESC_UPD_REG_STEP 8192
+#define ER_DZ_RX_DESC_UPD_REG_ROWS 2048
+#define ER_DZ_RX_DESC_UPD_REG_RESET 0x0
+
+
+#define ERF_DZ_RX_DESC_WPTR_LBN 0
+#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+/*
+ * TX_DESC_UPD_REG(96bit):
+ *
+ */
+
+#define ER_DZ_TX_DESC_UPD_REG_OFST 0x00000a10
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_TX_DESC_UPD_REG_STEP 8192
+#define ER_DZ_TX_DESC_UPD_REG_ROWS 2048
+#define ER_DZ_TX_DESC_UPD_REG_RESET 0x0
+
+
+#define ERF_DZ_RSVD_LBN 76
+#define ERF_DZ_RSVD_WIDTH 20
+#define ERF_DZ_TX_DESC_WPTR_LBN 64
+#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_DZ_TX_DESC_HWORD_LBN 32
+#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_DZ_TX_DESC_LWORD_LBN 0
+#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
+
+
+/* ES_DRIVER_EV */
+#define ESF_DZ_DRV_CODE_LBN 60
+#define ESF_DZ_DRV_CODE_WIDTH 4
+#define ESF_DZ_DRV_SUB_CODE_LBN 56
+#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
+#define ESE_DZ_DRV_TIMER_EV 3
+#define ESE_DZ_DRV_START_UP_EV 2
+#define ESE_DZ_DRV_WAKE_UP_EV 1
+#define ESF_DZ_DRV_SUB_DATA_DW0_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_DW0_WIDTH 32
+#define ESF_DZ_DRV_SUB_DATA_DW1_LBN 32
+#define ESF_DZ_DRV_SUB_DATA_DW1_WIDTH 24
+#define ESF_DZ_DRV_SUB_DATA_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
+#define ESF_DZ_DRV_EVQ_ID_LBN 0
+#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
+#define ESF_DZ_DRV_TMR_ID_LBN 0
+#define ESF_DZ_DRV_TMR_ID_WIDTH 14
+
+
+/* ES_EVENT_ENTRY */
+#define ESF_DZ_EV_CODE_LBN 60
+#define ESF_DZ_EV_CODE_WIDTH 4
+#define ESE_DZ_EV_CODE_MCDI_EV 12
+#define ESE_DZ_EV_CODE_DRIVER_EV 5
+#define ESE_DZ_EV_CODE_TX_EV 2
+#define ESE_DZ_EV_CODE_RX_EV 0
+#define ESE_DZ_OTHER other
+#define ESF_DZ_EV_DATA_DW0_LBN 0
+#define ESF_DZ_EV_DATA_DW0_WIDTH 32
+#define ESF_DZ_EV_DATA_DW1_LBN 32
+#define ESF_DZ_EV_DATA_DW1_WIDTH 28
+#define ESF_DZ_EV_DATA_LBN 0
+#define ESF_DZ_EV_DATA_WIDTH 60
+
+
+/* ES_MC_EVENT */
+#define ESF_DZ_MC_CODE_LBN 60
+#define ESF_DZ_MC_CODE_WIDTH 4
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_MC_DROP_EVENT_LBN 58
+#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
+#define ESF_DZ_MC_SOFT_DW0_LBN 0
+#define ESF_DZ_MC_SOFT_DW0_WIDTH 32
+#define ESF_DZ_MC_SOFT_DW1_LBN 32
+#define ESF_DZ_MC_SOFT_DW1_WIDTH 26
+#define ESF_DZ_MC_SOFT_LBN 0
+#define ESF_DZ_MC_SOFT_WIDTH 58
+
+
+/* ES_RX_EVENT */
+#define ESF_DZ_RX_CODE_LBN 60
+#define ESF_DZ_RX_CODE_WIDTH 4
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_RX_DROP_EVENT_LBN 58
+#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
+#define ESF_DD_RX_EV_RSVD2_LBN 54
+#define ESF_DD_RX_EV_RSVD2_WIDTH 4
+#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN 56
+#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_RX_EV_RSVD2_LBN 54
+#define ESF_EZ_RX_EV_RSVD2_WIDTH 2
+#define ESF_DZ_RX_EV_SOFT2_LBN 52
+#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
+#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
+#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
+#define ESF_DZ_RX_L4_CLASS_LBN 45
+#define ESF_DZ_RX_L4_CLASS_WIDTH 3
+#define ESE_DZ_L4_CLASS_RSVD7 7
+#define ESE_DZ_L4_CLASS_RSVD6 6
+#define ESE_DZ_L4_CLASS_RSVD5 5
+#define ESE_DZ_L4_CLASS_RSVD4 4
+#define ESE_DZ_L4_CLASS_RSVD3 3
+#define ESE_DZ_L4_CLASS_UDP 2
+#define ESE_DZ_L4_CLASS_TCP 1
+#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_L3_CLASS_LBN 42
+#define ESF_DZ_RX_L3_CLASS_WIDTH 3
+#define ESE_DZ_L3_CLASS_RSVD7 7
+#define ESE_DZ_L3_CLASS_IP6_FRAG 6
+#define ESE_DZ_L3_CLASS_ARP 5
+#define ESE_DZ_L3_CLASS_IP4_FRAG 4
+#define ESE_DZ_L3_CLASS_FCOE 3
+#define ESE_DZ_L3_CLASS_IP6 2
+#define ESE_DZ_L3_CLASS_IP4 1
+#define ESE_DZ_L3_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
+#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
+#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
+#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
+#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
+#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
+#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
+#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
+#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
+#define ESE_DZ_ETH_TAG_CLASS_NONE 0
+#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
+#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
+#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
+#define ESE_DZ_ETH_BASE_CLASS_LLC 1
+#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
+#define ESF_DZ_RX_MAC_CLASS_LBN 35
+#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
+#define ESE_DZ_MAC_CLASS_MCAST 1
+#define ESE_DZ_MAC_CLASS_UCAST 0
+#define ESF_DD_RX_EV_SOFT1_LBN 32
+#define ESF_DD_RX_EV_SOFT1_WIDTH 3
+#define ESF_EZ_RX_EV_SOFT1_LBN 34
+#define ESF_EZ_RX_EV_SOFT1_WIDTH 1
+#define ESF_EZ_RX_ENCAP_HDR_LBN 32
+#define ESF_EZ_RX_ENCAP_HDR_WIDTH 2
+#define ESE_EZ_ENCAP_HDR_GRE 2
+#define ESE_EZ_ENCAP_HDR_VXLAN 1
+#define ESE_EZ_ENCAP_HDR_NONE 0
+#define ESF_DD_RX_EV_RSVD1_LBN 30
+#define ESF_DD_RX_EV_RSVD1_WIDTH 2
+#define ESF_EZ_RX_EV_RSVD1_LBN 31
+#define ESF_EZ_RX_EV_RSVD1_WIDTH 1
+#define ESF_EZ_RX_ABORT_LBN 30
+#define ESF_EZ_RX_ABORT_WIDTH 1
+#define ESF_DZ_RX_ECC_ERR_LBN 29
+#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC1_ERR_LBN 28
+#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC0_ERR_LBN 27
+#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
+#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_ECRC_ERR_LBN 24
+#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
+#define ESF_DZ_RX_QLABEL_LBN 16
+#define ESF_DZ_RX_QLABEL_WIDTH 5
+#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
+#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
+#define ESF_DZ_RX_CONT_LBN 14
+#define ESF_DZ_RX_CONT_WIDTH 1
+#define ESF_DZ_RX_BYTES_LBN 0
+#define ESF_DZ_RX_BYTES_WIDTH 14
+
+
+/* ES_RX_KER_DESC */
+#define ESF_DZ_RX_KER_RESERVED_LBN 62
+#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
+#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_KER_BUF_ADDR_DW0_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_DW0_WIDTH 32
+#define ESF_DZ_RX_KER_BUF_ADDR_DW1_LBN 32
+#define ESF_DZ_RX_KER_BUF_ADDR_DW1_WIDTH 16
+#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
+
+
+/* ES_TX_CSUM_TSTAMP_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_OPTION_TS_AT_TXDP_LBN 8
+#define ESF_DZ_TX_OPTION_TS_AT_TXDP_WIDTH 1
+#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_LBN 7
+#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_LBN 6
+#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_WIDTH 1
+#define ESF_DZ_TX_TIMESTAMP_LBN 5
+#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
+#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
+#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
+#define ESE_DZ_TX_OPTION_CRC_FCOE 1
+#define ESE_DZ_TX_OPTION_CRC_OFF 0
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
+#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
+
+
+/* ES_TX_EVENT */
+#define ESF_DZ_TX_CODE_LBN 60
+#define ESF_DZ_TX_CODE_WIDTH 4
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_TX_DROP_EVENT_LBN 58
+#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
+#define ESF_DD_TX_EV_RSVD_LBN 48
+#define ESF_DD_TX_EV_RSVD_WIDTH 10
+#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_IP_INNER_CHKSUM_ERR_LBN 56
+#define ESF_EZ_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_TX_EV_RSVD_LBN 48
+#define ESF_EZ_TX_EV_RSVD_WIDTH 8
+#define ESF_DZ_TX_SOFT2_LBN 32
+#define ESF_DZ_TX_SOFT2_WIDTH 16
+#define ESF_DD_TX_SOFT1_LBN 24
+#define ESF_DD_TX_SOFT1_WIDTH 8
+#define ESF_EZ_TX_CAN_MERGE_LBN 31
+#define ESF_EZ_TX_CAN_MERGE_WIDTH 1
+#define ESF_EZ_TX_SOFT1_LBN 24
+#define ESF_EZ_TX_SOFT1_WIDTH 7
+#define ESF_DZ_TX_QLABEL_LBN 16
+#define ESF_DZ_TX_QLABEL_WIDTH 5
+#define ESF_DZ_TX_DESCR_INDX_LBN 0
+#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
+
+
+/* ES_TX_KER_DESC */
+#define ESF_DZ_TX_KER_TYPE_LBN 63
+#define ESF_DZ_TX_KER_TYPE_WIDTH 1
+#define ESF_DZ_TX_KER_CONT_LBN 62
+#define ESF_DZ_TX_KER_CONT_WIDTH 1
+#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_KER_BUF_ADDR_DW0_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_DW0_WIDTH 32
+#define ESF_DZ_TX_KER_BUF_ADDR_DW1_LBN 32
+#define ESF_DZ_TX_KER_BUF_ADDR_DW1_WIDTH 16
+#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
+
+
+/* ES_TX_PIO_DESC */
+#define ESF_DZ_TX_PIO_TYPE_LBN 63
+#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
+#define ESF_DZ_TX_PIO_OPT_LBN 60
+#define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESF_DZ_TX_PIO_CONT_LBN 59
+#define ESF_DZ_TX_PIO_CONT_WIDTH 1
+#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
+#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
+#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
+
+
+/* ES_TX_TSO_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
+#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+
+/* TX_TSO_FATSO2A_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+
+/* TX_TSO_FATSO2B_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 16
+#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
+#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
+#define ESF_DZ_TX_TSO_INNER_PE_CSUM_LBN 0
+#define ESF_DZ_TX_TSO_INNER_PE_CSUM_WIDTH 16
+
+
+/* ES_TX_VLAN_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_VLAN_OP_LBN 32
+#define ESF_DZ_TX_VLAN_OP_WIDTH 2
+#define ESF_DZ_TX_VLAN_TAG2_LBN 16
+#define ESF_DZ_TX_VLAN_TAG2_WIDTH 16
+#define ESF_DZ_TX_VLAN_TAG1_LBN 0
+#define ESF_DZ_TX_VLAN_TAG1_WIDTH 16
+
+
+/*************************************************************************
+ * NOTE: the comment line above marks the end of the autogenerated section
+ */
+
+/*
+ * The workaround for bug 35388 requires multiplexing writes through
+ * the ERF_DZ_TX_DESC_WPTR address.
+ * TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
+ * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
+ * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
+ */
+#define ER_DD_EVQ_INDIRECT_OFST (ER_DZ_TX_DESC_UPD_REG_OFST + 2 * 4)
+#define ER_DD_EVQ_INDIRECT_STEP ER_DZ_TX_DESC_UPD_REG_STEP
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
+#define ERF_DD_EVQ_IND_RPTR_LBN 0
+#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
+#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
+#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
+#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
+#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
+#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
+
+/* Packed stream magic doorbell command */
+#define ERF_DZ_RX_DESC_MAGIC_DOORBELL_LBN 11
+#define ERF_DZ_RX_DESC_MAGIC_DOORBELL_WIDTH 1
+
+#define ERF_DZ_RX_DESC_MAGIC_CMD_LBN 8
+#define ERF_DZ_RX_DESC_MAGIC_CMD_WIDTH 3
+#define ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS 0
+
+#define ERF_DZ_RX_DESC_MAGIC_DATA_LBN 0
+#define ERF_DZ_RX_DESC_MAGIC_DATA_WIDTH 8
+
+/* Packed stream RX packet prefix */
+#define ES_DZ_PS_RX_PREFIX_TSTAMP_LBN 0
+#define ES_DZ_PS_RX_PREFIX_TSTAMP_WIDTH 32
+#define ES_DZ_PS_RX_PREFIX_CAP_LEN_LBN 32
+#define ES_DZ_PS_RX_PREFIX_CAP_LEN_WIDTH 16
+#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_LBN 48
+#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_WIDTH 16
+
+/*
+ * An extra flag for the packed stream mode,
+ * signalling the start of a new buffer
+ */
+#define ESF_DZ_RX_EV_ROTATE_LBN 53
+#define ESF_DZ_RX_EV_ROTATE_WIDTH 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_EF10_REGS_H */
diff --git a/drivers/net/sfc/base/efx_regs_mcdi.h b/drivers/net/sfc/base/efx_regs_mcdi.h
new file mode 100644
index 00000000..66896fbb
--- /dev/null
+++ b/drivers/net/sfc/base/efx_regs_mcdi.h
@@ -0,0 +1,15690 @@
+/*-
+ * Copyright 2008-2013 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*! \cidoxg_firmware_mc_cmd */
+
+#ifndef _SIENA_MC_DRIVER_PCOL_H
+#define _SIENA_MC_DRIVER_PCOL_H
+
+
+/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
+/* Power-on reset state */
+#define MC_FW_STATE_POR (1)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash. */
+#define MC_FW_WARM_BOOT_OK (2)
+/* The MC main image has started to boot. */
+#define MC_FW_STATE_BOOTING (4)
+/* The Scheduler has started. */
+#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so that
+ * the MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode. This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
+
+/* Siena MC shared memmory offsets */
+/* The 'doorbell' addresses are hard-wired to alert the MC when written */
+#define MC_SMEM_P0_DOORBELL_OFST 0x000
+#define MC_SMEM_P1_DOORBELL_OFST 0x004
+/* The rest of these are firmware-defined */
+#define MC_SMEM_P0_PDU_OFST 0x008
+#define MC_SMEM_P1_PDU_OFST 0x108
+#define MC_SMEM_PDU_LEN 0x100
+#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0
+#define MC_SMEM_P0_STATUS_OFST 0x7f8
+#define MC_SMEM_P1_STATUS_OFST 0x7fc
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
+/* The current version of the MCDI protocol.
+ *
+ * Note that the ROM burnt into the card only talks V0, so at the very
+ * least every driver must support version 0 and MCDI_PCOL_VERSION
+ */
+#ifdef WITH_MCDI_V2
+#define MCDI_PCOL_VERSION 2
+#else
+#define MCDI_PCOL_VERSION 1
+#endif
+
+/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
+
+/* MCDI version 1
+ *
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
+ * structure, filled in by the client.
+ *
+ * 0 7 8 16 20 22 23 24 31
+ * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ * | | |
+ * | | \--- Response
+ * | \------- Error
+ * \------------------------------ Resync (always set)
+ *
+ * The client writes it's request into MC shared memory, and rings the
+ * doorbell. Each request is completed by either by the MC writting
+ * back into shared memory, or by writting out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some response's may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which any associated
+ * response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request, a
+ * request should not be sent unless the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/** Request/Response structure */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
+
+/* Maximum number of payload bytes */
+#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#ifdef WITH_MCDI_V2
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+#else
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V1
+#endif
+
+
+/* The MC can generate events for two reasons:
+ * - To advance a shared memory request if XFLAGS_EVREQ was set
+ * - As a notification (link state, i2c event), controlled
+ * via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ * 0 32 33 36 44 52 60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ * |
+ * \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ * - LEVEL==INFO Command succeeded
+ * - LEVEL==ERR Command failed
+ *
+ * 0 8 16 24 32
+ * | Seq | Datalen | Errno | Rsvd |
+ *
+ * These fields are taken directly out of the standard MCDI header, i.e.,
+ * LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without a
+ * MCDI header. An event can be distinguished from a MCDI response by
+ * examining the first byte which is 0xc0. This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ * 0 7 8
+ * | command | Resync | = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * providing bits 56-63 of the event are 0xc0.
+ *
+ * 56 60 63
+ * | Rsvd | Code | = 0xc0
+ *
+ * Which means for convenience the event code is 0xc for all MC
+ * generated events.
+ */
+#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+
+
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 1
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 12
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 13
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 35
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 38
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* The requested operation might require the
+ command to be passed between MCs, and the
+ transport doesn't support that. Should
+ only ever been seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps matching it with the respective PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The drvier should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set set
+ * doesn't exist on this NIC */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/* Returned by MC_CMD_TESTASSERT if the action that should
+ * have caused an assertion failed to do so. */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+
+#define MC_CMD_ERR_CODE_OFST 0
+
+/* We define 8 "escape" commands to allow
+ for command number space extension */
+
+#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78
+#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79
+#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A
+#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B
+#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C
+#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D
+#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E
+#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F
+
+/* Vectors in the boot ROM */
+/* Point to the copycode entry point. */
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
+#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
+/* Points to the recovery mode entry point. */
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
+
+/* The command set exported by the boot ROM (MCDI v0) */
+#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
+ (1 << MC_CMD_READ32) | \
+ (1 << MC_CMD_WRITE32) | \
+ (1 << MC_CMD_COPYCODE) | \
+ (1 << MC_CMD_GET_VERSION), \
+ 0, 0, 0 }
+
+#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
+ (MC_CMD_SENSOR_ENTRY_OFST + (_x))
+
+#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n) (((n) & 0xff) << 16)
+
+
+#ifdef WITH_MCDI_V2
+
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
+
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
+#endif
+
+/* MCDI_EVENT structuredef */
+#define MCDI_EVENT_LEN 8
+#define MCDI_EVENT_CONT_LBN 32
+#define MCDI_EVENT_CONT_WIDTH 1
+#define MCDI_EVENT_LEVEL_LBN 33
+#define MCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MCDI_EVENT_LEVEL_FATAL 0x3
+#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
+#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
+#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
+#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: 100Mbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+/* enum: 1Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+/* enum: 10Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+/* enum: 40Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
+#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
+#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
+#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
+#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define MCDI_EVENT_FWALERT_DATA_LBN 8
+#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
+#define MCDI_EVENT_FWALERT_REASON_LBN 0
+#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
+/* enum: SRAM Access. */
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
+#define MCDI_EVENT_FLR_VF_LBN 0
+#define MCDI_EVENT_FLR_VF_WIDTH 8
+#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
+#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
+#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
+/* enum: Descriptor loader reported failure */
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
+/* enum: Descriptor ring empty and no EOP seen for packet */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
+/* enum: Overlength packet */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3
+/* enum: Malformed option descriptor */
+#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
+/* enum: Option descriptor part way through a packet */
+#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
+/* enum: DMA or PIO data access error */
+#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
+#define MCDI_EVENT_TX_ERR_INFO_LBN 16
+#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
+#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
+#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
+#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
+/* enum: PLL lost lock */
+#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
+/* enum: Filter overflow (PDMA) */
+#define MCDI_EVENT_PTP_ERR_FILTER 0x2
+/* enum: FIFO overflow (FPGA) */
+#define MCDI_EVENT_PTP_ERR_FIFO 0x3
+/* enum: Merge queue overflow */
+#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
+#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
+/* enum: AOE failed to load - no valid image? */
+#define MCDI_EVENT_AOE_NO_LOAD 0x1
+/* enum: AOE FC reported an exception */
+#define MCDI_EVENT_AOE_FC_ASSERT 0x2
+/* enum: AOE FC watchdogged */
+#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
+/* enum: AOE FC failed to start */
+#define MCDI_EVENT_AOE_FC_NO_START 0x4
+/* enum: Generic AOE fault - likely to have been reported via other means too
+ * but intended for use by aoex driver.
+ */
+#define MCDI_EVENT_AOE_FAULT 0x5
+/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
+/* enum: AOE loaded successfully */
+#define MCDI_EVENT_AOE_LOAD 0x7
+/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_DMA 0x8
+/* enum: AOE byteblaster connected/disconnected (Connection status in
+ * AOE_ERR_DATA)
+ */
+#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that invalid flash type detected */
+#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timedout */
+#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
+#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid Magic Number if FPGA header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
+#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
+#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_RX_ERR_INFO_LBN 16
+#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define MCDI_EVENT_MUM_WATCHDOG 0x3
+#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_DATA_LBN 0
+#define MCDI_EVENT_DATA_WIDTH 32
+#define MCDI_EVENT_SRC_LBN 36
+#define MCDI_EVENT_SRC_WIDTH 8
+#define MCDI_EVENT_EV_CODE_LBN 60
+#define MCDI_EVENT_EV_CODE_WIDTH 4
+#define MCDI_EVENT_CODE_LBN 44
+#define MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define MCDI_EVENT_SW_EVENT 0x0
+/* enum: Bad assert. */
+#define MCDI_EVENT_CODE_BADSSERT 0x1
+/* enum: PM Notice. */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2
+/* enum: Command done. */
+#define MCDI_EVENT_CODE_CMDDONE 0x3
+/* enum: Link change. */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4
+/* enum: Sensor Event. */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5
+/* enum: Schedule error. */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6
+/* enum: Reboot. */
+#define MCDI_EVENT_CODE_REBOOT 0x7
+/* enum: Mac stats DMA. */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
+/* enum: Firmware alert. */
+#define MCDI_EVENT_CODE_FWALERT 0x9
+/* enum: Function level reset. */
+#define MCDI_EVENT_CODE_FLR 0xa
+/* enum: Transmit error */
+#define MCDI_EVENT_CODE_TX_ERR 0xb
+/* enum: Tx flush has completed */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+/* enum: PTP packet received timestamp */
+#define MCDI_EVENT_CODE_PTP_RX 0xd
+/* enum: PTP NIC failure */
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+/* enum: PTP PPS event */
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
+/* enum: Rx flush has completed */
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+/* enum: Receive error */
+#define MCDI_EVENT_CODE_RX_ERR 0x11
+/* enum: AOE fault */
+#define MCDI_EVENT_CODE_AOE 0x12
+/* enum: Network port calibration failed (VCAL). */
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+/* enum: HW PPS event */
+#define MCDI_EVENT_CODE_HW_PPS 0x14
+/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
+ * a different format)
+ */
+#define MCDI_EVENT_CODE_MC_REBOOT 0x15
+/* enum: the MC has detected a parity error */
+#define MCDI_EVENT_CODE_PAR_ERR 0x16
+/* enum: the MC has detected a correctable error */
+#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
+/* enum: the MC has detected an uncorrectable error */
+#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: Artificial event generated by host and posted via MC for test
+ * purposes.
+ */
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
+#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LBN 0
+#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
+#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
+#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
+#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LBN 0
+#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LBN 0
+#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
+#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
+#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LBN 0
+#define MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
+#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LBN 0
+#define MCDI_EVENT_PTP_UUID_WIDTH 32
+#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LBN 0
+#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
+#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+
+/* FCDI_EVENT structuredef */
+#define FCDI_EVENT_LEN 8
+#define FCDI_EVENT_CONT_LBN 32
+#define FCDI_EVENT_CONT_WIDTH 1
+#define FCDI_EVENT_LEVEL_LBN 33
+#define FCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define FCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define FCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define FCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define FCDI_EVENT_LEVEL_FATAL 0x3
+#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
+#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
+#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
+#define FCDI_EVENT_LINK_UP 0x1 /* enum */
+#define FCDI_EVENT_DATA_LBN 0
+#define FCDI_EVENT_DATA_WIDTH 32
+#define FCDI_EVENT_SRC_LBN 36
+#define FCDI_EVENT_SRC_WIDTH 8
+#define FCDI_EVENT_EV_CODE_LBN 60
+#define FCDI_EVENT_EV_CODE_WIDTH 4
+#define FCDI_EVENT_CODE_LBN 44
+#define FCDI_EVENT_CODE_WIDTH 8
+/* enum: The FC was rebooted. */
+#define FCDI_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define FCDI_EVENT_CODE_ASSERT 0x2
+/* enum: DDR3 test result. */
+#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
+/* enum: Link status. */
+#define FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define FCDI_EVENT_REBOOT_SRC_LBN 36
+#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define FCDI_EVENT_PTP_STATE_LBN 0
+#define FCDI_EVENT_PTP_STATE_WIDTH 32
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define FCDI_EVENT_BOOT_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define FCDI_EVENT_BOOT_RESULT_LBN 0
+#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure | is overlayed over a normal FCDI event
+ * such that bits 32-63 containing | event code, level, source etc remain the
+ * same. In this case the data | field of the header is defined to be the
+ * number of timestamps
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
+
+/* MUM_EVENT structuredef */
+#define MUM_EVENT_LEN 8
+#define MUM_EVENT_CONT_LBN 32
+#define MUM_EVENT_CONT_WIDTH 1
+#define MUM_EVENT_LEVEL_LBN 33
+#define MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MUM_EVENT_LEVEL_FATAL 0x3
+#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_SENSOR_ID_LBN 0
+#define MUM_EVENT_SENSOR_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MUM_EVENT_SENSOR_STATE_LBN 8
+#define MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define MUM_EVENT_PORT_PHY_READY_LBN 0
+#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define MUM_EVENT_DATA_LBN 0
+#define MUM_EVENT_DATA_WIDTH 32
+#define MUM_EVENT_SRC_LBN 36
+#define MUM_EVENT_SRC_WIDTH 8
+#define MUM_EVENT_EV_CODE_LBN 60
+#define MUM_EVENT_EV_CODE_WIDTH 4
+#define MUM_EVENT_CODE_LBN 44
+#define MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LBN 0
+#define MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_READ32
+ * Read multiple 32byte words from MC memory.
+ */
+#define MC_CMD_READ32 0x1
+#undef MC_CMD_0x1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ32_IN msgrequest */
+#define MC_CMD_READ32_IN_LEN 8
+#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+
+/* MC_CMD_READ32_OUT msgresponse */
+#define MC_CMD_READ32_OUT_LENMIN 4
+#define MC_CMD_READ32_OUT_LENMAX 252
+#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_WRITE32
+ * Write multiple 32byte words to MC memory.
+ */
+#define MC_CMD_WRITE32 0x2
+#undef MC_CMD_0x2_PRIVILEGE_CTG
+
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WRITE32_IN msgrequest */
+#define MC_CMD_WRITE32_IN_LENMIN 8
+#define MC_CMD_WRITE32_IN_LENMAX 252
+#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
+#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
+#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
+
+/* MC_CMD_WRITE32_OUT msgresponse */
+#define MC_CMD_WRITE32_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_COPYCODE
+ * Copy MC code between two locations and jump.
+ */
+#define MC_CMD_COPYCODE 0x3
+#undef MC_CMD_0x3_PRIVILEGE_CTG
+
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_COPYCODE_IN msgrequest */
+#define MC_CMD_COPYCODE_IN_LEN 16
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
+ */
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
+#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
+ */
+#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
+/* Destination address */
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+/* Address of where to jump after copy. */
+#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+/* enum: Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1
+
+/* MC_CMD_COPYCODE_OUT msgresponse */
+#define MC_CMD_COPYCODE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
+ */
+#define MC_CMD_SET_FUNC 0x4
+#undef MC_CMD_0x4_PRIVILEGE_CTG
+
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_FUNC_IN msgrequest */
+#define MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
+#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+
+/* MC_CMD_SET_FUNC_OUT msgresponse */
+#define MC_CMD_SET_FUNC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x5
+#undef MC_CMD_0x5_PRIVILEGE_CTG
+
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
+#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
+#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* ?? */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+/* enum: indicates that the MC wasn't flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_GET_ASSERTS
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
+ */
+#define MC_CMD_GET_ASSERTS 0x6
+#undef MC_CMD_0x6_PRIVILEGE_CTG
+
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_ASSERTS_IN msgrequest */
+#define MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+
+/* MC_CMD_GET_ASSERTS_OUT msgresponse */
+#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+/* enum: No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
+/* enum: An illegal address trap stopped the system (huntington and later) */
+#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+
+
+/***********************************/
+/* MC_CMD_LOG_CTRL
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
+ */
+#define MC_CMD_LOG_CTRL 0x7
+#undef MC_CMD_0x7_PRIVILEGE_CTG
+
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LOG_CTRL_IN msgrequest */
+#define MC_CMD_LOG_CTRL_IN_LEN 8
+/* Log destination */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+/* enum: UART. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
+/* enum: Event queue. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+
+/* MC_CMD_LOG_CTRL_OUT msgresponse */
+#define MC_CMD_LOG_CTRL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VERSION
+ * Get version information about the MC firmware.
+ */
+#define MC_CMD_GET_VERSION 0x8
+#undef MC_CMD_0x8_PRIVILEGE_CTG
+
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VERSION_IN msgrequest */
+#define MC_CMD_GET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
+#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
+/* placeholder, set to 0 */
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
+#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+/* enum: Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* enum: Bootrom version value for Siena. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
+/* enum: Bootrom version value for Huntington. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+
+/* MC_CMD_GET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
+
+/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
+#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
+
+
+/***********************************/
+/* MC_CMD_FC
+ * Perform an FC operation
+ */
+#define MC_CMD_FC 0x9
+
+/* MC_CMD_FC_IN msgrequest */
+#define MC_CMD_FC_IN_LEN 4
+#define MC_CMD_FC_IN_OP_HDR_OFST 0
+#define MC_CMD_FC_IN_OP_LBN 0
+#define MC_CMD_FC_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to FC. */
+#define MC_CMD_FC_OP_NULL 0x1
+/* enum: Unused opcode */
+#define MC_CMD_FC_OP_UNUSED 0x2
+/* enum: MAC driver commands */
+#define MC_CMD_FC_OP_MAC 0x3
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_READ32 0x4
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_WRITE32 0x5
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_READ 0x6
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_WRITE 0x7
+/* enum: FC firmware Version */
+#define MC_CMD_FC_OP_GET_VERSION 0x8
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_RX_READ 0x9
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa
+/* enum: SFP parameters */
+#define MC_CMD_FC_OP_SFP 0xb
+/* enum: DDR3 test */
+#define MC_CMD_FC_OP_DDR_TEST 0xc
+/* enum: Get Crash context from FC */
+#define MC_CMD_FC_OP_GET_ASSERT 0xd
+/* enum: Get FPGA Build registers */
+#define MC_CMD_FC_OP_FPGA_BUILD 0xe
+/* enum: Read map support commands */
+#define MC_CMD_FC_OP_READ_MAP 0xf
+/* enum: FC Capabilities */
+#define MC_CMD_FC_OP_CAPABILITIES 0x10
+/* enum: FC Global flags */
+#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11
+/* enum: FC IO using relative addressing modes */
+#define MC_CMD_FC_OP_IO_REL 0x12
+/* enum: FPGA link information */
+#define MC_CMD_FC_OP_UHLINK 0x13
+/* enum: Configure loopbacks and link on FPGA ports */
+#define MC_CMD_FC_OP_SET_LINK 0x14
+/* enum: Licensing operations relating to AOE */
+#define MC_CMD_FC_OP_LICENSE 0x15
+/* enum: Startup information to the FC */
+#define MC_CMD_FC_OP_STARTUP 0x16
+/* enum: Configure a DMA read */
+#define MC_CMD_FC_OP_DMA 0x17
+/* enum: Configure a timed read */
+#define MC_CMD_FC_OP_TIMED_READ 0x18
+/* enum: Control UART logging */
+#define MC_CMD_FC_OP_LOG 0x19
+/* enum: Get the value of a given clock_id */
+#define MC_CMD_FC_OP_CLOCK 0x1a
+/* enum: DDR3/QDR3 parameters */
+#define MC_CMD_FC_OP_DDR 0x1b
+/* enum: PTP and timestamp control */
+#define MC_CMD_FC_OP_TIMESTAMP 0x1c
+/* enum: Commands for SPI Flash interface */
+#define MC_CMD_FC_OP_SPI 0x1d
+/* enum: Commands for diagnostic components */
+#define MC_CMD_FC_OP_DIAG 0x1e
+/* enum: External AOE port. */
+#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0
+/* enum: Internal AOE port. */
+#define MC_CMD_FC_IN_PORT_INT_OFST 0x40
+
+/* MC_CMD_FC_IN_NULL msgrequest */
+#define MC_CMD_FC_IN_NULL_LEN 4
+#define MC_CMD_FC_IN_CMD_OFST 0
+
+/* MC_CMD_FC_IN_PHY msgrequest */
+#define MC_CMD_FC_IN_PHY_LEN 5
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* FC PHY driver operation code */
+#define MC_CMD_FC_IN_PHY_OP_OFST 4
+#define MC_CMD_FC_IN_PHY_OP_LEN 1
+/* enum: PHY init handler */
+#define MC_CMD_FC_OP_PHY_OP_INIT 0x1
+/* enum: PHY reconfigure handler */
+#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2
+/* enum: PHY reboot handler */
+#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3
+/* enum: PHY get_supported_cap handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4
+/* enum: PHY get_config handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5
+/* enum: PHY get_media_info handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6
+/* enum: PHY set_led handler */
+#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7
+/* enum: PHY lasi_interrupt handler */
+#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8
+/* enum: PHY check_link handler */
+#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9
+/* enum: PHY fill_stats handler */
+#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa
+/* enum: PHY bpx_link_state_changed handler */
+#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb
+/* enum: PHY get_state handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc
+/* enum: PHY start_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd
+/* enum: PHY poll_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe
+/* enum: PHY nvram_test handler */
+#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf
+/* enum: PHY relinquish handler */
+#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10
+/* enum: PHY read connection from FC - may be not required */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11
+/* enum: PHY read flags from FC - may be not required */
+#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12
+
+/* MC_CMD_FC_IN_PHY_INIT msgrequest */
+#define MC_CMD_FC_IN_PHY_INIT_LEN 4
+#define MC_CMD_FC_IN_PHY_CMD_OFST 0
+
+/* MC_CMD_FC_IN_MAC msgrequest */
+#define MC_CMD_FC_IN_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_MAC_HEADER_OFST 4
+#define MC_CMD_FC_IN_MAC_OP_LBN 0
+#define MC_CMD_FC_IN_MAC_OP_WIDTH 8
+/* enum: MAC reconfigure handler */
+#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1
+/* enum: MAC Set command - same as MC_CMD_SET_MAC */
+#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2
+/* enum: MAC statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3
+/* enum: MAC RX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6
+/* enum: MAC TX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7
+/* enum: MAC Read status */
+#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8
+/* enum: External FPGA port. */
+#define MC_CMD_FC_PORT_EXT 0x0
+/* enum: Internal Siena-facing FPGA ports. */
+#define MC_CMD_FC_PORT_INT 0x1
+#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1
+
+/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */
+#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MTU size */
+#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8
+/* Drain Tx FIFO */
+#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28
+
+/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */
+#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC Statistics index */
+#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8
+#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1
+/* Number of statistics to read */
+#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16
+#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */
+#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */
+
+/* MC_CMD_FC_IN_READ32 msgrequest */
+#define MC_CMD_FC_IN_READ32_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12
+
+/* MC_CMD_FC_IN_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_WRITE32_LENMIN 16
+#define MC_CMD_FC_IN_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12
+#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60
+
+/* MC_CMD_FC_IN_TRC_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8
+
+/* MC_CMD_FC_IN_TRC_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_WRITE_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4
+
+/* MC_CMD_FC_IN_GET_VERSION msgrequest */
+#define MC_CMD_FC_IN_GET_VERSION_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+
+/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8
+
+/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2
+
+/* MC_CMD_FC_IN_SFP msgrequest */
+#define MC_CMD_FC_IN_SFP_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* Link speed is 100, 1000, 10000, 40000 */
+#define MC_CMD_FC_IN_SFP_SPEED_OFST 4
+/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */
+#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8
+/* Not relevant for cards with QSFP modules. For older cards, true if module is
+ * a dual speed SFP+ module.
+ */
+#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16
+/* The type of the SFP+ Module. For later cards with QSFP modules, this field
+ * is unused and the type is communicated by other means.
+ */
+#define MC_CMD_FC_IN_SFP_TYPE_OFST 20
+/* Capabilities corresponding to 1 bits. */
+#define MC_CMD_FC_IN_SFP_CAPS_OFST 24
+
+/* MC_CMD_FC_IN_DDR_TEST msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4
+#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8
+/* enum: DRAM Test Start */
+#define MC_CMD_FC_OP_DDR_TEST_START 0x1
+/* enum: DRAM Test Poll */
+#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2
+
+/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1
+
+/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12
+#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* Clear previous test result and prepare for restarting DDR test */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8
+
+/* MC_CMD_FC_IN_GET_ASSERT msgrequest */
+#define MC_CMD_FC_IN_GET_ASSERT_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+
+/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */
+#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* FPGA build info operation code */
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4
+/* enum: Get the build registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1
+/* enum: Get the services registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2
+/* enum: Get the BSP version */
+#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3
+/* enum: Get build register for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4
+/* enum: GEt the services register for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5
+
+/* MC_CMD_FC_IN_READ_MAP msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4
+#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0
+#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8
+/* enum: Get the number of map regions */
+#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1
+/* enum: Get the specified map */
+#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2
+
+/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_MAP_INDEX_OFST 8
+
+/* MC_CMD_FC_IN_CAPABILITIES msgrequest */
+#define MC_CMD_FC_IN_CAPABILITIES_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+
+/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1
+
+/* MC_CMD_FC_IN_IO_REL msgrequest */
+#define MC_CMD_FC_IN_IO_REL_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4
+#define MC_CMD_FC_IN_IO_REL_OP_LBN 0
+#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8
+/* enum: Get the base address that the FC applies to relative commands */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1
+/* enum: Read data */
+#define MC_CMD_FC_IN_IO_REL_READ32 0x2
+/* enum: Write data */
+#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8
+/* enum: Application address space */
+#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1
+/* enum: Flash address space */
+#define MC_CMD_FC_COMP_TYPE_FLASH 0x2
+
+/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16
+
+/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59
+
+/* MC_CMD_FC_IN_UHLINK msgrequest */
+#define MC_CMD_FC_IN_UHLINK_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4
+#define MC_CMD_FC_IN_UHLINK_OP_LBN 0
+#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8
+/* enum: Get PHY configuration info */
+#define MC_CMD_FC_OP_UHLINK_PHY 0x1
+/* enum: Get MAC configuration info */
+#define MC_CMD_FC_OP_UHLINK_MAC 0x2
+/* enum: Get Rx eye table */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3
+/* enum: Get Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4
+/* enum: Get Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5
+/* enum: Retune Rx settings */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6
+/* enum: Set loopback mode on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7
+/* enum: Get loopback mode config state on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1
+
+/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */
+#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */
+#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8
+#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16
+#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8
+
+/* MC_CMD_FC_IN_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_SET_LINK_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4
+#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8
+#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1
+
+/* MC_CMD_FC_IN_LICENSE msgrequest */
+#define MC_CMD_FC_IN_LICENSE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_LICENSE_OP_OFST 4
+#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */
+#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */
+
+/* MC_CMD_FC_IN_STARTUP msgrequest */
+#define MC_CMD_FC_IN_STARTUP_LEN 40
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4
+#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8
+/* Length of identifier */
+#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12
+/* Identifier for AOE FPGA */
+#define MC_CMD_FC_IN_STARTUP_ID_OFST 16
+#define MC_CMD_FC_IN_STARTUP_ID_LEN 1
+#define MC_CMD_FC_IN_STARTUP_ID_NUM 24
+
+/* MC_CMD_FC_IN_DMA msgrequest */
+#define MC_CMD_FC_IN_DMA_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DMA_OP_OFST 4
+#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */
+#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DMA_STOP msgrequest */
+#define MC_CMD_FC_IN_DMA_STOP_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8
+
+/* MC_CMD_FC_IN_DMA_READ msgrequest */
+#define MC_CMD_FC_IN_DMA_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8
+#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12
+
+/* MC_CMD_FC_IN_TIMED_READ msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4
+#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */
+
+/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* Host supplied handle (unique) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8
+/* Address into which to transfer data in host */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28
+/* Length of host transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32
+/* Offset back from aoe_address to apply operation to */
+#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36
+/* Data to apply at offset */
+#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40
+#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */
+/* Period at which reads are performed (100ms units) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48
+
+/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8
+
+/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8
+
+/* MC_CMD_FC_IN_LOG msgrequest */
+#define MC_CMD_FC_IN_LOG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_LOG_OP_OFST 4
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */
+#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */
+
+/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* Partition offset into flash */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8
+/* Partition length */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12
+/* Partition erase size */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16
+
+/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* Enable/disable printing to JTAG UART */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8
+
+/* MC_CMD_FC_IN_CLOCK msgrequest */
+#define MC_CMD_FC_IN_CLOCK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_CLOCK_OP_OFST 4
+#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */
+/* Perform a clock operation */
+#define MC_CMD_FC_IN_CLOCK_ID_OFST 8
+#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */
+
+/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest */
+#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* Retrieve the clock value of the specified clock */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+
+/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16
+/* Set the clock value of the specified clock */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20
+
+/* MC_CMD_FC_IN_DDR msgrequest */
+#define MC_CMD_FC_IN_DDR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DDR_OP_OFST 4
+#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_OFST 8
+#define MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */
+#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* Flags */
+#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12
+#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */
+/* 128-byte page of serial presence detect data read from module's EEPROM */
+#define MC_CMD_FC_IN_DDR_SPD_OFST 16
+#define MC_CMD_FC_IN_DDR_SPD_LEN 1
+#define MC_CMD_FC_IN_DDR_SPD_NUM 128
+/* Page index of the spd data copied into MC_CMD_FC_IN_DDR_SPD */
+#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144
+
+/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* Size of DDR */
+#define MC_CMD_FC_IN_DDR_SIZE_OFST 12
+
+/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */
+#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+
+/* MC_CMD_FC_IN_TIMESTAMP msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* FC timestamp operation code */
+#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4
+/* enum: Read transmit timestamp(s) */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0
+/* enum: Read snapshot timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1
+/* enum: Clear all transmit timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4
+/* Control filtering of the returned timestamp and sequence number specified
+ * here
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8
+/* enum: Return most recent timestamp. No filtering */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0
+/* enum: Match timestamp against the PTP clock ID, port number and sequence
+ * number specified
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1
+/* Clock identity of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16
+/* Port number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20
+/* Sequence number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4
+
+/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4
+
+/* MC_CMD_FC_IN_SPI msgrequest */
+#define MC_CMD_FC_IN_SPI_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* Basic commands for SPI Flash. */
+#define MC_CMD_FC_IN_SPI_OP_OFST 4
+/* enum: SPI Flash read */
+#define MC_CMD_FC_IN_SPI_READ 0x0
+/* enum: SPI Flash write */
+#define MC_CMD_FC_IN_SPI_WRITE 0x1
+/* enum: SPI Flash erase */
+#define MC_CMD_FC_IN_SPI_ERASE 0x2
+
+/* MC_CMD_FC_IN_SPI_READ msgrequest */
+#define MC_CMD_FC_IN_SPI_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12
+
+/* MC_CMD_FC_IN_SPI_WRITE msgrequest */
+#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16
+#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252
+#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_FC_IN_SPI_ERASE msgrequest */
+#define MC_CMD_FC_IN_SPI_ERASE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12
+
+/* MC_CMD_FC_IN_DIAG msgrequest */
+#define MC_CMD_FC_IN_DIAG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* Operation code indicating component type */
+#define MC_CMD_FC_IN_DIAG_OP_OFST 4
+/* enum: Power noise generator. */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0
+/* enum: DDR soak test component. */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1
+/* enum: Diagnostics datapath control component. */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8
+/* enum: Read the configuration (the 32-bit values in each of the clock enable
+ * count and toggle count registers)
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0
+/* enum: Write a new configuration to the clock enable count and toggle count
+ * registers
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8
+/* The 32-bit value to be written to the toggle count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12
+/* The 32-bit value to be written to the clock enable count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8
+/* enum: Starts DDR soak test on selected banks */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0
+/* enum: Read status of DDR soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1
+/* enum: Stop test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2
+/* enum: Set or clear bit that triggers fake errors. These cause subsequent
+ * tests to fail until the bit is cleared.
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12
+/* Pattern to use in the soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */
+/* Either multiple automatic tests until a STOP command is issued, or one
+ * single test
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8
+/* DDR bank to read status from */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12
+#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */
+#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */
+#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */
+#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */
+#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8
+/* Mask of DDR banks to set/clear error flag on */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8
+/* enum: Set a known datapath configuration */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0
+/* enum: Apply raw config to datapath control registers */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8
+/* Datapath configuration identifier */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8
+/* Value to write into control register 1 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12
+/* Value to write into control register 2 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16
+/* Value to write into control register 3 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20
+
+/* MC_CMD_FC_OUT msgresponse */
+#define MC_CMD_FC_OUT_LEN 0
+
+/* MC_CMD_FC_OUT_NULL msgresponse */
+#define MC_CMD_FC_OUT_NULL_LEN 0
+
+/* MC_CMD_FC_OUT_READ32 msgresponse */
+#define MC_CMD_FC_OUT_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_TRC_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_READ_LEN 16
+#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4
+
+/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_GET_VERSION msgresponse */
+#define MC_CMD_FC_OUT_GET_VERSION_LEN 12
+#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2
+
+/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */
+#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0
+
+/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS
+#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */
+#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_RX_NSTATS 0x19
+
+/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS
+#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */
+#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_TX_NSTATS 0x16
+
+/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3)
+/* MAC Statistics */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_MAC msgresponse */
+#define MC_CMD_FC_OUT_MAC_LEN 0
+
+/* MC_CMD_FC_OUT_SFP msgresponse */
+#define MC_CMD_FC_OUT_SFP_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8
+/* enum: Test not yet initiated */
+#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0
+/* enum: Test is in progress */
+#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1
+/* enum: Timed completed */
+#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2
+/* enum: Test did not complete in specified time */
+#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1
+/* Test result from FPGA */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */
+
+/* MC_CMD_FC_OUT_DDR_TEST msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_LEN 0
+
+/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */
+#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144
+/* Assertion status flag. */
+#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8
+/* enum: No crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0
+/* enum: New crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1
+/* enum: Crash data has been sent */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8
+/* enum: No crash has been recorded. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0
+/* enum: Crash due to exception. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1
+/* enum: Crash due to assertion. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2
+/* Failing PC value */
+#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31
+/* Exception Type */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132
+/* Instruction at which exception occurred */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136
+/* BAD Address that triggered address-based exception */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140
+
+/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8
+#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */
+#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */
+#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4
+#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */
+#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4
+/* Qsys system ID */
+#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4
+
+/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4
+/* Number of maps */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0
+
+/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164
+/* Index of the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0
+/* Options for the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */
+/* Address of start of map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12
+/* Length of address map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20
+/* Component information field */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24
+/* License expiry data for map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32
+/* Name of the component */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128
+
+/* MC_CMD_FC_OUT_READ_MAP msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_LEN 0
+
+/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */
+#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8
+/* Number of internal ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0
+/* Number of external ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4
+
+/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0
+
+/* MC_CMD_FC_OUT_IO_REL msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_LEN 0
+
+/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4
+
+/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16
+/* Transceiver Transmit settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16
+/* Transceiver Receive settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16
+/* Rx eye opening */
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 16
+/* PCS status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16
+/* Link status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1
+/* Current SFp parameters applied */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20
+/* Link speed is 100, 1000, 10000 */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24
+/* Length of copper cable - zero when not relevant */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28
+/* True if a dual speed SFP+ module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36
+/* The type of the SFP+ Module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40
+/* PHY config flags */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1
+
+/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20
+/* MAC configuration applied */
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0
+/* MTU size */
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4
+/* IF Mode status */
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8
+/* MAC address configured */
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16
+
+/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3)
+/* Rx Eye measurements */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3)
+/* Has the eye plot dump completed and data returned is valid? */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0
+/* Rx Eye binary plot */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0
+
+/* MC_CMD_FC_OUT_UHLINK msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LEN 0
+
+/* MC_CMD_FC_OUT_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_LICENSE msgresponse */
+#define MC_CMD_FC_OUT_LICENSE_LEN 12
+/* Count of valid keys */
+#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0
+/* Count of invalid keys */
+#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4
+/* Count of blacklisted keys */
+#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8
+
+/* MC_CMD_FC_OUT_STARTUP msgresponse */
+#define MC_CMD_FC_OUT_STARTUP_LEN 4
+/* Capabilities of the FPGA/FC */
+#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1
+
+/* MC_CMD_FC_OUT_DMA_READ msgresponse */
+#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1
+#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252
+#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num))
+/* The data read */
+#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252
+
+/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4
+/* Timer handle */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0
+
+/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52
+/* Host supplied handle (unique) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0
+/* Address into which to transfer data in host */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20
+/* Length of host transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24
+/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28
+#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32
+/* When active, start read time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40
+/* When active, end read time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48
+
+/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */
+#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0
+
+/* MC_CMD_FC_OUT_LOG msgresponse */
+#define MC_CMD_FC_OUT_LOG_LEN 0
+
+/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20
+
+/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num))
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31
+
+/* MC_CMD_FC_OUT_SPI_READ msgresponse */
+#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4
+#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252
+#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */
+#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */
+#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8
+/* The 32-bit value read from the toggle count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0
+/* The 32-bit value read from the clock enable count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8
+/* DDR soak test status word; bits [4:0] are relevant. */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1
+/* DDR soak test error count */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0
+
+
+/***********************************/
+/* MC_CMD_AOE
+ * AOE operations on MC
+ */
+#define MC_CMD_AOE 0xa
+
+/* MC_CMD_AOE_IN msgrequest */
+#define MC_CMD_AOE_IN_LEN 4
+#define MC_CMD_AOE_IN_OP_HDR_OFST 0
+#define MC_CMD_AOE_IN_OP_LBN 0
+#define MC_CMD_AOE_IN_OP_WIDTH 8
+/* enum: FPGA and CPLD information */
+#define MC_CMD_AOE_OP_INFO 0x1
+/* enum: Currents and voltages read from MCP3424s; DEBUG */
+#define MC_CMD_AOE_OP_CURRENTS 0x2
+/* enum: Temperatures at locations around the PCB; DEBUG */
+#define MC_CMD_AOE_OP_TEMPERATURES 0x3
+/* enum: Set CPLD to idle */
+#define MC_CMD_AOE_OP_CPLD_IDLE 0x4
+/* enum: Read from CPLD register */
+#define MC_CMD_AOE_OP_CPLD_READ 0x5
+/* enum: Write to CPLD register */
+#define MC_CMD_AOE_OP_CPLD_WRITE 0x6
+/* enum: Execute CPLD instruction */
+#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7
+/* enum: Reprogram the CPLD on the AOE device */
+#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8
+/* enum: AOE power control */
+#define MC_CMD_AOE_OP_POWER 0x9
+/* enum: AOE image loading */
+#define MC_CMD_AOE_OP_LOAD 0xa
+/* enum: Fan monitoring */
+#define MC_CMD_AOE_OP_FAN_CONTROL 0xb
+/* enum: Fan failures since last reset */
+#define MC_CMD_AOE_OP_FAN_FAILURES 0xc
+/* enum: Get generic AOE MAC statistics */
+#define MC_CMD_AOE_OP_MAC_STATS 0xd
+/* enum: Retrieve PHY specific information */
+#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe
+/* enum: Write a number of JTAG primitive commands, return will give data */
+#define MC_CMD_AOE_OP_JTAG_WRITE 0xf
+/* enum: Control access to the FPGA via the Siena JTAG Chain */
+#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10
+/* enum: Set the MTU offset between Siena and AOE MACs */
+#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11
+/* enum: How link state is handled */
+#define MC_CMD_AOE_OP_LINK_STATE 0x12
+/* enum: How Siena MAC statistics are reported (deprecated - use
+ * MC_CMD_AOE_OP_ASIC_STATS)
+ */
+#define MC_CMD_AOE_OP_SIENA_STATS 0x13
+/* enum: How native ASIC MAC statistics are reported - replaces the deprecated
+ * command MC_CMD_AOE_OP_SIENA_STATS
+ */
+#define MC_CMD_AOE_OP_ASIC_STATS 0x13
+/* enum: DDR memory information */
+#define MC_CMD_AOE_OP_DDR 0x14
+/* enum: FC control */
+#define MC_CMD_AOE_OP_FC 0x15
+/* enum: DDR ECC status reads */
+#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16
+/* enum: Commands for MC-SPI Master emulation */
+#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17
+/* enum: Commands for FC boot control */
+#define MC_CMD_AOE_OP_FC_BOOT 0x18
+
+/* MC_CMD_AOE_OUT msgresponse */
+#define MC_CMD_AOE_OUT_LEN 0
+
+/* MC_CMD_AOE_IN_INFO msgrequest */
+#define MC_CMD_AOE_IN_INFO_LEN 4
+#define MC_CMD_AOE_IN_CMD_OFST 0
+
+/* MC_CMD_AOE_IN_CURRENTS msgrequest */
+#define MC_CMD_AOE_IN_CURRENTS_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */
+#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_CPLD_READ msgrequest */
+#define MC_CMD_AOE_IN_CPLD_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8
+
+/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8
+#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12
+
+/* MC_CMD_AOE_IN_CPLD_INSTRUCTION msgrequest */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4
+
+/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4
+/* enum: Reprogram CPLD, poll for completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1
+/* enum: Reprogram CPLD, send event on completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3
+/* enum: Get status of reprogramming operation */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4
+
+/* MC_CMD_AOE_IN_POWER msgrequest */
+#define MC_CMD_AOE_IN_POWER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Turn on or off AOE power */
+#define MC_CMD_AOE_IN_POWER_OP_OFST 4
+/* enum: Turn off FPGA power */
+#define MC_CMD_AOE_IN_POWER_OFF 0x0
+/* enum: Turn on FPGA power */
+#define MC_CMD_AOE_IN_POWER_ON 0x1
+/* enum: Clear peak power measurement */
+#define MC_CMD_AOE_IN_POWER_CLEAR 0x2
+/* enum: Show current power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3
+/* enum: Show peak power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4
+/* enum: Show current DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5
+/* enum: Show peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6
+/* enum: Clear peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7
+
+/* MC_CMD_AOE_IN_LOAD msgrequest */
+#define MC_CMD_AOE_IN_LOAD_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Image to be loaded (0 - main or 1 - diagnostic) to load in normal sequence
+ */
+#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4
+
+/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */
+#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* If non zero report measured fan RPM rather than nominal */
+#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4
+
+/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */
+#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_MAC_STATS msgrequest */
+#define MC_CMD_AOE_IN_MAC_STATS_LEN 24
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* AOE port */
+#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4
+/* Host memory address for statistics */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12
+#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16
+/* Length of DMA data (optional) */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20
+
+/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* AOE port */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8
+
+/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num))
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Enable or disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4
+/* enum: Enable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1
+/* enum: Disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2
+
+/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* AOE port - when not ALL_EXTERNAL or ALL_INTERNAL specifies port number */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4
+/* enum: Apply to all external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000
+/* enum: Apply to all internal ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000
+/* The MTU offset to be applied to the external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8
+
+/* MC_CMD_AOE_IN_LINK_STATE msgrequest */
+#define MC_CMD_AOE_IN_LINK_STATE_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8
+/* enum: AOE and associated external port */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0
+/* enum: AOE and OR of all external ports */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1
+/* enum: Individual ports */
+#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2
+/* enum: Configure link state mode on given AOE port */
+#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8
+/* enum: No-op */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0
+/* enum: logical OR of all SFP ports link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1
+/* enum: logical AND of all SFP ports link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16
+
+/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */
+#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4
+/* enum: Statistics from Siena (default) */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */
+#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4
+/* enum: Statistics from the ASIC (default) */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_DDR msgrequest */
+#define MC_CMD_AOE_IN_DDR_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_DDR_BANK_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+/* Page index of SPD data */
+#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8
+
+/* MC_CMD_AOE_IN_FC msgrequest */
+#define MC_CMD_AOE_IN_FC_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Basic commands for MC SPI Master emulation. */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4
+/* enum: MC SPI read */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0
+/* enum: MC SPI write */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12
+
+/* MC_CMD_AOE_IN_FC_BOOT msgrequest */
+#define MC_CMD_AOE_IN_FC_BOOT_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* FC boot control flags */
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1
+
+/* MC_CMD_AOE_OUT_INFO msgresponse */
+#define MC_CMD_AOE_OUT_INFO_LEN 44
+/* JTAG IDCODE of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0
+/* Version of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4
+/* JTAG IDCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8
+/* JTAG USERCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12
+/* FPGA type - read from CPLD straps */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */
+/* FPGA state (debug) */
+#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20
+/* FPGA image - partition from which loaded */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24
+/* FC state */
+#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28
+/* enum: Set if watchdog working */
+#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1
+/* enum: Set if MC-FC communications working */
+#define MC_CMD_AOE_OUT_INFO_COMMS 0x2
+/* Random pieces of information */
+#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32
+/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */
+#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1
+/* enum: CPLD apparently good */
+#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2
+/* enum: FPGA working normally */
+#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4
+/* enum: FPGA is powered */
+#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8
+/* enum: Board has incompatible SODIMMs fitted */
+#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10
+/* enum: Board has ByteBlaster connected */
+#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20
+/* enum: FPGA Boot flash has an invalid header. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40
+/* enum: FPGA Application flash is accessible. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80
+/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3. */
+#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36
+#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */
+/* Result of FC booting - not valid while a ByteBlaster is connected. */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40
+/* enum: No error */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0
+/* enum: Bad address set in CPLD */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1
+/* enum: Bad header */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2
+/* enum: Bad text section details */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3
+/* enum: Bad checksum */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4
+/* enum: Bad BSP */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5
+/* enum: Flash mode is invalid */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6
+/* enum: FC application loaded and execution attempted */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80
+/* enum: FC application Started */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81
+/* enum: No bootrom in FPGA */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff
+
+/* MC_CMD_AOE_OUT_CURRENTS msgresponse */
+#define MC_CMD_AOE_OUT_CURRENTS_LEN 68
+/* Set of currents and voltages (mA or mV as appropriate) */
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17
+#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */
+
+/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */
+#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40
+/* Set of temperatures */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10
+/* enum: The first set of enum values are for Modena code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */
+/* enum: The second set of enum values are for Sorrento code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */
+
+/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4
+/* The value read from the CPLD */
+#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0
+
+/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num))
+/* Failure counts for each fan */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63
+
+/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4
+/* Results of status command (only) */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0
+
+/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */
+#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0
+
+/* MC_CMD_AOE_OUT_POWER_ON msgresponse */
+#define MC_CMD_AOE_OUT_POWER_ON_LEN 0
+
+/* MC_CMD_AOE_OUT_LOAD msgresponse */
+#define MC_CMD_AOE_OUT_LOAD_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */
+#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA
+ * for details
+ */
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+
+/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num))
+/* Used to align the in and out data blocks so the MC can re-use the cmd */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0
+/* out bytes */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */
+#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0
+
+/* MC_CMD_AOE_OUT_DDR msgresponse */
+#define MC_CMD_AOE_OUT_DDR_LENMIN 17
+#define MC_CMD_AOE_OUT_DDR_LENMAX 252
+#define MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num))
+/* Information on the module. */
+#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0
+#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1
+/* Memory size, in MB. */
+#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4
+/* The memory type, as reported from SPD information */
+#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8
+/* Nominal voltage of the module (as applied) */
+#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12
+/* SPD data read from the module */
+#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16
+#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236
+
+/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */
+#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0
+
+/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */
+#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0
+
+/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */
+#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */
+#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_FC msgresponse */
+#define MC_CMD_AOE_OUT_FC_LEN 0
+
+/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8
+/* Flags describing status info on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1
+/* DDR ECC status on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0
+
+/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */
+#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PTP
+ * Perform PTP operation
+ */
+#define MC_CMD_PTP 0xb
+#undef MC_CMD_0xb_PRIVILEGE_CTG
+
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PTP_IN msgrequest */
+#define MC_CMD_PTP_IN_LEN 1
+/* PTP operation code */
+#define MC_CMD_PTP_IN_OP_OFST 0
+#define MC_CMD_PTP_IN_OP_LEN 1
+/* enum: Enable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_ENABLE 0x1
+/* enum: Disable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_DISABLE 0x2
+/* enum: Send a PTP packet. */
+#define MC_CMD_PTP_OP_TRANSMIT 0x3
+/* enum: Read the current NIC time. */
+#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
+/* enum: Get the current PTP status. */
+#define MC_CMD_PTP_OP_STATUS 0x5
+/* enum: Adjust the PTP NIC's time. */
+#define MC_CMD_PTP_OP_ADJUST 0x6
+/* enum: Synchronize host and NIC time. */
+#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
+/* enum: Basic manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
+/* enum: Packet based manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
+/* enum: Reset some of the PTP related statistics */
+#define MC_CMD_PTP_OP_RESET_STATS 0xa
+/* enum: Debug operations to MC. */
+#define MC_CMD_PTP_OP_DEBUG 0xb
+/* enum: Read an FPGA register */
+#define MC_CMD_PTP_OP_FPGAREAD 0xc
+/* enum: Write an FPGA register */
+#define MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Change Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source */
+#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. */
+#define MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE- extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS based manfacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
+/* enum: Above this for future use. */
+#define MC_CMD_PTP_OP_MAX 0x1c
+
+/* MC_CMD_PTP_IN_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_ENABLE_LEN 16
+#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+/* Event queue for PTP events */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+/* PTP timestamping mode */
+#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+/* enum: PTP, version 1 */
+#define MC_CMD_PTP_MODE_V1 0x0
+/* enum: PTP, version 1, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V1_VLAN 0x1
+/* enum: PTP, version 2 */
+#define MC_CMD_PTP_MODE_V2 0x2
+/* enum: PTP, version 2, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V2_VLAN 0x3
+/* enum: PTP, version 2, with improved UUID filtering */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
+/* enum: FCoE (seconds and microseconds) */
+#define MC_CMD_PTP_MODE_FCOE 0x5
+
+/* MC_CMD_PTP_IN_DISABLE msgrequest */
+#define MC_CMD_PTP_IN_DISABLE_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
+#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
+#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
+#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Transmit packet length */
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+/* Transmit packet data */
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_STATUS msgrequest */
+#define MC_CMD_PTP_IN_STATUS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+
+/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of time readings to capture */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+/* Host address in which to write "synchronization started" indication (64
+ * bits)
+ */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
+
+/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Enable or disable packet testing */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+
+/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
+#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset PTP statistics */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_DEBUG msgrequest */
+#define MC_CMD_PTP_IN_DEBUG_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Debug operations */
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+
+/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
+#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+
+/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+
+/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+
+/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of VLAN tags, 0 if not VLAN */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+/* Set of VLAN tags to filter against */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
+
+/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable UUID filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+/* UUID to filter against */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+
+/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable Domain filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+/* Domain number to filter against */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+
+/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Set the clock source. */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+/* enum: Internal. */
+#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
+
+/* MC_CMD_PTP_IN_RST_CLK msgrequest */
+#define MC_CMD_PTP_IN_RST_CLK_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset value of Timer Reg. */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Enable or disable */
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+/* enum: Enable */
+#define MC_CMD_PTP_ENABLE_PPS 0x0
+/* enum: Disable */
+#define MC_CMD_PTP_DISABLE_PPS 0x1
+/* Queue id to send events back */
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Original field containing queue ID. Now extended to include flags. */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Unsubscribe options */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+/* enum: Unsubscribe a single queue */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* NIC - Host System Clock Synchronization status */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+/* enum: Host System clock and NIC clock are not in sync */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+
+/* MC_CMD_PTP_OUT msgresponse */
+#define MC_CMD_PTP_OUT_LEN 0
+
+/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
+#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_STATUS_LEN 64
+/* Frequency of NIC's hardware clock */
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+/* Number of packets transmitted and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+/* Number of packets received and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+/* Number of packets timestamped by the FPGA */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+/* Number of packets filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+/* Number of packets not filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+/* Number of PPS overflows (noise on input?) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+/* Number of PPS bad periods */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+/* Minimum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+/* Maximum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+/* Last period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+/* Mean period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+/* Last offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+/* Mean offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+
+/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+/* A set of host and NIC times */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+/* Host time immediately before NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+/* Host time immediately after NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+/* Number of nanoseconds waited after reading NIC's hardware clock */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+/* enum: Successful test */
+#define MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event nS reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
+/* Presence of external oscillator */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+/* Number of packets received by FPGA */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+/* Number of packets received by Siena filters */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by for this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
+/* Time format required/used by for this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 second per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+/* Various PTP capabilities */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CSR_READ32
+ * Read 32bit words from the indirect memory map.
+ */
+#define MC_CMD_CSR_READ32 0xc
+#undef MC_CMD_0xc_PRIVILEGE_CTG
+
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CSR_READ32_IN msgrequest */
+#define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
+#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+
+/* MC_CMD_CSR_READ32_OUT msgresponse */
+#define MC_CMD_CSR_READ32_OUT_LENMIN 4
+#define MC_CMD_CSR_READ32_OUT_LENMAX 252
+#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+/* The last dword is the status, not a value read */
+#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_CSR_WRITE32
+ * Write 32bit dwords to the indirect memory map.
+ */
+#define MC_CMD_CSR_WRITE32 0xd
+#undef MC_CMD_0xd_PRIVILEGE_CTG
+
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CSR_WRITE32_IN msgrequest */
+#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
+#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
+#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+/* Address */
+#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
+
+/* MC_CMD_CSR_WRITE32_OUT msgresponse */
+#define MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+#undef MC_CMD_0x54_PRIVILEGE_CTG
+
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
+ * the specified address with the specified interval.When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
+ * state / 2: (debug) Show temperature reported by one of the supported
+ * sensors.
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
+
+
+/***********************************/
+/* MC_CMD_STACKINFO
+ * Get stack information.
+ */
+#define MC_CMD_STACKINFO 0xf
+#undef MC_CMD_0xf_PRIVILEGE_CTG
+
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_STACKINFO_IN msgrequest */
+#define MC_CMD_STACKINFO_IN_LEN 0
+
+/* MC_CMD_STACKINFO_OUT msgresponse */
+#define MC_CMD_STACKINFO_OUT_LENMIN 12
+#define MC_CMD_STACKINFO_OUT_LENMAX 252
+#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+/* (thread ptr, stack size, free space) for each thread in system */
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_MDIO_READ
+ * MDIO register read.
+ */
+#define MC_CMD_MDIO_READ 0x10
+#undef MC_CMD_0x10_PRIVILEGE_CTG
+
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MDIO_READ_IN msgrequest */
+#define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
+#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
+ * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
+#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+
+/* MC_CMD_MDIO_READ_OUT msgresponse */
+#define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
+#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+/* Status the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+/* enum: Good. */
+#define MC_CMD_MDIO_STATUS_GOOD 0x8
+
+
+/***********************************/
+/* MC_CMD_MDIO_WRITE
+ * MDIO register write.
+ */
+#define MC_CMD_MDIO_WRITE 0x11
+#undef MC_CMD_0x11_PRIVILEGE_CTG
+
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MDIO_WRITE_IN msgrequest */
+#define MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+/* enum: Internal. */
+/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
+/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
+ * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+/* MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
+#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+/* Value */
+#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+
+/* MC_CMD_MDIO_WRITE_OUT msgresponse */
+#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+/* enum: Good. */
+/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
+
+
+/***********************************/
+/* MC_CMD_DBI_WRITE
+ * Write DBI register(s).
+ */
+#define MC_CMD_DBI_WRITE 0x12
+#undef MC_CMD_0x12_PRIVILEGE_CTG
+
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DBI_WRITE_IN msgrequest */
+#define MC_CMD_DBI_WRITE_IN_LENMIN 12
+#define MC_CMD_DBI_WRITE_IN_LENMAX 252
+#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
+
+/* MC_CMD_DBI_WRITE_OUT msgresponse */
+#define MC_CMD_DBI_WRITE_OUT_LEN 0
+
+/* MC_CMD_DBIWROP_TYPEDEF structuredef */
+#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PORT_READ32
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ32 0x14
+
+/* MC_CMD_PORT_READ32_IN msgrequest */
+#define MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ32_OUT msgresponse */
+#define MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
+#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+/* Status */
+#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE32
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE32 0x15
+
+/* MC_CMD_PORT_WRITE32_IN msgrequest */
+#define MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
+#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+/* Value */
+#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+
+/* MC_CMD_PORT_WRITE32_OUT msgresponse */
+#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_PORT_READ128
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ128 0x16
+
+/* MC_CMD_PORT_READ128_IN msgrequest */
+#define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ128_OUT msgresponse */
+#define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
+#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
+#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE128
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE128 0x17
+
+/* MC_CMD_PORT_WRITE128_IN msgrequest */
+#define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
+#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+/* Value */
+#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
+
+/* MC_CMD_PORT_WRITE128_OUT msgresponse */
+#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+
+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_CFG
+ * Returns the MC firmware configuration structure.
+ */
+#define MC_CMD_GET_BOARD_CFG 0x18
+#undef MC_CMD_0x18_PRIVILEGE_CTG
+
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
+#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
+#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+/* This field contains a 16-bit value for each of the types of NVRAM area. The
+ * values are defined in the firmware/mc/platform/.c file for a specific board
+ * type, but otherwise have no meaning to the MC; they are used by the driver
+ * to manage selection of appropriate firmware updates.
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_DBI_READX
+ * Read DBI register(s) -- extended functionality
+ */
+#define MC_CMD_DBI_READX 0x19
+#undef MC_CMD_0x19_PRIVILEGE_CTG
+
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DBI_READX_IN msgrequest */
+#define MC_CMD_DBI_READX_IN_LENMIN 8
+#define MC_CMD_DBI_READX_IN_LENMAX 248
+#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+/* Each Read op consists of an address (offset 0), VF/CS2) */
+#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
+
+/* MC_CMD_DBI_READX_OUT msgresponse */
+#define MC_CMD_DBI_READX_OUT_LENMIN 4
+#define MC_CMD_DBI_READX_OUT_LENMAX 252
+#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+/* Value */
+#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
+#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
+#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_RAND_SEED
+ * Set the 16byte seed for the MC pseudo-random generator.
+ */
+#define MC_CMD_SET_RAND_SEED 0x1a
+#undef MC_CMD_0x1a_PRIVILEGE_CTG
+
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_RAND_SEED_IN msgrequest */
+#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
+#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
+
+/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
+#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LTSSM_HIST
+ * Retrieve the history of the LTSSM, if the build supports it.
+ */
+#define MC_CMD_LTSSM_HIST 0x1b
+
+/* MC_CMD_LTSSM_HIST_IN msgrequest */
+#define MC_CMD_LTSSM_HIST_IN_LEN 0
+
+/* MC_CMD_LTSSM_HIST_OUT msgresponse */
+#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
+#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
+#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
+#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_DRV_ATTACH
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
+ */
+#define MC_CMD_DRV_ATTACH 0x1c
+#undef MC_CMD_0x1c_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRV_ATTACH_IN msgrequest */
+#define MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state to set if UPDATE=1 */
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_WIDTH 1
+#define MC_CMD_DRV_PREBOOT_LBN 1
+#define MC_CMD_DRV_PREBOOT_WIDTH 1
+/* 1 to set new state, or 0 to just report the existing state */
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+/* enum: Prefer to use full featured firmware */
+#define MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
+/* enum: Prefer to use firmware with additional "rules engine" filtering
+ * support
+ */
+#define MC_CMD_FW_RULES_ENGINE 0x5
+/* enum: Only this option is allowed for non-admin functions */
+#define MC_CMD_FW_DONT_CARE 0xffffffff
+
+/* MC_CMD_DRV_ATTACH_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+/* Flags associated with this function */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
+/* enum: The function does not have an active port associated with it. The port
+ * refers to the Sorrento external FPGA port.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
+
+
+/***********************************/
+/* MC_CMD_SHMUART
+ * Route UART output to circular buffer in shared memory instead.
+ */
+#define MC_CMD_SHMUART 0x1f
+
+/* MC_CMD_SHMUART_IN msgrequest */
+#define MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
+#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+
+/* MC_CMD_SHMUART_OUT msgresponse */
+#define MC_CMD_SHMUART_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+#undef MC_CMD_0x20_PRIVILEGE_CTG
+
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ENTITY_RESET
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
+ */
+#define MC_CMD_ENTITY_RESET 0x20
+/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
+
+/* MC_CMD_ENTITY_RESET_IN msgrequest */
+#define MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
+#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
+
+/* MC_CMD_ENTITY_RESET_OUT msgresponse */
+#define MC_CMD_ENTITY_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_CREDITS
+ * Read instantaneous and minimum flow control thresholds.
+ */
+#define MC_CMD_PCIE_CREDITS 0x21
+
+/* MC_CMD_PCIE_CREDITS_IN msgrequest */
+#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* poll period. 0 is disabled */
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+/* wipe statistics */
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+
+/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
+#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
+
+
+/***********************************/
+/* MC_CMD_RXD_MONITOR
+ * Get histogram of RX queue fill level.
+ */
+#define MC_CMD_RXD_MONITOR 0x22
+
+/* MC_CMD_RXD_MONITOR_IN msgrequest */
+#define MC_CMD_RXD_MONITOR_IN_LEN 12
+#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+
+/* MC_CMD_RXD_MONITOR_OUT msgresponse */
+#define MC_CMD_RXD_MONITOR_OUT_LEN 80
+#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+
+
+/***********************************/
+/* MC_CMD_PUTS
+ * Copy the given ASCII string out onto UART and/or out of the network port.
+ */
+#define MC_CMD_PUTS 0x23
+#undef MC_CMD_0x23_PRIVILEGE_CTG
+
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PUTS_IN msgrequest */
+#define MC_CMD_PUTS_IN_LENMIN 13
+#define MC_CMD_PUTS_IN_LENMAX 252
+#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
+#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_UART_LBN 0
+#define MC_CMD_PUTS_IN_UART_WIDTH 1
+#define MC_CMD_PUTS_IN_PORT_LBN 1
+#define MC_CMD_PUTS_IN_PORT_WIDTH 1
+#define MC_CMD_PUTS_IN_DHOST_OFST 4
+#define MC_CMD_PUTS_IN_DHOST_LEN 6
+#define MC_CMD_PUTS_IN_STRING_OFST 12
+#define MC_CMD_PUTS_IN_STRING_LEN 1
+#define MC_CMD_PUTS_IN_STRING_MINNUM 1
+#define MC_CMD_PUTS_IN_STRING_MAXNUM 240
+
+/* MC_CMD_PUTS_OUT msgresponse */
+#define MC_CMD_PUTS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_CFG
+ * Report PHY configuration. This guarantees to succeed even if the PHY is in a
+ * 'zombie' state. Locks required: None
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+#undef MC_CMD_0x24_PRIVILEGE_CTG
+
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_CFG_IN msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+
+/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+/* Bitmask of supported capabilities */
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_PHY_CAP_10HDX_LBN 1
+#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10FDX_LBN 2
+#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100HDX_LBN 3
+#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100FDX_LBN 4
+#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define MC_CMD_PHY_CAP_ASYM_LBN 9
+#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define MC_CMD_PHY_CAP_AN_LBN 10
+#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+/* enum: Xaui. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
+#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
+#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
+#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
+#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
+#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
+
+
+/***********************************/
+/* MC_CMD_START_BIST
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_START_BIST 0x25
+#undef MC_CMD_0x25_PRIVILEGE_CTG
+
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_START_BIST_IN msgrequest */
+#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI) . */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
+
+/* MC_CMD_START_BIST_OUT msgresponse */
+#define MC_CMD_START_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_BIST
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY specific bist output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/Fail in
+ * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
+ * EACCES (if PHY_LOCK is not held).
+ */
+#define MC_CMD_POLL_BIST 0x26
+#undef MC_CMD_0x26_PRIVILEGE_CTG
+
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_POLL_BIST_IN msgrequest */
+#define MC_CMD_POLL_BIST_IN_LEN 0
+
+/* MC_CMD_POLL_BIST_OUT msgresponse */
+#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+
+/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+/* Status of each channel A */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of each channel B */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of each channel C */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of each channel D */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+
+/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX0 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* enum: RX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7
+/* enum: RX1 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+
+
+/***********************************/
+/* MC_CMD_FLUSH_RX_QUEUES
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation, rather than via
+ * directly writing FLUSH_CMD.
+ *
+ * The flush is completed (either done/fail) asynchronously (after this command
+ * returns). The driver must still wait for flush done/failure events as usual.
+ */
+#define MC_CMD_FLUSH_RX_QUEUES 0x27
+
+/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
+
+/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
+#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LOOPBACK_MODES
+ * Returns a bitmask of loopback modes available at each speed.
+ */
+#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#undef MC_CMD_0x28_PRIVILEGE_CTG
+
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+/* enum: Medford Wireside datapath loopback */
+#define MC_CMD_LOOPBACK_DATA_WS 0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+
+
+/***********************************/
+/* MC_CMD_GET_LINK
+ * Read the unified MAC/PHY link state. Locks required: None Return code: 0,
+ * ETIME.
+ */
+#define MC_CMD_GET_LINK 0x29
+#undef MC_CMD_0x29_PRIVILEGE_CTG
+
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LINK_IN msgrequest */
+#define MC_CMD_GET_LINK_IN_LEN 0
+
+/* MC_CMD_GET_LINK_OUT msgresponse */
+#define MC_CMD_GET_LINK_OUT_LEN 28
+/* near-side advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+/* link-partner advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_SET_LINK
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME
+ */
+#define MC_CMD_SET_LINK 0x2a
+#undef MC_CMD_0x2a_PRIVILEGE_CTG
+
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_LINK_IN msgrequest */
+#define MC_CMD_SET_LINK_IN_LEN 16
+/* ??? */
+#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+/* Flags */
+#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+
+/* MC_CMD_SET_LINK_OUT msgresponse */
+#define MC_CMD_SET_LINK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_ID_LED
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_ID_LED 0x2b
+#undef MC_CMD_0x2b_PRIVILEGE_CTG
+
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_ID_LED_IN msgrequest */
+#define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
+#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_LED_OFF 0x0 /* enum */
+#define MC_CMD_LED_ON 0x1 /* enum */
+#define MC_CMD_LED_DEFAULT 0x2 /* enum */
+
+/* MC_CMD_SET_ID_LED_OUT msgresponse */
+#define MC_CMD_SET_ID_LED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MAC
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_MAC 0x2c
+#undef MC_CMD_0x2c_PRIVILEGE_CTG
+
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_MAC_IN msgrequest */
+#define MC_CMD_SET_MAC_IN_LEN 28
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto neg flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_EXT_IN msgrequest */
+#define MC_CMD_SET_MAC_EXT_IN_LEN 32
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_OUT msgresponse */
+#define MC_CMD_SET_MAC_OUT_LEN 0
+
+/* MC_CMD_SET_MAC_V2_OUT msgresponse */
+#define MC_CMD_SET_MAC_V2_OUT_LEN 4
+/* MTU as configured after processing the request. See comment at
+ * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL
+ * to 0.
+ */
+#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+
+
+/***********************************/
+/* MC_CMD_PHY_STATS
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumerate). Each value is represented
+ * by a 32bit number. If the DMA_ADDR is 0, then no DMA is performed, and the
+ * statistics may be read from the message response. If DMA_ADDR != 0, then the
+ * statistics are dmad to that (page-aligned location). Locks required: None.
+ * Returns: 0, ETIME
+ */
+#define MC_CMD_PHY_STATS 0x2d
+#undef MC_CMD_0x2d_PRIVILEGE_CTG
+
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_PHY_STATS_IN msgrequest */
+#define MC_CMD_PHY_STATS_IN_LEN 8
+/* ??? */
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
+/* enum: OUI. */
+#define MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define MC_CMD_PHY_NSTATS 0x17
+
+
+/***********************************/
+/* MC_CMD_MAC_STATS
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero initialise the buffer to
+ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are dmad to that (page-aligned location).
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
+ */
+#define MC_CMD_MAC_STATS 0x2e
+#undef MC_CMD_0x2e_PRIVILEGE_CTG
+
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATS_IN msgrequest */
+#define MC_CMD_MAC_STATS_IN_LEN 20
+/* ??? */
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+
+/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
+#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
+#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+#define MC_CMD_MAC_NSTATS 0x61 /* enum */
+
+
+/***********************************/
+/* MC_CMD_SRIOV
+ * to be documented
+ */
+#define MC_CMD_SRIOV 0x30
+
+/* MC_CMD_SRIOV_IN msgrequest */
+#define MC_CMD_SRIOV_IN_LEN 12
+#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+
+/* MC_CMD_SRIOV_OUT msgresponse */
+#define MC_CMD_SRIOV_OUT_LEN 8
+#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+
+/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MEMCPY
+ * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
+ * embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a dma
+ * update of a datastructure. To facilitate this, this MCDI operation can
+ * contain multiple requests which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * requests record, which is ignored in all but the first structure)
+ *
+ * The source data can either come from a DMA from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It's the callers responsibility to ensure that the embedded data
+ * doesn't overlap the records.
+ *
+ * Returns: 0, EINVAL (invalid RID)
+ */
+#define MC_CMD_MEMCPY 0x31
+
+/* MC_CMD_MEMCPY_IN msgrequest */
+#define MC_CMD_MEMCPY_IN_LENMIN 32
+#define MC_CMD_MEMCPY_IN_LENMAX 224
+#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
+#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
+#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
+#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
+#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
+
+/* MC_CMD_MEMCPY_OUT msgresponse */
+#define MC_CMD_MEMCPY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_SET
+ * Set a WoL filter.
+ */
+#define MC_CMD_WOL_FILTER_SET 0x32
+#undef MC_CMD_0x32_PRIVILEGE_CTG
+
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
+#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+/* A type value of 1 is unused. */
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+/* enum: Magic */
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
+/* enum: MS Windows Magic */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+/* enum: IPv4 Syn */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+/* enum: IPv6 Syn */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+/* enum: Bitmap */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
+/* enum: Link */
+#define MC_CMD_WOL_TYPE_LINK 0x6
+/* enum: (Above this for future use) */
+#define MC_CMD_WOL_TYPE_MAX 0x7
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
+
+/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1
+
+/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
+/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_REMOVE
+ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#undef MC_CMD_0x33_PRIVILEGE_CTG
+
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
+#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+
+/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_RESET
+ * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
+ * ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_RESET 0x34
+#undef MC_CMD_0x34_PRIVILEGE_CTG
+
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
+#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
+
+/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MCAST_HASH
+ * Set the MCAST hash value without otherwise reconfiguring the MAC
+ */
+#define MC_CMD_SET_MCAST_HASH 0x35
+
+/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
+#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
+
+/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
+#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TYPES
+ * Return bitfield indicating available types of virtual NVRAM partitions.
+ * Locks required: none. Returns: 0
+ */
+#define MC_CMD_NVRAM_TYPES 0x36
+#undef MC_CMD_0x36_PRIVILEGE_CTG
+
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TYPES_IN msgrequest */
+#define MC_CMD_NVRAM_TYPES_IN_LEN 0
+
+/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
+#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+/* Bit mask of supported types. */
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+/* enum: Disabled callisto. */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
+/* enum: MC firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
+/* enum: MC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
+/* enum: Static configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
+/* enum: Static configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
+/* enum: Dynamic configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
+/* enum: Dynamic configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
+/* enum: Expansion Rom. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
+/* enum: Expansion Rom Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
+/* enum: Expansion Rom Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
+/* enum: Phy Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
+/* enum: Phy Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
+/* enum: Log. */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc
+/* enum: FPGA image. */
+#define MC_CMD_NVRAM_TYPE_FPGA 0xd
+/* enum: FPGA backup image */
+#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
+/* enum: FC firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
+/* enum: FC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
+/* enum: CPLD image. */
+#define MC_CMD_NVRAM_TYPE_CPLD 0x11
+/* enum: Licensing information. */
+#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
+/* enum: FC Log. */
+#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
+
+
+/***********************************/
+/* MC_CMD_NVRAM_INFO
+ * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
+ * EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_INFO 0x37
+#undef MC_CMD_0x37_PRIVILEGE_CTG
+
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_INFO_IN msgrequest */
+#define MC_CMD_NVRAM_INFO_IN_LEN 4
+#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_INFO_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_OUT_LEN 24
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+
+/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
+ */
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_START
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held).
+ */
+#define MC_CMD_NVRAM_UPDATE_START 0x38
+#undef MC_CMD_0x38_PRIVILEGE_CTG
+
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest: Legacy NVRAM_UPDATE_START request.
+ * Use NVRAM_UPDATE_START_V2_IN in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_UPDATE_START_V2_IN msgrequest: Extended NVRAM_UPDATE_START
+ * request with additional flags indicating version of command in use. See
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended functionality. Use
+ * paired up with NVRAM_UPDATE_FINISH_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_READ
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_READ 0x39
+#undef MC_CMD_0x39_PRIVILEGE_CTG
+
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_READ_IN msgrequest */
+#define MC_CMD_NVRAM_READ_IN_LEN 12
+#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
+#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+/* Optional control info. If a partition is stored with an A/B versioning
+ * scheme (i.e. in more than one physical partition in NVRAM) the host can set
+ * this to control which underlying physical partition is used to read data
+ * from. This allows it to perform a read-modify-write-verify with the write
+ * lock continuously held by calling NVRAM_UPDATE_START, reading the old
+ * contents using MODE=TARGET_CURRENT, overwriting the old partition and then
+ * verifying by reading with MODE=TARGET_BACKUP.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+/* enum: Same as omitting MODE: caller sees data in current partition unless it
+ * holds the write lock in which case it sees data in the partition it is
+ * updating.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0
+/* enum: Read from the current partition of an A/B pair, even if holding the
+ * write lock.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1
+/* enum: Read from the non-current (i.e. to be updated) partition of an A/B
+ * pair
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2
+
+/* MC_CMD_NVRAM_READ_OUT msgresponse */
+#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
+#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
+#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_NVRAM_WRITE
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_WRITE 0x3a
+#undef MC_CMD_0x3a_PRIVILEGE_CTG
+
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_WRITE_IN msgrequest */
+#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
+#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
+#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
+#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_ERASE
+ * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_ERASE 0x3b
+#undef MC_CMD_0x3b_PRIVILEGE_CTG
+
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_ERASE_IN msgrequest */
+#define MC_CMD_NVRAM_ERASE_IN_LEN 12
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
+#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_FINISH
+ * Finish a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad
+ * type/offset/length), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#undef MC_CMD_0x3c_PRIVILEGE_CTG
+
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest: Legacy NVRAM_UPDATE_FINISH
+ * request. Use NVRAM_UPDATE_FINISH_V2_IN in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
+ * request with additional flags indicating version of NVRAM_UPDATE commands in
+ * use. See MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended
+ * functionality. Use paired up with NVRAM_UPDATE_START_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
+ * response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT msgresponse:
+ *
+ * Extended NVRAM_UPDATE_FINISH response that communicates the result of secure
+ * firmware validation where applicable back to the host.
+ *
+ * Medford only: For signed firmware images, such as those for medford, the MC
+ * firmware verifies the signature before marking the firmware image as valid.
+ * This process takes a few seconds to complete. So is likely to take more than
+ * the MCDI timeout. Hence signature verification is initiated when
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware, however, the
+ * MCDI command is run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
+/* Result of nvram update completion processing */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
+/* enum: Verify succeeded without any errors. */
+#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
+/* enum: CMS format verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2
+/* enum: Invalid CMS format in image metadata. */
+#define MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3
+/* enum: Message digest verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5
+/* enum: Signature verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6
+/* enum: There are no valid signatures in the image. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7
+/* enum: Trusted approvers verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8
+/* enum: The Trusted approver's list is empty. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9
+/* enum: Signature chain verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+
+
+/***********************************/
+/* MC_CMD_REBOOT
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practise an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares with
+ * REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0
+ */
+#define MC_CMD_REBOOT 0x3d
+#undef MC_CMD_0x3d_PRIVILEGE_CTG
+
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REBOOT_IN msgrequest */
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
+
+/* MC_CMD_REBOOT_OUT msgresponse */
+#define MC_CMD_REBOOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SCHEDINFO
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice,maximum overrun), one for each thread, in ascending order of
+ * thread address.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+#undef MC_CMD_0x3e_PRIVILEGE_CTG
+
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SCHEDINFO_IN msgrequest */
+#define MC_CMD_SCHEDINFO_IN_LEN 0
+
+/* MC_CMD_SCHEDINFO_OUT msgresponse */
+#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
+#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
+#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
+#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+#undef MC_CMD_0x3f_PRIVILEGE_CTG
+
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REBOOT_MODE_IN msgrequest */
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
+
+/* MC_CMD_REBOOT_MODE_OUT msgresponse */
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SENSOR_INFO
+ * Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 32 in the sensor mask was previously unused, and is no reserved for use
+ * as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None Returns: 0
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+#undef MC_CMD_0x41_PRIVILEGE_CTG
+
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SENSOR_INFO_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+
+/* MC_CMD_SENSOR_INFO_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+/* enum: Controller temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VSS08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage (millivolts) */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (interval ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (interval ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: CCOM RTS temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+/* enum: Voltage supplied to the QSFP #0 from their power supply: mV */
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d
+/* enum: Controller die temperature (TDIODE): degC */
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+/* enum: Board temperature (front): degC */
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+/* enum: Board temperature (back): degC */
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+#define MC_CMD_SENSOR_ENTRY_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LEN 8
+#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
+#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO_OUT */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/* MC_CMD_SENSOR_ENTRY_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LEN 8 */
+/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_SENSORS
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor reading require more space than the LENGTH allows, then return
+ * EINVAL.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
+ */
+#define MC_CMD_READ_SENSORS 0x42
+#undef MC_CMD_0x42_PRIVILEGE_CTG
+
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_SENSORS_IN msgrequest */
+#define MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+
+/* MC_CMD_READ_SENSORS_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_OUT_LEN 0
+
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
+
+/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
+/* enum: Ok. */
+#define MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+/* enum: Sensor initialisation failed. */
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_STATE
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
+ */
+#define MC_CMD_GET_PHY_STATE 0x43
+#undef MC_CMD_0x43_PRIVILEGE_CTG
+
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_STATE_IN msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+
+/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
+
+
+/***********************************/
+/* MC_CMD_SETUP_8021QBB
+ * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.Qbb for a given priority.
+ */
+#define MC_CMD_SETUP_8021QBB 0x44
+
+/* MC_CMD_SETUP_8021QBB_IN msgrequest */
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
+
+/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_GET
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+#undef MC_CMD_0x45_PRIVILEGE_CTG
+
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
+
+/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#undef MC_CMD_0x46_PRIVILEGE_CTG
+
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#undef MC_CMD_0x47_PRIVILEGE_CTG
+
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAC_RESET_RESTORE
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
+ */
+#define MC_CMD_MAC_RESET_RESTORE 0x48
+
+/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
+#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+
+/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
+#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow tests that the driver copes gracefully). Locks
+ * required: None Returns: 0
+ */
+#define MC_CMD_TESTASSERT 0x49
+#undef MC_CMD_0x49_PRIVILEGE_CTG
+
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TESTASSERT_IN msgrequest */
+#define MC_CMD_TESTASSERT_IN_LEN 0
+
+/* MC_CMD_TESTASSERT_OUT msgresponse */
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_TESTASSERT_V2_IN msgrequest */
+#define MC_CMD_TESTASSERT_V2_IN_LEN 4
+/* How to provoke the assertion */
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
+ * you're testing firmware, this is what you want.
+ */
+#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
+/* enum: Assert using assert(0); */
+#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
+/* enum: Deliberately trigger a watchdog */
+#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
+/* enum: Deliberately trigger a trap by loading from an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
+/* enum: Deliberately trigger a trap by storing to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
+/* enum: Jump to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
+
+/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
+#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WORKAROUND
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround, that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL .
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#undef MC_CMD_0x4a_PRIVILEGE_CTG
+
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WORKAROUND_IN msgrequest */
+#define MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+/* enum: Bug 17230 work around. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_WORKAROUND_BUG61265 0x7
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+
+/* MC_CMD_WORKAROUND_OUT msgresponse */
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_MEDIA_INFO
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#undef MC_CMD_0x4b_PRIVILEGE_CTG
+
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TEST
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+#undef MC_CMD_0x4c_PRIVILEGE_CTG
+
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TEST_IN msgrequest */
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_TEST_OUT msgresponse */
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+/* enum: Passed. */
+#define MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2
+
+
+/***********************************/
+/* MC_CMD_MRSFP_TWEAK
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+
+/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+/* 0-8 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+
+/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
+
+/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+/* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+/* direction */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
+
+
+/***********************************/
+/* MC_CMD_SENSOR_SET_LIMS
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#undef MC_CMD_0x4e_PRIVILEGE_CTG
+
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+/* interpretation is is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+/* interpretation is is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+/* interpretation is is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+
+/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
+#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RESOURCE_LIMITS
+ */
+#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
+
+/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
+#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
+
+/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+#undef MC_CMD_0x51_PRIVILEGE_CTG
+
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+#undef MC_CMD_0x52_PRIVILEGE_CTG
+
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+#undef MC_CMD_0x55_PRIVILEGE_CTG
+
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation
+ */
+#define MC_CMD_CLP 0x56
+#undef MC_CMD_0x56_PRIVILEGE_CTG
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define MC_CMD_CLP_IN_OP_OFST 0
+/* enum: Return to factory default settings */
+#define MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define MC_CMD_CLP_IN_DEFAULT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_LEN 12
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MAC address assigned to port */
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* Boot flag */
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+#undef MC_CMD_0x57_PRIVILEGE_CTG
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MUM_IN msgrequest */
+#define MC_CMD_MUM_IN_LEN 4
+#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_LBN 0
+#define MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define MC_CMD_MUM_OP_QSFP 0xc
+/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
+ * level) from MUM
+ */
+#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define MC_CMD_MUM_IN_CMD_OFST 0
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of (device connected to MUM) to read from registers of */
+#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+/* Number of words to read. */
+#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of (device connected to MUM) to write to registers of */
+#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/* MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+/* Words to write */
+#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MUM I2C cmd code */
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+/* Number of bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+/* Number of bytes to read */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+/* Bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* Enable/disable debug output to UART */
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Bit-mask of clocks to be programmed */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Enable/Disable FPGA config from flash */
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
+#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+/* returned data */
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_READ_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
+
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+
+/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
+/* Discrete (soldered) DDR resistor strap info */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
+/* Number of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+/* Array of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
+/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
+/* enum: SODIMM bank 2 (Bottom SODDIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
+/* enum: Total number of SODIMM banks */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
+/* enum: Values 5-15 are reserved for future usage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
+/* enum: No module present */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
+/* enum: Module present supported and powered on */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
+/* enum: Module present but bad type */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
+/* enum: Module present but incompatible voltage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
+/* enum: Module present but unknown SPD */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
+/* enum: Module present but slot cannot support it */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
+/* enum: Modules may or may not be present, but cannot establish contact by I2C
+ */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
+
+/* MC_CMD_RESOURCE_SPECIFIER enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+/* enum: None */
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function.. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output of second core on dual-core device */
+#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Primary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA 0xb00
+/* enum: Secondary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
+/* enum: FC firmware partition */
+#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
+/* enum: FC License partition */
+#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
+/* enum: Non-volatile log output partition for FC */
+#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: MUM firmware partition */
+#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: MUM Application table partition. */
+#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
+/* enum: MUM boot rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
+/* enum: UEFI expansion ROM if separate from PXE */
+#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
+/* enum: Spare partition 0 */
+#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
+/* enum: Used for XIP code of shmbooted images */
+#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
+/* enum: Spare partition 2 */
+#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
+/* enum: Manufacturing partition. Used during manufacture to pass information
+ * between XJTAG and Manftest.
+ */
+#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
+/* enum: Spare partition 4 */
+#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
+/* enum: Spare partition 5 */
+#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define NVRAM_PARTITION_TYPE_STATUS 0x1600
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_ID_LBN 0
+#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+
+/* LICENSED_APP_ID structuredef */
+#define LICENSED_APP_ID_LEN 4
+#define LICENSED_APP_ID_ID_OFST 0
+/* enum: OpenOnload */
+#define LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+/* enum: SolarSecure filter engine */
+#define LICENSED_APP_ID_SOLARSECURE 0x8
+/* enum: Performance monitor */
+#define LICENSED_APP_ID_PERF_MONITOR 0x10
+/* enum: SolarCapture Live */
+#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
+/* enum: Capture SolarSystem */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
+/* enum: Network Access Control */
+#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
+/* enum: TCP Direct */
+#define LICENSED_APP_ID_TCP_DIRECT 0x100
+/* enum: Low Latency */
+#define LICENSED_APP_ID_LOW_LATENCY 0x200
+/* enum: SolarCapture Tap */
+#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
+/* enum: Capture SolarSystem 40G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+#define LICENSED_APP_ID_ID_LBN 0
+#define LICENSED_APP_ID_ID_WIDTH 32
+
+/* LICENSED_FEATURES structuredef */
+#define LICENSED_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_FEATURES_MASK_OFST 0
+#define LICENSED_FEATURES_MASK_LEN 8
+#define LICENSED_FEATURES_MASK_LO_OFST 0
+#define LICENSED_FEATURES_MASK_HI_OFST 4
+#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_PIO_LBN 1
+#define LICENSED_FEATURES_PIO_WIDTH 1
+#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_FEATURES_CLOCK_LBN 3
+#define LICENSED_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_MASK_LBN 0
+#define LICENSED_FEATURES_MASK_WIDTH 64
+
+/* LICENSED_V3_APPS structuredef */
+#define LICENSED_V3_APPS_LEN 8
+/* Bitmask of licensed applications */
+#define LICENSED_V3_APPS_MASK_OFST 0
+#define LICENSED_V3_APPS_MASK_LEN 8
+#define LICENSED_V3_APPS_MASK_LO_OFST 0
+#define LICENSED_V3_APPS_MASK_HI_OFST 4
+#define LICENSED_V3_APPS_ONLOAD_LBN 0
+#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_PTP_LBN 1
+#define LICENSED_V3_APPS_PTP_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
+#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
+#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
+#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
+#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
+#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8
+#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
+#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9
+#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define LICENSED_V3_APPS_MASK_LBN 0
+#define LICENSED_V3_APPS_MASK_WIDTH 64
+
+/* LICENSED_V3_FEATURES structuredef */
+#define LICENSED_V3_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_V3_FEATURES_MASK_OFST 0
+#define LICENSED_V3_FEATURES_MASK_LEN 8
+#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_PIO_LBN 1
+#define LICENSED_V3_FEATURES_PIO_WIDTH 1
+#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_V3_FEATURES_CLOCK_LBN 3
+#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_MASK_LBN 0
+#define LICENSED_V3_FEATURES_MASK_WIDTH 64
+
+/* TX_TIMESTAMP_EVENT structuredef */
+#define TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event, ordinary TX completion, low or high part of TX timestamp
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is the low part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+/* enum: This is the high part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
+
+/* RSS_MODE structuredef */
+#define RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 - 15 which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value 0 effectively disables RSS spreading for the packet type.) The YAML
+ * generation tools require this structure to be a whole number of bytes wide,
+ * but only 4 bits are relevant.
+ */
+#define RSS_MODE_HASH_SELECTOR_OFST 0
+#define RSS_MODE_HASH_SELECTOR_LEN 1
+#define RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define RSS_MODE_HASH_DST_ADDR_LBN 1
+#define RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define RSS_MODE_HASH_SRC_PORT_LBN 2
+#define RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define RSS_MODE_HASH_DST_PORT_LBN 3
+#define RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define RSS_MODE_HASH_SELECTOR_LBN 0
+#define RSS_MODE_HASH_SELECTOR_WIDTH 8
+
+/* CTPIO_STATS_MAP structuredef */
+#define CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define CTPIO_STATS_MAP_VI_OFST 0
+#define CTPIO_STATS_MAP_VI_LEN 2
+#define CTPIO_STATS_MAP_VI_LBN 0
+#define CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define CTPIO_STATS_MAP_BUCKET_OFST 2
+#define CTPIO_STATS_MAP_BUCKET_LEN 2
+#define CTPIO_STATS_MAP_BUCKET_LBN 16
+#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+#undef MC_CMD_0x50_PRIVILEGE_CTG
+
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+#undef MC_CMD_0x80_PRIVILEGE_CTG
+
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+
+/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
+#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+/* tbd */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+/* Actual configuration applied on the card */
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fiber channel over ethernet. */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fiber channel over IP over ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+#undef MC_CMD_0x81_PRIVILEGE_CTG
+
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10
+#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x82
+#undef MC_CMD_0x82_PRIVILEGE_CTG
+
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+/* Flags related to Qbb flow control mode. */
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY
+ */
+#define MC_CMD_FINI_EVQ 0x83
+#undef MC_CMD_0x83_PRIVILEGE_CTG
+
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown a RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+#undef MC_CMD_0x84_PRIVILEGE_CTG
+
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+#undef MC_CMD_0x85_PRIVILEGE_CTG
+
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+#undef MC_CMD_0x86_PRIVILEGE_CTG
+
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supercedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+#undef MC_CMD_0x5b_PRIVILEGE_CTG
+
+#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define MC_CMD_PROXY_CMD_OUT_LEN 0
+
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+/* enum: An invalid handle. */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
+#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+#undef MC_CMD_0x58_PRIVILEGE_CTG
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+
+/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
+ */
+#define MC_CMD_PROXY_COMPLETE 0x5f
+#undef MC_CMD_0x5f_PRIVILEGE_CTG
+
+#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
+#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
+ * is stored in the REPLY_BUFF.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
+/* enum: The operation has been authorized. The originating function may now
+ * try again.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
+/* enum: The operation has been declined. */
+#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
+/* enum: The authorization failed because the relevant application did not
+ * respond in time.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+
+/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
+#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#undef MC_CMD_0x87_PRIVILEGE_CTG
+
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#undef MC_CMD_0x88_PRIVILEGE_CTG
+
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+/* ID */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+/* Num entries */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#undef MC_CMD_0x89_PRIVILEGE_CTG
+
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
+/* PORT_CONFIG_ENTRY structuredef */
+#define PORT_CONFIG_ENTRY_LEN 16
+/* External port number (label) */
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
+/* Port core location */
+#define PORT_CONFIG_ENTRY_CORE_OFST 1
+#define PORT_CONFIG_ENTRY_CORE_LEN 1
+#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */
+#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */
+#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */
+#define PORT_CONFIG_ENTRY_CORE_LBN 8
+#define PORT_CONFIG_ENTRY_CORE_WIDTH 8
+/* Internal number (HW resource) relative to the core */
+#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
+#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
+/* Reserved */
+#define PORT_CONFIG_ENTRY_RSVD_OFST 3
+#define PORT_CONFIG_ENTRY_RSVD_LEN 1
+#define PORT_CONFIG_ENTRY_RSVD_LBN 24
+#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8
+/* Bitmask of KR lanes used by the port */
+#define PORT_CONFIG_ENTRY_LANES_OFST 4
+#define PORT_CONFIG_ENTRY_LANES_LBN 32
+#define PORT_CONFIG_ENTRY_LANES_WIDTH 32
+/* Port capabilities (MC_CMD_PHY_CAP_*) */
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
+/* Reserved (align to 16 bytes) */
+#define PORT_CONFIG_ENTRY_RSVD2_OFST 12
+#define PORT_CONFIG_ENTRY_RSVD2_LBN 96
+#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+#undef MC_CMD_0x8a_PRIVILEGE_CTG
+
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_OUT/HANDLE */
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#undef MC_CMD_0xe4_PRIVILEGE_CTG
+
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
+ * encapsulated frames, which follow a different match sequence to normal
+ * frames (Medford only)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
+/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse:
+ * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* a version number representing the set of rule lookups that are implemented
+ * by the currently running firmware
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+/* enum: implements lookup sequences described in SF-114946-SW draft C */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+/* the number of nodes in the subnet map */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+/* the number of entries in one subnet map node */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+/* minimum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+/* maximum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+/* the number of entries in the local and remote port range maps */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+/* minimum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+/* maximum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+
+
+/***********************************/
+/* MC_CMD_PARSER_DISP_RW
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
+ * Please note that this interface is only of use to debug tools which have
+ * knowledge of firmware and hardware data structures; nothing here is intended
+ * for use by normal driver code.
+ */
+#define MC_CMD_PARSER_DISP_RW 0xe5
+#undef MC_CMD_0xe5_PRIVILEGE_CTG
+
+#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
+#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
+/* identifies the target of the operation */
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+/* enum: RX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+/* enum: TX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine (with original metadata format) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
+/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
+/* enum: RX1 dispatcher CPU (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
+/* enum: Miscellaneous other state (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
+/* identifies the type of operation requested */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
+/* enum: read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: write a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+/* data memory address (DICPU targets) or LUE index (LUE targets) */
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+/* selector (for MISC_STATE target) */
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+/* enum: Port to datapath mapping */
+#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
+/* value to write (for DMEM writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+/* value to write (for LUE writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
+
+/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
+#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
+/* value read (for DMEM reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+/* value read (for LUE reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
+/* up to 8 32-bit words of additional soft state from the LUE manager (the
+ * exact content is firmware-dependent and intended only for debug use)
+ */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
+/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
+#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
+#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
+
+
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ * Get number of PFs on the device.
+ */
+#define MC_CMD_GET_PF_COUNT 0xb6
+#undef MC_CMD_0xb6_PRIVILEGE_CTG
+
+#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
+/* Identifies the number of PFs on the device. */
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ * Set number of PFs on the device.
+ */
+#define MC_CMD_SET_PF_COUNT 0xb7
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define MC_CMD_SET_PF_COUNT_IN_LEN 4
+/* New number of PFs on the device. */
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#undef MC_CMD_0xb8_PRIVILEGE_CTG
+
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#undef MC_CMD_0xb9_PRIVILEGE_CTG
+
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+#undef MC_CMD_0x8b_PRIVILEGE_CTG
+
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
+ * Use extended version in new code.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+#undef MC_CMD_0x8c_PRIVILEGE_CTG
+
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+#undef MC_CMD_0xba_PRIVILEGE_CTG
+
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+#undef MC_CMD_0xbb_PRIVILEGE_CTG
+
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about number of VI's and base VI number allocated to this
+ * function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#undef MC_CMD_0x8d_PRIVILEGE_CTG
+
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+#undef MC_CMD_0x8e_PRIVILEGE_CTG
+
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+#undef MC_CMD_0x8f_PRIVILEGE_CTG
+
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+#undef MC_CMD_0x90_PRIVILEGE_CTG
+
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_TLP_PROCESSING
+ * Get TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+#undef MC_CMD_0xb0_PRIVILEGE_CTG
+
+#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
+/* VI number to get information for. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
+/* Set no snoop bit for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_VI_TLP_PROCESSING
+ * Set TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+#undef MC_CMD_0xb1_PRIVILEGE_CTG
+
+#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
+/* VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
+/* Set the no snoop bit for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
+ * Get global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+#undef MC_CMD_0xbc_PRIVILEGE_CTG
+
+#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* enum: MISC. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+/* enum: IDO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+/* enum: RO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+/* enum: TPH Type. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
+
+
+/***********************************/
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
+ * Set global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+#undef MC_CMD_0xbd_PRIVILEGE_CTG
+
+#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SATELLITE_DOWNLOAD
+ * Download a new set of images to the satellite CPUs from the host.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+#undef MC_CMD_0x91_PRIVILEGE_CTG
+
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
+ * are subtle, and so downloads must proceed in a number of phases.
+ *
+ * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
+ * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
+ * be a checksum (a simple 32-bit sum) of the transferred data. An individual
+ * download may be aborted using CHUNK_ID_ABORT.
+ *
+ * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
+ * similar to PHASE_IMEMS.
+ *
+ * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * After any error (a requested abort is not considered to be an error) the
+ * sequence must be restarted from PHASE_RESET.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
+/* Download phase. (Note: the IDLE phase is used internally and is never valid
+ * in a command from the host.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+/* Target for download. (These match the blob numbers defined in
+ * mc_flash_layout.h.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+/* enum: Last chunk, containing checksum rather than data */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+/* enum: Abort download of this item */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+/* Length of this chunk in bytes */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+/* Data for this chunk */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
+
+/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+/* Extra status information */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+/* enum: Code download OK, completed. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+/* enum: Code download aborted as requested. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+/* enum: Code download OK so far, send next chunk. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+/* enum: Download phases out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+/* enum: Bad target for this phase */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+/* enum: Chunk ID out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+/* enum: Chunk length zero or too large */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+/* enum: Checksum was incorrect */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
+
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities.
+ *
+ * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
+ * reference inherent device capabilities as opposed to current NVRAM config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+#undef MC_CMD_0xbe_PRIVILEGE_CTG
+
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+
+/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_ALLOC
+ * Allocate a pacer bucket (for qau rp or a snapper test)
+ */
+#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+#undef MC_CMD_0xb2_PRIVILEGE_CTG
+
+#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
+
+/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_FREE
+ * Free a pacer bucket
+ */
+#define MC_CMD_TCM_BUCKET_FREE 0xb3
+#undef MC_CMD_0xb3_PRIVILEGE_CTG
+
+#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+
+/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_INIT
+ * Initialise pacer bucket with a given rate
+ */
+#define MC_CMD_TCM_BUCKET_INIT 0xb4
+#undef MC_CMD_0xb4_PRIVILEGE_CTG
+
+#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+/* the desired maximum fill level */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+
+/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_TXQ_INIT
+ * Initialise txq in pacer with given options or set options
+ */
+#define MC_CMD_TCM_TXQ_INIT 0xb5
+#undef MC_CMD_0xb5_PRIVILEGE_CTG
+
+#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+/* bitmask of the priority queues this txq is inserted into when inserted. */
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+/* bitmask of the priority queues this txq is inserted into when inserted. */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+
+/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
+#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LINK_PIOBUF
+ * Link a push I/O buffer to a TxQ
+ */
+#define MC_CMD_LINK_PIOBUF 0x92
+#undef MC_CMD_0x92_PRIVILEGE_CTG
+
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_LINK_PIOBUF_IN msgrequest */
+#define MC_CMD_LINK_PIOBUF_IN_LEN 8
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+/* Function Local Instance (VI) number. */
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+
+/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UNLINK_PIOBUF
+ * Unlink a push I/O buffer from a TxQ
+ */
+#define MC_CMD_UNLINK_PIOBUF 0x93
+#undef MC_CMD_0x93_PRIVILEGE_CTG
+
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
+#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+
+/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_ALLOC
+ * allocate and initialise a v-switch.
+ */
+#define MC_CMD_VSWITCH_ALLOC 0x94
+#undef MC_CMD_0x94_PRIVILEGE_CTG
+
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
+#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
+/* The port to connect to the v-switch's upstream port. */
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of v-switch to create. */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greated, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero of greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+#undef MC_CMD_0x95_PRIVILEGE_CTG
+
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some config of v-switch. For now this command is an empty placeholder.
+ * It may be used to check if a v-switch is connected to a given EVB port (if
+ * not, then the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+#undef MC_CMD_0x63_PRIVILEGE_CTG
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+#undef MC_CMD_0x96_PRIVILEGE_CTG
+
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of the new v-port. */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port packets traffic which don't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: An test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+#undef MC_CMD_0x97_PRIVILEGE_CTG
+
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+#undef MC_CMD_0x98_PRIVILEGE_CTG
+
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
+/* The port to connect to the v-adaptor's port. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Flags controlling v-adaptor creation */
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+/* The number of VLAN tags to transparently insert/remove. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * de-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+#undef MC_CMD_0x99_PRIVILEGE_CTG
+
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+#undef MC_CMD_0x5d_PRIVILEGE_CTG
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The new MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+#undef MC_CMD_0x5e_PRIVILEGE_CTG
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_QUERY
+ * read some config of v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_QUERY 0x61
+#undef MC_CMD_0x61_PRIVILEGE_CTG
+
+#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
+#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
+#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+/* The number of VLAN tags that may still be added */
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#undef MC_CMD_0x9a_PRIVILEGE_CTG
+
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+#undef MC_CMD_0x9b_PRIVILEGE_CTG
+
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+/* Write enable bits 0-3, set to write, clear to read. */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data always included
+ * regardless of state of write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#undef MC_CMD_0x9c_PRIVILEGE_CTG
+
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#undef MC_CMD_0x9d_PRIVILEGE_CTG
+
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#undef MC_CMD_0x9e_PRIVILEGE_CTG
+
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* Number of queues spanned by this context, in the range 1-64; valid offsets
+ * in the indirection table will be in the range 0 to NUM_QUEUES-1.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+/* enum: guaranteed invalid RSS context handle value */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+#undef MC_CMD_0x9f_PRIVILEGE_CTG
+
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+#undef MC_CMD_0xa0_PRIVILEGE_CTG
+
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+#undef MC_CMD_0xa1_PRIVILEGE_CTG
+
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+#undef MC_CMD_0xa2_PRIVILEGE_CTG
+
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_TABLE
+ * Get the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+#undef MC_CMD_0xa3_PRIVILEGE_CTG
+
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS
+ * Set various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+#undef MC_CMD_0xe1_PRIVILEGE_CTG
+
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+/* Hash control flags. The _EN bits are always supported, but new modes are
+ * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
+ * in this case, the MODE fields may be set to non-zero values, and will take
+ * effect regardless of the settings of the _EN flags. See the RSS_MODE
+ * structure for the meaning of the mode bits. Drivers must check the
+ * capability before trying to set any _MODE fields, as older firmware will
+ * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In
+ * the case where all the _MODE flags are zero, the _EN flags take effect,
+ * providing backward compatibility for existing drivers. (Setting all _MODE
+ * *and* all _EN flags to zero is valid, to disable RSS spreading for that
+ * particular packet type.)
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#undef MC_CMD_0xe2_PRIVILEGE_CTG
+
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags. If all _MODE bits are zero (which will always be true
+ * for older firmware which does not report the ADDITIONAL_RSS_MODES
+ * capability), the _EN bits report the state. If any _MODE bits are non-zero
+ * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
+ * then the _EN bits should be disregarded, although the _MODE flags are
+ * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
+ * context and in the case where the _EN flags were used in the SET. This
+ * provides backward compatibility: old drivers will not be attempting to
+ * derive any meaning from the _MODE bits (and can never set them to any value
+ * not representable by the _EN bits); new drivers can always determine the
+ * mode by looking only at the _MODE bits; the value returned by a GET can
+ * always be used for a SET regardless of old/new driver vs. old/new firmware.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_ALLOC
+ * Allocate a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+#undef MC_CMD_0xa4_PRIVILEGE_CTG
+
+#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
+/* The handle of the owning upstream port */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
+ * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
+ * referenced RSS contexts must span no more than this number.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
+/* The handle of the new .1p mapping. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+/* enum: guaranteed invalid .1p mapping handle value */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_FREE
+ * Free a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+#undef MC_CMD_0xa5_PRIVILEGE_CTG
+
+#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE
+ * Set the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+#undef MC_CMD_0xa6_PRIVILEGE_CTG
+
+#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE
+ * Get the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+#undef MC_CMD_0xa7_PRIVILEGE_CTG
+
+#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_VECTOR_CFG
+ * Get Interrupt Vector config for this PF.
+ */
+#define MC_CMD_GET_VECTOR_CFG 0xbf
+#undef MC_CMD_0xbf_PRIVILEGE_CTG
+
+#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
+
+/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
+/* Base absolute interrupt vector number. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+/* Number of interrupt vectors allocate to this PF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ * Set Interrupt Vector config for this PF.
+ */
+#define MC_CMD_SET_VECTOR_CFG 0xc0
+#undef MC_CMD_0xc0_PRIVILEGE_CTG
+
+#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
+ * let the system find a suitable base.
+ */
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+/* Number of interrupt vectors allocate to this PF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#undef MC_CMD_0xa8_PRIVILEGE_CTG
+
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#undef MC_CMD_0xa9_PRIVILEGE_CTG
+
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#undef MC_CMD_0xaa_PRIVILEGE_CTG
+
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+
+
+/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+#undef MC_CMD_0xeb_PRIVILEGE_CTG
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+/* Flags requesting what should be changed. */
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+/* MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * read some config of v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+#undef MC_CMD_0x62_PRIVILEGE_CTG
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries, and does not use chunk handles. All entries must be in
+ * range, and used for q page mapping, Although the latter restriction may be
+ * lifted in future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+#undef MC_CMD_0xab_PRIVILEGE_CTG
+
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+/* Raw buffer table entries, layed out as BUFTBL_ENTRY. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_SET_RXDP_CONFIG
+ * Set global RXDP configuration settings
+ */
+#define MC_CMD_SET_RXDP_CONFIG 0xc1
+#undef MC_CMD_0xc1_PRIVILEGE_CTG
+
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
+/* enum: pad to 64 bytes */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
+/* enum: pad to 128 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
+/* enum: pad to 256 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
+
+/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RXDP_CONFIG
+ * Get global RXDP configuration settings
+ */
+#define MC_CMD_GET_RXDP_CONFIG 0xc2
+#undef MC_CMD_0xc2_PRIVILEGE_CTG
+
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
+/* Enum values, see field(s): */
+/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and PDCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+#undef MC_CMD_0xac_PRIVILEGE_CTG
+
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost reboot.
+ */
+#define MC_CMD_SET_CLOCK 0xad
+#undef MC_CMD_0xad_PRIVILEGE_CTG
+
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_CLOCK_IN msgrequest */
+#define MC_CMD_SET_CLOCK_IN_LEN 28
+/* Requested frequency in MHz for system clock domain */
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+/* enum: Leave the system clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for inter-core clock domain */
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+/* enum: Leave the inter-core clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for DPCPU clock domain */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+/* enum: Leave the DPCPU clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for PCS clock domain */
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+/* enum: Leave the PCS clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for MC clock domain */
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+/* enum: Leave the MC clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for rmon clock domain */
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+/* enum: Leave the rmon clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for vswitch clock domain */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+/* enum: Leave the vswitch clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
+
+/* MC_CMD_SET_CLOCK_OUT msgresponse */
+#define MC_CMD_SET_CLOCK_OUT_LEN 28
+/* Resulting system frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* enum: The system clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting inter-core frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* enum: The inter-core clock domain doesn't exist / isn't used */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
+/* Resulting DPCPU frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+/* enum: The dpcpu clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
+/* Resulting PCS frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+/* enum: The PCS clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting MC frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+/* enum: The MC clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
+/* Resulting rmon frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+/* enum: The rmon clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
+/* Resulting vswitch frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+/* enum: The vswitch clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
+
+
+/***********************************/
+/* MC_CMD_DPCPU_RPC
+ * Send an arbitrary DPCPU message.
+ */
+#define MC_CMD_DPCPU_RPC 0xae
+#undef MC_CMD_0xae_PRIVILEGE_CTG
+
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DPCPU_RPC_IN msgrequest */
+#define MC_CMD_DPCPU_RPC_IN_LEN 36
+#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+/* enum: RxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
+/* enum: TxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
+/* enum: TxDPCPU1 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* enum: RxDPCPU1 (Medford only) */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
+/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_RX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
+/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_TX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
+/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
+ * initialised to zero
+ */
+#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
+/* Register data to write. Only valid in write/write-read. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+/* Register address. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+
+/* MC_CMD_DPCPU_RPC_OUT msgresponse */
+#define MC_CMD_DPCPU_RPC_OUT_LEN 36
+#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+/* DATA */
+#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#undef MC_CMD_0xe3_PRIVILEGE_CTG
+
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to base for function. */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+#undef MC_CMD_0xe6_PRIVILEGE_CTG
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CAP_BLK_READ
+ * Read multiple 64bit words from capture block memory
+ */
+#define MC_CMD_CAP_BLK_READ 0xe7
+#undef MC_CMD_0xe7_PRIVILEGE_CTG
+
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CAP_BLK_READ_IN msgrequest */
+#define MC_CMD_CAP_BLK_READ_IN_LEN 12
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+
+/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
+#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
+#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
+
+
+/***********************************/
+/* MC_CMD_DUMP_DO
+ * Take a dump of the DUT state
+ */
+#define MC_CMD_DUMP_DO 0xe8
+#undef MC_CMD_0xe8_PRIVILEGE_CTG
+
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DUMP_DO_IN msgrequest */
+#define MC_CMD_DUMP_DO_IN_LEN 52
+#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+/* enum: The uart port this command was received over (if using a uart
+ * transport)
+ */
+#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+/* MC_CMD_DUMP_DO_OUT msgresponse */
+#define MC_CMD_DUMP_DO_OUT_LEN 4
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED
+ * Configure unsolicited dumps
+ */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+#undef MC_CMD_0xe9_PRIVILEGE_CTG
+
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+#undef MC_CMD_0xea_PRIVILEGE_CTG
+
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, eg voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information. PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+#undef MC_CMD_0xec_PRIVILEGE_CTG
+
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, chip enters quiescent
+ * mode, calling function gets exclusive MCDI ownership. The only way out is
+ * reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#undef MC_CMD_0xed_PRIVILEGE_CTG
+
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_SEND_DATA
+ * Send checksummed[sic] block of data over the uart. Response is a placeholder
+ * should we wish to make this reliable; currently requests are fire-and-
+ * forget.
+ */
+#define MC_CMD_UART_SEND_DATA 0xee
+#undef MC_CMD_0xee_PRIVILEGE_CTG
+
+#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
+#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
+#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+
+/* MC_CMD_UART_SEND_DATA_IN msgresponse */
+#define MC_CMD_UART_SEND_DATA_IN_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_RECV_DATA
+ * Request checksummed[sic] block of data over the uart. Only a placeholder,
+ * subject to change and not currently implemented.
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+#undef MC_CMD_0xef_PRIVILEGE_CTG
+
+#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+/* Offset from which to read the data */
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device One-Time-Programmable (OTP) Fuses
+ */
+#define MC_CMD_READ_FUSES 0xf0
+#undef MC_CMD_0xf0_PRIVILEGE_CTG
+
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
+
+
+/***********************************/
+/* MC_CMD_KR_TUNE
+ * Get or set KR Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_KR_TUNE 0xf1
+#undef MC_CMD_0xf1_PRIVILEGE_CTG
+
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_KR_TUNE_IN msgrequest */
+#define MC_CMD_KR_TUNE_IN_LENMIN 4
+#define MC_CMD_KR_TUNE_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
+/* enum: Force KR Serdes reset / recalibration */
+#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Read Figure Of Merit (eye quality, higher is better). */
+#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_KR_TUNE_OUT msgresponse */
+#define MC_CMD_KR_TUNE_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15, Huntington) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15, Huntington) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max
+ * positive, Medford - 0-31)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
+/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-31)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
+/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+/* enum: Edge DFE DLEV (0-128 for Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
+/* enum: Variable Gain Amplifier (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
+/* enum: CTLE EQ Capacitor (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (0-7, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TX Amplitude (Huntington, Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+/* enum: De-Emphasis Tap1 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+/* enum: De-Emphasis Tap2 Fine (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+/* enum: Pre-Emphasis Magnitude (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+/* enum: Pre-Emphasis Fine (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+/* enum: TX Slew Rate Coarse control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+/* enum: TX Slew Rate Fine control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+/* enum: TX Termination Impedance control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
+/* enum: TX Amplitude Fine control (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
+/* enum: Pre-shoot Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
+/* enum: De-emphasis Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
+#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
+/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_TUNE
+ * Get or set PCIE Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_PCIE_TUNE 0xf2
+#undef MC_CMD_0xf2_PRIVILEGE_CTG
+
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PCIE_TUNE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
+#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
+#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_PCIE_TUNE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
+/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
+/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
+/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
+/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+/* enum: DFE DLev */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
+/* enum: Figure of Merit */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
+/* enum: CTLE EQ Capacitor (HF Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (DC Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TxMargin (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+/* enum: TxSwing (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+/* enum: De-emphasis coefficient C(-1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+/* enum: De-emphasis coefficient C(0) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+/* enum: De-emphasis coefficient C(+1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
+
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgrequest */
+#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - not used for V3 licensing
+ */
+#define MC_CMD_LICENSING 0xf3
+#undef MC_CMD_0xf3_PRIVILEGE_CTG
+
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field are private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_V3 0xd0
+#undef MC_CMD_0xd0_PRIVILEGE_CTG
+
+#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_IN msgrequest */
+#define MC_CMD_LICENSING_V3_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses Returns EAGAIN if license
+ * processing (updating) has been started but not yet completed.
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+
+/* MC_CMD_LICENSING_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_V3_OUT_LEN 88
+/* count of keys which are valid */
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+/* count of keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+/* count of keys which are invalid due to being for the wrong node */
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field are private to the firmware)
+ */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
+
+
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+#undef MC_CMD_0xd1_PRIVILEGE_CTG
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+/* type of license (eg 3) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+/* length of the license ID (in bytes) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+/* the unique license ID of the adapter */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+
+
+/***********************************/
+/* MC_CMD_MC2MC_PROXY
+ * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
+ * This will fail on a single-core system.
+ */
+#define MC_CMD_MC2MC_PROXY 0xf4
+#undef MC_CMD_0xf4_PRIVILEGE_CTG
+
+#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.) Not used for V3 licensing
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#undef MC_CMD_0xf5_PRIVILEGE_CTG
+
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+#undef MC_CMD_0xd2_PRIVILEGE_CTG
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of an one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+#undef MC_CMD_0xd3_PRIVILEGE_CTG
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+#undef MC_CMD_0xf6_PRIVILEGE_CTG
+
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* enum: mask application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+/* flag */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_VALIDATE_APP
+ * Perform validation for an individual licensed application - V3 licensing
+ * (Medford)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+#undef MC_CMD_0xd4_PRIVILEGE_CTG
+
+#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
+/* challenge for validation (384 bits) */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
+/* application ID expressed as a single bit mask */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
+/* validation response to challenge in the form of ECDSA signature consisting
+ * of two 384-bit integers, r and s, in big-endian order. The signature signs a
+ * SHA-384 digest of a message constructed from the concatenation of the input
+ * message and the remaining fields of this output message, e.g. challenge[48
+ * bytes] ... expiry_time[4 bytes] ...
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
+/* application expiry time */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+/* application expiry units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+/* enum: expiry units are accounting units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+/* enum: expiry units are calendar days */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+/* base MAC address of the NIC stored in NVRAM (note that this is a constant
+ * value for a given NIC regardless which function is calling, effectively this
+ * is PF0 base MAC address)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
+/* MAC address of v-adaptor associated with the client. If no such v-adapator
+ * exists, then the field is filled with 0xFF.
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_MASK_FEATURES
+ * Mask features - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
+#undef MC_CMD_0xd5_PRIVILEGE_CTG
+
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
+/* mask to be applied to features to be changed */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+/* whether to turn on or turn off the masked features */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+/* enum: turn the features off */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+/* enum: turn the features back on */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3_TEMPORARY
+ * Perform operations to support installation of a single temporary license in
+ * the adapter, in addition to those found in the licensing partition. See
+ * SF-116124-SW for an overview of how this could be used. The license is
+ * stored in MC persistent data and so will survive a MC reboot, but will be
+ * erased when the adapter is power cycled
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
+#undef MC_CMD_0xd6_PRIVILEGE_CTG
+
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
+/* operation code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+/* enum: install a new license, overwriting any existing temporary license.
+ * This is an asynchronous operation owing to the time taken to validate an
+ * ECDSA license
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
+/* enum: clear the license immediately rather than waiting for the next power
+ * cycle
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
+/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
+ * operation
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+/* ECDSA license and signature */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
+/* status code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+/* enum: finished validating and installing license */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
+/* enum: license validation and installation in progress */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
+/* enum: licensing error. More specific error messages are not provided to
+ * avoid exposing details of the licensing system to the client
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_SNIFF_CONFIG
+ * Configure RX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic delivered to the host (non-promiscuous
+ * mode) or all traffic arriving at the port (promiscuous mode) may be
+ * delivered to a specific queue, or a set of queues with RSS.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+#undef MC_CMD_0xf7_PRIVILEGE_CTG
+
+#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_SNIFF_CONFIG
+ * Obtain the current RX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+#undef MC_CMD_0xf8_PRIVILEGE_CTG
+
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+#undef MC_CMD_0xf9_PRIVILEGE_CTG
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+/* the type of configuration setting to change */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+#undef MC_CMD_0xfa_PRIVILEGE_CTG
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
+ * Configure TX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic transmitted through the port may be
+ * delivered to a specific queue, or a set of queues with RSS. Note that these
+ * packets are delivered with transmit timestamps in the packet prefix, not
+ * receive timestamps, so it is likely that the queue(s) will need to be
+ * dedicated as TX sniff receivers.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+#undef MC_CMD_0xfb_PRIVILEGE_CTG
+
+#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
+ * Obtain the current TX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+#undef MC_CMD_0xfc_PRIVILEGE_CTG
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per queue rx error stats.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+#undef MC_CMD_0xfe_PRIVILEGE_CTG
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIE resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+/* The maximum number of VFs the device can expose in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+#undef MC_CMD_0xff_PRIVILEGE_CTG
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+#undef MC_CMD_0x100_PRIVILEGE_CTG
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define MC_CMD_READ_ATB_IN_LEN 16
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define MC_CMD_READ_ATB_OUT_LEN 4
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+#undef MC_CMD_0x59_PRIVILEGE_CTG
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+#undef MC_CMD_0x5a_PRIVILEGE_CTG
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set e.g. PF 0 = 0xFFFF0000, VF
+ * 1,3 = 0x00030001
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows to set the TX packets' source MAC address to any arbitrary MAC
+ * adress.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, always all the privileges are reported. */
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+#undef MC_CMD_0x5c_PRIVILEGE_CTG
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set, must be a VF
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+#undef MC_CMD_0x101_PRIVILEGE_CTG
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+/* Maximum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+#undef MC_CMD_0x102_PRIVILEGE_CTG
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+/* Checksum of data after logical OR of pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+/* Total number of mismatched bits between pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+/* Checksum of data after logical OR of pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+/* Total number of mismatched bits between pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+/* Checksum of data after logical OR of pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only effects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+#undef MC_CMD_0x60_PRIVILEGE_CTG
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_BYTES
+ * Read XPM memory
+ */
+#define MC_CMD_XPM_READ_BYTES 0x103
+#undef MC_CMD_0x103_PRIVILEGE_CTG
+
+#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
+#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+/* Data */
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_BYTES
+ * Write XPM memory
+ */
+#define MC_CMD_XPM_WRITE_BYTES 0x104
+#undef MC_CMD_0x104_PRIVILEGE_CTG
+
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+/* Start address (byte) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+/* Data */
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+#undef MC_CMD_0x105_PRIVILEGE_CTG
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+/* Sector size */
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+/* Sector type */
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
+/* Sector data */
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+#undef MC_CMD_0x106_PRIVILEGE_CTG
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space available). If 0, only one write attempt is
+ * made. Note that uncorrectable errors are unlikely, thanks to XPM self-repair
+ * mechanism.
+ */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
+/* Sector type */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* Sector size */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+/* Sector data */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+
+/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
+/* New sector index */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+
+
+/***********************************/
+/* MC_CMD_XPM_INVALIDATE_SECTOR
+ * Invalidate XPM sector
+ */
+#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+#undef MC_CMD_0x107_PRIVILEGE_CTG
+
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
+/* Sector index */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_BLANK_CHECK
+ * Blank-check XPM memory and report bad locations
+ */
+#define MC_CMD_XPM_BLANK_CHECK 0x108
+#undef MC_CMD_0x108_PRIVILEGE_CTG
+
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
+#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+/* Total number of bad (non-blank) locations */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit
+ * into MCDI response)
+ */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
+
+
+/***********************************/
+/* MC_CMD_XPM_REPAIR
+ * Blank-check and repair XPM memory
+ */
+#define MC_CMD_XPM_REPAIR 0x109
+#undef MC_CMD_0x109_PRIVILEGE_CTG
+
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_REPAIR_IN msgrequest */
+#define MC_CMD_XPM_REPAIR_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+#undef MC_CMD_0x10a_PRIVILEGE_CTG
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. can be run at most 16 times). The test will pick the
+ * first available location to use, or fail with ENOSPC if none left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+#undef MC_CMD_0x10b_PRIVILEGE_CTG
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_EXEC_SIGNED
+ * Check the CMAC of the contents of IMEM and DMEM against the value supplied
+ * and if correct begin execution from the start of IMEM. The caller supplies a
+ * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
+ * computation runs from the start of IMEM, and from the start of DMEM + 16k,
+ * to match flash booting. The command will respond with EINVAL if the CMAC
+ * does match, otherwise it will respond with success before it jumps to IMEM.
+ */
+#define MC_CMD_EXEC_SIGNED 0x10c
+#undef MC_CMD_0x10c_PRIVILEGE_CTG
+
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_EXEC_SIGNED_IN msgrequest */
+#define MC_CMD_EXEC_SIGNED_IN_LEN 28
+/* the length of code to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+/* the length of date to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+/* the XPM sector containing the key to use */
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+/* the expected CMAC value */
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
+
+/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
+#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PREPARE_SIGNED
+ * Prepare to upload a signed image. This will scrub the specified length of
+ * the data region, which must be at least as large as the DATALEN supplied to
+ * MC_CMD_EXEC_SIGNED.
+ */
+#define MC_CMD_PREPARE_SIGNED 0x10d
+#undef MC_CMD_0x10d_PRIVILEGE_CTG
+
+#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
+#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
+/* the length of data area to clear */
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+
+/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
+#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_SECURITY_RULE
+ * Set blacklist and/or whitelist action for a particular match criteria.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SET_SECURITY_RULE 0x10f
+#undef MC_CMD_0x10f_PRIVILEGE_CTG
+
+#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SECURITY_RULE_IN msgrequest */
+#define MC_CMD_SET_SECURITY_RULE_IN_LEN 92
+/* fields to include in match criteria */
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_LBN 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_LBN 2
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_LBN 3
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_LBN 4
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_LBN 5
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_LBN 10
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_LBN 11
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_LBN 12
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_LBN 13
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_LBN 14
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_WIDTH 1
+/* remote MAC address to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_OFST 4
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_LEN 6
+/* remote port to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_OFST 10
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_LEN 2
+/* local MAC address to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_OFST 12
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_LEN 6
+/* local port to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_OFST 18
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_OFST 20
+#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_OFST 22
+#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_OFST 24
+#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_OFST 26
+#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_LEN 2
+/* Physical port to match (as little-endian 32-bit value) */
+#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_OFST 28
+/* Reserved; set to 0 */
+#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_OFST 32
+/* remote IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_OFST 36
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_LEN 16
+/* local IP address to match (as bytes in network order; set last 12 bytes to 0
+ * for IPv4 address)
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_OFST 52
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_LEN 16
+/* remote subnet ID to match (as little-endian 32-bit value); note that remote
+ * subnets are matched by mapping the remote IP address to a "subnet ID" via a
+ * data structure which must already have been configured using
+ * MC_CMD_SUBNET_MAP_SET_NODE appropriately
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_OFST 68
+/* remote portrange ID to match (as little-endian 32-bit value); note that
+ * remote port ranges are matched by mapping the remote port to a "portrange
+ * ID" via a data structure which must already have been configured using
+ * MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_OFST 72
+/* local portrange ID to match (as little-endian 32-bit value); note that local
+ * port ranges are matched by mapping the local port to a "portrange ID" via a
+ * data structure which must already have been configured using
+ * MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_OFST 76
+/* set the action for transmitted packets matching this rule */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST 80
+/* enum: make no decision */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0
+/* enum: decide to accept the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1
+/* enum: decide to drop the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2
+/* enum: do not change the current TX action */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff
+/* set the action for received packets matching this rule */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST 84
+/* enum: make no decision */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0
+/* enum: decide to accept the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1
+/* enum: decide to drop the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2
+/* enum: do not change the current RX action */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff
+/* counter ID to associate with this rule; IDs are allocated using
+ * MC_CMD_SECURITY_RULE_COUNTER_ALLOC
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST 88
+/* enum: special value for the null counter ID */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0
+
+/* MC_CMD_SET_SECURITY_RULE_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 28
+/* new reference count for uses of counter ID */
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_OFST 0
+/* constructed match bits for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_OFST 4
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_LEN 12
+/* constructed discriminator bits for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_OFST 16
+/* base location for probes for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_OFST 20
+/* step for probes for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_OFST 24
+
+
+/***********************************/
+/* MC_CMD_RESET_SECURITY_RULES
+ * Reset all blacklist and whitelist actions for a particular physical port, or
+ * all ports. (Medford-only; for use by SolarSecure apps, not directly by
+ * drivers. See SF-114946-SW.) NOTE - this message definition is provisional.
+ * It has not yet been used in any released code and may change during
+ * development. This note will be removed once it is regarded as stable.
+ */
+#define MC_CMD_RESET_SECURITY_RULES 0x110
+#undef MC_CMD_0x110_PRIVILEGE_CTG
+
+#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RESET_SECURITY_RULES_IN msgrequest */
+#define MC_CMD_RESET_SECURITY_RULES_IN_LEN 4
+/* index of physical port to reset (or ALL_PHYSICAL_PORTS to reset all) */
+#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_OFST 0
+/* enum: special value to reset all physical ports */
+#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff
+
+/* MC_CMD_RESET_SECURITY_RULES_OUT msgresponse */
+#define MC_CMD_RESET_SECURITY_RULES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SECURITY_RULESET_VERSION
+ * Return a large hash value representing a "version" of the complete set of
+ * currently active blacklist / whitelist rules and associated data structures.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION 0x111
+#undef MC_CMD_0x111_PRIVILEGE_CTG
+
+#define MC_CMD_0x111_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_SECURITY_RULESET_VERSION_IN msgrequest */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_SECURITY_RULESET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMIN 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMAX 252
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LEN(num) (0+1*(num))
+/* Opaque hash value; length may vary depending on the hash scheme used */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_LEN 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MINNUM 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC
+ * Allocate counters for use with blacklist / whitelist rules. (Medford-only;
+ * for use by SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ * NOTE - this message definition is provisional. It has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC 0x112
+#undef MC_CMD_0x112_PRIVILEGE_CTG
+
+#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_LEN 4
+/* the number of new counter IDs to request */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_OFST 0
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LEN(num) (4+4*(num))
+/* the number of new counter IDs allocated (may be less than the number
+ * requested if resources are unavailable)
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_OFST 0
+/* new counter ID(s) */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE
+ * Allocate counters for use with blacklist / whitelist rules. (Medford-only;
+ * for use by SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ * NOTE - this message definition is provisional. It has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE 0x113
+#undef MC_CMD_0x113_PRIVILEGE_CTG
+
+#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
+/* the number of counter IDs to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_OFST 0
+/* the counter ID(s) to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MAXNUM 62
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUBNET_MAP_SET_NODE
+ * Atomically update a trie node in the map of subnets to subnet IDs. The
+ * constants in the descriptions of the fields of this message may be retrieved
+ * by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO. (Medford-
+ * only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SUBNET_MAP_SET_NODE 0x114
+#undef MC_CMD_0x114_PRIVILEGE_CTG
+
+#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SUBNET_MAP_SET_NODE_IN msgrequest */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMIN 6
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMAX 252
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LEN(num) (4+2*(num))
+/* node to update in the range 0 .. SUBNET_MAP_NUM_NODES-1 */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_OFST 0
+/* SUBNET_MAP_NUM_ENTRIES_PER_NODE new entries; each entry is either a pointer
+ * to the next node, expressed as an offset in the trie memory (i.e. node ID
+ * multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE), or a leaf value in the range
+ * SUBNET_ID_MIN .. SUBNET_ID_MAX
+ */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_OFST 4
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_LEN 2
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MINNUM 1
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MAXNUM 124
+
+/* MC_CMD_SUBNET_MAP_SET_NODE_OUT msgresponse */
+#define MC_CMD_SUBNET_MAP_SET_NODE_OUT_LEN 0
+
+/* PORTRANGE_TREE_ENTRY structuredef */
+#define PORTRANGE_TREE_ENTRY_LEN 4
+/* key for branch nodes (<= key takes left branch, > key takes right branch),
+ * or magic value for leaf nodes
+ */
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_OFST 0
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LEN 2
+#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN 0
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_WIDTH 16
+/* final portrange ID for leaf nodes (don't care for branch nodes) */
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_OFST 2
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LEN 2
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LBN 16
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping remote port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE 0x115
+#undef MC_CMD_0x115_PRIVILEGE_CTG
+
+#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping remote port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE 0x116
+#undef MC_CMD_0x116_PRIVILEGE_CTG
+
+#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
+ * Configure UDP ports for tunnel encapsulation hardware acceleration. The
+ * parser-dispatcher will attempt to parse traffic on these ports as tunnel
+ * encapsulation PDUs and filter them using the tunnel encapsulation filter
+ * chain rather than the standard filter chain. Note that this command can
+ * cause all functions to see a reset. (Available on Medford only.)
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+#undef MC_CMD_0x117_PRIVILEGE_CTG
+
+#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
+/* The number of entries in the ENTRIES array */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
+/* Entries defining the UDP port to protocol mapping, each laid out as a
+ * TUNNEL_ENCAP_UDP_PORT_ENTRY
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_RX_BALANCING
+ * Configure a port upconverter to distribute the packets on both RX engines.
+ * Packets are distributed based on a table with the destination vFIFO. The
+ * index of the table is a hash of source and destination of IPV4 and VLAN
+ * priority.
+ */
+#define MC_CMD_RX_BALANCING 0x118
+#undef MC_CMD_0x118_PRIVILEGE_CTG
+
+#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RX_BALANCING_IN msgrequest */
+#define MC_CMD_RX_BALANCING_IN_LEN 16
+/* The RX port whose upconverter table will be modified */
+#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+/* The VLAN priority associated to the table index and vFIFO */
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+/* The resulting bit of SRC^DST for indexing the table */
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+/* The RX engine to which the vFIFO in the table entry will point to */
+#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+
+/* MC_CMD_RX_BALANCING_OUT msgresponse */
+#define MC_CMD_RX_BALANCING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_BIND
+ * TSAN - TSAC binding communication protocol. Refer to SF-115479-TC for more
+ * info in respect to the binding protocol. This MCDI command is only available
+ * over a TLS secure connection between the TSAN and TSAC, and is not available
+ * to host software. Note- The messages definitions that do comprise this MCDI
+ * command deemed as provisional. This MCDI command has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_TSA_BIND 0x119
+#undef MC_CMD_0x119_PRIVILEGE_CTG
+
+#define MC_CMD_0x119_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TSA_BIND_IN msgrequest: Protocol operation code */
+#define MC_CMD_TSA_BIND_IN_LEN 4
+#define MC_CMD_TSA_BIND_IN_OP_OFST 0
+/* enum: Retrieve the TSAN ID from a TSAN. TSAN ID is a unique identifier for
+ * the network adapter. More specifically, TSAN ID equals the MAC address of
+ * the network adapter. TSAN ID is used as part of the TSAN authentication
+ * protocol. Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_OP_GET_ID 0x1
+/* enum: Get a binding ticket from the TSAN. The binding ticket is used as part
+ * of the binding procedure to authorize the binding of an adapter to a TSAID.
+ * Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_OP_GET_TICKET 0x2
+/* enum: Opcode associated with the propagation of a private key that TSAN uses
+ * as part of post-binding authentication procedure. More specifically, TSAN
+ * uses this key for a signing operation. TSAC uses the counterpart public key
+ * to verify the signature. Note - The post-binding authentication occurs when
+ * the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer to
+ * SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_OP_SET_KEY 0x3
+/* enum: Request an unbinding operation. Note- TSAN clears the binding ticket
+ * from the Nvram section.
+ */
+#define MC_CMD_TSA_BIND_OP_UNBIND 0x4
+
+/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest */
+#define MC_CMD_TSA_BIND_IN_GET_ID_LEN 20
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_GET_ID_OP_OFST 0
+/* Cryptographic nonce that TSAC generates and sends to TSAN. TSAC generates
+ * the nonce every time as part of the TSAN post-binding authentication
+ * procedure when the TSAN-TSAC connection terminates and TSAN does need to re-
+ * connect to the TSAC. Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_OFST 4
+#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_LEN 16
+
+/* MC_CMD_TSA_BIND_IN_GET_TICKET msgrequest */
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_LEN 4
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_OFST 0
+
+/* MC_CMD_TSA_BIND_IN_SET_KEY msgrequest */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMIN 5
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LEN(num) (4+1*(num))
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_OFST 0
+/* This data blob contains the private key generated by the TSAC. TSAN uses
+ * this key for a signing operation. Note- This private key is used in
+ * conjunction with the post-binding TSAN authentication procedure that occurs
+ * when the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer
+ * to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_OFST 4
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_LEN 1
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MAXNUM 248
+
+/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Asks for the un-binding procedure */
+#define MC_CMD_TSA_BIND_IN_UNBIND_LEN 10
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_UNBIND_OP_OFST 0
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_OFST 4
+#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_LEN 6
+
+/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMIN 15
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LEN(num) (14+1*(num))
+/* The operation completion code. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_OFST 0
+/* Rules engine type. Note- The rules engine type allows TSAC to further
+ * identify the connected endpoint (e.g. TSAN, NIC Emulator) type and take the
+ * proper action accordingly. As an example, TSAC uses the rules engine type to
+ * select the SF key that differs in the case of TSAN vs. NIC Emulator.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_OFST 4
+/* enum: Hardware rules engine. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_TSAN 0x1
+/* enum: Nic emulator rules engine. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_NEMU 0x2
+/* enum: SSFE. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_SSFE 0x3
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_OFST 8
+#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_LEN 6
+/* The signature data blob. The signature is computed against the message
+ * formed by TSAN ID concatenated with the NONCE value. Refer to SF-115479-TC
+ * for more information also in respect to the private keys that are used to
+ * sign the message based on TSAN pre/post-binding authentication procedure.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_OFST 14
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_LEN 1
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MAXNUM 238
+
+/* MC_CMD_TSA_BIND_OUT_GET_TICKET msgresponse */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMIN 5
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LEN(num) (4+1*(num))
+/* The operation completion code. */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_OFST 0
+/* The ticket represents the data blob construct that TSAN sends to TSAC as
+ * part of the binding protocol. From the TSAN perspective the ticket is an
+ * opaque construct. For more info refer to SF-115479-TC.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_OFST 4
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_LEN 1
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MINNUM 1
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MAXNUM 248
+
+/* MC_CMD_TSA_BIND_OUT_SET_KEY msgresponse */
+#define MC_CMD_TSA_BIND_OUT_SET_KEY_LEN 4
+/* The operation completion code. */
+#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_OFST 0
+
+/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_OFST 0
+/* Extra status information */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_OFST 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a binding ticket. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3
+
+
+/***********************************/
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE
+ * Manage the persistent NVRAM cache of security rules created with
+ * MC_CMD_SET_SECURITY_RULE. Note that the cache is not automatically updated
+ * as rules are added or removed; the active ruleset must be explicitly
+ * committed to the cache. The cache may also be explicitly invalidated,
+ * without affecting the currently active ruleset. When the cache is valid, it
+ * will be loaded at power on or MC reboot, instead of the default ruleset.
+ * Rollback of the currently active ruleset to the cached version (when it is
+ * valid) is also supported. (Medford-only; for use by SolarSecure apps, not
+ * directly by drivers. See SF-114946-SW.) NOTE - this message definition is
+ * provisional. It has not yet been used in any released code and may change
+ * during development. This note will be removed once it is regarded as stable.
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE 0x11a
+#undef MC_CMD_0x11a_PRIVILEGE_CTG
+
+#define MC_CMD_0x11a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN msgrequest */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_LEN 4
+/* the operation to perform */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_OFST 0
+/* enum: reports the ruleset version that is cached in persistent storage but
+ * performs no other action
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0
+/* enum: rolls back the active state to the cached version. (May fail with
+ * ENOENT if there is no valid cached version.)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1
+/* enum: commits the active state to the persistent cache */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2
+/* enum: invalidates the persistent cache without affecting the active state */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3
+
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT msgresponse */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMIN 5
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMAX 252
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LEN(num) (4+1*(num))
+/* indicates whether the persistent cache is valid (after completion of the
+ * requested operation in the case of rollback, commit, or invalidate)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_OFST 0
+/* enum: persistent cache is invalid (the VERSION field will be empty in this
+ * case)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0
+/* enum: persistent cache is valid */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1
+/* cached ruleset version (after completion of the requested operation, in the
+ * case of rollback, commit, or invalidate) as an opaque hash value in the same
+ * form as MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_OFST 4
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_LEN 1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MINNUM 1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MAXNUM 248
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PRIVATE_APPEND
+ * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST
+ * if the tag is already present.
+ */
+#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
+#undef MC_CMD_0x11c_PRIVILEGE_CTG
+
+#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
+/* The tag to be appended */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+/* The length of the data */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+/* The data to be contained in the TLV structure */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244
+
+/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_VERIFY_CONTENTS
+ * Verify that the contents of the XPM memory is correct (Medford only). This
+ * is used during manufacture to check that the XPM memory has been programmed
+ * correctly at ATE.
+ */
+#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b
+#undef MC_CMD_0x11b_PRIVILEGE_CTG
+
+#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
+/* Data type to be checked */
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+
+/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
+/* Number of sectors found (test builds only) */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+/* Number of bytes found (test builds only) */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+/* Length of signature */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+/* Signature */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240
+
+
+/***********************************/
+/* MC_CMD_SET_EVQ_TMR
+ * Update the timer load, timer reload and timer mode values for a given EVQ.
+ * The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will
+ * be rounded up to the granularity supported by the hardware, then truncated
+ * to the range supported by the hardware. The resulting value after the
+ * rounding and truncation will be returned to the caller (in TMR_LOAD_ACT_NS
+ * and TMR_RELOAD_ACT_NS).
+ */
+#define MC_CMD_SET_EVQ_TMR 0x120
+#undef MC_CMD_0x120_PRIVILEGE_CTG
+
+#define MC_CMD_0x120_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_EVQ_TMR_IN msgrequest */
+#define MC_CMD_SET_EVQ_TMR_IN_LEN 16
+/* Function-relative queue instance */
+#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+/* Requested value for timer load (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+/* Requested value for timer reload (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+/* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */
+
+/* MC_CMD_SET_EVQ_TMR_OUT msgresponse */
+#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8
+/* Actual value for timer load (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+/* Actual value for timer reload (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+
+
+/***********************************/
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES
+ * Query properties about the event queue timers.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES 0x122
+#undef MC_CMD_0x122_PRIVILEGE_CTG
+
+#define MC_CMD_0x122_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_IN msgrequest */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_IN_LEN 0
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
+/* Reserved for future use. */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
+ * nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
+ * allowed for timer load/reload counts.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
+ * multiple of this step size will be rounded in an implementation defined
+ * manner.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
+ * meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+/* Timer durations requested via MCDI that are not a multiple of this step size
+ * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+/* For timers updated using the bug35388 workaround, this is the time interval
+ * (in nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count. This field is only meaningful if the bug35388 workaround
+ * is enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+/* For timers updated using the bug35388 workaround, this is the maximum value
+ * allowed for timer load/reload counts. This field is only meaningful if the
+ * bug35388 workaround is enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+/* For timers updated using the bug35388 workaround, timer load/reload counts
+ * not a multiple of this step size will be rounded in an implementation
+ * defined manner. This field is only meaningful if the bug35388 workaround is
+ * enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+
+
+/***********************************/
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP
+ * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the
+ * non used switch buffers.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
+#undef MC_CMD_0x11d_PRIVILEGE_CTG
+
+#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+/* Will the common pool be used as TX_vFIFO_ULL (1) */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
+/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
+/* Number of buffers to reserve for the common pool */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+/* TX datapath to which the Common Pool is connected to. */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+/* enum: Extracts information from function */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
+/* Network port or RX Engine to which the common pool connects. */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+/* enum: Extracts information from function */
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
+/* enum: To enable Switch loopback with Rx engine 0 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
+/* enum: To enable Switch loopback with Rx engine 1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
+/* ID of the common pool allocated */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO
+ * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the
+ * previously allocated common pools.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
+#undef MC_CMD_0x11e_PRIVILEGE_CTG
+
+#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
+/* Common pool previously allocated to which the new vFIFO will be associated
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+/* Port or RX engine to associate the vFIFO egress */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+/* enum: Extracts information from common pool */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
+/* enum: To enable Switch loopback with Rx engine 0 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
+/* enum: To enable Switch loopback with Rx engine 1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
+/* Minimum number of buffers that the pool must have */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+/* enum: Do not check the space available */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
+/* Will the vFIFO be used as TX_vFIFO_ULL */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+/* Network priority of the vFIFO,if applicable */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+/* enum: Search for the lowest unused priority */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
+/* Short vFIFO ID */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+/* Network priority of the vFIFO */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+
+
+/***********************************/
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF
+ * This interface clears the configuration of the given vFIFO and leaves it
+ * ready to be re-used.
+ */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
+#undef MC_CMD_0x11f_PRIVILEGE_CTG
+
+#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
+/* Short vFIFO ID */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP
+ * This interface clears the configuration of the given common pool and leaves
+ * it ready to be re-used.
+ */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
+#undef MC_CMD_0x121_PRIVILEGE_CTG
+
+#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
+/* Common pool ID given when pool allocated */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_REKEY
+ * This request causes the NIC to generate a new per-NIC key and program it
+ * into the write-once memory. During the process all flash partitions that are
+ * protected with a CMAC are verified with the old per-NIC key and then signed
+ * with the new per-NIC key. If the NIC has already reached its rekey limit the
+ * REKEY op will return MC_CMD_ERR_ERANGE. The REKEY op may block until
+ * completion or it may return 0 and continue processing, therefore the caller
+ * must poll at least once to confirm that the rekeying has completed. The POLL
+ * operation returns MC_CMD_ERR_EBUSY if the rekey process is still running
+ * otherwise it will return the result of the last completed rekey operation,
+ * or 0 if there has not been a previous rekey.
+ */
+#define MC_CMD_REKEY 0x123
+#undef MC_CMD_0x123_PRIVILEGE_CTG
+
+#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REKEY_IN msgrequest */
+#define MC_CMD_REKEY_IN_LEN 4
+/* the type of operation requested */
+#define MC_CMD_REKEY_IN_OP_OFST 0
+/* enum: Start the rekeying operation */
+#define MC_CMD_REKEY_IN_OP_REKEY 0x0
+/* enum: Poll for completion of the rekeying operation */
+#define MC_CMD_REKEY_IN_OP_POLL 0x1
+
+/* MC_CMD_REKEY_OUT msgresponse */
+#define MC_CMD_REKEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS
+ * This interface allows the host to find out how many common pool buffers are
+ * not yet assigned.
+ */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
+#undef MC_CMD_0x124_PRIVILEGE_CTG
+
+#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
+
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
+/* Available buffers for the ENG to NET vFIFOs. */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+
+
+/***********************************/
+/* MC_CMD_SET_SECURITY_FUSES
+ * Change the security level of the adapter by setting bits in the write-once
+ * memory. The firmware maps each flag in the message to a set of one or more
+ * hardware-defined or software-defined bits and sets these bits in the write-
+ * once memory. For Medford the hardware-defined bits are defined in
+ * SF-112079-PS 5.3, the software-defined bits are defined in xpm.h. Returns 0
+ * if all of the required bits were set and returns MC_CMD_ERR_EIO if any of
+ * the required bits were not set.
+ */
+#define MC_CMD_SET_SECURITY_FUSES 0x126
+#undef MC_CMD_0x126_PRIVILEGE_CTG
+
+#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SECURITY_FUSES_IN msgrequest */
+#define MC_CMD_SET_SECURITY_FUSES_IN_LEN 4
+/* Flags specifying what type of security features are being set */
+#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_LBN 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_WIDTH 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_LBN 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_WIDTH 1
+
+/* MC_CMD_SET_SECURITY_FUSES_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_FUSES_OUT_LEN 0
+
+#endif /* _SIENA_MC_DRIVER_PCOL_H */
+/*! \cidoxg_end */
diff --git a/drivers/net/sfc/base/efx_regs_pci.h b/drivers/net/sfc/base/efx_regs_pci.h
new file mode 100644
index 00000000..f90f9565
--- /dev/null
+++ b/drivers/net/sfc/base/efx_regs_pci.h
@@ -0,0 +1,2356 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_REGS_PCI_H
+#define _SYS_EFX_REGS_PCI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PC_VEND_ID_REG(16bit):
+ * Vendor ID register
+ */
+
+#define PCR_AZ_VEND_ID_REG 0x00000000
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_VEND_ID_LBN 0
+#define PCRF_AZ_VEND_ID_WIDTH 16
+
+
+/*
+ * PC_DEV_ID_REG(16bit):
+ * Device ID register
+ */
+
+#define PCR_AZ_DEV_ID_REG 0x00000002
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_DEV_ID_LBN 0
+#define PCRF_AZ_DEV_ID_WIDTH 16
+
+
+/*
+ * PC_CMD_REG(16bit):
+ * Command register
+ */
+
+#define PCR_AZ_CMD_REG 0x00000004
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INTX_DIS_LBN 10
+#define PCRF_AZ_INTX_DIS_WIDTH 1
+#define PCRF_AZ_FB2B_EN_LBN 9
+#define PCRF_AZ_FB2B_EN_WIDTH 1
+#define PCRF_AZ_SERR_EN_LBN 8
+#define PCRF_AZ_SERR_EN_WIDTH 1
+#define PCRF_AZ_IDSEL_CTL_LBN 7
+#define PCRF_AZ_IDSEL_CTL_WIDTH 1
+#define PCRF_AZ_PERR_EN_LBN 6
+#define PCRF_AZ_PERR_EN_WIDTH 1
+#define PCRF_AZ_VGA_PAL_SNP_LBN 5
+#define PCRF_AZ_VGA_PAL_SNP_WIDTH 1
+#define PCRF_AZ_MWI_EN_LBN 4
+#define PCRF_AZ_MWI_EN_WIDTH 1
+#define PCRF_AZ_SPEC_CYC_LBN 3
+#define PCRF_AZ_SPEC_CYC_WIDTH 1
+#define PCRF_AZ_MST_EN_LBN 2
+#define PCRF_AZ_MST_EN_WIDTH 1
+#define PCRF_AZ_MEM_EN_LBN 1
+#define PCRF_AZ_MEM_EN_WIDTH 1
+#define PCRF_AZ_IO_EN_LBN 0
+#define PCRF_AZ_IO_EN_WIDTH 1
+
+
+/*
+ * PC_STAT_REG(16bit):
+ * Status register
+ */
+
+#define PCR_AZ_STAT_REG 0x00000006
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_DET_PERR_LBN 15
+#define PCRF_AZ_DET_PERR_WIDTH 1
+#define PCRF_AZ_SIG_SERR_LBN 14
+#define PCRF_AZ_SIG_SERR_WIDTH 1
+#define PCRF_AZ_GOT_MABRT_LBN 13
+#define PCRF_AZ_GOT_MABRT_WIDTH 1
+#define PCRF_AZ_GOT_TABRT_LBN 12
+#define PCRF_AZ_GOT_TABRT_WIDTH 1
+#define PCRF_AZ_SIG_TABRT_LBN 11
+#define PCRF_AZ_SIG_TABRT_WIDTH 1
+#define PCRF_AZ_DEVSEL_TIM_LBN 9
+#define PCRF_AZ_DEVSEL_TIM_WIDTH 2
+#define PCRF_AZ_MDAT_PERR_LBN 8
+#define PCRF_AZ_MDAT_PERR_WIDTH 1
+#define PCRF_AZ_FB2B_CAP_LBN 7
+#define PCRF_AZ_FB2B_CAP_WIDTH 1
+#define PCRF_AZ_66MHZ_CAP_LBN 5
+#define PCRF_AZ_66MHZ_CAP_WIDTH 1
+#define PCRF_AZ_CAP_LIST_LBN 4
+#define PCRF_AZ_CAP_LIST_WIDTH 1
+#define PCRF_AZ_INTX_STAT_LBN 3
+#define PCRF_AZ_INTX_STAT_WIDTH 1
+
+
+/*
+ * PC_REV_ID_REG(8bit):
+ * Class code & revision ID register
+ */
+
+#define PCR_AZ_REV_ID_REG 0x00000008
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_REV_ID_LBN 0
+#define PCRF_AZ_REV_ID_WIDTH 8
+
+
+/*
+ * PC_CC_REG(24bit):
+ * Class code register
+ */
+
+#define PCR_AZ_CC_REG 0x00000009
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BASE_CC_LBN 16
+#define PCRF_AZ_BASE_CC_WIDTH 8
+#define PCRF_AZ_SUB_CC_LBN 8
+#define PCRF_AZ_SUB_CC_WIDTH 8
+#define PCRF_AZ_PROG_IF_LBN 0
+#define PCRF_AZ_PROG_IF_WIDTH 8
+
+
+/*
+ * PC_CACHE_LSIZE_REG(8bit):
+ * Cache line size
+ */
+
+#define PCR_AZ_CACHE_LSIZE_REG 0x0000000c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_CACHE_LSIZE_LBN 0
+#define PCRF_AZ_CACHE_LSIZE_WIDTH 8
+
+
+/*
+ * PC_MST_LAT_REG(8bit):
+ * Master latency timer register
+ */
+
+#define PCR_AZ_MST_LAT_REG 0x0000000d
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MST_LAT_LBN 0
+#define PCRF_AZ_MST_LAT_WIDTH 8
+
+
+/*
+ * PC_HDR_TYPE_REG(8bit):
+ * Header type register
+ */
+
+#define PCR_AZ_HDR_TYPE_REG 0x0000000e
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MULT_FUNC_LBN 7
+#define PCRF_AZ_MULT_FUNC_WIDTH 1
+#define PCRF_AZ_TYPE_LBN 0
+#define PCRF_AZ_TYPE_WIDTH 7
+
+
+/*
+ * PC_BIST_REG(8bit):
+ * BIST register
+ */
+
+#define PCR_AZ_BIST_REG 0x0000000f
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BIST_LBN 0
+#define PCRF_AZ_BIST_WIDTH 8
+
+
+/*
+ * PC_BAR0_REG(32bit):
+ * Primary function base address register 0
+ */
+
+#define PCR_AZ_BAR0_REG 0x00000010
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR0_LBN 4
+#define PCRF_AZ_BAR0_WIDTH 28
+#define PCRF_AZ_BAR0_PREF_LBN 3
+#define PCRF_AZ_BAR0_PREF_WIDTH 1
+#define PCRF_AZ_BAR0_TYPE_LBN 1
+#define PCRF_AZ_BAR0_TYPE_WIDTH 2
+#define PCRF_AZ_BAR0_IOM_LBN 0
+#define PCRF_AZ_BAR0_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR1_REG(32bit):
+ * Primary function base address register 1, BAR1 is not implemented so read only.
+ */
+
+#define PCR_DZ_BAR1_REG 0x00000014
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_BAR1_LBN 0
+#define PCRF_DZ_BAR1_WIDTH 32
+
+
+/*
+ * PC_BAR2_LO_REG(32bit):
+ * Primary function base address register 2 low bits
+ */
+
+#define PCR_AZ_BAR2_LO_REG 0x00000018
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR2_LO_LBN 4
+#define PCRF_AZ_BAR2_LO_WIDTH 28
+#define PCRF_AZ_BAR2_PREF_LBN 3
+#define PCRF_AZ_BAR2_PREF_WIDTH 1
+#define PCRF_AZ_BAR2_TYPE_LBN 1
+#define PCRF_AZ_BAR2_TYPE_WIDTH 2
+#define PCRF_AZ_BAR2_IOM_LBN 0
+#define PCRF_AZ_BAR2_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR2_HI_REG(32bit):
+ * Primary function base address register 2 high bits
+ */
+
+#define PCR_AZ_BAR2_HI_REG 0x0000001c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR2_HI_LBN 0
+#define PCRF_AZ_BAR2_HI_WIDTH 32
+
+
+/*
+ * PC_BAR4_LO_REG(32bit):
+ * Primary function base address register 2 low bits
+ */
+
+#define PCR_CZ_BAR4_LO_REG 0x00000020
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_BAR4_LO_LBN 4
+#define PCRF_CZ_BAR4_LO_WIDTH 28
+#define PCRF_CZ_BAR4_PREF_LBN 3
+#define PCRF_CZ_BAR4_PREF_WIDTH 1
+#define PCRF_CZ_BAR4_TYPE_LBN 1
+#define PCRF_CZ_BAR4_TYPE_WIDTH 2
+#define PCRF_CZ_BAR4_IOM_LBN 0
+#define PCRF_CZ_BAR4_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR4_HI_REG(32bit):
+ * Primary function base address register 2 high bits
+ */
+
+#define PCR_CZ_BAR4_HI_REG 0x00000024
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_BAR4_HI_LBN 0
+#define PCRF_CZ_BAR4_HI_WIDTH 32
+
+
+/*
+ * PC_SS_VEND_ID_REG(16bit):
+ * Sub-system vendor ID register
+ */
+
+#define PCR_AZ_SS_VEND_ID_REG 0x0000002c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SS_VEND_ID_LBN 0
+#define PCRF_AZ_SS_VEND_ID_WIDTH 16
+
+
+/*
+ * PC_SS_ID_REG(16bit):
+ * Sub-system ID register
+ */
+
+#define PCR_AZ_SS_ID_REG 0x0000002e
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SS_ID_LBN 0
+#define PCRF_AZ_SS_ID_WIDTH 16
+
+
+/*
+ * PC_EXPROM_BAR_REG(32bit):
+ * Expansion ROM base address register
+ */
+
+#define PCR_AZ_EXPROM_BAR_REG 0x00000030
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_EXPROM_BAR_LBN 11
+#define PCRF_AZ_EXPROM_BAR_WIDTH 21
+#define PCRF_AB_EXPROM_MIN_SIZE_LBN 2
+#define PCRF_AB_EXPROM_MIN_SIZE_WIDTH 9
+#define PCRF_CZ_EXPROM_MIN_SIZE_LBN 1
+#define PCRF_CZ_EXPROM_MIN_SIZE_WIDTH 10
+#define PCRF_AB_EXPROM_FEATURE_ENABLE_LBN 1
+#define PCRF_AB_EXPROM_FEATURE_ENABLE_WIDTH 1
+#define PCRF_AZ_EXPROM_EN_LBN 0
+#define PCRF_AZ_EXPROM_EN_WIDTH 1
+
+
+/*
+ * PC_CAP_PTR_REG(8bit):
+ * Capability pointer register
+ */
+
+#define PCR_AZ_CAP_PTR_REG 0x00000034
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_CAP_PTR_LBN 0
+#define PCRF_AZ_CAP_PTR_WIDTH 8
+
+
+/*
+ * PC_INT_LINE_REG(8bit):
+ * Interrupt line register
+ */
+
+#define PCR_AZ_INT_LINE_REG 0x0000003c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INT_LINE_LBN 0
+#define PCRF_AZ_INT_LINE_WIDTH 8
+
+
+/*
+ * PC_INT_PIN_REG(8bit):
+ * Interrupt pin register
+ */
+
+#define PCR_AZ_INT_PIN_REG 0x0000003d
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INT_PIN_LBN 0
+#define PCRF_AZ_INT_PIN_WIDTH 8
+#define PCFE_DZ_INTPIN_INTD 4
+#define PCFE_DZ_INTPIN_INTC 3
+#define PCFE_DZ_INTPIN_INTB 2
+#define PCFE_DZ_INTPIN_INTA 1
+
+
+/*
+ * PC_PM_CAP_ID_REG(8bit):
+ * Power management capability ID
+ */
+
+#define PCR_AZ_PM_CAP_ID_REG 0x00000040
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_CAP_ID_LBN 0
+#define PCRF_AZ_PM_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_PM_NXT_PTR_REG(8bit):
+ * Power management next item pointer
+ */
+
+#define PCR_AZ_PM_NXT_PTR_REG 0x00000041
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_NXT_PTR_LBN 0
+#define PCRF_AZ_PM_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_PM_CAP_REG(16bit):
+ * Power management capabilities register
+ */
+
+#define PCR_AZ_PM_CAP_REG 0x00000042
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_PME_SUPT_LBN 11
+#define PCRF_AZ_PM_PME_SUPT_WIDTH 5
+#define PCRF_AZ_PM_D2_SUPT_LBN 10
+#define PCRF_AZ_PM_D2_SUPT_WIDTH 1
+#define PCRF_AZ_PM_D1_SUPT_LBN 9
+#define PCRF_AZ_PM_D1_SUPT_WIDTH 1
+#define PCRF_AZ_PM_AUX_CURR_LBN 6
+#define PCRF_AZ_PM_AUX_CURR_WIDTH 3
+#define PCRF_AZ_PM_DSI_LBN 5
+#define PCRF_AZ_PM_DSI_WIDTH 1
+#define PCRF_AZ_PM_PME_CLK_LBN 3
+#define PCRF_AZ_PM_PME_CLK_WIDTH 1
+#define PCRF_AZ_PM_PME_VER_LBN 0
+#define PCRF_AZ_PM_PME_VER_WIDTH 3
+
+
+/*
+ * PC_PM_CS_REG(16bit):
+ * Power management control & status register
+ */
+
+#define PCR_AZ_PM_CS_REG 0x00000044
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_PME_STAT_LBN 15
+#define PCRF_AZ_PM_PME_STAT_WIDTH 1
+#define PCRF_AZ_PM_DAT_SCALE_LBN 13
+#define PCRF_AZ_PM_DAT_SCALE_WIDTH 2
+#define PCRF_AZ_PM_DAT_SEL_LBN 9
+#define PCRF_AZ_PM_DAT_SEL_WIDTH 4
+#define PCRF_AZ_PM_PME_EN_LBN 8
+#define PCRF_AZ_PM_PME_EN_WIDTH 1
+#define PCRF_CZ_NO_SOFT_RESET_LBN 3
+#define PCRF_CZ_NO_SOFT_RESET_WIDTH 1
+#define PCRF_AZ_PM_PWR_ST_LBN 0
+#define PCRF_AZ_PM_PWR_ST_WIDTH 2
+
+
+/*
+ * PC_MSI_CAP_ID_REG(8bit):
+ * MSI capability ID
+ */
+
+#define PCR_AZ_MSI_CAP_ID_REG 0x00000050
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_CAP_ID_LBN 0
+#define PCRF_AZ_MSI_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_MSI_NXT_PTR_REG(8bit):
+ * MSI next item pointer
+ */
+
+#define PCR_AZ_MSI_NXT_PTR_REG 0x00000051
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_NXT_PTR_LBN 0
+#define PCRF_AZ_MSI_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_MSI_CTL_REG(16bit):
+ * MSI control register
+ */
+
+#define PCR_AZ_MSI_CTL_REG 0x00000052
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_64_EN_LBN 7
+#define PCRF_AZ_MSI_64_EN_WIDTH 1
+#define PCRF_AZ_MSI_MULT_MSG_EN_LBN 4
+#define PCRF_AZ_MSI_MULT_MSG_EN_WIDTH 3
+#define PCRF_AZ_MSI_MULT_MSG_CAP_LBN 1
+#define PCRF_AZ_MSI_MULT_MSG_CAP_WIDTH 3
+#define PCRF_AZ_MSI_EN_LBN 0
+#define PCRF_AZ_MSI_EN_WIDTH 1
+
+
+/*
+ * PC_MSI_ADR_LO_REG(32bit):
+ * MSI low 32 bits address register
+ */
+
+#define PCR_AZ_MSI_ADR_LO_REG 0x00000054
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_ADR_LO_LBN 2
+#define PCRF_AZ_MSI_ADR_LO_WIDTH 30
+
+
+/*
+ * PC_MSI_ADR_HI_REG(32bit):
+ * MSI high 32 bits address register
+ */
+
+#define PCR_AZ_MSI_ADR_HI_REG 0x00000058
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_ADR_HI_LBN 0
+#define PCRF_AZ_MSI_ADR_HI_WIDTH 32
+
+
+/*
+ * PC_MSI_DAT_REG(16bit):
+ * MSI data register
+ */
+
+#define PCR_AZ_MSI_DAT_REG 0x0000005c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_DAT_LBN 0
+#define PCRF_AZ_MSI_DAT_WIDTH 16
+
+
+/*
+ * PC_PCIE_CAP_LIST_REG(16bit):
+ * PCIe capability list register
+ */
+
+#define PCR_AB_PCIE_CAP_LIST_REG 0x00000060
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_PCIE_CAP_LIST_REG 0x00000070
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PCIE_NXT_PTR_LBN 8
+#define PCRF_AZ_PCIE_NXT_PTR_WIDTH 8
+#define PCRF_AZ_PCIE_CAP_ID_LBN 0
+#define PCRF_AZ_PCIE_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_PCIE_CAP_REG(16bit):
+ * PCIe capability register
+ */
+
+#define PCR_AB_PCIE_CAP_REG 0x00000062
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_PCIE_CAP_REG 0x00000072
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PCIE_INT_MSG_NUM_LBN 9
+#define PCRF_AZ_PCIE_INT_MSG_NUM_WIDTH 5
+#define PCRF_AZ_PCIE_SLOT_IMP_LBN 8
+#define PCRF_AZ_PCIE_SLOT_IMP_WIDTH 1
+#define PCRF_AZ_PCIE_DEV_PORT_TYPE_LBN 4
+#define PCRF_AZ_PCIE_DEV_PORT_TYPE_WIDTH 4
+#define PCRF_AZ_PCIE_CAP_VER_LBN 0
+#define PCRF_AZ_PCIE_CAP_VER_WIDTH 4
+
+
+/*
+ * PC_DEV_CAP_REG(32bit):
+ * PCIe device capabilities register
+ */
+
+#define PCR_AB_DEV_CAP_REG 0x00000064
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_CAP_REG 0x00000074
+/* sienaa0=pci_f0_config,hunta0=pci_f0_config */
+
+#define PCRF_CZ_CAP_FN_LEVEL_RESET_LBN 28
+#define PCRF_CZ_CAP_FN_LEVEL_RESET_WIDTH 1
+#define PCRF_AZ_CAP_SLOT_PWR_SCL_LBN 26
+#define PCRF_AZ_CAP_SLOT_PWR_SCL_WIDTH 2
+#define PCRF_AZ_CAP_SLOT_PWR_VAL_LBN 18
+#define PCRF_AZ_CAP_SLOT_PWR_VAL_WIDTH 8
+#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_LBN 15
+#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_WIDTH 1
+#define PCRF_AB_PWR_IND_LBN 14
+#define PCRF_AB_PWR_IND_WIDTH 1
+#define PCRF_AB_ATTN_IND_LBN 13
+#define PCRF_AB_ATTN_IND_WIDTH 1
+#define PCRF_AB_ATTN_BUTTON_LBN 12
+#define PCRF_AB_ATTN_BUTTON_WIDTH 1
+#define PCRF_AZ_ENDPT_L1_LAT_LBN 9
+#define PCRF_AZ_ENDPT_L1_LAT_WIDTH 3
+#define PCRF_AZ_ENDPT_L0_LAT_LBN 6
+#define PCRF_AZ_ENDPT_L0_LAT_WIDTH 3
+#define PCRF_AZ_TAG_FIELD_LBN 5
+#define PCRF_AZ_TAG_FIELD_WIDTH 1
+#define PCRF_AZ_PHAN_FUNC_LBN 3
+#define PCRF_AZ_PHAN_FUNC_WIDTH 2
+#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_LBN 0
+#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_WIDTH 3
+
+
+/*
+ * PC_DEV_CTL_REG(16bit):
+ * PCIe device control register
+ */
+
+#define PCR_AB_DEV_CTL_REG 0x00000068
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_CTL_REG 0x00000078
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_FN_LEVEL_RESET_LBN 15
+#define PCRF_CZ_FN_LEVEL_RESET_WIDTH 1
+#define PCRF_AZ_MAX_RD_REQ_SIZE_LBN 12
+#define PCRF_AZ_MAX_RD_REQ_SIZE_WIDTH 3
+#define PCFE_AZ_MAX_RD_REQ_SIZE_4096 5
+#define PCFE_AZ_MAX_RD_REQ_SIZE_2048 4
+#define PCFE_AZ_MAX_RD_REQ_SIZE_1024 3
+#define PCFE_AZ_MAX_RD_REQ_SIZE_512 2
+#define PCFE_AZ_MAX_RD_REQ_SIZE_256 1
+#define PCFE_AZ_MAX_RD_REQ_SIZE_128 0
+#define PCRF_AZ_EN_NO_SNOOP_LBN 11
+#define PCRF_AZ_EN_NO_SNOOP_WIDTH 1
+#define PCRF_AZ_AUX_PWR_PM_EN_LBN 10
+#define PCRF_AZ_AUX_PWR_PM_EN_WIDTH 1
+#define PCRF_AZ_PHAN_FUNC_EN_LBN 9
+#define PCRF_AZ_PHAN_FUNC_EN_WIDTH 1
+#define PCRF_AB_DEV_CAP_REG_RSVD0_LBN 8
+#define PCRF_AB_DEV_CAP_REG_RSVD0_WIDTH 1
+#define PCRF_CZ_EXTENDED_TAG_EN_LBN 8
+#define PCRF_CZ_EXTENDED_TAG_EN_WIDTH 1
+#define PCRF_AZ_MAX_PAYL_SIZE_LBN 5
+#define PCRF_AZ_MAX_PAYL_SIZE_WIDTH 3
+#define PCFE_AZ_MAX_PAYL_SIZE_4096 5
+#define PCFE_AZ_MAX_PAYL_SIZE_2048 4
+#define PCFE_AZ_MAX_PAYL_SIZE_1024 3
+#define PCFE_AZ_MAX_PAYL_SIZE_512 2
+#define PCFE_AZ_MAX_PAYL_SIZE_256 1
+#define PCFE_AZ_MAX_PAYL_SIZE_128 0
+#define PCRF_AZ_EN_RELAX_ORDER_LBN 4
+#define PCRF_AZ_EN_RELAX_ORDER_WIDTH 1
+#define PCRF_AZ_UNSUP_REQ_RPT_EN_LBN 3
+#define PCRF_AZ_UNSUP_REQ_RPT_EN_WIDTH 1
+#define PCRF_AZ_FATAL_ERR_RPT_EN_LBN 2
+#define PCRF_AZ_FATAL_ERR_RPT_EN_WIDTH 1
+#define PCRF_AZ_NONFATAL_ERR_RPT_EN_LBN 1
+#define PCRF_AZ_NONFATAL_ERR_RPT_EN_WIDTH 1
+#define PCRF_AZ_CORR_ERR_RPT_EN_LBN 0
+#define PCRF_AZ_CORR_ERR_RPT_EN_WIDTH 1
+
+
+/*
+ * PC_DEV_STAT_REG(16bit):
+ * PCIe device status register
+ */
+
+#define PCR_AB_DEV_STAT_REG 0x0000006a
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_STAT_REG 0x0000007a
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_TRNS_PEND_LBN 5
+#define PCRF_AZ_TRNS_PEND_WIDTH 1
+#define PCRF_AZ_AUX_PWR_DET_LBN 4
+#define PCRF_AZ_AUX_PWR_DET_WIDTH 1
+#define PCRF_AZ_UNSUP_REQ_DET_LBN 3
+#define PCRF_AZ_UNSUP_REQ_DET_WIDTH 1
+#define PCRF_AZ_FATAL_ERR_DET_LBN 2
+#define PCRF_AZ_FATAL_ERR_DET_WIDTH 1
+#define PCRF_AZ_NONFATAL_ERR_DET_LBN 1
+#define PCRF_AZ_NONFATAL_ERR_DET_WIDTH 1
+#define PCRF_AZ_CORR_ERR_DET_LBN 0
+#define PCRF_AZ_CORR_ERR_DET_WIDTH 1
+
+
+/*
+ * PC_LNK_CAP_REG(32bit):
+ * PCIe link capabilities register
+ */
+
+#define PCR_AB_LNK_CAP_REG 0x0000006c
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_CAP_REG 0x0000007c
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PORT_NUM_LBN 24
+#define PCRF_AZ_PORT_NUM_WIDTH 8
+#define PCRF_DZ_ASPM_OPTIONALITY_CAP_LBN 22
+#define PCRF_DZ_ASPM_OPTIONALITY_CAP_WIDTH 1
+#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_LBN 21
+#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_WIDTH 1
+#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_LBN 20
+#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_WIDTH 1
+#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_LBN 19
+#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_WIDTH 1
+#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_LBN 18
+#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_WIDTH 1
+#define PCRF_AZ_DEF_L1_EXIT_LAT_LBN 15
+#define PCRF_AZ_DEF_L1_EXIT_LAT_WIDTH 3
+#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_LBN 12
+#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_WIDTH 3
+#define PCRF_AZ_AS_LNK_PM_SUPT_LBN 10
+#define PCRF_AZ_AS_LNK_PM_SUPT_WIDTH 2
+#define PCRF_AZ_MAX_LNK_WIDTH_LBN 4
+#define PCRF_AZ_MAX_LNK_WIDTH_WIDTH 6
+#define PCRF_AZ_MAX_LNK_SP_LBN 0
+#define PCRF_AZ_MAX_LNK_SP_WIDTH 4
+
+
+/*
+ * PC_LNK_CTL_REG(16bit):
+ * PCIe link control register
+ */
+
+#define PCR_AB_LNK_CTL_REG 0x00000070
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_CTL_REG 0x00000080
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_EXT_SYNC_LBN 7
+#define PCRF_AZ_EXT_SYNC_WIDTH 1
+#define PCRF_AZ_COMM_CLK_CFG_LBN 6
+#define PCRF_AZ_COMM_CLK_CFG_WIDTH 1
+#define PCRF_AB_LNK_CTL_REG_RSVD0_LBN 5
+#define PCRF_AB_LNK_CTL_REG_RSVD0_WIDTH 1
+#define PCRF_CZ_LNK_RETRAIN_LBN 5
+#define PCRF_CZ_LNK_RETRAIN_WIDTH 1
+#define PCRF_AZ_LNK_DIS_LBN 4
+#define PCRF_AZ_LNK_DIS_WIDTH 1
+#define PCRF_AZ_RD_COM_BDRY_LBN 3
+#define PCRF_AZ_RD_COM_BDRY_WIDTH 1
+#define PCRF_AZ_ACT_ST_LNK_PM_CTL_LBN 0
+#define PCRF_AZ_ACT_ST_LNK_PM_CTL_WIDTH 2
+
+
+/*
+ * PC_LNK_STAT_REG(16bit):
+ * PCIe link status register
+ */
+
+#define PCR_AB_LNK_STAT_REG 0x00000072
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_STAT_REG 0x00000082
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SLOT_CLK_CFG_LBN 12
+#define PCRF_AZ_SLOT_CLK_CFG_WIDTH 1
+#define PCRF_AZ_LNK_TRAIN_LBN 11
+#define PCRF_AZ_LNK_TRAIN_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_LBN 10
+#define PCRF_AB_TRAIN_ERR_WIDTH 1
+#define PCRF_AZ_LNK_WIDTH_LBN 4
+#define PCRF_AZ_LNK_WIDTH_WIDTH 6
+#define PCRF_AZ_LNK_SP_LBN 0
+#define PCRF_AZ_LNK_SP_WIDTH 4
+
+
+/*
+ * PC_SLOT_CAP_REG(32bit):
+ * PCIe slot capabilities register
+ */
+
+#define PCR_AB_SLOT_CAP_REG 0x00000074
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_SLOT_NUM_LBN 19
+#define PCRF_AB_SLOT_NUM_WIDTH 13
+#define PCRF_AB_SLOT_PWR_LIM_SCL_LBN 15
+#define PCRF_AB_SLOT_PWR_LIM_SCL_WIDTH 2
+#define PCRF_AB_SLOT_PWR_LIM_VAL_LBN 7
+#define PCRF_AB_SLOT_PWR_LIM_VAL_WIDTH 8
+#define PCRF_AB_SLOT_HP_CAP_LBN 6
+#define PCRF_AB_SLOT_HP_CAP_WIDTH 1
+#define PCRF_AB_SLOT_HP_SURP_LBN 5
+#define PCRF_AB_SLOT_HP_SURP_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_PRST_LBN 4
+#define PCRF_AB_SLOT_PWR_IND_PRST_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_IND_PRST_LBN 3
+#define PCRF_AB_SLOT_ATTN_IND_PRST_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_PRST_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_PRST_WIDTH 1
+#define PCRF_AB_SLOT_PWR_CTL_PRST_LBN 1
+#define PCRF_AB_SLOT_PWR_CTL_PRST_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_BUT_PRST_LBN 0
+#define PCRF_AB_SLOT_ATTN_BUT_PRST_WIDTH 1
+
+
+/*
+ * PC_SLOT_CTL_REG(16bit):
+ * PCIe slot control register
+ */
+
+#define PCR_AB_SLOT_CTL_REG 0x00000078
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_SLOT_PWR_CTLR_CTL_LBN 10
+#define PCRF_AB_SLOT_PWR_CTLR_CTL_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_CTL_LBN 8
+#define PCRF_AB_SLOT_PWR_IND_CTL_WIDTH 2
+#define PCRF_AB_SLOT_ATT_IND_CTL_LBN 6
+#define PCRF_AB_SLOT_ATT_IND_CTL_WIDTH 2
+#define PCRF_AB_SLOT_HP_INT_EN_LBN 5
+#define PCRF_AB_SLOT_HP_INT_EN_WIDTH 1
+#define PCRF_AB_SLOT_CMD_COMP_INT_EN_LBN 4
+#define PCRF_AB_SLOT_CMD_COMP_INT_EN_WIDTH 1
+#define PCRF_AB_SLOT_PRES_DET_CHG_EN_LBN 3
+#define PCRF_AB_SLOT_PRES_DET_CHG_EN_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_WIDTH 1
+#define PCRF_AB_SLOT_PWR_FLTDET_EN_LBN 1
+#define PCRF_AB_SLOT_PWR_FLTDET_EN_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_BUT_EN_LBN 0
+#define PCRF_AB_SLOT_ATTN_BUT_EN_WIDTH 1
+
+
+/*
+ * PC_SLOT_STAT_REG(16bit):
+ * PCIe slot status register
+ */
+
+#define PCR_AB_SLOT_STAT_REG 0x0000007a
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_PRES_DET_ST_LBN 6
+#define PCRF_AB_PRES_DET_ST_WIDTH 1
+#define PCRF_AB_MRL_SENS_ST_LBN 5
+#define PCRF_AB_MRL_SENS_ST_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_LBN 4
+#define PCRF_AB_SLOT_PWR_IND_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_IND_LBN 3
+#define PCRF_AB_SLOT_ATTN_IND_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_WIDTH 1
+#define PCRF_AB_PWR_FLTDET_LBN 1
+#define PCRF_AB_PWR_FLTDET_WIDTH 1
+#define PCRF_AB_ATTN_BUTDET_LBN 0
+#define PCRF_AB_ATTN_BUTDET_WIDTH 1
+
+
+/*
+ * PC_MSIX_CAP_ID_REG(8bit):
+ * MSIX Capability ID
+ */
+
+#define PCR_BB_MSIX_CAP_ID_REG 0x00000090
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_CAP_ID_REG 0x000000b0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_CAP_ID_LBN 0
+#define PCRF_BZ_MSIX_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_MSIX_NXT_PTR_REG(8bit):
+ * MSIX Capability Next Capability Ptr
+ */
+
+#define PCR_BB_MSIX_NXT_PTR_REG 0x00000091
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_NXT_PTR_REG 0x000000b1
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_NXT_PTR_LBN 0
+#define PCRF_BZ_MSIX_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_MSIX_CTL_REG(16bit):
+ * MSIX control register
+ */
+
+#define PCR_BB_MSIX_CTL_REG 0x00000092
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_CTL_REG 0x000000b2
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_EN_LBN 15
+#define PCRF_BZ_MSIX_EN_WIDTH 1
+#define PCRF_BZ_MSIX_FUNC_MASK_LBN 14
+#define PCRF_BZ_MSIX_FUNC_MASK_WIDTH 1
+#define PCRF_BZ_MSIX_TBL_SIZE_LBN 0
+#define PCRF_BZ_MSIX_TBL_SIZE_WIDTH 11
+
+
+/*
+ * PC_MSIX_TBL_BASE_REG(32bit):
+ * MSIX Capability Vector Table Base
+ */
+
+#define PCR_BB_MSIX_TBL_BASE_REG 0x00000094
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_TBL_BASE_REG 0x000000b4
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_TBL_OFF_LBN 3
+#define PCRF_BZ_MSIX_TBL_OFF_WIDTH 29
+#define PCRF_BZ_MSIX_TBL_BIR_LBN 0
+#define PCRF_BZ_MSIX_TBL_BIR_WIDTH 3
+
+
+/*
+ * PC_DEV_CAP2_REG(32bit):
+ * PCIe Device Capabilities 2
+ */
+
+#define PCR_CZ_DEV_CAP2_REG 0x00000094
+/* sienaa0=pci_f0_config,hunta0=pci_f0_config */
+
+#define PCRF_DZ_OBFF_SUPPORTED_LBN 18
+#define PCRF_DZ_OBFF_SUPPORTED_WIDTH 2
+#define PCRF_DZ_TPH_CMPL_SUPPORTED_LBN 12
+#define PCRF_DZ_TPH_CMPL_SUPPORTED_WIDTH 2
+#define PCRF_DZ_LTR_M_SUPPORTED_LBN 11
+#define PCRF_DZ_LTR_M_SUPPORTED_WIDTH 1
+#define PCRF_CC_CMPL_TIMEOUT_DIS_LBN 4
+#define PCRF_CC_CMPL_TIMEOUT_DIS_WIDTH 1
+#define PCRF_DZ_CMPL_TIMEOUT_DIS_SUPPORTED_LBN 4
+#define PCRF_DZ_CMPL_TIMEOUT_DIS_SUPPORTED_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_LBN 0
+#define PCRF_CZ_CMPL_TIMEOUT_WIDTH 4
+#define PCFE_CZ_CMPL_TIMEOUT_17000_TO_6400MS 14
+#define PCFE_CZ_CMPL_TIMEOUT_4000_TO_1300MS 13
+#define PCFE_CZ_CMPL_TIMEOUT_1000_TO_3500MS 10
+#define PCFE_CZ_CMPL_TIMEOUT_260_TO_900MS 9
+#define PCFE_CZ_CMPL_TIMEOUT_65_TO_210MS 6
+#define PCFE_CZ_CMPL_TIMEOUT_16_TO_55MS 5
+#define PCFE_CZ_CMPL_TIMEOUT_1_TO_10MS 2
+#define PCFE_CZ_CMPL_TIMEOUT_50_TO_100US 1
+#define PCFE_CZ_CMPL_TIMEOUT_DEFAULT 0
+
+
+/*
+ * PC_DEV_CTL2_REG(16bit):
+ * PCIe Device Control 2
+ */
+
+#define PCR_CZ_DEV_CTL2_REG 0x00000098
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_DZ_OBFF_ENABLE_LBN 13
+#define PCRF_DZ_OBFF_ENABLE_WIDTH 2
+#define PCRF_DZ_LTR_ENABLE_LBN 10
+#define PCRF_DZ_LTR_ENABLE_WIDTH 1
+#define PCRF_DZ_IDO_COMPLETION_ENABLE_LBN 9
+#define PCRF_DZ_IDO_COMPLETION_ENABLE_WIDTH 1
+#define PCRF_DZ_IDO_REQUEST_ENABLE_LBN 8
+#define PCRF_DZ_IDO_REQUEST_ENABLE_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_LBN 4
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_CTL_LBN 0
+#define PCRF_CZ_CMPL_TIMEOUT_CTL_WIDTH 4
+
+
+/*
+ * PC_MSIX_PBA_BASE_REG(32bit):
+ * MSIX Capability PBA Base
+ */
+
+#define PCR_BB_MSIX_PBA_BASE_REG 0x00000098
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_PBA_BASE_REG 0x000000b8
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_PBA_OFF_LBN 3
+#define PCRF_BZ_MSIX_PBA_OFF_WIDTH 29
+#define PCRF_BZ_MSIX_PBA_BIR_LBN 0
+#define PCRF_BZ_MSIX_PBA_BIR_WIDTH 3
+
+
+/*
+ * PC_LNK_CAP2_REG(32bit):
+ * PCIe Link Capability 2
+ */
+
+#define PCR_DZ_LNK_CAP2_REG 0x0000009c
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LNK_SPEED_SUP_LBN 1
+#define PCRF_DZ_LNK_SPEED_SUP_WIDTH 7
+
+
+/*
+ * PC_LNK_CTL2_REG(16bit):
+ * PCIe Link Control 2
+ */
+
+#define PCR_CZ_LNK_CTL2_REG 0x000000a0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_POLLING_DEEMPH_LVL_LBN 12
+#define PCRF_CZ_POLLING_DEEMPH_LVL_WIDTH 1
+#define PCRF_CZ_COMPLIANCE_SOS_CTL_LBN 11
+#define PCRF_CZ_COMPLIANCE_SOS_CTL_WIDTH 1
+#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_LBN 10
+#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_WIDTH 1
+#define PCRF_CZ_TRANSMIT_MARGIN_LBN 7
+#define PCRF_CZ_TRANSMIT_MARGIN_WIDTH 3
+#define PCRF_CZ_SELECT_DEEMPH_LBN 6
+#define PCRF_CZ_SELECT_DEEMPH_WIDTH 1
+#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_LBN 5
+#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_WIDTH 1
+#define PCRF_CZ_ENTER_COMPLIANCE_CTL_LBN 4
+#define PCRF_CZ_ENTER_COMPLIANCE_CTL_WIDTH 1
+#define PCRF_CZ_TGT_LNK_SPEED_CTL_LBN 0
+#define PCRF_CZ_TGT_LNK_SPEED_CTL_WIDTH 4
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN3 3
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN2 2
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN1 1
+
+
+/*
+ * PC_LNK_STAT2_REG(16bit):
+ * PCIe Link Status 2
+ */
+
+#define PCR_CZ_LNK_STAT2_REG 0x000000a2
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_CURRENT_DEEMPH_LBN 0
+#define PCRF_CZ_CURRENT_DEEMPH_WIDTH 1
+
+
+/*
+ * PC_VPD_CAP_ID_REG(8bit):
+ * VPD data register
+ */
+
+#define PCR_AB_VPD_CAP_ID_REG 0x000000b0
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_CAP_ID_LBN 0
+#define PCRF_AB_VPD_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_VPD_NXT_PTR_REG(8bit):
+ * VPD next item pointer
+ */
+
+#define PCR_AB_VPD_NXT_PTR_REG 0x000000b1
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_NXT_PTR_LBN 0
+#define PCRF_AB_VPD_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_VPD_ADDR_REG(16bit):
+ * VPD address register
+ */
+
+#define PCR_AB_VPD_ADDR_REG 0x000000b2
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_FLAG_LBN 15
+#define PCRF_AB_VPD_FLAG_WIDTH 1
+#define PCRF_AB_VPD_ADDR_LBN 0
+#define PCRF_AB_VPD_ADDR_WIDTH 15
+
+
+/*
+ * PC_VPD_CAP_DATA_REG(32bit):
+ * documentation to be written for sum_PC_VPD_CAP_DATA_REG
+ */
+
+#define PCR_AB_VPD_CAP_DATA_REG 0x000000b4
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_VPD_CAP_DATA_REG 0x000000d4
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_VPD_DATA_LBN 0
+#define PCRF_AZ_VPD_DATA_WIDTH 32
+
+
+/*
+ * PC_VPD_CAP_CTL_REG(8bit):
+ * VPD control and capabilities register
+ */
+
+#define PCR_CZ_VPD_CAP_CTL_REG 0x000000d0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_VPD_FLAG_LBN 31
+#define PCRF_CZ_VPD_FLAG_WIDTH 1
+#define PCRF_CZ_VPD_ADDR_LBN 16
+#define PCRF_CZ_VPD_ADDR_WIDTH 15
+#define PCRF_CZ_VPD_NXT_PTR_LBN 8
+#define PCRF_CZ_VPD_NXT_PTR_WIDTH 8
+#define PCRF_CZ_VPD_CAP_ID_LBN 0
+#define PCRF_CZ_VPD_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_AER_CAP_HDR_REG(32bit):
+ * AER capability header register
+ */
+
+#define PCR_AZ_AER_CAP_HDR_REG 0x00000100
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_AERCAPHDR_NXT_PTR_LBN 20
+#define PCRF_AZ_AERCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_AZ_AERCAPHDR_VER_LBN 16
+#define PCRF_AZ_AERCAPHDR_VER_WIDTH 4
+#define PCRF_AZ_AERCAPHDR_ID_LBN 0
+#define PCRF_AZ_AERCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_AER_UNCORR_ERR_STAT_REG(32bit):
+ * AER Uncorrectable error status register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_STAT_REG 0x00000104
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_UNSUPT_REQ_ERR_STAT_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_STAT_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_STAT_LBN 19
+#define PCRF_AZ_ECRC_ERR_STAT_WIDTH 1
+#define PCRF_AZ_MALF_TLP_STAT_LBN 18
+#define PCRF_AZ_MALF_TLP_STAT_WIDTH 1
+#define PCRF_AZ_RX_OVF_STAT_LBN 17
+#define PCRF_AZ_RX_OVF_STAT_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_STAT_LBN 16
+#define PCRF_AZ_UNEXP_COMP_STAT_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_STAT_LBN 15
+#define PCRF_AZ_COMP_ABRT_STAT_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_STAT_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_STAT_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_STAT_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_STAT_WIDTH 1
+#define PCRF_AZ_PSON_TLP_STAT_LBN 12
+#define PCRF_AZ_PSON_TLP_STAT_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_STAT_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_STAT_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_STAT_LBN 0
+#define PCRF_AB_TRAIN_ERR_STAT_WIDTH 1
+
+
+/*
+ * PC_AER_UNCORR_ERR_MASK_REG(32bit):
+ * AER Uncorrectable error mask register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_MASK_REG 0x00000108
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_DZ_ATOMIC_OP_EGR_BLOCKED_MASK_LBN 24
+#define PCRF_DZ_ATOMIC_OP_EGR_BLOCKED_MASK_WIDTH 1
+#define PCRF_DZ_UNCORR_INT_ERR_MASK_LBN 22
+#define PCRF_DZ_UNCORR_INT_ERR_MASK_WIDTH 1
+#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_MASK_LBN 19
+#define PCRF_AZ_ECRC_ERR_MASK_WIDTH 1
+#define PCRF_AZ_MALF_TLP_MASK_LBN 18
+#define PCRF_AZ_MALF_TLP_MASK_WIDTH 1
+#define PCRF_AZ_RX_OVF_MASK_LBN 17
+#define PCRF_AZ_RX_OVF_MASK_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_MASK_LBN 16
+#define PCRF_AZ_UNEXP_COMP_MASK_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_MASK_LBN 15
+#define PCRF_AZ_COMP_ABRT_MASK_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_MASK_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_MASK_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_MASK_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_MASK_WIDTH 1
+#define PCRF_AZ_PSON_TLP_MASK_LBN 12
+#define PCRF_AZ_PSON_TLP_MASK_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_MASK_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_MASK_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_MASK_LBN 0
+#define PCRF_AB_TRAIN_ERR_MASK_WIDTH 1
+
+
+/*
+ * PC_AER_UNCORR_ERR_SEV_REG(32bit):
+ * AER Uncorrectable error severity register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_SEV_REG 0x0000010c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_SEV_LBN 19
+#define PCRF_AZ_ECRC_ERR_SEV_WIDTH 1
+#define PCRF_AZ_MALF_TLP_SEV_LBN 18
+#define PCRF_AZ_MALF_TLP_SEV_WIDTH 1
+#define PCRF_AZ_RX_OVF_SEV_LBN 17
+#define PCRF_AZ_RX_OVF_SEV_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_SEV_LBN 16
+#define PCRF_AZ_UNEXP_COMP_SEV_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_SEV_LBN 15
+#define PCRF_AZ_COMP_ABRT_SEV_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_SEV_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_SEV_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_SEV_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_SEV_WIDTH 1
+#define PCRF_AZ_PSON_TLP_SEV_LBN 12
+#define PCRF_AZ_PSON_TLP_SEV_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_SEV_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_SEV_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_SEV_LBN 0
+#define PCRF_AB_TRAIN_ERR_SEV_WIDTH 1
+
+
+/*
+ * PC_AER_CORR_ERR_STAT_REG(32bit):
+ * AER Correctable error status register
+ */
+
+#define PCR_AZ_AER_CORR_ERR_STAT_REG 0x00000110
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ADVSY_NON_FATAL_STAT_LBN 13
+#define PCRF_CZ_ADVSY_NON_FATAL_STAT_WIDTH 1
+#define PCRF_AZ_RPLY_TMR_TOUT_STAT_LBN 12
+#define PCRF_AZ_RPLY_TMR_TOUT_STAT_WIDTH 1
+#define PCRF_AZ_RPLAY_NUM_RO_STAT_LBN 8
+#define PCRF_AZ_RPLAY_NUM_RO_STAT_WIDTH 1
+#define PCRF_AZ_BAD_DLLP_STAT_LBN 7
+#define PCRF_AZ_BAD_DLLP_STAT_WIDTH 1
+#define PCRF_AZ_BAD_TLP_STAT_LBN 6
+#define PCRF_AZ_BAD_TLP_STAT_WIDTH 1
+#define PCRF_AZ_RX_ERR_STAT_LBN 0
+#define PCRF_AZ_RX_ERR_STAT_WIDTH 1
+
+
+/*
+ * PC_AER_CORR_ERR_MASK_REG(32bit):
+ * AER Correctable error status register
+ */
+
+#define PCR_AZ_AER_CORR_ERR_MASK_REG 0x00000114
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ADVSY_NON_FATAL_MASK_LBN 13
+#define PCRF_CZ_ADVSY_NON_FATAL_MASK_WIDTH 1
+#define PCRF_AZ_RPLY_TMR_TOUT_MASK_LBN 12
+#define PCRF_AZ_RPLY_TMR_TOUT_MASK_WIDTH 1
+#define PCRF_AZ_RPLAY_NUM_RO_MASK_LBN 8
+#define PCRF_AZ_RPLAY_NUM_RO_MASK_WIDTH 1
+#define PCRF_AZ_BAD_DLLP_MASK_LBN 7
+#define PCRF_AZ_BAD_DLLP_MASK_WIDTH 1
+#define PCRF_AZ_BAD_TLP_MASK_LBN 6
+#define PCRF_AZ_BAD_TLP_MASK_WIDTH 1
+#define PCRF_AZ_RX_ERR_MASK_LBN 0
+#define PCRF_AZ_RX_ERR_MASK_WIDTH 1
+
+
+/*
+ * PC_AER_CAP_CTL_REG(32bit):
+ * AER capability and control register
+ */
+
+#define PCR_AZ_AER_CAP_CTL_REG 0x00000118
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_ECRC_CHK_EN_LBN 8
+#define PCRF_AZ_ECRC_CHK_EN_WIDTH 1
+#define PCRF_AZ_ECRC_CHK_CAP_LBN 7
+#define PCRF_AZ_ECRC_CHK_CAP_WIDTH 1
+#define PCRF_AZ_ECRC_GEN_EN_LBN 6
+#define PCRF_AZ_ECRC_GEN_EN_WIDTH 1
+#define PCRF_AZ_ECRC_GEN_CAP_LBN 5
+#define PCRF_AZ_ECRC_GEN_CAP_WIDTH 1
+#define PCRF_AZ_1ST_ERR_PTR_LBN 0
+#define PCRF_AZ_1ST_ERR_PTR_WIDTH 5
+
+
+/*
+ * PC_AER_HDR_LOG_REG(128bit):
+ * AER Header log register
+ */
+
+#define PCR_AZ_AER_HDR_LOG_REG 0x0000011c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_HDR_LOG_LBN 0
+#define PCRF_AZ_HDR_LOG_WIDTH 128
+
+
+/*
+ * PC_DEVSN_CAP_HDR_REG(32bit):
+ * Device serial number capability header register
+ */
+
+#define PCR_CZ_DEVSN_CAP_HDR_REG 0x00000140
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_DEVSNCAPHDR_VER_LBN 16
+#define PCRF_CZ_DEVSNCAPHDR_VER_WIDTH 4
+#define PCRF_CZ_DEVSNCAPHDR_ID_LBN 0
+#define PCRF_CZ_DEVSNCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_DEVSN_DWORD0_REG(32bit):
+ * Device serial number DWORD0
+ */
+
+#define PCR_CZ_DEVSN_DWORD0_REG 0x00000144
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSN_DWORD0_LBN 0
+#define PCRF_CZ_DEVSN_DWORD0_WIDTH 32
+
+
+/*
+ * PC_DEVSN_DWORD1_REG(32bit):
+ * Device serial number DWORD0
+ */
+
+#define PCR_CZ_DEVSN_DWORD1_REG 0x00000148
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSN_DWORD1_LBN 0
+#define PCRF_CZ_DEVSN_DWORD1_WIDTH 32
+
+
+/*
+ * PC_ARI_CAP_HDR_REG(32bit):
+ * ARI capability header register
+ */
+
+#define PCR_CZ_ARI_CAP_HDR_REG 0x00000150
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARICAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_ARICAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_ARICAPHDR_VER_LBN 16
+#define PCRF_CZ_ARICAPHDR_VER_WIDTH 4
+#define PCRF_CZ_ARICAPHDR_ID_LBN 0
+#define PCRF_CZ_ARICAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_ARI_CAP_REG(16bit):
+ * ARI Capabilities
+ */
+
+#define PCR_CZ_ARI_CAP_REG 0x00000154
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARI_NXT_FN_NUM_LBN 8
+#define PCRF_CZ_ARI_NXT_FN_NUM_WIDTH 8
+#define PCRF_CZ_ARI_ACS_FNGRP_CAP_LBN 1
+#define PCRF_CZ_ARI_ACS_FNGRP_CAP_WIDTH 1
+#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_LBN 0
+#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_WIDTH 1
+
+
+/*
+ * PC_ARI_CTL_REG(16bit):
+ * ARI Control
+ */
+
+#define PCR_CZ_ARI_CTL_REG 0x00000156
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARI_FN_GRP_LBN 4
+#define PCRF_CZ_ARI_FN_GRP_WIDTH 3
+#define PCRF_CZ_ARI_ACS_FNGRP_EN_LBN 1
+#define PCRF_CZ_ARI_ACS_FNGRP_EN_WIDTH 1
+#define PCRF_CZ_ARI_MFVC_FNGRP_EN_LBN 0
+#define PCRF_CZ_ARI_MFVC_FNGRP_EN_WIDTH 1
+
+
+/*
+ * PC_SEC_PCIE_CAP_REG(32bit):
+ * Secondary PCIE Capability Register
+ */
+
+#define PCR_DZ_SEC_PCIE_CAP_REG 0x00000160
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_SEC_NXT_PTR_LBN 20
+#define PCRF_DZ_SEC_NXT_PTR_WIDTH 12
+#define PCRF_DZ_SEC_VERSION_LBN 16
+#define PCRF_DZ_SEC_VERSION_WIDTH 4
+#define PCRF_DZ_SEC_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_SEC_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_CAP_HDR_REG(32bit):
+ * SRIOV capability header register
+ */
+
+#define PCR_CC_SRIOV_CAP_HDR_REG 0x00000160
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CAP_HDR_REG 0x00000180
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_SRIOVCAPHDR_VER_LBN 16
+#define PCRF_CZ_SRIOVCAPHDR_VER_WIDTH 4
+#define PCRF_CZ_SRIOVCAPHDR_ID_LBN 0
+#define PCRF_CZ_SRIOVCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_CAP_REG(32bit):
+ * SRIOV Capabilities
+ */
+
+#define PCR_CC_SRIOV_CAP_REG 0x00000164
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CAP_REG 0x00000184
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_LBN 21
+#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_WIDTH 11
+#define PCRF_DZ_VF_ARI_CAP_PRESV_LBN 1
+#define PCRF_DZ_VF_ARI_CAP_PRESV_WIDTH 1
+#define PCRF_CZ_VF_MIGR_CAP_LBN 0
+#define PCRF_CZ_VF_MIGR_CAP_WIDTH 1
+
+
+/*
+ * PC_LINK_CONTROL3_REG(32bit):
+ * Link Control 3.
+ */
+
+#define PCR_DZ_LINK_CONTROL3_REG 0x00000164
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LINK_EQ_INT_EN_LBN 1
+#define PCRF_DZ_LINK_EQ_INT_EN_WIDTH 1
+#define PCRF_DZ_PERFORM_EQL_LBN 0
+#define PCRF_DZ_PERFORM_EQL_WIDTH 1
+
+
+/*
+ * PC_LANE_ERROR_STAT_REG(32bit):
+ * Lane Error Status Register.
+ */
+
+#define PCR_DZ_LANE_ERROR_STAT_REG 0x00000168
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE_STATUS_LBN 0
+#define PCRF_DZ_LANE_STATUS_WIDTH 8
+
+
+/*
+ * PC_SRIOV_CTL_REG(16bit):
+ * SRIOV Control
+ */
+
+#define PCR_CC_SRIOV_CTL_REG 0x00000168
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CTL_REG 0x00000188
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_ARI_CAP_HRCHY_LBN 4
+#define PCRF_CZ_VF_ARI_CAP_HRCHY_WIDTH 1
+#define PCRF_CZ_VF_MSE_LBN 3
+#define PCRF_CZ_VF_MSE_WIDTH 1
+#define PCRF_CZ_VF_MIGR_INT_EN_LBN 2
+#define PCRF_CZ_VF_MIGR_INT_EN_WIDTH 1
+#define PCRF_CZ_VF_MIGR_EN_LBN 1
+#define PCRF_CZ_VF_MIGR_EN_WIDTH 1
+#define PCRF_CZ_VF_EN_LBN 0
+#define PCRF_CZ_VF_EN_WIDTH 1
+
+
+/*
+ * PC_SRIOV_STAT_REG(16bit):
+ * SRIOV Status
+ */
+
+#define PCR_CC_SRIOV_STAT_REG 0x0000016a
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_STAT_REG 0x0000018a
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_STAT_LBN 0
+#define PCRF_CZ_VF_MIGR_STAT_WIDTH 1
+
+
+/*
+ * PC_LANE01_EQU_CONTROL_REG(32bit):
+ * Lanes 0,1 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE01_EQU_CONTROL_REG 0x0000016c
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE1_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE1_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE0_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE0_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_INITIALVFS_REG(16bit):
+ * SRIOV Initial VFs
+ */
+
+#define PCR_CC_SRIOV_INITIALVFS_REG 0x0000016c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_INITIALVFS_REG 0x0000018c
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_INITIALVFS_LBN 0
+#define PCRF_CZ_VF_INITIALVFS_WIDTH 16
+
+
+/*
+ * PC_SRIOV_TOTALVFS_REG(10bit):
+ * SRIOV Total VFs
+ */
+
+#define PCR_CC_SRIOV_TOTALVFS_REG 0x0000016e
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_TOTALVFS_REG 0x0000018e
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_TOTALVFS_LBN 0
+#define PCRF_CZ_VF_TOTALVFS_WIDTH 16
+
+
+/*
+ * PC_SRIOV_NUMVFS_REG(16bit):
+ * SRIOV Number of VFs
+ */
+
+#define PCR_CC_SRIOV_NUMVFS_REG 0x00000170
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_NUMVFS_REG 0x00000190
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_NUMVFS_LBN 0
+#define PCRF_CZ_VF_NUMVFS_WIDTH 16
+
+
+/*
+ * PC_LANE23_EQU_CONTROL_REG(32bit):
+ * Lanes 2,3 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE23_EQU_CONTROL_REG 0x00000170
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE3_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE3_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE2_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE2_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_FN_DPND_LNK_REG(16bit):
+ * SRIOV Function dependency link
+ */
+
+#define PCR_CC_SRIOV_FN_DPND_LNK_REG 0x00000172
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_FN_DPND_LNK_REG 0x00000192
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_SRIOV_FN_DPND_LNK_LBN 0
+#define PCRF_CZ_SRIOV_FN_DPND_LNK_WIDTH 8
+
+
+/*
+ * PC_SRIOV_1STVF_OFFSET_REG(16bit):
+ * SRIOV First VF Offset
+ */
+
+#define PCR_CC_SRIOV_1STVF_OFFSET_REG 0x00000174
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_1STVF_OFFSET_REG 0x00000194
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_1STVF_OFFSET_LBN 0
+#define PCRF_CZ_VF_1STVF_OFFSET_WIDTH 16
+
+
+/*
+ * PC_LANE45_EQU_CONTROL_REG(32bit):
+ * Lanes 4,5 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE45_EQU_CONTROL_REG 0x00000174
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE5_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE5_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE4_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE4_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_VFSTRIDE_REG(16bit):
+ * SRIOV VF Stride
+ */
+
+#define PCR_CC_SRIOV_VFSTRIDE_REG 0x00000176
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_VFSTRIDE_REG 0x00000196
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_VFSTRIDE_LBN 0
+#define PCRF_CZ_VF_VFSTRIDE_WIDTH 16
+
+
+/*
+ * PC_LANE67_EQU_CONTROL_REG(32bit):
+ * Lanes 6,7 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE67_EQU_CONTROL_REG 0x00000178
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE7_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE7_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE6_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE6_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_DEVID_REG(16bit):
+ * SRIOV VF Device ID
+ */
+
+#define PCR_CC_SRIOV_DEVID_REG 0x0000017a
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_DEVID_REG 0x0000019a
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_DEVID_LBN 0
+#define PCRF_CZ_VF_DEVID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_SUP_PAGESZ_REG(16bit):
+ * SRIOV Supported Page Sizes
+ */
+
+#define PCR_CC_SRIOV_SUP_PAGESZ_REG 0x0000017c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_SUP_PAGESZ_REG 0x0000019c
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_SUP_PAGESZ_LBN 0
+#define PCRF_CZ_VF_SUP_PAGESZ_WIDTH 16
+
+
+/*
+ * PC_SRIOV_SYS_PAGESZ_REG(32bit):
+ * SRIOV System Page Size
+ */
+
+#define PCR_CC_SRIOV_SYS_PAGESZ_REG 0x00000180
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_SYS_PAGESZ_REG 0x000001a0
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_SYS_PAGESZ_LBN 0
+#define PCRF_CZ_VF_SYS_PAGESZ_WIDTH 16
+
+
+/*
+ * PC_SRIOV_BAR0_REG(32bit):
+ * SRIOV VF Bar0
+ */
+
+#define PCR_CC_SRIOV_BAR0_REG 0x00000184
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR0_REG 0x000001a4
+/* hunta0=pci_f0_config */
+
+#define PCRF_CC_VF_BAR_ADDRESS_LBN 0
+#define PCRF_CC_VF_BAR_ADDRESS_WIDTH 32
+#define PCRF_DZ_VF_BAR0_ADDRESS_LBN 4
+#define PCRF_DZ_VF_BAR0_ADDRESS_WIDTH 28
+#define PCRF_DZ_VF_BAR0_PREF_LBN 3
+#define PCRF_DZ_VF_BAR0_PREF_WIDTH 1
+#define PCRF_DZ_VF_BAR0_TYPE_LBN 1
+#define PCRF_DZ_VF_BAR0_TYPE_WIDTH 2
+#define PCRF_DZ_VF_BAR0_IOM_LBN 0
+#define PCRF_DZ_VF_BAR0_IOM_WIDTH 1
+
+
+/*
+ * PC_SRIOV_BAR1_REG(32bit):
+ * SRIOV Bar1
+ */
+
+#define PCR_CC_SRIOV_BAR1_REG 0x00000188
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR1_REG 0x000001a8
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR1_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR1_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR2_REG(32bit):
+ * SRIOV Bar2
+ */
+
+#define PCR_CC_SRIOV_BAR2_REG 0x0000018c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR2_REG 0x000001ac
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR2_ADDRESS_LBN 4
+#define PCRF_DZ_VF_BAR2_ADDRESS_WIDTH 28
+#define PCRF_DZ_VF_BAR2_PREF_LBN 3
+#define PCRF_DZ_VF_BAR2_PREF_WIDTH 1
+#define PCRF_DZ_VF_BAR2_TYPE_LBN 1
+#define PCRF_DZ_VF_BAR2_TYPE_WIDTH 2
+#define PCRF_DZ_VF_BAR2_IOM_LBN 0
+#define PCRF_DZ_VF_BAR2_IOM_WIDTH 1
+
+
+/*
+ * PC_SRIOV_BAR3_REG(32bit):
+ * SRIOV Bar3
+ */
+
+#define PCR_CC_SRIOV_BAR3_REG 0x00000190
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR3_REG 0x000001b0
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR3_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR3_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR4_REG(32bit):
+ * SRIOV Bar4
+ */
+
+#define PCR_CC_SRIOV_BAR4_REG 0x00000194
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR4_REG 0x000001b4
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR4_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR4_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR5_REG(32bit):
+ * SRIOV Bar5
+ */
+
+#define PCR_CC_SRIOV_BAR5_REG 0x00000198
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR5_REG 0x000001b8
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR5_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR5_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_RSVD_REG(16bit):
+ * Reserved register
+ */
+
+#define PCR_DZ_SRIOV_RSVD_REG 0x00000198
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_VF_RSVD_LBN 0
+#define PCRF_DZ_VF_RSVD_WIDTH 16
+
+
+/*
+ * PC_SRIOV_MIBR_SARRAY_OFFSET_REG(32bit):
+ * SRIOV VF Migration State Array Offset
+ */
+
+#define PCR_CC_SRIOV_MIBR_SARRAY_OFFSET_REG 0x0000019c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_MIBR_SARRAY_OFFSET_REG 0x000001bc
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_OFFSET_LBN 3
+#define PCRF_CZ_VF_MIGR_OFFSET_WIDTH 29
+#define PCRF_CZ_VF_MIGR_BIR_LBN 0
+#define PCRF_CZ_VF_MIGR_BIR_WIDTH 3
+
+
+/*
+ * PC_TPH_CAP_HDR_REG(32bit):
+ * TPH Capability Header Register
+ */
+
+#define PCR_DZ_TPH_CAP_HDR_REG 0x000001c0
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_TPH_NXT_PTR_LBN 20
+#define PCRF_DZ_TPH_NXT_PTR_WIDTH 12
+#define PCRF_DZ_TPH_VERSION_LBN 16
+#define PCRF_DZ_TPH_VERSION_WIDTH 4
+#define PCRF_DZ_TPH_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_TPH_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_TPH_REQ_CAP_REG(32bit):
+ * TPH Requester Capability Register
+ */
+
+#define PCR_DZ_TPH_REQ_CAP_REG 0x000001c4
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_ST_TBLE_SIZE_LBN 16
+#define PCRF_DZ_ST_TBLE_SIZE_WIDTH 11
+#define PCRF_DZ_ST_TBLE_LOC_LBN 9
+#define PCRF_DZ_ST_TBLE_LOC_WIDTH 2
+#define PCRF_DZ_EXT_TPH_MODE_SUP_LBN 8
+#define PCRF_DZ_EXT_TPH_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_DEV_MODE_SUP_LBN 2
+#define PCRF_DZ_TPH_DEV_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_INT_MODE_SUP_LBN 1
+#define PCRF_DZ_TPH_INT_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_NOST_MODE_SUP_LBN 0
+#define PCRF_DZ_TPH_NOST_MODE_SUP_WIDTH 1
+
+
+/*
+ * PC_TPH_REQ_CTL_REG(32bit):
+ * TPH Requester Control Register
+ */
+
+#define PCR_DZ_TPH_REQ_CTL_REG 0x000001c8
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_TPH_REQ_ENABLE_LBN 8
+#define PCRF_DZ_TPH_REQ_ENABLE_WIDTH 2
+#define PCRF_DZ_TPH_ST_MODE_LBN 0
+#define PCRF_DZ_TPH_ST_MODE_WIDTH 3
+
+
+/*
+ * PC_LTR_CAP_HDR_REG(32bit):
+ * Latency Tolerance Reporting Cap Header Reg
+ */
+
+#define PCR_DZ_LTR_CAP_HDR_REG 0x00000290
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LTR_NXT_PTR_LBN 20
+#define PCRF_DZ_LTR_NXT_PTR_WIDTH 12
+#define PCRF_DZ_LTR_VERSION_LBN 16
+#define PCRF_DZ_LTR_VERSION_WIDTH 4
+#define PCRF_DZ_LTR_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_LTR_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_LTR_MAX_SNOOP_REG(32bit):
+ * LTR Maximum Snoop/No Snoop Register
+ */
+
+#define PCR_DZ_LTR_MAX_SNOOP_REG 0x00000294
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_LBN 26
+#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_WIDTH 3
+#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_LBN 16
+#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_WIDTH 10
+#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_LBN 10
+#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_WIDTH 3
+#define PCRF_DZ_LTR_MAX_SNOOP_LAT_LBN 0
+#define PCRF_DZ_LTR_MAX_SNOOP_LAT_WIDTH 10
+
+
+/*
+ * PC_ACK_LAT_TMR_REG(32bit):
+ * ACK latency timer & replay timer register
+ */
+
+#define PCR_AC_ACK_LAT_TMR_REG 0x00000700
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_RT_LBN 16
+#define PCRF_AC_RT_WIDTH 16
+#define PCRF_AC_ALT_LBN 0
+#define PCRF_AC_ALT_WIDTH 16
+
+
+/*
+ * PC_OTHER_MSG_REG(32bit):
+ * Other message register
+ */
+
+#define PCR_AC_OTHER_MSG_REG 0x00000704
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_OM_CRPT3_LBN 24
+#define PCRF_AC_OM_CRPT3_WIDTH 8
+#define PCRF_AC_OM_CRPT2_LBN 16
+#define PCRF_AC_OM_CRPT2_WIDTH 8
+#define PCRF_AC_OM_CRPT1_LBN 8
+#define PCRF_AC_OM_CRPT1_WIDTH 8
+#define PCRF_AC_OM_CRPT0_LBN 0
+#define PCRF_AC_OM_CRPT0_WIDTH 8
+
+
+/*
+ * PC_FORCE_LNK_REG(24bit):
+ * Port force link register
+ */
+
+#define PCR_AC_FORCE_LNK_REG 0x00000708
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_LFS_LBN 16
+#define PCRF_AC_LFS_WIDTH 6
+#define PCRF_AC_FL_LBN 15
+#define PCRF_AC_FL_WIDTH 1
+#define PCRF_AC_LN_LBN 0
+#define PCRF_AC_LN_WIDTH 8
+
+
+/*
+ * PC_ACK_FREQ_REG(32bit):
+ * ACK frequency register
+ */
+
+#define PCR_AC_ACK_FREQ_REG 0x0000070c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_LBN 30
+#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_WIDTH 1
+#define PCRF_AC_L1_ENTR_LAT_LBN 27
+#define PCRF_AC_L1_ENTR_LAT_WIDTH 3
+#define PCRF_AC_L0_ENTR_LAT_LBN 24
+#define PCRF_AC_L0_ENTR_LAT_WIDTH 3
+#define PCRF_CC_COMM_NFTS_LBN 16
+#define PCRF_CC_COMM_NFTS_WIDTH 8
+#define PCRF_AB_ACK_FREQ_REG_RSVD0_LBN 16
+#define PCRF_AB_ACK_FREQ_REG_RSVD0_WIDTH 3
+#define PCRF_AC_MAX_FTS_LBN 8
+#define PCRF_AC_MAX_FTS_WIDTH 8
+#define PCRF_AC_ACK_FREQ_LBN 0
+#define PCRF_AC_ACK_FREQ_WIDTH 8
+
+
+/*
+ * PC_PORT_LNK_CTL_REG(32bit):
+ * Port link control register
+ */
+
+#define PCR_AC_PORT_LNK_CTL_REG 0x00000710
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AB_LRE_LBN 27
+#define PCRF_AB_LRE_WIDTH 1
+#define PCRF_AB_ESYNC_LBN 26
+#define PCRF_AB_ESYNC_WIDTH 1
+#define PCRF_AB_CRPT_LBN 25
+#define PCRF_AB_CRPT_WIDTH 1
+#define PCRF_AB_XB_LBN 24
+#define PCRF_AB_XB_WIDTH 1
+#define PCRF_AC_LC_LBN 16
+#define PCRF_AC_LC_WIDTH 6
+#define PCRF_AC_LDR_LBN 8
+#define PCRF_AC_LDR_WIDTH 4
+#define PCRF_AC_FLM_LBN 7
+#define PCRF_AC_FLM_WIDTH 1
+#define PCRF_AC_LKD_LBN 6
+#define PCRF_AC_LKD_WIDTH 1
+#define PCRF_AC_DLE_LBN 5
+#define PCRF_AC_DLE_WIDTH 1
+#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_LBN 4
+#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_WIDTH 1
+#define PCRF_AC_RA_LBN 3
+#define PCRF_AC_RA_WIDTH 1
+#define PCRF_AC_LE_LBN 2
+#define PCRF_AC_LE_WIDTH 1
+#define PCRF_AC_SD_LBN 1
+#define PCRF_AC_SD_WIDTH 1
+#define PCRF_AC_OMR_LBN 0
+#define PCRF_AC_OMR_WIDTH 1
+
+
+/*
+ * PC_LN_SKEW_REG(32bit):
+ * Lane skew register
+ */
+
+#define PCR_AC_LN_SKEW_REG 0x00000714
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_DIS_LBN 31
+#define PCRF_AC_DIS_WIDTH 1
+#define PCRF_AB_RST_LBN 30
+#define PCRF_AB_RST_WIDTH 1
+#define PCRF_AC_AD_LBN 25
+#define PCRF_AC_AD_WIDTH 1
+#define PCRF_AC_FCD_LBN 24
+#define PCRF_AC_FCD_WIDTH 1
+#define PCRF_AC_LS2_LBN 16
+#define PCRF_AC_LS2_WIDTH 8
+#define PCRF_AC_LS1_LBN 8
+#define PCRF_AC_LS1_WIDTH 8
+#define PCRF_AC_LS0_LBN 0
+#define PCRF_AC_LS0_WIDTH 8
+
+
+/*
+ * PC_SYM_NUM_REG(16bit):
+ * Symbol number register
+ */
+
+#define PCR_AC_SYM_NUM_REG 0x00000718
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_CC_MAX_FUNCTIONS_LBN 29
+#define PCRF_CC_MAX_FUNCTIONS_WIDTH 3
+#define PCRF_CC_FC_WATCHDOG_TMR_LBN 24
+#define PCRF_CC_FC_WATCHDOG_TMR_WIDTH 5
+#define PCRF_CC_ACK_NAK_TMR_MOD_LBN 19
+#define PCRF_CC_ACK_NAK_TMR_MOD_WIDTH 5
+#define PCRF_CC_REPLAY_TMR_MOD_LBN 14
+#define PCRF_CC_REPLAY_TMR_MOD_WIDTH 5
+#define PCRF_AB_ES_LBN 12
+#define PCRF_AB_ES_WIDTH 3
+#define PCRF_AB_SYM_NUM_REG_RSVD0_LBN 11
+#define PCRF_AB_SYM_NUM_REG_RSVD0_WIDTH 1
+#define PCRF_CC_NUM_SKP_SYMS_LBN 8
+#define PCRF_CC_NUM_SKP_SYMS_WIDTH 3
+#define PCRF_AB_TS2_LBN 4
+#define PCRF_AB_TS2_WIDTH 4
+#define PCRF_AC_TS1_LBN 0
+#define PCRF_AC_TS1_WIDTH 4
+
+
+/*
+ * PC_SYM_TMR_FLT_MSK_REG(16bit):
+ * Symbol timer and Filter Mask Register
+ */
+
+#define PCR_CC_SYM_TMR_FLT_MSK_REG 0x0000071c
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_DEFAULT_FLT_MSK1_LBN 16
+#define PCRF_CC_DEFAULT_FLT_MSK1_WIDTH 16
+#define PCRF_CC_FC_WDOG_TMR_DIS_LBN 15
+#define PCRF_CC_FC_WDOG_TMR_DIS_WIDTH 1
+#define PCRF_CC_SI1_LBN 8
+#define PCRF_CC_SI1_WIDTH 3
+#define PCRF_CC_SKIP_INT_VAL_LBN 0
+#define PCRF_CC_SKIP_INT_VAL_WIDTH 11
+#define PCRF_CC_SI0_LBN 0
+#define PCRF_CC_SI0_WIDTH 8
+
+
+/*
+ * PC_SYM_TMR_REG(16bit):
+ * Symbol timer register
+ */
+
+#define PCR_AB_SYM_TMR_REG 0x0000071c
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_ET_LBN 11
+#define PCRF_AB_ET_WIDTH 4
+#define PCRF_AB_SI1_LBN 8
+#define PCRF_AB_SI1_WIDTH 3
+#define PCRF_AB_SI0_LBN 0
+#define PCRF_AB_SI0_WIDTH 8
+
+
+/*
+ * PC_FLT_MSK_REG(32bit):
+ * Filter Mask Register 2
+ */
+
+#define PCR_CC_FLT_MSK_REG 0x00000720
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_DEFAULT_FLT_MSK2_LBN 0
+#define PCRF_CC_DEFAULT_FLT_MSK2_WIDTH 32
+
+
+/*
+ * PC_PHY_STAT_REG(32bit):
+ * PHY status register
+ */
+
+#define PCR_AB_PHY_STAT_REG 0x00000720
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PHY_STAT_REG 0x00000810
+/* sienaa0=pci_f0_config */
+
+#define PCRF_AC_SSL_LBN 3
+#define PCRF_AC_SSL_WIDTH 1
+#define PCRF_AC_SSR_LBN 2
+#define PCRF_AC_SSR_WIDTH 1
+#define PCRF_AC_SSCL_LBN 1
+#define PCRF_AC_SSCL_WIDTH 1
+#define PCRF_AC_SSCD_LBN 0
+#define PCRF_AC_SSCD_WIDTH 1
+
+
+/*
+ * PC_PHY_CTL_REG(32bit):
+ * PHY control register
+ */
+
+#define PCR_AB_PHY_CTL_REG 0x00000724
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PHY_CTL_REG 0x00000814
+/* sienaa0=pci_f0_config */
+
+#define PCRF_AC_BD_LBN 31
+#define PCRF_AC_BD_WIDTH 1
+#define PCRF_AC_CDS_LBN 30
+#define PCRF_AC_CDS_WIDTH 1
+#define PCRF_AC_DWRAP_LB_LBN 29
+#define PCRF_AC_DWRAP_LB_WIDTH 1
+#define PCRF_AC_EBD_LBN 28
+#define PCRF_AC_EBD_WIDTH 1
+#define PCRF_AC_SNR_LBN 27
+#define PCRF_AC_SNR_WIDTH 1
+#define PCRF_AC_RX_NOT_DET_LBN 2
+#define PCRF_AC_RX_NOT_DET_WIDTH 1
+#define PCRF_AC_FORCE_LOS_VAL_LBN 1
+#define PCRF_AC_FORCE_LOS_VAL_WIDTH 1
+#define PCRF_AC_FORCE_LOS_EN_LBN 0
+#define PCRF_AC_FORCE_LOS_EN_WIDTH 1
+
+
+/*
+ * PC_DEBUG0_REG(32bit):
+ * Debug register 0
+ */
+
+#define PCR_AC_DEBUG0_REG 0x00000728
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_CDI03_LBN 24
+#define PCRF_AC_CDI03_WIDTH 8
+#define PCRF_AC_CDI0_LBN 0
+#define PCRF_AC_CDI0_WIDTH 32
+#define PCRF_AC_CDI02_LBN 16
+#define PCRF_AC_CDI02_WIDTH 8
+#define PCRF_AC_CDI01_LBN 8
+#define PCRF_AC_CDI01_WIDTH 8
+#define PCRF_AC_CDI00_LBN 0
+#define PCRF_AC_CDI00_WIDTH 8
+
+
+/*
+ * PC_DEBUG1_REG(32bit):
+ * Debug register 1
+ */
+
+#define PCR_AC_DEBUG1_REG 0x0000072c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_CDI13_LBN 24
+#define PCRF_AC_CDI13_WIDTH 8
+#define PCRF_AC_CDI1_LBN 0
+#define PCRF_AC_CDI1_WIDTH 32
+#define PCRF_AC_CDI12_LBN 16
+#define PCRF_AC_CDI12_WIDTH 8
+#define PCRF_AC_CDI11_LBN 8
+#define PCRF_AC_CDI11_WIDTH 8
+#define PCRF_AC_CDI10_LBN 0
+#define PCRF_AC_CDI10_WIDTH 8
+
+
+/*
+ * PC_XPFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XPFCC_STAT_REG
+ */
+
+#define PCR_AC_XPFCC_STAT_REG 0x00000730
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XPDC_LBN 12
+#define PCRF_AC_XPDC_WIDTH 8
+#define PCRF_AC_XPHC_LBN 0
+#define PCRF_AC_XPHC_WIDTH 12
+
+
+/*
+ * PC_XNPFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XNPFCC_STAT_REG
+ */
+
+#define PCR_AC_XNPFCC_STAT_REG 0x00000734
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XNPDC_LBN 12
+#define PCRF_AC_XNPDC_WIDTH 8
+#define PCRF_AC_XNPHC_LBN 0
+#define PCRF_AC_XNPHC_WIDTH 12
+
+
+/*
+ * PC_XCFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XCFCC_STAT_REG
+ */
+
+#define PCR_AC_XCFCC_STAT_REG 0x00000738
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XCDC_LBN 12
+#define PCRF_AC_XCDC_WIDTH 8
+#define PCRF_AC_XCHC_LBN 0
+#define PCRF_AC_XCHC_WIDTH 12
+
+
+/*
+ * PC_Q_STAT_REG(8bit):
+ * documentation to be written for sum_PC_Q_STAT_REG
+ */
+
+#define PCR_AC_Q_STAT_REG 0x0000073c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_RQNE_LBN 2
+#define PCRF_AC_RQNE_WIDTH 1
+#define PCRF_AC_XRNE_LBN 1
+#define PCRF_AC_XRNE_WIDTH 1
+#define PCRF_AC_RCNR_LBN 0
+#define PCRF_AC_RCNR_WIDTH 1
+
+
+/*
+ * PC_VC_XMIT_ARB1_REG(32bit):
+ * VC Transmit Arbitration Register 1
+ */
+
+#define PCR_CC_VC_XMIT_ARB1_REG 0x00000740
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC_XMIT_ARB2_REG(32bit):
+ * VC Transmit Arbitration Register 2
+ */
+
+#define PCR_CC_VC_XMIT_ARB2_REG 0x00000744
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_P_RQ_CTL_REG(32bit):
+ * VC0 Posted Receive Queue Control
+ */
+
+#define PCR_CC_VC0_P_RQ_CTL_REG 0x00000748
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_NP_RQ_CTL_REG(32bit):
+ * VC0 Non-Posted Receive Queue Control
+ */
+
+#define PCR_CC_VC0_NP_RQ_CTL_REG 0x0000074c
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_C_RQ_CTL_REG(32bit):
+ * VC0 Completion Receive Queue Control
+ */
+
+#define PCR_CC_VC0_C_RQ_CTL_REG 0x00000750
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_GEN2_REG(32bit):
+ * Gen2 Register
+ */
+
+#define PCR_CC_GEN2_REG 0x0000080c
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_SET_DE_EMPHASIS_LBN 20
+#define PCRF_CC_SET_DE_EMPHASIS_WIDTH 1
+#define PCRF_CC_CFG_TX_COMPLIANCE_LBN 19
+#define PCRF_CC_CFG_TX_COMPLIANCE_WIDTH 1
+#define PCRF_CC_CFG_TX_SWING_LBN 18
+#define PCRF_CC_CFG_TX_SWING_WIDTH 1
+#define PCRF_CC_DIR_SPEED_CHANGE_LBN 17
+#define PCRF_CC_DIR_SPEED_CHANGE_WIDTH 1
+#define PCRF_CC_LANE_ENABLE_LBN 8
+#define PCRF_CC_LANE_ENABLE_WIDTH 9
+#define PCRF_CC_NUM_FTS_LBN 0
+#define PCRF_CC_NUM_FTS_WIDTH 8
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_REGS_PCI_H */
diff --git a/drivers/net/sfc/base/efx_rx.c b/drivers/net/sfc/base/efx_rx.c
new file mode 100644
index 00000000..c8156341
--- /dev/null
+++ b/drivers/net/sfc/base/efx_rx.c
@@ -0,0 +1,1315 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_rx_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+static __checkReturn efx_rc_t
+siena_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+static __checkReturn efx_rc_t
+siena_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+static __checkReturn efx_rc_t
+siena_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+static __checkReturn uint32_t
+siena_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+static __checkReturn efx_rc_t
+siena_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp);
+
+static void
+siena_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+static void
+siena_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+static void
+siena_rx_qps_update_credits(
+ __in efx_rxq_t *erp);
+
+static __checkReturn uint8_t *
+siena_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp);
+#endif
+
+static __checkReturn efx_rc_t
+siena_rx_qflush(
+ __in efx_rxq_t *erp);
+
+static void
+siena_rx_qenable(
+ __in efx_rxq_t *erp);
+
+static __checkReturn efx_rc_t
+siena_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp);
+
+static void
+siena_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_rx_ops_t __efx_rx_siena_ops = {
+ siena_rx_init, /* erxo_init */
+ siena_rx_fini, /* erxo_fini */
+#if EFSYS_OPT_RX_SCATTER
+ siena_rx_scatter_enable, /* erxo_scatter_enable */
+#endif
+#if EFSYS_OPT_RX_SCALE
+ siena_rx_scale_mode_set, /* erxo_scale_mode_set */
+ siena_rx_scale_key_set, /* erxo_scale_key_set */
+ siena_rx_scale_tbl_set, /* erxo_scale_tbl_set */
+ siena_rx_prefix_hash, /* erxo_prefix_hash */
+#endif
+ siena_rx_prefix_pktlen, /* erxo_prefix_pktlen */
+ siena_rx_qpost, /* erxo_qpost */
+ siena_rx_qpush, /* erxo_qpush */
+#if EFSYS_OPT_RX_PACKED_STREAM
+ siena_rx_qps_update_credits, /* erxo_qps_update_credits */
+ siena_rx_qps_packet_info, /* erxo_qps_packet_info */
+#endif
+ siena_rx_qflush, /* erxo_qflush */
+ siena_rx_qenable, /* erxo_qenable */
+ siena_rx_qcreate, /* erxo_qcreate */
+ siena_rx_qdestroy, /* erxo_qdestroy */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_rx_ops_t __efx_rx_ef10_ops = {
+ ef10_rx_init, /* erxo_init */
+ ef10_rx_fini, /* erxo_fini */
+#if EFSYS_OPT_RX_SCATTER
+ ef10_rx_scatter_enable, /* erxo_scatter_enable */
+#endif
+#if EFSYS_OPT_RX_SCALE
+ ef10_rx_scale_mode_set, /* erxo_scale_mode_set */
+ ef10_rx_scale_key_set, /* erxo_scale_key_set */
+ ef10_rx_scale_tbl_set, /* erxo_scale_tbl_set */
+ ef10_rx_prefix_hash, /* erxo_prefix_hash */
+#endif
+ ef10_rx_prefix_pktlen, /* erxo_prefix_pktlen */
+ ef10_rx_qpost, /* erxo_qpost */
+ ef10_rx_qpush, /* erxo_qpush */
+#if EFSYS_OPT_RX_PACKED_STREAM
+ ef10_rx_qps_update_credits, /* erxo_qps_update_credits */
+ ef10_rx_qps_packet_info, /* erxo_qps_packet_info */
+#endif
+ ef10_rx_qflush, /* erxo_qflush */
+ ef10_rx_qenable, /* erxo_qenable */
+ ef10_rx_qcreate, /* erxo_qcreate */
+ ef10_rx_qdestroy, /* erxo_qdestroy */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+
+ __checkReturn efx_rc_t
+efx_rx_init(
+ __inout efx_nic_t *enp)
+{
+ const efx_rx_ops_t *erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_EV)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_mod_flags & EFX_MOD_RX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ erxop = &__efx_rx_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ erxop = &__efx_rx_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ erxop = &__efx_rx_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+ if ((rc = erxop->erxo_init(enp)) != 0)
+ goto fail4;
+
+ enp->en_erxop = erxop;
+ enp->en_mod_flags |= EFX_MOD_RX;
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_erxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_RX;
+ return (rc);
+}
+
+ void
+efx_rx_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+ EFSYS_ASSERT3U(enp->en_rx_qcount, ==, 0);
+
+ erxop->erxo_fini(enp);
+
+ enp->en_erxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_RX;
+}
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+efx_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scatter_enable(enp, buf_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_hash_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_hash_support_t *supportp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (supportp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Report if resources are available to insert RX hash value */
+ *supportp = enp->en_hash_support;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_scale_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_scale_support_t *supportp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (supportp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Report if resources are available to support RSS */
+ *supportp = enp->en_rss_support;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (erxop->erxo_scale_mode_set != NULL) {
+ if ((rc = erxop->erxo_scale_mode_set(enp, alg,
+ type, insert)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scale_key_set(enp, key, n)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scale_tbl_set(enp, table, n)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ void
+efx_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qpost(erp, addrp, size, n, completed, added);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+ void
+efx_rx_qps_update_credits(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qps_update_credits(erp);
+}
+
+ __checkReturn uint8_t *
+efx_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ return (erxop->erxo_qps_packet_info(erp, buffer,
+ buffer_length, current_offset, lengthp,
+ next_offsetp, timestamp));
+}
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+ void
+efx_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qpush(erp, added, pushedp);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ if ((rc = erxop->erxo_qflush(erp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qenable(erp);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rxq_t *erp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ /* Allocate an RXQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_rxq_t), erp);
+
+ if (erp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ erp->er_magic = EFX_RXQ_MAGIC;
+ erp->er_enp = enp;
+ erp->er_index = index;
+ erp->er_mask = n - 1;
+ erp->er_esmp = esmp;
+
+ if ((rc = erxop->erxo_qcreate(enp, index, label, type, esmp, n, id,
+ eep, erp)) != 0)
+ goto fail2;
+
+ enp->en_rx_qcount++;
+ *erpp = erp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qdestroy(erp);
+}
+
+ __checkReturn efx_rc_t
+efx_pseudo_hdr_pkt_length_get(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ return (erxop->erxo_prefix_pktlen(enp, buffer, lengthp));
+}
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn uint32_t
+efx_pseudo_hdr_hash_get(
+ __in efx_rxq_t *erp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ EFSYS_ASSERT3U(enp->en_hash_support, ==, EFX_RX_HASH_AVAILABLE);
+ return (erxop->erxo_prefix_hash(enp, func, buffer));
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_rx_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ unsigned int index;
+
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_DESC_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, 0x3000 / 32);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Zero the RSS table */
+ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS;
+ index++) {
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+ }
+
+#if EFSYS_OPT_RX_SCALE
+ /* The RSS key and indirection table are writable. */
+ enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE;
+
+ /* Hardware can insert RX hash with/without RSS */
+ enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCATTER
+static __checkReturn efx_rc_t
+siena_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ unsigned int nbuf32;
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ nbuf32 = buf_size / 32;
+ if ((nbuf32 == 0) ||
+ (nbuf32 >= (1 << FRF_BZ_RX_USR_BUF_SIZE_WIDTH)) ||
+ ((buf_size % 32) != 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_rx_qcount > 0) {
+ rc = EBUSY;
+ goto fail2;
+ }
+
+ /* Set scatter buffer size */
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, nbuf32);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Enable scatter for packets not matching a filter */
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+
+#define EFX_RX_LFSR_HASH(_enp, _insert) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
+ (_insert) ? 1 : 0); \
+ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ \
+ if ((_enp)->en_family == EFX_FAMILY_SIENA) { \
+ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
+ &oword); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 0); \
+ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
+ &oword); \
+ } \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_RX_TOEPLITZ_IPV4_HASH(_enp, _insert, _ip, _tcp) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 1); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, \
+ (_ip) ? 1 : 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, \
+ (_tcp) ? 0 : 1); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
+ (_insert) ? 1 : 0); \
+ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_RX_TOEPLITZ_IPV6_HASH(_enp, _ip, _tcp, _rc) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, (_ip) ? 1 : 0); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS, (_tcp) ? 0 : 1); \
+ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
+ \
+ (_rc) = 0; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+
+#if EFSYS_OPT_RX_SCALE
+
+static __checkReturn efx_rc_t
+siena_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ efx_rc_t rc;
+
+ switch (alg) {
+ case EFX_RX_HASHALG_LFSR:
+ EFX_RX_LFSR_HASH(enp, insert);
+ break;
+
+ case EFX_RX_HASHALG_TOEPLITZ:
+ EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert,
+ type & EFX_RX_HASH_IPV4,
+ type & EFX_RX_HASH_TCPIPV4);
+
+ EFX_RX_TOEPLITZ_IPV6_HASH(enp,
+ type & EFX_RX_HASH_IPV6,
+ type & EFX_RX_HASH_TCPIPV6,
+ rc);
+ if (rc != 0)
+ goto fail1;
+
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ EFX_RX_LFSR_HASH(enp, B_FALSE);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ unsigned int byte;
+ unsigned int offset;
+ efx_rc_t rc;
+
+ byte = 0;
+
+ /* Write Toeplitz IPv4 hash key */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
+
+ byte = 0;
+
+ /* Verify Toeplitz IPv4 hash key */
+ EFX_BAR_READO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
+ for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ }
+
+ if ((enp->en_features & EFX_FEATURE_IPV6) == 0)
+ goto done;
+
+ byte = 0;
+
+ /* Write Toeplitz IPv6 hash key 3 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+
+ /* Write Toeplitz IPv6 hash key 2 */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
+
+ /* Write Toeplitz IPv6 hash key 1 */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
+
+ byte = 0;
+
+ /* Verify Toeplitz IPv6 hash key 3 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+
+ /* Verify Toeplitz IPv6 hash key 2 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ }
+
+ /* Verify Toeplitz IPv6 hash key 1 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail4;
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ int index;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS);
+ EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH));
+
+ if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) {
+ uint32_t byte;
+
+ /* Calculate the entry to place in the table */
+ byte = (n > 0) ? (uint32_t)table[index % n] : 0;
+
+ EFSYS_PROBE2(table, int, index, uint32_t, byte);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_BZ_IT_QUEUE, byte);
+
+ /* Write the table */
+ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+ }
+
+ for (index = FR_BZ_RX_INDIRECTION_TBL_ROWS - 1; index >= 0; --index) {
+ uint32_t byte;
+
+ /* Determine if we're starting a new batch */
+ byte = (n > 0) ? (uint32_t)table[index % n] : 0;
+
+ /* Read the table */
+ EFX_BAR_TBL_READO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+
+ /* Verify the entry */
+ if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif
+
+/*
+ * Falcon/Siena pseudo-header
+ * --------------------------
+ *
+ * Receive packets are prefixed by an optional 16 byte pseudo-header.
+ * The pseudo-header is a byte array of one of the forms:
+ *
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.TT.TT.TT.TT
+ * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.LL.LL
+ *
+ * where:
+ * TT.TT.TT.TT Toeplitz hash (32-bit big-endian)
+ * LL.LL LFSR hash (16-bit big-endian)
+ */
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn uint32_t
+siena_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ switch (func) {
+ case EFX_RX_HASHALG_TOEPLITZ:
+ return ((buffer[12] << 24) |
+ (buffer[13] << 16) |
+ (buffer[14] << 8) |
+ buffer[15]);
+
+ case EFX_RX_HASHALG_LFSR:
+ return ((buffer[14] << 8) | buffer[15]);
+
+ default:
+ EFSYS_ASSERT(0);
+ return (0);
+ }
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+static __checkReturn efx_rc_t
+siena_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ _NOTE(ARGUNUSED(enp, buffer, lengthp))
+
+ /* Not supported by Falcon/Siena hardware */
+ EFSYS_ASSERT(0);
+ return (ENOTSUP);
+}
+
+
+static void
+siena_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_qword_t qword;
+ unsigned int i;
+ unsigned int offset;
+ unsigned int id;
+
+ /* The client driver must not overfill the queue */
+ EFSYS_ASSERT3U(added - completed + n, <=,
+ EFX_RXQ_LIMIT(erp->er_mask + 1));
+
+ id = added & (erp->er_mask);
+ for (i = 0; i < n; i++) {
+ EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
+ unsigned int, id, efsys_dma_addr_t, addrp[i],
+ size_t, size);
+
+ EFX_POPULATE_QWORD_3(qword,
+ FSF_AZ_RX_KER_BUF_SIZE, (uint32_t)(size),
+ FSF_AZ_RX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addrp[i] & 0xffffffff),
+ FSF_AZ_RX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addrp[i] >> 32));
+
+ offset = id * sizeof (efx_qword_t);
+ EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
+
+ id = (id + 1) & (erp->er_mask);
+ }
+}
+
+static void
+siena_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ unsigned int pushed = *pushedp;
+ uint32_t wptr;
+ efx_oword_t oword;
+ efx_dword_t dword;
+
+ /* All descriptors are pushed */
+ *pushedp = added;
+
+ /* Push the populated descriptors out */
+ wptr = added & erp->er_mask;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DESC_WPTR, wptr);
+
+ /* Only write the third DWORD */
+ EFX_POPULATE_DWORD_1(dword,
+ EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
+ wptr, pushed & erp->er_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED3(enp, FR_BZ_RX_DESC_UPD_REGP0,
+ erp->er_index, &dword, B_FALSE);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+static void
+siena_rx_qps_update_credits(
+ __in efx_rxq_t *erp)
+{
+ /* Not supported by Siena hardware */
+ EFSYS_ASSERT(0);
+}
+
+static uint8_t *
+siena_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp)
+{
+ /* Not supported by Siena hardware */
+ EFSYS_ASSERT(0);
+
+ return (NULL);
+}
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+static __checkReturn efx_rc_t
+siena_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+ uint32_t label;
+
+ label = erp->er_index;
+
+ /* Flush the queue */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ, label);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FLUSH_DESCQ_REG, &oword);
+
+ return (0);
+}
+
+static void
+siena_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DC_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_EN, 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t size;
+ boolean_t jumbo;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(esmp))
+
+ EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS ==
+ (1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
+ EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
+
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
+
+ if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_rxq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ for (size = 0; (1 << size) <= (EFX_RXQ_MAXNDESCS / EFX_RXQ_MINNDESCS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_RXQ_MINNDESCS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ switch (type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ jumbo = B_FALSE;
+ break;
+
+#if EFSYS_OPT_RX_SCATTER
+ case EFX_RXQ_TYPE_SCATTER:
+ if (enp->en_family < EFX_FAMILY_SIENA) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ jumbo = B_TRUE;
+ break;
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ default:
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* Set up the new descriptor queue */
+ EFX_POPULATE_OWORD_7(oword,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, id,
+ FRF_AZ_RX_DESCQ_EVQ_ID, eep->ee_index,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL, label,
+ FRF_AZ_RX_DESCQ_SIZE, size,
+ FRF_AZ_RX_DESCQ_TYPE, 0,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+siena_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT(enp->en_rx_qcount != 0);
+ --enp->en_rx_qcount;
+
+ /* Purge descriptor queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ /* Free the RXQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+}
+
+static void
+siena_rx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/efx_sram.c b/drivers/net/sfc/base/efx_sram.c
new file mode 100644
index 00000000..5f4edea7
--- /dev/null
+++ b/drivers/net/sfc/base/efx_sram.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_sram_buf_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in efsys_mem_t *esmp,
+ __in size_t n)
+{
+ efx_qword_t qword;
+ uint32_t start = id;
+ uint32_t stop = start + n;
+ efsys_dma_addr_t addr;
+ efx_oword_t oword;
+ unsigned int count;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD) {
+ /*
+ * FIXME: the efx_sram_buf_tbl_*() functionality needs to be
+ * pulled inside the Falcon/Siena queue create/destroy code,
+ * and then the original functions can be removed (see bug30834
+ * comment #1). But, for now, we just ensure that they are
+ * no-ops for EF10, to allow bringing up existing drivers
+ * without modification.
+ */
+
+ return (0);
+ }
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ if (stop >= EFX_BUF_TBL_SIZE) {
+ rc = EFBIG;
+ goto fail1;
+ }
+
+ /* Add the entries into the buffer table */
+ addr = EFSYS_MEM_ADDR(esmp);
+ for (id = start; id != stop; id++) {
+ EFX_POPULATE_QWORD_5(qword,
+ FRF_AZ_IP_DAT_BUF_SIZE, 0, FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF_DW0,
+ (uint32_t)((addr >> 12) & 0xffffffff),
+ FRF_AZ_BUF_ADR_FBUF_DW1,
+ (uint32_t)((addr >> 12) >> 32),
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_FULL_TBL,
+ id, &qword);
+
+ addr += EFX_BUF_SIZE;
+ }
+
+ EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);
+
+ /* Flush the write buffer */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_BUF_UPD_CMD, 1,
+ FRF_AZ_BUF_CLR_CMD, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+
+ /* Poll for the last entry being written to the buffer table */
+ EFSYS_ASSERT3U(id, ==, stop);
+ addr -= EFX_BUF_SIZE;
+
+ count = 0;
+ do {
+ EFSYS_PROBE1(wait, unsigned int, count);
+
+ /* Spin for 1 ms */
+ EFSYS_SPIN(1000);
+
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
+ id - 1, &qword);
+
+ if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) ==
+ (uint32_t)((addr >> 12) & 0xffffffff) &&
+ EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) ==
+ (uint32_t)((addr >> 12) >> 32))
+ goto verify;
+
+ } while (++count < 100);
+
+ rc = ETIMEDOUT;
+ goto fail2;
+
+verify:
+ /* Verify the rest of the entries in the buffer table */
+ while (--id != start) {
+ addr -= EFX_BUF_SIZE;
+
+ /* Read the buffer table entry */
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
+ id - 1, &qword);
+
+ if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) !=
+ (uint32_t)((addr >> 12) & 0xffffffff) ||
+ EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) !=
+ (uint32_t)((addr >> 12) >> 32)) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ id = stop;
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, id - 1,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_sram_buf_tbl_clear(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ uint32_t start = id;
+ uint32_t stop = start + n;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD) {
+ /*
+ * FIXME: the efx_sram_buf_tbl_*() functionality needs to be
+ * pulled inside the Falcon/Siena queue create/destroy code,
+ * and then the original functions can be removed (see bug30834
+ * comment #1). But, for now, we just ensure that they are
+ * no-ops for EF10, to allow bringing up existing drivers
+ * without modification.
+ */
+
+ return;
+ }
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE);
+
+ EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);
+
+ EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, stop - 1,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+}
+
+
+#if EFSYS_OPT_DIAG
+
+static void
+efx_sram_byte_increment_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+ unsigned int index;
+
+ _NOTE(ARGUNUSED(negate))
+
+ for (index = 0; index < sizeof (efx_qword_t); index++)
+ eqp->eq_u8[index] = offset + index;
+}
+
+static void
+efx_sram_all_the_same_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ if (negate)
+ EFX_SET_QWORD(*eqp);
+ else
+ EFX_ZERO_QWORD(*eqp);
+}
+
+static void
+efx_sram_bit_alternate_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ EFX_POPULATE_QWORD_2(*eqp,
+ EFX_DWORD_0, (negate) ? 0x55555555 : 0xaaaaaaaa,
+ EFX_DWORD_1, (negate) ? 0x55555555 : 0xaaaaaaaa);
+}
+
+static void
+efx_sram_byte_alternate_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ EFX_POPULATE_QWORD_2(*eqp,
+ EFX_DWORD_0, (negate) ? 0x00ff00ff : 0xff00ff00,
+ EFX_DWORD_1, (negate) ? 0x00ff00ff : 0xff00ff00);
+}
+
+static void
+efx_sram_byte_changing_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+ unsigned int index;
+
+ for (index = 0; index < sizeof (efx_qword_t); index++) {
+ uint8_t byte;
+
+ if (offset / 256 == 0)
+ byte = (uint8_t)((offset % 257) % 256);
+ else
+ byte = (uint8_t)(~((offset - 8) % 257) % 256);
+
+ eqp->eq_u8[index] = (negate) ? ~byte : byte;
+ }
+}
+
+static void
+efx_sram_bit_sweep_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+
+ if (negate) {
+ EFX_SET_QWORD(*eqp);
+ EFX_CLEAR_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
+ } else {
+ EFX_ZERO_QWORD(*eqp);
+ EFX_SET_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
+ }
+}
+
+efx_sram_pattern_fn_t __efx_sram_pattern_fns[] = {
+ efx_sram_byte_increment_set,
+ efx_sram_all_the_same_set,
+ efx_sram_bit_alternate_set,
+ efx_sram_byte_alternate_set,
+ efx_sram_byte_changing_set,
+ efx_sram_bit_sweep_set
+};
+
+ __checkReturn efx_rc_t
+efx_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_pattern_type_t type)
+{
+ efx_sram_pattern_fn_t func;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+
+ /* SRAM testing is only available on Siena. */
+ if (enp->en_family != EFX_FAMILY_SIENA)
+ return (0);
+
+ /* Select pattern generator */
+ EFSYS_ASSERT3U(type, <, EFX_PATTERN_NTYPES);
+ func = __efx_sram_pattern_fns[type];
+
+ return (siena_sram_test(enp, func));
+}
+
+#endif /* EFSYS_OPT_DIAG */
diff --git a/drivers/net/sfc/base/efx_tx.c b/drivers/net/sfc/base/efx_tx.c
new file mode 100644
index 00000000..ceb29206
--- /dev/null
+++ b/drivers/net/sfc/base/efx_tx.c
@@ -0,0 +1,1097 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_QSTATS
+#define EFX_TX_QSTAT_INCR(_etp, _stat) \
+ do { \
+ (_etp)->et_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_TX_QSTAT_INCR(_etp, _stat)
+#endif
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_tx_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_tx_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp);
+
+static void
+siena_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+static __checkReturn efx_rc_t
+siena_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+static void
+siena_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+static __checkReturn efx_rc_t
+siena_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+static __checkReturn efx_rc_t
+siena_tx_qflush(
+ __in efx_txq_t *etp);
+
+static void
+siena_tx_qenable(
+ __in efx_txq_t *etp);
+
+ __checkReturn efx_rc_t
+siena_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+ void
+siena_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+#endif
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_tx_ops_t __efx_tx_siena_ops = {
+ siena_tx_init, /* etxo_init */
+ siena_tx_fini, /* etxo_fini */
+ siena_tx_qcreate, /* etxo_qcreate */
+ siena_tx_qdestroy, /* etxo_qdestroy */
+ siena_tx_qpost, /* etxo_qpost */
+ siena_tx_qpush, /* etxo_qpush */
+ siena_tx_qpace, /* etxo_qpace */
+ siena_tx_qflush, /* etxo_qflush */
+ siena_tx_qenable, /* etxo_qenable */
+ NULL, /* etxo_qpio_enable */
+ NULL, /* etxo_qpio_disable */
+ NULL, /* etxo_qpio_write */
+ NULL, /* etxo_qpio_post */
+ siena_tx_qdesc_post, /* etxo_qdesc_post */
+ siena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ NULL, /* etxo_qdesc_tso_create */
+ NULL, /* etxo_qdesc_tso2_create */
+ NULL, /* etxo_qdesc_vlantci_create */
+#if EFSYS_OPT_QSTATS
+ siena_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+static const efx_tx_ops_t __efx_tx_hunt_ops = {
+ ef10_tx_init, /* etxo_init */
+ ef10_tx_fini, /* etxo_fini */
+ ef10_tx_qcreate, /* etxo_qcreate */
+ ef10_tx_qdestroy, /* etxo_qdestroy */
+ ef10_tx_qpost, /* etxo_qpost */
+ ef10_tx_qpush, /* etxo_qpush */
+ ef10_tx_qpace, /* etxo_qpace */
+ ef10_tx_qflush, /* etxo_qflush */
+ ef10_tx_qenable, /* etxo_qenable */
+ ef10_tx_qpio_enable, /* etxo_qpio_enable */
+ ef10_tx_qpio_disable, /* etxo_qpio_disable */
+ ef10_tx_qpio_write, /* etxo_qpio_write */
+ ef10_tx_qpio_post, /* etxo_qpio_post */
+ ef10_tx_qdesc_post, /* etxo_qdesc_post */
+ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ ef10_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
+ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+#if EFSYS_OPT_QSTATS
+ ef10_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+static const efx_tx_ops_t __efx_tx_medford_ops = {
+ ef10_tx_init, /* etxo_init */
+ ef10_tx_fini, /* etxo_fini */
+ ef10_tx_qcreate, /* etxo_qcreate */
+ ef10_tx_qdestroy, /* etxo_qdestroy */
+ ef10_tx_qpost, /* etxo_qpost */
+ ef10_tx_qpush, /* etxo_qpush */
+ ef10_tx_qpace, /* etxo_qpace */
+ ef10_tx_qflush, /* etxo_qflush */
+ ef10_tx_qenable, /* etxo_qenable */
+ ef10_tx_qpio_enable, /* etxo_qpio_enable */
+ ef10_tx_qpio_disable, /* etxo_qpio_disable */
+ ef10_tx_qpio_write, /* etxo_qpio_write */
+ ef10_tx_qpio_post, /* etxo_qpio_post */
+ ef10_tx_qdesc_post, /* etxo_qdesc_post */
+ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ NULL, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
+ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+#if EFSYS_OPT_QSTATS
+ ef10_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_tx_init(
+ __in efx_nic_t *enp)
+{
+ const efx_tx_ops_t *etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_EV)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_mod_flags & EFX_MOD_TX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ etxop = &__efx_tx_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ etxop = &__efx_tx_hunt_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ etxop = &__efx_tx_medford_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+ EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);
+
+ if ((rc = etxop->etxo_init(enp)) != 0)
+ goto fail4;
+
+ enp->en_etxop = etxop;
+ enp->en_mod_flags |= EFX_MOD_TX;
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_etxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TX;
+ return (rc);
+}
+
+ void
+efx_tx_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
+ EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);
+
+ etxop->etxo_fini(enp);
+
+ enp->en_etxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TX;
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_txq_t **etpp,
+ __out unsigned int *addedp)
+{
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_txq_t *etp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
+
+ EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <, encp->enc_txq_limit);
+
+ /* Allocate an TXQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_txq_t), etp);
+
+ if (etp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ etp->et_magic = EFX_TXQ_MAGIC;
+ etp->et_enp = enp;
+ etp->et_index = index;
+ etp->et_mask = n - 1;
+ etp->et_esmp = esmp;
+
+ /* Initial descriptor index may be modified by etxo_qcreate */
+ *addedp = 0;
+
+ if ((rc = etxop->etxo_qcreate(enp, index, label, esmp,
+ n, id, flags, eep, etp, addedp)) != 0)
+ goto fail2;
+
+ enp->en_tx_qcount++;
+ *etpp = etp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ EFSYS_ASSERT(enp->en_tx_qcount != 0);
+ --enp->en_tx_qcount;
+
+ etxop->etxo_qdestroy(etp);
+
+ /* Free the TXQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qpost(etp, eb,
+ n, completed, addedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qpush(etp, added, pushed);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qpace(etp, ns)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qflush(etp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qenable(etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_enable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (~enp->en_features & EFX_FEATURE_PIO_BUFFERS) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if (etxop->etxo_qpio_enable == NULL) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ if ((rc = etxop->etxo_qpio_enable(etp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qpio_disable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_disable != NULL)
+ etxop->etxo_qpio_disable(etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_write != NULL) {
+ if ((rc = etxop->etxo_qpio_write(etp, buffer, buf_length,
+ pio_buf_offset)) != 0)
+ goto fail1;
+ return (0);
+ }
+
+ return (ENOTSUP);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_post != NULL) {
+ if ((rc = etxop->etxo_qpio_post(etp, pkt_length, completed,
+ addedp)) != 0)
+ goto fail1;
+ return (0);
+ }
+
+ return (ENOTSUP);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qdesc_post(etp, ed,
+ n, completed, addedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_dma_create != NULL);
+
+ etxop->etxo_qdesc_dma_create(etp, addr, size, eop, edp);
+}
+
+ void
+efx_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_tso_create != NULL);
+
+ etxop->etxo_qdesc_tso_create(etp, ipv4_id, tcp_seq, tcp_flags, edp);
+}
+
+ void
+efx_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_tso2_create != NULL);
+
+ etxop->etxo_qdesc_tso2_create(etp, ipv4_id, tcp_seq, mss, edp, count);
+}
+
+ void
+efx_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_vlantci_create != NULL);
+
+ etxop->etxo_qdesc_vlantci_create(etp, tci, edp);
+}
+
+
+#if EFSYS_OPT_QSTATS
+ void
+efx_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qstats_update(etp, stat);
+}
+#endif
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_tx_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * Disable the timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level (although always allow a
+ * minimal trickle).
+ */
+ EFX_BAR_READO(enp, FR_AZ_TX_RESERVED_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+
+ /*
+ * Filter all packets less than 14 bytes to avoid parsing
+ * errors.
+ */
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_RESERVED_REG, &oword);
+
+ /*
+ * Do not set TX_NO_EOP_DISC_EN, since it limits packets to 16
+ * descriptors (which is bad).
+ */
+ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);
+
+ return (0);
+}
+
+#define EFX_TX_DESC(_etp, _addr, _size, _eop, _added) \
+ do { \
+ unsigned int id; \
+ size_t offset; \
+ efx_qword_t qword; \
+ \
+ id = (_added)++ & (_etp)->et_mask; \
+ offset = id * sizeof (efx_qword_t); \
+ \
+ EFSYS_PROBE5(tx_post, unsigned int, (_etp)->et_index, \
+ unsigned int, id, efsys_dma_addr_t, (_addr), \
+ size_t, (_size), boolean_t, (_eop)); \
+ \
+ EFX_POPULATE_QWORD_4(qword, \
+ FSF_AZ_TX_KER_CONT, (_eop) ? 0 : 1, \
+ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)(_size), \
+ FSF_AZ_TX_KER_BUF_ADDR_DW0, \
+ (uint32_t)((_addr) & 0xffffffff), \
+ FSF_AZ_TX_KER_BUF_ADDR_DW1, \
+ (uint32_t)((_addr) >> 32)); \
+ EFSYS_MEM_WRITEQ((_etp)->et_esmp, offset, &qword); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+static __checkReturn efx_rc_t
+siena_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ int rc = ENOSPC;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1))
+ goto fail1;
+
+ for (i = 0; i < n; i++) {
+ efx_buffer_t *ebp = &eb[i];
+ efsys_dma_addr_t start = ebp->eb_addr;
+ size_t size = ebp->eb_size;
+ efsys_dma_addr_t end = start + size;
+
+ /*
+ * Fragments must not span 4k boundaries.
+ * Here it is a stricter requirement than the maximum length.
+ */
+ EFSYS_ASSERT(P2ROUNDUP(start + 1,
+ etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= end);
+
+ EFX_TX_DESC(etp, start, size, ebp->eb_eop, added);
+ }
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+siena_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ uint32_t wptr;
+ efx_dword_t dword;
+ efx_oword_t oword;
+
+ /* Push the populated descriptors out */
+ wptr = added & etp->et_mask;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DESC_WPTR, wptr);
+
+ /* Only write the third DWORD */
+ EFX_POPULATE_DWORD_1(dword,
+ EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
+ wptr, pushed & etp->et_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED3(enp, FR_BZ_TX_DESC_UPD_REGP0,
+ etp->et_index, &dword, B_FALSE);
+}
+
+#define EFX_MAX_PACE_VALUE 20
+
+static __checkReturn efx_rc_t
+siena_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ unsigned int pace_val;
+ unsigned int timer_period;
+ efx_rc_t rc;
+
+ if (ns == 0) {
+ pace_val = 0;
+ } else {
+ /*
+ * The pace_val to write into the table is s.t
+ * ns <= timer_period * (2 ^ pace_val)
+ */
+ timer_period = 104 / encp->enc_clk_mult;
+ for (pace_val = 1; pace_val <= EFX_MAX_PACE_VALUE; pace_val++) {
+ if ((timer_period << pace_val) >= ns)
+ break;
+ }
+ }
+ if (pace_val > EFX_MAX_PACE_VALUE) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Update the pacing table */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_PACE, pace_val);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_PACE_TBL, etp->et_index,
+ &oword, B_TRUE);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+ uint32_t label;
+
+ efx_tx_qpace(etp, 0);
+
+ label = etp->et_index;
+
+ /* Flush the queue */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, label);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_FLUSH_DESCQ_REG, &oword);
+
+ return (0);
+}
+
+static void
+siena_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+
+ EFSYS_PROBE5(tx_descq_ptr, unsigned int, etp->et_index,
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_3),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_2),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_1),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_0));
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DC_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_EN, 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t size;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(esmp))
+
+ EFX_STATIC_ASSERT(EFX_EV_TX_NLABELS ==
+ (1 << FRF_AZ_TX_DESCQ_LABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_TX_NLABELS);
+
+ EFSYS_ASSERT(ISP2(encp->enc_txq_max_ndescs));
+ EFX_STATIC_ASSERT(ISP2(EFX_TXQ_MINNDESCS));
+
+ if (!ISP2(n) || (n < EFX_TXQ_MINNDESCS) || (n > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_txq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ for (size = 0;
+ (1 << size) <= (int)(encp->enc_txq_max_ndescs / EFX_TXQ_MINNDESCS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_TXQ_MINNDESCS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ /* Set up the new descriptor queue */
+ *addedp = 0;
+
+ EFX_POPULATE_OWORD_6(oword,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, id,
+ FRF_AZ_TX_DESCQ_EVQ_ID, eep->ee_index,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, label,
+ FRF_AZ_TX_DESCQ_SIZE, size,
+ FRF_AZ_TX_DESCQ_TYPE, 0);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_IP_CHKSM_DIS,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_TCP_CHKSM_DIS,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < n; i++) {
+ efx_desc_t *edp = &ed[i];
+ unsigned int id;
+ size_t offset;
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_desc_t);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
+ }
+
+ EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
+ unsigned int, added, unsigned int, n);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+siena_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ /*
+ * Fragments must not span 4k boundaries.
+ * Here it is a stricter requirement than the maximum length.
+ */
+ EFSYS_ASSERT(P2ROUNDUP(addr + 1,
+ etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= addr + size);
+
+ EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
+ efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_4(edp->ed_eq,
+ FSF_AZ_TX_KER_CONT, eop ? 0 : 1,
+ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)size,
+ FSF_AZ_TX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addr & 0xffffffff),
+ FSF_AZ_TX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addr >> 32));
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_QSTATS
+#if EFSYS_OPT_NAMES
+/* START MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock 2866874ecd7a363b */
+static const char * const __efx_tx_qstat_name[] = {
+ "post",
+ "post_pio",
+};
+/* END MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock */
+
+ const char *
+efx_tx_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(id, <, TX_NQSTATS);
+
+ return (__efx_tx_qstat_name[id]);
+}
+#endif /* EFSYS_OPT_NAMES */
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < TX_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, etp->et_stat[id]);
+ etp->et_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+static void
+siena_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+
+ /* Purge descriptor queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+}
+
+static void
+siena_tx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/efx_types.h b/drivers/net/sfc/base/efx_types.h
new file mode 100644
index 00000000..b8ee14a6
--- /dev/null
+++ b/drivers/net/sfc/base/efx_types.h
@@ -0,0 +1,1647 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ *
+ * Ackowledgement to Fen Systems Ltd.
+ */
+
+#ifndef _SYS_EFX_TYPES_H
+#define _SYS_EFX_TYPES_H
+
+#include "efsys.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Bitfield access
+ *
+ * Solarflare NICs make extensive use of bitfields up to 128 bits
+ * wide. Since there is no native 128-bit datatype on most systems,
+ * and since 64-bit datatypes are inefficient on 32-bit systems and
+ * vice versa, we wrap accesses in a way that uses the most efficient
+ * datatype.
+ *
+ * The NICs are PCI devices and therefore little-endian. Since most
+ * of the quantities that we deal with are DMAed to/from host memory,
+ * we define our datatypes (efx_oword_t, efx_qword_t and efx_dword_t)
+ * to be little-endian.
+ *
+ * In the less common case of using PIO for individual register
+ * writes, we construct the little-endian datatype in host memory and
+ * then use non-swapping register access primitives, rather than
+ * constructing a native-endian datatype and relying on implicit
+ * byte-swapping. (We use a similar strategy for register reads.)
+ */
+
+/*
+ * NOTE: Field definitions here and elsewhere are done in terms of a lowest
+ * bit number (LBN) and a width.
+ */
+
+#define EFX_DUMMY_FIELD_LBN 0
+#define EFX_DUMMY_FIELD_WIDTH 0
+
+#define EFX_BYTE_0_LBN 0
+#define EFX_BYTE_0_WIDTH 8
+
+#define EFX_BYTE_1_LBN 8
+#define EFX_BYTE_1_WIDTH 8
+
+#define EFX_BYTE_2_LBN 16
+#define EFX_BYTE_2_WIDTH 8
+
+#define EFX_BYTE_3_LBN 24
+#define EFX_BYTE_3_WIDTH 8
+
+#define EFX_BYTE_4_LBN 32
+#define EFX_BYTE_4_WIDTH 8
+
+#define EFX_BYTE_5_LBN 40
+#define EFX_BYTE_5_WIDTH 8
+
+#define EFX_BYTE_6_LBN 48
+#define EFX_BYTE_6_WIDTH 8
+
+#define EFX_BYTE_7_LBN 56
+#define EFX_BYTE_7_WIDTH 8
+
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
+
+#define EFX_WORD_2_LBN 32
+#define EFX_WORD_2_WIDTH 16
+
+#define EFX_WORD_3_LBN 48
+#define EFX_WORD_3_WIDTH 16
+
+#define EFX_DWORD_0_LBN 0
+#define EFX_DWORD_0_WIDTH 32
+
+#define EFX_DWORD_1_LBN 32
+#define EFX_DWORD_1_WIDTH 32
+
+#define EFX_DWORD_2_LBN 64
+#define EFX_DWORD_2_WIDTH 32
+
+#define EFX_DWORD_3_LBN 96
+#define EFX_DWORD_3_WIDTH 32
+
+/* There are intentionally no EFX_QWORD_0 or EFX_QWORD_1 field definitions
+ * here as the implementaion of EFX_QWORD_FIELD and EFX_OWORD_FIELD do not
+ * support field widths larger than 32 bits.
+ */
+
+/* Specified attribute (i.e. LBN ow WIDTH) of the specified field */
+#define EFX_VAL(_field, _attribute) \
+ _field ## _ ## _attribute
+
+/* Lowest bit number of the specified field */
+#define EFX_LOW_BIT(_field) \
+ EFX_VAL(_field, LBN)
+
+/* Width of the specified field */
+#define EFX_WIDTH(_field) \
+ EFX_VAL(_field, WIDTH)
+
+/* Highest bit number of the specified field */
+#define EFX_HIGH_BIT(_field) \
+ (EFX_LOW_BIT(_field) + EFX_WIDTH(_field) - 1)
+
+/*
+ * 64-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x000000000000001f.
+ */
+#define EFX_MASK64(_field) \
+ ((EFX_WIDTH(_field) == 64) ? ~((uint64_t)0) : \
+ (((((uint64_t)1) << EFX_WIDTH(_field))) - 1))
+/*
+ * 32-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x0000001f.
+ */
+#define EFX_MASK32(_field) \
+ ((EFX_WIDTH(_field) == 32) ? ~((uint32_t)0) : \
+ (((((uint32_t)1) << EFX_WIDTH(_field))) - 1))
+
+/*
+ * 16-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x001f.
+ */
+#define EFX_MASK16(_field) \
+ ((EFX_WIDTH(_field) == 16) ? 0xffffu : \
+ (uint16_t)((1 << EFX_WIDTH(_field)) - 1))
+
+/*
+ * 8-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ */
+#define EFX_MASK8(_field) \
+ ((uint8_t)((1 << EFX_WIDTH(_field)) - 1))
+
+#pragma pack(1)
+
+/*
+ * A byte (i.e. 8-bit) datatype
+ */
+typedef union efx_byte_u {
+ uint8_t eb_u8[1];
+} efx_byte_t;
+
+/*
+ * A word (i.e. 16-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_word_u {
+ efx_byte_t ew_byte[2];
+ uint16_t ew_u16[1];
+ uint8_t ew_u8[2];
+} efx_word_t;
+
+/*
+ * A doubleword (i.e. 32-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_dword_u {
+ efx_byte_t ed_byte[4];
+ efx_word_t ed_word[2];
+ uint32_t ed_u32[1];
+ uint16_t ed_u16[2];
+ uint8_t ed_u8[4];
+} efx_dword_t;
+
+/*
+ * A quadword (i.e. 64-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_qword_u {
+ efx_byte_t eq_byte[8];
+ efx_word_t eq_word[4];
+ efx_dword_t eq_dword[2];
+#if EFSYS_HAS_UINT64
+ uint64_t eq_u64[1];
+#endif
+ uint32_t eq_u32[2];
+ uint16_t eq_u16[4];
+ uint8_t eq_u8[8];
+} efx_qword_t;
+
+/*
+ * An octword (i.e. 128-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_oword_u {
+ efx_byte_t eo_byte[16];
+ efx_word_t eo_word[8];
+ efx_dword_t eo_dword[4];
+ efx_qword_t eo_qword[2];
+#if EFSYS_HAS_SSE2_M128
+ __m128i eo_u128[1];
+#endif
+#if EFSYS_HAS_UINT64
+ uint64_t eo_u64[2];
+#endif
+ uint32_t eo_u32[4];
+ uint16_t eo_u16[8];
+ uint8_t eo_u8[16];
+} efx_oword_t;
+
+#pragma pack()
+
+#define __SWAP16(_x) \
+ ((((_x) & 0xff) << 8) | \
+ (((_x) >> 8) & 0xff))
+
+#define __SWAP32(_x) \
+ ((__SWAP16((_x) & 0xffff) << 16) | \
+ __SWAP16(((_x) >> 16) & 0xffff))
+
+#define __SWAP64(_x) \
+ ((__SWAP32((_x) & 0xffffffff) << 32) | \
+ __SWAP32(((_x) >> 32) & 0xffffffff))
+
+#define __NOSWAP16(_x) (_x)
+#define __NOSWAP32(_x) (_x)
+#define __NOSWAP64(_x) (_x)
+
+#if EFSYS_IS_BIG_ENDIAN
+
+#define __CPU_TO_LE_16(_x) ((uint16_t)__SWAP16(_x))
+#define __LE_TO_CPU_16(_x) ((uint16_t)__SWAP16(_x))
+#define __CPU_TO_BE_16(_x) ((uint16_t)__NOSWAP16(_x))
+#define __BE_TO_CPU_16(_x) ((uint16_t)__NOSWAP16(_x))
+
+#define __CPU_TO_LE_32(_x) ((uint32_t)__SWAP32(_x))
+#define __LE_TO_CPU_32(_x) ((uint32_t)__SWAP32(_x))
+#define __CPU_TO_BE_32(_x) ((uint32_t)__NOSWAP32(_x))
+#define __BE_TO_CPU_32(_x) ((uint32_t)__NOSWAP32(_x))
+
+#define __CPU_TO_LE_64(_x) ((uint64_t)__SWAP64(_x))
+#define __LE_TO_CPU_64(_x) ((uint64_t)__SWAP64(_x))
+#define __CPU_TO_BE_64(_x) ((uint64_t)__NOSWAP64(_x))
+#define __BE_TO_CPU_64(_x) ((uint64_t)__NOSWAP64(_x))
+
+#elif EFSYS_IS_LITTLE_ENDIAN
+
+#define __CPU_TO_LE_16(_x) ((uint16_t)__NOSWAP16(_x))
+#define __LE_TO_CPU_16(_x) ((uint16_t)__NOSWAP16(_x))
+#define __CPU_TO_BE_16(_x) ((uint16_t)__SWAP16(_x))
+#define __BE_TO_CPU_16(_x) ((uint16_t)__SWAP16(_x))
+
+#define __CPU_TO_LE_32(_x) ((uint32_t)__NOSWAP32(_x))
+#define __LE_TO_CPU_32(_x) ((uint32_t)__NOSWAP32(_x))
+#define __CPU_TO_BE_32(_x) ((uint32_t)__SWAP32(_x))
+#define __BE_TO_CPU_32(_x) ((uint32_t)__SWAP32(_x))
+
+#define __CPU_TO_LE_64(_x) ((uint64_t)__NOSWAP64(_x))
+#define __LE_TO_CPU_64(_x) ((uint64_t)__NOSWAP64(_x))
+#define __CPU_TO_BE_64(_x) ((uint64_t)__SWAP64(_x))
+#define __BE_TO_CPU_64(_x) ((uint64_t)__SWAP64(_x))
+
+#else
+
+#error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set"
+
+#endif
+
+#define __NATIVE_8(_x) (uint8_t)(_x)
+
+/* Format string for printing an efx_byte_t */
+#define EFX_BYTE_FMT "0x%02x"
+
+/* Format string for printing an efx_word_t */
+#define EFX_WORD_FMT "0x%04x"
+
+/* Format string for printing an efx_dword_t */
+#define EFX_DWORD_FMT "0x%08x"
+
+/* Format string for printing an efx_qword_t */
+#define EFX_QWORD_FMT "0x%08x:%08x"
+
+/* Format string for printing an efx_oword_t */
+#define EFX_OWORD_FMT "0x%08x:%08x:%08x:%08x"
+
+/* Parameters for printing an efx_byte_t */
+#define EFX_BYTE_VAL(_byte) \
+ ((unsigned int)__NATIVE_8((_byte).eb_u8[0]))
+
+/* Parameters for printing an efx_word_t */
+#define EFX_WORD_VAL(_word) \
+ ((unsigned int)__LE_TO_CPU_16((_word).ew_u16[0]))
+
+/* Parameters for printing an efx_dword_t */
+#define EFX_DWORD_VAL(_dword) \
+ ((unsigned int)__LE_TO_CPU_32((_dword).ed_u32[0]))
+
+/* Parameters for printing an efx_qword_t */
+#define EFX_QWORD_VAL(_qword) \
+ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[1])), \
+ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[0]))
+
+/* Parameters for printing an efx_oword_t */
+#define EFX_OWORD_VAL(_oword) \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[3])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[2])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[1])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[0]))
+
+/*
+ * Stop lint complaining about some shifts.
+ */
+#ifdef __lint
+extern int fix_lint;
+#define FIX_LINT(_x) (_x + fix_lint)
+#else
+#define FIX_LINT(_x) (_x)
+#endif
+
+/*
+ * Extract bit field portion [low,high) from the native-endian element
+ * which contains bits [min,max).
+ *
+ * For example, suppose "element" represents the high 32 bits of a
+ * 64-bit value, and we wish to extract the bits belonging to the bit
+ * field occupying bits 28-45 of this 64-bit value.
+ *
+ * Then EFX_EXTRACT(_element, 32, 63, 28, 45) would give
+ *
+ * (_element) << 4
+ *
+ * The result will contain the relevant bits filled in in the range
+ * [0,high-low), with garbage in bits [high-low+1,...).
+ */
+#define EFX_EXTRACT_NATIVE(_element, _min, _max, _low, _high) \
+ ((FIX_LINT(_low > _max) || FIX_LINT(_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ ((_element) >> (_low - _min)) : \
+ ((_element) << (_min - _low))))
+
+/*
+ * Extract bit field portion [low,high) from the 64-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT64(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_64(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT32(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_32(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 16-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT16(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_16(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 8-bit
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT8(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__NATIVE_8(_element), _min, _max, _low, _high)
+
+#define EFX_EXTRACT_OWORD64(_oword, _low, _high) \
+ (EFX_EXTRACT64((_oword).eo_u64[0], FIX_LINT(0), FIX_LINT(63), \
+ _low, _high) | \
+ EFX_EXTRACT64((_oword).eo_u64[1], FIX_LINT(64), FIX_LINT(127), \
+ _low, _high))
+
+#define EFX_EXTRACT_OWORD32(_oword, _low, _high) \
+ (EFX_EXTRACT32((_oword).eo_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[1], FIX_LINT(32), FIX_LINT(63), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[2], FIX_LINT(64), FIX_LINT(95), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[3], FIX_LINT(96), FIX_LINT(127), \
+ _low, _high))
+
+#define EFX_EXTRACT_QWORD64(_qword, _low, _high) \
+ (EFX_EXTRACT64((_qword).eq_u64[0], FIX_LINT(0), FIX_LINT(63), \
+ _low, _high))
+
+#define EFX_EXTRACT_QWORD32(_qword, _low, _high) \
+ (EFX_EXTRACT32((_qword).eq_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high) | \
+ EFX_EXTRACT32((_qword).eq_u32[1], FIX_LINT(32), FIX_LINT(63), \
+ _low, _high))
+
+#define EFX_EXTRACT_DWORD(_dword, _low, _high) \
+ (EFX_EXTRACT32((_dword).ed_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high))
+
+#define EFX_EXTRACT_WORD(_word, _low, _high) \
+ (EFX_EXTRACT16((_word).ew_u16[0], FIX_LINT(0), FIX_LINT(15), \
+ _low, _high))
+
+#define EFX_EXTRACT_BYTE(_byte, _low, _high) \
+ (EFX_EXTRACT8((_byte).eb_u8[0], FIX_LINT(0), FIX_LINT(7), \
+ _low, _high))
+
+
+#define EFX_OWORD_FIELD64(_oword, _field) \
+ ((uint32_t)EFX_EXTRACT_OWORD64(_oword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_OWORD_FIELD32(_oword, _field) \
+ (EFX_EXTRACT_OWORD32(_oword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_QWORD_FIELD64(_qword, _field) \
+ ((uint32_t)EFX_EXTRACT_QWORD64(_qword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_QWORD_FIELD32(_qword, _field) \
+ (EFX_EXTRACT_QWORD32(_qword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_DWORD_FIELD(_dword, _field) \
+ (EFX_EXTRACT_DWORD(_dword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_WORD_FIELD(_word, _field) \
+ (EFX_EXTRACT_WORD(_word, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK16(_field))
+
+#define EFX_BYTE_FIELD(_byte, _field) \
+ (EFX_EXTRACT_BYTE(_byte, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK8(_field))
+
+
+#define EFX_OWORD_IS_EQUAL64(_oword_a, _oword_b) \
+ ((_oword_a).eo_u64[0] == (_oword_b).eo_u64[0] && \
+ (_oword_a).eo_u64[1] == (_oword_b).eo_u64[1])
+
+#define EFX_OWORD_IS_EQUAL32(_oword_a, _oword_b) \
+ ((_oword_a).eo_u32[0] == (_oword_b).eo_u32[0] && \
+ (_oword_a).eo_u32[1] == (_oword_b).eo_u32[1] && \
+ (_oword_a).eo_u32[2] == (_oword_b).eo_u32[2] && \
+ (_oword_a).eo_u32[3] == (_oword_b).eo_u32[3])
+
+#define EFX_QWORD_IS_EQUAL64(_qword_a, _qword_b) \
+ ((_qword_a).eq_u64[0] == (_qword_b).eq_u64[0])
+
+#define EFX_QWORD_IS_EQUAL32(_qword_a, _qword_b) \
+ ((_qword_a).eq_u32[0] == (_qword_b).eq_u32[0] && \
+ (_qword_a).eq_u32[1] == (_qword_b).eq_u32[1])
+
+#define EFX_DWORD_IS_EQUAL(_dword_a, _dword_b) \
+ ((_dword_a).ed_u32[0] == (_dword_b).ed_u32[0])
+
+#define EFX_WORD_IS_EQUAL(_word_a, _word_b) \
+ ((_word_a).ew_u16[0] == (_word_b).ew_u16[0])
+
+#define EFX_BYTE_IS_EQUAL(_byte_a, _byte_b) \
+ ((_byte_a).eb_u8[0] == (_byte_b).eb_u8[0])
+
+
+#define EFX_OWORD_IS_ZERO64(_oword) \
+ (((_oword).eo_u64[0] | \
+ (_oword).eo_u64[1]) == 0)
+
+#define EFX_OWORD_IS_ZERO32(_oword) \
+ (((_oword).eo_u32[0] | \
+ (_oword).eo_u32[1] | \
+ (_oword).eo_u32[2] | \
+ (_oword).eo_u32[3]) == 0)
+
+#define EFX_QWORD_IS_ZERO64(_qword) \
+ (((_qword).eq_u64[0]) == 0)
+
+#define EFX_QWORD_IS_ZERO32(_qword) \
+ (((_qword).eq_u32[0] | \
+ (_qword).eq_u32[1]) == 0)
+
+#define EFX_DWORD_IS_ZERO(_dword) \
+ (((_dword).ed_u32[0]) == 0)
+
+#define EFX_WORD_IS_ZERO(_word) \
+ (((_word).ew_u16[0]) == 0)
+
+#define EFX_BYTE_IS_ZERO(_byte) \
+ (((_byte).eb_u8[0]) == 0)
+
+
+#define EFX_OWORD_IS_SET64(_oword) \
+ (((_oword).eo_u64[0] & \
+ (_oword).eo_u64[1]) == ~((uint64_t)0))
+
+#define EFX_OWORD_IS_SET32(_oword) \
+ (((_oword).eo_u32[0] & \
+ (_oword).eo_u32[1] & \
+ (_oword).eo_u32[2] & \
+ (_oword).eo_u32[3]) == ~((uint32_t)0))
+
+#define EFX_QWORD_IS_SET64(_qword) \
+ (((_qword).eq_u64[0]) == ~((uint64_t)0))
+
+#define EFX_QWORD_IS_SET32(_qword) \
+ (((_qword).eq_u32[0] & \
+ (_qword).eq_u32[1]) == ~((uint32_t)0))
+
+#define EFX_DWORD_IS_SET(_dword) \
+ ((_dword).ed_u32[0] == ~((uint32_t)0))
+
+#define EFX_WORD_IS_SET(_word) \
+ ((_word).ew_u16[0] == ~((uint16_t)0))
+
+#define EFX_BYTE_IS_SET(_byte) \
+ ((_byte).eb_u8[0] == ~((uint8_t)0))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the bit field [low,high) that lies within
+ * the range [min,max).
+ */
+
+#define EFX_INSERT_NATIVE64(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ (((uint64_t)(_value)) << (_low - _min)) : \
+ (((uint64_t)(_value)) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE32(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ (((uint32_t)(_value)) << (_low - _min)) : \
+ (((uint32_t)(_value)) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE16(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ (uint16_t)((_low > _min) ? \
+ ((_value) << (_low - _min)) : \
+ ((_value) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE8(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ (uint8_t)((_low > _min) ? \
+ ((_value) << (_low - _min)) : \
+ ((_value) >> (_min - _low))))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE64(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE32(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE16(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE8(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+/*
+ * Construct bit field
+ *
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELDS64(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_64( \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS32(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_32( \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS16(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_16( \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS8(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __NATIVE_8( \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field10, _value10))
+
+#define EFX_POPULATE_OWORD64(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] = EFX_INSERT_FIELDS64(0, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[1] = EFX_INSERT_FIELDS64(64, 127, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_OWORD32(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[1] = EFX_INSERT_FIELDS32(32, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[2] = EFX_INSERT_FIELDS32(64, 95, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[3] = EFX_INSERT_FIELDS32(96, 127, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_QWORD64(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] = EFX_INSERT_FIELDS64(0, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_QWORD32(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[1] = EFX_INSERT_FIELDS32(32, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_DWORD(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_dword).ed_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_WORD(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_word).ew_u16[0] = EFX_INSERT_FIELDS16(0, 15, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_BYTE(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_byte).eb_u8[0] = EFX_INSERT_FIELDS8(0, 7, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* Populate an octword field with various numbers of arguments */
+#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
+
+#define EFX_POPULATE_OWORD_9(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_OWORD_10(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_OWORD_8(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_OWORD_9(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_OWORD_7(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_OWORD_8(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_OWORD_6(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_OWORD_7(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_OWORD_5(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_OWORD_6(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_OWORD_4(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_OWORD_5(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_OWORD_3(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_OWORD_4(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_OWORD_2(_oword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_OWORD_3(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_OWORD_1(_oword, \
+ _field1, _value1) \
+ EFX_POPULATE_OWORD_2(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_OWORD(_oword) \
+ EFX_POPULATE_OWORD_1(_oword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_OWORD(_oword) \
+ EFX_POPULATE_OWORD_4(_oword, \
+ EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff, \
+ EFX_DWORD_2, 0xffffffff, EFX_DWORD_3, 0xffffffff)
+
+/* Populate a quadword field with various numbers of arguments */
+#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
+
+#define EFX_POPULATE_QWORD_9(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_QWORD_10(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_QWORD_8(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_QWORD_9(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_QWORD_7(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_QWORD_8(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_QWORD_6(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_QWORD_7(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_QWORD_5(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_QWORD_6(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_QWORD_4(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_QWORD_5(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_QWORD_3(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_QWORD_4(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_QWORD_2(_qword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_QWORD_3(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_QWORD_1(_qword, \
+ _field1, _value1) \
+ EFX_POPULATE_QWORD_2(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_QWORD(_qword) \
+ EFX_POPULATE_QWORD_1(_qword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_QWORD(_qword) \
+ EFX_POPULATE_QWORD_2(_qword, \
+ EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff)
+
+/* Populate a dword field with various numbers of arguments */
+#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
+
+#define EFX_POPULATE_DWORD_9(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_DWORD_10(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_DWORD_8(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_DWORD_9(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_DWORD_7(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_DWORD_8(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_DWORD_6(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_DWORD_7(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_DWORD_5(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_DWORD_6(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_DWORD_4(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_DWORD_5(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_DWORD_3(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_DWORD_4(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_DWORD_2(_dword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_DWORD_3(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_DWORD_1(_dword, \
+ _field1, _value1) \
+ EFX_POPULATE_DWORD_2(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_DWORD(_dword) \
+ EFX_POPULATE_DWORD_1(_dword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_DWORD(_dword) \
+ EFX_POPULATE_DWORD_1(_dword, \
+ EFX_DWORD_0, 0xffffffff)
+
+/* Populate a word field with various numbers of arguments */
+#define EFX_POPULATE_WORD_10 EFX_POPULATE_WORD
+
+#define EFX_POPULATE_WORD_9(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_WORD_10(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_WORD_8(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_WORD_9(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_WORD_7(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_WORD_8(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_WORD_6(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_WORD_7(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_WORD_5(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_WORD_6(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_WORD_4(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_WORD_5(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_WORD_3(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_WORD_4(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_WORD_2(_word, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_WORD_3(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_WORD_1(_word, \
+ _field1, _value1) \
+ EFX_POPULATE_WORD_2(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_WORD(_word) \
+ EFX_POPULATE_WORD_1(_word, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_WORD(_word) \
+ EFX_POPULATE_WORD_1(_word, \
+ EFX_WORD_0, 0xffff)
+
+/* Populate a byte field with various numbers of arguments */
+#define EFX_POPULATE_BYTE_10 EFX_POPULATE_BYTE
+
+#define EFX_POPULATE_BYTE_9(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_BYTE_10(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_BYTE_8(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_BYTE_9(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_BYTE_7(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_BYTE_8(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_BYTE_6(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_BYTE_7(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_BYTE_5(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_BYTE_6(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_BYTE_4(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_BYTE_5(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_BYTE_3(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_BYTE_4(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_BYTE_2(_byte, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_BYTE_3(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_BYTE_1(_byte, \
+ _field1, _value1) \
+ EFX_POPULATE_BYTE_2(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_BYTE(_byte) \
+ EFX_POPULATE_BYTE_1(_byte, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_BYTE(_byte) \
+ EFX_POPULATE_BYTE_1(_byte, \
+ EFX_BYTE_0, 0xff)
+
+/*
+ * Modify a named field within an already-populated structure. Used
+ * for read-modify-write operations.
+ */
+
+#define EFX_INSERT_FIELD64(_min, _max, _field, _value) \
+ __CPU_TO_LE_64(EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD32(_min, _max, _field, _value) \
+ __CPU_TO_LE_32(EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD16(_min, _max, _field, _value) \
+ __CPU_TO_LE_16(EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD8(_min, _max, _field, _value) \
+ __NATIVE_8(EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value))
+
+#define EFX_INPLACE_MASK64(_min, _max, _field) \
+ EFX_INSERT_FIELD64(_min, _max, _field, EFX_MASK64(_field))
+
+#define EFX_INPLACE_MASK32(_min, _max, _field) \
+ EFX_INSERT_FIELD32(_min, _max, _field, EFX_MASK32(_field))
+
+#define EFX_INPLACE_MASK16(_min, _max, _field) \
+ EFX_INSERT_FIELD16(_min, _max, _field, EFX_MASK16(_field))
+
+#define EFX_INPLACE_MASK8(_min, _max, _field) \
+ EFX_INSERT_FIELD8(_min, _max, _field, EFX_MASK8(_field))
+
+#define EFX_SET_OWORD_FIELD64(_oword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] = (((_oword).eo_u64[0] & \
+ ~EFX_INPLACE_MASK64(0, 63, _field)) | \
+ EFX_INSERT_FIELD64(0, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[1] = (((_oword).eo_u64[1] & \
+ ~EFX_INPLACE_MASK64(64, 127, _field)) | \
+ EFX_INSERT_FIELD64(64, 127, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_OWORD_FIELD32(_oword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] = (((_oword).eo_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[1] = (((_oword).eo_u32[1] & \
+ ~EFX_INPLACE_MASK32(32, 63, _field)) | \
+ EFX_INSERT_FIELD32(32, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[2] = (((_oword).eo_u32[2] & \
+ ~EFX_INPLACE_MASK32(64, 95, _field)) | \
+ EFX_INSERT_FIELD32(64, 95, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[3] = (((_oword).eo_u32[3] & \
+ ~EFX_INPLACE_MASK32(96, 127, _field)) | \
+ EFX_INSERT_FIELD32(96, 127, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_FIELD64(_qword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] = (((_qword).eq_u64[0] & \
+ ~EFX_INPLACE_MASK64(0, 63, _field)) | \
+ EFX_INSERT_FIELD64(0, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_FIELD32(_qword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] = (((_qword).eq_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[1] = (((_qword).eq_u32[1] & \
+ ~EFX_INPLACE_MASK32(32, 63, _field)) | \
+ EFX_INSERT_FIELD32(32, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_DWORD_FIELD(_dword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_dword).ed_u32[0] = (((_dword).ed_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_WORD_FIELD(_word, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_word).ew_u16[0] = (((_word).ew_u16[0] & \
+ ~EFX_INPLACE_MASK16(0, 15, _field)) | \
+ EFX_INSERT_FIELD16(0, 15, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_BYTE_FIELD(_byte, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_byte).eb_u8[0] = (((_byte).eb_u8[0] & \
+ ~EFX_INPLACE_MASK8(0, 7, _field)) | \
+ EFX_INSERT_FIELD8(0, 7, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Set or clear a numbered bit within an octword.
+ */
+
+#define EFX_SHIFT64(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 64) ? \
+ ((uint64_t)1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT32(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 32) ? \
+ ((uint32_t)1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT16(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 16) ? \
+ (uint16_t)(1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT8(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 8) ? \
+ (uint8_t)(1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SET_OWORD_BIT64(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ (_oword).eo_u64[1] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_OWORD_BIT32(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_oword).eo_u32[1] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ (_oword).eo_u32[2] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64))); \
+ (_oword).eo_u32[3] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_OWORD_BIT64(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ (_oword).eo_u64[1] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(64))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_OWORD_BIT32(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_oword).eo_u32[1] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ (_oword).eo_u32[2] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(64))); \
+ (_oword).eo_u32[3] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(96))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_OWORD_BIT64(_oword, _bit) \
+ (((_oword).eo_u64[0] & \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0)))) || \
+ ((_oword).eo_u64[1] & \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64)))))
+
+#define EFX_TEST_OWORD_BIT32(_oword, _bit) \
+ (((_oword).eo_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) || \
+ ((_oword).eo_u32[1] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32)))) || \
+ ((_oword).eo_u32[2] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64)))) || \
+ ((_oword).eo_u32[3] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96)))))
+
+
+#define EFX_SET_QWORD_BIT64(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_BIT32(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_qword).eq_u32[1] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_QWORD_BIT64(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_QWORD_BIT32(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_qword).eq_u32[1] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_QWORD_BIT64(_qword, _bit) \
+ (((_qword).eq_u64[0] & \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0)))) != 0)
+
+#define EFX_TEST_QWORD_BIT32(_qword, _bit) \
+ (((_qword).eq_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) || \
+ ((_qword).eq_u32[1] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32)))))
+
+
+#define EFX_SET_DWORD_BIT(_dword, _bit) \
+ do { \
+ (_dword).ed_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_DWORD_BIT(_dword, _bit) \
+ do { \
+ (_dword).ed_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_DWORD_BIT(_dword, _bit) \
+ (((_dword).ed_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_SET_WORD_BIT(_word, _bit) \
+ do { \
+ (_word).ew_u16[0] |= \
+ __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_WORD_BIT(_word, _bit) \
+ do { \
+ (_word).ew_u32[0] &= \
+ __CPU_TO_LE_16(~EFX_SHIFT16(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_WORD_BIT(_word, _bit) \
+ (((_word).ew_u16[0] & \
+ __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_SET_BYTE_BIT(_byte, _bit) \
+ do { \
+ (_byte).eb_u8[0] |= \
+ __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_BYTE_BIT(_byte, _bit) \
+ do { \
+ (_byte).eb_u8[0] &= \
+ __NATIVE_8(~EFX_SHIFT8(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_BYTE_BIT(_byte, _bit) \
+ (((_byte).eb_u8[0] & \
+ __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_OR_OWORD64(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u64[0] |= (_oword2).eo_u64[0]; \
+ (_oword1).eo_u64[1] |= (_oword2).eo_u64[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_OWORD32(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u32[0] |= (_oword2).eo_u32[0]; \
+ (_oword1).eo_u32[1] |= (_oword2).eo_u32[1]; \
+ (_oword1).eo_u32[2] |= (_oword2).eo_u32[2]; \
+ (_oword1).eo_u32[3] |= (_oword2).eo_u32[3]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_OWORD64(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u64[0] &= (_oword2).eo_u64[0]; \
+ (_oword1).eo_u64[1] &= (_oword2).eo_u64[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_OWORD32(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u32[0] &= (_oword2).eo_u32[0]; \
+ (_oword1).eo_u32[1] &= (_oword2).eo_u32[1]; \
+ (_oword1).eo_u32[2] &= (_oword2).eo_u32[2]; \
+ (_oword1).eo_u32[3] &= (_oword2).eo_u32[3]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_QWORD64(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u64[0] |= (_qword2).eq_u64[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_QWORD32(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u32[0] |= (_qword2).eq_u32[0]; \
+ (_qword1).eq_u32[1] |= (_qword2).eq_u32[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_QWORD64(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u64[0] &= (_qword2).eq_u64[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_QWORD32(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u32[0] &= (_qword2).eq_u32[0]; \
+ (_qword1).eq_u32[1] &= (_qword2).eq_u32[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_DWORD(_dword1, _dword2) \
+ do { \
+ (_dword1).ed_u32[0] |= (_dword2).ed_u32[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_DWORD(_dword1, _dword2) \
+ do { \
+ (_dword1).ed_u32[0] &= (_dword2).ed_u32[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_WORD(_word1, _word2) \
+ do { \
+ (_word1).ew_u16[0] |= (_word2).ew_u16[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_WORD(_word1, _word2) \
+ do { \
+ (_word1).ew_u16[0] &= (_word2).ew_u16[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_BYTE(_byte1, _byte2) \
+ do { \
+ (_byte1).eb_u8[0] |= (_byte2).eb_u8[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_BYTE(_byte1, _byte2) \
+ do { \
+ (_byte1).eb_u8[0] &= (_byte2).eb_u8[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#if EFSYS_USE_UINT64
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
+#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL64
+#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL64
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
+#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET64
+#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET64
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
+#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT64
+#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT64
+#define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT64
+#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT64
+#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT64
+#define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT64
+#define EFX_OR_OWORD EFX_OR_OWORD64
+#define EFX_AND_OWORD EFX_AND_OWORD64
+#define EFX_OR_QWORD EFX_OR_QWORD64
+#define EFX_AND_QWORD EFX_AND_QWORD64
+#else
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
+#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL32
+#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL32
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
+#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET32
+#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET32
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
+#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT32
+#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT32
+#define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT32
+#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT32
+#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT32
+#define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT32
+#define EFX_OR_OWORD EFX_OR_OWORD32
+#define EFX_AND_OWORD EFX_AND_OWORD32
+#define EFX_OR_QWORD EFX_OR_QWORD32
+#define EFX_AND_QWORD EFX_AND_QWORD32
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_TYPES_H */
diff --git a/drivers/net/sfc/base/efx_vpd.c b/drivers/net/sfc/base/efx_vpd.c
new file mode 100644
index 00000000..1e47df2c
--- /dev/null
+++ b/drivers/net/sfc/base/efx_vpd.c
@@ -0,0 +1,1016 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_VPD
+
+#define TAG_TYPE_LBN 7
+#define TAG_TYPE_WIDTH 1
+#define TAG_TYPE_LARGE_ITEM_DECODE 1
+#define TAG_TYPE_SMALL_ITEM_DECODE 0
+
+#define TAG_SMALL_ITEM_NAME_LBN 3
+#define TAG_SMALL_ITEM_NAME_WIDTH 4
+#define TAG_SMALL_ITEM_SIZE_LBN 0
+#define TAG_SMALL_ITEM_SIZE_WIDTH 3
+
+#define TAG_LARGE_ITEM_NAME_LBN 0
+#define TAG_LARGE_ITEM_NAME_WIDTH 7
+
+#define TAG_NAME_END_DECODE 0x0f
+#define TAG_NAME_ID_STRING_DECODE 0x02
+#define TAG_NAME_VPD_R_DECODE 0x10
+#define TAG_NAME_VPD_W_DECODE 0x11
+
+#if EFSYS_OPT_SIENA
+
+static const efx_vpd_ops_t __efx_vpd_siena_ops = {
+ siena_vpd_init, /* evpdo_init */
+ siena_vpd_size, /* evpdo_size */
+ siena_vpd_read, /* evpdo_read */
+ siena_vpd_verify, /* evpdo_verify */
+ siena_vpd_reinit, /* evpdo_reinit */
+ siena_vpd_get, /* evpdo_get */
+ siena_vpd_set, /* evpdo_set */
+ siena_vpd_next, /* evpdo_next */
+ siena_vpd_write, /* evpdo_write */
+ siena_vpd_fini, /* evpdo_fini */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static const efx_vpd_ops_t __efx_vpd_ef10_ops = {
+ ef10_vpd_init, /* evpdo_init */
+ ef10_vpd_size, /* evpdo_size */
+ ef10_vpd_read, /* evpdo_read */
+ ef10_vpd_verify, /* evpdo_verify */
+ ef10_vpd_reinit, /* evpdo_reinit */
+ ef10_vpd_get, /* evpdo_get */
+ ef10_vpd_set, /* evpdo_set */
+ ef10_vpd_next, /* evpdo_next */
+ ef10_vpd_write, /* evpdo_write */
+ ef10_vpd_fini, /* evpdo_fini */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_vpd_init(
+ __in efx_nic_t *enp)
+{
+ const efx_vpd_ops_t *evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_VPD));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ evpdop = &__efx_vpd_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ evpdop = &__efx_vpd_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ evpdop = &__efx_vpd_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (evpdop->evpdo_init != NULL) {
+ if ((rc = evpdop->evpdo_init(enp)) != 0)
+ goto fail2;
+ }
+
+ enp->en_evpdop = evpdop;
+ enp->en_mod_flags |= EFX_MOD_VPD;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_size(enp, sizep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_read(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_verify(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if (evpdop->evpdo_reinit == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = evpdop->evpdo_reinit(enp, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_get(enp, data, size, evvp)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_set(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_set(enp, data, size, evvp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_next(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_next(enp, data, size, evvp, contp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_write(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_vpd_next_tag(
+ __in caddr_t data,
+ __in size_t size,
+ __inout unsigned int *offsetp,
+ __out efx_vpd_tag_t *tagp,
+ __out uint16_t *lengthp)
+{
+ efx_byte_t byte;
+ efx_word_t word;
+ uint8_t name;
+ uint16_t length;
+ size_t headlen;
+ efx_rc_t rc;
+
+ if (*offsetp >= size) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ EFX_POPULATE_BYTE_1(byte, EFX_BYTE_0, data[*offsetp]);
+
+ switch (EFX_BYTE_FIELD(byte, TAG_TYPE)) {
+ case TAG_TYPE_SMALL_ITEM_DECODE:
+ headlen = 1;
+
+ name = EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_NAME);
+ length = (uint16_t)EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_SIZE);
+
+ break;
+
+ case TAG_TYPE_LARGE_ITEM_DECODE:
+ headlen = 3;
+
+ if (*offsetp + headlen > size) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ name = EFX_BYTE_FIELD(byte, TAG_LARGE_ITEM_NAME);
+ EFX_POPULATE_WORD_2(word,
+ EFX_BYTE_0, data[*offsetp + 1],
+ EFX_BYTE_1, data[*offsetp + 2]);
+ length = EFX_WORD_FIELD(word, EFX_WORD_0);
+
+ break;
+
+ default:
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ if (*offsetp + headlen + length > size) {
+ rc = EFAULT;
+ goto fail3;
+ }
+
+ EFX_STATIC_ASSERT(TAG_NAME_END_DECODE == EFX_VPD_END);
+ EFX_STATIC_ASSERT(TAG_NAME_ID_STRING_DECODE == EFX_VPD_ID);
+ EFX_STATIC_ASSERT(TAG_NAME_VPD_R_DECODE == EFX_VPD_RO);
+ EFX_STATIC_ASSERT(TAG_NAME_VPD_W_DECODE == EFX_VPD_RW);
+ if (name != EFX_VPD_END && name != EFX_VPD_ID &&
+ name != EFX_VPD_RO) {
+ rc = EFAULT;
+ goto fail4;
+ }
+
+ *tagp = name;
+ *lengthp = length;
+ *offsetp += headlen;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_vpd_next_keyword(
+ __in_bcount(size) caddr_t tag,
+ __in size_t size,
+ __in unsigned int pos,
+ __out efx_vpd_keyword_t *keywordp,
+ __out uint8_t *lengthp)
+{
+ efx_vpd_keyword_t keyword;
+ uint8_t length;
+ efx_rc_t rc;
+
+ if (pos + 3U > size) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ keyword = EFX_VPD_KEYWORD(tag[pos], tag[pos + 1]);
+ length = tag[pos + 2];
+
+ if (length == 0 || pos + 3U + length > size) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ *keywordp = keyword;
+ *lengthp = length;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_length(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out size_t *lengthp)
+{
+ efx_vpd_tag_t tag;
+ unsigned int offset;
+ uint16_t taglen;
+ efx_rc_t rc;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+ offset += taglen;
+ if (tag == EFX_VPD_END)
+ break;
+ }
+
+ *lengthp = offset;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_verify(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out_opt boolean_t *cksummedp)
+{
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int i;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t cksum;
+ boolean_t cksummed = B_FALSE;
+ efx_rc_t rc;
+
+ /*
+ * Parse every tag,keyword in the existing VPD. If the csum is present,
+ * the assert it is correct, and is the final keyword in the RO block.
+ */
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+ if (tag == EFX_VPD_END)
+ break;
+ else if (tag == EFX_VPD_ID)
+ goto done;
+
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ /* RV keyword must be the last in the block */
+ if (cksummed) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail3;
+
+ if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ cksum = 0;
+ for (i = 0; i < offset + pos + 4; i++)
+ cksum += data[i];
+
+ if (cksum != 0) {
+ rc = EFAULT;
+ goto fail4;
+ }
+
+ cksummed = B_TRUE;
+ }
+ }
+
+ done:
+ offset += taglen;
+ }
+
+ if (!cksummed) {
+ rc = EFAULT;
+ goto fail5;
+ }
+
+ if (cksummedp != NULL)
+ *cksummedp = cksummed;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint8_t __efx_vpd_blank_pid[] = {
+ /* Large resource type ID length 1 */
+ 0x82, 0x01, 0x00,
+ /* Product name ' ' */
+ 0x32,
+};
+
+static uint8_t __efx_vpd_blank_r[] = {
+ /* Large resource type VPD-R length 4 */
+ 0x90, 0x04, 0x00,
+ /* RV keyword length 1 */
+ 'R', 'V', 0x01,
+ /* RV payload checksum */
+ 0x00,
+};
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_reinit(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t wantpid)
+{
+ unsigned int offset = 0;
+ unsigned int pos;
+ efx_byte_t byte;
+ uint8_t cksum;
+ efx_rc_t rc;
+
+ if (size < 0x100) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if (wantpid) {
+ memcpy(data + offset, __efx_vpd_blank_pid,
+ sizeof (__efx_vpd_blank_pid));
+ offset += sizeof (__efx_vpd_blank_pid);
+ }
+
+ memcpy(data + offset, __efx_vpd_blank_r, sizeof (__efx_vpd_blank_r));
+ offset += sizeof (__efx_vpd_blank_r);
+
+ /* Update checksum */
+ cksum = 0;
+ for (pos = 0; pos < offset; pos++)
+ cksum += data[pos];
+ data[offset - 1] -= cksum;
+
+ /* Append trailing tag */
+ EFX_POPULATE_BYTE_3(byte,
+ TAG_TYPE, TAG_TYPE_SMALL_ITEM_DECODE,
+ TAG_SMALL_ITEM_NAME, TAG_NAME_END_DECODE,
+ TAG_SMALL_ITEM_SIZE, 0);
+ data[offset] = EFX_BYTE_FIELD(byte, EFX_BYTE_0);
+ offset++;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_next(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_tag_t *tagp,
+ __out efx_vpd_keyword_t *keywordp,
+ __out_opt unsigned int *payloadp,
+ __out_opt uint8_t *paylenp,
+ __inout unsigned int *contp)
+{
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword = 0;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int index;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t paylen;
+ efx_rc_t rc;
+
+ offset = index = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+
+ if (tag == EFX_VPD_END) {
+ keyword = 0;
+ paylen = 0;
+ index = 0;
+ break;
+ }
+
+ if (tag == EFX_VPD_ID) {
+ if (index++ == *contp) {
+ EFSYS_ASSERT3U(taglen, <, 0x100);
+ keyword = 0;
+ paylen = (uint8_t)MIN(taglen, 0xff);
+
+ goto done;
+ }
+ } else {
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail2;
+
+ if (index++ == *contp) {
+ offset += pos + 3;
+ paylen = keylen;
+
+ goto done;
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+done:
+ *tagp = tag;
+ *keywordp = keyword;
+ if (payloadp != NULL)
+ *payloadp = offset;
+ if (paylenp != NULL)
+ *paylenp = paylen;
+
+ *contp = index;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_get(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_tag_t tag,
+ __in efx_vpd_keyword_t keyword,
+ __out unsigned int *payloadp,
+ __out uint8_t *paylenp)
+{
+ efx_vpd_tag_t itag;
+ efx_vpd_keyword_t ikeyword;
+ unsigned int offset;
+ unsigned int pos;
+ uint16_t taglen;
+ uint8_t keylen;
+ efx_rc_t rc;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &itag, &taglen)) != 0)
+ goto fail1;
+ if (itag == EFX_VPD_END)
+ break;
+
+ if (itag == tag) {
+ if (itag == EFX_VPD_ID) {
+ EFSYS_ASSERT3U(taglen, <, 0x100);
+
+ *paylenp = (uint8_t)MIN(taglen, 0xff);
+ *payloadp = offset;
+ return (0);
+ }
+
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &ikeyword, &keylen)) != 0)
+ goto fail2;
+
+ if (ikeyword == keyword) {
+ *paylenp = keylen;
+ *payloadp = offset + pos + 3;
+ return (0);
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+ /* Not an error */
+ return (ENOENT);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_set(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_word_t word;
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int taghead;
+ unsigned int source;
+ unsigned int dest;
+ unsigned int i;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t cksum;
+ size_t used;
+ efx_rc_t rc;
+
+ switch (evvp->evv_tag) {
+ case EFX_VPD_ID:
+ if (evvp->evv_keyword != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Can't delete the ID keyword */
+ if (evvp->evv_length == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ case EFX_VPD_RO:
+ if (evvp->evv_keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Determine total size of all current tags */
+ if ((rc = efx_vpd_hunk_length(data, size, &used)) != 0)
+ goto fail2;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ taghead = offset;
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail3;
+ if (tag == EFX_VPD_END)
+ break;
+ else if (tag != evvp->evv_tag) {
+ offset += taglen;
+ continue;
+ }
+
+ /* We only support modifying large resource tags */
+ if (offset - taghead != 3) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /*
+ * Work out the offset of the byte immediately after the
+ * old (=source) and new (=dest) new keyword/tag
+ */
+ pos = 0;
+ if (tag == EFX_VPD_ID) {
+ source = offset + taglen;
+ dest = offset + evvp->evv_length;
+ goto check_space;
+ }
+
+ EFSYS_ASSERT3U(tag, ==, EFX_VPD_RO);
+ source = dest = 0;
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail5;
+
+ if (keyword == evvp->evv_keyword &&
+ evvp->evv_length == 0) {
+ /* Deleting this keyword */
+ source = offset + pos + 3 + keylen;
+ dest = offset + pos;
+ break;
+
+ } else if (keyword == evvp->evv_keyword) {
+ /* Adjusting this keyword */
+ source = offset + pos + 3 + keylen;
+ dest = offset + pos + 3 + evvp->evv_length;
+ break;
+
+ } else if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ /* The RV keyword must be at the end */
+ EFSYS_ASSERT3U(pos + 3 + keylen, ==, taglen);
+
+ /*
+ * The keyword doesn't already exist. If the
+ * user deleting a non-existant keyword then
+ * this is a no-op.
+ */
+ if (evvp->evv_length == 0)
+ return (0);
+
+ /* Insert this keyword before the RV keyword */
+ source = offset + pos;
+ dest = offset + pos + 3 + evvp->evv_length;
+ break;
+ }
+ }
+
+ check_space:
+ if (used + dest > size + source) {
+ rc = ENOSPC;
+ goto fail6;
+ }
+
+ /* Move trailing data */
+ (void) memmove(data + dest, data + source, used - source);
+
+ /* Copy contents */
+ memcpy(data + dest - evvp->evv_length, evvp->evv_value,
+ evvp->evv_length);
+
+ /* Insert new keyword header if required */
+ if (tag != EFX_VPD_ID && evvp->evv_length > 0) {
+ EFX_POPULATE_WORD_1(word, EFX_WORD_0,
+ evvp->evv_keyword);
+ data[offset + pos + 0] =
+ EFX_WORD_FIELD(word, EFX_BYTE_0);
+ data[offset + pos + 1] =
+ EFX_WORD_FIELD(word, EFX_BYTE_1);
+ data[offset + pos + 2] = evvp->evv_length;
+ }
+
+ /* Modify tag length (large resource type) */
+ taglen += (dest - source);
+ EFX_POPULATE_WORD_1(word, EFX_WORD_0, taglen);
+ data[offset - 2] = EFX_WORD_FIELD(word, EFX_BYTE_0);
+ data[offset - 1] = EFX_WORD_FIELD(word, EFX_BYTE_1);
+
+ goto checksum;
+ }
+
+ /* Unable to find the matching tag */
+ rc = ENOENT;
+ goto fail7;
+
+checksum:
+ /* Find the RV tag, and update the checksum */
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail8;
+ if (tag == EFX_VPD_END)
+ break;
+ if (tag == EFX_VPD_RO) {
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail9;
+
+ if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ cksum = 0;
+ for (i = 0; i < offset + pos + 3; i++)
+ cksum += data[i];
+ data[i] = -cksum;
+ break;
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+ /* Zero out the unused portion */
+ (void) memset(data + offset + taglen, 0xff, size - offset - taglen);
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if (evpdop->evpdo_fini != NULL)
+ evpdop->evpdo_fini(enp);
+
+ enp->en_evpdop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_VPD;
+}
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/drivers/net/sfc/base/hunt_impl.h b/drivers/net/sfc/base/hunt_impl.h
new file mode 100644
index 00000000..0e0c870f
--- /dev/null
+++ b/drivers/net/sfc/base/hunt_impl.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_HUNT_IMPL_H
+#define _SYS_HUNT_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+#include "efx_mcdi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Missing register definitions */
+#ifndef ER_DZ_TX_PIOBUF_OFST
+#define ER_DZ_TX_PIOBUF_OFST 0x00001000
+#endif
+#ifndef ER_DZ_TX_PIOBUF_STEP
+#define ER_DZ_TX_PIOBUF_STEP 8192
+#endif
+#ifndef ER_DZ_TX_PIOBUF_ROWS
+#define ER_DZ_TX_PIOBUF_ROWS 2048
+#endif
+
+#ifndef ER_DZ_TX_PIOBUF_SIZE
+#define ER_DZ_TX_PIOBUF_SIZE 2048
+#endif
+
+#define HUNT_PIOBUF_NBUFS (16)
+#define HUNT_PIOBUF_SIZE (ER_DZ_TX_PIOBUF_SIZE)
+
+#define HUNT_MIN_PIO_ALLOC_SIZE (HUNT_PIOBUF_SIZE / 32)
+
+
+/* NIC */
+
+extern __checkReturn efx_rc_t
+hunt_board_cfg(
+ __in efx_nic_t *enp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HUNT_IMPL_H */
diff --git a/drivers/net/sfc/base/hunt_nic.c b/drivers/net/sfc/base/hunt_nic.c
new file mode 100644
index 00000000..addbf1c5
--- /dev/null
+++ b/drivers/net/sfc/base/hunt_nic.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON
+
+#include "ef10_tlv_layout.h"
+
+static __checkReturn efx_rc_t
+hunt_nic_get_required_pcie_bandwidth(
+ __in efx_nic_t *enp,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t port_modes;
+ uint32_t max_port_mode;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /*
+ * On Huntington, the firmware may not give us the current port mode, so
+ * we need to go by the set of available port modes and assume the most
+ * capable mode is in use.
+ */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, NULL)) != 0) {
+ /* No port mode info available */
+ bandwidth = 0;
+ goto out;
+ }
+
+ if (port_modes & (1 << TLV_PORT_MODE_40G_40G)) {
+ /*
+ * This needs the full PCIe bandwidth (and could use
+ * more) - roughly 64 Gbit/s for 8 lanes of Gen3.
+ */
+ if ((rc = efx_nic_calculate_pcie_link_bandwidth(8,
+ EFX_PCIE_LINK_SPEED_GEN3, &bandwidth)) != 0)
+ goto fail1;
+ } else {
+ if (port_modes & (1 << TLV_PORT_MODE_40G)) {
+ max_port_mode = TLV_PORT_MODE_40G;
+ } else if (port_modes & (1 << TLV_PORT_MODE_10G_10G_10G_10G)) {
+ max_port_mode = TLV_PORT_MODE_10G_10G_10G_10G;
+ } else {
+ /* Assume two 10G ports */
+ max_port_mode = TLV_PORT_MODE_10G_10G;
+ }
+
+ if ((rc = ef10_nic_get_port_mode_bandwidth(max_port_mode,
+ &bandwidth)) != 0)
+ goto fail2;
+ }
+
+out:
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+hunt_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t mac_addr[6] = { 0 };
+ uint32_t board_type = 0;
+ ef10_link_state_t els;
+ efx_port_t *epp = &(enp->en_port);
+ uint32_t port;
+ uint32_t pf;
+ uint32_t vf;
+ uint32_t mask;
+ uint32_t flags;
+ uint32_t sysclk, dpcpu_clk;
+ uint32_t base, nvec;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
+ goto fail1;
+
+ /*
+ * NOTE: The MCDI protocol numbers ports from zero.
+ * The common code MCDI interface numbers ports from one.
+ */
+ emip->emi_port = port + 1;
+
+ if ((rc = ef10_external_port_mapping(enp, port,
+ &encp->enc_external_port)) != 0)
+ goto fail2;
+
+ /*
+ * Get PCIe function number from firmware (used for
+ * per-function privilege and dynamic config info).
+ * - PCIe PF: pf = PF number, vf = 0xffff.
+ * - PCIe VF: pf = parent PF, vf = VF number.
+ */
+ if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
+ goto fail3;
+
+ encp->enc_pf = pf;
+ encp->enc_vf = vf;
+
+ /* MAC address for this function */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
+ if ((rc == 0) && (mac_addr[0] & 0x02)) {
+ /*
+ * If the static config does not include a global MAC
+ * address pool then the board may return a locally
+ * administered MAC address (this should only happen on
+ * incorrectly programmed boards).
+ */
+ rc = EINVAL;
+ }
+ } else {
+ rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
+ }
+ if (rc != 0)
+ goto fail4;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ /* Board configuration */
+ rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
+ if (rc != 0) {
+ /* Unprivileged functions may not be able to read board cfg */
+ if (rc == EACCES)
+ board_type = 0;
+ else
+ goto fail5;
+ }
+
+ encp->enc_board_type = board_type;
+ encp->enc_clk_mult = 1; /* not used for Huntington */
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail7;
+ epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+
+ /*
+ * Enable firmware workarounds for hardware errata.
+ * Expected responses are:
+ * - 0 (zero):
+ * Success: workaround enabled or disabled as requested.
+ * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
+ * Firmware does not support the MC_CMD_WORKAROUND request.
+ * (assume that the workaround is not supported).
+ * - MC_CMD_ERR_ENOENT (reported as ENOENT):
+ * Firmware does not support the requested workaround.
+ * - MC_CMD_ERR_EPERM (reported as EACCES):
+ * Unprivileged function cannot enable/disable workarounds.
+ *
+ * See efx_mcdi_request_errcode() for MCDI error translations.
+ */
+
+ /*
+ * If the bug35388 workaround is enabled, then use an indirect access
+ * method to avoid unsafe EVQ writes.
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG35388, B_TRUE,
+ NULL);
+ if ((rc == 0) || (rc == EACCES))
+ encp->enc_bug35388_workaround = B_TRUE;
+ else if ((rc == ENOTSUP) || (rc == ENOENT))
+ encp->enc_bug35388_workaround = B_FALSE;
+ else
+ goto fail8;
+
+ /*
+ * If the bug41750 workaround is enabled, then do not test interrupts,
+ * as the test will fail (seen with Greenport controllers).
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG41750, B_TRUE,
+ NULL);
+ if (rc == 0) {
+ encp->enc_bug41750_workaround = B_TRUE;
+ } else if (rc == EACCES) {
+ /* Assume a controller with 40G ports needs the workaround. */
+ if (epp->ep_default_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+ encp->enc_bug41750_workaround = B_TRUE;
+ else
+ encp->enc_bug41750_workaround = B_FALSE;
+ } else if ((rc == ENOTSUP) || (rc == ENOENT)) {
+ encp->enc_bug41750_workaround = B_FALSE;
+ } else {
+ goto fail9;
+ }
+ if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+ /* Interrupt testing does not work for VFs. See bug50084. */
+ encp->enc_bug41750_workaround = B_TRUE;
+ }
+
+ /*
+ * If the bug26807 workaround is enabled, then firmware has enabled
+ * support for chained multicast filters. Firmware will reset (FLR)
+ * functions which have filters in the hardware filter table when the
+ * workaround is enabled/disabled.
+ *
+ * We must recheck if the workaround is enabled after inserting the
+ * first hardware filter, in case it has been changed since this check.
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807,
+ B_TRUE, &flags);
+ if (rc == 0) {
+ encp->enc_bug26807_workaround = B_TRUE;
+ if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) {
+ /*
+ * Other functions had installed filters before the
+ * workaround was enabled, and they have been reset
+ * by firmware.
+ */
+ EFSYS_PROBE(bug26807_workaround_flr_done);
+ /* FIXME: bump MC warm boot count ? */
+ }
+ } else if (rc == EACCES) {
+ /*
+ * Unprivileged functions cannot enable the workaround in older
+ * firmware.
+ */
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else if ((rc == ENOTSUP) || (rc == ENOENT)) {
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else {
+ goto fail10;
+ }
+
+ /* Get clock frequencies (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
+ goto fail11;
+
+ /*
+ * The Huntington timer quantum is 1536 sysclk cycles, documented for
+ * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ */
+ encp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */
+ if (encp->enc_bug35388_workaround) {
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ ERF_DD_EVQ_IND_TIMER_VAL_WIDTH) / 1000;
+ } else {
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+ }
+
+ encp->enc_bug61265_workaround = B_FALSE; /* Medford only */
+
+ /* Check capabilities of running datapath firmware */
+ if ((rc = ef10_get_datapath_caps(enp)) != 0)
+ goto fail12;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+ encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+
+ encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
+ /* No boundary crossing limits */
+ encp->enc_tx_dma_desc_boundary = 0;
+
+ /*
+ * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
+ * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
+ * resources (allocated to this PCIe function), which is zero until
+ * after we have allocated VIs.
+ */
+ encp->enc_evq_limit = 1024;
+ encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
+ encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
+
+ /*
+ * The workaround for bug35388 uses the top bit of transmit queue
+ * descriptor writes, preventing the use of 4096 descriptor TXQs.
+ */
+ encp->enc_txq_max_ndescs = encp->enc_bug35388_workaround ? 2048 : 4096;
+
+ encp->enc_buftbl_limit = 0xFFFFFFFF;
+
+ encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS;
+ encp->enc_piobuf_size = HUNT_PIOBUF_SIZE;
+ encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE;
+
+ /*
+ * Get the current privilege mask. Note that this may be modified
+ * dynamically, so this value is informational only. DO NOT use
+ * the privilege mask to check for sufficient privileges, as that
+ * can result in time-of-check/time-of-use bugs.
+ */
+ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
+ goto fail13;
+ encp->enc_privilege_mask = mask;
+
+ /* Get interrupt vector limits */
+ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(encp))
+ goto fail14;
+
+ /* Ignore error (cannot query vector limits from a VF). */
+ base = 0;
+ nvec = 1024;
+ }
+ encp->enc_intr_vec_base = base;
+ encp->enc_intr_limit = nvec;
+
+ /*
+ * Maximum number of bytes into the frame the TCP header can start for
+ * firmware assisted TSO to work.
+ */
+ encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+
+ if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0)
+ goto fail15;
+ encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+
+ /* All Huntington devices have a PCIe Gen3, 8 lane connector */
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
+ return (0);
+
+fail15:
+ EFSYS_PROBE(fail15);
+fail14:
+ EFSYS_PROBE(fail14);
+fail13:
+ EFSYS_PROBE(fail13);
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#endif /* EFSYS_OPT_HUNTINGTON */
diff --git a/drivers/net/sfc/base/mcdi_mon.c b/drivers/net/sfc/base/mcdi_mon.c
new file mode 100644
index 00000000..c5360c31
--- /dev/null
+++ b/drivers/net/sfc/base/mcdi_mon.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MON_MCDI
+
+#if EFSYS_OPT_MON_STATS
+
+#define MCDI_MON_NEXT_PAGE ((uint16_t)0xfffe)
+#define MCDI_MON_INVALID_SENSOR ((uint16_t)0xfffd)
+#define MCDI_MON_PAGE_SIZE 0x20
+
+/* Bitmasks of valid port(s) for each sensor */
+#define MCDI_MON_PORT_NONE (0x00)
+#define MCDI_MON_PORT_P1 (0x01)
+#define MCDI_MON_PORT_P2 (0x02)
+#define MCDI_MON_PORT_P3 (0x04)
+#define MCDI_MON_PORT_P4 (0x08)
+#define MCDI_MON_PORT_Px (0xFFFF)
+
+/* Get port mask from one-based MCDI port number */
+#define MCDI_MON_PORT_MASK(_emip) (1U << ((_emip)->emi_port - 1))
+
+/* Entry for MCDI sensor in sensor map */
+#define STAT(portmask, stat) \
+ { (MCDI_MON_PORT_##portmask), (EFX_MON_STAT_##stat) }
+
+/* Entry for sensor next page flag in sensor map */
+#define STAT_NEXT_PAGE() \
+ { MCDI_MON_PORT_NONE, MCDI_MON_NEXT_PAGE }
+
+/* Placeholder for gaps in the array */
+#define STAT_NO_SENSOR() \
+ { MCDI_MON_PORT_NONE, MCDI_MON_INVALID_SENSOR }
+
+/* Map from MC sensors to monitor statistics */
+static const struct mcdi_sensor_map_s {
+ uint16_t msm_port_mask;
+ uint16_t msm_stat;
+} mcdi_sensor_map[] = {
+ /* Sensor page 0 MC_CMD_SENSOR_xxx */
+ STAT(Px, INT_TEMP), /* 0x00 CONTROLLER_TEMP */
+ STAT(Px, EXT_TEMP), /* 0x01 PHY_COMMON_TEMP */
+ STAT(Px, INT_COOLING), /* 0x02 CONTROLLER_COOLING */
+ STAT(P1, EXT_TEMP), /* 0x03 PHY0_TEMP */
+ STAT(P1, EXT_COOLING), /* 0x04 PHY0_COOLING */
+ STAT(P2, EXT_TEMP), /* 0x05 PHY1_TEMP */
+ STAT(P2, EXT_COOLING), /* 0x06 PHY1_COOLING */
+ STAT(Px, 1V), /* 0x07 IN_1V0 */
+ STAT(Px, 1_2V), /* 0x08 IN_1V2 */
+ STAT(Px, 1_8V), /* 0x09 IN_1V8 */
+ STAT(Px, 2_5V), /* 0x0a IN_2V5 */
+ STAT(Px, 3_3V), /* 0x0b IN_3V3 */
+ STAT(Px, 12V), /* 0x0c IN_12V0 */
+ STAT(Px, 1_2VA), /* 0x0d IN_1V2A */
+ STAT(Px, VREF), /* 0x0e IN_VREF */
+ STAT(Px, VAOE), /* 0x0f OUT_VAOE */
+ STAT(Px, AOE_TEMP), /* 0x10 AOE_TEMP */
+ STAT(Px, PSU_AOE_TEMP), /* 0x11 PSU_AOE_TEMP */
+ STAT(Px, PSU_TEMP), /* 0x12 PSU_TEMP */
+ STAT(Px, FAN0), /* 0x13 FAN_0 */
+ STAT(Px, FAN1), /* 0x14 FAN_1 */
+ STAT(Px, FAN2), /* 0x15 FAN_2 */
+ STAT(Px, FAN3), /* 0x16 FAN_3 */
+ STAT(Px, FAN4), /* 0x17 FAN_4 */
+ STAT(Px, VAOE_IN), /* 0x18 IN_VAOE */
+ STAT(Px, IAOE), /* 0x19 OUT_IAOE */
+ STAT(Px, IAOE_IN), /* 0x1a IN_IAOE */
+ STAT(Px, NIC_POWER), /* 0x1b NIC_POWER */
+ STAT(Px, 0_9V), /* 0x1c IN_0V9 */
+ STAT(Px, I0_9V), /* 0x1d IN_I0V9 */
+ STAT(Px, I1_2V), /* 0x1e IN_I1V2 */
+ STAT_NEXT_PAGE(), /* 0x1f Next page flag (not a sensor) */
+
+ /* Sensor page 1 MC_CMD_SENSOR_xxx */
+ STAT(Px, 0_9V_ADC), /* 0x20 IN_0V9_ADC */
+ STAT(Px, INT_TEMP2), /* 0x21 CONTROLLER_2_TEMP */
+ STAT(Px, VREG_TEMP), /* 0x22 VREG_INTERNAL_TEMP */
+ STAT(Px, VREG_0_9V_TEMP), /* 0x23 VREG_0V9_TEMP */
+ STAT(Px, VREG_1_2V_TEMP), /* 0x24 VREG_1V2_TEMP */
+ STAT(Px, INT_VPTAT), /* 0x25 CTRLR. VPTAT */
+ STAT(Px, INT_ADC_TEMP), /* 0x26 CTRLR. INTERNAL_TEMP */
+ STAT(Px, EXT_VPTAT), /* 0x27 CTRLR. VPTAT_EXTADC */
+ STAT(Px, EXT_ADC_TEMP), /* 0x28 CTRLR. INTERNAL_TEMP_EXTADC */
+ STAT(Px, AMBIENT_TEMP), /* 0x29 AMBIENT_TEMP */
+ STAT(Px, AIRFLOW), /* 0x2a AIRFLOW */
+ STAT(Px, VDD08D_VSS08D_CSR), /* 0x2b VDD08D_VSS08D_CSR */
+ STAT(Px, VDD08D_VSS08D_CSR_EXTADC), /* 0x2c VDD08D_VSS08D_CSR_EXTADC */
+ STAT(Px, HOTPOINT_TEMP), /* 0x2d HOTPOINT_TEMP */
+ STAT(P1, PHY_POWER_SWITCH_PORT0), /* 0x2e PHY_POWER_SWITCH_PORT0 */
+ STAT(P2, PHY_POWER_SWITCH_PORT1), /* 0x2f PHY_POWER_SWITCH_PORT1 */
+ STAT(Px, MUM_VCC), /* 0x30 MUM_VCC */
+ STAT(Px, 0V9_A), /* 0x31 0V9_A */
+ STAT(Px, I0V9_A), /* 0x32 I0V9_A */
+ STAT(Px, 0V9_A_TEMP), /* 0x33 0V9_A_TEMP */
+ STAT(Px, 0V9_B), /* 0x34 0V9_B */
+ STAT(Px, I0V9_B), /* 0x35 I0V9_B */
+ STAT(Px, 0V9_B_TEMP), /* 0x36 0V9_B_TEMP */
+ STAT(Px, CCOM_AVREG_1V2_SUPPLY), /* 0x37 CCOM_AVREG_1V2_SUPPLY */
+ STAT(Px, CCOM_AVREG_1V2_SUPPLY_EXT_ADC),
+ /* 0x38 CCOM_AVREG_1V2_SUPPLY_EXT_ADC */
+ STAT(Px, CCOM_AVREG_1V8_SUPPLY), /* 0x39 CCOM_AVREG_1V8_SUPPLY */
+ STAT(Px, CCOM_AVREG_1V8_SUPPLY_EXT_ADC),
+ /* 0x3a CCOM_AVREG_1V8_SUPPLY_EXT_ADC */
+ STAT_NO_SENSOR(), /* 0x3b (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3c (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3d (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3e (no sensor) */
+ STAT_NEXT_PAGE(), /* 0x3f Next page flag (not a sensor) */
+
+ /* Sensor page 2 MC_CMD_SENSOR_xxx */
+ STAT(Px, CONTROLLER_MASTER_VPTAT), /* 0x40 MASTER_VPTAT */
+ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP), /* 0x41 MASTER_INT_TEMP */
+ STAT(Px, CONTROLLER_MASTER_VPTAT_EXT_ADC), /* 0x42 MAST_VPTAT_EXT_ADC */
+ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC),
+ /* 0x43 MASTER_INTERNAL_TEMP_EXT_ADC */
+ STAT(Px, CONTROLLER_SLAVE_VPTAT), /* 0x44 SLAVE_VPTAT */
+ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP), /* 0x45 SLAVE_INTERNAL_TEMP */
+ STAT(Px, CONTROLLER_SLAVE_VPTAT_EXT_ADC), /* 0x46 SLAVE_VPTAT_EXT_ADC */
+ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC),
+ /* 0x47 SLAVE_INTERNAL_TEMP_EXT_ADC */
+ STAT_NO_SENSOR(), /* 0x48 (no sensor) */
+ STAT(Px, SODIMM_VOUT), /* 0x49 SODIMM_VOUT */
+ STAT(Px, SODIMM_0_TEMP), /* 0x4a SODIMM_0_TEMP */
+ STAT(Px, SODIMM_1_TEMP), /* 0x4b SODIMM_1_TEMP */
+ STAT(Px, PHY0_VCC), /* 0x4c PHY0_VCC */
+ STAT(Px, PHY1_VCC), /* 0x4d PHY1_VCC */
+ STAT(Px, CONTROLLER_TDIODE_TEMP), /* 0x4e CONTROLLER_TDIODE_TEMP */
+ STAT(Px, BOARD_FRONT_TEMP), /* 0x4f BOARD_FRONT_TEMP */
+ STAT(Px, BOARD_BACK_TEMP), /* 0x50 BOARD_BACK_TEMP */
+};
+
+#define MCDI_STATIC_SENSOR_ASSERT(_field) \
+ EFX_STATIC_ASSERT(MC_CMD_SENSOR_STATE_ ## _field \
+ == EFX_MON_STAT_STATE_ ## _field)
+
+static void
+mcdi_mon_decode_stats(
+ __in efx_nic_t *enp,
+ __in_bcount(sensor_mask_size) uint32_t *sensor_mask,
+ __in size_t sensor_mask_size,
+ __in_opt efsys_mem_t *esmp,
+ __out_bcount_opt(sensor_mask_size) uint32_t *stat_maskp,
+ __inout_ecount_opt(EFX_MON_NSTATS) efx_mon_stat_value_t *stat)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ uint16_t port_mask;
+ uint16_t sensor;
+ size_t sensor_max;
+ uint32_t stat_mask[(EFX_ARRAY_SIZE(mcdi_sensor_map) + 31) / 32];
+ uint32_t idx = 0;
+ uint32_t page = 0;
+
+ /* Assert the MC_CMD_SENSOR and EFX_MON_STATE namespaces agree */
+ MCDI_STATIC_SENSOR_ASSERT(OK);
+ MCDI_STATIC_SENSOR_ASSERT(WARNING);
+ MCDI_STATIC_SENSOR_ASSERT(FATAL);
+ MCDI_STATIC_SENSOR_ASSERT(BROKEN);
+ MCDI_STATIC_SENSOR_ASSERT(NO_READING);
+
+ EFX_STATIC_ASSERT(sizeof (stat_mask[0]) * 8 ==
+ EFX_MON_MASK_ELEMENT_SIZE);
+ sensor_max =
+ MIN((8 * sensor_mask_size), EFX_ARRAY_SIZE(mcdi_sensor_map));
+
+ EFSYS_ASSERT(emip->emi_port > 0); /* MCDI port number is one-based */
+ port_mask = MCDI_MON_PORT_MASK(emip);
+
+ memset(stat_mask, 0, sizeof (stat_mask));
+
+ /*
+ * The MCDI sensor readings in the DMA buffer are a packed array of
+ * MC_CMD_SENSOR_VALUE_ENTRY structures, which only includes entries for
+ * supported sensors (bit set in sensor_mask). The sensor_mask and
+ * sensor readings do not include entries for the per-page NEXT_PAGE
+ * flag.
+ *
+ * sensor_mask may legitimately contain MCDI sensors that the driver
+ * does not understand.
+ */
+ for (sensor = 0; sensor < sensor_max; ++sensor) {
+ efx_mon_stat_t id = mcdi_sensor_map[sensor].msm_stat;
+
+ if ((sensor % MCDI_MON_PAGE_SIZE) == MC_CMD_SENSOR_PAGE0_NEXT) {
+ EFSYS_ASSERT3U(id, ==, MCDI_MON_NEXT_PAGE);
+ page++;
+ continue;
+ }
+ if (~(sensor_mask[page]) & (1U << sensor))
+ continue;
+ idx++;
+
+ if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0)
+ continue;
+ EFSYS_ASSERT(id < EFX_MON_NSTATS);
+
+ /*
+ * stat_mask is a bitmask indexed by EFX_MON_* monitor statistic
+ * identifiers from efx_mon_stat_t (without NEXT_PAGE bits).
+ *
+ * If there is an entry in the MCDI sensor to monitor statistic
+ * map then the sensor reading is used for the value of the
+ * monitor statistic.
+ */
+ stat_mask[id / EFX_MON_MASK_ELEMENT_SIZE] |=
+ (1U << (id % EFX_MON_MASK_ELEMENT_SIZE));
+
+ if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+
+ /* Get MCDI sensor reading from DMA buffer */
+ EFSYS_MEM_READD(esmp, 4 * (idx - 1), &dword);
+
+ /* Update EFX monitor stat from MCDI sensor reading */
+ stat[id].emsv_value = (uint16_t)EFX_DWORD_FIELD(dword,
+ MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
+
+ stat[id].emsv_state = (uint16_t)EFX_DWORD_FIELD(dword,
+ MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ }
+ }
+
+ if (stat_maskp != NULL) {
+ memcpy(stat_maskp, stat_mask, sizeof (stat_mask));
+ }
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_mon_stat_t *idp,
+ __out efx_mon_stat_value_t *valuep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint16_t port_mask;
+ uint16_t sensor;
+ uint16_t state;
+ uint16_t value;
+ efx_mon_stat_t id;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(emip->emi_port > 0); /* MCDI port number is one-based */
+ port_mask = MCDI_MON_PORT_MASK(emip);
+
+ sensor = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_MONITOR);
+ state = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_STATE);
+ value = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_VALUE);
+
+ /* Hardware must support this MCDI sensor */
+ EFSYS_ASSERT3U(sensor, <, (8 * encp->enc_mcdi_sensor_mask_size));
+ EFSYS_ASSERT((sensor % MCDI_MON_PAGE_SIZE) != MC_CMD_SENSOR_PAGE0_NEXT);
+ EFSYS_ASSERT(encp->enc_mcdi_sensor_maskp != NULL);
+ EFSYS_ASSERT((encp->enc_mcdi_sensor_maskp[sensor / MCDI_MON_PAGE_SIZE] &
+ (1U << (sensor % MCDI_MON_PAGE_SIZE))) != 0);
+
+ /* But we don't have to understand it */
+ if (sensor >= EFX_ARRAY_SIZE(mcdi_sensor_map)) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ id = mcdi_sensor_map[sensor].msm_stat;
+ if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0)
+ return (ENODEV);
+ EFSYS_ASSERT(id < EFX_MON_NSTATS);
+
+ *idp = id;
+ valuep->emsv_value = value;
+ valuep->emsv_state = state;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_read_sensors(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint32_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_READ_SENSORS_EXT_IN_LEN,
+ MC_CMD_READ_SENSORS_EXT_OUT_LEN)];
+ uint32_t addr_lo, addr_hi;
+
+ req.emr_cmd = MC_CMD_READ_SENSORS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_READ_SENSORS_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_READ_SENSORS_EXT_OUT_LEN;
+
+ addr_lo = (uint32_t)(EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ addr_hi = (uint32_t)(EFSYS_MEM_ADDR(esmp) >> 32);
+
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_LO, addr_lo);
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_HI, addr_hi);
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_LENGTH, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ return (req.emr_rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_sensor_info_npages(
+ __in efx_nic_t *enp,
+ __out uint32_t *npagesp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ int page;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(npagesp != NULL);
+
+ page = 0;
+ do {
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SENSOR_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page++);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ } while (MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK) &
+ (1U << MC_CMD_SENSOR_PAGE0_NEXT));
+
+ *npagesp = page;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_sensor_info(
+ __in efx_nic_t *enp,
+ __out_ecount(npages) uint32_t *sensor_maskp,
+ __in size_t npages)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ uint32_t page;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(sensor_maskp != NULL);
+
+ for (page = 0; page < npages; page++) {
+ uint32_t mask;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SENSOR_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ mask = MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK);
+
+ if ((page != (npages - 1)) &&
+ ((mask & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) == 0)) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ sensor_maskp[page] = mask;
+ }
+
+ if (sensor_maskp[npages - 1] & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t size = encp->enc_mon_stat_dma_buf_size;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_read_sensors(enp, esmp, size)) != 0)
+ goto fail1;
+
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, size);
+
+ mcdi_mon_decode_stats(enp,
+ encp->enc_mcdi_sensor_maskp,
+ encp->enc_mcdi_sensor_mask_size,
+ esmp, NULL, values);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_cfg_build(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t npages;
+ efx_rc_t rc;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ encp->enc_mon_type = EFX_MON_SFC90X0;
+ break;
+#endif
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ encp->enc_mon_type = EFX_MON_SFC91X0;
+ break;
+#endif
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ encp->enc_mon_type = EFX_MON_SFC92X0;
+ break;
+#endif
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Get mc sensor mask size */
+ npages = 0;
+ if ((rc = efx_mcdi_sensor_info_npages(enp, &npages)) != 0)
+ goto fail2;
+
+ encp->enc_mon_stat_dma_buf_size = npages * EFX_MON_STATS_PAGE_SIZE;
+ encp->enc_mcdi_sensor_mask_size = npages * sizeof (uint32_t);
+
+ /* Allocate mc sensor mask */
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+
+ if (encp->enc_mcdi_sensor_maskp == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ /* Read mc sensor mask */
+ if ((rc = efx_mcdi_sensor_info(enp,
+ encp->enc_mcdi_sensor_maskp,
+ npages)) != 0)
+ goto fail4;
+
+ /* Build monitor statistics mask */
+ mcdi_mon_decode_stats(enp,
+ encp->enc_mcdi_sensor_maskp,
+ encp->enc_mcdi_sensor_mask_size,
+ NULL, encp->enc_mon_stat_mask, NULL);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+ EFSYS_KMEM_FREE(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+mcdi_mon_cfg_free(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ if (encp->enc_mcdi_sensor_maskp != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+ }
+}
+
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#endif /* EFSYS_OPT_MON_MCDI */
diff --git a/drivers/net/sfc/base/mcdi_mon.h b/drivers/net/sfc/base/mcdi_mon.h
new file mode 100644
index 00000000..e07b5280
--- /dev/null
+++ b/drivers/net/sfc/base/mcdi_mon.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_MCDI_MON_H
+#define _SYS_MCDI_MON_H
+
+#include "efx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if EFSYS_OPT_MON_MCDI
+
+#if EFSYS_OPT_MON_STATS
+
+ __checkReturn efx_rc_t
+mcdi_mon_cfg_build(
+ __in efx_nic_t *enp);
+
+ void
+mcdi_mon_cfg_free(
+ __in efx_nic_t *enp);
+
+
+extern __checkReturn efx_rc_t
+mcdi_mon_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_mon_stat_t *idp,
+ __out efx_mon_stat_value_t *valuep);
+
+extern __checkReturn efx_rc_t
+mcdi_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#endif /* EFSYS_OPT_MON_MCDI */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MCDI_MON_H */
diff --git a/drivers/net/sfc/base/medford_impl.h b/drivers/net/sfc/base/medford_impl.h
new file mode 100644
index 00000000..de2f5cf0
--- /dev/null
+++ b/drivers/net/sfc/base/medford_impl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_MEDFORD_IMPL_H
+#define _SYS_MEDFORD_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Alignment requirement for value written to RX WPTR:
+ * the WPTR must be aligned to an 8 descriptor boundary
+ *
+ * FIXME: Is this the same on Medford as Huntington?
+ */
+#define MEDFORD_RX_WPTR_ALIGN 8
+
+
+
+#ifndef ER_EZ_TX_PIOBUF_SIZE
+#define ER_EZ_TX_PIOBUF_SIZE 4096
+#endif
+
+
+#define MEDFORD_PIOBUF_NBUFS (16)
+#define MEDFORD_PIOBUF_SIZE (ER_EZ_TX_PIOBUF_SIZE)
+
+#define MEDFORD_MIN_PIO_ALLOC_SIZE (MEDFORD_PIOBUF_SIZE / 32)
+
+
+extern __checkReturn efx_rc_t
+medford_board_cfg(
+ __in efx_nic_t *enp);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MEDFORD_IMPL_H */
diff --git a/drivers/net/sfc/base/medford_nic.c b/drivers/net/sfc/base/medford_nic.c
new file mode 100644
index 00000000..07afac1e
--- /dev/null
+++ b/drivers/net/sfc/base/medford_nic.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_MEDFORD
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_rxdp_config(
+ __in efx_nic_t *enp,
+ __out uint32_t *end_paddingp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN,
+ MC_CMD_GET_RXDP_CONFIG_OUT_LEN)];
+ uint32_t end_padding;
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
+ /* RX DMA end padding is disabled */
+ end_padding = 0;
+ } else {
+ switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
+ end_padding = 64;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
+ end_padding = 128;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
+ end_padding = 256;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ }
+
+ *end_paddingp = end_padding;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+medford_nic_get_required_pcie_bandwidth(
+ __in efx_nic_t *enp,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t port_modes;
+ uint32_t current_mode;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
+ &current_mode)) != 0) {
+ /* No port mode info available. */
+ bandwidth = 0;
+ goto out;
+ }
+
+ if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode,
+ &bandwidth)) != 0)
+ goto fail1;
+
+out:
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+medford_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t mac_addr[6] = { 0 };
+ uint32_t board_type = 0;
+ ef10_link_state_t els;
+ efx_port_t *epp = &(enp->en_port);
+ uint32_t port;
+ uint32_t pf;
+ uint32_t vf;
+ uint32_t mask;
+ uint32_t sysclk, dpcpu_clk;
+ uint32_t base, nvec;
+ uint32_t end_padding;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /*
+ * FIXME: Likely to be incomplete and incorrect.
+ * Parts of this should be shared with Huntington.
+ */
+
+ if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
+ goto fail1;
+
+ /*
+ * NOTE: The MCDI protocol numbers ports from zero.
+ * The common code MCDI interface numbers ports from one.
+ */
+ emip->emi_port = port + 1;
+
+ if ((rc = ef10_external_port_mapping(enp, port,
+ &encp->enc_external_port)) != 0)
+ goto fail2;
+
+ /*
+ * Get PCIe function number from firmware (used for
+ * per-function privilege and dynamic config info).
+ * - PCIe PF: pf = PF number, vf = 0xffff.
+ * - PCIe VF: pf = parent PF, vf = VF number.
+ */
+ if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
+ goto fail3;
+
+ encp->enc_pf = pf;
+ encp->enc_vf = vf;
+
+ /* MAC address for this function */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
+#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
+ /* Disable static config checking for Medford NICs, ONLY
+ * for manufacturing test and setup at the factory, to
+ * allow the static config to be installed.
+ */
+#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+ if ((rc == 0) && (mac_addr[0] & 0x02)) {
+ /*
+ * If the static config does not include a global MAC
+ * address pool then the board may return a locally
+ * administered MAC address (this should only happen on
+ * incorrectly programmed boards).
+ */
+ rc = EINVAL;
+ }
+#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+ } else {
+ rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
+ }
+ if (rc != 0)
+ goto fail4;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ /* Board configuration */
+ rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
+ if (rc != 0) {
+ /* Unprivileged functions may not be able to read board cfg */
+ if (rc == EACCES)
+ board_type = 0;
+ else
+ goto fail5;
+ }
+
+ encp->enc_board_type = board_type;
+ encp->enc_clk_mult = 1; /* not used for Medford */
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail7;
+ epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+
+ /*
+ * Enable firmware workarounds for hardware errata.
+ * Expected responses are:
+ * - 0 (zero):
+ * Success: workaround enabled or disabled as requested.
+ * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
+ * Firmware does not support the MC_CMD_WORKAROUND request.
+ * (assume that the workaround is not supported).
+ * - MC_CMD_ERR_ENOENT (reported as ENOENT):
+ * Firmware does not support the requested workaround.
+ * - MC_CMD_ERR_EPERM (reported as EACCES):
+ * Unprivileged function cannot enable/disable workarounds.
+ *
+ * See efx_mcdi_request_errcode() for MCDI error translations.
+ */
+
+
+ if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+ /*
+ * Interrupt testing does not work for VFs. See bug50084.
+ * FIXME: Does this still apply to Medford?
+ */
+ encp->enc_bug41750_workaround = B_TRUE;
+ }
+
+ /* Chained multicast is always enabled on Medford */
+ encp->enc_bug26807_workaround = B_TRUE;
+
+ /*
+ * If the bug61265 workaround is enabled, then interrupt holdoff timers
+ * cannot be controlled by timer table writes, so MCDI must be used
+ * (timer table writes can still be used for wakeup timers).
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE,
+ NULL);
+ if ((rc == 0) || (rc == EACCES))
+ encp->enc_bug61265_workaround = B_TRUE;
+ else if ((rc == ENOTSUP) || (rc == ENOENT))
+ encp->enc_bug61265_workaround = B_FALSE;
+ else
+ goto fail8;
+
+ /* Get clock frequencies (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
+ goto fail9;
+
+ /*
+ * The Medford timer quantum is 1536 dpcpu_clk cycles, documented for
+ * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ */
+ encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+
+ /* Check capabilities of running datapath firmware */
+ if ((rc = ef10_get_datapath_caps(enp)) != 0)
+ goto fail10;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+
+ /* Get the RX DMA end padding alignment configuration */
+ if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
+ if (rc != EACCES)
+ goto fail11;
+
+ /* Assume largest tail padding size supported by hardware */
+ end_padding = 256;
+ }
+ encp->enc_rx_buf_align_end = end_padding;
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+
+ encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
+ /* No boundary crossing limits */
+ encp->enc_tx_dma_desc_boundary = 0;
+
+ /*
+ * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
+ * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
+ * resources (allocated to this PCIe function), which is zero until
+ * after we have allocated VIs.
+ */
+ encp->enc_evq_limit = 1024;
+ encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
+ encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
+
+ /*
+ * The maximum supported transmit queue size is 2048. TXQs with 4096
+ * descriptors are not supported as the top bit is used for vfifo
+ * stuffing.
+ */
+ encp->enc_txq_max_ndescs = 2048;
+
+ encp->enc_buftbl_limit = 0xFFFFFFFF;
+
+ encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS;
+ encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE;
+ encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE;
+
+ /*
+ * Get the current privilege mask. Note that this may be modified
+ * dynamically, so this value is informational only. DO NOT use
+ * the privilege mask to check for sufficient privileges, as that
+ * can result in time-of-check/time-of-use bugs.
+ */
+ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
+ goto fail12;
+ encp->enc_privilege_mask = mask;
+
+ /* Get interrupt vector limits */
+ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(encp))
+ goto fail13;
+
+ /* Ignore error (cannot query vector limits from a VF). */
+ base = 0;
+ nvec = 1024;
+ }
+ encp->enc_intr_vec_base = base;
+ encp->enc_intr_limit = nvec;
+
+ /*
+ * Maximum number of bytes into the frame the TCP header can start for
+ * firmware assisted TSO to work.
+ */
+ encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+
+ /*
+ * Medford stores a single global copy of VPD, not per-PF as on
+ * Huntington.
+ */
+ encp->enc_vpd_is_global = B_TRUE;
+
+ rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth);
+ if (rc != 0)
+ goto fail14;
+ encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
+ return (0);
+
+fail14:
+ EFSYS_PROBE(fail14);
+fail13:
+ EFSYS_PROBE(fail13);
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/siena_flash.h b/drivers/net/sfc/base/siena_flash.h
new file mode 100644
index 00000000..e2700554
--- /dev/null
+++ b/drivers/net/sfc/base/siena_flash.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_SIENA_FLASH_H
+#define _SYS_SIENA_FLASH_H
+
+#pragma pack(1)
+
+/* Fixed locations near the start of flash (which may be in the internal PHY
+ * firmware header) point to the boot header.
+ *
+ * - parsed by MC boot ROM and firmware
+ * - reserved (but not parsed) by PHY firmware
+ * - opaque to driver
+ */
+
+#define SIENA_MC_BOOT_PHY_FW_HDR_LEN (0x20)
+
+#define SIENA_MC_BOOT_PTR_LOCATION (0x18) /* First thing we try to boot */
+#define SIENA_MC_BOOT_ALT_PTR_LOCATION (0x1c) /* Alternative if that fails */
+
+#define SIENA_MC_BOOT_HDR_LEN (0x200)
+
+#define SIENA_MC_BOOT_MAGIC (0x51E4A001)
+#define SIENA_MC_BOOT_VERSION (1)
+
+
+/*Structures supporting an arbitrary number of binary blobs in the flash image
+ intended to house code and tables for the satellite cpus*/
+/*thanks to random.org for:*/
+#define BLOBS_HEADER_MAGIC (0xBDA3BBD4)
+#define BLOB_HEADER_MAGIC (0xA1478A91)
+
+typedef struct blobs_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic;
+ efx_dword_t no_of_blobs;
+} blobs_hdr_t;
+
+typedef struct blob_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic;
+ efx_dword_t cpu_type;
+ efx_dword_t build_variant;
+ efx_dword_t offset;
+ efx_dword_t length;
+ efx_dword_t checksum;
+} blob_hdr_t;
+
+#define BLOB_CPU_TYPE_TXDI_TEXT (0)
+#define BLOB_CPU_TYPE_RXDI_TEXT (1)
+#define BLOB_CPU_TYPE_TXDP_TEXT (2)
+#define BLOB_CPU_TYPE_RXDP_TEXT (3)
+#define BLOB_CPU_TYPE_RXHRSL_HR_LUT (4)
+#define BLOB_CPU_TYPE_RXHRSL_HR_LUT_CFG (5)
+#define BLOB_CPU_TYPE_TXHRSL_HR_LUT (6)
+#define BLOB_CPU_TYPE_TXHRSL_HR_LUT_CFG (7)
+#define BLOB_CPU_TYPE_RXHRSL_HR_PGM (8)
+#define BLOB_CPU_TYPE_RXHRSL_SL_PGM (9)
+#define BLOB_CPU_TYPE_TXHRSL_HR_PGM (10)
+#define BLOB_CPU_TYPE_TXHRSL_SL_PGM (11)
+#define BLOB_CPU_TYPE_RXDI_VTBL0 (12)
+#define BLOB_CPU_TYPE_TXDI_VTBL0 (13)
+#define BLOB_CPU_TYPE_RXDI_VTBL1 (14)
+#define BLOB_CPU_TYPE_TXDI_VTBL1 (15)
+#define BLOB_CPU_TYPE_DUMPSPEC (32)
+#define BLOB_CPU_TYPE_MC_XIP (33)
+
+#define BLOB_CPU_TYPE_INVALID (31)
+
+/*
+ * The upper four bits of the CPU type field specify the compression
+ * algorithm used for this blob.
+ */
+#define BLOB_COMPRESSION_MASK (0xf0000000)
+#define BLOB_CPU_TYPE_MASK (0x0fffffff)
+
+#define BLOB_COMPRESSION_NONE (0x00000000) /* Stored as is */
+#define BLOB_COMPRESSION_LZ (0x10000000) /* see lib/lzdecoder.c */
+
+typedef struct siena_mc_boot_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_BOOT_MAGIC */
+ efx_word_t hdr_version; /* this structure definition is version 1 */
+ efx_byte_t board_type;
+ efx_byte_t firmware_version_a;
+ efx_byte_t firmware_version_b;
+ efx_byte_t firmware_version_c;
+ efx_word_t checksum; /* of whole header area + firmware image */
+ efx_word_t firmware_version_d;
+ efx_byte_t mcfw_subtype;
+ efx_byte_t generation; /* Valid for medford, SBZ for earlier chips */
+ efx_dword_t firmware_text_offset; /* offset to firmware .text */
+ efx_dword_t firmware_text_size; /* length of firmware .text, in bytes */
+ efx_dword_t firmware_data_offset; /* offset to firmware .data */
+ efx_dword_t firmware_data_size; /* length of firmware .data, in bytes */
+ efx_byte_t spi_rate; /* SPI rate for reading image, 0 is BootROM default */
+ efx_byte_t spi_phase_adj; /* SPI SDO/SCL phase adjustment, 0 is default (no adj) */
+ efx_word_t xpm_sector; /* The sector that contains the key, or 0xffff if unsigned (medford) SBZ (earlier) */
+ efx_dword_t reserved_c[7]; /* (set to 0) */
+} siena_mc_boot_hdr_t;
+
+#define SIENA_MC_BOOT_HDR_PADDING \
+ (SIENA_MC_BOOT_HDR_LEN - sizeof(siena_mc_boot_hdr_t))
+
+#define SIENA_MC_STATIC_CONFIG_MAGIC (0xBDCF5555)
+#define SIENA_MC_STATIC_CONFIG_VERSION (0)
+
+typedef struct siena_mc_static_config_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_STATIC_CONFIG_MAGIC */
+ efx_word_t length; /* of header area (i.e. not including VPD) */
+ efx_byte_t version;
+ efx_byte_t csum; /* over header area (i.e. not including VPD) */
+ efx_dword_t static_vpd_offset;
+ efx_dword_t static_vpd_length;
+ efx_dword_t capabilities;
+ efx_byte_t mac_addr_base[6];
+ efx_byte_t green_mode_cal; /* Green mode calibration result */
+ efx_byte_t green_mode_valid; /* Whether cal holds a valid value */
+ efx_word_t mac_addr_count;
+ efx_word_t mac_addr_stride;
+ efx_word_t calibrated_vref; /* Vref as measured during production */
+ efx_word_t adc_vref; /* Vref as read by ADC */
+ efx_dword_t reserved2[1]; /* (write as zero) */
+ efx_dword_t num_dbi_items;
+ struct {
+ efx_word_t addr;
+ efx_word_t byte_enables;
+ efx_dword_t value;
+ } dbi[];
+} siena_mc_static_config_hdr_t;
+
+/* This prefixes a valid XIP partition */
+#define XIP_PARTITION_MAGIC (0x51DEC0DE)
+
+#define SIENA_MC_DYNAMIC_CONFIG_MAGIC (0xBDCFDDDD)
+#define SIENA_MC_DYNAMIC_CONFIG_VERSION (0)
+
+typedef struct siena_mc_fw_version_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t fw_subtype;
+ efx_word_t version_w;
+ efx_word_t version_x;
+ efx_word_t version_y;
+ efx_word_t version_z;
+} siena_mc_fw_version_t;
+
+typedef struct siena_mc_dynamic_config_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_DYNAMIC_CONFIG_MAGIC */
+ efx_word_t length; /* of header area (i.e. not including VPD) */
+ efx_byte_t version;
+ efx_byte_t csum; /* over header area (i.e. not including VPD) */
+ efx_dword_t dynamic_vpd_offset;
+ efx_dword_t dynamic_vpd_length;
+ efx_dword_t num_fw_version_items;
+ siena_mc_fw_version_t fw_version[];
+} siena_mc_dynamic_config_hdr_t;
+
+#define SIENA_MC_EXPROM_SINGLE_MAGIC (0xAA55) /* little-endian uint16_t */
+
+#define SIENA_MC_EXPROM_COMBO_MAGIC (0xB0070102) /* little-endian uint32_t */
+#define SIENA_MC_EXPROM_COMBO_V2_MAGIC (0xB0070103) /* little-endian uint32_t */
+
+typedef struct siena_mc_combo_rom_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_EXPROM_COMBO_MAGIC or SIENA_MC_EXPROM_COMBO_V2_MAGIC */
+ union {
+ struct {
+ efx_dword_t len1; /* length of first image */
+ efx_dword_t len2; /* length of second image */
+ efx_dword_t off1; /* offset of first byte to edit to combine images */
+ efx_dword_t off2; /* offset of second byte to edit to combine images */
+ efx_word_t infoblk0_off;/* infoblk offset */
+ efx_word_t infoblk1_off;/* infoblk offset */
+ efx_byte_t infoblk_len;/* length of space reserved for one infoblk structure */
+ efx_byte_t reserved[7];/* (set to 0) */
+ } v1;
+ struct {
+ efx_dword_t len1; /* length of first image */
+ efx_dword_t len2; /* length of second image */
+ efx_dword_t off1; /* offset of first byte to edit to combine images */
+ efx_dword_t off2; /* offset of second byte to edit to combine images */
+ efx_word_t infoblk_off;/* infoblk start offset */
+ efx_word_t infoblk_count;/* infoblk count */
+ efx_byte_t infoblk_len;/* length of space reserved for one infoblk structure */
+ efx_byte_t reserved[7];/* (set to 0) */
+ } v2;
+ } data;
+} siena_mc_combo_rom_hdr_t;
+
+#pragma pack()
+
+#endif /* _SYS_SIENA_FLASH_H */
diff --git a/drivers/net/sfc/base/siena_impl.h b/drivers/net/sfc/base/siena_impl.h
new file mode 100644
index 00000000..ea6de983
--- /dev/null
+++ b/drivers/net/sfc/base/siena_impl.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_SIENA_IMPL_H
+#define _SYS_SIENA_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_mcdi.h"
+#include "siena_flash.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SIENA_NVRAM_CHUNK 0x80
+
+extern __checkReturn efx_rc_t
+siena_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_nic_reset(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_nic_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+siena_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+siena_nic_unprobe(
+ __in efx_nic_t *enp);
+
+#define SIENA_SRAM_ROWS 0x12000
+
+extern void
+siena_sram_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_sram_pattern_fn_t func);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+siena_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern void
+siena_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len);
+
+extern __checkReturn boolean_t
+siena_mcdi_poll_response(
+ __in efx_nic_t *enp);
+
+extern void
+siena_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length);
+
+extern efx_rc_t
+siena_mcdi_poll_reboot(
+ __in efx_nic_t *enp);
+
+extern void
+siena_mcdi_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp);
+
+extern void
+siena_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+siena_nvram_get_dynamic_cfg(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t vpd,
+ __out siena_mc_dynamic_config_hdr_t **dcfgp,
+ __out size_t *sizep);
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+siena_nvram_get_subtype(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4]);
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+siena_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+siena_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+siena_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+typedef struct siena_link_state_s {
+ uint32_t sls_adv_cap_mask;
+ uint32_t sls_lp_cap_mask;
+ unsigned int sls_fcntl;
+ efx_link_mode_t sls_link_mode;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t sls_loopback;
+#endif
+ boolean_t sls_mac_up;
+} siena_link_state_t;
+
+extern void
+siena_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+siena_phy_get_link(
+ __in efx_nic_t *enp,
+ __out siena_link_state_t *slsp);
+
+extern __checkReturn efx_rc_t
+siena_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t on);
+
+extern __checkReturn efx_rc_t
+siena_phy_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_phy_verify(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+#if EFSYS_OPT_PHY_STATS
+
+extern void
+siena_phy_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t vmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint64_t *smaskp,
+ __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat);
+
+extern __checkReturn efx_rc_t
+siena_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+extern __checkReturn efx_rc_t
+siena_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+siena_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+siena_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+extern __checkReturn efx_rc_t
+siena_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+siena_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+extern __checkReturn efx_rc_t
+siena_mac_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+#if EFSYS_OPT_LOOPBACK
+
+extern __checkReturn efx_rc_t
+siena_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type);
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+extern __checkReturn efx_rc_t
+siena_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size);
+
+extern __checkReturn efx_rc_t
+siena_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SIENA_IMPL_H */
diff --git a/drivers/net/sfc/base/siena_mac.c b/drivers/net/sfc/base/siena_mac.c
new file mode 100644
index 00000000..29bbff8a
--- /dev/null
+++ b/drivers/net/sfc/base/siena_mac.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+ __checkReturn efx_rc_t
+siena_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ siena_link_state_t sls;
+ efx_rc_t rc;
+
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail1;
+
+ epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;
+ epp->ep_fcntl = sls.sls_fcntl;
+
+ *link_modep = sls.sls_link_mode;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ siena_link_state_t sls;
+ efx_rc_t rc;
+
+ /*
+ * Because Siena doesn't *require* polling, we can't rely on
+ * siena_mac_poll() being executed to populate epp->ep_mac_up.
+ */
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail1;
+
+ *mac_upp = sls.sls_mac_up;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_mac_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_oword_t multicast_hash[2];
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_SET_MAC_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN),
+ MAX(MC_CMD_SET_MCAST_HASH_IN_LEN,
+ MC_CMD_SET_MCAST_HASH_OUT_LEN))];
+ unsigned int fcntl;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
+ epp->ep_mac_addr);
+ MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, !epp->ep_all_unicst,
+ SET_MAC_IN_REJECT_BRDCST, !epp->ep_brdcst);
+
+ if (epp->ep_fcntl_autoneg)
+ /* efx_fcntl_set() has already set the phy capabilities */
+ fcntl = MC_CMD_FCNTL_AUTO;
+ else if (epp->ep_fcntl & EFX_FCNTL_RESPOND)
+ fcntl = (epp->ep_fcntl & EFX_FCNTL_GENERATE)
+ ? MC_CMD_FCNTL_BIDIR
+ : MC_CMD_FCNTL_RESPOND;
+ else
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, fcntl);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* Push multicast hash */
+
+ if (epp->ep_all_mulcst) {
+ /* A hash matching all multicast is all 1s */
+ EFX_SET_OWORD(multicast_hash[0]);
+ EFX_SET_OWORD(multicast_hash[1]);
+ } else if (epp->ep_mulcst) {
+ /* Use the hash set by the multicast list */
+ multicast_hash[0] = epp->ep_multicst_hash[0];
+ multicast_hash[1] = epp->ep_multicst_hash[1];
+ } else {
+ /* A hash matching no traffic is simply 0 */
+ EFX_ZERO_OWORD(multicast_hash[0]);
+ EFX_ZERO_OWORD(multicast_hash[1]);
+ }
+
+ /*
+ * Broadcast packets go through the multicast hash filter.
+ * The IEEE 802.3 CRC32 of the broadcast address is 0xbe2612ff
+ * so we always add bit 0xff to the mask (bit 0x7f in the
+ * second octword).
+ */
+ if (epp->ep_brdcst) {
+ /*
+ * NOTE: due to constant folding, some of this evaluates
+ * to null expressions, giving E_EXPR_NULL_EFFECT during
+ * lint on Illumos. No good way to fix this without
+ * explicit coding the individual word/bit setting.
+ * So just suppress lint for this one line.
+ */
+ /* LINTED */
+ EFX_SET_OWORD_BIT(multicast_hash[1], 0x7f);
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MCAST_HASH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MCAST_HASH_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MCAST_HASH_OUT_LEN;
+
+ memcpy(MCDI_IN2(req, uint8_t, SET_MCAST_HASH_IN_HASH0),
+ multicast_hash, sizeof (multicast_hash));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+siena_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_loopback_type_t old_loopback_type;
+ efx_link_mode_t old_loopback_link_mode;
+ efx_rc_t rc;
+
+ /* The PHY object handles this on Siena */
+ old_loopback_type = epp->ep_loopback_type;
+ old_loopback_link_mode = epp->ep_loopback_link_mode;
+ epp->ep_loopback_type = loopback_type;
+ epp->ep_loopback_link_mode = link_mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_loopback_type = old_loopback_type;
+ epp->ep_loopback_link_mode = old_loopback_link_mode;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+ __checkReturn efx_rc_t
+siena_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size)
+{
+ const struct efx_mac_stats_range siena_stats[] = {
+ { EFX_MAC_RX_OCTETS, EFX_MAC_RX_GE_15XX_PKTS },
+ /* EFX_MAC_RX_ERRORS is not supported */
+ { EFX_MAC_RX_FCS_ERRORS, EFX_MAC_TX_EX_DEF_PKTS },
+ };
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ siena_stats, EFX_ARRAY_SIZE(siena_stats))) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#define SIENA_MAC_STAT_READ(_esmp, _field, _eqp) \
+ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp)
+
+ __checkReturn efx_rc_t
+siena_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp)
+{
+ efx_qword_t value;
+ efx_qword_t generation_start;
+ efx_qword_t generation_end;
+
+ _NOTE(ARGUNUSED(enp))
+
+ /* Read END first so we don't race with the MC */
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
+ &generation_end);
+ EFSYS_MEM_READ_BARRIER();
+
+ /* TX */
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value);
+ EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value);
+
+ /* RX */
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);
+
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EFSYS_MEM_READ_BARRIER();
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
+ &generation_start);
+
+ /* Check that we didn't read the stats in the middle of a DMA */
+ /* Not a good enough check ? */
+ if (memcmp(&generation_start, &generation_end,
+ sizeof (generation_start)))
+ return (EAGAIN);
+
+ if (generationp)
+ *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+ __checkReturn efx_rc_t
+siena_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ return (ENOTSUP);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/siena_mcdi.c b/drivers/net/sfc/base/siena_mcdi.c
new file mode 100644
index 00000000..63c29fcb
--- /dev/null
+++ b/drivers/net/sfc/base/siena_mcdi.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA && EFSYS_OPT_MCDI
+
+#define SIENA_MCDI_PDU(_emip) \
+ (((emip)->emi_port == 1) \
+ ? MC_SMEM_P0_PDU_OFST >> 2 \
+ : MC_SMEM_P1_PDU_OFST >> 2)
+
+#define SIENA_MCDI_DOORBELL(_emip) \
+ (((emip)->emi_port == 1) \
+ ? MC_SMEM_P0_DOORBELL_OFST >> 2 \
+ : MC_SMEM_P1_DOORBELL_OFST >> 2)
+
+#define SIENA_MCDI_STATUS(_emip) \
+ (((emip)->emi_port == 1) \
+ ? MC_SMEM_P0_STATUS_OFST >> 2 \
+ : MC_SMEM_P1_STATUS_OFST >> 2)
+
+
+ void
+siena_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t dword;
+ unsigned int pdur;
+ unsigned int dbr;
+ unsigned int pos;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+ dbr = SIENA_MCDI_DOORBELL(emip);
+
+ /* Write the header */
+ EFSYS_ASSERT3U(hdr_len, ==, sizeof (efx_dword_t));
+ dword = *(efx_dword_t *)hdrp;
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, pdur, &dword, B_TRUE);
+
+ /* Write the payload */
+ for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)sdup + pos);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM,
+ pdur + 1 + (pos >> 2), &dword, B_FALSE);
+ }
+
+ /* Ring the doorbell */
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xd004be11);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, dbr, &dword, B_FALSE);
+}
+
+ efx_rc_t
+siena_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+#if 1
+ /*
+ * XXX Bug 25922, bug 26099: This function is not being used
+ * properly. Until its callers are fixed, it should always
+ * return 0.
+ */
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+#else
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int rebootr;
+ efx_dword_t dword;
+ uint32_t value;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ rebootr = SIENA_MCDI_STATUS(emip);
+
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);
+ value = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ if (value == 0)
+ return (0);
+
+ EFX_ZERO_DWORD(dword);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);
+
+ if (value == MC_STATUS_DWORD_ASSERT)
+ return (EINTR);
+ else
+ return (EIO);
+#endif
+}
+
+extern __checkReturn boolean_t
+siena_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr;
+ unsigned int pdur;
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, pdur, &hdr, B_FALSE);
+ return (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE);
+}
+
+ void
+siena_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int pdur;
+ unsigned int pos;
+ efx_dword_t data;
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+
+ for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM,
+ pdur + ((offset + pos) >> 2), &data, B_FALSE);
+ memcpy((uint8_t *)bufferp + pos, &data,
+ MIN(sizeof (data), length - pos));
+ }
+}
+
+ __checkReturn efx_rc_t
+siena_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_oword_t oword;
+ unsigned int portnum;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(mtp))
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Determine the port number to use for MCDI */
+ EFX_BAR_READO(enp, FR_AZ_CS_DEBUG_REG, &oword);
+ portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM);
+
+ if (portnum == 0) {
+ /* Presumably booted from ROM; only MCDI port 1 will work */
+ emip->emi_port = 1;
+ } else if (portnum <= 2) {
+ emip->emi_port = portnum;
+ } else {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Siena BootROM and firmware only support MCDIv1 */
+ emip->emi_max_version = 1;
+
+ /*
+ * Wipe the atomic reboot status so subsequent MCDI requests succeed.
+ * BOOT_STATUS is preserved so eno_nic_probe() can boot out of the
+ * assertion handler.
+ */
+ (void) siena_mcdi_poll_reboot(enp);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+siena_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ switch (id) {
+ case EFX_MCDI_FEATURE_FW_UPDATE:
+ case EFX_MCDI_FEATURE_LINK_CONTROL:
+ case EFX_MCDI_FEATURE_MACADDR_CHANGE:
+ case EFX_MCDI_FEATURE_MAC_SPOOFING:
+ *supportedp = B_TRUE;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Default timeout for MCDI command processing. */
+#define SIENA_MCDI_CMD_TIMEOUT_US (10 * 1000 * 1000)
+
+ void
+siena_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp)
+{
+ _NOTE(ARGUNUSED(enp, emrp))
+
+ *timeoutp = SIENA_MCDI_CMD_TIMEOUT_US;
+}
+
+
+#endif /* EFSYS_OPT_SIENA && EFSYS_OPT_MCDI */
diff --git a/drivers/net/sfc/base/siena_nic.c b/drivers/net/sfc/base/siena_nic.c
new file mode 100644
index 00000000..129b854b
--- /dev/null
+++ b/drivers/net/sfc/base/siena_nic.c
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#include "mcdi_mon.h"
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+static __checkReturn efx_rc_t
+siena_nic_get_partn_mask(
+ __in efx_nic_t *enp,
+ __out unsigned int *maskp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_TYPES_IN_LEN,
+ MC_CMD_NVRAM_TYPES_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_TYPES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_TYPES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_TYPES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *maskp = MCDI_OUT_DWORD(req, NVRAM_TYPES_OUT_TYPES);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+static __checkReturn efx_rc_t
+siena_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t mac_addr[6];
+ efx_dword_t capabilities;
+ uint32_t board_type;
+ uint32_t nevq, nrxq, ntxq;
+ efx_rc_t rc;
+
+ /* External port identifier using one-based port numbering */
+ encp->enc_external_port = (uint8_t)enp->en_mcdi.em_emip.emi_port;
+
+ /* Board configuration */
+ if ((rc = efx_mcdi_get_board_cfg(enp, &board_type,
+ &capabilities, mac_addr)) != 0)
+ goto fail1;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ encp->enc_board_type = board_type;
+
+ /*
+ * There is no possibility to determine the number of PFs on Siena
+ * by issuing MCDI request, and it is not an easy task to find the
+ * value based on the board type, so 'enc_hw_pf_count' is set to 1
+ */
+ encp->enc_hw_pf_count = 1;
+
+ /* Additional capabilities */
+ encp->enc_clk_mult = 1;
+ if (EFX_DWORD_FIELD(capabilities, MC_CMD_CAPABILITIES_TURBO)) {
+ enp->en_features |= EFX_FEATURE_TURBO;
+
+ if (EFX_DWORD_FIELD(capabilities,
+ MC_CMD_CAPABILITIES_TURBO_ACTIVE)) {
+ encp->enc_clk_mult = 2;
+ }
+ }
+
+ encp->enc_evq_timer_quantum_ns =
+ EFX_EVQ_SIENA_TIMER_QUANTUM_NS / encp->enc_clk_mult;
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+
+ /* When hash header insertion is enabled, Siena inserts 16 bytes */
+ encp->enc_rx_prefix_size = 16;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+ encp->enc_rx_buf_align_end = 1;
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = 1;
+
+ encp->enc_tx_dma_desc_size_max = EFX_MASK32(FSF_AZ_TX_KER_BYTE_COUNT);
+ /* Fragments must not span 4k boundaries. */
+ encp->enc_tx_dma_desc_boundary = 4096;
+
+ /* Resource limits */
+ rc = efx_mcdi_get_resource_limits(enp, &nevq, &nrxq, &ntxq);
+ if (rc != 0) {
+ if (rc != ENOTSUP)
+ goto fail2;
+
+ nevq = 1024;
+ nrxq = EFX_RXQ_LIMIT_TARGET;
+ ntxq = EFX_TXQ_LIMIT_TARGET;
+ }
+ encp->enc_evq_limit = nevq;
+ encp->enc_rxq_limit = MIN(EFX_RXQ_LIMIT_TARGET, nrxq);
+ encp->enc_txq_limit = MIN(EFX_TXQ_LIMIT_TARGET, ntxq);
+
+ encp->enc_txq_max_ndescs = 4096;
+
+ encp->enc_buftbl_limit = SIENA_SRAM_ROWS -
+ (encp->enc_txq_limit * EFX_TXQ_DC_NDESCS(EFX_TXQ_DC_SIZE)) -
+ (encp->enc_rxq_limit * EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE));
+
+ encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_v2_n_contexts = 0;
+ encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
+ encp->enc_rx_packed_stream_supported = B_FALSE;
+ encp->enc_rx_var_packed_stream_supported = B_FALSE;
+
+ /* Siena supports two 10G ports, and 8 lanes of PCIe Gen2 */
+ encp->enc_required_pcie_bandwidth_mbps = 2 * 10000;
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN2;
+
+ encp->enc_fw_verified_nvram_update_required = B_FALSE;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_phy_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail1;
+
+#if EFSYS_OPT_PHY_STATS
+ /* Convert the MCDI statistic mask into the EFX_PHY_STAT mask */
+ siena_phy_decode_stats(enp, encp->enc_mcdi_phy_stat_mask,
+ NULL, &encp->enc_phy_stat_mask, NULL);
+#endif /* EFSYS_OPT_PHY_STATS */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_probe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ siena_link_state_t sls;
+ unsigned int mask;
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* Test BIU */
+ if ((rc = efx_nic_biu_test(enp)) != 0)
+ goto fail1;
+
+ /* Clear the region register */
+ EFX_POPULATE_OWORD_4(oword,
+ FRF_AZ_ADR_REGION0, 0,
+ FRF_AZ_ADR_REGION1, (1 << 16),
+ FRF_AZ_ADR_REGION2, (2 << 16),
+ FRF_AZ_ADR_REGION3, (3 << 16));
+ EFX_BAR_WRITEO(enp, FR_AZ_ADR_REGION_REG, &oword);
+
+ /* Read clear any assertion state */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail2;
+
+ /* Exit the assertion handler */
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail3;
+
+ /* Wrestle control from the BMC */
+ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
+ goto fail4;
+
+ if ((rc = siena_board_cfg(enp)) != 0)
+ goto fail5;
+
+ if ((rc = siena_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = siena_nic_reset(enp)) != 0)
+ goto fail7;
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail8;
+ epp->ep_default_adv_cap_mask = sls.sls_adv_cap_mask;
+ epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+ if ((rc = siena_nic_get_partn_mask(enp, &mask)) != 0)
+ goto fail9;
+ enp->en_u.siena.enu_partn_mask = mask;
+#endif
+
+#if EFSYS_OPT_MAC_STATS
+ /* Wipe the MAC statistics */
+ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
+ goto fail10;
+#endif
+
+#if EFSYS_OPT_LOOPBACK
+ if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
+ goto fail11;
+#endif
+
+#if EFSYS_OPT_MON_STATS
+ if ((rc = mcdi_mon_cfg_build(enp)) != 0)
+ goto fail12;
+#endif
+
+ encp->enc_features = enp->en_features;
+
+ return (0);
+
+#if EFSYS_OPT_MON_STATS
+fail12:
+ EFSYS_PROBE(fail12);
+#endif
+#if EFSYS_OPT_LOOPBACK
+fail11:
+ EFSYS_PROBE(fail11);
+#endif
+#if EFSYS_OPT_MAC_STATS
+fail10:
+ EFSYS_PROBE(fail10);
+#endif
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+fail9:
+ EFSYS_PROBE(fail9);
+#endif
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* siena_nic_reset() is called to recover from BADASSERT failures. */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ /*
+ * Bug24908: ENTITY_RESET_IN_LEN is non zero but zero may be supplied
+ * for backwards compatibility with PORT_RESET_IN_LEN.
+ */
+ EFX_STATIC_ASSERT(MC_CMD_ENTITY_RESET_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_ENTITY_RESET;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (0);
+}
+
+static void
+siena_nic_rx_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * RX_INGR_EN is always enabled on Siena, because we rely on
+ * the RX parser to be resiliant to missing SOP/EOP.
+ */
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_INGR_EN, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Disable parsing of additional 802.1Q in Q packets */
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+}
+
+static void
+siena_nic_usrev_dis(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_CZ_USREV_DIS, 1);
+ EFX_BAR_WRITEO(enp, FR_CZ_USR_EV_CFG, &oword);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_init(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* Enable reporting of some events (e.g. link change) */
+ if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
+ goto fail1;
+
+ siena_sram_init(enp);
+
+ /* Configure Siena's RX block */
+ siena_nic_rx_cfg(enp);
+
+ /* Disable USR_EVents for now */
+ siena_nic_usrev_dis(enp);
+
+ /* bug17057: Ensure set_link is called */
+ if ((rc = siena_phy_reconfigure(enp)) != 0)
+ goto fail2;
+
+ enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V1;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_nic_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ void
+siena_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_cfg_free(enp);
+#endif /* EFSYS_OPT_MON_STATS */
+ (void) efx_mcdi_drv_attach(enp, B_FALSE);
+}
+
+#if EFSYS_OPT_DIAG
+
+static efx_register_set_t __siena_registers[] = {
+ { FR_AZ_ADR_REGION_REG_OFST, 0, 1 },
+ { FR_CZ_USR_EV_CFG_OFST, 0, 1 },
+ { FR_AZ_RX_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_TX_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_TX_RESERVED_REG_OFST, 0, 1 },
+ { FR_AZ_SRM_TX_DC_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_RX_DC_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_RX_DC_PF_WM_REG_OFST, 0, 1 },
+ { FR_AZ_DP_CTRL_REG_OFST, 0, 1 },
+ { FR_BZ_RX_RSS_TKEY_REG_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG1_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG2_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG3_OFST, 0, 1}
+};
+
+static const uint32_t __siena_register_masks[] = {
+ 0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF,
+ 0x000103FF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000,
+ 0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF,
+ 0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF,
+ 0x001FFFFF, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000003, 0x00000000, 0x00000000, 0x00000000,
+ 0x000003FF, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000FFF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000
+};
+
+static efx_register_set_t __siena_tables[] = {
+ { FR_AZ_RX_FILTER_TBL0_OFST, FR_AZ_RX_FILTER_TBL0_STEP,
+ FR_AZ_RX_FILTER_TBL0_ROWS },
+ { FR_CZ_RX_MAC_FILTER_TBL0_OFST, FR_CZ_RX_MAC_FILTER_TBL0_STEP,
+ FR_CZ_RX_MAC_FILTER_TBL0_ROWS },
+ { FR_AZ_RX_DESC_PTR_TBL_OFST,
+ FR_AZ_RX_DESC_PTR_TBL_STEP, FR_CZ_RX_DESC_PTR_TBL_ROWS },
+ { FR_AZ_TX_DESC_PTR_TBL_OFST,
+ FR_AZ_TX_DESC_PTR_TBL_STEP, FR_CZ_TX_DESC_PTR_TBL_ROWS },
+ { FR_AZ_TIMER_TBL_OFST, FR_AZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS },
+ { FR_CZ_TX_FILTER_TBL0_OFST,
+ FR_CZ_TX_FILTER_TBL0_STEP, FR_CZ_TX_FILTER_TBL0_ROWS },
+ { FR_CZ_TX_MAC_FILTER_TBL0_OFST,
+ FR_CZ_TX_MAC_FILTER_TBL0_STEP, FR_CZ_TX_MAC_FILTER_TBL0_ROWS }
+};
+
+static const uint32_t __siena_table_masks[] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000003FF,
+ 0xFFFF0FFF, 0xFFFFFFFF, 0x00000E7F, 0x00000000,
+ 0xFFFFFFFE, 0x0FFFFFFF, 0x01800000, 0x00000000,
+ 0xFFFFFFFE, 0x0FFFFFFF, 0x0C000000, 0x00000000,
+ 0x3FFFFFFF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000013FF,
+ 0xFFFF07FF, 0xFFFFFFFF, 0x0000007F, 0x00000000,
+};
+
+ __checkReturn efx_rc_t
+siena_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ efx_register_set_t *rsp;
+ const uint32_t *dwordp;
+ unsigned int nitems;
+ unsigned int count;
+ efx_rc_t rc;
+
+ /* Fill out the register mask entries */
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_register_masks)
+ == EFX_ARRAY_SIZE(__siena_registers) * 4);
+
+ nitems = EFX_ARRAY_SIZE(__siena_registers);
+ dwordp = __siena_register_masks;
+ for (count = 0; count < nitems; ++count) {
+ rsp = __siena_registers + count;
+ rsp->mask.eo_u32[0] = *dwordp++;
+ rsp->mask.eo_u32[1] = *dwordp++;
+ rsp->mask.eo_u32[2] = *dwordp++;
+ rsp->mask.eo_u32[3] = *dwordp++;
+ }
+
+ /* Fill out the register table entries */
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_table_masks)
+ == EFX_ARRAY_SIZE(__siena_tables) * 4);
+
+ nitems = EFX_ARRAY_SIZE(__siena_tables);
+ dwordp = __siena_table_masks;
+ for (count = 0; count < nitems; ++count) {
+ rsp = __siena_tables + count;
+ rsp->mask.eo_u32[0] = *dwordp++;
+ rsp->mask.eo_u32[1] = *dwordp++;
+ rsp->mask.eo_u32[2] = *dwordp++;
+ rsp->mask.eo_u32[3] = *dwordp++;
+ }
+
+ if ((rc = efx_nic_test_registers(enp, __siena_registers,
+ EFX_ARRAY_SIZE(__siena_registers))) != 0)
+ goto fail1;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BYTE_ALTERNATE,
+ EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail2;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BYTE_CHANGING,
+ EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail3;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BIT_SWEEP, EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/siena_nvram.c b/drivers/net/sfc/base/siena_nvram.c
new file mode 100644
index 00000000..af4cf172
--- /dev/null
+++ b/drivers/net/sfc/base/siena_nvram.c
@@ -0,0 +1,734 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, sizep,
+ NULL, NULL, NULL)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0) {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
+ while (size > 0) {
+ chunk = MIN(size, SIENA_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_read(enp, partn, offset, data, chunk,
+ MC_CMD_NVRAM_READ_IN_V2_DEFAULT)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0) {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
+ while (size > 0) {
+ chunk = MIN(size, SIENA_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_write(enp, partn, offset,
+ data, chunk)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ boolean_t reboot;
+ efx_rc_t rc;
+
+ /*
+ * Reboot into the new image only for PHYs. The driver has to
+ * explicitly cope with an MC reboot after a firmware update.
+ */
+ reboot = (partn == MC_CMD_NVRAM_TYPE_PHY_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_PHY_PORT1 ||
+ partn == MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO);
+
+ rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, NULL);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef struct siena_parttbl_entry_s {
+ unsigned int partn;
+ unsigned int port;
+ efx_nvram_type_t nvtype;
+} siena_parttbl_entry_t;
+
+static siena_parttbl_entry_t siena_parttbl[] = {
+ {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 1, EFX_NVRAM_NULLPHY},
+ {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 2, EFX_NVRAM_NULLPHY},
+ {MC_CMD_NVRAM_TYPE_MC_FW, 1, EFX_NVRAM_MC_FIRMWARE},
+ {MC_CMD_NVRAM_TYPE_MC_FW, 2, EFX_NVRAM_MC_FIRMWARE},
+ {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM, 1, EFX_NVRAM_BOOTROM},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM, 2, EFX_NVRAM_BOOTROM},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG},
+ {MC_CMD_NVRAM_TYPE_PHY_PORT0, 1, EFX_NVRAM_PHY},
+ {MC_CMD_NVRAM_TYPE_PHY_PORT1, 2, EFX_NVRAM_PHY},
+ {MC_CMD_NVRAM_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
+ {MC_CMD_NVRAM_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
+ {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
+ {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
+ {MC_CMD_NVRAM_TYPE_FC_FW, 1, EFX_NVRAM_FCFW},
+ {MC_CMD_NVRAM_TYPE_FC_FW, 2, EFX_NVRAM_FCFW},
+ {MC_CMD_NVRAM_TYPE_CPLD, 1, EFX_NVRAM_CPLD},
+ {MC_CMD_NVRAM_TYPE_CPLD, 2, EFX_NVRAM_CPLD},
+ {MC_CMD_NVRAM_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
+ {MC_CMD_NVRAM_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE}
+};
+
+ __checkReturn efx_rc_t
+siena_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int i;
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT(partnp != NULL);
+
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ siena_parttbl_entry_t *entry = &siena_parttbl[i];
+
+ if (entry->port == emip->emi_port && entry->nvtype == type) {
+ *partnp = entry->partn;
+ return (0);
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+siena_nvram_test(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_parttbl_entry_t *entry;
+ unsigned int i;
+ efx_rc_t rc;
+
+ /*
+ * Iterate over the list of supported partition types
+ * applicable to *this* port
+ */
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ entry = &siena_parttbl[i];
+
+ if (entry->port != emip->emi_port ||
+ !(enp->en_u.siena.enu_partn_mask & (1 << entry->partn)))
+ continue;
+
+ if ((rc = efx_mcdi_nvram_test(enp, entry->partn)) != 0) {
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#define SIENA_DYNAMIC_CFG_SIZE(_nitems) \
+ (sizeof (siena_mc_dynamic_config_hdr_t) + ((_nitems) * \
+ sizeof (((siena_mc_dynamic_config_hdr_t *)NULL)->fw_version[0])))
+
+ __checkReturn efx_rc_t
+siena_nvram_get_dynamic_cfg(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t vpd,
+ __out siena_mc_dynamic_config_hdr_t **dcfgp,
+ __out size_t *sizep)
+{
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ size_t size;
+ uint8_t cksum;
+ unsigned int vpd_offset;
+ unsigned int vpd_length;
+ unsigned int hdr_length;
+ unsigned int nversions;
+ unsigned int pos;
+ unsigned int region;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1);
+
+ /*
+ * Allocate sufficient memory for the entire dynamiccfg area, even
+ * if we're not actually going to read in the VPD.
+ */
+ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, dcfg);
+ if (dcfg == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ if ((rc = siena_nvram_partn_read(enp, partn, 0,
+ (caddr_t)dcfg, SIENA_NVRAM_CHUNK)) != 0)
+ goto fail3;
+
+ /* Verify the magic */
+ if (EFX_DWORD_FIELD(dcfg->magic, EFX_DWORD_0)
+ != SIENA_MC_DYNAMIC_CONFIG_MAGIC)
+ goto invalid1;
+
+ /* All future versions of the structure must be backwards compatible */
+ EFX_STATIC_ASSERT(SIENA_MC_DYNAMIC_CONFIG_VERSION == 0);
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+ nversions = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+
+ /* Verify the hdr doesn't overflow the partn size */
+ if (hdr_length > size || vpd_offset > size || vpd_length > size ||
+ vpd_length + vpd_offset > size)
+ goto invalid2;
+
+ /* Verify the header has room for all it's versions */
+ if (hdr_length < SIENA_DYNAMIC_CFG_SIZE(0) ||
+ hdr_length < SIENA_DYNAMIC_CFG_SIZE(nversions))
+ goto invalid3;
+
+ /*
+ * Read the remaining portion of the dcfg, either including
+ * the whole of VPD (there is no vpd length in this structure,
+ * so we have to parse each tag), or just the dcfg header itself
+ */
+ region = vpd ? vpd_offset + vpd_length : hdr_length;
+ if (region > SIENA_NVRAM_CHUNK) {
+ if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
+ (caddr_t)dcfg + SIENA_NVRAM_CHUNK,
+ region - SIENA_NVRAM_CHUNK)) != 0)
+ goto fail4;
+ }
+
+ /* Verify checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ if (cksum != 0)
+ goto invalid4;
+
+ goto done;
+
+invalid4:
+ EFSYS_PROBE(invalid4);
+invalid3:
+ EFSYS_PROBE(invalid3);
+invalid2:
+ EFSYS_PROBE(invalid2);
+invalid1:
+ EFSYS_PROBE(invalid1);
+
+ /*
+ * Construct a new "null" dcfg, with an empty version vector,
+ * and an empty VPD chunk trailing. This has the neat side effect
+ * of testing the exception paths in the write path.
+ */
+ EFX_POPULATE_DWORD_1(dcfg->magic,
+ EFX_DWORD_0, SIENA_MC_DYNAMIC_CONFIG_MAGIC);
+ EFX_POPULATE_WORD_1(dcfg->length, EFX_WORD_0, sizeof (*dcfg));
+ EFX_POPULATE_BYTE_1(dcfg->version, EFX_BYTE_0,
+ SIENA_MC_DYNAMIC_CONFIG_VERSION);
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
+ EFX_DWORD_0, sizeof (*dcfg));
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, 0);
+ EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items, EFX_DWORD_0, 0);
+
+done:
+ *dcfgp = dcfg;
+ *sizep = size;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, dcfg);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_get_subtype(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMAX)];
+ efx_word_t *fw_list;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOARD_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST +
+ (partn + 1) * sizeof (efx_word_t)) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ fw_list = MCDI_OUT2(req, efx_word_t,
+ GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+ *subtypep = EFX_WORD_FIELD(fw_list[partn], EFX_WORD_0);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ siena_mc_dynamic_config_hdr_t *dcfg;
+ siena_parttbl_entry_t *entry;
+ uint32_t dcfg_partn;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = siena_nvram_get_subtype(enp, partn, subtypep)) != 0)
+ goto fail2;
+
+ /*
+ * Some partitions are accessible from both ports (for instance BOOTROM)
+ * Find the highest version reported by all dcfg structures on ports
+ * that have access to this partition.
+ */
+ version[0] = version[1] = version[2] = version[3] = 0;
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ siena_mc_fw_version_t *verp;
+ unsigned int nitems;
+ uint16_t temp[4];
+ size_t length;
+
+ entry = &siena_parttbl[i];
+ if (entry->partn != partn)
+ continue;
+
+ dcfg_partn = (entry->port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+ /*
+ * Ingore missing partitions on port 2, assuming they're due
+ * to to running on a single port part.
+ */
+ if ((1 << dcfg_partn) & ~enp->en_u.siena.enu_partn_mask) {
+ if (entry->port == 2)
+ continue;
+ }
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_FALSE, &dcfg, &length)) != 0)
+ goto fail3;
+
+ nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items,
+ EFX_DWORD_0);
+ if (nitems < entry->partn)
+ goto done;
+
+ verp = &dcfg->fw_version[partn];
+ temp[0] = EFX_WORD_FIELD(verp->version_w, EFX_WORD_0);
+ temp[1] = EFX_WORD_FIELD(verp->version_x, EFX_WORD_0);
+ temp[2] = EFX_WORD_FIELD(verp->version_y, EFX_WORD_0);
+ temp[3] = EFX_WORD_FIELD(verp->version_z, EFX_WORD_0);
+ if (memcmp(version, temp, sizeof (temp)) < 0)
+ memcpy(version, temp, sizeof (temp));
+
+done:
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = siena_nvram_partn_lock(enp, partn)) != 0)
+ goto fail1;
+
+ if (chunk_sizep != NULL)
+ *chunk_sizep = SIENA_NVRAM_CHUNK;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = siena_nvram_partn_unlock(enp, partn)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4])
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ siena_mc_fw_version_t *fwverp;
+ uint32_t dcfg_partn;
+ size_t dcfg_size;
+ unsigned int hdr_length;
+ unsigned int vpd_length;
+ unsigned int vpd_offset;
+ unsigned int nitems;
+ unsigned int required_hdr_length;
+ unsigned int pos;
+ uint8_t cksum;
+ uint32_t subtype;
+ size_t length;
+ efx_rc_t rc;
+
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &dcfg_size)) != 0)
+ goto fail1;
+
+ if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_TRUE, &dcfg, &length)) != 0)
+ goto fail3;
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+ nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+
+ /*
+ * NOTE: This function will blatt any fields trailing the version
+ * vector, or the VPD chunk.
+ */
+ required_hdr_length = SIENA_DYNAMIC_CFG_SIZE(partn + 1);
+ if (required_hdr_length + vpd_length > length) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ if (vpd_offset < required_hdr_length) {
+ (void) memmove((caddr_t)dcfg + required_hdr_length,
+ (caddr_t)dcfg + vpd_offset, vpd_length);
+ vpd_offset = required_hdr_length;
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
+ EFX_DWORD_0, vpd_offset);
+ }
+
+ if (hdr_length < required_hdr_length) {
+ (void) memset((caddr_t)dcfg + hdr_length, 0,
+ required_hdr_length - hdr_length);
+ hdr_length = required_hdr_length;
+ EFX_POPULATE_WORD_1(dcfg->length,
+ EFX_WORD_0, hdr_length);
+ }
+
+ /* Get the subtype to insert into the fw_subtype array */
+ if ((rc = siena_nvram_get_subtype(enp, partn, &subtype)) != 0)
+ goto fail5;
+
+ /* Fill out the new version */
+ fwverp = &dcfg->fw_version[partn];
+ EFX_POPULATE_DWORD_1(fwverp->fw_subtype, EFX_DWORD_0, subtype);
+ EFX_POPULATE_WORD_1(fwverp->version_w, EFX_WORD_0, version[0]);
+ EFX_POPULATE_WORD_1(fwverp->version_x, EFX_WORD_0, version[1]);
+ EFX_POPULATE_WORD_1(fwverp->version_y, EFX_WORD_0, version[2]);
+ EFX_POPULATE_WORD_1(fwverp->version_z, EFX_WORD_0, version[3]);
+
+ /* Update the version count */
+ if (nitems < partn + 1) {
+ nitems = partn + 1;
+ EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items,
+ EFX_DWORD_0, nitems);
+ }
+
+ /* Update the checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ dcfg->csum.eb_u8[0] -= cksum;
+
+ /* Erase and write the new partition */
+ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, dcfg_size)) != 0)
+ goto fail6;
+
+ /* Write out the new structure to nvram */
+ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0,
+ (caddr_t)dcfg, vpd_offset + vpd_length)) != 0)
+ goto fail7;
+
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/siena_phy.c b/drivers/net/sfc/base/siena_phy.c
new file mode 100644
index 00000000..b90ccabc
--- /dev/null
+++ b/drivers/net/sfc/base/siena_phy.c
@@ -0,0 +1,797 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+static void
+siena_phy_decode_cap(
+ __in uint32_t mcdi_cap,
+ __out uint32_t *maskp)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ mask |= (1 << EFX_PHY_CAP_PAUSE);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ mask |= (1 << EFX_PHY_CAP_ASYM);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mask |= (1 << EFX_PHY_CAP_AN);
+
+ *maskp = mask;
+}
+
+static void
+siena_phy_decode_link_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t link_flags,
+ __in unsigned int speed,
+ __in unsigned int fcntl,
+ __out efx_link_mode_t *link_modep,
+ __out unsigned int *fcntlp)
+{
+ boolean_t fd = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ boolean_t up = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (!up)
+ *link_modep = EFX_LINK_DOWN;
+ else if (speed == 10000 && fd)
+ *link_modep = EFX_LINK_10000FDX;
+ else if (speed == 1000)
+ *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+ else if (speed == 100)
+ *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+ else if (speed == 10)
+ *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+ else
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ if (fcntl == MC_CMD_FCNTL_OFF)
+ *fcntlp = 0;
+ else if (fcntl == MC_CMD_FCNTL_RESPOND)
+ *fcntlp = EFX_FCNTL_RESPOND;
+ else if (fcntl == MC_CMD_FCNTL_BIDIR)
+ *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ else {
+ EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+ *fcntlp = 0;
+ }
+}
+
+ void
+siena_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int link_flags;
+ unsigned int speed;
+ unsigned int fcntl;
+ efx_link_mode_t link_mode;
+ uint32_t lp_cap_mask;
+
+ /*
+ * Convert the LINKCHANGE speed enumeration into mbit/s, in the
+ * same way as GET_LINK encodes the speed
+ */
+ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
+ case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+ speed = 100;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+ speed = 1000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+ speed = 10000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
+ siena_phy_decode_link_mode(enp, link_flags, speed,
+ MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
+ &link_mode, &fcntl);
+ siena_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
+ &lp_cap_mask);
+
+ /*
+ * It's safe to update ep_lp_cap_mask without the driver's port lock
+ * because presumably any concurrently running efx_port_poll() is
+ * only going to arrive at the same value.
+ *
+ * ep_fcntl has two meanings. It's either the link common fcntl
+ * (if the PHY supports AN), or it's the forced link state. If
+ * the former, it's safe to update the value for the same reason as
+ * for ep_lp_cap_mask. If the latter, then just ignore the value,
+ * because we can race with efx_mac_fcntl_set().
+ */
+ epp->ep_lp_cap_mask = lp_cap_mask;
+ if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
+ epp->ep_fcntl = fcntl;
+
+ *link_modep = link_mode;
+}
+
+ __checkReturn efx_rc_t
+siena_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t power)
+{
+ efx_rc_t rc;
+
+ if (!power)
+ return (0);
+
+ /* Check if the PHY is a zombie */
+ if ((rc = siena_phy_verify(enp)) != 0)
+ goto fail1;
+
+ enp->en_reset_flags |= EFX_RESET_PHY;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_get_link(
+ __in efx_nic_t *enp,
+ __out siena_link_state_t *slsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+ &slsp->sls_adv_cap_mask);
+ siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+ &slsp->sls_lp_cap_mask);
+
+ siena_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+ &slsp->sls_link_mode, &slsp->sls_fcntl);
+
+#if EFSYS_OPT_LOOPBACK
+ /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+
+ slsp->sls_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ slsp->sls_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_SET_ID_LED_IN_LEN,
+ MC_CMD_SET_ID_LED_OUT_LEN),
+ MAX(MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN))];
+ uint32_t cap_mask;
+ unsigned int led_mode;
+ unsigned int speed;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
+
+ cap_mask = epp->ep_adv_cap_mask;
+ MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+ PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+ PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+ PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+ PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+ PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+ PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+ PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+ PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+ PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+ PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+
+#if EFSYS_OPT_LOOPBACK
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
+ epp->ep_loopback_type);
+ switch (epp->ep_loopback_link_mode) {
+ case EFX_LINK_100FDX:
+ speed = 100;
+ break;
+ case EFX_LINK_1000FDX:
+ speed = 1000;
+ break;
+ case EFX_LINK_10000FDX:
+ speed = 10000;
+ break;
+ default:
+ speed = 0;
+ }
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+ speed = 0;
+#endif /* EFSYS_OPT_LOOPBACK */
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+#if EFSYS_OPT_PHY_FLAGS
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* And set the blink mode */
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_ID_LED;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+ switch (epp->ep_phy_led_mode) {
+ case EFX_PHY_LED_DEFAULT:
+ led_mode = MC_CMD_LED_DEFAULT;
+ break;
+ case EFX_PHY_LED_OFF:
+ led_mode = MC_CMD_LED_OFF;
+ break;
+ case EFX_PHY_LED_ON:
+ led_mode = MC_CMD_LED_ON;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ led_mode = MC_CMD_LED_DEFAULT;
+ }
+
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
+#else
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ uint32_t state;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+ if (state != MC_CMD_PHY_STATE_OK) {
+ if (state != MC_CMD_PHY_STATE_ZOMBIE)
+ EFSYS_PROBE1(mc_pcol_error, int, state);
+ rc = ENOTACTIVE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ _NOTE(ARGUNUSED(enp, ouip))
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+#define SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \
+ _mc_record, _efx_record) \
+ if ((_vmask) & (1ULL << (_mc_record))) { \
+ (_smask) |= (1ULL << (_efx_record)); \
+ if ((_stat) != NULL && !EFSYS_MEM_IS_NULL(_esmp)) { \
+ efx_dword_t dword; \
+ EFSYS_MEM_READD(_esmp, (_mc_record) * 4, &dword);\
+ (_stat)[_efx_record] = \
+ EFX_DWORD_FIELD(dword, EFX_DWORD_0); \
+ } \
+ }
+
+#define SIENA_SIMPLE_STAT_SET2(_vmask, _esmp, _smask, _stat, _record) \
+ SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \
+ MC_CMD_ ## _record, \
+ EFX_PHY_STAT_ ## _record)
+
+ void
+siena_phy_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t vmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint64_t *smaskp,
+ __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ uint64_t smask = 0;
+
+ _NOTE(ARGUNUSED(enp))
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, OUI);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_TX_FAULT);
+
+ if (vmask & (1 << MC_CMD_PMA_PMD_SIGNAL)) {
+ smask |= ((1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_A) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_B) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_C) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_D));
+ if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+ uint32_t sig;
+ EFSYS_MEM_READD(esmp, 4 * MC_CMD_PMA_PMD_SIGNAL,
+ &dword);
+ sig = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_A] = (sig >> 1) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_B] = (sig >> 2) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_C] = (sig >> 3) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_D] = (sig >> 4) & 1;
+ }
+ }
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_A,
+ EFX_PHY_STAT_SNR_A);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_B,
+ EFX_PHY_STAT_SNR_B);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_C,
+ EFX_PHY_STAT_SNR_C);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_D,
+ EFX_PHY_STAT_SNR_D);
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_TX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BER);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BLOCK_ERRORS);
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_LINK_UP,
+ EFX_PHY_STAT_PHY_XS_LINK_UP);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_RX_FAULT,
+ EFX_PHY_STAT_PHY_XS_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_TX_FAULT,
+ EFX_PHY_STAT_PHY_XS_TX_FAULT);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_ALIGN,
+ EFX_PHY_STAT_PHY_XS_ALIGN);
+
+ if (vmask & (1 << MC_CMD_PHYXS_SYNC)) {
+ smask |= ((1 << EFX_PHY_STAT_PHY_XS_SYNC_A) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_B) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_C) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_D));
+ if (stat != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+ uint32_t sync;
+ EFSYS_MEM_READD(esmp, 4 * MC_CMD_PHYXS_SYNC, &dword);
+ sync = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_A] = (sync >> 0) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_B] = (sync >> 1) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_C] = (sync >> 2) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_D] = (sync >> 3) & 1;
+ }
+ }
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_COMPLETE);
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_CL22_LINK_UP,
+ EFX_PHY_STAT_CL22EXT_LINK_UP);
+
+ if (smaskp != NULL)
+ *smaskp = smask;
+}
+
+ __checkReturn efx_rc_t
+siena_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t vmask = encp->enc_mcdi_phy_stat_mask;
+ uint64_t smask;
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_PHY_STATS_IN_LEN,
+ MC_CMD_PHY_STATS_OUT_DMA_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_PHY_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_PHY_STATS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_PHY_STATS_OUT_DMA_LEN;
+
+ MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN);
+
+ siena_phy_decode_stats(enp, vmask, esmp, &smask, stat);
+ EFSYS_ASSERT(smask == encp->enc_phy_stat_mask);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+siena_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_start(enp, type)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn unsigned long
+siena_phy_sft9001_bist_status(
+ __in uint16_t code)
+{
+ switch (code) {
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY:
+ return (EFX_PHY_CABLE_STATUS_BUSY);
+ case MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT:
+ return (EFX_PHY_CABLE_STATUS_INTERPAIRSHORT);
+ case MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT:
+ return (EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT);
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN:
+ return (EFX_PHY_CABLE_STATUS_OPEN);
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_OK:
+ return (EFX_PHY_CABLE_STATUS_OK);
+ default:
+ return (EFX_PHY_CABLE_STATUS_INVALID);
+ }
+}
+
+ __checkReturn efx_rc_t
+siena_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX)];
+ uint32_t value_mask = 0;
+ efx_mcdi_req_t req;
+ uint32_t result;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_POLL_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MCDI_CTL_SDU_LEN_MAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (count > 0)
+ (void) memset(valuesp, '\0', count * sizeof (unsigned long));
+
+ result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT);
+
+ /* Extract PHY specific results */
+ if (result == MC_CMD_POLL_BIST_PASSED &&
+ encp->enc_phy_type == EFX_PHY_SFT9001B &&
+ req.emr_out_length_used >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN &&
+ (type == EFX_BIST_TYPE_PHY_CABLE_SHORT ||
+ type == EFX_BIST_TYPE_PHY_CABLE_LONG)) {
+ uint16_t word;
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_A) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_A] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_A);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_B) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_B] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_B);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_C) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_C] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_C);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_D) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_D] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_D);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_A) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_A);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_A] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_A);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_B) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_B);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_B] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_B);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_C) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_C);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_C] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_C);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_D) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_D);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_D] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_D);
+ }
+
+ } else if (result == MC_CMD_POLL_BIST_FAILED &&
+ encp->enc_phy_type == EFX_PHY_QLX111V &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN &&
+ count > EFX_BIST_FAULT_CODE) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_FAULT_CODE] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST);
+ value_mask |= 1 << EFX_BIST_FAULT_CODE;
+ }
+
+ if (value_maskp != NULL)
+ *value_maskp = value_mask;
+
+ EFSYS_ASSERT(resultp != NULL);
+ if (result == MC_CMD_POLL_BIST_RUNNING)
+ *resultp = EFX_BIST_RESULT_RUNNING;
+ else if (result == MC_CMD_POLL_BIST_PASSED)
+ *resultp = EFX_BIST_RESULT_PASSED;
+ else
+ *resultp = EFX_BIST_RESULT_FAILED;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ /* There is no way to stop BIST on Siena */
+ _NOTE(ARGUNUSED(enp, type))
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/siena_sram.c b/drivers/net/sfc/base/siena_sram.c
new file mode 100644
index 00000000..572c2e9a
--- /dev/null
+++ b/drivers/net/sfc/base/siena_sram.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+ void
+siena_sram_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t rx_base, tx_base;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ rx_base = encp->enc_buftbl_limit;
+ tx_base = rx_base + (encp->enc_rxq_limit *
+ EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE));
+
+ /* Initialize the transmit descriptor cache */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, tx_base);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DC_SIZE, EFX_TXQ_DC_SIZE);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_DC_CFG_REG, &oword);
+
+ /* Initialize the receive descriptor cache */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rx_base);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_SIZE, EFX_RXQ_DC_SIZE);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_CFG_REG, &oword);
+
+ /* Set receive descriptor pre-fetch low water mark */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_PF_LWM, 56);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_PF_WM_REG, &oword);
+
+ /* Set the event queue to use for SRAM updates */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_UPD_EVQ_ID, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_UPD_EVQ_REG, &oword);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+siena_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_sram_pattern_fn_t func)
+{
+ efx_oword_t oword;
+ efx_qword_t qword;
+ efx_qword_t verify;
+ size_t rows;
+ unsigned int wptr;
+ unsigned int rptr;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Reconfigure into HALF buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ /*
+ * Move the descriptor caches up to the top of SRAM, and test
+ * all of SRAM below them. We only miss out one row here.
+ */
+ rows = SIENA_SRAM_ROWS - 1;
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rows);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, rows + 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);
+
+ /*
+ * Write the pattern through BUF_HALF_TBL. Write
+ * in 64 entry batches, waiting 1us in between each batch
+ * to guarantee not to overflow the SRAM fifo
+ */
+ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
+ func(wptr, B_FALSE, &qword);
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);
+
+ if ((wptr - rptr) < 64 && wptr < rows - 1)
+ continue;
+
+ EFSYS_SPIN(1);
+
+ for (; rptr <= wptr; ++rptr) {
+ func(rptr, B_FALSE, &qword);
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
+ &verify);
+
+ if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ }
+ }
+
+ /* And do the same negated */
+ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
+ func(wptr, B_TRUE, &qword);
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);
+
+ if ((wptr - rptr) < 64 && wptr < rows - 1)
+ continue;
+
+ EFSYS_SPIN(1);
+
+ for (; rptr <= wptr; ++rptr) {
+ func(rptr, B_TRUE, &qword);
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
+ &verify);
+
+ if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+ }
+
+ /* Restore back to FULL buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ /*
+ * We don't need to reconfigure SRAM again because the API
+ * requires efx_nic_fini() to be called after an sram test.
+ */
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Restore back to FULL buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/siena_vpd.c b/drivers/net/sfc/base/siena_vpd.c
new file mode 100644
index 00000000..4fb2e426
--- /dev/null
+++ b/drivers/net/sfc/base/siena_vpd.c
@@ -0,0 +1,618 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_VPD
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_vpd_get_static(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __deref_out_bcount_opt(*sizep) caddr_t *svpdp,
+ __out size_t *sizep)
+{
+ siena_mc_static_config_hdr_t *scfg;
+ caddr_t svpd;
+ size_t size;
+ uint8_t cksum;
+ unsigned int vpd_offset;
+ unsigned int vpd_length;
+ unsigned int hdr_length;
+ unsigned int pos;
+ unsigned int region;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1);
+
+ /* Allocate sufficient memory for the entire static cfg area */
+ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg);
+ if (scfg == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ if ((rc = siena_nvram_partn_read(enp, partn, 0,
+ (caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0)
+ goto fail3;
+
+ /* Verify the magic number */
+ if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) !=
+ SIENA_MC_STATIC_CONFIG_MAGIC) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* All future versions of the structure must be backwards compatible */
+ EFX_STATIC_ASSERT(SIENA_MC_STATIC_CONFIG_VERSION == 0);
+
+ hdr_length = EFX_WORD_FIELD(scfg->length, EFX_WORD_0);
+ vpd_offset = EFX_DWORD_FIELD(scfg->static_vpd_offset, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(scfg->static_vpd_length, EFX_DWORD_0);
+
+ /* Verify the hdr doesn't overflow the sector size */
+ if (hdr_length > size || vpd_offset > size || vpd_length > size ||
+ vpd_length + vpd_offset > size) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ /* Read the remainder of scfg + static vpd */
+ region = vpd_offset + vpd_length;
+ if (region > SIENA_NVRAM_CHUNK) {
+ if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
+ (caddr_t)scfg + SIENA_NVRAM_CHUNK,
+ region - SIENA_NVRAM_CHUNK)) != 0)
+ goto fail6;
+ }
+
+ /* Verify checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)scfg)[pos];
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ if (vpd_length == 0)
+ svpd = NULL;
+ else {
+ /* Copy the vpd data out */
+ EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd);
+ if (svpd == NULL) {
+ rc = ENOMEM;
+ goto fail8;
+ }
+ memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length);
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+
+ *svpdp = svpd;
+ *sizep = vpd_length;
+
+ return (0);
+
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_init(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ caddr_t svpd = NULL;
+ unsigned int partn;
+ size_t size = 0;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1;
+
+ /*
+ * We need the static VPD sector to present a unified static+dynamic
+ * VPD, that is, basically on every read, write, verify cycle. Since
+ * it should *never* change we can just cache it here.
+ */
+ if ((rc = siena_vpd_get_static(enp, partn, &svpd, &size)) != 0)
+ goto fail1;
+
+ if (svpd != NULL && size > 0) {
+ if ((rc = efx_vpd_hunk_verify(svpd, size, NULL)) != 0)
+ goto fail2;
+ }
+
+ enp->en_u.siena.enu_svpd = svpd;
+ enp->en_u.siena.enu_svpd_length = size;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, svpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /*
+ * This function returns the total size the user should allocate
+ * for all VPD operations. We've already cached the static vpd,
+ * so we just need to return an upper bound on the dynamic vpd.
+ * Since the dynamic_config structure can change under our feet,
+ * (as version numbers are inserted), just be safe and return the
+ * total size of the dynamic_config *sector*
+ */
+ partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, partn, sizep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ unsigned int vpd_length;
+ unsigned int vpd_offset;
+ unsigned int dcfg_partn;
+ size_t dcfg_size;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_TRUE, &dcfg, &dcfg_size)) != 0)
+ goto fail1;
+
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+
+ if (vpd_length > size) {
+ rc = EFAULT; /* Invalid dcfg: header bigger than sector */
+ goto fail2;
+ }
+
+ EFSYS_ASSERT3U(vpd_length, <=, size);
+ memcpy(data, (caddr_t)dcfg + vpd_offset, vpd_length);
+
+ /* Pad data with all-1s, consistent with update operations */
+ memset(data + vpd_length, 0xff, size - vpd_length);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_tag_t stag;
+ efx_vpd_tag_t dtag;
+ efx_vpd_keyword_t skey;
+ efx_vpd_keyword_t dkey;
+ unsigned int scont;
+ unsigned int dcont;
+
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /*
+ * Strictly you could take the view that dynamic vpd is optional.
+ * Instead, to conform more closely to the read/verify/reinit()
+ * paradigm, we require dynamic vpd. siena_vpd_reinit() will
+ * reinitialize it as required.
+ */
+ if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0)
+ goto fail1;
+
+ /*
+ * Verify that there is no duplication between the static and
+ * dynamic cfg sectors.
+ */
+ if (enp->en_u.siena.enu_svpd_length == 0)
+ goto done;
+
+ dcont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(data, size, &dtag,
+ &dkey, NULL, NULL, &dcont)) != 0)
+ goto fail2;
+ if (dcont == 0)
+ break;
+
+ /*
+ * Skip the RV keyword. It should be present in both the static
+ * and dynamic cfg sectors.
+ */
+ if (dtag == EFX_VPD_RO && dkey == EFX_VPD_KEYWORD('R', 'V'))
+ continue;
+
+ scont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(
+ enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, &stag, &skey,
+ NULL, NULL, &scont)) != 0)
+ goto fail3;
+ if (scont == 0)
+ break;
+
+ if (stag == dtag && skey == dkey) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ boolean_t wantpid;
+ efx_rc_t rc;
+
+ /*
+ * Only create a PID if the dynamic cfg doesn't have one
+ */
+ if (enp->en_u.siena.enu_svpd_length == 0)
+ wantpid = B_TRUE;
+ else {
+ unsigned int offset;
+ uint8_t length;
+
+ rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length,
+ EFX_VPD_ID, 0, &offset, &length);
+ if (rc == 0)
+ wantpid = B_FALSE;
+ else if (rc == ENOENT)
+ wantpid = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ unsigned int offset;
+ uint8_t length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Attempt to satisfy the request from svpd first */
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value,
+ enp->en_u.siena.enu_svpd + offset, length);
+ return (0);
+ } else if (rc != ENOENT)
+ goto fail1;
+ }
+
+ /* And then from the provided data buffer */
+ if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+
+ goto fail2;
+ }
+
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value, data + offset, length);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* If the provided (tag,keyword) exists in svpd, then it is readonly */
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ unsigned int offset;
+ uint8_t length;
+
+ if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ rc = EACCES;
+ goto fail1;
+ }
+ }
+
+ if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ _NOTE(ARGUNUSED(enp, data, size, evvp, contp))
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ unsigned int vpd_offset;
+ unsigned int dcfg_partn;
+ unsigned int hdr_length;
+ unsigned int pos;
+ uint8_t cksum;
+ size_t partn_size, dcfg_size;
+ size_t vpd_length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Determine total length of all tags */
+ if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0)
+ goto fail1;
+
+ /* Lock dynamic config sector for write, and read structure only */
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &partn_size)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
+ goto fail3;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_FALSE, &dcfg, &dcfg_size)) != 0)
+ goto fail4;
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+
+ /* Allocated memory should have room for the new VPD */
+ if (hdr_length + vpd_length > dcfg_size) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Copy in new vpd and update header */
+ vpd_offset = dcfg_size - vpd_length;
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset, EFX_DWORD_0, vpd_offset);
+ memcpy((caddr_t)dcfg + vpd_offset, data, vpd_length);
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, vpd_length);
+
+ /* Update the checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ dcfg->csum.eb_u8[0] -= cksum;
+
+ /* Erase and write the new sector */
+ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, partn_size)) != 0)
+ goto fail6;
+
+ /* Write out the new structure to nvram */
+ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0, (caddr_t)dcfg,
+ vpd_offset + vpd_length)) != 0)
+ goto fail7;
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip, enp->en_u.siena.enu_svpd_length,
+ enp->en_u.siena.enu_svpd);
+
+ enp->en_u.siena.enu_svpd = NULL;
+ enp->en_u.siena.enu_svpd_length = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/drivers/net/sfc/efsys.h b/drivers/net/sfc/efsys.h
new file mode 100644
index 00000000..0405d02b
--- /dev/null
+++ b/drivers/net/sfc/efsys.h
@@ -0,0 +1,780 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_COMMON_EFSYS_H
+#define _SFC_COMMON_EFSYS_H
+
+#include <stdbool.h>
+
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_memzone.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#include "sfc_debug.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFSYS_HAS_UINT64 1
+#define EFSYS_USE_UINT64 1
+#define EFSYS_HAS_SSE2_M128 1
+
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#define EFSYS_IS_BIG_ENDIAN 1
+#define EFSYS_IS_LITTLE_ENDIAN 0
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define EFSYS_IS_BIG_ENDIAN 0
+#define EFSYS_IS_LITTLE_ENDIAN 1
+#else
+#error "Cannot determine system endianness"
+#endif
+#include "efx_types.h"
+
+
+#ifndef _NOTE
+#define _NOTE(s)
+#endif
+
+typedef bool boolean_t;
+
+#ifndef B_FALSE
+#define B_FALSE false
+#endif
+#ifndef B_TRUE
+#define B_TRUE true
+#endif
+
+/*
+ * RTE_MAX() and RTE_MIN() cannot be used since braced-group within
+ * expression allowed only inside a function, but MAX() is used as
+ * a number of elements in array.
+ */
+#ifndef MAX
+#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
+#endif
+#ifndef MIN
+#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))
+#endif
+
+/* There are macros for alignment in DPDK, but we need to make a proper
+ * correspondence here, if we want to re-use them at all
+ */
+#ifndef IS_P2ALIGNED
+#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
+#endif
+
+#ifndef P2ROUNDUP
+#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
+#endif
+
+#ifndef P2ALIGN
+#define P2ALIGN(_x, _a) ((_x) & -(_a))
+#endif
+
+#ifndef IS2P
+#define ISP2(x) rte_is_power_of_2(x)
+#endif
+
+#define ENOTACTIVE ENOTCONN
+
+static inline void
+prefetch_read_many(const volatile void *addr)
+{
+ rte_prefetch0(addr);
+}
+
+static inline void
+prefetch_read_once(const volatile void *addr)
+{
+ rte_prefetch_non_temporal(addr);
+}
+
+/* Modifiers used for Windows builds */
+#define __in
+#define __in_opt
+#define __in_ecount(_n)
+#define __in_ecount_opt(_n)
+#define __in_bcount(_n)
+#define __in_bcount_opt(_n)
+
+#define __out
+#define __out_opt
+#define __out_ecount(_n)
+#define __out_ecount_opt(_n)
+#define __out_bcount(_n)
+#define __out_bcount_opt(_n)
+
+#define __deref_out
+
+#define __inout
+#define __inout_opt
+#define __inout_ecount(_n)
+#define __inout_ecount_opt(_n)
+#define __inout_bcount(_n)
+#define __inout_bcount_opt(_n)
+#define __inout_bcount_full_opt(_n)
+
+#define __deref_out_bcount_opt(n)
+
+#define __checkReturn
+#define __success(_x)
+
+#define __drv_when(_p, _c)
+
+/* Code inclusion options */
+
+
+#define EFSYS_OPT_NAMES 1
+
+/* Disable SFN5xxx/SFN6xxx since it requires specific support in the PMD */
+#define EFSYS_OPT_SIENA 0
+/* Enable SFN7xxx support */
+#define EFSYS_OPT_HUNTINGTON 1
+/* Enable SFN8xxx support */
+#define EFSYS_OPT_MEDFORD 1
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+#define EFSYS_OPT_CHECK_REG 1
+#else
+#define EFSYS_OPT_CHECK_REG 0
+#endif
+
+/* MCDI is required for SFN7xxx and SFN8xx */
+#define EFSYS_OPT_MCDI 1
+#define EFSYS_OPT_MCDI_LOGGING 1
+#define EFSYS_OPT_MCDI_PROXY_AUTH 1
+
+#define EFSYS_OPT_MAC_STATS 1
+
+#define EFSYS_OPT_LOOPBACK 0
+
+#define EFSYS_OPT_MON_MCDI 0
+#define EFSYS_OPT_MON_STATS 0
+
+#define EFSYS_OPT_PHY_STATS 0
+#define EFSYS_OPT_BIST 0
+#define EFSYS_OPT_PHY_LED_CONTROL 0
+#define EFSYS_OPT_PHY_FLAGS 0
+
+#define EFSYS_OPT_VPD 0
+#define EFSYS_OPT_NVRAM 0
+#define EFSYS_OPT_BOOTCFG 0
+
+#define EFSYS_OPT_DIAG 0
+#define EFSYS_OPT_RX_SCALE 1
+#define EFSYS_OPT_QSTATS 0
+/* Filters support is required for SFN7xxx and SFN8xx */
+#define EFSYS_OPT_FILTER 1
+#define EFSYS_OPT_RX_SCATTER 0
+
+#define EFSYS_OPT_EV_PREFETCH 0
+
+#define EFSYS_OPT_DECODE_INTR_FATAL 0
+
+#define EFSYS_OPT_LICENSING 0
+
+#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0
+
+#define EFSYS_OPT_RX_PACKED_STREAM 0
+
+/* ID */
+
+typedef struct __efsys_identifier_s efsys_identifier_t;
+
+
+#define EFSYS_PROBE(_name) \
+ do { } while (0)
+
+#define EFSYS_PROBE1(_name, _type1, _arg1) \
+ do { } while (0)
+
+#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
+ do { } while (0)
+
+#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3) \
+ do { } while (0)
+
+#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4) \
+ do { } while (0)
+
+#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5) \
+ do { } while (0)
+
+#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6) \
+ do { } while (0)
+
+#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6, _type7, _arg7) \
+ do { } while (0)
+
+
+/* DMA */
+
+typedef phys_addr_t efsys_dma_addr_t;
+
+typedef struct efsys_mem_s {
+ const struct rte_memzone *esm_mz;
+ /*
+ * Ideally it should have volatile qualifier to denote that
+ * the memory may be updated by someone else. However, it adds
+ * qualifier discard warnings when the pointer or its derivative
+ * is passed to memset() or rte_mov16().
+ * So, skip the qualifier here, but make sure that it is added
+ * below in access macros.
+ */
+ void *esm_base;
+ efsys_dma_addr_t esm_addr;
+} efsys_mem_t;
+
+
+#define EFSYS_MEM_ZERO(_esmp, _size) \
+ do { \
+ (void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ (_edp)->ed_u32[0] = _addr[0]; \
+ \
+ EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ (_eqp)->eq_u64[0] = _addr[0]; \
+ \
+ EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ (_eop)->eo_u128[0] = _addr[0]; \
+ \
+ EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+
+#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ \
+ EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ _addr[0] = (_edp)->ed_u32[0]; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ _addr[0] = (_eqp)->eq_u64[0]; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ \
+ EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ _addr[0] = (_eop)->eo_u128[0]; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+
+#define EFSYS_MEM_ADDR(_esmp) \
+ ((_esmp)->esm_addr)
+
+#define EFSYS_MEM_IS_NULL(_esmp) \
+ ((_esmp)->esm_base == NULL)
+
+#define EFSYS_MEM_PREFETCH(_esmp, _offset) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ \
+ rte_prefetch0(_base + (_offset)); \
+ } while (0)
+
+
+/* BAR */
+
+typedef struct efsys_bar_s {
+ rte_spinlock_t esb_lock;
+ int esb_rid;
+ struct rte_pci_device *esb_dev;
+ /*
+ * Ideally it should have volatile qualifier to denote that
+ * the memory may be updated by someone else. However, it adds
+ * qualifier discard warnings when the pointer or its derivative
+ * is passed to memset() or rte_mov16().
+ * So, skip the qualifier here, but make sure that it is added
+ * below in access macros.
+ */
+ void *esb_base;
+} efsys_bar_t;
+
+#define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
+ do { \
+ rte_spinlock_init(&(_esbp)->esb_lock); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
+#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
+#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
+
+#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ rte_rmb(); \
+ (_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
+ \
+ EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ rte_rmb(); \
+ (_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
+ \
+ EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ rte_rmb(); \
+ /* There is no rte_read128_relaxed() yet */ \
+ (_eop)->eo_u128[0] = _addr[0]; \
+ \
+ EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+
+#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
+ rte_wmb(); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
+ rte_wmb(); \
+ \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/*
+ * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
+ * (required by PIO hardware).
+ *
+ * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) support
+ * write-combined memory mapped to user-land, so just abort if used.
+ */
+#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
+ do { \
+ rte_panic("Write-combined BAR access not supported"); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ /* There is no rte_write128_relaxed() yet */ \
+ _addr[0] = (_eop)->eo_u128[0]; \
+ rte_wmb(); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* Use the standard octo-word write for doorbell writes */
+#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
+ do { \
+ EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* SPIN */
+
+#define EFSYS_SPIN(_us) \
+ do { \
+ rte_delay_us(_us); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_SLEEP EFSYS_SPIN
+
+/* BARRIERS */
+
+#define EFSYS_MEM_READ_BARRIER() rte_rmb()
+#define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()
+
+/* DMA SYNC */
+
+/*
+ * DPDK does not provide any DMA syncing API, and no PMD drivers
+ * have any traces of explicit DMA syncing.
+ * DMA mapping is assumed to be coherent.
+ */
+
+#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)
+
+/* Just avoid store and compiler (impliciltly) reordering */
+#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
+
+/* TIMESTAMP */
+
+typedef uint64_t efsys_timestamp_t;
+
+#define EFSYS_TIMESTAMP(_usp) \
+ do { \
+ *(_usp) = rte_get_timer_cycles() * 1000000 / \
+ rte_get_timer_hz(); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* KMEM */
+
+#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
+ do { \
+ (_esip) = (_esip); \
+ (_p) = rte_zmalloc("sfc", (_size), 0); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_KMEM_FREE(_esip, _size, _p) \
+ do { \
+ (void)(_esip); \
+ (void)(_size); \
+ rte_free((_p)); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* LOCK */
+
+typedef rte_spinlock_t efsys_lock_t;
+
+#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
+ rte_spinlock_init((_eslp))
+#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
+#define SFC_EFSYS_LOCK(_eslp) \
+ rte_spinlock_lock((_eslp))
+#define SFC_EFSYS_UNLOCK(_eslp) \
+ rte_spinlock_unlock((_eslp))
+#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
+ SFC_ASSERT(rte_spinlock_is_locked((_eslp)))
+
+typedef int efsys_lock_state_t;
+
+#define EFSYS_LOCK_MAGIC 0x000010c4
+
+#define EFSYS_LOCK(_lockp, _state) \
+ do { \
+ SFC_EFSYS_LOCK(_lockp); \
+ (_state) = EFSYS_LOCK_MAGIC; \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_UNLOCK(_lockp, _state) \
+ do { \
+ SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
+ SFC_EFSYS_UNLOCK(_lockp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* STAT */
+
+typedef uint64_t efsys_stat_t;
+
+#define EFSYS_STAT_INCR(_knp, _delta) \
+ do { \
+ *(_knp) += (_delta); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_DECR(_knp, _delta) \
+ do { \
+ *(_knp) -= (_delta); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET(_knp, _val) \
+ do { \
+ *(_knp) = (_val); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
+ do { \
+ *(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* ERR */
+
+#if EFSYS_OPT_DECODE_INTR_FATAL
+#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
+ do { \
+ (void)(_esip); \
+ RTE_LOG(ERR, PMD, "FATAL ERROR #%u (0x%08x%08x)\n", \
+ (_code), (_dword0), (_dword1)); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+#endif
+
+/* ASSERT */
+
+/* RTE_VERIFY from DPDK treats expressions with % operator incorrectly,
+ * so we re-implement it here
+ */
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+#define EFSYS_ASSERT(_exp) \
+ do { \
+ if (unlikely(!(_exp))) \
+ rte_panic("line %d\tassert \"%s\" failed\n", \
+ __LINE__, (#_exp)); \
+ } while (0)
+#else
+#define EFSYS_ASSERT(_exp) (void)(_exp)
+#endif
+
+#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))
+
+#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
+#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
+#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
+
+/* ROTATE */
+
+#define EFSYS_HAS_ROTL_DWORD 0
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_COMMON_EFSYS_H */
diff --git a/drivers/net/sfc/rte_pmd_sfc_efx_version.map b/drivers/net/sfc/rte_pmd_sfc_efx_version.map
new file mode 100644
index 00000000..31eca32e
--- /dev/null
+++ b/drivers/net/sfc/rte_pmd_sfc_efx_version.map
@@ -0,0 +1,4 @@
+DPDK_17.02 {
+
+ local: *;
+};
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
new file mode 100644
index 00000000..4e241b22
--- /dev/null
+++ b/drivers/net/sfc/sfc.c
@@ -0,0 +1,750 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* sysconf() */
+#include <unistd.h>
+
+#include <rte_errno.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_tx.h"
+
+
+int
+sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
+ size_t len, int socket_id, efsys_mem_t *esmp)
+{
+ const struct rte_memzone *mz;
+
+ sfc_log_init(sa, "name=%s id=%u len=%lu socket_id=%d",
+ name, id, len, socket_id);
+
+ mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
+ sysconf(_SC_PAGESIZE), socket_id);
+ if (mz == NULL) {
+ sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
+ name, (unsigned int)id, (unsigned int)len, socket_id,
+ rte_strerror(rte_errno));
+ return ENOMEM;
+ }
+
+ esmp->esm_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ if (esmp->esm_addr == RTE_BAD_PHYS_ADDR) {
+ (void)rte_memzone_free(mz);
+ return EFAULT;
+ }
+
+ esmp->esm_mz = mz;
+ esmp->esm_base = mz->addr;
+
+ return 0;
+}
+
+void
+sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
+{
+ int rc;
+
+ sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
+
+ rc = rte_memzone_free(esmp->esm_mz);
+ if (rc != 0)
+ sfc_err(sa, "rte_memzone_free(() failed: %d", rc);
+
+ memset(esmp, 0, sizeof(*esmp));
+}
+
+static uint32_t
+sfc_phy_cap_from_link_speeds(uint32_t speeds)
+{
+ uint32_t phy_caps = 0;
+
+ if (~speeds & ETH_LINK_SPEED_FIXED) {
+ phy_caps |= (1 << EFX_PHY_CAP_AN);
+ /*
+ * If no speeds are specified in the mask, any supported
+ * may be negotiated
+ */
+ if (speeds == ETH_LINK_SPEED_AUTONEG)
+ phy_caps |=
+ (1 << EFX_PHY_CAP_1000FDX) |
+ (1 << EFX_PHY_CAP_10000FDX) |
+ (1 << EFX_PHY_CAP_40000FDX);
+ }
+ if (speeds & ETH_LINK_SPEED_1G)
+ phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
+ if (speeds & ETH_LINK_SPEED_10G)
+ phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
+ if (speeds & ETH_LINK_SPEED_40G)
+ phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
+
+ return phy_caps;
+}
+
+/*
+ * Check requested device level configuration.
+ * Receive and transmit configuration is checked in corresponding
+ * modules.
+ */
+static int
+sfc_check_conf(struct sfc_adapter *sa)
+{
+ const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
+ int rc = 0;
+
+ sa->port.phy_adv_cap =
+ sfc_phy_cap_from_link_speeds(conf->link_speeds) &
+ sa->port.phy_adv_cap_mask;
+ if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
+ sfc_err(sa, "No link speeds from mask %#x are supported",
+ conf->link_speeds);
+ rc = EINVAL;
+ }
+
+ if (conf->lpbk_mode != 0) {
+ sfc_err(sa, "Loopback not supported");
+ rc = EINVAL;
+ }
+
+ if (conf->dcb_capability_en != 0) {
+ sfc_err(sa, "Priority-based flow control not supported");
+ rc = EINVAL;
+ }
+
+ if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ sfc_err(sa, "Flow Director not supported");
+ rc = EINVAL;
+ }
+
+ if ((conf->intr_conf.lsc != 0) &&
+ (sa->intr.type != EFX_INTR_LINE) &&
+ (sa->intr.type != EFX_INTR_MESSAGE)) {
+ sfc_err(sa, "Link status change interrupt not supported");
+ rc = EINVAL;
+ }
+
+ if (conf->intr_conf.rxq != 0) {
+ sfc_err(sa, "Receive queue interrupt not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+/*
+ * Find out maximum number of receive and transmit queues which could be
+ * advertised.
+ *
+ * NIC is kept initialized on success to allow other modules acquire
+ * defaults and capabilities.
+ */
+static int
+sfc_estimate_resource_limits(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ efx_drv_limits_t limits;
+ int rc;
+ uint32_t evq_allocated;
+ uint32_t rxq_allocated;
+ uint32_t txq_allocated;
+
+ memset(&limits, 0, sizeof(limits));
+
+ /* Request at least one Rx and Tx queue */
+ limits.edl_min_rxq_count = 1;
+ limits.edl_min_txq_count = 1;
+ /* Management event queue plus event queue for each Tx and Rx queue */
+ limits.edl_min_evq_count =
+ 1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;
+
+ /* Divide by number of functions to guarantee that all functions
+ * will get promised resources
+ */
+ /* FIXME Divide by number of functions (not 2) below */
+ limits.edl_max_evq_count = encp->enc_evq_limit / 2;
+ SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);
+
+ /* Split equally between receive and transmit */
+ limits.edl_max_rxq_count =
+ MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
+ SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);
+
+ limits.edl_max_txq_count =
+ MIN(encp->enc_txq_limit,
+ limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);
+
+ if (sa->tso)
+ limits.edl_max_txq_count =
+ MIN(limits.edl_max_txq_count,
+ encp->enc_fw_assisted_tso_v2_n_contexts /
+ encp->enc_hw_pf_count);
+
+ SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);
+
+ /* Configure the minimum required resources needed for the
+ * driver to operate, and the maximum desired resources that the
+ * driver is capable of using.
+ */
+ efx_nic_set_drv_limits(sa->nic, &limits);
+
+ sfc_log_init(sa, "init nic");
+ rc = efx_nic_init(sa->nic);
+ if (rc != 0)
+ goto fail_nic_init;
+
+ /* Find resource dimensions assigned by firmware to this function */
+ rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
+ &txq_allocated);
+ if (rc != 0)
+ goto fail_get_vi_pool;
+
+ /* It still may allocate more than maximum, ensure limit */
+ evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
+ rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
+ txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
+
+ /* Subtract management EVQ not used for traffic */
+ SFC_ASSERT(evq_allocated > 0);
+ evq_allocated--;
+
+ /* Right now we use separate EVQ for Rx and Tx */
+ sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
+ sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
+
+ /* Keep NIC initialized */
+ return 0;
+
+fail_get_vi_pool:
+fail_nic_init:
+ efx_nic_fini(sa->nic);
+ return rc;
+}
+
+static int
+sfc_set_drv_limits(struct sfc_adapter *sa)
+{
+ const struct rte_eth_dev_data *data = sa->eth_dev->data;
+ efx_drv_limits_t lim;
+
+ memset(&lim, 0, sizeof(lim));
+
+ /* Limits are strict since take into account initial estimation */
+ lim.edl_min_evq_count = lim.edl_max_evq_count =
+ 1 + data->nb_rx_queues + data->nb_tx_queues;
+ lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
+ lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
+
+ return efx_nic_set_drv_limits(sa->nic, &lim);
+}
+
+int
+sfc_start(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ switch (sa->state) {
+ case SFC_ADAPTER_CONFIGURED:
+ break;
+ case SFC_ADAPTER_STARTED:
+ sfc_info(sa, "already started");
+ return 0;
+ default:
+ rc = EINVAL;
+ goto fail_bad_state;
+ }
+
+ sa->state = SFC_ADAPTER_STARTING;
+
+ sfc_log_init(sa, "set resource limits");
+ rc = sfc_set_drv_limits(sa);
+ if (rc != 0)
+ goto fail_set_drv_limits;
+
+ sfc_log_init(sa, "init nic");
+ rc = efx_nic_init(sa->nic);
+ if (rc != 0)
+ goto fail_nic_init;
+
+ rc = sfc_intr_start(sa);
+ if (rc != 0)
+ goto fail_intr_start;
+
+ rc = sfc_ev_start(sa);
+ if (rc != 0)
+ goto fail_ev_start;
+
+ rc = sfc_port_start(sa);
+ if (rc != 0)
+ goto fail_port_start;
+
+ rc = sfc_rx_start(sa);
+ if (rc != 0)
+ goto fail_rx_start;
+
+ rc = sfc_tx_start(sa);
+ if (rc != 0)
+ goto fail_tx_start;
+
+ rc = sfc_flow_start(sa);
+ if (rc != 0)
+ goto fail_flows_insert;
+
+ sa->state = SFC_ADAPTER_STARTED;
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_flows_insert:
+ sfc_tx_stop(sa);
+
+fail_tx_start:
+ sfc_rx_stop(sa);
+
+fail_rx_start:
+ sfc_port_stop(sa);
+
+fail_port_start:
+ sfc_ev_stop(sa);
+
+fail_ev_start:
+ sfc_intr_stop(sa);
+
+fail_intr_start:
+ efx_nic_fini(sa->nic);
+
+fail_nic_init:
+fail_set_drv_limits:
+ sa->state = SFC_ADAPTER_CONFIGURED;
+fail_bad_state:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_stop(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ switch (sa->state) {
+ case SFC_ADAPTER_STARTED:
+ break;
+ case SFC_ADAPTER_CONFIGURED:
+ sfc_info(sa, "already stopped");
+ return;
+ default:
+ sfc_err(sa, "stop in unexpected state %u", sa->state);
+ SFC_ASSERT(B_FALSE);
+ return;
+ }
+
+ sa->state = SFC_ADAPTER_STOPPING;
+
+ sfc_flow_stop(sa);
+ sfc_tx_stop(sa);
+ sfc_rx_stop(sa);
+ sfc_port_stop(sa);
+ sfc_ev_stop(sa);
+ sfc_intr_stop(sa);
+ efx_nic_fini(sa->nic);
+
+ sa->state = SFC_ADAPTER_CONFIGURED;
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_configure(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
+ sa->state == SFC_ADAPTER_CONFIGURED);
+ sa->state = SFC_ADAPTER_CONFIGURING;
+
+ rc = sfc_check_conf(sa);
+ if (rc != 0)
+ goto fail_check_conf;
+
+ rc = sfc_intr_configure(sa);
+ if (rc != 0)
+ goto fail_intr_configure;
+
+ rc = sfc_port_configure(sa);
+ if (rc != 0)
+ goto fail_port_configure;
+
+ rc = sfc_rx_configure(sa);
+ if (rc != 0)
+ goto fail_rx_configure;
+
+ rc = sfc_tx_configure(sa);
+ if (rc != 0)
+ goto fail_tx_configure;
+
+ sa->state = SFC_ADAPTER_CONFIGURED;
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_tx_configure:
+ sfc_rx_close(sa);
+
+fail_rx_configure:
+ sfc_port_close(sa);
+
+fail_port_configure:
+ sfc_intr_close(sa);
+
+fail_intr_configure:
+fail_check_conf:
+ sa->state = SFC_ADAPTER_INITIALIZED;
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_close(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
+ sa->state = SFC_ADAPTER_CLOSING;
+
+ sfc_tx_close(sa);
+ sfc_rx_close(sa);
+ sfc_port_close(sa);
+ sfc_intr_close(sa);
+
+ sa->state = SFC_ADAPTER_INITIALIZED;
+ sfc_log_init(sa, "done");
+}
+
+static int
+sfc_mem_bar_init(struct sfc_adapter *sa)
+{
+ struct rte_eth_dev *eth_dev = sa->eth_dev;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(eth_dev);
+ efsys_bar_t *ebp = &sa->mem_bar;
+ unsigned int i;
+ struct rte_mem_resource *res;
+
+ for (i = 0; i < RTE_DIM(pci_dev->mem_resource); i++) {
+ res = &pci_dev->mem_resource[i];
+ if ((res->len != 0) && (res->phys_addr != 0)) {
+ /* Found first memory BAR */
+ SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
+ ebp->esb_rid = i;
+ ebp->esb_dev = pci_dev;
+ ebp->esb_base = res->addr;
+ return 0;
+ }
+ }
+
+ return EFAULT;
+}
+
+static void
+sfc_mem_bar_fini(struct sfc_adapter *sa)
+{
+ efsys_bar_t *ebp = &sa->mem_bar;
+
+ SFC_BAR_LOCK_DESTROY(ebp);
+ memset(ebp, 0, sizeof(*ebp));
+}
+
+#if EFSYS_OPT_RX_SCALE
+/*
+ * A fixed RSS key which has a property of being symmetric
+ * (symmetrical flows are distributed to the same CPU)
+ * and also known to give a uniform distribution
+ * (a good distribution of traffic between different CPUs)
+ */
+static const uint8_t default_rss_key[SFC_RSS_KEY_SIZE] = {
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+};
+#endif
+
+static int
+sfc_set_rss_defaults(struct sfc_adapter *sa)
+{
+#if EFSYS_OPT_RX_SCALE
+ int rc;
+
+ rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
+ if (rc != 0)
+ goto fail_intr_init;
+
+ rc = efx_ev_init(sa->nic);
+ if (rc != 0)
+ goto fail_ev_init;
+
+ rc = efx_rx_init(sa->nic);
+ if (rc != 0)
+ goto fail_rx_init;
+
+ rc = efx_rx_scale_support_get(sa->nic, &sa->rss_support);
+ if (rc != 0)
+ goto fail_scale_support_get;
+
+ rc = efx_rx_hash_support_get(sa->nic, &sa->hash_support);
+ if (rc != 0)
+ goto fail_hash_support_get;
+
+ efx_rx_fini(sa->nic);
+ efx_ev_fini(sa->nic);
+ efx_intr_fini(sa->nic);
+
+ sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS);
+
+ rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key));
+
+ return 0;
+
+fail_hash_support_get:
+fail_scale_support_get:
+fail_rx_init:
+ efx_ev_fini(sa->nic);
+
+fail_ev_init:
+ efx_intr_fini(sa->nic);
+
+fail_intr_init:
+ return rc;
+#else
+ return 0;
+#endif
+}
+
+int
+sfc_attach(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp;
+ efx_nic_t *enp = sa->nic;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ efx_mcdi_new_epoch(enp);
+
+ sfc_log_init(sa, "reset nic");
+ rc = efx_nic_reset(enp);
+ if (rc != 0)
+ goto fail_nic_reset;
+
+ encp = efx_nic_cfg_get(sa->nic);
+
+ if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
+ sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
+ if (!sa->tso)
+ sfc_warn(sa,
+ "TSO support isn't available on this adapter");
+ }
+
+ sfc_log_init(sa, "estimate resource limits");
+ rc = sfc_estimate_resource_limits(sa);
+ if (rc != 0)
+ goto fail_estimate_rsrc_limits;
+
+ sa->txq_max_entries = encp->enc_txq_max_ndescs;
+ SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
+
+ rc = sfc_intr_attach(sa);
+ if (rc != 0)
+ goto fail_intr_attach;
+
+ rc = sfc_ev_attach(sa);
+ if (rc != 0)
+ goto fail_ev_attach;
+
+ rc = sfc_port_attach(sa);
+ if (rc != 0)
+ goto fail_port_attach;
+
+ rc = sfc_set_rss_defaults(sa);
+ if (rc != 0)
+ goto fail_set_rss_defaults;
+
+ rc = sfc_filter_attach(sa);
+ if (rc != 0)
+ goto fail_filter_attach;
+
+ sfc_log_init(sa, "fini nic");
+ efx_nic_fini(enp);
+
+ sfc_flow_init(sa);
+
+ sa->state = SFC_ADAPTER_INITIALIZED;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_filter_attach:
+fail_set_rss_defaults:
+ sfc_port_detach(sa);
+
+fail_port_attach:
+ sfc_ev_detach(sa);
+
+fail_ev_attach:
+ sfc_intr_detach(sa);
+
+fail_intr_attach:
+ efx_nic_fini(sa->nic);
+
+fail_estimate_rsrc_limits:
+fail_nic_reset:
+
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_detach(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ sfc_flow_fini(sa);
+
+ sfc_filter_detach(sa);
+ sfc_port_detach(sa);
+ sfc_ev_detach(sa);
+ sfc_intr_detach(sa);
+
+ sa->state = SFC_ADAPTER_UNINITIALIZED;
+}
+
+int
+sfc_probe(struct sfc_adapter *sa)
+{
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ efx_nic_t *enp;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ sa->socket_id = rte_socket_id();
+
+ sfc_log_init(sa, "init mem bar");
+ rc = sfc_mem_bar_init(sa);
+ if (rc != 0)
+ goto fail_mem_bar_init;
+
+ sfc_log_init(sa, "get family");
+ rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
+ &sa->family);
+ if (rc != 0)
+ goto fail_family;
+ sfc_log_init(sa, "family is %u", sa->family);
+
+ sfc_log_init(sa, "create nic");
+ rte_spinlock_init(&sa->nic_lock);
+ rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
+ &sa->mem_bar, &sa->nic_lock, &enp);
+ if (rc != 0)
+ goto fail_nic_create;
+ sa->nic = enp;
+
+ rc = sfc_mcdi_init(sa);
+ if (rc != 0)
+ goto fail_mcdi_init;
+
+ sfc_log_init(sa, "probe nic");
+ rc = efx_nic_probe(enp);
+ if (rc != 0)
+ goto fail_nic_probe;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_nic_probe:
+ sfc_mcdi_fini(sa);
+
+fail_mcdi_init:
+ sfc_log_init(sa, "destroy nic");
+ sa->nic = NULL;
+ efx_nic_destroy(enp);
+
+fail_nic_create:
+fail_family:
+ sfc_mem_bar_fini(sa);
+
+fail_mem_bar_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_unprobe(struct sfc_adapter *sa)
+{
+ efx_nic_t *enp = sa->nic;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ sfc_log_init(sa, "unprobe nic");
+ efx_nic_unprobe(enp);
+
+ sfc_mcdi_fini(sa);
+
+ sfc_log_init(sa, "destroy nic");
+ sa->nic = NULL;
+ efx_nic_destroy(enp);
+
+ sfc_mem_bar_fini(sa);
+
+ sfc_flow_fini(sa);
+ sa->state = SFC_ADAPTER_UNINITIALIZED;
+}
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
new file mode 100644
index 00000000..fad0ce04
--- /dev/null
+++ b/drivers/net/sfc/sfc.h
@@ -0,0 +1,322 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_H
+#define _SFC_H
+
+#include <stdbool.h>
+
+#include <rte_ethdev.h>
+#include <rte_kvargs.h>
+#include <rte_spinlock.h>
+
+#include "efx.h"
+
+#include "sfc_filter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SFC_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
+
+#if EFSYS_OPT_RX_SCALE
+/** RSS key length (bytes) */
+#define SFC_RSS_KEY_SIZE 40
+/** RSS hash offloads mask */
+#define SFC_RSS_OFFLOADS (ETH_RSS_IP | ETH_RSS_TCP)
+#endif
+
+/*
+ * +---------------+
+ * | UNINITIALIZED |<-----------+
+ * +---------------+ |
+ * |.eth_dev_init |.eth_dev_uninit
+ * V |
+ * +---------------+------------+
+ * | INITIALIZED |
+ * +---------------+<-----------<---------------+
+ * |.dev_configure | |
+ * V |failed |
+ * +---------------+------------+ |
+ * | CONFIGURING | |
+ * +---------------+----+ |
+ * |success | |
+ * | | +---------------+
+ * | | | CLOSING |
+ * | | +---------------+
+ * | | ^
+ * V |.dev_configure |
+ * +---------------+----+ |.dev_close
+ * | CONFIGURED |----------------------------+
+ * +---------------+<-----------+
+ * |.dev_start |
+ * V |
+ * +---------------+ |
+ * | STARTING |------------^
+ * +---------------+ failed |
+ * |success |
+ * | +---------------+
+ * | | STOPPING |
+ * | +---------------+
+ * | ^
+ * V |.dev_stop
+ * +---------------+------------+
+ * | STARTED |
+ * +---------------+
+ */
+enum sfc_adapter_state {
+ SFC_ADAPTER_UNINITIALIZED = 0,
+ SFC_ADAPTER_INITIALIZED,
+ SFC_ADAPTER_CONFIGURING,
+ SFC_ADAPTER_CONFIGURED,
+ SFC_ADAPTER_CLOSING,
+ SFC_ADAPTER_STARTING,
+ SFC_ADAPTER_STARTED,
+ SFC_ADAPTER_STOPPING,
+
+ SFC_ADAPTER_NSTATES
+};
+
+enum sfc_dev_filter_mode {
+ SFC_DEV_FILTER_MODE_PROMISC = 0,
+ SFC_DEV_FILTER_MODE_ALLMULTI,
+
+ SFC_DEV_FILTER_NMODES
+};
+
+enum sfc_mcdi_state {
+ SFC_MCDI_UNINITIALIZED = 0,
+ SFC_MCDI_INITIALIZED,
+ SFC_MCDI_BUSY,
+ SFC_MCDI_COMPLETED,
+
+ SFC_MCDI_NSTATES
+};
+
+struct sfc_mcdi {
+ rte_spinlock_t lock;
+ efsys_mem_t mem;
+ enum sfc_mcdi_state state;
+ efx_mcdi_transport_t transport;
+ bool logging;
+ uint32_t proxy_handle;
+ efx_rc_t proxy_result;
+};
+
+struct sfc_intr {
+ efx_intr_type_t type;
+ rte_intr_callback_fn handler;
+ boolean_t lsc_intr;
+};
+
+struct sfc_rxq_info;
+struct sfc_txq_info;
+struct sfc_dp_rx;
+
+struct sfc_port {
+ unsigned int lsc_seq;
+
+ uint32_t phy_adv_cap_mask;
+ uint32_t phy_adv_cap;
+
+ unsigned int flow_ctrl;
+ boolean_t flow_ctrl_autoneg;
+ size_t pdu;
+
+ boolean_t promisc;
+ boolean_t allmulti;
+
+ unsigned int max_mcast_addrs;
+ unsigned int nb_mcast_addrs;
+ uint8_t *mcast_addrs;
+
+ rte_spinlock_t mac_stats_lock;
+ uint64_t *mac_stats_buf;
+ efsys_mem_t mac_stats_dma_mem;
+ boolean_t mac_stats_reset_pending;
+ uint16_t mac_stats_update_period_ms;
+ uint32_t mac_stats_update_generation;
+ boolean_t mac_stats_periodic_dma_supported;
+ uint64_t mac_stats_last_request_timestamp;
+
+ uint32_t mac_stats_mask[EFX_MAC_STATS_MASK_NPAGES];
+};
+
+/* Adapter private data */
+struct sfc_adapter {
+ /*
+ * PMD setup and configuration is not thread safe. Since it is not
+ * performance sensitive, it is better to guarantee thread-safety
+ * and add device level lock. Adapter control operations which
+ * change its state should acquire the lock.
+ */
+ rte_spinlock_t lock;
+ enum sfc_adapter_state state;
+ struct rte_eth_dev *eth_dev;
+ struct rte_kvargs *kvargs;
+ bool debug_init;
+ int socket_id;
+ efsys_bar_t mem_bar;
+ efx_family_t family;
+ efx_nic_t *nic;
+ rte_spinlock_t nic_lock;
+
+ struct sfc_mcdi mcdi;
+ struct sfc_intr intr;
+ struct sfc_port port;
+ struct sfc_filter filter;
+
+ unsigned int rxq_max;
+ unsigned int txq_max;
+
+ unsigned int txq_max_entries;
+
+ uint32_t evq_flags;
+ unsigned int evq_count;
+
+ unsigned int mgmt_evq_index;
+ rte_spinlock_t mgmt_evq_lock;
+ struct sfc_evq *mgmt_evq;
+
+ unsigned int rxq_count;
+ struct sfc_rxq_info *rxq_info;
+
+ unsigned int txq_count;
+ struct sfc_txq_info *txq_info;
+
+ boolean_t tso;
+
+ unsigned int rss_channels;
+
+#if EFSYS_OPT_RX_SCALE
+ efx_rx_scale_support_t rss_support;
+ efx_rx_hash_support_t hash_support;
+ efx_rx_hash_type_t rss_hash_types;
+ unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
+ uint8_t rss_key[SFC_RSS_KEY_SIZE];
+#endif
+
+ const struct sfc_dp_rx *dp_rx;
+ const struct sfc_dp_tx *dp_tx;
+};
+
+/*
+ * Add wrapper functions to acquire/release lock to be able to remove or
+ * change the lock in one place.
+ */
+
+static inline void
+sfc_adapter_lock_init(struct sfc_adapter *sa)
+{
+ rte_spinlock_init(&sa->lock);
+}
+
+static inline int
+sfc_adapter_is_locked(struct sfc_adapter *sa)
+{
+ return rte_spinlock_is_locked(&sa->lock);
+}
+
+static inline void
+sfc_adapter_lock(struct sfc_adapter *sa)
+{
+ rte_spinlock_lock(&sa->lock);
+}
+
+static inline int
+sfc_adapter_trylock(struct sfc_adapter *sa)
+{
+ return rte_spinlock_trylock(&sa->lock);
+}
+
+static inline void
+sfc_adapter_unlock(struct sfc_adapter *sa)
+{
+ rte_spinlock_unlock(&sa->lock);
+}
+
+static inline void
+sfc_adapter_lock_fini(__rte_unused struct sfc_adapter *sa)
+{
+ /* Just for symmetry of the API */
+}
+
+/** Get the number of milliseconds since boot from the default timer */
+static inline uint64_t
+sfc_get_system_msecs(void)
+{
+ return rte_get_timer_cycles() * MS_PER_S / rte_get_timer_hz();
+}
+
+int sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
+ size_t len, int socket_id, efsys_mem_t *esmp);
+void sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp);
+
+int sfc_probe(struct sfc_adapter *sa);
+void sfc_unprobe(struct sfc_adapter *sa);
+int sfc_attach(struct sfc_adapter *sa);
+void sfc_detach(struct sfc_adapter *sa);
+int sfc_start(struct sfc_adapter *sa);
+void sfc_stop(struct sfc_adapter *sa);
+
+int sfc_mcdi_init(struct sfc_adapter *sa);
+void sfc_mcdi_fini(struct sfc_adapter *sa);
+
+int sfc_configure(struct sfc_adapter *sa);
+void sfc_close(struct sfc_adapter *sa);
+
+int sfc_intr_attach(struct sfc_adapter *sa);
+void sfc_intr_detach(struct sfc_adapter *sa);
+int sfc_intr_configure(struct sfc_adapter *sa);
+void sfc_intr_close(struct sfc_adapter *sa);
+int sfc_intr_start(struct sfc_adapter *sa);
+void sfc_intr_stop(struct sfc_adapter *sa);
+
+int sfc_port_attach(struct sfc_adapter *sa);
+void sfc_port_detach(struct sfc_adapter *sa);
+int sfc_port_configure(struct sfc_adapter *sa);
+void sfc_port_close(struct sfc_adapter *sa);
+int sfc_port_start(struct sfc_adapter *sa);
+void sfc_port_stop(struct sfc_adapter *sa);
+void sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
+ struct rte_eth_link *link_info);
+int sfc_port_update_mac_stats(struct sfc_adapter *sa);
+int sfc_port_reset_mac_stats(struct sfc_adapter *sa);
+int sfc_set_rx_mode(struct sfc_adapter *sa);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_H */
diff --git a/drivers/net/sfc/sfc_debug.h b/drivers/net/sfc/sfc_debug.h
new file mode 100644
index 00000000..c0b48677
--- /dev/null
+++ b/drivers/net/sfc/sfc_debug.h
@@ -0,0 +1,59 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DEBUG_H_
+#define _SFC_DEBUG_H_
+
+#include <rte_debug.h>
+
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+/* Avoid dependency from RTE_LOG_LEVEL to be able to enable debug check
+ * in the driver only.
+ */
+#define SFC_ASSERT(exp) RTE_VERIFY(exp)
+#else
+/* If the driver debug is not enabled, follow DPDK debug/non-debug */
+#define SFC_ASSERT(exp) RTE_ASSERT(exp)
+#endif
+
+/* Log PMD message, automatically add prefix and \n */
+#define sfc_panic(sa, fmt, args...) \
+ do { \
+ const struct rte_eth_dev *_dev = (sa)->eth_dev; \
+ const struct rte_pci_device *_pci_dev = SFC_DEV_TO_PCI(_dev); \
+ \
+ rte_panic("sfc " PCI_PRI_FMT " #%" PRIu8 ": " fmt "\n", \
+ _pci_dev->addr.domain, _pci_dev->addr.bus, \
+ _pci_dev->addr.devid, _pci_dev->addr.function,\
+ _dev->data->port_id, ##args); \
+ } while (0)
+
+#endif /* _SFC_DEBUG_H_ */
diff --git a/drivers/net/sfc/sfc_dp.c b/drivers/net/sfc/sfc_dp.c
new file mode 100644
index 00000000..860aa921
--- /dev/null
+++ b/drivers/net/sfc/sfc_dp.c
@@ -0,0 +1,100 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_log.h>
+
+#include "sfc_dp.h"
+
+void
+sfc_dp_queue_init(struct sfc_dp_queue *dpq, uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr)
+{
+ dpq->port_id = port_id;
+ dpq->queue_id = queue_id;
+ dpq->pci_addr = *pci_addr;
+}
+
+struct sfc_dp *
+sfc_dp_find_by_name(struct sfc_dp_list *head, enum sfc_dp_type type,
+ const char *name)
+{
+ struct sfc_dp *entry;
+
+ TAILQ_FOREACH(entry, head, links) {
+ if (entry->type != type)
+ continue;
+
+ if (strcmp(entry->name, name) == 0)
+ return entry;
+ }
+
+ return NULL;
+}
+
+struct sfc_dp *
+sfc_dp_find_by_caps(struct sfc_dp_list *head, enum sfc_dp_type type,
+ unsigned int avail_caps)
+{
+ struct sfc_dp *entry;
+
+ TAILQ_FOREACH(entry, head, links) {
+ if (entry->type != type)
+ continue;
+
+ /* Take the first matching */
+ if (sfc_dp_match_hw_fw_caps(entry, avail_caps))
+ return entry;
+ }
+
+ return NULL;
+}
+
+int
+sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry)
+{
+ if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) {
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD,
+ "sfc %s dapapath '%s' already registered\n",
+ entry->type == SFC_DP_RX ? "Rx" :
+ entry->type == SFC_DP_TX ? "Tx" :
+ "unknown",
+ entry->name);
+ return EEXIST;
+ }
+
+ TAILQ_INSERT_TAIL(head, entry, links);
+
+ return 0;
+}
diff --git a/drivers/net/sfc/sfc_dp.h b/drivers/net/sfc/sfc_dp.h
new file mode 100644
index 00000000..eff0aa87
--- /dev/null
+++ b/drivers/net/sfc/sfc_dp.h
@@ -0,0 +1,125 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DP_H
+#define _SFC_DP_H
+
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_pci.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SFC_DIV_ROUND_UP(a, b) \
+ __extension__ ({ \
+ typeof(a) _a = (a); \
+ typeof(b) _b = (b); \
+ \
+ (_a + (_b - 1)) / _b; \
+ })
+
+/**
+ * Datapath exception handler to be provided by the control path.
+ */
+typedef void (sfc_dp_exception_t)(void *ctrl);
+
+enum sfc_dp_type {
+ SFC_DP_RX = 0, /**< Receive datapath */
+ SFC_DP_TX, /**< Transmit datapath */
+};
+
+
+/** Datapath queue run-time information */
+struct sfc_dp_queue {
+ uint16_t port_id;
+ uint16_t queue_id;
+ struct rte_pci_addr pci_addr;
+};
+
+void sfc_dp_queue_init(struct sfc_dp_queue *dpq,
+ uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr);
+
+/*
+ * Helper macro to define datapath logging macros and have uniform
+ * logging.
+ */
+#define SFC_DP_LOG(dp_name, level, dpq, ...) \
+ do { \
+ const struct sfc_dp_queue *_dpq = (dpq); \
+ const struct rte_pci_addr *_addr = &(_dpq)->pci_addr; \
+ \
+ RTE_LOG(level, PMD, \
+ RTE_FMT("%s " PCI_PRI_FMT \
+ " #%" PRIu16 ".%" PRIu16 ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ dp_name, \
+ _addr->domain, _addr->bus, \
+ _addr->devid, _addr->function, \
+ _dpq->port_id, _dpq->queue_id, \
+ RTE_FMT_TAIL(__VA_ARGS__,))); \
+ } while (0)
+
+
+/** Datapath definition */
+struct sfc_dp {
+ TAILQ_ENTRY(sfc_dp) links;
+ const char *name;
+ enum sfc_dp_type type;
+ /* Mask of required hardware/firmware capabilities */
+ unsigned int hw_fw_caps;
+#define SFC_DP_HW_FW_CAP_EF10 0x1
+};
+
+/** List of datapath variants */
+TAILQ_HEAD(sfc_dp_list, sfc_dp);
+
+/* Check if available HW/FW capabilities are sufficient for the datapath */
+static inline bool
+sfc_dp_match_hw_fw_caps(const struct sfc_dp *dp, unsigned int avail_caps)
+{
+ return (dp->hw_fw_caps & avail_caps) == dp->hw_fw_caps;
+}
+
+struct sfc_dp *sfc_dp_find_by_name(struct sfc_dp_list *head,
+ enum sfc_dp_type type, const char *name);
+struct sfc_dp *sfc_dp_find_by_caps(struct sfc_dp_list *head,
+ enum sfc_dp_type type,
+ unsigned int avail_caps);
+int sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_H */
diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h
new file mode 100644
index 00000000..9d05a4b3
--- /dev/null
+++ b/drivers/net/sfc/sfc_dp_rx.h
@@ -0,0 +1,197 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DP_RX_H
+#define _SFC_DP_RX_H
+
+#include <rte_mempool.h>
+#include <rte_ethdev.h>
+
+#include "sfc_dp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Generic receive queue information used on data path.
+ * It must be kept as small as it is possible since it is built into
+ * the structure used on datapath.
+ */
+struct sfc_dp_rxq {
+ struct sfc_dp_queue dpq;
+};
+
+/**
+ * Datapath receive queue creation information.
+ *
+ * The structure is used just to pass information from control path to
+ * datapath. It could be just function arguments, but it would be hardly
+ * readable.
+ */
+struct sfc_dp_rx_qcreate_info {
+ /** Memory pool to allocate Rx buffer from */
+ struct rte_mempool *refill_mb_pool;
+ /** Minimum number of unused Rx descriptors to do refill */
+ unsigned int refill_threshold;
+ /**
+ * Usable mbuf data space in accordance with alignment and
+ * padding requirements imposed by HW.
+ */
+ unsigned int buf_size;
+
+ /**
+ * Maximum number of Rx descriptors completed in one Rx event.
+ * Just for sanity checks if datapath would like to do.
+ */
+ unsigned int batch_max;
+
+ /** Pseudo-header size */
+ unsigned int prefix_size;
+
+ /** Receive queue flags initializer */
+ unsigned int flags;
+#define SFC_RXQ_FLAG_RSS_HASH 0x1
+
+ /** Rx queue size */
+ unsigned int rxq_entries;
+ /** DMA-mapped Rx descriptors ring */
+ void *rxq_hw_ring;
+
+ /** Associated event queue size */
+ unsigned int evq_entries;
+ /** Hardware event ring */
+ void *evq_hw_ring;
+
+ /** The queue index in hardware (required to push right doorbell) */
+ unsigned int hw_index;
+ /**
+ * Virtual address of the memory-mapped BAR to push Rx refill
+ * doorbell
+ */
+ volatile void *mem_bar;
+};
+
+/**
+ * Allocate and initialize datapath receive queue.
+ *
+ * @param port_id The port identifier
+ * @param queue_id The queue identifier
+ * @param pci_addr PCI function address
+ * @param socket_id Socket identifier to allocate memory
+ * @param info Receive queue information
+ * @param dp_rxqp Location for generic datapath receive queue pointer
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp);
+
+/**
+ * Free resources allocated for datapath recevie queue.
+ */
+typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);
+
+/**
+ * Receive queue start callback.
+ *
+ * It handovers EvQ to the datapath.
+ */
+typedef int (sfc_dp_rx_qstart_t)(struct sfc_dp_rxq *dp_rxq,
+ unsigned int evq_read_ptr);
+
+/**
+ * Receive queue stop function called before flush.
+ */
+typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq,
+ unsigned int *evq_read_ptr);
+
+/**
+ * Receive event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);
+
+/**
+ * Receive queue purge function called after queue flush.
+ *
+ * Should be used to free unused recevie buffers.
+ */
+typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
+
+/** Get packet types recognized/classified */
+typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(void);
+
+/** Get number of pending Rx descriptors */
+typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
+
+/** Receive datapath definition */
+struct sfc_dp_rx {
+ struct sfc_dp dp;
+
+ unsigned int features;
+#define SFC_DP_RX_FEAT_SCATTER 0x1
+ sfc_dp_rx_qcreate_t *qcreate;
+ sfc_dp_rx_qdestroy_t *qdestroy;
+ sfc_dp_rx_qstart_t *qstart;
+ sfc_dp_rx_qstop_t *qstop;
+ sfc_dp_rx_qrx_ev_t *qrx_ev;
+ sfc_dp_rx_qpurge_t *qpurge;
+ sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
+ sfc_dp_rx_qdesc_npending_t *qdesc_npending;
+ eth_rx_burst_t pkt_burst;
+};
+
+static inline struct sfc_dp_rx *
+sfc_dp_find_rx_by_name(struct sfc_dp_list *head, const char *name)
+{
+ struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_RX, name);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
+}
+
+static inline struct sfc_dp_rx *
+sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
+{
+ struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_RX, avail_caps);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
+}
+
+extern struct sfc_dp_rx sfc_efx_rx;
+extern struct sfc_dp_rx sfc_ef10_rx;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_RX_H */
diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
new file mode 100644
index 00000000..2bb9a2e7
--- /dev/null
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -0,0 +1,170 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DP_TX_H
+#define _SFC_DP_TX_H
+
+#include <rte_ethdev.h>
+
+#include "sfc_dp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Generic transmit queue information used on data path.
+ * It must be kept as small as it is possible since it is built into
+ * the structure used on datapath.
+ */
+struct sfc_dp_txq {
+ struct sfc_dp_queue dpq;
+};
+
+/**
+ * Datapath transmit queue creation information.
+ *
+ * The structure is used just to pass information from control path to
+ * datapath. It could be just function arguments, but it would be hardly
+ * readable.
+ */
+struct sfc_dp_tx_qcreate_info {
+ /** Minimum number of unused Tx descriptors to do reap */
+ unsigned int free_thresh;
+ /** Transmit queue configuration flags */
+ unsigned int flags;
+ /** Tx queue size */
+ unsigned int txq_entries;
+ /** Maximum size of data in the DMA descriptor */
+ uint16_t dma_desc_size_max;
+ /** DMA-mapped Tx descriptors ring */
+ void *txq_hw_ring;
+ /** Associated event queue size */
+ unsigned int evq_entries;
+ /** Hardware event ring */
+ void *evq_hw_ring;
+ /** The queue index in hardware (required to push right doorbell) */
+ unsigned int hw_index;
+ /** Virtual address of the memory-mapped BAR to push Tx doorbell */
+ volatile void *mem_bar;
+};
+
+/**
+ * Allocate and initialize datapath transmit queue.
+ *
+ * @param port_id The port identifier
+ * @param queue_id The queue identifier
+ * @param pci_addr PCI function address
+ * @param socket_id Socket identifier to allocate memory
+ * @param info Tx queue details wrapped in structure
+ * @param dp_txqp Location for generic datapath transmit queue pointer
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_tx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_tx_qcreate_info *info,
+ struct sfc_dp_txq **dp_txqp);
+
+/**
+ * Free resources allocated for datapath transmit queue.
+ */
+typedef void (sfc_dp_tx_qdestroy_t)(struct sfc_dp_txq *dp_txq);
+
+/**
+ * Transmit queue start callback.
+ *
+ * It handovers EvQ to the datapath.
+ */
+typedef int (sfc_dp_tx_qstart_t)(struct sfc_dp_txq *dp_txq,
+ unsigned int evq_read_ptr,
+ unsigned int txq_desc_index);
+
+/**
+ * Transmit queue stop function called before the queue flush.
+ *
+ * It returns EvQ to the control path.
+ */
+typedef void (sfc_dp_tx_qstop_t)(struct sfc_dp_txq *dp_txq,
+ unsigned int *evq_read_ptr);
+
+/**
+ * Transmit event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_tx_qtx_ev_t)(struct sfc_dp_txq *dp_txq, unsigned int id);
+
+/**
+ * Transmit queue function called after the queue flush.
+ */
+typedef void (sfc_dp_tx_qreap_t)(struct sfc_dp_txq *dp_txq);
+
+/** Transmit datapath definition */
+struct sfc_dp_tx {
+ struct sfc_dp dp;
+
+ unsigned int features;
+#define SFC_DP_TX_FEAT_VLAN_INSERT 0x1
+#define SFC_DP_TX_FEAT_TSO 0x2
+#define SFC_DP_TX_FEAT_MULTI_SEG 0x4
+ sfc_dp_tx_qcreate_t *qcreate;
+ sfc_dp_tx_qdestroy_t *qdestroy;
+ sfc_dp_tx_qstart_t *qstart;
+ sfc_dp_tx_qstop_t *qstop;
+ sfc_dp_tx_qtx_ev_t *qtx_ev;
+ sfc_dp_tx_qreap_t *qreap;
+ eth_tx_burst_t pkt_burst;
+};
+
+static inline struct sfc_dp_tx *
+sfc_dp_find_tx_by_name(struct sfc_dp_list *head, const char *name)
+{
+ struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_TX, name);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
+}
+
+static inline struct sfc_dp_tx *
+sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
+{
+ struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_TX, avail_caps);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
+}
+
+extern struct sfc_dp_tx sfc_efx_tx;
+extern struct sfc_dp_tx sfc_ef10_tx;
+extern struct sfc_dp_tx sfc_ef10_simple_tx;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_TX_H */
diff --git a/drivers/net/sfc/sfc_ef10.h b/drivers/net/sfc/sfc_ef10.h
new file mode 100644
index 00000000..060d8fef
--- /dev/null
+++ b/drivers/net/sfc/sfc_ef10.h
@@ -0,0 +1,107 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_EF10_H
+#define _SFC_EF10_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Number of events in one cache line */
+#define SFC_EF10_EV_PER_CACHE_LINE \
+ (RTE_CACHE_LINE_SIZE / sizeof(efx_qword_t))
+
+#define SFC_EF10_EV_QCLEAR_MASK (~(SFC_EF10_EV_PER_CACHE_LINE - 1))
+
+#if defined(SFC_EF10_EV_QCLEAR_USE_EFX)
+static inline void
+sfc_ef10_ev_qclear_cache_line(void *ptr)
+{
+ efx_qword_t *entry = ptr;
+ unsigned int i;
+
+ for (i = 0; i < SFC_EF10_EV_PER_CACHE_LINE; ++i)
+ EFX_SET_QWORD(entry[i]);
+}
+#else
+/*
+ * It is possible to do it using AVX2 and AVX512F, but it shows less
+ * performance.
+ */
+static inline void
+sfc_ef10_ev_qclear_cache_line(void *ptr)
+{
+ const __m128i val = _mm_set1_epi64x(UINT64_MAX);
+ __m128i *addr = ptr;
+ unsigned int i;
+
+ RTE_BUILD_BUG_ON(sizeof(val) > RTE_CACHE_LINE_SIZE);
+ RTE_BUILD_BUG_ON(RTE_CACHE_LINE_SIZE % sizeof(val) != 0);
+
+ for (i = 0; i < RTE_CACHE_LINE_SIZE / sizeof(val); ++i)
+ _mm_store_si128(&addr[i], val);
+}
+#endif
+
+static inline void
+sfc_ef10_ev_qclear(efx_qword_t *hw_ring, unsigned int ptr_mask,
+ unsigned int old_read_ptr, unsigned int read_ptr)
+{
+ const unsigned int clear_ptr = read_ptr & SFC_EF10_EV_QCLEAR_MASK;
+ unsigned int old_clear_ptr = old_read_ptr & SFC_EF10_EV_QCLEAR_MASK;
+
+ while (old_clear_ptr != clear_ptr) {
+ sfc_ef10_ev_qclear_cache_line(
+ &hw_ring[old_clear_ptr & ptr_mask]);
+ old_clear_ptr += SFC_EF10_EV_PER_CACHE_LINE;
+ }
+
+ /*
+ * No barriers here.
+ * Functions which push doorbell should care about correct
+ * ordering: store instructions which fill in EvQ ring should be
+ * retired from CPU and DMA sync before doorbell which will allow
+ * to use these event entries.
+ */
+}
+
+static inline bool
+sfc_ef10_ev_present(const efx_qword_t ev)
+{
+ return ~EFX_QWORD_FIELD(ev, EFX_DWORD_0) |
+ ~EFX_QWORD_FIELD(ev, EFX_DWORD_1);
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_EF10_H */
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
new file mode 100644
index 00000000..1484baba
--- /dev/null
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -0,0 +1,712 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* EF10 native datapath implementation */
+
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_mbuf_ptype.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#include "sfc_tweak.h"
+#include "sfc_dp_rx.h"
+#include "sfc_kvargs.h"
+#include "sfc_ef10.h"
+
+#define sfc_ef10_rx_err(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
+
+/**
+ * Alignment requirement for value written to RX WPTR:
+ * the WPTR must be aligned to an 8 descriptor boundary.
+ */
+#define SFC_EF10_RX_WPTR_ALIGN 8
+
+/**
+ * Maximum number of descriptors/buffers in the Rx ring.
+ * It should guarantee that corresponding event queue never overfill.
+ * EF10 native datapath uses event queue of the same size as Rx queue.
+ * Maximum number of events on datapath can be estimated as number of
+ * Rx queue entries (one event per Rx buffer in the worst case) plus
+ * Rx error and flush events.
+ */
+#define SFC_EF10_RXQ_LIMIT(_ndesc) \
+ ((_ndesc) - 1 /* head must not step on tail */ - \
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
+ 1 /* Rx error */ - 1 /* flush */)
+
+struct sfc_ef10_rx_sw_desc {
+ struct rte_mbuf *mbuf;
+};
+
+struct sfc_ef10_rxq {
+ /* Used on data path */
+ unsigned int flags;
+#define SFC_EF10_RXQ_STARTED 0x1
+#define SFC_EF10_RXQ_NOT_RUNNING 0x2
+#define SFC_EF10_RXQ_EXCEPTION 0x4
+#define SFC_EF10_RXQ_RSS_HASH 0x8
+ unsigned int ptr_mask;
+ unsigned int prepared;
+ unsigned int completed;
+ unsigned int evq_read_ptr;
+ efx_qword_t *evq_hw_ring;
+ struct sfc_ef10_rx_sw_desc *sw_ring;
+ uint64_t rearm_data;
+ uint16_t prefix_size;
+
+ /* Used on refill */
+ uint16_t buf_size;
+ unsigned int added;
+ unsigned int refill_threshold;
+ struct rte_mempool *refill_mb_pool;
+ efx_qword_t *rxq_hw_ring;
+ volatile void *doorbell;
+
+ /* Datapath receive queue anchor */
+ struct sfc_dp_rxq dp;
+};
+
+static inline struct sfc_ef10_rxq *
+sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+{
+ return container_of(dp_rxq, struct sfc_ef10_rxq, dp);
+}
+
+static void
+sfc_ef10_rx_qpush(struct sfc_ef10_rxq *rxq)
+{
+ efx_dword_t dword;
+
+ /* Hardware has alignment restriction for WPTR */
+ RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
+ SFC_ASSERT(RTE_ALIGN(rxq->added, SFC_EF10_RX_WPTR_ALIGN) == rxq->added);
+
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR,
+ rxq->added & rxq->ptr_mask);
+
+ /* DMA sync to device is not required */
+
+ /*
+ * rte_write32() has rte_io_wmb() which guarantees that the STORE
+ * operations (i.e. Rx and event descriptor updates) that precede
+ * the rte_io_wmb() call are visible to NIC before the STORE
+ * operations that follow it (i.e. doorbell write).
+ */
+ rte_write32(dword.ed_u32[0], rxq->doorbell);
+}
+
+static void
+sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
+{
+ const unsigned int ptr_mask = rxq->ptr_mask;
+ const uint32_t buf_size = rxq->buf_size;
+ unsigned int free_space;
+ unsigned int bulks;
+ void *objs[SFC_RX_REFILL_BULK];
+ unsigned int added = rxq->added;
+
+ free_space = SFC_EF10_RXQ_LIMIT(ptr_mask + 1) -
+ (added - rxq->completed);
+
+ if (free_space < rxq->refill_threshold)
+ return;
+
+ bulks = free_space / RTE_DIM(objs);
+ /* refill_threshold guarantees that bulks is positive */
+ SFC_ASSERT(bulks > 0);
+
+ do {
+ unsigned int id;
+ unsigned int i;
+
+ if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
+ RTE_DIM(objs)) < 0)) {
+ struct rte_eth_dev_data *dev_data =
+ rte_eth_devices[rxq->dp.dpq.port_id].data;
+
+ /*
+ * It is hardly a safe way to increment counter
+ * from different contexts, but all PMDs do it.
+ */
+ dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);
+ /* Return if we have posted nothing yet */
+ if (added == rxq->added)
+ return;
+ /* Push posted */
+ break;
+ }
+
+ for (i = 0, id = added & ptr_mask;
+ i < RTE_DIM(objs);
+ ++i, ++id) {
+ struct rte_mbuf *m = objs[i];
+ struct sfc_ef10_rx_sw_desc *rxd;
+ phys_addr_t phys_addr;
+
+ SFC_ASSERT((id & ~ptr_mask) == 0);
+ rxd = &rxq->sw_ring[id];
+ rxd->mbuf = m;
+
+ /*
+ * Avoid writing to mbuf. It is cheaper to do it
+ * when we receive packet and fill in nearby
+ * structure members.
+ */
+
+ phys_addr = rte_mbuf_data_dma_addr_default(m);
+ EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
+ ESF_DZ_RX_KER_BYTE_CNT, buf_size,
+ ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
+ }
+
+ added += RTE_DIM(objs);
+ } while (--bulks > 0);
+
+ SFC_ASSERT(rxq->added != added);
+ rxq->added = added;
+ sfc_ef10_rx_qpush(rxq);
+}
+
+static void
+sfc_ef10_rx_prefetch_next(struct sfc_ef10_rxq *rxq, unsigned int next_id)
+{
+ struct rte_mbuf *next_mbuf;
+
+ /* Prefetch next bunch of software descriptors */
+ if ((next_id % (RTE_CACHE_LINE_SIZE / sizeof(rxq->sw_ring[0]))) == 0)
+ rte_prefetch0(&rxq->sw_ring[next_id]);
+
+ /*
+ * It looks strange to prefetch depending on previous prefetch
+ * data, but measurements show that it is really efficient and
+ * increases packet rate.
+ */
+ next_mbuf = rxq->sw_ring[next_id].mbuf;
+ if (likely(next_mbuf != NULL)) {
+ /* Prefetch the next mbuf structure */
+ rte_mbuf_prefetch_part1(next_mbuf);
+
+ /* Prefetch pseudo header of the next packet */
+ /* data_off is not filled in yet */
+ /* Yes, data could be not ready yet, but we hope */
+ rte_prefetch0((uint8_t *)next_mbuf->buf_addr +
+ RTE_PKTMBUF_HEADROOM);
+ }
+}
+
+static uint16_t
+sfc_ef10_rx_prepared(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->prepared);
+ unsigned int completed = rxq->completed;
+ unsigned int i;
+
+ rxq->prepared -= n_rx_pkts;
+ rxq->completed = completed + n_rx_pkts;
+
+ for (i = 0; i < n_rx_pkts; ++i, ++completed)
+ rx_pkts[i] = rxq->sw_ring[completed & rxq->ptr_mask].mbuf;
+
+ return n_rx_pkts;
+}
+
+static void
+sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
+ struct rte_mbuf *m)
+{
+ uint32_t l2_ptype = 0;
+ uint32_t l3_ptype = 0;
+ uint32_t l4_ptype = 0;
+ uint64_t ol_flags = 0;
+
+ if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))
+ goto done;
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
+ case ESE_DZ_ETH_TAG_CLASS_NONE:
+ l2_ptype = RTE_PTYPE_L2_ETHER;
+ break;
+ case ESE_DZ_ETH_TAG_CLASS_VLAN1:
+ l2_ptype = RTE_PTYPE_L2_ETHER_VLAN;
+ break;
+ case ESE_DZ_ETH_TAG_CLASS_VLAN2:
+ l2_ptype = RTE_PTYPE_L2_ETHER_QINQ;
+ break;
+ default:
+ /* Unexpected Eth tag class */
+ SFC_ASSERT(false);
+ }
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) {
+ case ESE_DZ_L3_CLASS_IP4_FRAG:
+ l4_ptype = RTE_PTYPE_L4_FRAG;
+ /* FALLTHROUGH */
+ case ESE_DZ_L3_CLASS_IP4:
+ l3_ptype = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ ol_flags |= PKT_RX_RSS_HASH |
+ ((EFX_TEST_QWORD_BIT(rx_ev,
+ ESF_DZ_RX_IPCKSUM_ERR_LBN)) ?
+ PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
+ break;
+ case ESE_DZ_L3_CLASS_IP6_FRAG:
+ l4_ptype |= RTE_PTYPE_L4_FRAG;
+ /* FALLTHROUGH */
+ case ESE_DZ_L3_CLASS_IP6:
+ l3_ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ ol_flags |= PKT_RX_RSS_HASH;
+ break;
+ case ESE_DZ_L3_CLASS_ARP:
+ /* Override Layer 2 packet type */
+ l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
+ break;
+ default:
+ /* Unexpected Layer 3 class */
+ SFC_ASSERT(false);
+ }
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L4_CLASS)) {
+ case ESE_DZ_L4_CLASS_TCP:
+ l4_ptype = RTE_PTYPE_L4_TCP;
+ ol_flags |=
+ (EFX_TEST_QWORD_BIT(rx_ev,
+ ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
+ PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case ESE_DZ_L4_CLASS_UDP:
+ l4_ptype = RTE_PTYPE_L4_UDP;
+ ol_flags |=
+ (EFX_TEST_QWORD_BIT(rx_ev,
+ ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
+ PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case ESE_DZ_L4_CLASS_UNKNOWN:
+ break;
+ default:
+ /* Unexpected Layer 4 class */
+ SFC_ASSERT(false);
+ }
+
+ /* Remove RSS hash offload flag if RSS is not enabled */
+ if (~rxq->flags & SFC_EF10_RXQ_RSS_HASH)
+ ol_flags &= ~PKT_RX_RSS_HASH;
+
+done:
+ m->ol_flags = ol_flags;
+ m->packet_type = l2_ptype | l3_ptype | l4_ptype;
+}
+
+static uint16_t
+sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr)
+{
+ return rte_le_to_cpu_16(*(const uint16_t *)&pseudo_hdr[8]);
+}
+
+static uint32_t
+sfc_ef10_rx_pseudo_hdr_get_hash(const uint8_t *pseudo_hdr)
+{
+ return rte_le_to_cpu_32(*(const uint32_t *)pseudo_hdr);
+}
+
+static uint16_t
+sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ const unsigned int ptr_mask = rxq->ptr_mask;
+ unsigned int completed = rxq->completed;
+ unsigned int ready;
+ struct sfc_ef10_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ struct rte_mbuf *m0;
+ uint16_t n_rx_pkts;
+ const uint8_t *pseudo_hdr;
+ uint16_t pkt_len;
+
+ ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - completed) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ SFC_ASSERT(ready > 0);
+
+ if (rx_ev.eq_u64[0] &
+ rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
+ (1ull << ESF_DZ_RX_ECRC_ERR_LBN))) {
+ SFC_ASSERT(rxq->prepared == 0);
+ rxq->completed += ready;
+ while (ready-- > 0) {
+ rxd = &rxq->sw_ring[completed++ & ptr_mask];
+ rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ }
+ return 0;
+ }
+
+ n_rx_pkts = RTE_MIN(ready, nb_pkts);
+ rxq->prepared = ready - n_rx_pkts;
+ rxq->completed += n_rx_pkts;
+
+ rxd = &rxq->sw_ring[completed++ & ptr_mask];
+
+ sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);
+
+ m = rxd->mbuf;
+
+ *rx_pkts++ = m;
+
+ RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
+ m->rearm_data[0] = rxq->rearm_data;
+
+ /* Classify packet based on Rx event */
+ sfc_ef10_rx_ev_to_offloads(rxq, rx_ev, m);
+
+ /* data_off already moved past pseudo header */
+ pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * Always get RSS hash from pseudo header to avoid
+ * condition/branching. If it is valid or not depends on
+ * PKT_RX_RSS_HASH in m->ol_flags.
+ */
+ m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
+
+ if (ready == 1)
+ pkt_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
+ rxq->prefix_size;
+ else
+ pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+ SFC_ASSERT(pkt_len > 0);
+ rte_pktmbuf_data_len(m) = pkt_len;
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+
+ SFC_ASSERT(m->next == NULL);
+
+ /* Remember mbuf to copy offload flags and packet type from */
+ m0 = m;
+ for (--ready; ready > 0; --ready) {
+ rxd = &rxq->sw_ring[completed++ & ptr_mask];
+
+ sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);
+
+ m = rxd->mbuf;
+
+ if (ready > rxq->prepared)
+ *rx_pkts++ = m;
+
+ RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) !=
+ sizeof(rxq->rearm_data));
+ m->rearm_data[0] = rxq->rearm_data;
+
+ /* Event-dependent information is the same */
+ m->ol_flags = m0->ol_flags;
+ m->packet_type = m0->packet_type;
+
+ /* data_off already moved past pseudo header */
+ pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * Always get RSS hash from pseudo header to avoid
+ * condition/branching. If it is valid or not depends on
+ * PKT_RX_RSS_HASH in m->ol_flags.
+ */
+ m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
+
+ pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+ SFC_ASSERT(pkt_len > 0);
+ rte_pktmbuf_data_len(m) = pkt_len;
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+
+ SFC_ASSERT(m->next == NULL);
+ }
+
+ return n_rx_pkts;
+}
+
+static bool
+sfc_ef10_rx_get_event(struct sfc_ef10_rxq *rxq, efx_qword_t *rx_ev)
+{
+ *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];
+
+ if (!sfc_ef10_ev_present(*rx_ev))
+ return false;
+
+ if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
+ FSE_AZ_EV_CODE_RX_EV)) {
+ /*
+ * Do not move read_ptr to keep the event for exception
+ * handling by the control path.
+ */
+ rxq->flags |= SFC_EF10_RXQ_EXCEPTION;
+ sfc_ef10_rx_err(&rxq->dp.dpq,
+ "RxQ exception at EvQ read ptr %#x",
+ rxq->evq_read_ptr);
+ return false;
+ }
+
+ rxq->evq_read_ptr++;
+ return true;
+}
+
+static uint16_t
+sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(rx_queue);
+ unsigned int evq_old_read_ptr;
+ uint16_t n_rx_pkts;
+ efx_qword_t rx_ev;
+
+ if (unlikely(rxq->flags &
+ (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
+ return 0;
+
+ n_rx_pkts = sfc_ef10_rx_prepared(rxq, rx_pkts, nb_pkts);
+
+ evq_old_read_ptr = rxq->evq_read_ptr;
+ while (n_rx_pkts != nb_pkts && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
+ /*
+ * DROP_EVENT is an internal to the NIC, software should
+ * never see it and, therefore, may ignore it.
+ */
+
+ n_rx_pkts += sfc_ef10_rx_process_event(rxq, rx_ev,
+ rx_pkts + n_rx_pkts,
+ nb_pkts - n_rx_pkts);
+ }
+
+ sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->ptr_mask, evq_old_read_ptr,
+ rxq->evq_read_ptr);
+
+ /* It is not a problem if we refill in the case of exception */
+ sfc_ef10_rx_qrefill(rxq);
+
+ return n_rx_pkts;
+}
+
+static const uint32_t *
+sfc_ef10_supported_ptypes_get(void)
+{
+ static const uint32_t ef10_native_ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L2_ETHER_QINQ,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ef10_native_ptypes;
+}
+
+static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
+static unsigned int
+sfc_ef10_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+{
+ /*
+ * Correct implementation requires EvQ polling and events
+ * processing (keeping all ready mbufs in prepared).
+ */
+ return -ENOTSUP;
+}
+
+
+static uint64_t
+sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
+{
+ struct rte_mbuf m;
+
+ memset(&m, 0, sizeof(m));
+
+ rte_mbuf_refcnt_set(&m, 1);
+ m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
+ m.nb_segs = 1;
+ m.port = port_id;
+
+ /* rearm_data covers structure members filled in above */
+ rte_compiler_barrier();
+ RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
+ return m.rearm_data[0];
+}
+
+static sfc_dp_rx_qcreate_t sfc_ef10_rx_qcreate;
+static int
+sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp)
+{
+ struct sfc_ef10_rxq *rxq;
+ int rc;
+
+ rc = EINVAL;
+ if (info->rxq_entries != info->evq_entries)
+ goto fail_rxq_args;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
+ info->rxq_entries,
+ sizeof(*rxq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_ring == NULL)
+ goto fail_desc_alloc;
+
+ rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
+ if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
+ rxq->flags |= SFC_EF10_RXQ_RSS_HASH;
+ rxq->ptr_mask = info->rxq_entries - 1;
+ rxq->evq_hw_ring = info->evq_hw_ring;
+ rxq->refill_threshold = info->refill_threshold;
+ rxq->rearm_data =
+ sfc_ef10_mk_mbuf_rearm_data(port_id, info->prefix_size);
+ rxq->prefix_size = info->prefix_size;
+ rxq->buf_size = info->buf_size;
+ rxq->refill_mb_pool = info->refill_mb_pool;
+ rxq->rxq_hw_ring = info->rxq_hw_ring;
+ rxq->doorbell = (volatile uint8_t *)info->mem_bar +
+ ER_DZ_RX_DESC_UPD_REG_OFST +
+ info->hw_index * ER_DZ_RX_DESC_UPD_REG_STEP;
+
+ *dp_rxqp = &rxq->dp;
+ return 0;
+
+fail_desc_alloc:
+ rte_free(rxq);
+
+fail_rxq_alloc:
+fail_rxq_args:
+ return rc;
+}
+
+static sfc_dp_rx_qdestroy_t sfc_ef10_rx_qdestroy;
+static void
+sfc_ef10_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+}
+
+static sfc_dp_rx_qstart_t sfc_ef10_rx_qstart;
+static int
+sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->prepared = 0;
+ rxq->completed = rxq->added = 0;
+
+ sfc_ef10_rx_qrefill(rxq);
+
+ rxq->evq_read_ptr = evq_read_ptr;
+
+ rxq->flags |= SFC_EF10_RXQ_STARTED;
+ rxq->flags &= ~(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION);
+
+ return 0;
+}
+
+static sfc_dp_rx_qstop_t sfc_ef10_rx_qstop;
+static void
+sfc_ef10_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
+
+ *evq_read_ptr = rxq->evq_read_ptr;
+}
+
+static sfc_dp_rx_qrx_ev_t sfc_ef10_rx_qrx_ev;
+static bool
+sfc_ef10_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
+{
+ __rte_unused struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ SFC_ASSERT(rxq->flags & SFC_EF10_RXQ_NOT_RUNNING);
+
+ /*
+ * It is safe to ignore Rx event since we free all mbufs on
+ * queue purge anyway.
+ */
+
+ return false;
+}
+
+static sfc_dp_rx_qpurge_t sfc_ef10_rx_qpurge;
+static void
+sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+ unsigned int i;
+ struct sfc_ef10_rx_sw_desc *rxd;
+
+ for (i = rxq->completed; i != rxq->added; ++i) {
+ rxd = &rxq->sw_ring[i & rxq->ptr_mask];
+ rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ rxd->mbuf = NULL;
+ }
+
+ rxq->flags &= ~SFC_EF10_RXQ_STARTED;
+}
+
+struct sfc_dp_rx sfc_ef10_rx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10,
+ .type = SFC_DP_RX,
+ .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
+ },
+ .features = 0,
+ .qcreate = sfc_ef10_rx_qcreate,
+ .qdestroy = sfc_ef10_rx_qdestroy,
+ .qstart = sfc_ef10_rx_qstart,
+ .qstop = sfc_ef10_rx_qstop,
+ .qrx_ev = sfc_ef10_rx_qrx_ev,
+ .qpurge = sfc_ef10_rx_qpurge,
+ .supported_ptypes_get = sfc_ef10_supported_ptypes_get,
+ .qdesc_npending = sfc_ef10_rx_qdesc_npending,
+ .pkt_burst = sfc_ef10_recv_pkts,
+};
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
new file mode 100644
index 00000000..bac9baa9
--- /dev/null
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -0,0 +1,560 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+
+#include <rte_mbuf.h>
+#include <rte_io.h>
+
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#include "sfc_dp_tx.h"
+#include "sfc_tweak.h"
+#include "sfc_kvargs.h"
+#include "sfc_ef10.h"
+
+#define sfc_ef10_tx_err(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
+
+/** Maximum length of the DMA descriptor data */
+#define SFC_EF10_TX_DMA_DESC_LEN_MAX \
+ ((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)
+
+/**
+ * Maximum number of descriptors/buffers in the Tx ring.
+ * It should guarantee that corresponding event queue never overfill.
+ * EF10 native datapath uses event queue of the same size as Tx queue.
+ * Maximum number of events on datapath can be estimated as number of
+ * Tx queue entries (one event per Tx buffer in the worst case) plus
+ * Tx error and flush events.
+ */
+#define SFC_EF10_TXQ_LIMIT(_ndesc) \
+ ((_ndesc) - 1 /* head must not step on tail */ - \
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
+ 1 /* Rx error */ - 1 /* flush */)
+
+struct sfc_ef10_tx_sw_desc {
+ struct rte_mbuf *mbuf;
+};
+
+struct sfc_ef10_txq {
+ unsigned int flags;
+#define SFC_EF10_TXQ_STARTED 0x1
+#define SFC_EF10_TXQ_NOT_RUNNING 0x2
+#define SFC_EF10_TXQ_EXCEPTION 0x4
+
+ unsigned int ptr_mask;
+ unsigned int added;
+ unsigned int completed;
+ unsigned int free_thresh;
+ unsigned int evq_read_ptr;
+ struct sfc_ef10_tx_sw_desc *sw_ring;
+ efx_qword_t *txq_hw_ring;
+ volatile void *doorbell;
+ efx_qword_t *evq_hw_ring;
+
+ /* Datapath transmit queue anchor */
+ struct sfc_dp_txq dp;
+};
+
+static inline struct sfc_ef10_txq *
+sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
+{
+ return container_of(dp_txq, struct sfc_ef10_txq, dp);
+}
+
+static bool
+sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
+{
+ volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;
+
+ /*
+ * Exception flag is set when reap is done.
+ * It is never done twice per packet burst get and absence of
+ * the flag is checked on burst get entry.
+ */
+ SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0);
+
+ *tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];
+
+ if (!sfc_ef10_ev_present(*tx_ev))
+ return false;
+
+ if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
+ FSE_AZ_EV_CODE_TX_EV)) {
+ /*
+ * Do not move read_ptr to keep the event for exception
+ * handling by the control path.
+ */
+ txq->flags |= SFC_EF10_TXQ_EXCEPTION;
+ sfc_ef10_tx_err(&txq->dp.dpq,
+ "TxQ exception at EvQ read ptr %#x",
+ txq->evq_read_ptr);
+ return false;
+ }
+
+ txq->evq_read_ptr++;
+ return true;
+}
+
+static void
+sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
+{
+ const unsigned int old_read_ptr = txq->evq_read_ptr;
+ const unsigned int ptr_mask = txq->ptr_mask;
+ unsigned int completed = txq->completed;
+ unsigned int pending = completed;
+ const unsigned int curr_done = pending - 1;
+ unsigned int anew_done = curr_done;
+ efx_qword_t tx_ev;
+
+ while (sfc_ef10_tx_get_event(txq, &tx_ev)) {
+ /*
+ * DROP_EVENT is an internal to the NIC, software should
+ * never see it and, therefore, may ignore it.
+ */
+
+ /* Update the latest done descriptor */
+ anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
+ }
+ pending += (anew_done - curr_done) & ptr_mask;
+
+ if (pending != completed) {
+ do {
+ struct sfc_ef10_tx_sw_desc *txd;
+
+ txd = &txq->sw_ring[completed & ptr_mask];
+
+ if (txd->mbuf != NULL) {
+ rte_pktmbuf_free(txd->mbuf);
+ txd->mbuf = NULL;
+ }
+ } while (++completed != pending);
+
+ txq->completed = completed;
+ }
+
+ sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
+ txq->evq_read_ptr);
+}
+
+static void
+sfc_ef10_tx_qdesc_dma_create(phys_addr_t addr, uint16_t size, bool eop,
+ efx_qword_t *edp)
+{
+ EFX_POPULATE_QWORD_4(*edp,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, !eop,
+ ESF_DZ_TX_KER_BYTE_CNT, size,
+ ESF_DZ_TX_KER_BUF_ADDR, addr);
+}
+
+static inline void
+sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
+ unsigned int pushed)
+{
+ efx_qword_t desc;
+ efx_oword_t oword;
+
+ /*
+ * This improves performance by pushing a TX descriptor at the same
+ * time as the doorbell. The descriptor must be added to the TXQ,
+ * so that can be used if the hardware decides not to use the pushed
+ * descriptor.
+ */
+ desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
+ EFX_POPULATE_OWORD_3(oword,
+ ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
+ ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
+ ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
+
+ /* DMA sync to device is not required */
+
+ /*
+ * rte_io_wmb() which guarantees that the STORE operations
+ * (i.e. Tx and event descriptor updates) that precede
+ * the rte_io_wmb() call are visible to NIC before the STORE
+ * operations that follow it (i.e. doorbell write).
+ */
+ rte_io_wmb();
+
+ *(volatile __m128i *)txq->doorbell = oword.eo_u128[0];
+}
+
+static unsigned int
+sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m)
+{
+ unsigned int extra_descs_per_seg;
+ unsigned int extra_descs_per_pkt;
+
+ /*
+ * VLAN offload is not supported yet, so no extra descriptors
+ * are required for VLAN option descriptor.
+ */
+
+/** Maximum length of the mbuf segment data */
+#define SFC_MBUF_SEG_LEN_MAX UINT16_MAX
+ RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);
+
+ /*
+ * Each segment is already counted once below. So, calculate
+ * how many extra DMA descriptors may be required per segment in
+ * the worst case because of maximum DMA descriptor length limit.
+ * If maximum segment length is less or equal to maximum DMA
+ * descriptor length, no extra DMA descriptors are required.
+ */
+ extra_descs_per_seg =
+ (SFC_MBUF_SEG_LEN_MAX - 1) / SFC_EF10_TX_DMA_DESC_LEN_MAX;
+
+/** Maximum length of the packet */
+#define SFC_MBUF_PKT_LEN_MAX UINT32_MAX
+ RTE_BUILD_BUG_ON(sizeof(m->pkt_len) != 4);
+
+ /*
+ * One more limitation on maximum number of extra DMA descriptors
+ * comes from slicing entire packet because of DMA descriptor length
+ * limit taking into account that there is at least one segment
+ * which is already counted below (so division of the maximum
+ * packet length minus one with round down).
+ * TSO is not supported yet, so packet length is limited by
+ * maximum PDU size.
+ */
+ extra_descs_per_pkt =
+ (RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
+ SFC_MBUF_PKT_LEN_MAX) - 1) /
+ SFC_EF10_TX_DMA_DESC_LEN_MAX;
+
+ return m->nb_segs + RTE_MIN(m->nb_segs * extra_descs_per_seg,
+ extra_descs_per_pkt);
+}
+
+static uint16_t
+sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
+ unsigned int ptr_mask;
+ unsigned int added;
+ unsigned int dma_desc_space;
+ bool reap_done;
+ struct rte_mbuf **pktp;
+ struct rte_mbuf **pktp_end;
+
+ if (unlikely(txq->flags &
+ (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
+ return 0;
+
+ ptr_mask = txq->ptr_mask;
+ added = txq->added;
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+
+ reap_done = (dma_desc_space < txq->free_thresh);
+ if (reap_done) {
+ sfc_ef10_tx_reap(txq);
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+ }
+
+ for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
+ pktp != pktp_end;
+ ++pktp) {
+ struct rte_mbuf *m_seg = *pktp;
+ unsigned int pkt_start = added;
+ uint32_t pkt_len;
+
+ if (likely(pktp + 1 != pktp_end))
+ rte_mbuf_prefetch_part1(pktp[1]);
+
+ if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) {
+ if (reap_done)
+ break;
+
+ /* Push already prepared descriptors before polling */
+ if (added != txq->added) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+ sfc_ef10_tx_reap(txq);
+ reap_done = true;
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+ if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space)
+ break;
+ }
+
+ pkt_len = m_seg->pkt_len;
+ do {
+ phys_addr_t seg_addr = rte_mbuf_data_dma_addr(m_seg);
+ unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
+
+ SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);
+
+ pkt_len -= seg_len;
+
+ sfc_ef10_tx_qdesc_dma_create(seg_addr,
+ seg_len, (pkt_len == 0),
+ &txq->txq_hw_ring[added & ptr_mask]);
+ ++added;
+
+ } while ((m_seg = m_seg->next) != 0);
+
+ dma_desc_space -= (added - pkt_start);
+
+ /* Assign mbuf to the last used desc */
+ txq->sw_ring[(added - 1) & ptr_mask].mbuf = *pktp;
+ }
+
+ if (likely(added != txq->added)) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
+ if (!reap_done)
+ sfc_ef10_tx_reap(txq);
+#endif
+
+ return pktp - &tx_pkts[0];
+}
+
+static uint16_t
+sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
+ unsigned int ptr_mask;
+ unsigned int added;
+ unsigned int dma_desc_space;
+ bool reap_done;
+ struct rte_mbuf **pktp;
+ struct rte_mbuf **pktp_end;
+
+ if (unlikely(txq->flags &
+ (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
+ return 0;
+
+ ptr_mask = txq->ptr_mask;
+ added = txq->added;
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+
+ reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
+ if (reap_done) {
+ sfc_ef10_tx_reap(txq);
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+ }
+
+ pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)];
+ for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) {
+ struct rte_mbuf *pkt = *pktp;
+ unsigned int id = added & ptr_mask;
+
+ SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
+ SFC_EF10_TX_DMA_DESC_LEN_MAX);
+
+ sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_dma_addr(pkt),
+ rte_pktmbuf_data_len(pkt),
+ true, &txq->txq_hw_ring[id]);
+
+ txq->sw_ring[id].mbuf = pkt;
+
+ ++added;
+ }
+
+ if (likely(added != txq->added)) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
+ if (!reap_done)
+ sfc_ef10_tx_reap(txq);
+#endif
+
+ return pktp - &tx_pkts[0];
+}
+
+
+static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
+static int
+sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_tx_qcreate_info *info,
+ struct sfc_dp_txq **dp_txqp)
+{
+ struct sfc_ef10_txq *txq;
+ int rc;
+
+ rc = EINVAL;
+ if (info->txq_entries != info->evq_entries)
+ goto fail_bad_args;
+
+ rc = ENOMEM;
+ txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ goto fail_txq_alloc;
+
+ sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
+ info->txq_entries,
+ sizeof(*txq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL)
+ goto fail_sw_ring_alloc;
+
+ txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
+ txq->ptr_mask = info->txq_entries - 1;
+ txq->free_thresh = info->free_thresh;
+ txq->txq_hw_ring = info->txq_hw_ring;
+ txq->doorbell = (volatile uint8_t *)info->mem_bar +
+ ER_DZ_TX_DESC_UPD_REG_OFST +
+ info->hw_index * ER_DZ_TX_DESC_UPD_REG_STEP;
+ txq->evq_hw_ring = info->evq_hw_ring;
+
+ *dp_txqp = &txq->dp;
+ return 0;
+
+fail_sw_ring_alloc:
+ rte_free(txq);
+
+fail_txq_alloc:
+fail_bad_args:
+ return rc;
+}
+
+static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
+static void
+sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+}
+
+static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
+static int
+sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
+ unsigned int txq_desc_index)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ txq->evq_read_ptr = evq_read_ptr;
+ txq->added = txq->completed = txq_desc_index;
+
+ txq->flags |= SFC_EF10_TXQ_STARTED;
+ txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);
+
+ return 0;
+}
+
+static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
+static void
+sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;
+
+ *evq_read_ptr = txq->evq_read_ptr;
+}
+
+static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
+static bool
+sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
+{
+ __rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);
+
+ /*
+ * It is safe to ignore Tx event since we reap all mbufs on
+ * queue purge anyway.
+ */
+
+ return false;
+}
+
+static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
+static void
+sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+ unsigned int txds;
+
+ for (txds = 0; txds <= txq->ptr_mask; ++txds) {
+ if (txq->sw_ring[txds].mbuf != NULL) {
+ rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
+ txq->sw_ring[txds].mbuf = NULL;
+ }
+ }
+
+ txq->flags &= ~SFC_EF10_TXQ_STARTED;
+}
+
+struct sfc_dp_tx sfc_ef10_tx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10,
+ .type = SFC_DP_TX,
+ .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
+ },
+ .features = SFC_DP_TX_FEAT_MULTI_SEG,
+ .qcreate = sfc_ef10_tx_qcreate,
+ .qdestroy = sfc_ef10_tx_qdestroy,
+ .qstart = sfc_ef10_tx_qstart,
+ .qtx_ev = sfc_ef10_tx_qtx_ev,
+ .qstop = sfc_ef10_tx_qstop,
+ .qreap = sfc_ef10_tx_qreap,
+ .pkt_burst = sfc_ef10_xmit_pkts,
+};
+
+struct sfc_dp_tx sfc_ef10_simple_tx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10_SIMPLE,
+ .type = SFC_DP_TX,
+ },
+ .features = 0,
+ .qcreate = sfc_ef10_tx_qcreate,
+ .qdestroy = sfc_ef10_tx_qdestroy,
+ .qstart = sfc_ef10_tx_qstart,
+ .qtx_ev = sfc_ef10_tx_qtx_ev,
+ .qstop = sfc_ef10_tx_qstop,
+ .qreap = sfc_ef10_tx_qreap,
+ .pkt_burst = sfc_ef10_simple_xmit_pkts,
+};
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
new file mode 100644
index 00000000..4c9335f3
--- /dev/null
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -0,0 +1,1642 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_pci.h>
+#include <rte_errno.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_kvargs.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_tx.h"
+#include "sfc_flow.h"
+#include "sfc_dp.h"
+#include "sfc_dp_rx.h"
+
+static struct sfc_dp_list sfc_dp_head =
+ TAILQ_HEAD_INITIALIZER(sfc_dp_head);
+
+static int
+sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ efx_nic_fw_info_t enfi;
+ int ret;
+ int rc;
+
+ /*
+ * Return value of the callback is likely supposed to be
+ * equal to or greater than 0, nevertheless, if an error
+ * occurs, it will be desirable to pass it to the caller
+ */
+ if ((fw_version == NULL) || (fw_size == 0))
+ return -EINVAL;
+
+ rc = efx_nic_get_fw_version(sa->nic, &enfi);
+ if (rc != 0)
+ return -rc;
+
+ ret = snprintf(fw_version, fw_size,
+ "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
+ enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
+ enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
+ if (ret < 0)
+ return ret;
+
+ if (enfi.enfi_dpcpu_fw_ids_valid) {
+ size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
+ int ret_extra;
+
+ ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
+ fw_size - dpcpu_fw_ids_offset,
+ " rx%" PRIx16 " tx%" PRIx16,
+ enfi.enfi_rx_dpcpu_fw_id,
+ enfi.enfi_tx_dpcpu_fw_id);
+ if (ret_extra < 0)
+ return ret_extra;
+
+ ret += ret_extra;
+ }
+
+ if (fw_size < (size_t)(++ret))
+ return ret;
+ else
+ return 0;
+}
+
+static void
+sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+
+ sfc_log_init(sa, "entry");
+
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
+
+ /* Autonegotiation may be disabled */
+ dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_1G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+
+ dev_info->max_rx_queues = sa->rxq_max;
+ dev_info->max_tx_queues = sa->txq_max;
+
+ /* By default packets are dropped if no descriptors are available */
+ dev_info->default_rxconf.rx_drop_en = 1;
+
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
+ if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) ||
+ !encp->enc_hw_tx_insert_vlan_enabled)
+ dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
+ else
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
+
+ if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
+ dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+
+#if EFSYS_OPT_RX_SCALE
+ if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) {
+ dev_info->reta_size = EFX_RSS_TBL_SIZE;
+ dev_info->hash_key_size = SFC_RSS_KEY_SIZE;
+ dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS;
+ }
+#endif
+
+ if (sa->tso)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
+ dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
+ /* The RXQ hardware requires that the descriptor count is a power
+ * of 2, but rx_desc_lim cannot properly describe that constraint.
+ */
+ dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
+
+ dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
+ dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
+ /*
+ * The TXQ hardware requires that the descriptor count is a power
+ * of 2, but tx_desc_lim cannot properly describe that constraint
+ */
+ dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
+}
+
+static const uint32_t *
+sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ return sa->dp_rx->supported_ptypes_get();
+}
+
+static int
+sfc_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *dev_data = dev->data;
+ struct sfc_adapter *sa = dev_data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
+ dev_data->nb_rx_queues, dev_data->nb_tx_queues);
+
+ sfc_adapter_lock(sa);
+ switch (sa->state) {
+ case SFC_ADAPTER_CONFIGURED:
+ /* FALLTHROUGH */
+ case SFC_ADAPTER_INITIALIZED:
+ rc = sfc_configure(sa);
+ break;
+ default:
+ sfc_err(sa, "unexpected adapter state %u to configure",
+ sa->state);
+ rc = EINVAL;
+ break;
+ }
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done %d", rc);
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_dev_start(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ rc = sfc_start(sa);
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done %d", rc);
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_eth_link *dev_link = &dev->data->dev_link;
+ struct rte_eth_link old_link;
+ struct rte_eth_link current_link;
+
+ sfc_log_init(sa, "entry");
+
+retry:
+ EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
+ *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);
+
+ if (sa->state != SFC_ADAPTER_STARTED) {
+ sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
+ if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
+ *(uint64_t *)&old_link,
+ *(uint64_t *)&current_link))
+ goto retry;
+ } else if (wait_to_complete) {
+ efx_link_mode_t link_mode;
+
+ if (efx_port_poll(sa->nic, &link_mode) != 0)
+ link_mode = EFX_LINK_UNKNOWN;
+ sfc_port_link_mode_to_info(link_mode, &current_link);
+
+ if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
+ *(uint64_t *)&old_link,
+ *(uint64_t *)&current_link))
+ goto retry;
+ } else {
+ sfc_ev_mgmt_qpoll(sa);
+ *(int64_t *)&current_link =
+ rte_atomic64_read((rte_atomic64_t *)dev_link);
+ }
+
+ if (old_link.link_status != current_link.link_status)
+ sfc_info(sa, "Link status is %s",
+ current_link.link_status ? "UP" : "DOWN");
+
+ return old_link.link_status == current_link.link_status ? 0 : -1;
+}
+
+static void
+sfc_dev_stop(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ sfc_stop(sa);
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+}
+
+static int
+sfc_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ rc = sfc_start(sa);
+ sfc_adapter_unlock(sa);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ sfc_stop(sa);
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static void
+sfc_dev_close(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ switch (sa->state) {
+ case SFC_ADAPTER_STARTED:
+ sfc_stop(sa);
+ SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
+ /* FALLTHROUGH */
+ case SFC_ADAPTER_CONFIGURED:
+ sfc_close(sa);
+ SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
+ /* FALLTHROUGH */
+ case SFC_ADAPTER_INITIALIZED:
+ break;
+ default:
+ sfc_err(sa, "unexpected adapter state %u on close", sa->state);
+ break;
+ }
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+}
+
+static void
+sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
+ boolean_t enabled)
+{
+ struct sfc_port *port;
+ boolean_t *toggle;
+ struct sfc_adapter *sa = dev->data->dev_private;
+ boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
+ const char *desc = (allmulti) ? "all-multi" : "promiscuous";
+
+ sfc_adapter_lock(sa);
+
+ port = &sa->port;
+ toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);
+
+ if (*toggle != enabled) {
+ *toggle = enabled;
+
+ if ((sa->state == SFC_ADAPTER_STARTED) &&
+ (sfc_set_rx_mode(sa) != 0)) {
+ *toggle = !(enabled);
+ sfc_warn(sa, "Failed to %s %s mode",
+ ((enabled) ? "enable" : "disable"), desc);
+ }
+ }
+
+ sfc_adapter_unlock(sa);
+}
+
+static void
+sfc_dev_promisc_enable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
+}
+
+static void
+sfc_dev_promisc_disable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
+}
+
+static void
+sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
+}
+
+static void
+sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
+}
+
+static int
+sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
+ rx_queue_id, nb_rx_desc, socket_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
+ rx_conf, mb_pool);
+ if (rc != 0)
+ goto fail_rx_qinit;
+
+ dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq->dp;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_rx_qinit:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static void
+sfc_rx_queue_release(void *queue)
+{
+ struct sfc_dp_rxq *dp_rxq = queue;
+ struct sfc_rxq *rxq;
+ struct sfc_adapter *sa;
+ unsigned int sw_index;
+
+ if (dp_rxq == NULL)
+ return;
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ sa = rxq->evq->sa;
+ sfc_adapter_lock(sa);
+
+ sw_index = sfc_rxq_sw_index(rxq);
+
+ sfc_log_init(sa, "RxQ=%u", sw_index);
+
+ sa->eth_dev->data->rx_queues[sw_index] = NULL;
+
+ sfc_rx_qfini(sa, sw_index);
+
+ sfc_adapter_unlock(sa);
+}
+
+static int
+sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
+ tx_queue_id, nb_tx_desc, socket_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+ if (rc != 0)
+ goto fail_tx_qinit;
+
+ dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq->dp;
+
+ sfc_adapter_unlock(sa);
+ return 0;
+
+fail_tx_qinit:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static void
+sfc_tx_queue_release(void *queue)
+{
+ struct sfc_dp_txq *dp_txq = queue;
+ struct sfc_txq *txq;
+ unsigned int sw_index;
+ struct sfc_adapter *sa;
+
+ if (dp_txq == NULL)
+ return;
+
+ txq = sfc_txq_by_dp_txq(dp_txq);
+ sw_index = sfc_txq_sw_index(txq);
+
+ SFC_ASSERT(txq->evq != NULL);
+ sa = txq->evq->sa;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ sfc_adapter_lock(sa);
+
+ SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
+ sa->eth_dev->data->tx_queues[sw_index] = NULL;
+
+ sfc_tx_qfini(sa, sw_index);
+
+ sfc_adapter_unlock(sa);
+}
+
+static void
+sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint64_t *mac_stats;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+
+ if (sfc_port_update_mac_stats(sa) != 0)
+ goto unlock;
+
+ mac_stats = port->mac_stats_buf;
+
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
+ EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
+ stats->ipackets =
+ mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
+ stats->opackets =
+ mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
+ stats->ibytes =
+ mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
+ stats->obytes =
+ mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
+ stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
+ stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
+ stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
+ } else {
+ stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
+ stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
+ stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
+ stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
+ /*
+ * Take into account stats which are whenever supported
+ * on EF10. If some stat is not supported by current
+ * firmware variant or HW revision, it is guaranteed
+ * to be zero in mac_stats.
+ */
+ stats->imissed =
+ mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
+ mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
+ mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
+ mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
+ mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
+ mac_stats[EFX_MAC_PM_TRUNC_QBB] +
+ mac_stats[EFX_MAC_PM_DISCARD_QBB] +
+ mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
+ mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
+ mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
+ stats->ierrors =
+ mac_stats[EFX_MAC_RX_FCS_ERRORS] +
+ mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
+ mac_stats[EFX_MAC_RX_JABBER_PKTS];
+ /* no oerrors counters supported on EF10 */
+ }
+
+unlock:
+ rte_spinlock_unlock(&port->mac_stats_lock);
+}
+
+static void
+sfc_stats_reset(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ if (sa->state != SFC_ADAPTER_STARTED) {
+ /*
+ * The operation cannot be done if port is not started; it
+ * will be scheduled to be done during the next port start
+ */
+ port->mac_stats_reset_pending = B_TRUE;
+ return;
+ }
+
+ rc = sfc_port_reset_mac_stats(sa);
+ if (rc != 0)
+ sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
+}
+
+static int
+sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int xstats_count)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint64_t *mac_stats;
+ int rc;
+ unsigned int i;
+ int nstats = 0;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+
+ rc = sfc_port_update_mac_stats(sa);
+ if (rc != 0) {
+ SFC_ASSERT(rc > 0);
+ nstats = -rc;
+ goto unlock;
+ }
+
+ mac_stats = port->mac_stats_buf;
+
+ for (i = 0; i < EFX_MAC_NSTATS; ++i) {
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
+ if (xstats != NULL && nstats < (int)xstats_count) {
+ xstats[nstats].id = nstats;
+ xstats[nstats].value = mac_stats[i];
+ }
+ nstats++;
+ }
+ }
+
+unlock:
+ rte_spinlock_unlock(&port->mac_stats_lock);
+
+ return nstats;
+}
+
+static int
+sfc_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int xstats_count)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ unsigned int i;
+ unsigned int nstats = 0;
+
+ for (i = 0; i < EFX_MAC_NSTATS; ++i) {
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
+ if (xstats_names != NULL && nstats < xstats_count)
+ strncpy(xstats_names[nstats].name,
+ efx_mac_stat_name(sa->nic, i),
+ sizeof(xstats_names[0].name));
+ nstats++;
+ }
+ }
+
+ return nstats;
+}
+
+static int
+sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int wanted_fc, link_fc;
+
+ memset(fc_conf, 0, sizeof(*fc_conf));
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state == SFC_ADAPTER_STARTED)
+ efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
+ else
+ link_fc = sa->port.flow_ctrl;
+
+ switch (link_fc) {
+ case 0:
+ fc_conf->mode = RTE_FC_NONE;
+ break;
+ case EFX_FCNTL_RESPOND:
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ break;
+ case EFX_FCNTL_GENERATE:
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ break;
+ case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
+ fc_conf->mode = RTE_FC_FULL;
+ break;
+ default:
+ sfc_err(sa, "%s: unexpected flow control value %#x",
+ __func__, link_fc);
+ }
+
+ fc_conf->autoneg = sa->port.flow_ctrl_autoneg;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ unsigned int fcntl;
+ int rc;
+
+ if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
+ fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
+ fc_conf->mac_ctrl_frame_fwd != 0) {
+ sfc_err(sa, "unsupported flow control settings specified");
+ rc = EINVAL;
+ goto fail_inval;
+ }
+
+ switch (fc_conf->mode) {
+ case RTE_FC_NONE:
+ fcntl = 0;
+ break;
+ case RTE_FC_RX_PAUSE:
+ fcntl = EFX_FCNTL_RESPOND;
+ break;
+ case RTE_FC_TX_PAUSE:
+ fcntl = EFX_FCNTL_GENERATE;
+ break;
+ case RTE_FC_FULL:
+ fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail_inval;
+ }
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
+ if (rc != 0)
+ goto fail_mac_fcntl_set;
+ }
+
+ port->flow_ctrl = fcntl;
+ port->flow_ctrl_autoneg = fc_conf->autoneg;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_mac_fcntl_set:
+ sfc_adapter_unlock(sa);
+fail_inval:
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ size_t pdu = EFX_MAC_PDU(mtu);
+ size_t old_pdu;
+ int rc;
+
+ sfc_log_init(sa, "mtu=%u", mtu);
+
+ rc = EINVAL;
+ if (pdu < EFX_MAC_PDU_MIN) {
+ sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
+ (unsigned int)mtu, (unsigned int)pdu,
+ EFX_MAC_PDU_MIN);
+ goto fail_inval;
+ }
+ if (pdu > EFX_MAC_PDU_MAX) {
+ sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
+ (unsigned int)mtu, (unsigned int)pdu,
+ EFX_MAC_PDU_MAX);
+ goto fail_inval;
+ }
+
+ sfc_adapter_lock(sa);
+
+ if (pdu != sa->port.pdu) {
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ sfc_stop(sa);
+
+ old_pdu = sa->port.pdu;
+ sa->port.pdu = pdu;
+ rc = sfc_start(sa);
+ if (rc != 0)
+ goto fail_start;
+ } else {
+ sa->port.pdu = pdu;
+ }
+ }
+
+ /*
+ * The driver does not use it, but other PMDs update jumbo_frame
+ * flag and max_rx_pkt_len when MTU is set.
+ */
+ dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
+
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_start:
+ sa->port.pdu = old_pdu;
+ if (sfc_start(sa) != 0)
+ sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
+ "PDU max size - port is stopped",
+ (unsigned int)pdu, (unsigned int)old_pdu);
+ sfc_adapter_unlock(sa);
+
+fail_inval:
+ sfc_log_init(sa, "failed %d", rc);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+static void
+sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ int rc;
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state != SFC_ADAPTER_STARTED) {
+ sfc_info(sa, "the port is not started");
+ sfc_info(sa, "the new MAC address will be set on port start");
+
+ goto unlock;
+ }
+
+ if (encp->enc_allow_set_mac_with_installed_filters) {
+ rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
+ if (rc != 0) {
+ sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
+ goto unlock;
+ }
+
+ /*
+ * Changing the MAC address by means of MCDI request
+ * has no effect on received traffic, therefore
+ * we also need to update unicast filters
+ */
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ sfc_err(sa, "cannot set filter (rc = %u)", rc);
+ } else {
+ sfc_warn(sa, "cannot set MAC address with filters installed");
+ sfc_warn(sa, "adapter will be restarted to pick the new MAC");
+ sfc_warn(sa, "(some traffic may be dropped)");
+
+ /*
+ * Since setting MAC address with filters installed is not
+ * allowed on the adapter, one needs to simply restart adapter
+ * so that the new MAC address will be taken from an outer
+ * storage and set flawlessly by means of sfc_start() call
+ */
+ sfc_stop(sa);
+ rc = sfc_start(sa);
+ if (rc != 0)
+ sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
+ }
+
+unlock:
+ sfc_adapter_unlock(sa);
+}
+
+
+static int
+sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint8_t *mc_addrs = port->mcast_addrs;
+ int rc;
+ unsigned int i;
+
+ if (mc_addrs == NULL)
+ return -ENOBUFS;
+
+ if (nb_mc_addr > port->max_mcast_addrs) {
+ sfc_err(sa, "too many multicast addresses: %u > %u",
+ nb_mc_addr, port->max_mcast_addrs);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nb_mc_addr; ++i) {
+ (void)rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
+ EFX_MAC_ADDR_LEN);
+ mc_addrs += EFX_MAC_ADDR_LEN;
+ }
+
+ port->nb_mcast_addrs = nb_mc_addr;
+
+ if (sa->state != SFC_ADAPTER_STARTED)
+ return 0;
+
+ rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
+ port->nb_mcast_addrs);
+ if (rc != 0)
+ sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);
+
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static void
+sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+
+ sfc_adapter_lock(sa);
+
+ SFC_ASSERT(rx_queue_id < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[rx_queue_id];
+ rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq != NULL);
+
+ qinfo->mp = rxq->refill_mb_pool;
+ qinfo->conf.rx_free_thresh = rxq->refill_threshold;
+ qinfo->conf.rx_drop_en = 1;
+ qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
+ qinfo->scattered_rx = (rxq_info->type == EFX_RXQ_TYPE_SCATTER);
+ qinfo->nb_desc = rxq_info->entries;
+
+ sfc_adapter_unlock(sa);
+}
+
+static void
+sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_txq_info *txq_info;
+
+ sfc_adapter_lock(sa);
+
+ SFC_ASSERT(tx_queue_id < sa->txq_count);
+
+ txq_info = &sa->txq_info[tx_queue_id];
+ SFC_ASSERT(txq_info->txq != NULL);
+
+ memset(qinfo, 0, sizeof(*qinfo));
+
+ qinfo->conf.txq_flags = txq_info->txq->flags;
+ qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
+ qinfo->conf.tx_deferred_start = txq_info->deferred_start;
+ qinfo->nb_desc = txq_info->entries;
+
+ sfc_adapter_unlock(sa);
+}
+
+static uint32_t
+sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+
+ return sfc_rx_qdesc_npending(sa, rx_queue_id);
+}
+
+static int
+sfc_rx_descriptor_done(void *queue, uint16_t offset)
+{
+ struct sfc_dp_rxq *dp_rxq = queue;
+
+ return sfc_rx_qdesc_done(dp_rxq, offset);
+}
+
+static int
+sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = EINVAL;
+ if (sa->state != SFC_ADAPTER_STARTED)
+ goto fail_not_started;
+
+ rc = sfc_rx_qstart(sa, rx_queue_id);
+ if (rc != 0)
+ goto fail_rx_qstart;
+
+ sa->rxq_info[rx_queue_id].deferred_started = B_TRUE;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_rx_qstart:
+fail_not_started:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+
+ sfc_adapter_lock(sa);
+ sfc_rx_qstop(sa, rx_queue_id);
+
+ sa->rxq_info[rx_queue_id].deferred_started = B_FALSE;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = EINVAL;
+ if (sa->state != SFC_ADAPTER_STARTED)
+ goto fail_not_started;
+
+ rc = sfc_tx_qstart(sa, tx_queue_id);
+ if (rc != 0)
+ goto fail_tx_qstart;
+
+ sa->txq_info[tx_queue_id].deferred_started = B_TRUE;
+
+ sfc_adapter_unlock(sa);
+ return 0;
+
+fail_tx_qstart:
+
+fail_not_started:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+
+ sfc_adapter_lock(sa);
+
+ sfc_tx_qstop(sa, tx_queue_id);
+
+ sa->txq_info[tx_queue_id].deferred_started = B_FALSE;
+
+ sfc_adapter_unlock(sa);
+ return 0;
+}
+
+#if EFSYS_OPT_RX_SCALE
+static int
+sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
+ return -ENOTSUP;
+
+ if (sa->rss_channels == 0)
+ return -EINVAL;
+
+ sfc_adapter_lock(sa);
+
+ /*
+ * Mapping of hash configuration between RTE and EFX is not one-to-one,
+ * hence, conversion is done here to derive a correct set of ETH_RSS
+ * flags which corresponds to the active EFX configuration stored
+ * locally in 'sfc_adapter' and kept up-to-date
+ */
+ rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types);
+ rss_conf->rss_key_len = SFC_RSS_KEY_SIZE;
+ if (rss_conf->rss_key != NULL)
+ rte_memcpy(rss_conf->rss_key, sa->rss_key, SFC_RSS_KEY_SIZE);
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int efx_hash_types;
+ int rc = 0;
+
+ if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
+ sfc_err(sa, "RSS is not available");
+ return -ENOTSUP;
+ }
+
+ if (sa->rss_channels == 0) {
+ sfc_err(sa, "RSS is not configured");
+ return -EINVAL;
+ }
+
+ if ((rss_conf->rss_key != NULL) &&
+ (rss_conf->rss_key_len != sizeof(sa->rss_key))) {
+ sfc_err(sa, "RSS key size is wrong (should be %lu)",
+ sizeof(sa->rss_key));
+ return -EINVAL;
+ }
+
+ if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) {
+ sfc_err(sa, "unsupported hash functions requested");
+ return -EINVAL;
+ }
+
+ sfc_adapter_lock(sa);
+
+ efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf);
+
+ rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ efx_hash_types, B_TRUE);
+ if (rc != 0)
+ goto fail_scale_mode_set;
+
+ if (rss_conf->rss_key != NULL) {
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_rx_scale_key_set(sa->nic, rss_conf->rss_key,
+ sizeof(sa->rss_key));
+ if (rc != 0)
+ goto fail_scale_key_set;
+ }
+
+ rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key));
+ }
+
+ sa->rss_hash_types = efx_hash_types;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_scale_key_set:
+ if (efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ sa->rss_hash_types, B_TRUE) != 0)
+ sfc_err(sa, "failed to restore RSS mode");
+
+fail_scale_mode_set:
+ sfc_adapter_unlock(sa);
+ return -rc;
+}
+
+static int
+sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int entry;
+
+ if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
+ return -ENOTSUP;
+
+ if (sa->rss_channels == 0)
+ return -EINVAL;
+
+ if (reta_size != EFX_RSS_TBL_SIZE)
+ return -EINVAL;
+
+ sfc_adapter_lock(sa);
+
+ for (entry = 0; entry < reta_size; entry++) {
+ int grp = entry / RTE_RETA_GROUP_SIZE;
+ int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+
+ if ((reta_conf[grp].mask >> grp_idx) & 1)
+ reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry];
+ }
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int *rss_tbl_new;
+ uint16_t entry;
+ int rc;
+
+
+ if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
+ sfc_err(sa, "RSS is not available");
+ return -ENOTSUP;
+ }
+
+ if (sa->rss_channels == 0) {
+ sfc_err(sa, "RSS is not configured");
+ return -EINVAL;
+ }
+
+ if (reta_size != EFX_RSS_TBL_SIZE) {
+ sfc_err(sa, "RETA size is wrong (should be %u)",
+ EFX_RSS_TBL_SIZE);
+ return -EINVAL;
+ }
+
+ rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0);
+ if (rss_tbl_new == NULL)
+ return -ENOMEM;
+
+ sfc_adapter_lock(sa);
+
+ rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl));
+
+ for (entry = 0; entry < reta_size; entry++) {
+ int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+ struct rte_eth_rss_reta_entry64 *grp;
+
+ grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
+
+ if (grp->mask & (1ull << grp_idx)) {
+ if (grp->reta[grp_idx] >= sa->rss_channels) {
+ rc = EINVAL;
+ goto bad_reta_entry;
+ }
+ rss_tbl_new[entry] = grp->reta[grp_idx];
+ }
+ }
+
+ rc = efx_rx_scale_tbl_set(sa->nic, rss_tbl_new, EFX_RSS_TBL_SIZE);
+ if (rc == 0)
+ rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl));
+
+bad_reta_entry:
+ sfc_adapter_unlock(sa);
+
+ rte_free(rss_tbl_new);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+#endif
+
+static int
+sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc = ENOTSUP;
+
+ sfc_log_init(sa, "entry");
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NONE:
+ sfc_err(sa, "Global filters configuration not supported");
+ break;
+ case RTE_ETH_FILTER_MACVLAN:
+ sfc_err(sa, "MACVLAN filters not supported");
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ sfc_err(sa, "EtherType filters not supported");
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ sfc_err(sa, "Flexible filters not supported");
+ break;
+ case RTE_ETH_FILTER_SYN:
+ sfc_err(sa, "SYN filters not supported");
+ break;
+ case RTE_ETH_FILTER_NTUPLE:
+ sfc_err(sa, "NTUPLE filters not supported");
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ sfc_err(sa, "Tunnel filters not supported");
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ sfc_err(sa, "Flow Director filters not supported");
+ break;
+ case RTE_ETH_FILTER_HASH:
+ sfc_err(sa, "Hash filters not supported");
+ break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET) {
+ rc = EINVAL;
+ } else {
+ *(const void **)arg = &sfc_flow_ops;
+ rc = 0;
+ }
+ break;
+ default:
+ sfc_err(sa, "Unknown filter type %u", filter_type);
+ break;
+ }
+
+ sfc_log_init(sa, "exit: %d", -rc);
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static const struct eth_dev_ops sfc_eth_dev_ops = {
+ .dev_configure = sfc_dev_configure,
+ .dev_start = sfc_dev_start,
+ .dev_stop = sfc_dev_stop,
+ .dev_set_link_up = sfc_dev_set_link_up,
+ .dev_set_link_down = sfc_dev_set_link_down,
+ .dev_close = sfc_dev_close,
+ .promiscuous_enable = sfc_dev_promisc_enable,
+ .promiscuous_disable = sfc_dev_promisc_disable,
+ .allmulticast_enable = sfc_dev_allmulti_enable,
+ .allmulticast_disable = sfc_dev_allmulti_disable,
+ .link_update = sfc_dev_link_update,
+ .stats_get = sfc_stats_get,
+ .stats_reset = sfc_stats_reset,
+ .xstats_get = sfc_xstats_get,
+ .xstats_reset = sfc_stats_reset,
+ .xstats_get_names = sfc_xstats_get_names,
+ .dev_infos_get = sfc_dev_infos_get,
+ .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
+ .mtu_set = sfc_dev_set_mtu,
+ .rx_queue_start = sfc_rx_queue_start,
+ .rx_queue_stop = sfc_rx_queue_stop,
+ .tx_queue_start = sfc_tx_queue_start,
+ .tx_queue_stop = sfc_tx_queue_stop,
+ .rx_queue_setup = sfc_rx_queue_setup,
+ .rx_queue_release = sfc_rx_queue_release,
+ .rx_queue_count = sfc_rx_queue_count,
+ .rx_descriptor_done = sfc_rx_descriptor_done,
+ .tx_queue_setup = sfc_tx_queue_setup,
+ .tx_queue_release = sfc_tx_queue_release,
+ .flow_ctrl_get = sfc_flow_ctrl_get,
+ .flow_ctrl_set = sfc_flow_ctrl_set,
+ .mac_addr_set = sfc_mac_addr_set,
+#if EFSYS_OPT_RX_SCALE
+ .reta_update = sfc_dev_rss_reta_update,
+ .reta_query = sfc_dev_rss_reta_query,
+ .rss_hash_update = sfc_dev_rss_hash_update,
+ .rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
+#endif
+ .filter_ctrl = sfc_dev_filter_ctrl,
+ .set_mc_addr_list = sfc_set_mc_addr_list,
+ .rxq_info_get = sfc_rx_queue_info_get,
+ .txq_info_get = sfc_tx_queue_info_get,
+ .fw_version_get = sfc_fw_version_get,
+};
+
+static int
+sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int avail_caps = 0;
+ const char *rx_name = NULL;
+ const char *tx_name = NULL;
+ int rc;
+
+ switch (sa->family) {
+ case EFX_FAMILY_HUNTINGTON:
+ case EFX_FAMILY_MEDFORD:
+ avail_caps |= SFC_DP_HW_FW_CAP_EF10;
+ break;
+ default:
+ break;
+ }
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
+ sfc_kvarg_string_handler, &rx_name);
+ if (rc != 0)
+ goto fail_kvarg_rx_datapath;
+
+ if (rx_name != NULL) {
+ sa->dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
+ if (sa->dp_rx == NULL) {
+ sfc_err(sa, "Rx datapath %s not found", rx_name);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ if (!sfc_dp_match_hw_fw_caps(&sa->dp_rx->dp, avail_caps)) {
+ sfc_err(sa,
+ "Insufficient Hw/FW capabilities to use Rx datapath %s",
+ rx_name);
+ rc = EINVAL;
+ goto fail_dp_rx;
+ }
+ } else {
+ sa->dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
+ if (sa->dp_rx == NULL) {
+ sfc_err(sa, "Rx datapath by caps %#x not found",
+ avail_caps);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ }
+
+ sfc_info(sa, "use %s Rx datapath", sa->dp_rx->dp.name);
+
+ dev->rx_pkt_burst = sa->dp_rx->pkt_burst;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
+ sfc_kvarg_string_handler, &tx_name);
+ if (rc != 0)
+ goto fail_kvarg_tx_datapath;
+
+ if (tx_name != NULL) {
+ sa->dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
+ if (sa->dp_tx == NULL) {
+ sfc_err(sa, "Tx datapath %s not found", tx_name);
+ rc = ENOENT;
+ goto fail_dp_tx;
+ }
+ if (!sfc_dp_match_hw_fw_caps(&sa->dp_tx->dp, avail_caps)) {
+ sfc_err(sa,
+ "Insufficient Hw/FW capabilities to use Tx datapath %s",
+ tx_name);
+ rc = EINVAL;
+ goto fail_dp_tx;
+ }
+ } else {
+ sa->dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
+ if (sa->dp_tx == NULL) {
+ sfc_err(sa, "Tx datapath by caps %#x not found",
+ avail_caps);
+ rc = ENOENT;
+ goto fail_dp_tx;
+ }
+ }
+
+ sfc_info(sa, "use %s Tx datapath", sa->dp_tx->dp.name);
+
+ dev->tx_pkt_burst = sa->dp_tx->pkt_burst;
+
+ dev->dev_ops = &sfc_eth_dev_ops;
+
+ return 0;
+
+fail_dp_tx:
+fail_kvarg_tx_datapath:
+fail_dp_rx:
+fail_kvarg_rx_datapath:
+ return rc;
+}
+
+static void
+sfc_register_dp(void)
+{
+ /* Register once */
+ if (TAILQ_EMPTY(&sfc_dp_head)) {
+ /* Prefer EF10 datapath */
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
+
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
+ }
+}
+
+static int
+sfc_eth_dev_init(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
+ int rc;
+ const efx_nic_cfg_t *encp;
+ const struct ether_addr *from;
+
+ sfc_register_dp();
+
+ /* Required for logging */
+ sa->eth_dev = dev;
+
+ /* Copy PCI device info to the dev->data */
+ rte_eth_copy_pci_info(dev, pci_dev);
+
+ rc = sfc_kvargs_parse(sa);
+ if (rc != 0)
+ goto fail_kvargs_parse;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
+ sfc_kvarg_bool_handler, &sa->debug_init);
+ if (rc != 0)
+ goto fail_kvarg_debug_init;
+
+ sfc_log_init(sa, "entry");
+
+ dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
+ if (dev->data->mac_addrs == NULL) {
+ rc = ENOMEM;
+ goto fail_mac_addrs;
+ }
+
+ sfc_adapter_lock_init(sa);
+ sfc_adapter_lock(sa);
+
+ sfc_log_init(sa, "probing");
+ rc = sfc_probe(sa);
+ if (rc != 0)
+ goto fail_probe;
+
+ sfc_log_init(sa, "set device ops");
+ rc = sfc_eth_dev_set_ops(dev);
+ if (rc != 0)
+ goto fail_set_ops;
+
+ sfc_log_init(sa, "attaching");
+ rc = sfc_attach(sa);
+ if (rc != 0)
+ goto fail_attach;
+
+ encp = efx_nic_cfg_get(sa->nic);
+
+ /*
+ * The arguments are really reverse order in comparison to
+ * Linux kernel. Copy from NIC config to Ethernet device data.
+ */
+ from = (const struct ether_addr *)(encp->enc_mac_addr);
+ ether_addr_copy(from, &dev->data->mac_addrs[0]);
+
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_attach:
+fail_set_ops:
+ sfc_unprobe(sa);
+
+fail_probe:
+ sfc_adapter_unlock(sa);
+ sfc_adapter_lock_fini(sa);
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+fail_mac_addrs:
+fail_kvarg_debug_init:
+ sfc_kvargs_cleanup(sa);
+
+fail_kvargs_parse:
+ sfc_log_init(sa, "failed %d", rc);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_eth_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+
+ sfc_detach(sa);
+ sfc_unprobe(sa);
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ sfc_kvargs_cleanup(sa);
+
+ sfc_adapter_unlock(sa);
+ sfc_adapter_lock_fini(sa);
+
+ sfc_log_init(sa, "done");
+
+ /* Required for logging, so cleanup last */
+ sa->eth_dev = NULL;
+ return 0;
+}
+
+static const struct rte_pci_id pci_id_sfc_efx_map[] = {
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
+ { .vendor_id = 0 /* sentinel */ }
+};
+
+static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct sfc_adapter), sfc_eth_dev_init);
+}
+
+static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
+}
+
+static struct rte_pci_driver sfc_efx_pmd = {
+ .id_table = pci_id_sfc_efx_map,
+ .drv_flags =
+ RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_NEED_MAPPING,
+ .probe = sfc_eth_dev_pci_probe,
+ .remove = sfc_eth_dev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
+ SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
+ SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
+ SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
+ SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long> "
+ SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
+ SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
new file mode 100644
index 00000000..160d39f9
--- /dev/null
+++ b/drivers/net/sfc/sfc_ev.c
@@ -0,0 +1,921 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_alarm.h>
+#include <rte_branch_prediction.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_tx.h"
+#include "sfc_kvargs.h"
+
+
+/* Initial delay when waiting for event queue init complete event */
+#define SFC_EVQ_INIT_BACKOFF_START_US (1)
+/* Maximum delay between event queue polling attempts */
+#define SFC_EVQ_INIT_BACKOFF_MAX_US (10 * 1000)
+/* Event queue init approx timeout */
+#define SFC_EVQ_INIT_TIMEOUT_US (2 * US_PER_S)
+
+/* Management event queue polling period in microseconds */
+#define SFC_MGMT_EV_QPOLL_PERIOD_US (US_PER_S)
+
+static const char *
+sfc_evq_type2str(enum sfc_evq_type type)
+{
+ switch (type) {
+ case SFC_EVQ_TYPE_MGMT:
+ return "mgmt-evq";
+ case SFC_EVQ_TYPE_RX:
+ return "rx-evq";
+ case SFC_EVQ_TYPE_TX:
+ return "tx-evq";
+ default:
+ SFC_ASSERT(B_FALSE);
+ return NULL;
+ }
+}
+
+static boolean_t
+sfc_ev_initialized(void *arg)
+{
+ struct sfc_evq *evq = arg;
+
+ /* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
+ SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
+ evq->init_state == SFC_EVQ_STARTED);
+
+ evq->init_state = SFC_EVQ_STARTED;
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
+ uint32_t size, uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa,
+ "EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
+ evq->evq_index, label, id, size, flags);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
+ uint32_t size, uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_efx_rxq *rxq;
+ unsigned int stop;
+ unsigned int pending_id;
+ unsigned int delta;
+ unsigned int i;
+ struct sfc_efx_rx_sw_desc *rxd;
+
+ if (unlikely(evq->exception))
+ goto done;
+
+ rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);
+
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->evq == evq);
+ SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);
+
+ stop = (id + 1) & rxq->ptr_mask;
+ pending_id = rxq->pending & rxq->ptr_mask;
+ delta = (stop >= pending_id) ? (stop - pending_id) :
+ (rxq->ptr_mask + 1 - pending_id + stop);
+
+ if (delta == 0) {
+ /*
+ * Rx event with no new descriptors done and zero length
+ * is used to abort scattered packet when there is no room
+ * for the tail.
+ */
+ if (unlikely(size != 0)) {
+ evq->exception = B_TRUE;
+ sfc_err(evq->sa,
+ "EVQ %u RxQ %u invalid RX abort "
+ "(id=%#x size=%u flags=%#x); needs restart",
+ evq->evq_index, rxq->dp.dpq.queue_id,
+ id, size, flags);
+ goto done;
+ }
+
+ /* Add discard flag to the first fragment */
+ rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
+ /* Remove continue flag from the last fragment */
+ rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
+ } else if (unlikely(delta > rxq->batch_max)) {
+ evq->exception = B_TRUE;
+
+ sfc_err(evq->sa,
+ "EVQ %u RxQ %u completion out of order "
+ "(id=%#x delta=%u flags=%#x); needs restart",
+ evq->evq_index, rxq->dp.dpq.queue_id,
+ id, delta, flags);
+
+ goto done;
+ }
+
+ for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
+ rxd = &rxq->sw_desc[i];
+
+ rxd->flags = flags;
+
+ SFC_ASSERT(size < (1 << 16));
+ rxd->size = (uint16_t)size;
+ }
+
+ rxq->pending += delta;
+
+done:
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
+ __rte_unused uint32_t size, __rte_unused uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL);
+ return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
+}
+
+static boolean_t
+sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
+ evq->evq_index, label, id);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_txq *dp_txq;
+ struct sfc_efx_txq *txq;
+ unsigned int stop;
+ unsigned int delta;
+
+ dp_txq = evq->dp_txq;
+ SFC_ASSERT(dp_txq != NULL);
+
+ txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ SFC_ASSERT(txq->evq == evq);
+
+ if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
+ goto done;
+
+ stop = (id + 1) & txq->ptr_mask;
+ id = txq->pending & txq->ptr_mask;
+
+ delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);
+
+ txq->pending += delta;
+
+done:
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_txq *dp_txq;
+
+ dp_txq = evq->dp_txq;
+ SFC_ASSERT(dp_txq != NULL);
+
+ SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL);
+ return evq->sa->dp_tx->qtx_ev(dp_txq, id);
+}
+
+static boolean_t
+sfc_ev_exception(void *arg, __rte_unused uint32_t code,
+ __rte_unused uint32_t data)
+{
+ struct sfc_evq *evq = arg;
+
+ if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
+ return B_FALSE;
+
+ evq->exception = B_TRUE;
+ sfc_warn(evq->sa,
+ "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
+ " needs recovery",
+ (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
+ (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
+ (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
+ (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
+ (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
+ (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
+ (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
+ (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
+ "UNKNOWN",
+ code, data, evq->evq_index);
+
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
+ evq->evq_index, rxq_hw_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+ struct sfc_rxq *rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->hw_index == rxq_hw_index);
+ SFC_ASSERT(rxq->evq == evq);
+ sfc_rx_qflush_done(rxq);
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
+ evq->evq_index, rxq_hw_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+ struct sfc_rxq *rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->hw_index == rxq_hw_index);
+ SFC_ASSERT(rxq->evq == evq);
+ sfc_rx_qflush_failed(rxq);
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
+ evq->evq_index, txq_hw_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_txq *dp_txq;
+ struct sfc_txq *txq;
+
+ dp_txq = evq->dp_txq;
+ SFC_ASSERT(dp_txq != NULL);
+
+ txq = sfc_txq_by_dp_txq(dp_txq);
+ SFC_ASSERT(txq != NULL);
+ SFC_ASSERT(txq->hw_index == txq_hw_index);
+ SFC_ASSERT(txq->evq == evq);
+ sfc_tx_qflush_done(txq);
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_software(void *arg, uint16_t magic)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
+ evq->evq_index, magic);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_sram(void *arg, uint32_t code)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
+ evq->evq_index, code);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_wake_up(void *arg, uint32_t index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
+ evq->evq_index, index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_timer(void *arg, uint32_t index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
+ evq->evq_index, index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected link change event",
+ evq->evq_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_adapter *sa = evq->sa;
+ struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
+ struct rte_eth_link new_link;
+ uint64_t new_link_u64;
+ uint64_t old_link_u64;
+
+ EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
+
+ sfc_port_link_mode_to_info(link_mode, &new_link);
+
+ new_link_u64 = *(uint64_t *)&new_link;
+ do {
+ old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
+ if (old_link_u64 == new_link_u64)
+ break;
+
+ if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
+ old_link_u64, new_link_u64)) {
+ evq->sa->port.lsc_seq++;
+ break;
+ }
+ } while (B_TRUE);
+
+ return B_FALSE;
+}
+
+static const efx_ev_callbacks_t sfc_ev_callbacks = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_nop_rx,
+ .eec_tx = sfc_ev_nop_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_efx_rx,
+ .eec_tx = sfc_ev_nop_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_dp_rx,
+ .eec_tx = sfc_ev_nop_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_nop_rx,
+ .eec_tx = sfc_ev_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_nop_rx,
+ .eec_tx = sfc_ev_dp_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+
+void
+sfc_ev_qpoll(struct sfc_evq *evq)
+{
+ SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
+ evq->init_state == SFC_EVQ_STARTING);
+
+ /* Synchronize the DMA memory for reading not required */
+
+ efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);
+
+ if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
+ struct sfc_adapter *sa = evq->sa;
+ int rc;
+
+ if (evq->dp_rxq != NULL) {
+ unsigned int rxq_sw_index;
+
+ rxq_sw_index = evq->dp_rxq->dpq.queue_id;
+
+ sfc_warn(sa,
+ "restart RxQ %u because of exception on its EvQ %u",
+ rxq_sw_index, evq->evq_index);
+
+ sfc_rx_qstop(sa, rxq_sw_index);
+ rc = sfc_rx_qstart(sa, rxq_sw_index);
+ if (rc != 0)
+ sfc_err(sa, "cannot restart RxQ %u",
+ rxq_sw_index);
+ }
+
+ if (evq->dp_txq != NULL) {
+ unsigned int txq_sw_index;
+
+ txq_sw_index = evq->dp_txq->dpq.queue_id;
+
+ sfc_warn(sa,
+ "restart TxQ %u because of exception on its EvQ %u",
+ txq_sw_index, evq->evq_index);
+
+ sfc_tx_qstop(sa, txq_sw_index);
+ rc = sfc_tx_qstart(sa, txq_sw_index);
+ if (rc != 0)
+ sfc_err(sa, "cannot restart TxQ %u",
+ txq_sw_index);
+ }
+
+ if (evq->exception)
+ sfc_panic(sa, "unrecoverable exception on EvQ %u",
+ evq->evq_index);
+
+ sfc_adapter_unlock(sa);
+ }
+
+ /* Poll-mode driver does not re-prime the event queue for interrupts */
+}
+
+void
+sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
+{
+ if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
+ struct sfc_evq *mgmt_evq = sa->mgmt_evq;
+
+ if (mgmt_evq->init_state == SFC_EVQ_STARTED)
+ sfc_ev_qpoll(mgmt_evq);
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ }
+}
+
+int
+sfc_ev_qprime(struct sfc_evq *evq)
+{
+ SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
+ return efx_ev_qprime(evq->common, evq->read_ptr);
+}
+
+/* Event queue HW index allocation scheme is described in sfc_ev.h. */
+int
+sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
+{
+ struct sfc_adapter *sa = evq->sa;
+ efsys_mem_t *esmp;
+ uint32_t evq_flags = sa->evq_flags;
+ unsigned int total_delay_us;
+ unsigned int delay_us;
+ int rc;
+
+ sfc_log_init(sa, "hw_index=%u", hw_index);
+
+ esmp = &evq->mem;
+
+ evq->evq_index = hw_index;
+
+ /* Clear all events */
+ (void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));
+
+ if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
+ evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
+ else
+ evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;
+
+ /* Create the common code event queue */
+ rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
+ 0 /* unused on EF10 */, 0, evq_flags,
+ &evq->common);
+ if (rc != 0)
+ goto fail_ev_qcreate;
+
+ SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
+ if (evq->dp_rxq != 0) {
+ if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+ evq->callbacks = &sfc_ev_callbacks_efx_rx;
+ else
+ evq->callbacks = &sfc_ev_callbacks_dp_rx;
+ } else if (evq->dp_txq != 0) {
+ if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+ evq->callbacks = &sfc_ev_callbacks_efx_tx;
+ else
+ evq->callbacks = &sfc_ev_callbacks_dp_tx;
+ } else {
+ evq->callbacks = &sfc_ev_callbacks;
+ }
+
+ evq->init_state = SFC_EVQ_STARTING;
+
+ /* Wait for the initialization event */
+ total_delay_us = 0;
+ delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
+ do {
+ (void)sfc_ev_qpoll(evq);
+
+ /* Check to see if the initialization complete indication
+ * posted by the hardware.
+ */
+ if (evq->init_state == SFC_EVQ_STARTED)
+ goto done;
+
+ /* Give event queue some time to init */
+ rte_delay_us(delay_us);
+
+ total_delay_us += delay_us;
+
+ /* Exponential backoff */
+ delay_us *= 2;
+ if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
+ delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;
+
+ } while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);
+
+ rc = ETIMEDOUT;
+ goto fail_timedout;
+
+done:
+ return 0;
+
+fail_timedout:
+ evq->init_state = SFC_EVQ_INITIALIZED;
+ efx_ev_qdestroy(evq->common);
+
+fail_ev_qcreate:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_qstop(struct sfc_evq *evq)
+{
+ if (evq == NULL)
+ return;
+
+ sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);
+
+ if (evq->init_state != SFC_EVQ_STARTED)
+ return;
+
+ evq->init_state = SFC_EVQ_INITIALIZED;
+ evq->callbacks = NULL;
+ evq->read_ptr = 0;
+ evq->exception = B_FALSE;
+
+ efx_ev_qdestroy(evq->common);
+
+ evq->evq_index = 0;
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll(void *arg)
+{
+ struct sfc_adapter *sa = arg;
+ int rc;
+
+ sfc_ev_mgmt_qpoll(sa);
+
+ rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
+ sfc_ev_mgmt_periodic_qpoll, sa);
+ if (rc == -ENOTSUP) {
+ sfc_warn(sa, "alarms are not supported");
+ sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
+ } else if (rc != 0) {
+ sfc_err(sa,
+ "cannot rearm management EVQ polling alarm (rc=%d)",
+ rc);
+ }
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
+{
+ sfc_ev_mgmt_periodic_qpoll(sa);
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
+{
+ rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
+}
+
+int
+sfc_ev_start(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ rc = efx_ev_init(sa->nic);
+ if (rc != 0)
+ goto fail_ev_init;
+
+ /* Start management EVQ used for global events */
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+
+ rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
+ if (rc != 0)
+ goto fail_mgmt_evq_start;
+
+ if (sa->intr.lsc_intr) {
+ rc = sfc_ev_qprime(sa->mgmt_evq);
+ if (rc != 0)
+ goto fail_evq0_prime;
+ }
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
+ /*
+ * Start management EVQ polling. If interrupts are disabled
+ * (not used), it is required to process link status change
+ * and other device level events to avoid unrecoverable
+ * error because the event queue overflow.
+ */
+ sfc_ev_mgmt_periodic_qpoll_start(sa);
+
+ /*
+ * Rx/Tx event queues are started/stopped when corresponding
+ * Rx/Tx queue is started/stopped.
+ */
+
+ return 0;
+
+fail_evq0_prime:
+ sfc_ev_qstop(sa->mgmt_evq);
+
+fail_mgmt_evq_start:
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ efx_ev_fini(sa->nic);
+
+fail_ev_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_stop(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sfc_ev_mgmt_periodic_qpoll_stop(sa);
+
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+ sfc_ev_qstop(sa->mgmt_evq);
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
+ efx_ev_fini(sa->nic);
+}
+
+int
+sfc_ev_qinit(struct sfc_adapter *sa,
+ enum sfc_evq_type type, unsigned int type_index,
+ unsigned int entries, int socket_id, struct sfc_evq **evqp)
+{
+ struct sfc_evq *evq;
+ int rc;
+
+ sfc_log_init(sa, "type=%s type_index=%u",
+ sfc_evq_type2str(type), type_index);
+
+ SFC_ASSERT(rte_is_power_of_2(entries));
+
+ rc = ENOMEM;
+ evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (evq == NULL)
+ goto fail_evq_alloc;
+
+ evq->sa = sa;
+ evq->type = type;
+ evq->entries = entries;
+
+ /* Allocate DMA space */
+ rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
+ EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ evq->init_state = SFC_EVQ_INITIALIZED;
+
+ sa->evq_count++;
+
+ *evqp = evq;
+
+ return 0;
+
+fail_dma_alloc:
+ rte_free(evq);
+
+fail_evq_alloc:
+
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_qfini(struct sfc_evq *evq)
+{
+ struct sfc_adapter *sa = evq->sa;
+
+ SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
+
+ sfc_dma_free(sa, &evq->mem);
+
+ rte_free(evq);
+
+ SFC_ASSERT(sa->evq_count > 0);
+ sa->evq_count--;
+}
+
+static int
+sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ uint64_t *value = opaque;
+
+ if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
+ *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
+ else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
+ *value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
+ else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
+ *value = EFX_EVQ_FLAGS_TYPE_AUTO;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+sfc_ev_attach(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
+ rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
+ sfc_kvarg_perf_profile_handler,
+ &sa->evq_flags);
+ if (rc != 0) {
+ sfc_err(sa, "invalid %s parameter value",
+ SFC_KVARG_PERF_PROFILE);
+ goto fail_kvarg_perf_profile;
+ }
+
+ sa->mgmt_evq_index = 0;
+ rte_spinlock_init(&sa->mgmt_evq_lock);
+
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
+ sa->socket_id, &sa->mgmt_evq);
+ if (rc != 0)
+ goto fail_mgmt_evq_init;
+
+ /*
+ * Rx/Tx event queues are created/destroyed when corresponding
+ * Rx/Tx queue is created/destroyed.
+ */
+
+ return 0;
+
+fail_mgmt_evq_init:
+
+fail_kvarg_perf_profile:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_detach(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sfc_ev_qfini(sa->mgmt_evq);
+
+ if (sa->evq_count != 0)
+ sfc_err(sa, "%u EvQs are not destroyed before detach",
+ sa->evq_count);
+}
diff --git a/drivers/net/sfc/sfc_ev.h b/drivers/net/sfc/sfc_ev.h
new file mode 100644
index 00000000..065defe0
--- /dev/null
+++ b/drivers/net/sfc/sfc_ev.h
@@ -0,0 +1,129 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_EV_H_
+#define _SFC_EV_H_
+
+#include <rte_ethdev.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Number of entries in the management event queue */
+#define SFC_MGMT_EVQ_ENTRIES (EFX_EVQ_MINNEVS)
+
+struct sfc_adapter;
+struct sfc_dp_rxq;
+struct sfc_dp_txq;
+
+enum sfc_evq_state {
+ SFC_EVQ_UNINITIALIZED = 0,
+ SFC_EVQ_INITIALIZED,
+ SFC_EVQ_STARTING,
+ SFC_EVQ_STARTED,
+
+ SFC_EVQ_NSTATES
+};
+
+enum sfc_evq_type {
+ SFC_EVQ_TYPE_MGMT = 0,
+ SFC_EVQ_TYPE_RX,
+ SFC_EVQ_TYPE_TX,
+
+ SFC_EVQ_NTYPES
+};
+
+struct sfc_evq {
+ /* Used on datapath */
+ efx_evq_t *common;
+ const efx_ev_callbacks_t *callbacks;
+ unsigned int read_ptr;
+ boolean_t exception;
+ efsys_mem_t mem;
+ struct sfc_dp_rxq *dp_rxq;
+ struct sfc_dp_txq *dp_txq;
+
+ /* Not used on datapath */
+ struct sfc_adapter *sa;
+ unsigned int evq_index;
+ enum sfc_evq_state init_state;
+ enum sfc_evq_type type;
+ unsigned int entries;
+};
+
+/*
+ * Functions below define event queue to transmit/receive queue and vice
+ * versa mapping.
+ * Own event queue is allocated for management, each Rx and each Tx queue.
+ * Zero event queue is used for management events.
+ * Rx event queues from 1 to RxQ number follow management event queue.
+ * Tx event queues follow Rx event queues.
+ */
+
+static inline unsigned int
+sfc_evq_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa,
+ unsigned int rxq_sw_index)
+{
+ return 1 + rxq_sw_index;
+}
+
+static inline unsigned int
+sfc_evq_index_by_txq_sw_index(struct sfc_adapter *sa, unsigned int txq_sw_index)
+{
+ return 1 + sa->eth_dev->data->nb_rx_queues + txq_sw_index;
+}
+
+int sfc_ev_attach(struct sfc_adapter *sa);
+void sfc_ev_detach(struct sfc_adapter *sa);
+int sfc_ev_start(struct sfc_adapter *sa);
+void sfc_ev_stop(struct sfc_adapter *sa);
+
+int sfc_ev_qinit(struct sfc_adapter *sa,
+ enum sfc_evq_type type, unsigned int type_index,
+ unsigned int entries, int socket_id, struct sfc_evq **evqp);
+void sfc_ev_qfini(struct sfc_evq *evq);
+int sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index);
+void sfc_ev_qstop(struct sfc_evq *evq);
+
+int sfc_ev_qprime(struct sfc_evq *evq);
+void sfc_ev_qpoll(struct sfc_evq *evq);
+
+void sfc_ev_mgmt_qpoll(struct sfc_adapter *sa);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_EV_H_ */
diff --git a/drivers/net/sfc/sfc_filter.c b/drivers/net/sfc/sfc_filter.c
new file mode 100644
index 00000000..58b74de7
--- /dev/null
+++ b/drivers/net/sfc/sfc_filter.c
@@ -0,0 +1,137 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+
+boolean_t
+sfc_filter_is_match_supported(struct sfc_adapter *sa, uint32_t match)
+{
+ struct sfc_filter *filter = &sa->filter;
+ size_t i;
+
+ for (i = 0; i < filter->supported_match_num; ++i) {
+ if (match == filter->supported_match[i])
+ return B_TRUE;
+ }
+
+ return B_FALSE;
+}
+
+static int
+sfc_filter_cache_match_supported(struct sfc_adapter *sa)
+{
+ struct sfc_filter *filter = &sa->filter;
+ size_t num = filter->supported_match_num;
+ uint32_t *buf = filter->supported_match;
+ unsigned int retry;
+ int rc;
+
+ /* Just a guess of possibly sufficient entries */
+ if (num == 0)
+ num = 16;
+
+ for (retry = 0; retry < 2; ++retry) {
+ if (num != filter->supported_match_num) {
+ rc = ENOMEM;
+ buf = rte_realloc(buf, num * sizeof(*buf), 0);
+ if (buf == NULL)
+ goto fail_realloc;
+ }
+
+ rc = efx_filter_supported_filters(sa->nic, buf, num, &num);
+ if (rc == 0) {
+ filter->supported_match_num = num;
+ filter->supported_match = buf;
+
+ return 0;
+ } else if (rc != ENOSPC) {
+ goto fail_efx_filter_supported_filters;
+ }
+ }
+
+ SFC_ASSERT(rc == ENOSPC);
+
+fail_efx_filter_supported_filters:
+fail_realloc:
+ /* Original pointer is not freed by rte_realloc() on failure */
+ rte_free(buf);
+ filter->supported_match = NULL;
+ filter->supported_match_num = 0;
+ return rc;
+}
+
+int
+sfc_filter_attach(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ rc = efx_filter_init(sa->nic);
+ if (rc != 0)
+ goto fail_filter_init;
+
+ rc = sfc_filter_cache_match_supported(sa);
+ if (rc != 0)
+ goto fail_cache_match_supported;
+
+ efx_filter_fini(sa->nic);
+
+ sfc_log_init(sa, "done");
+
+ return 0;
+
+fail_cache_match_supported:
+ efx_filter_fini(sa->nic);
+
+fail_filter_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_filter_detach(struct sfc_adapter *sa)
+{
+ struct sfc_filter *filter = &sa->filter;
+
+ sfc_log_init(sa, "entry");
+
+ rte_free(filter->supported_match);
+ filter->supported_match = NULL;
+ filter->supported_match_num = 0;
+
+ sfc_log_init(sa, "done");
+}
diff --git a/drivers/net/sfc/sfc_filter.h b/drivers/net/sfc/sfc_filter.h
new file mode 100644
index 00000000..d884f37d
--- /dev/null
+++ b/drivers/net/sfc/sfc_filter.h
@@ -0,0 +1,62 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_FILTER_H
+#define _SFC_FILTER_H
+
+#include "efx.h"
+
+#include "sfc_flow.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_filter {
+ /** Number of elements in match_supported array */
+ size_t supported_match_num;
+ /** Driver cache of supported filter match masks */
+ uint32_t *supported_match;
+ /** List of flow rules */
+ struct sfc_flow_list flow_list;
+};
+
+struct sfc_adapter;
+
+int sfc_filter_attach(struct sfc_adapter *sa);
+void sfc_filter_detach(struct sfc_adapter *sa);
+
+boolean_t sfc_filter_is_match_supported(struct sfc_adapter *sa, uint32_t match);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_FILTER_H */
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
new file mode 100644
index 00000000..c3ea43a6
--- /dev/null
+++ b/drivers/net/sfc/sfc_flow.c
@@ -0,0 +1,1175 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_tailq.h>
+#include <rte_common.h>
+#include <rte_ethdev.h>
+#include <rte_eth_ctrl.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_rx.h"
+#include "sfc_filter.h"
+#include "sfc_flow.h"
+#include "sfc_log.h"
+
+/*
+ * At now flow API is implemented in such a manner that each
+ * flow rule is converted to a hardware filter.
+ * All elements of flow rule (attributes, pattern items, actions)
+ * correspond to one or more fields in the efx_filter_spec_s structure
+ * that is responsible for the hardware filter.
+ */
+
+enum sfc_flow_item_layers {
+ SFC_FLOW_ITEM_ANY_LAYER,
+ SFC_FLOW_ITEM_START_LAYER,
+ SFC_FLOW_ITEM_L2,
+ SFC_FLOW_ITEM_L3,
+ SFC_FLOW_ITEM_L4,
+};
+
+typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
+ efx_filter_spec_t *spec,
+ struct rte_flow_error *error);
+
+struct sfc_flow_item {
+ enum rte_flow_item_type type; /* Type of item */
+ enum sfc_flow_item_layers layer; /* Layer of item */
+ enum sfc_flow_item_layers prev_layer; /* Previous layer of item */
+ sfc_flow_item_parse *parse; /* Parsing function */
+};
+
+static sfc_flow_item_parse sfc_flow_parse_void;
+static sfc_flow_item_parse sfc_flow_parse_eth;
+static sfc_flow_item_parse sfc_flow_parse_vlan;
+static sfc_flow_item_parse sfc_flow_parse_ipv4;
+static sfc_flow_item_parse sfc_flow_parse_ipv6;
+static sfc_flow_item_parse sfc_flow_parse_tcp;
+static sfc_flow_item_parse sfc_flow_parse_udp;
+
+static boolean_t
+sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
+{
+ uint8_t sum = 0;
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ sum |= buf[i];
+
+ return (sum == 0) ? B_TRUE : B_FALSE;
+}
+
+/*
+ * Validate item and prepare structures spec and mask for parsing
+ */
+static int
+sfc_flow_parse_init(const struct rte_flow_item *item,
+ const void **spec_ptr,
+ const void **mask_ptr,
+ const void *supp_mask,
+ const void *def_mask,
+ unsigned int size,
+ struct rte_flow_error *error)
+{
+ const uint8_t *spec;
+ const uint8_t *mask;
+ const uint8_t *last;
+ uint8_t match;
+ uint8_t supp;
+ unsigned int i;
+
+ if (item == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "NULL item");
+ return -rte_errno;
+ }
+
+ if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Mask or last is set without spec");
+ return -rte_errno;
+ }
+
+ /*
+ * If "mask" is not set, default mask is used,
+ * but if default mask is NULL, "mask" should be set
+ */
+ if (item->mask == NULL) {
+ if (def_mask == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Mask should be specified");
+ return -rte_errno;
+ }
+
+ mask = (const uint8_t *)def_mask;
+ } else {
+ mask = (const uint8_t *)item->mask;
+ }
+
+ spec = (const uint8_t *)item->spec;
+ last = (const uint8_t *)item->last;
+
+ if (spec == NULL)
+ goto exit;
+
+ /*
+ * If field values in "last" are either 0 or equal to the corresponding
+ * values in "spec" then they are ignored
+ */
+ if (last != NULL &&
+ !sfc_flow_is_zero(last, size) &&
+ memcmp(last, spec, size) != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Ranging is not supported");
+ return -rte_errno;
+ }
+
+ if (supp_mask == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Supported mask for item should be specified");
+ return -rte_errno;
+ }
+
+ /* Check that mask and spec not asks for more match than supp_mask */
+ for (i = 0; i < size; i++) {
+ match = spec[i] | mask[i];
+ supp = ((const uint8_t *)supp_mask)[i];
+
+ if ((match | supp) != supp) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Item's field is not supported");
+ return -rte_errno;
+ }
+ }
+
+exit:
+ *spec_ptr = spec;
+ *mask_ptr = mask;
+ return 0;
+}
+
+/*
+ * Protocol parsers.
+ * Masking is not supported, so masks in items should be either
+ * full or empty (zeroed) and set only for supported fields which
+ * are specified in the supp_mask.
+ */
+
+static int
+sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
+ __rte_unused efx_filter_spec_t *efx_spec,
+ __rte_unused struct rte_flow_error *error)
+{
+ return 0;
+}
+
+/**
+ * Convert Ethernet item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only source and destination addresses and
+ * Ethernet type fields are supported. In addition to full and
+ * empty masks of destination address, individual/group mask is
+ * also supported. If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_eth(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_eth *spec = NULL;
+ const struct rte_flow_item_eth *mask = NULL;
+ const struct rte_flow_item_eth supp_mask = {
+ .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .type = 0xffff,
+ };
+ const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_eth_mask,
+ sizeof(struct rte_flow_item_eth),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /* If "spec" is not set, could be any Ethernet */
+ if (spec == NULL)
+ return 0;
+
+ if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
+ EFX_MAC_ADDR_LEN);
+ } else if (memcmp(mask->dst.addr_bytes, ig_mask,
+ EFX_MAC_ADDR_LEN) == 0) {
+ if (is_unicast_ether_addr(&spec->dst))
+ efx_spec->efs_match_flags |=
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
+ else
+ efx_spec->efs_match_flags |=
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
+ } else if (!is_zero_ether_addr(&mask->dst)) {
+ goto fail_bad_mask;
+ }
+
+ if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
+ rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
+ EFX_MAC_ADDR_LEN);
+ } else if (!is_zero_ether_addr(&mask->src)) {
+ goto fail_bad_mask;
+ }
+
+ /*
+ * Ether type is in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used
+ */
+ if (mask->type == supp_mask.type) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = rte_bswap16(spec->type);
+ } else if (mask->type != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the ETH pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert VLAN item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only VID field is supported.
+ * The mask can not be NULL. Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_vlan(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ uint16_t vid;
+ const struct rte_flow_item_vlan *spec = NULL;
+ const struct rte_flow_item_vlan *mask = NULL;
+ const struct rte_flow_item_vlan supp_mask = {
+ .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ NULL,
+ sizeof(struct rte_flow_item_vlan),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * VID is in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used.
+ * If two VLAN items are included, the first matches
+ * the outer tag and the next matches the inner tag.
+ */
+ if (mask->tci == supp_mask.tci) {
+ vid = rte_bswap16(spec->tci);
+
+ if (!(efx_spec->efs_match_flags &
+ EFX_FILTER_MATCH_OUTER_VID)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ efx_spec->efs_outer_vid = vid;
+ } else if (!(efx_spec->efs_match_flags &
+ EFX_FILTER_MATCH_INNER_VID)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
+ efx_spec->efs_inner_vid = vid;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "More than two VLAN items");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN ID in TCI match is required");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Convert IPv4 item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only source and destination addresses and
+ * protocol fields are supported. If the mask is NULL, default
+ * mask will be used. Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_ipv4(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_ipv4 *spec = NULL;
+ const struct rte_flow_item_ipv4 *mask = NULL;
+ const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
+ const struct rte_flow_item_ipv4 supp_mask = {
+ .hdr = {
+ .src_addr = 0xffffffff,
+ .dst_addr = 0xffffffff,
+ .next_proto_id = 0xff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_ipv4_mask,
+ sizeof(struct rte_flow_item_ipv4),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by IPv4 source and destination addresses requires
+ * the appropriate ETHER_TYPE in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = ether_type_ipv4;
+ } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Ethertype in pattern with IPV4 item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * IPv4 addresses are in big-endian byte order in item and in
+ * efx_spec
+ */
+ if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
+ } else if (mask->hdr.src_addr != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
+ } else if (mask->hdr.dst_addr != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
+ } else if (mask->hdr.next_proto_id != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the IPV4 pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert IPv6 item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only source and destination addresses and
+ * next header fields are supported. If the mask is NULL, default
+ * mask will be used. Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_ipv6(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_ipv6 *spec = NULL;
+ const struct rte_flow_item_ipv6 *mask = NULL;
+ const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
+ const struct rte_flow_item_ipv6 supp_mask = {
+ .hdr = {
+ .src_addr = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff },
+ .dst_addr = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff },
+ .proto = 0xff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_ipv6_mask,
+ sizeof(struct rte_flow_item_ipv6),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by IPv6 source and destination addresses requires
+ * the appropriate ETHER_TYPE in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = ether_type_ipv6;
+ } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Ethertype in pattern with IPV6 item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * IPv6 addresses are in big-endian byte order in item and in
+ * efx_spec
+ */
+ if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
+ sizeof(mask->hdr.src_addr)) == 0) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
+
+ RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
+ sizeof(spec->hdr.src_addr));
+ rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
+ sizeof(efx_spec->efs_rem_host));
+ } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
+ sizeof(mask->hdr.src_addr))) {
+ goto fail_bad_mask;
+ }
+
+ if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
+ sizeof(mask->hdr.dst_addr)) == 0) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+
+ RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
+ sizeof(spec->hdr.dst_addr));
+ rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
+ sizeof(efx_spec->efs_loc_host));
+ } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
+ sizeof(mask->hdr.dst_addr))) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.proto == supp_mask.hdr.proto) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = spec->hdr.proto;
+ } else if (mask->hdr.proto != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the IPV6 pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert TCP item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only source and destination ports fields
+ * are supported. If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_tcp(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_tcp *spec = NULL;
+ const struct rte_flow_item_tcp *mask = NULL;
+ const struct rte_flow_item_tcp supp_mask = {
+ .hdr = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_tcp_mask,
+ sizeof(struct rte_flow_item_tcp),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by TCP source and destination ports requires
+ * the appropriate IP_PROTO in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
+ } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IP proto in pattern with TCP item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * Source and destination ports are in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used
+ */
+ if (mask->hdr.src_port == supp_mask.hdr.src_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
+ } else if (mask->hdr.src_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
+ } else if (mask->hdr.dst_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the TCP pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert UDP item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only source and destination ports fields
+ * are supported. If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_udp(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_udp *spec = NULL;
+ const struct rte_flow_item_udp *mask = NULL;
+ const struct rte_flow_item_udp supp_mask = {
+ .hdr = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_udp_mask,
+ sizeof(struct rte_flow_item_udp),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by UDP source and destination ports requires
+ * the appropriate IP_PROTO in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
+ } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IP proto in pattern with UDP item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * Source and destination ports are in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used
+ */
+ if (mask->hdr.src_port == supp_mask.hdr.src_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
+ } else if (mask->hdr.src_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
+ } else if (mask->hdr.dst_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the UDP pattern item");
+ return -rte_errno;
+}
+
+static const struct sfc_flow_item sfc_flow_items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VOID,
+ .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
+ .layer = SFC_FLOW_ITEM_ANY_LAYER,
+ .parse = sfc_flow_parse_void,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .prev_layer = SFC_FLOW_ITEM_START_LAYER,
+ .layer = SFC_FLOW_ITEM_L2,
+ .parse = sfc_flow_parse_eth,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .prev_layer = SFC_FLOW_ITEM_L2,
+ .layer = SFC_FLOW_ITEM_L2,
+ .parse = sfc_flow_parse_vlan,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .prev_layer = SFC_FLOW_ITEM_L2,
+ .layer = SFC_FLOW_ITEM_L3,
+ .parse = sfc_flow_parse_ipv4,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .prev_layer = SFC_FLOW_ITEM_L2,
+ .layer = SFC_FLOW_ITEM_L3,
+ .parse = sfc_flow_parse_ipv6,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .prev_layer = SFC_FLOW_ITEM_L3,
+ .layer = SFC_FLOW_ITEM_L4,
+ .parse = sfc_flow_parse_tcp,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .prev_layer = SFC_FLOW_ITEM_L3,
+ .layer = SFC_FLOW_ITEM_L4,
+ .parse = sfc_flow_parse_udp,
+ },
+};
+
+/*
+ * Protocol-independent flow API support
+ */
+static int
+sfc_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ if (attr == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+ "NULL attribute");
+ return -rte_errno;
+ }
+ if (attr->group != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+ "Groups are not supported");
+ return -rte_errno;
+ }
+ if (attr->priority != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
+ "Priorities are not supported");
+ return -rte_errno;
+ }
+ if (attr->egress != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
+ "Egress is not supported");
+ return -rte_errno;
+ }
+ if (attr->ingress == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+ "Only ingress is supported");
+ return -rte_errno;
+ }
+
+ flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
+ flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;
+
+ return 0;
+}
+
+/* Get item from array sfc_flow_items */
+static const struct sfc_flow_item *
+sfc_flow_get_item(enum rte_flow_item_type type)
+{
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
+ if (sfc_flow_items[i].type == type)
+ return &sfc_flow_items[i];
+
+ return NULL;
+}
+
+static int
+sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc;
+ unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
+ const struct sfc_flow_item *item;
+
+ if (pattern == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+ "NULL pattern");
+ return -rte_errno;
+ }
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ item = sfc_flow_get_item(pattern->type);
+ if (item == NULL) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+ "Unsupported pattern item");
+ return -rte_errno;
+ }
+
+ /*
+ * Omitting one or several protocol layers at the beginning
+ * of pattern is supported
+ */
+ if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
+ prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
+ item->prev_layer != prev_layer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+ "Unexpected sequence of pattern items");
+ return -rte_errno;
+ }
+
+ rc = item->parse(pattern, &flow->spec, error);
+ if (rc != 0)
+ return rc;
+
+ if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
+ prev_layer = item->layer;
+ }
+
+ return 0;
+}
+
+static int
+sfc_flow_parse_queue(struct sfc_adapter *sa,
+ const struct rte_flow_action_queue *queue,
+ struct rte_flow *flow)
+{
+ struct sfc_rxq *rxq;
+
+ if (queue->index >= sa->rxq_count)
+ return -EINVAL;
+
+ rxq = sa->rxq_info[queue->index].rxq;
+ flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;
+
+ return 0;
+}
+
+static int
+sfc_flow_parse_actions(struct sfc_adapter *sa,
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc;
+ boolean_t is_specified = B_FALSE;
+
+ if (actions == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+ "NULL actions");
+ return -rte_errno;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ rc = sfc_flow_parse_queue(sa, actions->conf, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Bad QUEUE action");
+ return -rte_errno;
+ }
+
+ is_specified = B_TRUE;
+ break;
+
+ default:
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Action is not supported");
+ return -rte_errno;
+ }
+ }
+
+ if (!is_specified) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
+ "Action is unspecified");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+sfc_flow_parse(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ memset(&flow->spec, 0, sizeof(flow->spec));
+
+ rc = sfc_flow_parse_attr(attr, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ rc = sfc_flow_parse_pattern(pattern, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ rc = sfc_flow_parse_actions(sa, actions, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Flow rule pattern is not supported");
+ return -rte_errno;
+ }
+
+fail_bad_value:
+ return rc;
+}
+
+static int
+sfc_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow flow;
+
+ return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
+}
+
+static struct rte_flow *
+sfc_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_flow *flow = NULL;
+ int rc;
+
+ flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to allocate memory");
+ goto fail_no_mem;
+ }
+
+ rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_filter_insert(sa->nic, &flow->spec);
+ if (rc != 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to insert filter");
+ goto fail_filter_insert;
+ }
+ }
+
+ sfc_adapter_unlock(sa);
+
+ return flow;
+
+fail_filter_insert:
+ TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+
+fail_bad_value:
+ rte_free(flow);
+ sfc_adapter_unlock(sa);
+
+fail_no_mem:
+ return NULL;
+}
+
+static int
+sfc_flow_remove(struct sfc_adapter *sa,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc = 0;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_filter_remove(sa->nic, &flow->spec);
+ if (rc != 0)
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to destroy flow rule");
+ }
+
+ TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+ rte_free(flow);
+
+ return rc;
+}
+
+static int
+sfc_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_flow *flow_ptr;
+ int rc = EINVAL;
+
+ sfc_adapter_lock(sa);
+
+ TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
+ if (flow_ptr == flow)
+ rc = 0;
+ }
+ if (rc != 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to find flow rule to destroy");
+ goto fail_bad_value;
+ }
+
+ rc = sfc_flow_remove(sa, flow, error);
+
+fail_bad_value:
+ sfc_adapter_unlock(sa);
+
+ return -rc;
+}
+
+static int
+sfc_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_flow *flow;
+ int rc = 0;
+ int ret = 0;
+
+ sfc_adapter_lock(sa);
+
+ while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
+ rc = sfc_flow_remove(sa, flow, error);
+ if (rc != 0)
+ ret = rc;
+ }
+
+ sfc_adapter_unlock(sa);
+
+ return -ret;
+}
+
+const struct rte_flow_ops sfc_flow_ops = {
+ .validate = sfc_flow_validate,
+ .create = sfc_flow_create,
+ .destroy = sfc_flow_destroy,
+ .flush = sfc_flow_flush,
+ .query = NULL,
+};
+
+void
+sfc_flow_init(struct sfc_adapter *sa)
+{
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ TAILQ_INIT(&sa->filter.flow_list);
+}
+
+void
+sfc_flow_fini(struct sfc_adapter *sa)
+{
+ struct rte_flow *flow;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
+ TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+ rte_free(flow);
+ }
+}
+
+void
+sfc_flow_stop(struct sfc_adapter *sa)
+{
+ struct rte_flow *flow;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
+ efx_filter_remove(sa->nic, &flow->spec);
+}
+
+int
+sfc_flow_start(struct sfc_adapter *sa)
+{
+ struct rte_flow *flow;
+ int rc = 0;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
+ rc = efx_filter_insert(sa->nic, &flow->spec);
+ if (rc != 0)
+ goto fail_bad_flow;
+ }
+
+ sfc_log_init(sa, "done");
+
+fail_bad_flow:
+ return rc;
+}
diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h
new file mode 100644
index 00000000..bfc34364
--- /dev/null
+++ b/drivers/net/sfc/sfc_flow.h
@@ -0,0 +1,64 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_FLOW_H
+#define _SFC_FLOW_H
+
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "efx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* PMD-specific definition of the opaque type from rte_flow.h */
+struct rte_flow {
+ efx_filter_spec_t spec; /* filter specification */
+ TAILQ_ENTRY(rte_flow) entries; /* flow list entries */
+};
+
+TAILQ_HEAD(sfc_flow_list, rte_flow);
+
+extern const struct rte_flow_ops sfc_flow_ops;
+
+struct sfc_adapter;
+
+void sfc_flow_init(struct sfc_adapter *sa);
+void sfc_flow_fini(struct sfc_adapter *sa);
+int sfc_flow_start(struct sfc_adapter *sa);
+void sfc_flow_stop(struct sfc_adapter *sa);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_FLOW_H */
diff --git a/drivers/net/sfc/sfc_intr.c b/drivers/net/sfc/sfc_intr.c
new file mode 100644
index 00000000..7eb4b86c
--- /dev/null
+++ b/drivers/net/sfc/sfc_intr.c
@@ -0,0 +1,342 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * At the momemt of writing DPDK v16.07 has notion of two types of
+ * interrupts: LSC (link status change) and RXQ (receive indication).
+ * It allows to register interrupt callback for entire device which is
+ * not intended to be used for receive indication (i.e. link status
+ * change indication only). The handler has no information which HW
+ * interrupt has triggered it, so we don't know which event queue should
+ * be polled/reprimed (except qmask in the case of legacy line interrupt).
+ */
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+
+static void
+sfc_intr_handle_mgmt_evq(struct sfc_adapter *sa)
+{
+ struct sfc_evq *evq;
+
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+
+ evq = sa->mgmt_evq;
+
+ if (evq->init_state != SFC_EVQ_STARTED) {
+ sfc_log_init(sa, "interrupt on stopped EVQ %u", evq->evq_index);
+ } else {
+ sfc_ev_qpoll(evq);
+
+ if (sfc_ev_qprime(evq) != 0)
+ sfc_err(sa, "cannot prime EVQ %u", evq->evq_index);
+ }
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+}
+
+static void
+sfc_intr_line_handler(void *cb_arg)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)cb_arg;
+ efx_nic_t *enp = sa->nic;
+ boolean_t fatal;
+ uint32_t qmask;
+ unsigned int lsc_seq = sa->port.lsc_seq;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ if (sa->state != SFC_ADAPTER_STARTED &&
+ sa->state != SFC_ADAPTER_STARTING &&
+ sa->state != SFC_ADAPTER_STOPPING) {
+ sfc_log_init(sa,
+ "interrupt on stopped adapter, don't reenable");
+ goto exit;
+ }
+
+ efx_intr_status_line(enp, &fatal, &qmask);
+ if (fatal) {
+ (void)efx_intr_disable(enp);
+ (void)efx_intr_fatal(enp);
+ sfc_err(sa, "fatal, interrupts disabled");
+ goto exit;
+ }
+
+ if (qmask & (1 << sa->mgmt_evq_index))
+ sfc_intr_handle_mgmt_evq(sa);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) != 0)
+ sfc_err(sa, "cannot reenable interrupts");
+
+ sfc_log_init(sa, "done");
+
+exit:
+ if (lsc_seq != sa->port.lsc_seq) {
+ sfc_info(sa, "link status change event: link %s",
+ sa->eth_dev->data->dev_link.link_status ?
+ "UP" : "DOWN");
+ _rte_eth_dev_callback_process(sa->eth_dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
+ }
+}
+
+static void
+sfc_intr_message_handler(void *cb_arg)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)cb_arg;
+ efx_nic_t *enp = sa->nic;
+ boolean_t fatal;
+ unsigned int lsc_seq = sa->port.lsc_seq;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ if (sa->state != SFC_ADAPTER_STARTED &&
+ sa->state != SFC_ADAPTER_STARTING &&
+ sa->state != SFC_ADAPTER_STOPPING) {
+ sfc_log_init(sa, "adapter not-started, don't reenable");
+ goto exit;
+ }
+
+ efx_intr_status_message(enp, sa->mgmt_evq_index, &fatal);
+ if (fatal) {
+ (void)efx_intr_disable(enp);
+ (void)efx_intr_fatal(enp);
+ sfc_err(sa, "fatal, interrupts disabled");
+ goto exit;
+ }
+
+ sfc_intr_handle_mgmt_evq(sa);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) != 0)
+ sfc_err(sa, "cannot reenable interrupts");
+
+ sfc_log_init(sa, "done");
+
+exit:
+ if (lsc_seq != sa->port.lsc_seq) {
+ sfc_info(sa, "link status change event");
+ _rte_eth_dev_callback_process(sa->eth_dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
+ }
+}
+
+int
+sfc_intr_start(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+ struct rte_intr_handle *intr_handle;
+ struct rte_pci_device *pci_dev;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ /*
+ * The EFX common code event queue module depends on the interrupt
+ * module. Ensure that the interrupt module is always initialized
+ * (even if interrupts are not used). Status memory is required
+ * for Siena only and may be NULL for EF10.
+ */
+ sfc_log_init(sa, "efx_intr_init");
+ rc = efx_intr_init(sa->nic, intr->type, NULL);
+ if (rc != 0)
+ goto fail_intr_init;
+
+ pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ intr_handle = &pci_dev->intr_handle;
+
+ if (intr->handler != NULL) {
+ sfc_log_init(sa, "rte_intr_callback_register");
+ rc = rte_intr_callback_register(intr_handle, intr->handler,
+ (void *)sa);
+ if (rc != 0) {
+ sfc_err(sa,
+ "cannot register interrupt handler (rc=%d)",
+ rc);
+ /*
+ * Convert error code from negative returned by RTE API
+ * to positive used in the driver.
+ */
+ rc = -rc;
+ goto fail_rte_intr_cb_reg;
+ }
+
+ sfc_log_init(sa, "rte_intr_enable");
+ rc = rte_intr_enable(intr_handle);
+ if (rc != 0) {
+ sfc_err(sa, "cannot enable interrupts (rc=%d)", rc);
+ /*
+ * Convert error code from negative returned by RTE API
+ * to positive used in the driver.
+ */
+ rc = -rc;
+ goto fail_rte_intr_enable;
+ }
+
+ sfc_log_init(sa, "efx_intr_enable");
+ efx_intr_enable(sa->nic);
+ }
+
+ sfc_log_init(sa, "done type=%u max_intr=%d nb_efd=%u vec=%p",
+ intr_handle->type, intr_handle->max_intr,
+ intr_handle->nb_efd, intr_handle->intr_vec);
+ return 0;
+
+fail_rte_intr_enable:
+ rte_intr_callback_unregister(intr_handle, intr->handler, (void *)sa);
+
+fail_rte_intr_cb_reg:
+ efx_intr_fini(sa->nic);
+
+fail_intr_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_intr_stop(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ if (intr->handler != NULL) {
+ struct rte_intr_handle *intr_handle;
+ int rc;
+
+ efx_intr_disable(sa->nic);
+
+ intr_handle = &pci_dev->intr_handle;
+ if (rte_intr_disable(intr_handle) != 0)
+ sfc_err(sa, "cannot disable interrupts");
+
+ while ((rc = rte_intr_callback_unregister(intr_handle,
+ intr->handler, (void *)sa)) == -EAGAIN)
+ ;
+ if (rc != 1)
+ sfc_err(sa,
+ "cannot unregister interrupt handler %d",
+ rc);
+ }
+
+ efx_intr_fini(sa->nic);
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_intr_configure(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+
+ sfc_log_init(sa, "entry");
+
+ intr->handler = NULL;
+ intr->lsc_intr = (sa->eth_dev->data->dev_conf.intr_conf.lsc != 0);
+ if (!intr->lsc_intr) {
+ sfc_info(sa, "LSC tracking using interrupts is disabled");
+ goto done;
+ }
+
+ switch (intr->type) {
+ case EFX_INTR_MESSAGE:
+ intr->handler = sfc_intr_message_handler;
+ break;
+ case EFX_INTR_LINE:
+ intr->handler = sfc_intr_line_handler;
+ break;
+ case EFX_INTR_INVALID:
+ sfc_warn(sa, "interrupts are not supported");
+ break;
+ default:
+ sfc_panic(sa, "unexpected EFX interrupt type %u\n", intr->type);
+ break;
+ }
+
+done:
+ sfc_log_init(sa, "done");
+ return 0;
+}
+
+void
+sfc_intr_close(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_intr_attach(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ switch (pci_dev->intr_handle.type) {
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ case RTE_INTR_HANDLE_UIO_INTX:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ intr->type = EFX_INTR_LINE;
+ break;
+ case RTE_INTR_HANDLE_UIO:
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ intr->type = EFX_INTR_MESSAGE;
+ break;
+#endif
+ default:
+ intr->type = EFX_INTR_INVALID;
+ break;
+ }
+
+ sfc_log_init(sa, "done");
+ return 0;
+}
+
+void
+sfc_intr_detach(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sa->intr.type = EFX_INTR_INVALID;
+
+ sfc_log_init(sa, "done");
+}
diff --git a/drivers/net/sfc/sfc_kvargs.c b/drivers/net/sfc/sfc_kvargs.c
new file mode 100644
index 00000000..7bcd5951
--- /dev/null
+++ b/drivers/net/sfc/sfc_kvargs.c
@@ -0,0 +1,145 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+#include <strings.h>
+
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+
+#include "sfc.h"
+#include "sfc_kvargs.h"
+
+int
+sfc_kvargs_parse(struct sfc_adapter *sa)
+{
+ struct rte_eth_dev *eth_dev = (sa)->eth_dev;
+ struct rte_devargs *devargs = eth_dev->device->devargs;
+ const char **params = (const char *[]){
+ SFC_KVARG_STATS_UPDATE_PERIOD_MS,
+ SFC_KVARG_DEBUG_INIT,
+ SFC_KVARG_MCDI_LOGGING,
+ SFC_KVARG_PERF_PROFILE,
+ SFC_KVARG_RX_DATAPATH,
+ SFC_KVARG_TX_DATAPATH,
+ NULL,
+ };
+
+ if (devargs == NULL)
+ return 0;
+
+ sa->kvargs = rte_kvargs_parse(devargs->args, params);
+ if (sa->kvargs == NULL)
+ return EINVAL;
+
+ return 0;
+}
+
+void
+sfc_kvargs_cleanup(struct sfc_adapter *sa)
+{
+ rte_kvargs_free(sa->kvargs);
+}
+
+static int
+sfc_kvarg_match_value(const char *value, const char * const *values,
+ unsigned int n_values)
+{
+ unsigned int i;
+
+ for (i = 0; i < n_values; ++i)
+ if (strcasecmp(value, values[i]) == 0)
+ return 1;
+
+ return 0;
+}
+
+int
+sfc_kvargs_process(struct sfc_adapter *sa, const char *key_match,
+ arg_handler_t handler, void *opaque_arg)
+{
+ if (sa->kvargs == NULL)
+ return 0;
+
+ return -rte_kvargs_process(sa->kvargs, key_match, handler, opaque_arg);
+}
+
+int
+sfc_kvarg_bool_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ const char * const true_strs[] = {
+ "1", "y", "yes", "on", "true"
+ };
+ const char * const false_strs[] = {
+ "0", "n", "no", "off", "false"
+ };
+ bool *value = opaque;
+
+ if (sfc_kvarg_match_value(value_str, true_strs,
+ RTE_DIM(true_strs)))
+ *value = true;
+ else if (sfc_kvarg_match_value(value_str, false_strs,
+ RTE_DIM(false_strs)))
+ *value = false;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+sfc_kvarg_long_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ long value;
+ char *endptr;
+
+ if (!value_str || !opaque)
+ return -EINVAL;
+
+ value = strtol(value_str, &endptr, 0);
+ if (endptr == value_str)
+ return -EINVAL;
+
+ *(long *)opaque = value;
+
+ return 0;
+}
+
+int
+sfc_kvarg_string_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ *(const char **)opaque = value_str;
+
+ return 0;
+}
diff --git a/drivers/net/sfc/sfc_kvargs.h b/drivers/net/sfc/sfc_kvargs.h
new file mode 100644
index 00000000..d9c3b1da
--- /dev/null
+++ b/drivers/net/sfc/sfc_kvargs.h
@@ -0,0 +1,93 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_KVARGS_H
+#define _SFC_KVARGS_H
+
+#include <rte_kvargs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SFC_KVARG_VALUES_BOOL "[1|y|yes|on|0|n|no|off]"
+
+#define SFC_KVARG_DEBUG_INIT "debug_init"
+
+#define SFC_KVARG_MCDI_LOGGING "mcdi_logging"
+
+#define SFC_KVARG_PERF_PROFILE "perf_profile"
+
+#define SFC_KVARG_PERF_PROFILE_AUTO "auto"
+#define SFC_KVARG_PERF_PROFILE_THROUGHPUT "throughput"
+#define SFC_KVARG_PERF_PROFILE_LOW_LATENCY "low-latency"
+#define SFC_KVARG_VALUES_PERF_PROFILE \
+ "[" SFC_KVARG_PERF_PROFILE_AUTO "|" \
+ SFC_KVARG_PERF_PROFILE_THROUGHPUT "|" \
+ SFC_KVARG_PERF_PROFILE_LOW_LATENCY "]"
+
+#define SFC_KVARG_STATS_UPDATE_PERIOD_MS "stats_update_period_ms"
+
+#define SFC_KVARG_DATAPATH_EFX "efx"
+#define SFC_KVARG_DATAPATH_EF10 "ef10"
+#define SFC_KVARG_DATAPATH_EF10_SIMPLE "ef10_simple"
+
+#define SFC_KVARG_RX_DATAPATH "rx_datapath"
+#define SFC_KVARG_VALUES_RX_DATAPATH \
+ "[" SFC_KVARG_DATAPATH_EFX "|" \
+ SFC_KVARG_DATAPATH_EF10 "]"
+
+#define SFC_KVARG_TX_DATAPATH "tx_datapath"
+#define SFC_KVARG_VALUES_TX_DATAPATH \
+ "[" SFC_KVARG_DATAPATH_EFX "|" \
+ SFC_KVARG_DATAPATH_EF10 "|" \
+ SFC_KVARG_DATAPATH_EF10_SIMPLE "]"
+
+struct sfc_adapter;
+
+int sfc_kvargs_parse(struct sfc_adapter *sa);
+void sfc_kvargs_cleanup(struct sfc_adapter *sa);
+
+int sfc_kvargs_process(struct sfc_adapter *sa, const char *key_match,
+ arg_handler_t handler, void *opaque_arg);
+
+int sfc_kvarg_bool_handler(const char *key, const char *value_str,
+ void *opaque);
+int sfc_kvarg_long_handler(const char *key, const char *value_str,
+ void *opaque);
+int sfc_kvarg_string_handler(const char *key, const char *value_str,
+ void *opaque);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_KVARGS_H */
diff --git a/drivers/net/sfc/sfc_log.h b/drivers/net/sfc/sfc_log.h
new file mode 100644
index 00000000..8a5e2302
--- /dev/null
+++ b/drivers/net/sfc/sfc_log.h
@@ -0,0 +1,76 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_LOG_H_
+#define _SFC_LOG_H_
+
+/* Log PMD message, automatically add prefix and \n */
+#define SFC_LOG(sa, level, ...) \
+ do { \
+ const struct rte_eth_dev *_dev = (sa)->eth_dev; \
+ const struct rte_pci_device *_pci_dev = SFC_DEV_TO_PCI(_dev); \
+ \
+ RTE_LOG(level, PMD, \
+ RTE_FMT("sfc_efx " PCI_PRI_FMT " #%" PRIu8 ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ _pci_dev->addr.domain, \
+ _pci_dev->addr.bus, \
+ _pci_dev->addr.devid, \
+ _pci_dev->addr.function, \
+ _dev->data->port_id, \
+ RTE_FMT_TAIL(__VA_ARGS__,))); \
+ } while (0)
+
+#define sfc_err(sa, ...) \
+ SFC_LOG(sa, ERR, __VA_ARGS__)
+
+#define sfc_warn(sa, ...) \
+ SFC_LOG(sa, WARNING, __VA_ARGS__)
+
+#define sfc_notice(sa, ...) \
+ SFC_LOG(sa, NOTICE, __VA_ARGS__)
+
+#define sfc_info(sa, ...) \
+ SFC_LOG(sa, INFO, __VA_ARGS__)
+
+#define sfc_log_init(sa, ...) \
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ if (_sa->debug_init) \
+ SFC_LOG(_sa, INFO, \
+ RTE_FMT("%s(): " \
+ RTE_FMT_HEAD(__VA_ARGS__,), \
+ __func__, \
+ RTE_FMT_TAIL(__VA_ARGS__,))); \
+ } while (0)
+
+#endif /* _SFC_LOG_H_ */
diff --git a/drivers/net/sfc/sfc_mcdi.c b/drivers/net/sfc/sfc_mcdi.c
new file mode 100644
index 00000000..0faad3ed
--- /dev/null
+++ b/drivers/net/sfc/sfc_mcdi.c
@@ -0,0 +1,331 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cycles.h>
+
+#include "efx.h"
+#include "efx_mcdi.h"
+#include "efx_regs_mcdi.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_kvargs.h"
+#include "sfc_ev.h"
+
+#define SFC_MCDI_POLL_INTERVAL_MIN_US 10 /* 10us in 1us units */
+#define SFC_MCDI_POLL_INTERVAL_MAX_US (US_PER_S / 10) /* 100ms in 1us units */
+#define SFC_MCDI_WATCHDOG_INTERVAL_US (10 * US_PER_S) /* 10s in 1us units */
+
+static void
+sfc_mcdi_timeout(struct sfc_adapter *sa)
+{
+ sfc_warn(sa, "MC TIMEOUT");
+
+ sfc_panic(sa, "MCDI timeout handling is not implemented\n");
+}
+
+static inline boolean_t
+sfc_mcdi_proxy_event_available(struct sfc_adapter *sa)
+{
+ struct sfc_mcdi *mcdi = &sa->mcdi;
+
+ mcdi->proxy_handle = 0;
+ mcdi->proxy_result = ETIMEDOUT;
+ sfc_ev_mgmt_qpoll(sa);
+ if (mcdi->proxy_result != ETIMEDOUT)
+ return B_TRUE;
+
+ return B_FALSE;
+}
+
+static void
+sfc_mcdi_poll(struct sfc_adapter *sa, boolean_t proxy)
+{
+ efx_nic_t *enp;
+ unsigned int delay_total;
+ unsigned int delay_us;
+ boolean_t aborted __rte_unused;
+
+ delay_total = 0;
+ delay_us = SFC_MCDI_POLL_INTERVAL_MIN_US;
+ enp = sa->nic;
+
+ do {
+ boolean_t poll_completed;
+
+ poll_completed = (proxy) ? sfc_mcdi_proxy_event_available(sa) :
+ efx_mcdi_request_poll(enp);
+ if (poll_completed)
+ return;
+
+ if (delay_total > SFC_MCDI_WATCHDOG_INTERVAL_US) {
+ if (!proxy) {
+ aborted = efx_mcdi_request_abort(enp);
+ SFC_ASSERT(aborted);
+ sfc_mcdi_timeout(sa);
+ }
+
+ return;
+ }
+
+ rte_delay_us(delay_us);
+
+ delay_total += delay_us;
+
+ /* Exponentially back off the poll frequency */
+ RTE_BUILD_BUG_ON(SFC_MCDI_POLL_INTERVAL_MAX_US > UINT_MAX / 2);
+ delay_us *= 2;
+ if (delay_us > SFC_MCDI_POLL_INTERVAL_MAX_US)
+ delay_us = SFC_MCDI_POLL_INTERVAL_MAX_US;
+
+ } while (1);
+}
+
+static void
+sfc_mcdi_execute(void *arg, efx_mcdi_req_t *emrp)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ struct sfc_mcdi *mcdi = &sa->mcdi;
+ uint32_t proxy_handle;
+
+ rte_spinlock_lock(&mcdi->lock);
+
+ SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
+
+ efx_mcdi_request_start(sa->nic, emrp, B_FALSE);
+ sfc_mcdi_poll(sa, B_FALSE);
+
+ if (efx_mcdi_get_proxy_handle(sa->nic, emrp, &proxy_handle) == 0) {
+ /*
+ * Authorization is required for the MCDI request;
+ * wait for an MCDI proxy response event to bring
+ * a non-zero proxy handle (should be the same as
+ * the value obtained above) and operation status
+ */
+ sfc_mcdi_poll(sa, B_TRUE);
+
+ if ((mcdi->proxy_handle != 0) &&
+ (mcdi->proxy_handle != proxy_handle)) {
+ sfc_err(sa, "Unexpected MCDI proxy event");
+ emrp->emr_rc = EFAULT;
+ } else if (mcdi->proxy_result == 0) {
+ /*
+ * Authorization succeeded; re-issue the original
+ * request and poll for an ordinary MCDI response
+ */
+ efx_mcdi_request_start(sa->nic, emrp, B_FALSE);
+ sfc_mcdi_poll(sa, B_FALSE);
+ } else {
+ emrp->emr_rc = mcdi->proxy_result;
+ sfc_err(sa, "MCDI proxy authorization failed "
+ "(handle=%08x, result=%d)",
+ proxy_handle, mcdi->proxy_result);
+ }
+ }
+
+ rte_spinlock_unlock(&mcdi->lock);
+}
+
+static void
+sfc_mcdi_ev_cpl(void *arg)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ struct sfc_mcdi *mcdi __rte_unused;
+
+ mcdi = &sa->mcdi;
+ SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
+
+ /* MCDI is polled, completions are not expected */
+ SFC_ASSERT(0);
+}
+
+static void
+sfc_mcdi_exception(void *arg, efx_mcdi_exception_t eme)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+
+ sfc_warn(sa, "MC %s",
+ (eme == EFX_MCDI_EXCEPTION_MC_REBOOT) ? "REBOOT" :
+ (eme == EFX_MCDI_EXCEPTION_MC_BADASSERT) ? "BADASSERT" : "UNKNOWN");
+
+ sfc_panic(sa, "MCDI exceptions handling is not implemented\n");
+}
+
+#define SFC_MCDI_LOG_BUF_SIZE 128
+
+static size_t
+sfc_mcdi_do_log(const struct sfc_adapter *sa,
+ char *buffer, void *data, size_t data_size,
+ size_t pfxsize, size_t position)
+{
+ uint32_t *words = data;
+ /* Space separator plus 2 characters per byte */
+ const size_t word_str_space = 1 + 2 * sizeof(*words);
+ size_t i;
+
+ for (i = 0; i < data_size; i += sizeof(*words)) {
+ if (position + word_str_space >=
+ SFC_MCDI_LOG_BUF_SIZE) {
+ /* Flush at SFC_MCDI_LOG_BUF_SIZE with backslash
+ * at the end which is required by netlogdecode.
+ */
+ buffer[position] = '\0';
+ sfc_info(sa, "%s \\", buffer);
+ /* Preserve prefix for the next log message */
+ position = pfxsize;
+ }
+ position += snprintf(buffer + position,
+ SFC_MCDI_LOG_BUF_SIZE - position,
+ " %08x", *words);
+ words++;
+ }
+ return position;
+}
+
+static void
+sfc_mcdi_logger(void *arg, efx_log_msg_t type,
+ void *header, size_t header_size,
+ void *data, size_t data_size)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ char buffer[SFC_MCDI_LOG_BUF_SIZE];
+ size_t pfxsize;
+ size_t start;
+
+ if (!sa->mcdi.logging)
+ return;
+
+ /* The format including prefix added by sfc_info() is the format
+ * consumed by the Solarflare netlogdecode tool.
+ */
+ pfxsize = snprintf(buffer, sizeof(buffer), "MCDI RPC %s:",
+ type == EFX_LOG_MCDI_REQUEST ? "REQ" :
+ type == EFX_LOG_MCDI_RESPONSE ? "RESP" : "???");
+ start = sfc_mcdi_do_log(sa, buffer, header, header_size,
+ pfxsize, pfxsize);
+ start = sfc_mcdi_do_log(sa, buffer, data, data_size, pfxsize, start);
+ if (start != pfxsize) {
+ buffer[start] = '\0';
+ sfc_info(sa, "%s", buffer);
+ }
+}
+
+static void
+sfc_mcdi_ev_proxy_response(void *arg, uint32_t handle, efx_rc_t result)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ struct sfc_mcdi *mcdi = &sa->mcdi;
+
+ mcdi->proxy_handle = handle;
+ mcdi->proxy_result = result;
+}
+
+int
+sfc_mcdi_init(struct sfc_adapter *sa)
+{
+ struct sfc_mcdi *mcdi;
+ size_t max_msg_size;
+ efx_mcdi_transport_t *emtp;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ mcdi = &sa->mcdi;
+
+ SFC_ASSERT(mcdi->state == SFC_MCDI_UNINITIALIZED);
+
+ rte_spinlock_init(&mcdi->lock);
+
+ mcdi->state = SFC_MCDI_INITIALIZED;
+
+ max_msg_size = sizeof(uint32_t) + MCDI_CTL_SDU_LEN_MAX_V2;
+ rc = sfc_dma_alloc(sa, "mcdi", 0, max_msg_size, sa->socket_id,
+ &mcdi->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ /* Convert negative error to positive used in the driver */
+ rc = sfc_kvargs_process(sa, SFC_KVARG_MCDI_LOGGING,
+ sfc_kvarg_bool_handler, &mcdi->logging);
+ if (rc != 0)
+ goto fail_kvargs_process;
+
+ emtp = &mcdi->transport;
+ emtp->emt_context = sa;
+ emtp->emt_dma_mem = &mcdi->mem;
+ emtp->emt_execute = sfc_mcdi_execute;
+ emtp->emt_ev_cpl = sfc_mcdi_ev_cpl;
+ emtp->emt_exception = sfc_mcdi_exception;
+ emtp->emt_logger = sfc_mcdi_logger;
+ emtp->emt_ev_proxy_response = sfc_mcdi_ev_proxy_response;
+
+ sfc_log_init(sa, "init MCDI");
+ rc = efx_mcdi_init(sa->nic, emtp);
+ if (rc != 0)
+ goto fail_mcdi_init;
+
+ return 0;
+
+fail_mcdi_init:
+ memset(emtp, 0, sizeof(*emtp));
+
+fail_kvargs_process:
+ sfc_dma_free(sa, &mcdi->mem);
+
+fail_dma_alloc:
+ mcdi->state = SFC_MCDI_UNINITIALIZED;
+ return rc;
+}
+
+void
+sfc_mcdi_fini(struct sfc_adapter *sa)
+{
+ struct sfc_mcdi *mcdi;
+ efx_mcdi_transport_t *emtp;
+
+ sfc_log_init(sa, "entry");
+
+ mcdi = &sa->mcdi;
+ emtp = &mcdi->transport;
+
+ rte_spinlock_lock(&mcdi->lock);
+
+ SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
+ mcdi->state = SFC_MCDI_UNINITIALIZED;
+
+ sfc_log_init(sa, "fini MCDI");
+ efx_mcdi_fini(sa->nic);
+ memset(emtp, 0, sizeof(*emtp));
+
+ rte_spinlock_unlock(&mcdi->lock);
+
+ sfc_dma_free(sa, &mcdi->mem);
+}
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
new file mode 100644
index 00000000..ee96bcde
--- /dev/null
+++ b/drivers/net/sfc/sfc_port.c
@@ -0,0 +1,475 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_kvargs.h"
+
+/** Default MAC statistics update period is 1 second */
+#define SFC_MAC_STATS_UPDATE_PERIOD_MS_DEF MS_PER_S
+
+/** The number of microseconds to sleep on attempt to get statistics update */
+#define SFC_MAC_STATS_UPDATE_RETRY_INTERVAL_US 10
+
+/** The number of attempts to await arrival of freshly generated statistics */
+#define SFC_MAC_STATS_UPDATE_NB_ATTEMPTS 50
+
+/**
+ * Update MAC statistics in the buffer.
+ *
+ * @param sa Adapter
+ *
+ * @return Status code
+ * @retval 0 Success
+ * @retval EAGAIN Try again
+ * @retval ENOMEM Memory allocation failure
+ */
+int
+sfc_port_update_mac_stats(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ efsys_mem_t *esmp = &port->mac_stats_dma_mem;
+ uint32_t *genp = NULL;
+ uint32_t gen_old;
+ unsigned int nb_attempts = 0;
+ int rc;
+
+ SFC_ASSERT(rte_spinlock_is_locked(&port->mac_stats_lock));
+
+ if (sa->state != SFC_ADAPTER_STARTED)
+ return EINVAL;
+
+ /*
+ * If periodic statistics DMA'ing is off or if not supported,
+ * make a manual request and keep an eye on timer if need be
+ */
+ if (!port->mac_stats_periodic_dma_supported ||
+ (port->mac_stats_update_period_ms == 0)) {
+ if (port->mac_stats_update_period_ms != 0) {
+ uint64_t timestamp = sfc_get_system_msecs();
+
+ if ((timestamp -
+ port->mac_stats_last_request_timestamp) <
+ port->mac_stats_update_period_ms)
+ return 0;
+
+ port->mac_stats_last_request_timestamp = timestamp;
+ }
+
+ rc = efx_mac_stats_upload(sa->nic, esmp);
+ if (rc != 0)
+ return rc;
+
+ genp = &port->mac_stats_update_generation;
+ gen_old = *genp;
+ }
+
+ do {
+ if (nb_attempts > 0)
+ rte_delay_us(SFC_MAC_STATS_UPDATE_RETRY_INTERVAL_US);
+
+ rc = efx_mac_stats_update(sa->nic, esmp,
+ port->mac_stats_buf, genp);
+ if (rc != 0)
+ return rc;
+
+ } while ((genp != NULL) && (*genp == gen_old) &&
+ (++nb_attempts < SFC_MAC_STATS_UPDATE_NB_ATTEMPTS));
+
+ return 0;
+}
+
+int
+sfc_port_reset_mac_stats(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+ rc = efx_mac_stats_clear(sa->nic);
+ rte_spinlock_unlock(&port->mac_stats_lock);
+
+ return rc;
+}
+
+static int
+sfc_port_init_dev_link(struct sfc_adapter *sa)
+{
+ struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
+ int rc;
+ efx_link_mode_t link_mode;
+ struct rte_eth_link current_link;
+
+ rc = efx_port_poll(sa->nic, &link_mode);
+ if (rc != 0)
+ return rc;
+
+ sfc_port_link_mode_to_info(link_mode, &current_link);
+
+ EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
+ rte_atomic64_set((rte_atomic64_t *)dev_link,
+ *(uint64_t *)&current_link);
+
+ return 0;
+}
+
+int
+sfc_port_start(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ int rc;
+ uint32_t phy_adv_cap;
+ const uint32_t phy_pause_caps =
+ ((1u << EFX_PHY_CAP_PAUSE) | (1u << EFX_PHY_CAP_ASYM));
+
+ sfc_log_init(sa, "entry");
+
+ sfc_log_init(sa, "init filters");
+ rc = efx_filter_init(sa->nic);
+ if (rc != 0)
+ goto fail_filter_init;
+
+ sfc_log_init(sa, "init port");
+ rc = efx_port_init(sa->nic);
+ if (rc != 0)
+ goto fail_port_init;
+
+ sfc_log_init(sa, "set flow control to %#x autoneg=%u",
+ port->flow_ctrl, port->flow_ctrl_autoneg);
+ rc = efx_mac_fcntl_set(sa->nic, port->flow_ctrl,
+ port->flow_ctrl_autoneg);
+ if (rc != 0)
+ goto fail_mac_fcntl_set;
+
+ /* Preserve pause capabilities set by above efx_mac_fcntl_set() */
+ efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_CURRENT, &phy_adv_cap);
+ SFC_ASSERT((port->phy_adv_cap & phy_pause_caps) == 0);
+ phy_adv_cap = port->phy_adv_cap | (phy_adv_cap & phy_pause_caps);
+
+ sfc_log_init(sa, "set phy adv caps to %#x", phy_adv_cap);
+ rc = efx_phy_adv_cap_set(sa->nic, phy_adv_cap);
+ if (rc != 0)
+ goto fail_phy_adv_cap_set;
+
+ sfc_log_init(sa, "set MAC PDU %u", (unsigned int)port->pdu);
+ rc = efx_mac_pdu_set(sa->nic, port->pdu);
+ if (rc != 0)
+ goto fail_mac_pdu_set;
+
+ sfc_log_init(sa, "set MAC address");
+ rc = efx_mac_addr_set(sa->nic,
+ sa->eth_dev->data->mac_addrs[0].addr_bytes);
+ if (rc != 0)
+ goto fail_mac_addr_set;
+
+ sfc_log_init(sa, "set MAC filters");
+ port->promisc = (sa->eth_dev->data->promiscuous != 0) ?
+ B_TRUE : B_FALSE;
+ port->allmulti = (sa->eth_dev->data->all_multicast != 0) ?
+ B_TRUE : B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ goto fail_mac_filter_set;
+
+ sfc_log_init(sa, "set multicast address list");
+ rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
+ port->nb_mcast_addrs);
+ if (rc != 0)
+ goto fail_mcast_address_list_set;
+
+ if (port->mac_stats_reset_pending) {
+ rc = sfc_port_reset_mac_stats(sa);
+ if (rc != 0)
+ sfc_err(sa, "statistics reset failed (requested "
+ "before the port was started)");
+
+ port->mac_stats_reset_pending = B_FALSE;
+ }
+
+ efx_mac_stats_get_mask(sa->nic, port->mac_stats_mask,
+ sizeof(port->mac_stats_mask));
+
+ port->mac_stats_update_generation = 0;
+
+ if (port->mac_stats_update_period_ms != 0) {
+ /*
+ * Update MAC stats using periodic DMA;
+ * any positive update interval different from
+ * 1000 ms can be set only on SFN8xxx provided
+ * that FW version is 6.2.1.1033 or higher
+ */
+ sfc_log_init(sa, "request MAC stats DMA'ing");
+ rc = efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
+ port->mac_stats_update_period_ms,
+ B_FALSE);
+ if (rc == 0) {
+ port->mac_stats_periodic_dma_supported = B_TRUE;
+ } else if (rc == EOPNOTSUPP) {
+ port->mac_stats_periodic_dma_supported = B_FALSE;
+ port->mac_stats_last_request_timestamp = 0;
+ } else {
+ goto fail_mac_stats_periodic;
+ }
+ }
+
+ sfc_log_init(sa, "disable MAC drain");
+ rc = efx_mac_drain(sa->nic, B_FALSE);
+ if (rc != 0)
+ goto fail_mac_drain;
+
+ /* Synchronize link status knowledge */
+ rc = sfc_port_init_dev_link(sa);
+ if (rc != 0)
+ goto fail_port_init_dev_link;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_port_init_dev_link:
+ (void)efx_mac_drain(sa->nic, B_TRUE);
+
+fail_mac_drain:
+ (void)efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
+ 0, B_FALSE);
+
+fail_mac_stats_periodic:
+fail_mcast_address_list_set:
+fail_mac_filter_set:
+fail_mac_addr_set:
+fail_mac_pdu_set:
+fail_phy_adv_cap_set:
+fail_mac_fcntl_set:
+ efx_port_fini(sa->nic);
+
+fail_port_init:
+ efx_filter_fini(sa->nic);
+
+fail_filter_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_port_stop(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ efx_mac_drain(sa->nic, B_TRUE);
+
+ (void)efx_mac_stats_periodic(sa->nic, &sa->port.mac_stats_dma_mem,
+ 0, B_FALSE);
+
+ efx_port_fini(sa->nic);
+ efx_filter_fini(sa->nic);
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_port_configure(struct sfc_adapter *sa)
+{
+ const struct rte_eth_dev_data *dev_data = sa->eth_dev->data;
+ struct sfc_port *port = &sa->port;
+
+ sfc_log_init(sa, "entry");
+
+ if (dev_data->dev_conf.rxmode.jumbo_frame)
+ port->pdu = dev_data->dev_conf.rxmode.max_rx_pkt_len;
+ else
+ port->pdu = EFX_MAC_PDU(dev_data->mtu);
+
+ return 0;
+}
+
+void
+sfc_port_close(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+}
+
+int
+sfc_port_attach(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ long kvarg_stats_update_period_ms;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_PERM, &port->phy_adv_cap_mask);
+
+ /* Enable flow control by default */
+ port->flow_ctrl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ port->flow_ctrl_autoneg = B_TRUE;
+
+ port->max_mcast_addrs = EFX_MAC_MULTICAST_LIST_MAX;
+ port->nb_mcast_addrs = 0;
+ port->mcast_addrs = rte_calloc_socket("mcast_addr_list_buf",
+ port->max_mcast_addrs,
+ EFX_MAC_ADDR_LEN, 0,
+ sa->socket_id);
+ if (port->mcast_addrs == NULL) {
+ rc = ENOMEM;
+ goto fail_mcast_addr_list_buf_alloc;
+ }
+
+ rte_spinlock_init(&port->mac_stats_lock);
+
+ rc = ENOMEM;
+ port->mac_stats_buf = rte_calloc_socket("mac_stats_buf", EFX_MAC_NSTATS,
+ sizeof(uint64_t), 0,
+ sa->socket_id);
+ if (port->mac_stats_buf == NULL)
+ goto fail_mac_stats_buf_alloc;
+
+ rc = sfc_dma_alloc(sa, "mac_stats", 0, EFX_MAC_STATS_SIZE,
+ sa->socket_id, &port->mac_stats_dma_mem);
+ if (rc != 0)
+ goto fail_mac_stats_dma_alloc;
+
+ port->mac_stats_reset_pending = B_FALSE;
+
+ kvarg_stats_update_period_ms = SFC_MAC_STATS_UPDATE_PERIOD_MS_DEF;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_STATS_UPDATE_PERIOD_MS,
+ sfc_kvarg_long_handler,
+ &kvarg_stats_update_period_ms);
+ if ((rc == 0) &&
+ ((kvarg_stats_update_period_ms < 0) ||
+ (kvarg_stats_update_period_ms > UINT16_MAX))) {
+ sfc_err(sa, "wrong '" SFC_KVARG_STATS_UPDATE_PERIOD_MS "' "
+ "was set (%ld);", kvarg_stats_update_period_ms);
+ sfc_err(sa, "it must not be less than 0 "
+ "or greater than %" PRIu16, UINT16_MAX);
+ rc = EINVAL;
+ goto fail_kvarg_stats_update_period_ms;
+ } else if (rc != 0) {
+ goto fail_kvarg_stats_update_period_ms;
+ }
+
+ port->mac_stats_update_period_ms = kvarg_stats_update_period_ms;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_kvarg_stats_update_period_ms:
+fail_mac_stats_dma_alloc:
+ rte_free(port->mac_stats_buf);
+fail_mac_stats_buf_alloc:
+fail_mcast_addr_list_buf_alloc:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_port_detach(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_dma_free(sa, &port->mac_stats_dma_mem);
+ rte_free(port->mac_stats_buf);
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_set_rx_mode(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ rc = efx_mac_filter_set(sa->nic, port->promisc, B_TRUE,
+ port->promisc || port->allmulti, B_TRUE);
+
+ return rc;
+}
+
+void
+sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
+ struct rte_eth_link *link_info)
+{
+ SFC_ASSERT(link_mode < EFX_LINK_NMODES);
+
+ memset(link_info, 0, sizeof(*link_info));
+ if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN))
+ link_info->link_status = ETH_LINK_DOWN;
+ else
+ link_info->link_status = ETH_LINK_UP;
+
+ switch (link_mode) {
+ case EFX_LINK_10HDX:
+ link_info->link_speed = ETH_SPEED_NUM_10M;
+ link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case EFX_LINK_10FDX:
+ link_info->link_speed = ETH_SPEED_NUM_10M;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_100HDX:
+ link_info->link_speed = ETH_SPEED_NUM_100M;
+ link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case EFX_LINK_100FDX:
+ link_info->link_speed = ETH_SPEED_NUM_100M;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_1000HDX:
+ link_info->link_speed = ETH_SPEED_NUM_1G;
+ link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case EFX_LINK_1000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_1G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_10000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_10G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_40000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_40G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ default:
+ SFC_ASSERT(B_FALSE);
+ /* FALLTHROUGH */
+ case EFX_LINK_UNKNOWN:
+ case EFX_LINK_DOWN:
+ link_info->link_speed = ETH_SPEED_NUM_NONE;
+ link_info->link_duplex = 0;
+ break;
+ }
+
+ link_info->link_autoneg = ETH_LINK_AUTONEG;
+}
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
new file mode 100644
index 00000000..2ecd6f26
--- /dev/null
+++ b/drivers/net/sfc/sfc_rx.c
@@ -0,0 +1,1327 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_mempool.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_kvargs.h"
+#include "sfc_tweak.h"
+
+/*
+ * Maximum number of Rx queue flush attempt in the case of failure or
+ * flush timeout
+ */
+#define SFC_RX_QFLUSH_ATTEMPTS (3)
+
+/*
+ * Time to wait between event queue polling attempts when waiting for Rx
+ * queue flush done or failed events.
+ */
+#define SFC_RX_QFLUSH_POLL_WAIT_MS (1)
+
+/*
+ * Maximum number of event queue polling attempts when waiting for Rx queue
+ * flush done or failed events. It defines Rx queue flush attempt timeout
+ * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
+ */
+#define SFC_RX_QFLUSH_POLL_ATTEMPTS (2000)
+
+void
+sfc_rx_qflush_done(struct sfc_rxq *rxq)
+{
+ rxq->state |= SFC_RXQ_FLUSHED;
+ rxq->state &= ~SFC_RXQ_FLUSHING;
+}
+
+void
+sfc_rx_qflush_failed(struct sfc_rxq *rxq)
+{
+ rxq->state |= SFC_RXQ_FLUSH_FAILED;
+ rxq->state &= ~SFC_RXQ_FLUSHING;
+}
+
+static void
+sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
+{
+ unsigned int free_space;
+ unsigned int bulks;
+ void *objs[SFC_RX_REFILL_BULK];
+ efsys_dma_addr_t addr[RTE_DIM(objs)];
+ unsigned int added = rxq->added;
+ unsigned int id;
+ unsigned int i;
+ struct sfc_efx_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ uint16_t port_id = rxq->dp.dpq.port_id;
+
+ free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
+ (added - rxq->completed);
+
+ if (free_space < rxq->refill_threshold)
+ return;
+
+ bulks = free_space / RTE_DIM(objs);
+ /* refill_threshold guarantees that bulks is positive */
+ SFC_ASSERT(bulks > 0);
+
+ id = added & rxq->ptr_mask;
+ do {
+ if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
+ RTE_DIM(objs)) < 0)) {
+ /*
+ * It is hardly a safe way to increment counter
+ * from different contexts, but all PMDs do it.
+ */
+ rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
+ RTE_DIM(objs);
+ /* Return if we have posted nothing yet */
+ if (added == rxq->added)
+ return;
+ /* Push posted */
+ break;
+ }
+
+ for (i = 0; i < RTE_DIM(objs);
+ ++i, id = (id + 1) & rxq->ptr_mask) {
+ m = objs[i];
+
+ rxd = &rxq->sw_desc[id];
+ rxd->mbuf = m;
+
+ SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
+ m->data_off = RTE_PKTMBUF_HEADROOM;
+ SFC_ASSERT(m->next == NULL);
+ SFC_ASSERT(m->nb_segs == 1);
+ m->port = port_id;
+
+ addr[i] = rte_pktmbuf_mtophys(m);
+ }
+
+ efx_rx_qpost(rxq->common, addr, rxq->buf_size,
+ RTE_DIM(objs), rxq->completed, added);
+ added += RTE_DIM(objs);
+ } while (--bulks > 0);
+
+ SFC_ASSERT(added != rxq->added);
+ rxq->added = added;
+ efx_rx_qpush(rxq->common, added, &rxq->pushed);
+}
+
+static uint64_t
+sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
+{
+ uint64_t mbuf_flags = 0;
+
+ switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
+ case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
+ mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
+ break;
+ case EFX_PKT_IPV4:
+ mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
+ break;
+ default:
+ RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
+ SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
+ PKT_RX_IP_CKSUM_UNKNOWN);
+ break;
+ }
+
+ switch ((desc_flags &
+ (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
+ case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
+ case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
+ mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case EFX_PKT_TCP:
+ case EFX_PKT_UDP:
+ mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
+ break;
+ default:
+ RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
+ SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
+ PKT_RX_L4_CKSUM_UNKNOWN);
+ break;
+ }
+
+ return mbuf_flags;
+}
+
+static uint32_t
+sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
+{
+ return RTE_PTYPE_L2_ETHER |
+ ((desc_flags & EFX_PKT_IPV4) ?
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
+ ((desc_flags & EFX_PKT_IPV6) ?
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
+ ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
+ ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
+}
+
+static const uint32_t *
+sfc_efx_supported_ptypes_get(void)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes;
+}
+
+static void
+sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
+ struct rte_mbuf *m)
+{
+#if EFSYS_OPT_RX_SCALE
+ uint8_t *mbuf_data;
+
+
+ if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
+ return;
+
+ mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);
+
+ if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
+ m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
+ EFX_RX_HASHALG_TOEPLITZ,
+ mbuf_data);
+
+ m->ol_flags |= PKT_RX_RSS_HASH;
+ }
+#endif
+}
+
+static uint16_t
+sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_dp_rxq *dp_rxq = rx_queue;
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ unsigned int completed;
+ unsigned int prefix_size = rxq->prefix_size;
+ unsigned int done_pkts = 0;
+ boolean_t discard_next = B_FALSE;
+ struct rte_mbuf *scatter_pkt = NULL;
+
+ if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
+ return 0;
+
+ sfc_ev_qpoll(rxq->evq);
+
+ completed = rxq->completed;
+ while (completed != rxq->pending && done_pkts < nb_pkts) {
+ unsigned int id;
+ struct sfc_efx_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ unsigned int seg_len;
+ unsigned int desc_flags;
+
+ id = completed++ & rxq->ptr_mask;
+ rxd = &rxq->sw_desc[id];
+ m = rxd->mbuf;
+ desc_flags = rxd->flags;
+
+ if (discard_next)
+ goto discard;
+
+ if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
+ goto discard;
+
+ if (desc_flags & EFX_PKT_PREFIX_LEN) {
+ uint16_t tmp_size;
+ int rc __rte_unused;
+
+ rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
+ rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
+ SFC_ASSERT(rc == 0);
+ seg_len = tmp_size;
+ } else {
+ seg_len = rxd->size - prefix_size;
+ }
+
+ rte_pktmbuf_data_len(m) = seg_len;
+ rte_pktmbuf_pkt_len(m) = seg_len;
+
+ if (scatter_pkt != NULL) {
+ if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
+ rte_pktmbuf_free(scatter_pkt);
+ goto discard;
+ }
+ /* The packet to deliver */
+ m = scatter_pkt;
+ }
+
+ if (desc_flags & EFX_PKT_CONT) {
+ /* The packet is scattered, more fragments to come */
+ scatter_pkt = m;
+ /* Futher fragments have no prefix */
+ prefix_size = 0;
+ continue;
+ }
+
+ /* Scattered packet is done */
+ scatter_pkt = NULL;
+ /* The first fragment of the packet has prefix */
+ prefix_size = rxq->prefix_size;
+
+ m->ol_flags =
+ sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
+ m->packet_type =
+ sfc_efx_rx_desc_flags_to_packet_type(desc_flags);
+
+ /*
+ * Extract RSS hash from the packet prefix and
+ * set the corresponding field (if needed and possible)
+ */
+ sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);
+
+ m->data_off += prefix_size;
+
+ *rx_pkts++ = m;
+ done_pkts++;
+ continue;
+
+discard:
+ discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
+ rte_mempool_put(rxq->refill_mb_pool, m);
+ rxd->mbuf = NULL;
+ }
+
+ /* pending is only moved when entire packet is received */
+ SFC_ASSERT(scatter_pkt == NULL);
+
+ rxq->completed = completed;
+
+ sfc_efx_rx_qrefill(rxq);
+
+ return done_pkts;
+}
+
+static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
+static unsigned int
+sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
+ return 0;
+
+ sfc_ev_qpoll(rxq->evq);
+
+ return rxq->pending - rxq->completed;
+}
+
+struct sfc_rxq *
+sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
+{
+ const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
+ struct rte_eth_dev *eth_dev;
+ struct sfc_adapter *sa;
+ struct sfc_rxq *rxq;
+
+ SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
+ eth_dev = &rte_eth_devices[dpq->port_id];
+
+ sa = eth_dev->data->dev_private;
+
+ SFC_ASSERT(dpq->queue_id < sa->rxq_count);
+ rxq = sa->rxq_info[dpq->queue_id].rxq;
+
+ SFC_ASSERT(rxq != NULL);
+ return rxq;
+}
+
+static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
+static int
+sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp)
+{
+ struct sfc_efx_rxq *rxq;
+ int rc;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
+ info->rxq_entries,
+ sizeof(*rxq->sw_desc),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_desc == NULL)
+ goto fail_desc_alloc;
+
+ /* efx datapath is bound to efx control path */
+ rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
+ if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
+ rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
+ rxq->ptr_mask = info->rxq_entries - 1;
+ rxq->batch_max = info->batch_max;
+ rxq->prefix_size = info->prefix_size;
+ rxq->refill_threshold = info->refill_threshold;
+ rxq->buf_size = info->buf_size;
+ rxq->refill_mb_pool = info->refill_mb_pool;
+
+ *dp_rxqp = &rxq->dp;
+ return 0;
+
+fail_desc_alloc:
+ rte_free(rxq);
+
+fail_rxq_alloc:
+ return rc;
+}
+
+static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
+static void
+sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ rte_free(rxq->sw_desc);
+ rte_free(rxq);
+}
+
+static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
+static int
+sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
+ __rte_unused unsigned int evq_read_ptr)
+{
+ /* libefx-based datapath is specific to libefx-based PMD */
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->common = crxq->common;
+
+ rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;
+
+ sfc_efx_rx_qrefill(rxq);
+
+ rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);
+
+ return 0;
+}
+
+static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
+static void
+sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
+ __rte_unused unsigned int *evq_read_ptr)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;
+
+ /* libefx-based datapath is bound to libefx-based PMD and uses
+ * event queue structure directly. So, there is no necessity to
+ * return EvQ read pointer.
+ */
+}
+
+static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
+static void
+sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ unsigned int i;
+ struct sfc_efx_rx_sw_desc *rxd;
+
+ for (i = rxq->completed; i != rxq->added; ++i) {
+ rxd = &rxq->sw_desc[i & rxq->ptr_mask];
+ rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ rxd->mbuf = NULL;
+ /* Packed stream relies on 0 in inactive SW desc.
+ * Rx queue stop is not performance critical, so
+ * there is no harm to do it always.
+ */
+ rxd->flags = 0;
+ rxd->size = 0;
+ }
+
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
+}
+
+struct sfc_dp_rx sfc_efx_rx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EFX,
+ .type = SFC_DP_RX,
+ .hw_fw_caps = 0,
+ },
+ .features = SFC_DP_RX_FEAT_SCATTER,
+ .qcreate = sfc_efx_rx_qcreate,
+ .qdestroy = sfc_efx_rx_qdestroy,
+ .qstart = sfc_efx_rx_qstart,
+ .qstop = sfc_efx_rx_qstop,
+ .qpurge = sfc_efx_rx_qpurge,
+ .supported_ptypes_get = sfc_efx_supported_ptypes_get,
+ .qdesc_npending = sfc_efx_rx_qdesc_npending,
+ .pkt_burst = sfc_efx_recv_pkts,
+};
+
+unsigned int
+sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq *rxq;
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+ rxq = sa->rxq_info[sw_index].rxq;
+
+ if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
+ return 0;
+
+ return sa->dp_rx->qdesc_npending(rxq->dp);
+}
+
+int
+sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
+{
+ struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+
+ return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
+}
+
+static void
+sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq *rxq;
+ unsigned int retry_count;
+ unsigned int wait_count;
+
+ rxq = sa->rxq_info[sw_index].rxq;
+ SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
+
+ /*
+ * Retry Rx queue flushing in the case of flush failed or
+ * timeout. In the worst case it can delay for 6 seconds.
+ */
+ for (retry_count = 0;
+ ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
+ (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
+ ++retry_count) {
+ if (efx_rx_qflush(rxq->common) != 0) {
+ rxq->state |= SFC_RXQ_FLUSH_FAILED;
+ break;
+ }
+ rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
+ rxq->state |= SFC_RXQ_FLUSHING;
+
+ /*
+ * Wait for Rx queue flush done or failed event at least
+ * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
+ * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
+ * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
+ */
+ wait_count = 0;
+ do {
+ rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
+ sfc_ev_qpoll(rxq->evq);
+ } while ((rxq->state & SFC_RXQ_FLUSHING) &&
+ (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));
+
+ if (rxq->state & SFC_RXQ_FLUSHING)
+ sfc_err(sa, "RxQ %u flush timed out", sw_index);
+
+ if (rxq->state & SFC_RXQ_FLUSH_FAILED)
+ sfc_err(sa, "RxQ %u flush failed", sw_index);
+
+ if (rxq->state & SFC_RXQ_FLUSHED)
+ sfc_info(sa, "RxQ %u flushed", sw_index);
+ }
+
+ sa->dp_rx->qpurge(rxq->dp);
+}
+
+static int
+sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
+{
+ boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ /*
+ * If promiscuous or all-multicast mode has been requested, setting
+ * filter for the default Rx queue might fail, in particular, while
+ * running over PCI function which is not a member of corresponding
+ * privilege groups; if this occurs, few iterations will be made to
+ * repeat this step without promiscuous and all-multicast flags set
+ */
+retry:
+ rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
+ if (rc == 0)
+ return 0;
+ else if (rc != EOPNOTSUPP)
+ return rc;
+
+ if (port->promisc) {
+ sfc_warn(sa, "promiscuous mode has been requested, "
+ "but the HW rejects it");
+ sfc_warn(sa, "promiscuous mode will be disabled");
+
+ port->promisc = B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ return rc;
+
+ goto retry;
+ }
+
+ if (port->allmulti) {
+ sfc_warn(sa, "all-multicast mode has been requested, "
+ "but the HW rejects it");
+ sfc_warn(sa, "all-multicast mode will be disabled");
+
+ port->allmulti = B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ return rc;
+
+ goto retry;
+ }
+
+ return rc;
+}
+
+int
+sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+ struct sfc_evq *evq;
+ int rc;
+
+ sfc_log_init(sa, "sw_index=%u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[sw_index];
+ rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
+
+ evq = rxq->evq;
+
+ rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
+ if (rc != 0)
+ goto fail_ev_qstart;
+
+ rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
+ &rxq->mem, rxq_info->entries,
+ 0 /* not used on EF10 */, evq->common,
+ &rxq->common);
+ if (rc != 0)
+ goto fail_rx_qcreate;
+
+ efx_rx_qenable(rxq->common);
+
+ rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
+ if (rc != 0)
+ goto fail_dp_qstart;
+
+ rxq->state |= SFC_RXQ_STARTED;
+
+ if (sw_index == 0) {
+ rc = sfc_rx_default_rxq_set_filter(sa, rxq);
+ if (rc != 0)
+ goto fail_mac_filter_default_rxq_set;
+ }
+
+ /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
+ sa->eth_dev->data->rx_queue_state[sw_index] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+
+fail_mac_filter_default_rxq_set:
+ sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
+
+fail_dp_qstart:
+ sfc_rx_qflush(sa, sw_index);
+
+fail_rx_qcreate:
+ sfc_ev_qstop(evq);
+
+fail_ev_qstart:
+ return rc;
+}
+
+void
+sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+
+ sfc_log_init(sa, "sw_index=%u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[sw_index];
+ rxq = rxq_info->rxq;
+
+ if (rxq->state == SFC_RXQ_INITIALIZED)
+ return;
+ SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
+
+ /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
+ sa->eth_dev->data->rx_queue_state[sw_index] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+
+ sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
+
+ if (sw_index == 0)
+ efx_mac_filter_default_rxq_clear(sa->nic);
+
+ sfc_rx_qflush(sa, sw_index);
+
+ rxq->state = SFC_RXQ_INITIALIZED;
+
+ efx_rx_qdestroy(rxq->common);
+
+ sfc_ev_qstop(rxq->evq);
+}
+
+static int
+sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
+ const struct rte_eth_rxconf *rx_conf)
+{
+ const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc);
+ int rc = 0;
+
+ if (rx_conf->rx_thresh.pthresh != 0 ||
+ rx_conf->rx_thresh.hthresh != 0 ||
+ rx_conf->rx_thresh.wthresh != 0) {
+ sfc_err(sa,
+ "RxQ prefetch/host/writeback thresholds are not supported");
+ rc = EINVAL;
+ }
+
+ if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
+ sfc_err(sa,
+ "RxQ free threshold too large: %u vs maximum %u",
+ rx_conf->rx_free_thresh, rx_free_thresh_max);
+ rc = EINVAL;
+ }
+
+ if (rx_conf->rx_drop_en == 0) {
+ sfc_err(sa, "RxQ drop disable is not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+static unsigned int
+sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
+{
+ uint32_t data_off;
+ uint32_t order;
+
+ /* The mbuf object itself is always cache line aligned */
+ order = rte_bsf32(RTE_CACHE_LINE_SIZE);
+
+ /* Data offset from mbuf object start */
+ data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
+ RTE_PKTMBUF_HEADROOM;
+
+ order = MIN(order, rte_bsf32(data_off));
+
+ return 1u << (order - 1);
+}
+
+static uint16_t
+sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
+ const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
+ uint16_t buf_size;
+ unsigned int buf_aligned;
+ unsigned int start_alignment;
+ unsigned int end_padding_alignment;
+
+ /* Below it is assumed that both alignments are power of 2 */
+ SFC_ASSERT(rte_is_power_of_2(nic_align_start));
+ SFC_ASSERT(rte_is_power_of_2(nic_align_end));
+
+ /*
+ * mbuf is always cache line aligned, double-check
+ * that it meets rx buffer start alignment requirements.
+ */
+
+ /* Start from mbuf pool data room size */
+ buf_size = rte_pktmbuf_data_room_size(mb_pool);
+
+ /* Remove headroom */
+ if (buf_size <= RTE_PKTMBUF_HEADROOM) {
+ sfc_err(sa,
+ "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
+ mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
+ return 0;
+ }
+ buf_size -= RTE_PKTMBUF_HEADROOM;
+
+ /* Calculate guaranteed data start alignment */
+ buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);
+
+ /* Reserve space for start alignment */
+ if (buf_aligned < nic_align_start) {
+ start_alignment = nic_align_start - buf_aligned;
+ if (buf_size <= start_alignment) {
+ sfc_err(sa,
+ "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
+ mb_pool->name,
+ rte_pktmbuf_data_room_size(mb_pool),
+ RTE_PKTMBUF_HEADROOM, start_alignment);
+ return 0;
+ }
+ buf_aligned = nic_align_start;
+ buf_size -= start_alignment;
+ } else {
+ start_alignment = 0;
+ }
+
+ /* Make sure that end padding does not write beyond the buffer */
+ if (buf_aligned < nic_align_end) {
+ /*
+ * Estimate space which can be lost. If guarnteed buffer
+ * size is odd, lost space is (nic_align_end - 1). More
+ * accurate formula is below.
+ */
+ end_padding_alignment = nic_align_end -
+ MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
+ if (buf_size <= end_padding_alignment) {
+ sfc_err(sa,
+ "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
+ mb_pool->name,
+ rte_pktmbuf_data_room_size(mb_pool),
+ RTE_PKTMBUF_HEADROOM, start_alignment,
+ end_padding_alignment);
+ return 0;
+ }
+ buf_size -= end_padding_alignment;
+ } else {
+ /*
+ * Start is aligned the same or better than end,
+ * just align length.
+ */
+ buf_size = P2ALIGN(buf_size, nic_align_end);
+ }
+
+ return buf_size;
+}
+
+int
+sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ int rc;
+ uint16_t buf_size;
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_evq *evq;
+ struct sfc_rxq *rxq;
+ struct sfc_dp_rx_qcreate_info info;
+
+ rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);
+ if (rc != 0)
+ goto fail_bad_conf;
+
+ buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
+ if (buf_size == 0) {
+ sfc_err(sa, "RxQ %u mbuf pool object size is too small",
+ sw_index);
+ rc = EINVAL;
+ goto fail_bad_conf;
+ }
+
+ if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
+ !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
+ sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
+ "object size is too small", sw_index);
+ sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
+ "PDU size %u plus Rx prefix %u bytes",
+ sw_index, buf_size, (unsigned int)sa->port.pdu,
+ encp->enc_rx_prefix_size);
+ rc = EINVAL;
+ goto fail_bad_conf;
+ }
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+ rxq_info = &sa->rxq_info[sw_index];
+
+ SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
+ rxq_info->entries = nb_rx_desc;
+ rxq_info->type =
+ sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
+ EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT;
+
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
+ rxq_info->entries, socket_id, &evq);
+ if (rc != 0)
+ goto fail_ev_qinit;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ rxq_info->rxq = rxq;
+
+ rxq->evq = evq;
+ rxq->hw_index = sw_index;
+ rxq->refill_threshold =
+ RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
+ rxq->refill_mb_pool = mb_pool;
+
+ rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
+ socket_id, &rxq->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ memset(&info, 0, sizeof(info));
+ info.refill_mb_pool = rxq->refill_mb_pool;
+ info.refill_threshold = rxq->refill_threshold;
+ info.buf_size = buf_size;
+ info.batch_max = encp->enc_rx_batch_max;
+ info.prefix_size = encp->enc_rx_prefix_size;
+
+#if EFSYS_OPT_RX_SCALE
+ if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
+ info.flags |= SFC_RXQ_FLAG_RSS_HASH;
+#endif
+
+ info.rxq_entries = rxq_info->entries;
+ info.rxq_hw_ring = rxq->mem.esm_base;
+ info.evq_entries = rxq_info->entries;
+ info.evq_hw_ring = evq->mem.esm_base;
+ info.hw_index = rxq->hw_index;
+ info.mem_bar = sa->mem_bar.esb_base;
+
+ rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
+ &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ socket_id, &info, &rxq->dp);
+ if (rc != 0)
+ goto fail_dp_rx_qcreate;
+
+ evq->dp_rxq = rxq->dp;
+
+ rxq->state = SFC_RXQ_INITIALIZED;
+
+ rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);
+
+ return 0;
+
+fail_dp_rx_qcreate:
+ sfc_dma_free(sa, &rxq->mem);
+
+fail_dma_alloc:
+ rxq_info->rxq = NULL;
+ rte_free(rxq);
+
+fail_rxq_alloc:
+ sfc_ev_qfini(evq);
+
+fail_ev_qinit:
+ rxq_info->entries = 0;
+
+fail_bad_conf:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[sw_index];
+
+ rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
+
+ sa->dp_rx->qdestroy(rxq->dp);
+ rxq->dp = NULL;
+
+ rxq_info->rxq = NULL;
+ rxq_info->entries = 0;
+
+ sfc_dma_free(sa, &rxq->mem);
+
+ sfc_ev_qfini(rxq->evq);
+ rxq->evq = NULL;
+
+ rte_free(rxq);
+}
+
+#if EFSYS_OPT_RX_SCALE
+efx_rx_hash_type_t
+sfc_rte_to_efx_hash_type(uint64_t rss_hf)
+{
+ efx_rx_hash_type_t efx_hash_types = 0;
+
+ if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
+ efx_hash_types |= EFX_RX_HASH_IPV4;
+
+ if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
+ efx_hash_types |= EFX_RX_HASH_TCPIPV4;
+
+ if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
+ efx_hash_types |= EFX_RX_HASH_IPV6;
+
+ if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
+ efx_hash_types |= EFX_RX_HASH_TCPIPV6;
+
+ return efx_hash_types;
+}
+
+uint64_t
+sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
+{
+ uint64_t rss_hf = 0;
+
+ if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
+ rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER);
+
+ if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+
+ if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
+ rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);
+
+ if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
+ rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);
+
+ return rss_hf;
+}
+#endif
+
+static int
+sfc_rx_rss_config(struct sfc_adapter *sa)
+{
+ int rc = 0;
+
+#if EFSYS_OPT_RX_SCALE
+ if (sa->rss_channels > 0) {
+ rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ sa->rss_hash_types, B_TRUE);
+ if (rc != 0)
+ goto finish;
+
+ rc = efx_rx_scale_key_set(sa->nic, sa->rss_key,
+ sizeof(sa->rss_key));
+ if (rc != 0)
+ goto finish;
+
+ rc = efx_rx_scale_tbl_set(sa->nic, sa->rss_tbl,
+ sizeof(sa->rss_tbl));
+ }
+
+finish:
+#endif
+ return rc;
+}
+
+int
+sfc_rx_start(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+ int rc;
+
+ sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
+
+ rc = efx_rx_init(sa->nic);
+ if (rc != 0)
+ goto fail_rx_init;
+
+ rc = sfc_rx_rss_config(sa);
+ if (rc != 0)
+ goto fail_rss_config;
+
+ for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
+ if ((!sa->rxq_info[sw_index].deferred_start ||
+ sa->rxq_info[sw_index].deferred_started)) {
+ rc = sfc_rx_qstart(sa, sw_index);
+ if (rc != 0)
+ goto fail_rx_qstart;
+ }
+ }
+
+ return 0;
+
+fail_rx_qstart:
+ while (sw_index-- > 0)
+ sfc_rx_qstop(sa, sw_index);
+
+fail_rss_config:
+ efx_rx_fini(sa->nic);
+
+fail_rx_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_rx_stop(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+
+ sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
+
+ sw_index = sa->rxq_count;
+ while (sw_index-- > 0) {
+ if (sa->rxq_info[sw_index].rxq != NULL)
+ sfc_rx_qstop(sa, sw_index);
+ }
+
+ efx_rx_fini(sa->nic);
+}
+
+static int
+sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
+ unsigned int max_entries;
+
+ max_entries = EFX_RXQ_MAXNDESCS;
+ SFC_ASSERT(rte_is_power_of_2(max_entries));
+
+ rxq_info->max_entries = max_entries;
+
+ return 0;
+}
+
+static int
+sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
+{
+ int rc = 0;
+
+ switch (rxmode->mq_mode) {
+ case ETH_MQ_RX_NONE:
+ /* No special checks are required */
+ break;
+#if EFSYS_OPT_RX_SCALE
+ case ETH_MQ_RX_RSS:
+ if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ sfc_err(sa, "RSS is not available");
+ rc = EINVAL;
+ }
+ break;
+#endif
+ default:
+ sfc_err(sa, "Rx multi-queue mode %u not supported",
+ rxmode->mq_mode);
+ rc = EINVAL;
+ }
+
+ if (rxmode->header_split) {
+ sfc_err(sa, "Header split on Rx not supported");
+ rc = EINVAL;
+ }
+
+ if (rxmode->hw_vlan_filter) {
+ sfc_err(sa, "HW VLAN filtering not supported");
+ rc = EINVAL;
+ }
+
+ if (rxmode->hw_vlan_strip) {
+ sfc_err(sa, "HW VLAN stripping not supported");
+ rc = EINVAL;
+ }
+
+ if (rxmode->hw_vlan_extend) {
+ sfc_err(sa,
+ "Q-in-Q HW VLAN stripping not supported");
+ rc = EINVAL;
+ }
+
+ if (!rxmode->hw_strip_crc) {
+ sfc_warn(sa,
+ "FCS stripping control not supported - always stripped");
+ rxmode->hw_strip_crc = 1;
+ }
+
+ if (rxmode->enable_scatter &&
+ (~sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)) {
+ sfc_err(sa, "Rx scatter not supported by %s datapath",
+ sa->dp_rx->dp.name);
+ rc = EINVAL;
+ }
+
+ if (rxmode->enable_lro) {
+ sfc_err(sa, "LRO not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+/**
+ * Destroy excess queues that are no longer needed after reconfiguration
+ * or complete close.
+ */
+static void
+sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
+{
+ int sw_index;
+
+ SFC_ASSERT(nb_rx_queues <= sa->rxq_count);
+
+ sw_index = sa->rxq_count;
+ while (--sw_index >= (int)nb_rx_queues) {
+ if (sa->rxq_info[sw_index].rxq != NULL)
+ sfc_rx_qfini(sa, sw_index);
+ }
+
+ sa->rxq_count = nb_rx_queues;
+}
+
+/**
+ * Initialize Rx subsystem.
+ *
+ * Called at device (re)configuration stage when number of receive queues is
+ * specified together with other device level receive configuration.
+ *
+ * It should be used to allocate NUMA-unaware resources.
+ */
+int
+sfc_rx_configure(struct sfc_adapter *sa)
+{
+ struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
+ const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
+ unsigned int sw_index;
+ int rc;
+
+ sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
+ nb_rx_queues, sa->rxq_count);
+
+ rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
+ if (rc != 0)
+ goto fail_check_mode;
+
+ if (nb_rx_queues == sa->rxq_count)
+ goto done;
+
+ if (sa->rxq_info == NULL) {
+ rc = ENOMEM;
+ sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
+ sizeof(sa->rxq_info[0]), 0,
+ sa->socket_id);
+ if (sa->rxq_info == NULL)
+ goto fail_rxqs_alloc;
+ } else {
+ struct sfc_rxq_info *new_rxq_info;
+
+ if (nb_rx_queues < sa->rxq_count)
+ sfc_rx_fini_queues(sa, nb_rx_queues);
+
+ rc = ENOMEM;
+ new_rxq_info =
+ rte_realloc(sa->rxq_info,
+ nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
+ if (new_rxq_info == NULL && nb_rx_queues > 0)
+ goto fail_rxqs_realloc;
+
+ sa->rxq_info = new_rxq_info;
+ if (nb_rx_queues > sa->rxq_count)
+ memset(&sa->rxq_info[sa->rxq_count], 0,
+ (nb_rx_queues - sa->rxq_count) *
+ sizeof(sa->rxq_info[0]));
+ }
+
+ while (sa->rxq_count < nb_rx_queues) {
+ rc = sfc_rx_qinit_info(sa, sa->rxq_count);
+ if (rc != 0)
+ goto fail_rx_qinit_info;
+
+ sa->rxq_count++;
+ }
+
+#if EFSYS_OPT_RX_SCALE
+ sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+ MIN(sa->rxq_count, EFX_MAXRSS) : 0;
+
+ if (sa->rss_channels > 0) {
+ for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
+ sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
+ }
+#endif
+
+done:
+ return 0;
+
+fail_rx_qinit_info:
+fail_rxqs_realloc:
+fail_rxqs_alloc:
+ sfc_rx_close(sa);
+
+fail_check_mode:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+/**
+ * Shutdown Rx subsystem.
+ *
+ * Called at device close stage, for example, before device shutdown.
+ */
+void
+sfc_rx_close(struct sfc_adapter *sa)
+{
+ sfc_rx_fini_queues(sa, 0);
+
+ sa->rss_channels = 0;
+
+ rte_free(sa->rxq_info);
+ sa->rxq_info = NULL;
+}
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
new file mode 100644
index 00000000..9e6282ea
--- /dev/null
+++ b/drivers/net/sfc/sfc_rx.h
@@ -0,0 +1,180 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_RX_H
+#define _SFC_RX_H
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_ethdev.h>
+
+#include "efx.h"
+
+#include "sfc_dp_rx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_adapter;
+struct sfc_evq;
+
+/**
+ * Software Rx descriptor information associated with hardware Rx
+ * descriptor.
+ */
+struct sfc_efx_rx_sw_desc {
+ struct rte_mbuf *mbuf;
+ unsigned int flags;
+ unsigned int size;
+};
+
+/** Receive queue state bits */
+enum sfc_rxq_state_bit {
+ SFC_RXQ_INITIALIZED_BIT = 0,
+#define SFC_RXQ_INITIALIZED (1 << SFC_RXQ_INITIALIZED_BIT)
+ SFC_RXQ_STARTED_BIT,
+#define SFC_RXQ_STARTED (1 << SFC_RXQ_STARTED_BIT)
+ SFC_RXQ_FLUSHING_BIT,
+#define SFC_RXQ_FLUSHING (1 << SFC_RXQ_FLUSHING_BIT)
+ SFC_RXQ_FLUSHED_BIT,
+#define SFC_RXQ_FLUSHED (1 << SFC_RXQ_FLUSHED_BIT)
+ SFC_RXQ_FLUSH_FAILED_BIT,
+#define SFC_RXQ_FLUSH_FAILED (1 << SFC_RXQ_FLUSH_FAILED_BIT)
+};
+
+/**
+ * Receive queue control information.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_rxq {
+ struct sfc_evq *evq;
+ efx_rxq_t *common;
+ efsys_mem_t mem;
+ unsigned int hw_index;
+ unsigned int refill_threshold;
+ struct rte_mempool *refill_mb_pool;
+ struct sfc_dp_rxq *dp;
+ unsigned int state;
+};
+
+static inline unsigned int
+sfc_rxq_sw_index_by_hw_index(unsigned int hw_index)
+{
+ return hw_index;
+}
+
+static inline unsigned int
+sfc_rxq_sw_index(const struct sfc_rxq *rxq)
+{
+ return sfc_rxq_sw_index_by_hw_index(rxq->hw_index);
+}
+
+struct sfc_rxq *sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);
+
+/**
+ * Receive queue information used on libefx-based data path.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_efx_rxq {
+ /* Used on data path */
+ struct sfc_evq *evq;
+ unsigned int flags;
+#define SFC_EFX_RXQ_FLAG_STARTED 0x1
+#define SFC_EFX_RXQ_FLAG_RUNNING 0x2
+#define SFC_EFX_RXQ_FLAG_RSS_HASH 0x4
+ unsigned int ptr_mask;
+ unsigned int pending;
+ unsigned int completed;
+ uint16_t batch_max;
+ uint16_t prefix_size;
+ struct sfc_efx_rx_sw_desc *sw_desc;
+
+ /* Used on refill */
+ unsigned int added;
+ unsigned int pushed;
+ unsigned int refill_threshold;
+ uint16_t buf_size;
+ struct rte_mempool *refill_mb_pool;
+ efx_rxq_t *common;
+
+ /* Datapath receive queue anchor */
+ struct sfc_dp_rxq dp;
+};
+
+static inline struct sfc_efx_rxq *
+sfc_efx_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+{
+ return container_of(dp_rxq, struct sfc_efx_rxq, dp);
+}
+
+/**
+ * Receive queue information used during setup/release only.
+ * Allocated on the same socket as adapter data.
+ */
+struct sfc_rxq_info {
+ unsigned int max_entries;
+ unsigned int entries;
+ efx_rxq_type_t type;
+ struct sfc_rxq *rxq;
+ boolean_t deferred_start;
+ boolean_t deferred_started;
+};
+
+int sfc_rx_configure(struct sfc_adapter *sa);
+void sfc_rx_close(struct sfc_adapter *sa);
+int sfc_rx_start(struct sfc_adapter *sa);
+void sfc_rx_stop(struct sfc_adapter *sa);
+
+int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
+int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
+void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+
+void sfc_rx_qflush_done(struct sfc_rxq *rxq);
+void sfc_rx_qflush_failed(struct sfc_rxq *rxq);
+
+unsigned int sfc_rx_qdesc_npending(struct sfc_adapter *sa,
+ unsigned int sw_index);
+int sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset);
+
+#if EFSYS_OPT_RX_SCALE
+efx_rx_hash_type_t sfc_rte_to_efx_hash_type(uint64_t rss_hf);
+uint64_t sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_RX_H */
diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
new file mode 100644
index 00000000..fb79d749
--- /dev/null
+++ b/drivers/net/sfc/sfc_tso.c
@@ -0,0 +1,201 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ip.h>
+#include <rte_tcp.h>
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_tx.h"
+#include "sfc_ev.h"
+
+/** Standard TSO header length */
+#define SFC_TSOH_STD_LEN 256
+
+/** The number of TSO option descriptors that precede the packet descriptors */
+#define SFC_TSO_OPDESCS_IDX_SHIFT 2
+
+int
+sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries, unsigned int socket_id)
+{
+ unsigned int i;
+
+ for (i = 0; i < txq_entries; ++i) {
+ sw_ring[i].tsoh = rte_malloc_socket("sfc-efx-txq-tsoh-obj",
+ SFC_TSOH_STD_LEN,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (sw_ring[i].tsoh == NULL)
+ goto fail_alloc_tsoh_objs;
+ }
+
+ return 0;
+
+fail_alloc_tsoh_objs:
+ while (i > 0)
+ rte_free(sw_ring[--i].tsoh);
+
+ return ENOMEM;
+}
+
+void
+sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries)
+{
+ unsigned int i;
+
+ for (i = 0; i < txq_entries; ++i) {
+ rte_free(sw_ring[i].tsoh);
+ sw_ring[i].tsoh = NULL;
+ }
+}
+
+static void
+sfc_efx_tso_prepare_header(struct sfc_efx_txq *txq, struct rte_mbuf **in_seg,
+ size_t *in_off, unsigned int idx, size_t bytes_left)
+{
+ struct rte_mbuf *m = *in_seg;
+ size_t bytes_to_copy = 0;
+ uint8_t *tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
+
+ do {
+ bytes_to_copy = MIN(bytes_left, m->data_len);
+
+ rte_memcpy(tsoh, rte_pktmbuf_mtod(m, uint8_t *),
+ bytes_to_copy);
+
+ bytes_left -= bytes_to_copy;
+ tsoh += bytes_to_copy;
+
+ if (bytes_left > 0) {
+ m = m->next;
+ SFC_ASSERT(m != NULL);
+ }
+ } while (bytes_left > 0);
+
+ if (bytes_to_copy == m->data_len) {
+ *in_seg = m->next;
+ *in_off = 0;
+ } else {
+ *in_seg = m;
+ *in_off = bytes_to_copy;
+ }
+}
+
+int
+sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
+ struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
+ unsigned int *pkt_descs, size_t *pkt_len)
+{
+ uint8_t *tsoh;
+ const struct tcp_hdr *th;
+ efsys_dma_addr_t header_paddr;
+ uint16_t packet_id;
+ uint32_t sent_seq;
+ struct rte_mbuf *m = *in_seg;
+ size_t nh_off = m->l2_len; /* IP header offset */
+ size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
+ size_t header_len = m->l2_len + m->l3_len + m->l4_len;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
+
+ idx += SFC_TSO_OPDESCS_IDX_SHIFT;
+
+ /* Packets which have too big headers should be discarded */
+ if (unlikely(header_len > SFC_TSOH_STD_LEN))
+ return EMSGSIZE;
+
+ /*
+ * The TCP header must start at most 208 bytes into the frame.
+ * If it starts later than this then the NIC won't realise
+ * it's a TCP packet and TSO edits won't be applied
+ */
+ if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
+ return EMSGSIZE;
+
+ header_paddr = rte_pktmbuf_mtophys(m);
+
+ /*
+ * Sometimes headers may be split across multiple mbufs. In such cases
+ * we need to glue those pieces and store them in some temporary place.
+ * Also, packet headers must be contiguous in memory, so that
+ * they can be referred to with a single DMA descriptor. EF10 has no
+ * limitations on address boundaries crossing by DMA descriptor data.
+ */
+ if (m->data_len < header_len) {
+ sfc_efx_tso_prepare_header(txq, in_seg, in_off, idx,
+ header_len);
+ tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
+
+ header_paddr = rte_malloc_virt2phy((void *)tsoh);
+ } else {
+ if (m->data_len == header_len) {
+ *in_off = 0;
+ *in_seg = m->next;
+ } else {
+ *in_off = header_len;
+ }
+
+ tsoh = rte_pktmbuf_mtod(m, uint8_t *);
+ }
+
+ /* Handle IP header */
+ if (m->ol_flags & PKT_TX_IPV4) {
+ const struct ipv4_hdr *iphe4;
+
+ iphe4 = (const struct ipv4_hdr *)(tsoh + nh_off);
+ rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
+ packet_id = rte_be_to_cpu_16(packet_id);
+ } else if (m->ol_flags & PKT_TX_IPV6) {
+ packet_id = 0;
+ } else {
+ return EINVAL;
+ }
+
+ /* Handle TCP header */
+ th = (const struct tcp_hdr *)(tsoh + tcph_off);
+
+ rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
+ sent_seq = rte_be_to_cpu_32(sent_seq);
+
+ efx_tx_qdesc_tso2_create(txq->common, packet_id, sent_seq, m->tso_segsz,
+ *pend, EFX_TX_FATSOV2_OPT_NDESCS);
+
+ *pend += EFX_TX_FATSOV2_OPT_NDESCS;
+ *pkt_descs += EFX_TX_FATSOV2_OPT_NDESCS;
+
+ efx_tx_qdesc_dma_create(txq->common, header_paddr, header_len,
+ B_FALSE, (*pend)++);
+ (*pkt_descs)++;
+ *pkt_len -= header_len;
+
+ return 0;
+}
diff --git a/drivers/net/sfc/sfc_tweak.h b/drivers/net/sfc/sfc_tweak.h
new file mode 100644
index 00000000..4ef7fc8b
--- /dev/null
+++ b/drivers/net/sfc/sfc_tweak.h
@@ -0,0 +1,56 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_TWEAK_H_
+#define _SFC_TWEAK_H_
+
+/*
+ * The header is intended to collect defines/constants which could be
+ * tweaked to improve the PMD performance characteristics depending on
+ * the usecase or requirements (CPU load, packet rate, latency).
+ */
+
+/**
+ * Number of Rx descriptors in the bulk submitted on Rx ring refill.
+ */
+#define SFC_RX_REFILL_BULK (RTE_CACHE_LINE_SIZE / sizeof(efx_qword_t))
+
+/**
+ * Make the transmit path reap at least one time per a burst;
+ * this improves cache locality because the same mbufs may be used to send
+ * subsequent bursts in certain cases because of well-timed reap
+ */
+#define SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE 0
+
+/** Default free threshold follows recommendations from DPDK documentation */
+#define SFC_TX_DEFAULT_FREE_THRESH 32
+
+#endif /* _SFC_TWEAK_H_ */
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
new file mode 100644
index 00000000..b8581d14
--- /dev/null
+++ b/drivers/net/sfc/sfc_tx.c
@@ -0,0 +1,992 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_tx.h"
+#include "sfc_tweak.h"
+#include "sfc_kvargs.h"
+
+/*
+ * Maximum number of TX queue flush attempts in case of
+ * failure or flush timeout
+ */
+#define SFC_TX_QFLUSH_ATTEMPTS (3)
+
+/*
+ * Time to wait between event queue polling attempts when waiting for TX
+ * queue flush done or flush failed events
+ */
+#define SFC_TX_QFLUSH_POLL_WAIT_MS (1)
+
+/*
+ * Maximum number of event queue polling attempts when waiting for TX queue
+ * flush done or flush failed events; it defines TX queue flush attempt timeout
+ * together with SFC_TX_QFLUSH_POLL_WAIT_MS
+ */
+#define SFC_TX_QFLUSH_POLL_ATTEMPTS (2000)
+
+static int
+sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
+ const struct rte_eth_txconf *tx_conf)
+{
+ unsigned int flags = tx_conf->txq_flags;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ int rc = 0;
+
+ if (tx_conf->tx_rs_thresh != 0) {
+ sfc_err(sa, "RS bit in transmit descriptor is not supported");
+ rc = EINVAL;
+ }
+
+ if (tx_conf->tx_free_thresh > EFX_TXQ_LIMIT(nb_tx_desc)) {
+ sfc_err(sa,
+ "TxQ free threshold too large: %u vs maximum %u",
+ tx_conf->tx_free_thresh, EFX_TXQ_LIMIT(nb_tx_desc));
+ rc = EINVAL;
+ }
+
+ if (tx_conf->tx_thresh.pthresh != 0 ||
+ tx_conf->tx_thresh.hthresh != 0 ||
+ tx_conf->tx_thresh.wthresh != 0) {
+ sfc_err(sa,
+ "prefetch/host/writeback thresholds are not supported");
+ rc = EINVAL;
+ }
+
+ if (((flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) &&
+ (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)) {
+ sfc_err(sa, "Multi-segment is not supported by %s datapath",
+ sa->dp_tx->dp.name);
+ rc = EINVAL;
+ }
+
+ if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
+ if (!encp->enc_hw_tx_insert_vlan_enabled) {
+ sfc_err(sa, "VLAN offload is not supported");
+ rc = EINVAL;
+ } else if (~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) {
+ sfc_err(sa,
+ "VLAN offload is not supported by %s datapath",
+ sa->dp_tx->dp.name);
+ rc = EINVAL;
+ }
+ }
+
+ if ((flags & ETH_TXQ_FLAGS_NOXSUMSCTP) == 0) {
+ sfc_err(sa, "SCTP offload is not supported");
+ rc = EINVAL;
+ }
+
+ /* We either perform both TCP and UDP offload, or no offload at all */
+ if (((flags & ETH_TXQ_FLAGS_NOXSUMTCP) == 0) !=
+ ((flags & ETH_TXQ_FLAGS_NOXSUMUDP) == 0)) {
+ sfc_err(sa, "TCP and UDP offloads can't be set independently");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+void
+sfc_tx_qflush_done(struct sfc_txq *txq)
+{
+ txq->state |= SFC_TXQ_FLUSHED;
+ txq->state &= ~SFC_TXQ_FLUSHING;
+}
+
+int
+sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ struct sfc_txq_info *txq_info;
+ struct sfc_evq *evq;
+ struct sfc_txq *txq;
+ int rc = 0;
+ struct sfc_dp_tx_qcreate_info info;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ rc = sfc_tx_qcheck_conf(sa, nb_tx_desc, tx_conf);
+ if (rc != 0)
+ goto fail_bad_conf;
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
+ txq_info->entries = nb_tx_desc;
+
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
+ txq_info->entries, socket_id, &evq);
+ if (rc != 0)
+ goto fail_ev_qinit;
+
+ rc = ENOMEM;
+ txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
+ if (txq == NULL)
+ goto fail_txq_alloc;
+
+ txq_info->txq = txq;
+
+ txq->hw_index = sw_index;
+ txq->evq = evq;
+ txq->free_thresh =
+ (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
+ SFC_TX_DEFAULT_FREE_THRESH;
+ txq->flags = tx_conf->txq_flags;
+
+ rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
+ socket_id, &txq->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ memset(&info, 0, sizeof(info));
+ info.free_thresh = txq->free_thresh;
+ info.flags = tx_conf->txq_flags;
+ info.txq_entries = txq_info->entries;
+ info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
+ info.txq_hw_ring = txq->mem.esm_base;
+ info.evq_entries = txq_info->entries;
+ info.evq_hw_ring = evq->mem.esm_base;
+ info.hw_index = txq->hw_index;
+ info.mem_bar = sa->mem_bar.esb_base;
+
+ rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
+ &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ socket_id, &info, &txq->dp);
+ if (rc != 0)
+ goto fail_dp_tx_qinit;
+
+ evq->dp_txq = txq->dp;
+
+ txq->state = SFC_TXQ_INITIALIZED;
+
+ txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);
+
+ return 0;
+
+fail_dp_tx_qinit:
+ sfc_dma_free(sa, &txq->mem);
+
+fail_dma_alloc:
+ txq_info->txq = NULL;
+ rte_free(txq);
+
+fail_txq_alloc:
+ sfc_ev_qfini(evq);
+
+fail_ev_qinit:
+ txq_info->entries = 0;
+
+fail_bad_conf:
+ sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
+ return rc;
+}
+
+void
+sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_txq_info *txq_info;
+ struct sfc_txq *txq;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ txq = txq_info->txq;
+ SFC_ASSERT(txq != NULL);
+ SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
+
+ sa->dp_tx->qdestroy(txq->dp);
+ txq->dp = NULL;
+
+ txq_info->txq = NULL;
+ txq_info->entries = 0;
+
+ sfc_dma_free(sa, &txq->mem);
+
+ sfc_ev_qfini(txq->evq);
+ txq->evq = NULL;
+
+ rte_free(txq);
+}
+
+static int
+sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ return 0;
+}
+
+static int
+sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
+{
+ int rc = 0;
+
+ switch (txmode->mq_mode) {
+ case ETH_MQ_TX_NONE:
+ break;
+ default:
+ sfc_err(sa, "Tx multi-queue mode %u not supported",
+ txmode->mq_mode);
+ rc = EINVAL;
+ }
+
+ /*
+ * These features are claimed to be i40e-specific,
+ * but it does make sense to double-check their absence
+ */
+ if (txmode->hw_vlan_reject_tagged) {
+ sfc_err(sa, "Rejecting tagged packets not supported");
+ rc = EINVAL;
+ }
+
+ if (txmode->hw_vlan_reject_untagged) {
+ sfc_err(sa, "Rejecting untagged packets not supported");
+ rc = EINVAL;
+ }
+
+ if (txmode->hw_vlan_insert_pvid) {
+ sfc_err(sa, "Port-based VLAN insertion not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+/**
+ * Destroy excess queues that are no longer needed after reconfiguration
+ * or complete close.
+ */
+static void
+sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
+{
+ int sw_index;
+
+ SFC_ASSERT(nb_tx_queues <= sa->txq_count);
+
+ sw_index = sa->txq_count;
+ while (--sw_index >= (int)nb_tx_queues) {
+ if (sa->txq_info[sw_index].txq != NULL)
+ sfc_tx_qfini(sa, sw_index);
+ }
+
+ sa->txq_count = nb_tx_queues;
+}
+
+int
+sfc_tx_configure(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
+ const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
+ int rc = 0;
+
+ sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
+ nb_tx_queues, sa->txq_count);
+
+ /*
+ * The datapath implementation assumes absence of boundary
+ * limits on Tx DMA descriptors. Addition of these checks on
+ * datapath would simply make the datapath slower.
+ */
+ if (encp->enc_tx_dma_desc_boundary != 0) {
+ rc = ENOTSUP;
+ goto fail_tx_dma_desc_boundary;
+ }
+
+ rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
+ if (rc != 0)
+ goto fail_check_mode;
+
+ if (nb_tx_queues == sa->txq_count)
+ goto done;
+
+ if (sa->txq_info == NULL) {
+ sa->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
+ sizeof(sa->txq_info[0]), 0,
+ sa->socket_id);
+ if (sa->txq_info == NULL)
+ goto fail_txqs_alloc;
+ } else {
+ struct sfc_txq_info *new_txq_info;
+
+ if (nb_tx_queues < sa->txq_count)
+ sfc_tx_fini_queues(sa, nb_tx_queues);
+
+ new_txq_info =
+ rte_realloc(sa->txq_info,
+ nb_tx_queues * sizeof(sa->txq_info[0]), 0);
+ if (new_txq_info == NULL && nb_tx_queues > 0)
+ goto fail_txqs_realloc;
+
+ sa->txq_info = new_txq_info;
+ if (nb_tx_queues > sa->txq_count)
+ memset(&sa->txq_info[sa->txq_count], 0,
+ (nb_tx_queues - sa->txq_count) *
+ sizeof(sa->txq_info[0]));
+ }
+
+ while (sa->txq_count < nb_tx_queues) {
+ rc = sfc_tx_qinit_info(sa, sa->txq_count);
+ if (rc != 0)
+ goto fail_tx_qinit_info;
+
+ sa->txq_count++;
+ }
+
+done:
+ return 0;
+
+fail_tx_qinit_info:
+fail_txqs_realloc:
+fail_txqs_alloc:
+ sfc_tx_close(sa);
+
+fail_check_mode:
+fail_tx_dma_desc_boundary:
+ sfc_log_init(sa, "failed (rc = %d)", rc);
+ return rc;
+}
+
+void
+sfc_tx_close(struct sfc_adapter *sa)
+{
+ sfc_tx_fini_queues(sa, 0);
+
+ rte_free(sa->txq_info);
+ sa->txq_info = NULL;
+}
+
+int
+sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct rte_eth_dev_data *dev_data;
+ struct sfc_txq_info *txq_info;
+ struct sfc_txq *txq;
+ struct sfc_evq *evq;
+ uint16_t flags;
+ unsigned int desc_index;
+ int rc = 0;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ txq = txq_info->txq;
+
+ SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
+
+ evq = txq->evq;
+
+ rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
+ if (rc != 0)
+ goto fail_ev_qstart;
+
+ /*
+ * It seems that DPDK has no controls regarding IPv4 offloads,
+ * hence, we always enable it here
+ */
+ if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
+ (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP)) {
+ flags = EFX_TXQ_CKSUM_IPV4;
+ } else {
+ flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
+
+ if (sa->tso)
+ flags |= EFX_TXQ_FATSOV2;
+ }
+
+ rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
+ txq_info->entries, 0 /* not used on EF10 */,
+ flags, evq->common,
+ &txq->common, &desc_index);
+ if (rc != 0) {
+ if (sa->tso && (rc == ENOSPC))
+ sfc_err(sa, "ran out of TSO contexts");
+
+ goto fail_tx_qcreate;
+ }
+
+ efx_tx_qenable(txq->common);
+
+ txq->state |= SFC_TXQ_STARTED;
+
+ rc = sa->dp_tx->qstart(txq->dp, evq->read_ptr, desc_index);
+ if (rc != 0)
+ goto fail_dp_qstart;
+
+ /*
+ * It seems to be used by DPDK for debug purposes only ('rte_ether')
+ */
+ dev_data = sa->eth_dev->data;
+ dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+
+fail_dp_qstart:
+ txq->state = SFC_TXQ_INITIALIZED;
+ efx_tx_qdestroy(txq->common);
+
+fail_tx_qcreate:
+ sfc_ev_qstop(evq);
+
+fail_ev_qstart:
+ return rc;
+}
+
+void
+sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct rte_eth_dev_data *dev_data;
+ struct sfc_txq_info *txq_info;
+ struct sfc_txq *txq;
+ unsigned int retry_count;
+ unsigned int wait_count;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ txq = txq_info->txq;
+
+ if (txq->state == SFC_TXQ_INITIALIZED)
+ return;
+
+ SFC_ASSERT(txq->state & SFC_TXQ_STARTED);
+
+ sa->dp_tx->qstop(txq->dp, &txq->evq->read_ptr);
+
+ /*
+ * Retry TX queue flushing in case of flush failed or
+ * timeout; in the worst case it can delay for 6 seconds
+ */
+ for (retry_count = 0;
+ ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
+ (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
+ ++retry_count) {
+ if (efx_tx_qflush(txq->common) != 0) {
+ txq->state |= SFC_TXQ_FLUSHING;
+ break;
+ }
+
+ /*
+ * Wait for TX queue flush done or flush failed event at least
+ * SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and not more
+ * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
+ * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
+ */
+ wait_count = 0;
+ do {
+ rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
+ sfc_ev_qpoll(txq->evq);
+ } while ((txq->state & SFC_TXQ_FLUSHING) &&
+ wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);
+
+ if (txq->state & SFC_TXQ_FLUSHING)
+ sfc_err(sa, "TxQ %u flush timed out", sw_index);
+
+ if (txq->state & SFC_TXQ_FLUSHED)
+ sfc_info(sa, "TxQ %u flushed", sw_index);
+ }
+
+ sa->dp_tx->qreap(txq->dp);
+
+ txq->state = SFC_TXQ_INITIALIZED;
+
+ efx_tx_qdestroy(txq->common);
+
+ sfc_ev_qstop(txq->evq);
+
+ /*
+ * It seems to be used by DPDK for debug purposes only ('rte_ether')
+ */
+ dev_data = sa->eth_dev->data;
+ dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
+}
+
+int
+sfc_tx_start(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+ int rc = 0;
+
+ sfc_log_init(sa, "txq_count = %u", sa->txq_count);
+
+ if (sa->tso) {
+ if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
+ sfc_warn(sa, "TSO support was unable to be restored");
+ sa->tso = B_FALSE;
+ }
+ }
+
+ rc = efx_tx_init(sa->nic);
+ if (rc != 0)
+ goto fail_efx_tx_init;
+
+ for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
+ if (!(sa->txq_info[sw_index].deferred_start) ||
+ sa->txq_info[sw_index].deferred_started) {
+ rc = sfc_tx_qstart(sa, sw_index);
+ if (rc != 0)
+ goto fail_tx_qstart;
+ }
+ }
+
+ return 0;
+
+fail_tx_qstart:
+ while (sw_index-- > 0)
+ sfc_tx_qstop(sa, sw_index);
+
+ efx_tx_fini(sa->nic);
+
+fail_efx_tx_init:
+ sfc_log_init(sa, "failed (rc = %d)", rc);
+ return rc;
+}
+
+void
+sfc_tx_stop(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+
+ sfc_log_init(sa, "txq_count = %u", sa->txq_count);
+
+ sw_index = sa->txq_count;
+ while (sw_index-- > 0) {
+ if (sa->txq_info[sw_index].txq != NULL)
+ sfc_tx_qstop(sa, sw_index);
+ }
+
+ efx_tx_fini(sa->nic);
+}
+
+static void
+sfc_efx_tx_reap(struct sfc_efx_txq *txq)
+{
+ unsigned int completed;
+
+ sfc_ev_qpoll(txq->evq);
+
+ for (completed = txq->completed;
+ completed != txq->pending; completed++) {
+ struct sfc_efx_tx_sw_desc *txd;
+
+ txd = &txq->sw_ring[completed & txq->ptr_mask];
+
+ if (txd->mbuf != NULL) {
+ rte_pktmbuf_free(txd->mbuf);
+ txd->mbuf = NULL;
+ }
+ }
+
+ txq->completed = completed;
+}
+
+/*
+ * The function is used to insert or update VLAN tag;
+ * the firmware has state of the firmware tag to insert per TxQ
+ * (controlled by option descriptors), hence, if the tag of the
+ * packet to be sent is different from one remembered by the firmware,
+ * the function will update it
+ */
+static unsigned int
+sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
+ efx_desc_t **pend)
+{
+ uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
+ m->vlan_tci : 0);
+
+ if (this_tag == txq->hw_vlan_tci)
+ return 0;
+
+ /*
+ * The expression inside SFC_ASSERT() is not desired to be checked in
+ * a non-debug build because it might be too expensive on the data path
+ */
+ SFC_ASSERT(efx_nic_cfg_get(txq->evq->sa->nic)->enc_hw_tx_insert_vlan_enabled);
+
+ efx_tx_qdesc_vlantci_create(txq->common, rte_cpu_to_be_16(this_tag),
+ *pend);
+ (*pend)++;
+ txq->hw_vlan_tci = this_tag;
+
+ return 1;
+}
+
+static uint16_t
+sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_dp_txq *dp_txq = (struct sfc_dp_txq *)tx_queue;
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ unsigned int added = txq->added;
+ unsigned int pushed = added;
+ unsigned int pkts_sent = 0;
+ efx_desc_t *pend = &txq->pend_desc[0];
+ const unsigned int hard_max_fill = EFX_TXQ_LIMIT(txq->ptr_mask + 1);
+ const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh;
+ unsigned int fill_level = added - txq->completed;
+ boolean_t reap_done;
+ int rc __rte_unused;
+ struct rte_mbuf **pktp;
+
+ if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) == 0))
+ goto done;
+
+ /*
+ * If insufficient space for a single packet is present,
+ * we should reap; otherwise, we shouldn't do that all the time
+ * to avoid latency increase
+ */
+ reap_done = (fill_level > soft_max_fill);
+
+ if (reap_done) {
+ sfc_efx_tx_reap(txq);
+ /*
+ * Recalculate fill level since 'txq->completed'
+ * might have changed on reap
+ */
+ fill_level = added - txq->completed;
+ }
+
+ for (pkts_sent = 0, pktp = &tx_pkts[0];
+ (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
+ pkts_sent++, pktp++) {
+ struct rte_mbuf *m_seg = *pktp;
+ size_t pkt_len = m_seg->pkt_len;
+ unsigned int pkt_descs = 0;
+ size_t in_off = 0;
+
+ /*
+ * Here VLAN TCI is expected to be zero in case if no
+ * DEV_TX_VLAN_OFFLOAD capability is advertised;
+ * if the calling app ignores the absence of
+ * DEV_TX_VLAN_OFFLOAD and pushes VLAN TCI, then
+ * TX_ERROR will occur
+ */
+ pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
+
+ if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
+ /*
+ * We expect correct 'pkt->l[2, 3, 4]_len' values
+ * to be set correctly by the caller
+ */
+ if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
+ &pkt_descs, &pkt_len) != 0) {
+ /* We may have reached this place for
+ * one of the following reasons:
+ *
+ * 1) Packet header length is greater
+ * than SFC_TSOH_STD_LEN
+ * 2) TCP header starts at more then
+ * 208 bytes into the frame
+ *
+ * We will deceive RTE saying that we have sent
+ * the packet, but we will actually drop it.
+ * Hence, we should revert 'pend' to the
+ * previous state (in case we have added
+ * VLAN descriptor) and start processing
+ * another one packet. But the original
+ * mbuf shouldn't be orphaned
+ */
+ pend -= pkt_descs;
+
+ rte_pktmbuf_free(*pktp);
+
+ continue;
+ }
+
+ /*
+ * We've only added 2 FATSOv2 option descriptors
+ * and 1 descriptor for the linearized packet header.
+ * The outstanding work will be done in the same manner
+ * as for the usual non-TSO path
+ */
+ }
+
+ for (; m_seg != NULL; m_seg = m_seg->next) {
+ efsys_dma_addr_t next_frag;
+ size_t seg_len;
+
+ seg_len = m_seg->data_len;
+ next_frag = rte_mbuf_data_dma_addr(m_seg);
+
+ /*
+ * If we've started TSO transaction few steps earlier,
+ * we'll skip packet header using an offset in the
+ * current segment (which has been set to the
+ * first one containing payload)
+ */
+ seg_len -= in_off;
+ next_frag += in_off;
+ in_off = 0;
+
+ do {
+ efsys_dma_addr_t frag_addr = next_frag;
+ size_t frag_len;
+
+ /*
+ * It is assumed here that there is no
+ * limitation on address boundary
+ * crossing by DMA descriptor.
+ */
+ frag_len = MIN(seg_len, txq->dma_desc_size_max);
+ next_frag += frag_len;
+ seg_len -= frag_len;
+ pkt_len -= frag_len;
+
+ efx_tx_qdesc_dma_create(txq->common,
+ frag_addr, frag_len,
+ (pkt_len == 0),
+ pend++);
+
+ pkt_descs++;
+ } while (seg_len != 0);
+ }
+
+ added += pkt_descs;
+
+ fill_level += pkt_descs;
+ if (unlikely(fill_level > hard_max_fill)) {
+ /*
+ * Our estimation for maximum number of descriptors
+ * required to send a packet seems to be wrong.
+ * Try to reap (if we haven't yet).
+ */
+ if (!reap_done) {
+ sfc_efx_tx_reap(txq);
+ reap_done = B_TRUE;
+ fill_level = added - txq->completed;
+ if (fill_level > hard_max_fill) {
+ pend -= pkt_descs;
+ break;
+ }
+ } else {
+ pend -= pkt_descs;
+ break;
+ }
+ }
+
+ /* Assign mbuf to the last used desc */
+ txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp;
+ }
+
+ if (likely(pkts_sent > 0)) {
+ rc = efx_tx_qdesc_post(txq->common, txq->pend_desc,
+ pend - &txq->pend_desc[0],
+ txq->completed, &txq->added);
+ SFC_ASSERT(rc == 0);
+
+ if (likely(pushed != txq->added))
+ efx_tx_qpush(txq->common, txq->added, pushed);
+ }
+
+#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
+ if (!reap_done)
+ sfc_efx_tx_reap(txq);
+#endif
+
+done:
+ return pkts_sent;
+}
+
+struct sfc_txq *
+sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
+{
+ const struct sfc_dp_queue *dpq = &dp_txq->dpq;
+ struct rte_eth_dev *eth_dev;
+ struct sfc_adapter *sa;
+ struct sfc_txq *txq;
+
+ SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
+ eth_dev = &rte_eth_devices[dpq->port_id];
+
+ sa = eth_dev->data->dev_private;
+
+ SFC_ASSERT(dpq->queue_id < sa->txq_count);
+ txq = sa->txq_info[dpq->queue_id].txq;
+
+ SFC_ASSERT(txq != NULL);
+ return txq;
+}
+
+static sfc_dp_tx_qcreate_t sfc_efx_tx_qcreate;
+static int
+sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_tx_qcreate_info *info,
+ struct sfc_dp_txq **dp_txqp)
+{
+ struct sfc_efx_txq *txq;
+ struct sfc_txq *ctrl_txq;
+ int rc;
+
+ rc = ENOMEM;
+ txq = rte_zmalloc_socket("sfc-efx-txq", sizeof(*txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ goto fail_txq_alloc;
+
+ sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ txq->pend_desc = rte_calloc_socket("sfc-efx-txq-pend-desc",
+ EFX_TXQ_LIMIT(info->txq_entries),
+ sizeof(*txq->pend_desc), 0,
+ socket_id);
+ if (txq->pend_desc == NULL)
+ goto fail_pend_desc_alloc;
+
+ rc = ENOMEM;
+ txq->sw_ring = rte_calloc_socket("sfc-efx-txq-sw_ring",
+ info->txq_entries,
+ sizeof(*txq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL)
+ goto fail_sw_ring_alloc;
+
+ ctrl_txq = sfc_txq_by_dp_txq(&txq->dp);
+ if (ctrl_txq->evq->sa->tso) {
+ rc = sfc_efx_tso_alloc_tsoh_objs(txq->sw_ring,
+ info->txq_entries, socket_id);
+ if (rc != 0)
+ goto fail_alloc_tsoh_objs;
+ }
+
+ txq->evq = ctrl_txq->evq;
+ txq->ptr_mask = info->txq_entries - 1;
+ txq->free_thresh = info->free_thresh;
+ txq->dma_desc_size_max = info->dma_desc_size_max;
+
+ *dp_txqp = &txq->dp;
+ return 0;
+
+fail_alloc_tsoh_objs:
+ rte_free(txq->sw_ring);
+
+fail_sw_ring_alloc:
+ rte_free(txq->pend_desc);
+
+fail_pend_desc_alloc:
+ rte_free(txq);
+
+fail_txq_alloc:
+ return rc;
+}
+
+static sfc_dp_tx_qdestroy_t sfc_efx_tx_qdestroy;
+static void
+sfc_efx_tx_qdestroy(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+ sfc_efx_tso_free_tsoh_objs(txq->sw_ring, txq->ptr_mask + 1);
+ rte_free(txq->sw_ring);
+ rte_free(txq->pend_desc);
+ rte_free(txq);
+}
+
+static sfc_dp_tx_qstart_t sfc_efx_tx_qstart;
+static int
+sfc_efx_tx_qstart(struct sfc_dp_txq *dp_txq,
+ __rte_unused unsigned int evq_read_ptr,
+ unsigned int txq_desc_index)
+{
+ /* libefx-based datapath is specific to libefx-based PMD */
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ struct sfc_txq *ctrl_txq = sfc_txq_by_dp_txq(dp_txq);
+
+ txq->common = ctrl_txq->common;
+
+ txq->pending = txq->completed = txq->added = txq_desc_index;
+ txq->hw_vlan_tci = 0;
+
+ txq->flags |= (SFC_EFX_TXQ_FLAG_STARTED | SFC_EFX_TXQ_FLAG_RUNNING);
+
+ return 0;
+}
+
+static sfc_dp_tx_qstop_t sfc_efx_tx_qstop;
+static void
+sfc_efx_tx_qstop(struct sfc_dp_txq *dp_txq,
+ __rte_unused unsigned int *evq_read_ptr)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+ txq->flags &= ~SFC_EFX_TXQ_FLAG_RUNNING;
+}
+
+static sfc_dp_tx_qreap_t sfc_efx_tx_qreap;
+static void
+sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ unsigned int txds;
+
+ sfc_efx_tx_reap(txq);
+
+ for (txds = 0; txds <= txq->ptr_mask; txds++) {
+ if (txq->sw_ring[txds].mbuf != NULL) {
+ rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
+ txq->sw_ring[txds].mbuf = NULL;
+ }
+ }
+
+ txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
+}
+
+struct sfc_dp_tx sfc_efx_tx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EFX,
+ .type = SFC_DP_TX,
+ .hw_fw_caps = 0,
+ },
+ .features = SFC_DP_TX_FEAT_VLAN_INSERT |
+ SFC_DP_TX_FEAT_TSO |
+ SFC_DP_TX_FEAT_MULTI_SEG,
+ .qcreate = sfc_efx_tx_qcreate,
+ .qdestroy = sfc_efx_tx_qdestroy,
+ .qstart = sfc_efx_tx_qstart,
+ .qstop = sfc_efx_tx_qstop,
+ .qreap = sfc_efx_tx_qreap,
+ .pkt_burst = sfc_efx_xmit_pkts,
+};
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
new file mode 100644
index 00000000..6c3ac3b6
--- /dev/null
+++ b/drivers/net/sfc/sfc_tx.h
@@ -0,0 +1,164 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_TX_H
+#define _SFC_TX_H
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+
+#include "efx.h"
+
+#include "sfc_dp_tx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_adapter;
+struct sfc_evq;
+
+/**
+ * Software Tx descriptor information associated with hardware Tx
+ * descriptor.
+ */
+struct sfc_efx_tx_sw_desc {
+ struct rte_mbuf *mbuf;
+ uint8_t *tsoh; /* Buffer to store TSO header */
+};
+
+enum sfc_txq_state_bit {
+ SFC_TXQ_INITIALIZED_BIT = 0,
+#define SFC_TXQ_INITIALIZED (1 << SFC_TXQ_INITIALIZED_BIT)
+ SFC_TXQ_STARTED_BIT,
+#define SFC_TXQ_STARTED (1 << SFC_TXQ_STARTED_BIT)
+ SFC_TXQ_FLUSHING_BIT,
+#define SFC_TXQ_FLUSHING (1 << SFC_TXQ_FLUSHING_BIT)
+ SFC_TXQ_FLUSHED_BIT,
+#define SFC_TXQ_FLUSHED (1 << SFC_TXQ_FLUSHED_BIT)
+};
+
+/**
+ * Transmit queue control information. Not used on datapath.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_txq {
+ unsigned int state;
+ unsigned int hw_index;
+ struct sfc_evq *evq;
+ efsys_mem_t mem;
+ struct sfc_dp_txq *dp;
+ efx_txq_t *common;
+ unsigned int free_thresh;
+ unsigned int flags;
+};
+
+static inline unsigned int
+sfc_txq_sw_index_by_hw_index(unsigned int hw_index)
+{
+ return hw_index;
+}
+
+static inline unsigned int
+sfc_txq_sw_index(const struct sfc_txq *txq)
+{
+ return sfc_txq_sw_index_by_hw_index(txq->hw_index);
+}
+
+struct sfc_txq *sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq);
+
+/**
+ * Transmit queue information used on libefx-based data path.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_efx_txq {
+ struct sfc_evq *evq;
+ struct sfc_efx_tx_sw_desc *sw_ring;
+ unsigned int ptr_mask;
+ efx_desc_t *pend_desc;
+ efx_txq_t *common;
+ unsigned int added;
+ unsigned int pending;
+ unsigned int completed;
+ unsigned int free_thresh;
+ uint16_t hw_vlan_tci;
+ uint16_t dma_desc_size_max;
+
+ unsigned int hw_index;
+ unsigned int flags;
+#define SFC_EFX_TXQ_FLAG_STARTED 0x1
+#define SFC_EFX_TXQ_FLAG_RUNNING 0x2
+
+ /* Datapath transmit queue anchor */
+ struct sfc_dp_txq dp;
+};
+
+static inline struct sfc_efx_txq *
+sfc_efx_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
+{
+ return container_of(dp_txq, struct sfc_efx_txq, dp);
+}
+
+struct sfc_txq_info {
+ unsigned int entries;
+ struct sfc_txq *txq;
+ boolean_t deferred_start;
+ boolean_t deferred_started;
+};
+
+int sfc_tx_configure(struct sfc_adapter *sa);
+void sfc_tx_close(struct sfc_adapter *sa);
+
+int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
+
+void sfc_tx_qflush_done(struct sfc_txq *txq);
+int sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
+void sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+int sfc_tx_start(struct sfc_adapter *sa);
+void sfc_tx_stop(struct sfc_adapter *sa);
+
+/* From 'sfc_tso.c' */
+int sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries,
+ unsigned int socket_id);
+void sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries);
+int sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
+ struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
+ unsigned int *pkt_descs, size_t *pkt_len);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_TX_H */
diff --git a/drivers/net/szedata2/Makefile b/drivers/net/szedata2/Makefile
index 4a7b14c9..836c3b2a 100644
--- a/drivers/net/szedata2/Makefile
+++ b/drivers/net/szedata2/Makefile
@@ -54,11 +54,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += rte_eth_szedata2.c
#
SYMLINK-y-include +=
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index f3cd52dc..54212b71 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -45,6 +45,7 @@
#include <rte_mbuf.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_kvargs.h>
@@ -91,6 +92,7 @@ struct pmd_internals {
uint16_t max_rx_queues;
uint16_t max_tx_queues;
char sze_dev[PATH_MAX];
+ struct rte_mem_resource *pci_rsc;
};
static struct ether_addr eth_addr = {
@@ -1030,6 +1032,7 @@ eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->if_index = 0;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
@@ -1144,8 +1147,10 @@ eth_link_update(struct rte_eth_dev *dev,
struct rte_eth_link link;
struct rte_eth_link *link_ptr = &link;
struct rte_eth_link *dev_link = &dev->data->dev_link;
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
switch (cgmii_link_speed(ibuf)) {
@@ -1180,11 +1185,13 @@ eth_link_update(struct rte_eth_dev *dev,
static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
volatile struct szedata2_cgmii_obuf *obuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_OBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_OBUF_BASE_OFF,
volatile struct szedata2_cgmii_obuf *);
cgmii_ibuf_enable(ibuf);
@@ -1195,11 +1202,13 @@ eth_dev_set_link_up(struct rte_eth_dev *dev)
static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
volatile struct szedata2_cgmii_obuf *obuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_OBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_OBUF_BASE_OFF,
volatile struct szedata2_cgmii_obuf *);
cgmii_ibuf_disable(ibuf);
@@ -1281,8 +1290,10 @@ eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
static void
eth_promiscuous_enable(struct rte_eth_dev *dev)
{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_PROMISC);
}
@@ -1290,8 +1301,10 @@ eth_promiscuous_enable(struct rte_eth_dev *dev)
static void
eth_promiscuous_disable(struct rte_eth_dev *dev)
{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ONLY_VALID);
}
@@ -1299,8 +1312,10 @@ eth_promiscuous_disable(struct rte_eth_dev *dev)
static void
eth_allmulticast_enable(struct rte_eth_dev *dev)
{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ALL_MULTICAST);
}
@@ -1308,8 +1323,10 @@ eth_allmulticast_enable(struct rte_eth_dev *dev)
static void
eth_allmulticast_disable(struct rte_eth_dev *dev)
{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ONLY_VALID);
}
@@ -1349,7 +1366,7 @@ static const struct eth_dev_ops ops = {
* -1 on error
*/
static int
-get_szedata2_index(struct rte_eth_dev *dev, uint32_t *index)
+get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
{
DIR *dir;
struct dirent *entry;
@@ -1357,7 +1374,6 @@ get_szedata2_index(struct rte_eth_dev *dev, uint32_t *index)
uint32_t tmp_index;
FILE *fd;
char pcislot_path[PATH_MAX];
- struct rte_pci_addr pcislot_addr = dev->pci_dev->addr;
uint32_t domain;
uint32_t bus;
uint32_t devid;
@@ -1392,10 +1408,10 @@ get_szedata2_index(struct rte_eth_dev *dev, uint32_t *index)
if (ret != 4)
continue;
- if (pcislot_addr.domain == domain &&
- pcislot_addr.bus == bus &&
- pcislot_addr.devid == devid &&
- pcislot_addr.function == function) {
+ if (pcislot_addr->domain == domain &&
+ pcislot_addr->bus == bus &&
+ pcislot_addr->devid == devid &&
+ pcislot_addr->function == function) {
*index = tmp_index;
closedir(dir);
return 0;
@@ -1415,9 +1431,10 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
struct szedata *szedata_temp;
int ret;
uint32_t szedata2_index;
- struct rte_pci_addr *pci_addr = &dev->pci_dev->addr;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_addr *pci_addr = &pci_dev->addr;
struct rte_mem_resource *pci_rsc =
- &dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
+ &pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
char rsc_filename[PATH_MAX];
void *pci_resource_ptr = NULL;
int fd;
@@ -1427,7 +1444,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
pci_addr->function);
/* Get index of szedata2 device file and create path to device file */
- ret = get_szedata2_index(dev, &szedata2_index);
+ ret = get_szedata2_index(pci_addr, &szedata2_index);
if (ret != 0) {
RTE_LOG(ERR, PMD, "Failed to get szedata2 device index!\n");
return -ENODEV;
@@ -1471,10 +1488,10 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
/* Set function callbacks for Ethernet API */
dev->dev_ops = &ops;
- rte_eth_copy_pci_info(dev, dev->pci_dev);
+ rte_eth_copy_pci_info(dev, pci_dev);
/* mmap pci resource0 file to rte_mem_resource structure */
- if (dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr ==
+ if (pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr ==
0) {
RTE_LOG(ERR, PMD, "Missing resource%u file\n",
PCI_RESOURCE_NUMBER);
@@ -1491,7 +1508,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
}
pci_resource_ptr = mmap(0,
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
+ pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
close(fd);
if (pci_resource_ptr == NULL) {
@@ -1499,8 +1516,8 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
rsc_filename, fd);
return -EINVAL;
}
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr =
- pci_resource_ptr;
+ pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr = pci_resource_ptr;
+ internals->pci_rsc = pci_rsc;
RTE_LOG(DEBUG, PMD, "resource%u phys_addr = 0x%llx len = %llu "
"virt addr = %llx\n", PCI_RESOURCE_NUMBER,
@@ -1516,8 +1533,8 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
RTE_CACHE_LINE_SIZE);
if (data->mac_addrs == NULL) {
RTE_LOG(ERR, PMD, "Could not alloc space for MAC address!\n");
- munmap(dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
+ munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
+ pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
return -EINVAL;
}
@@ -1537,12 +1554,13 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
static int
rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
{
- struct rte_pci_addr *pci_addr = &dev->pci_dev->addr;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_addr *pci_addr = &pci_dev->addr;
rte_free(dev->data->mac_addrs);
dev->data->mac_addrs = NULL;
- munmap(dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
+ munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
+ pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
RTE_LOG(INFO, PMD, "szedata2 device ("
PCI_PRI_FMT ") successfully uninitialized\n",
@@ -1570,16 +1588,26 @@ static const struct rte_pci_id rte_szedata2_pci_id_table[] = {
}
};
-static struct eth_driver szedata2_eth_driver = {
- .pci_drv = {
- .id_table = rte_szedata2_pci_id_table,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = rte_szedata2_eth_dev_init,
- .eth_dev_uninit = rte_szedata2_eth_dev_uninit,
- .dev_private_size = sizeof(struct pmd_internals),
+static int szedata2_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct pmd_internals), rte_szedata2_eth_dev_init);
+}
+
+static int szedata2_eth_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ rte_szedata2_eth_dev_uninit);
+}
+
+static struct rte_pci_driver szedata2_eth_driver = {
+ .id_table = rte_szedata2_pci_id_table,
+ .probe = szedata2_eth_pci_probe,
+ .remove = szedata2_eth_pci_remove,
};
-RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver.pci_drv);
+RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver);
RTE_PMD_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
+RTE_PMD_REGISTER_KMOD_DEP(RTE_SZEDATA2_DRIVER_NAME,
+ "* combo6core & combov3 & szedata2 & szedata2_cv3");
diff --git a/drivers/net/szedata2/rte_eth_szedata2.h b/drivers/net/szedata2/rte_eth_szedata2.h
index 522cf47f..afe8a383 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.h
+++ b/drivers/net/szedata2/rte_eth_szedata2.h
@@ -117,94 +117,82 @@ struct szedata {
* @return Byte from PCI resource at offset "offset".
*/
static inline uint8_t
-pci_resource_read8(struct rte_eth_dev *dev, uint32_t offset)
+pci_resource_read8(struct rte_mem_resource *rsc, uint32_t offset)
{
- return *((uint8_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset));
+ return *((uint8_t *)((uint8_t *)rsc->addr + offset));
}
/*
* @return Two bytes from PCI resource starting at offset "offset".
*/
static inline uint16_t
-pci_resource_read16(struct rte_eth_dev *dev, uint32_t offset)
+pci_resource_read16(struct rte_mem_resource *rsc, uint32_t offset)
{
- return rte_le_to_cpu_16(*((uint16_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)));
+ return rte_le_to_cpu_16(*((uint16_t *)((uint8_t *)rsc->addr +
+ offset)));
}
/*
* @return Four bytes from PCI resource starting at offset "offset".
*/
static inline uint32_t
-pci_resource_read32(struct rte_eth_dev *dev, uint32_t offset)
+pci_resource_read32(struct rte_mem_resource *rsc, uint32_t offset)
{
- return rte_le_to_cpu_32(*((uint32_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)));
+ return rte_le_to_cpu_32(*((uint32_t *)((uint8_t *)rsc->addr +
+ offset)));
}
/*
* @return Eight bytes from PCI resource starting at offset "offset".
*/
static inline uint64_t
-pci_resource_read64(struct rte_eth_dev *dev, uint32_t offset)
+pci_resource_read64(struct rte_mem_resource *rsc, uint32_t offset)
{
- return rte_le_to_cpu_64(*((uint64_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)));
+ return rte_le_to_cpu_64(*((uint64_t *)((uint8_t *)rsc->addr +
+ offset)));
}
/*
* Write one byte to PCI resource address space at offset "offset".
*/
static inline void
-pci_resource_write8(struct rte_eth_dev *dev, uint32_t offset, uint8_t val)
+pci_resource_write8(struct rte_mem_resource *rsc, uint32_t offset, uint8_t val)
{
- *((uint8_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)) = val;
+ *((uint8_t *)((uint8_t *)rsc->addr + offset)) = val;
}
/*
* Write two bytes to PCI resource address space at offset "offset".
*/
static inline void
-pci_resource_write16(struct rte_eth_dev *dev, uint32_t offset, uint16_t val)
+pci_resource_write16(struct rte_mem_resource *rsc, uint32_t offset,
+ uint16_t val)
{
- *((uint16_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)) = rte_cpu_to_le_16(val);
+ *((uint16_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_16(val);
}
/*
* Write four bytes to PCI resource address space at offset "offset".
*/
static inline void
-pci_resource_write32(struct rte_eth_dev *dev, uint32_t offset, uint32_t val)
+pci_resource_write32(struct rte_mem_resource *rsc, uint32_t offset,
+ uint32_t val)
{
- *((uint32_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)) = rte_cpu_to_le_32(val);
+ *((uint32_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_32(val);
}
/*
* Write eight bytes to PCI resource address space at offset "offset".
*/
static inline void
-pci_resource_write64(struct rte_eth_dev *dev, uint32_t offset, uint64_t val)
+pci_resource_write64(struct rte_mem_resource *rsc, uint32_t offset,
+ uint64_t val)
{
- *((uint64_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)) = rte_cpu_to_le_64(val);
+ *((uint64_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_64(val);
}
-#define SZEDATA2_PCI_RESOURCE_PTR(dev, offset, type) \
- ((type)((uint8_t *) \
- ((dev)->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr) \
- + (offset)))
+#define SZEDATA2_PCI_RESOURCE_PTR(rsc, offset, type) \
+ ((type)(((uint8_t *)(rsc)->addr) + (offset)))
enum szedata2_link_speed {
SZEDATA2_LINK_SPEED_DEFAULT = 0,
diff --git a/scripts/depdirs-rule.sh b/drivers/net/tap/Makefile
index 7aba0885..b0de0284 100755..100644
--- a/scripts/depdirs-rule.sh
+++ b/drivers/net/tap/Makefile
@@ -1,9 +1,6 @@
-#!/bin/sh
-
# BSD LICENSE
#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -31,65 +28,66 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+include $(RTE_SDK)/mk/rte.vars.mk
+
#
-# This (obscure) bash script finds the smallest different path between
-# path1 and path2 given as command line argument. The given paths MUST
-# be relative paths, the script is not designed to work with absolute
-# paths.
-#
-# The script will then generate Makefile code that can be saved in a
-# file and included in build system.
+# library name
#
-# For instance:
-# depdirs-rule.sh a/b/c/d a/b/e/f
-# Will print:
-# FULL_DEPDIRS-a/b/c/d += a/b/e/f
-# LOCAL_DEPDIRS-a/b/c += a/b/e
+LIB = librte_pmd_tap.a
+
+EXPORT_MAP := rte_pmd_tap_version.map
+
+LIBABIVER := 1
+
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -I.
+CFLAGS += $(WERROR_FLAGS)
+
#
-# The script returns 0 except if invalid arguments are given.
+# all source are stored in SRCS-y
#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += rte_eth_tap.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_netlink.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_tcmsgs.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
+# Generate and clean-up tap_autoconf.h.
+
+export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS
+export AUTO_CONFIG_CFLAGS = -Wno-error
+
+ifndef V
+AUTOCONF_OUTPUT := >/dev/null
+endif
+
+tap_autoconf.h.new: FORCE
+
+tap_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
+ $Q $(RM) -f -- '$@'
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_FLOWER \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_UNSPEC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_VLAN_ID \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_VLAN_PRIO \
+ $(AUTOCONF_OUTPUT)
+
+# Create tap_autoconf.h or update it in case it differs from the new one.
+
+tap_autoconf.h: tap_autoconf.h.new
+ $Q [ -f '$@' ] && \
+ cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \
+ mv '$<' '$@'
+
+$(SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP):.c=.o): tap_autoconf.h
+
+clean_tap: FORCE
+ $Q rm -f -- tap_autoconf.h tap_autoconf.h.new
-if [ $# -ne 2 ]; then
- echo "Bad arguments"
- echo "Usage:"
- echo " $0 path1 path2"
- exit 1
-fi
-
-left1=${1%%/*}
-right1=${1#*/}
-prev_right1=$1
-prev_left1=
-
-left2=${2%%/*}
-right2=${2#*/}
-prev_right2=$2
-prev_left2=
-
-while [ "${right1}" != "" -a "${right2}" != "" ]; do
-
- if [ "$left1" != "$left2" ]; then
- break
- fi
-
- prev_left1=$left1
- left1=$left1/${right1%%/*}
- prev_right1=$right1
- right1=${prev_right1#*/}
- if [ "$right1" = "$prev_right1" ]; then
- right1=""
- fi
-
- prev_left2=$left2
- left2=$left2/${right2%%/*}
- prev_right2=$right2
- right2=${prev_right2#*/}
- if [ "$right2" = "$prev_right2" ]; then
- right2=""
- fi
-done
-
-echo FULL_DEPDIRS-$1 += $2
-echo LOCAL_DEPDIRS-$left1 += $left2
-
-exit 0
+clean: clean_tap
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
new file mode 100644
index 00000000..e44de027
--- /dev/null
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -0,0 +1,1394 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_malloc.h>
+#include <rte_vdev.h>
+#include <rte_kvargs.h>
+#include <rte_net.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <sys/utsname.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdint.h>
+#include <sys/uio.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <net/if.h>
+#include <linux/if_tun.h>
+#include <linux/if_ether.h>
+#include <linux/version.h>
+#include <fcntl.h>
+
+#include <rte_eth_tap.h>
+#include <tap_flow.h>
+#include <tap_netlink.h>
+#include <tap_tcmsgs.h>
+
+/* Linux based path to the TUN device */
+#define TUN_TAP_DEV_PATH "/dev/net/tun"
+#define DEFAULT_TAP_NAME "dtap"
+
+#define ETH_TAP_IFACE_ARG "iface"
+#define ETH_TAP_SPEED_ARG "speed"
+#define ETH_TAP_REMOTE_ARG "remote"
+
+#define FLOWER_KERNEL_VERSION KERNEL_VERSION(4, 2, 0)
+#define FLOWER_VLAN_KERNEL_VERSION KERNEL_VERSION(4, 9, 0)
+
+static struct rte_vdev_driver pmd_tap_drv;
+
+static const char *valid_arguments[] = {
+ ETH_TAP_IFACE_ARG,
+ ETH_TAP_SPEED_ARG,
+ ETH_TAP_REMOTE_ARG,
+ NULL
+};
+
+static int tap_unit;
+
+static volatile uint32_t tap_trigger; /* Rx trigger */
+
+static struct rte_eth_link pmd_link = {
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_SPEED_AUTONEG
+};
+
+static void
+tap_trigger_cb(int sig __rte_unused)
+{
+ /* Valid trigger values are nonzero */
+ tap_trigger = (tap_trigger + 1) | 0x80000000;
+}
+
+/* Specifies on what netdevices the ioctl should be applied */
+enum ioctl_mode {
+ LOCAL_AND_REMOTE,
+ LOCAL_ONLY,
+ REMOTE_ONLY,
+};
+
+static int
+tap_ioctl(struct pmd_internals *pmd, unsigned long request,
+ struct ifreq *ifr, int set, enum ioctl_mode mode);
+
+static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
+
+/* Tun/Tap allocation routine
+ *
+ * name is the number of the interface to use, unless NULL to take the host
+ * supplied name.
+ */
+static int
+tun_alloc(struct pmd_internals *pmd, uint16_t qid)
+{
+ struct ifreq ifr;
+#ifdef IFF_MULTI_QUEUE
+ unsigned int features;
+#endif
+ int fd;
+
+ memset(&ifr, 0, sizeof(struct ifreq));
+
+ /*
+ * Do not set IFF_NO_PI as packet information header will be needed
+ * to check if a received packet has been truncated.
+ */
+ ifr.ifr_flags = IFF_TAP;
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);
+
+ RTE_LOG(DEBUG, PMD, "ifr_name '%s'\n", ifr.ifr_name);
+
+ fd = open(TUN_TAP_DEV_PATH, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, PMD, "Unable to create TAP interface");
+ goto error;
+ }
+
+#ifdef IFF_MULTI_QUEUE
+ /* Grab the TUN features to verify we can work multi-queue */
+ if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
+ RTE_LOG(ERR, PMD, "TAP unable to get TUN/TAP features\n");
+ goto error;
+ }
+ RTE_LOG(DEBUG, PMD, " TAP Features %08x\n", features);
+
+ if (features & IFF_MULTI_QUEUE) {
+ RTE_LOG(DEBUG, PMD, " Multi-queue support for %d queues\n",
+ RTE_PMD_TAP_MAX_QUEUES);
+ ifr.ifr_flags |= IFF_MULTI_QUEUE;
+ } else
+#endif
+ {
+ ifr.ifr_flags |= IFF_ONE_QUEUE;
+ RTE_LOG(DEBUG, PMD, " Single queue only support\n");
+ }
+
+ /* Set the TUN/TAP configuration and set the name if needed */
+ if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
+ RTE_LOG(WARNING, PMD,
+ "Unable to set TUNSETIFF for %s\n",
+ ifr.ifr_name);
+ perror("TUNSETIFF");
+ goto error;
+ }
+
+ /* Always set the file descriptor to non-blocking */
+ if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
+ RTE_LOG(WARNING, PMD,
+ "Unable to set %s to nonblocking\n",
+ ifr.ifr_name);
+ perror("F_SETFL, NONBLOCK");
+ goto error;
+ }
+
+ /* Set up trigger to optimize empty Rx bursts */
+ errno = 0;
+ do {
+ struct sigaction sa;
+ int flags = fcntl(fd, F_GETFL);
+
+ if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
+ break;
+ if (sa.sa_handler != tap_trigger_cb) {
+ /*
+ * Make sure SIGIO is not already taken. This is done
+ * as late as possible to leave the application a
+ * chance to set up its own signal handler first.
+ */
+ if (sa.sa_handler != SIG_IGN &&
+ sa.sa_handler != SIG_DFL) {
+ errno = EBUSY;
+ break;
+ }
+ sa = (struct sigaction){
+ .sa_flags = SA_RESTART,
+ .sa_handler = tap_trigger_cb,
+ };
+ if (sigaction(SIGIO, &sa, NULL) == -1)
+ break;
+ }
+ /* Enable SIGIO on file descriptor */
+ fcntl(fd, F_SETFL, flags | O_ASYNC);
+ fcntl(fd, F_SETOWN, getpid());
+ } while (0);
+ if (errno) {
+ /* Disable trigger globally in case of error */
+ tap_trigger = 0;
+ RTE_LOG(WARNING, PMD, "Rx trigger disabled: %s\n",
+ strerror(errno));
+ }
+
+ if (qid == 0) {
+ struct ifreq ifr;
+
+ /*
+ * pmd->eth_addr contains the desired MAC, either from remote
+ * or from a random assignment. Sync it with the tap netdevice.
+ */
+ ifr.ifr_hwaddr.sa_family = AF_LOCAL;
+ rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
+ ETHER_ADDR_LEN);
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
+ goto error;
+
+ pmd->if_index = if_nametoindex(pmd->name);
+ if (!pmd->if_index) {
+ RTE_LOG(ERR, PMD,
+ "Could not find ifindex for %s: rte_flow won't be usable.\n",
+ pmd->name);
+ return fd;
+ }
+ if (!pmd->flower_support)
+ return fd;
+ if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
+ RTE_LOG(ERR, PMD,
+ "Could not create multiq qdisc for %s: rte_flow won't be usable.\n",
+ pmd->name);
+ return fd;
+ }
+ if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
+ RTE_LOG(ERR, PMD,
+ "Could not create multiq qdisc for %s: rte_flow won't be usable.\n",
+ pmd->name);
+ return fd;
+ }
+ if (pmd->remote_if_index) {
+ /*
+ * Flush usually returns negative value because it tries
+ * to delete every QDISC (and on a running device, one
+ * QDISC at least is needed). Ignore negative return
+ * value.
+ */
+ qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
+ if (qdisc_create_ingress(pmd->nlsk_fd,
+ pmd->remote_if_index) < 0)
+ goto remote_fail;
+ LIST_INIT(&pmd->implicit_flows);
+ if (tap_flow_implicit_create(
+ pmd, TAP_REMOTE_LOCAL_MAC) < 0)
+ goto remote_fail;
+ if (tap_flow_implicit_create(
+ pmd, TAP_REMOTE_BROADCAST) < 0)
+ goto remote_fail;
+ if (tap_flow_implicit_create(
+ pmd, TAP_REMOTE_BROADCASTV6) < 0)
+ goto remote_fail;
+ if (tap_flow_implicit_create(
+ pmd, TAP_REMOTE_TX) < 0)
+ goto remote_fail;
+ }
+ }
+
+ return fd;
+
+remote_fail:
+ RTE_LOG(ERR, PMD,
+ "Could not set up remote flow rules for %s: remote disabled.\n",
+ pmd->name);
+ pmd->remote_if_index = 0;
+ tap_flow_implicit_flush(pmd, NULL);
+ return fd;
+
+error:
+ if (fd > 0)
+ close(fd);
+ return -1;
+}
+
+/* Callback to handle the rx burst of packets to the correct interface and
+ * file descriptor(s) in a multi-queue setup.
+ */
+static uint16_t
+pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct rx_queue *rxq = queue;
+ uint16_t num_rx;
+ unsigned long num_rx_bytes = 0;
+ uint32_t trigger = tap_trigger;
+
+ if (trigger == rxq->trigger_seen)
+ return 0;
+ if (trigger)
+ rxq->trigger_seen = trigger;
+ rte_compiler_barrier();
+ for (num_rx = 0; num_rx < nb_pkts; ) {
+ struct rte_mbuf *mbuf = rxq->pool;
+ struct rte_mbuf *seg = NULL;
+ struct rte_mbuf *new_tail = NULL;
+ uint16_t data_off = rte_pktmbuf_headroom(mbuf);
+ int len;
+
+ len = readv(rxq->fd, *rxq->iovecs,
+ 1 + (rxq->rxmode->enable_scatter ?
+ rxq->nb_rx_desc : 1));
+ if (len < (int)sizeof(struct tun_pi))
+ break;
+
+ /* Packet couldn't fit in the provided mbuf */
+ if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
+ rxq->stats.ierrors++;
+ continue;
+ }
+
+ len -= sizeof(struct tun_pi);
+
+ mbuf->pkt_len = len;
+ mbuf->port = rxq->in_port;
+ while (1) {
+ struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);
+
+ if (unlikely(!buf)) {
+ rxq->stats.rx_nombuf++;
+ /* No new buf has been allocated: do nothing */
+ if (!new_tail || !seg)
+ goto end;
+
+ seg->next = NULL;
+ rte_pktmbuf_free(mbuf);
+
+ goto end;
+ }
+ seg = seg ? seg->next : mbuf;
+ if (rxq->pool == mbuf)
+ rxq->pool = buf;
+ if (new_tail)
+ new_tail->next = buf;
+ new_tail = buf;
+ new_tail->next = seg->next;
+
+ /* iovecs[0] is reserved for packet info (pi) */
+ (*rxq->iovecs)[mbuf->nb_segs].iov_len =
+ buf->buf_len - data_off;
+ (*rxq->iovecs)[mbuf->nb_segs].iov_base =
+ (char *)buf->buf_addr + data_off;
+
+ seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
+ seg->data_off = data_off;
+
+ len -= seg->data_len;
+ if (len <= 0)
+ break;
+ mbuf->nb_segs++;
+ /* First segment has headroom, not the others */
+ data_off = 0;
+ }
+ seg->next = NULL;
+ mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
+ RTE_PTYPE_ALL_MASK);
+
+ /* account for the receive frame */
+ bufs[num_rx++] = mbuf;
+ num_rx_bytes += mbuf->pkt_len;
+ }
+end:
+ rxq->stats.ipackets += num_rx;
+ rxq->stats.ibytes += num_rx_bytes;
+
+ return num_rx;
+}
+
+/* Callback to handle sending packets from the tap interface
+ */
+static uint16_t
+pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct tx_queue *txq = queue;
+ uint16_t num_tx = 0;
+ unsigned long num_tx_bytes = 0;
+ uint32_t max_size;
+ int i;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf = bufs[num_tx];
+ struct iovec iovecs[mbuf->nb_segs + 1];
+ struct tun_pi pi = { .flags = 0 };
+ struct rte_mbuf *seg = mbuf;
+ int n;
+ int j;
+
+ /* stats.errs will be incremented */
+ if (rte_pktmbuf_pkt_len(mbuf) > max_size)
+ break;
+
+ iovecs[0].iov_base = &pi;
+ iovecs[0].iov_len = sizeof(pi);
+ for (j = 1; j <= mbuf->nb_segs; j++) {
+ iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
+ iovecs[j].iov_base =
+ rte_pktmbuf_mtod(seg, void *);
+ seg = seg->next;
+ }
+ /* copy the tx frame data */
+ n = writev(txq->fd, iovecs, mbuf->nb_segs + 1);
+ if (n <= 0)
+ break;
+
+ num_tx++;
+ num_tx_bytes += mbuf->pkt_len;
+ rte_pktmbuf_free(mbuf);
+ }
+
+ txq->stats.opackets += num_tx;
+ txq->stats.errs += nb_pkts - num_tx;
+ txq->stats.obytes += num_tx_bytes;
+
+ return num_tx;
+}
+
+static int
+tap_ioctl(struct pmd_internals *pmd, unsigned long request,
+ struct ifreq *ifr, int set, enum ioctl_mode mode)
+{
+ short req_flags = ifr->ifr_flags;
+ int remote = pmd->remote_if_index &&
+ (mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);
+
+ if (!pmd->remote_if_index && mode == REMOTE_ONLY)
+ return 0;
+ /*
+ * If there is a remote netdevice, apply ioctl on it, then apply it on
+ * the tap netdevice.
+ */
+apply:
+ if (remote)
+ snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->remote_iface);
+ else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
+ snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->name);
+ switch (request) {
+ case SIOCSIFFLAGS:
+ /* fetch current flags to leave other flags untouched */
+ if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
+ goto error;
+ if (set)
+ ifr->ifr_flags |= req_flags;
+ else
+ ifr->ifr_flags &= ~req_flags;
+ break;
+ case SIOCGIFFLAGS:
+ case SIOCGIFHWADDR:
+ case SIOCSIFHWADDR:
+ case SIOCSIFMTU:
+ break;
+ default:
+ RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
+ pmd->name);
+ return -EINVAL;
+ }
+ if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
+ goto error;
+ if (remote-- && mode == LOCAL_AND_REMOTE)
+ goto apply;
+ return 0;
+
+error:
+ RTE_LOG(ERR, PMD, "%s: ioctl(%lu) failed with error: %s\n",
+ ifr->ifr_name, request, strerror(errno));
+ return -errno;
+}
+
+static int
+tap_link_set_down(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_UP };
+
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
+}
+
+static int
+tap_link_set_up(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_UP };
+
+ dev->data->dev_link.link_status = ETH_LINK_UP;
+ return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
+}
+
+static int
+tap_dev_start(struct rte_eth_dev *dev)
+{
+ int err;
+
+ err = tap_intr_handle_set(dev, 1);
+ if (err)
+ return err;
+ return tap_link_set_up(dev);
+}
+
+/* This function gets called when the current port gets stopped.
+ */
+static void
+tap_dev_stop(struct rte_eth_dev *dev)
+{
+ tap_intr_handle_set(dev, 0);
+ tap_link_set_down(dev);
+}
+
+static int
+tap_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static uint32_t
+tap_dev_speed_capa(void)
+{
+ uint32_t speed = pmd_link.link_speed;
+ uint32_t capa = 0;
+
+ if (speed >= ETH_SPEED_NUM_10M)
+ capa |= ETH_LINK_SPEED_10M;
+ if (speed >= ETH_SPEED_NUM_100M)
+ capa |= ETH_LINK_SPEED_100M;
+ if (speed >= ETH_SPEED_NUM_1G)
+ capa |= ETH_LINK_SPEED_1G;
+ if (speed >= ETH_SPEED_NUM_5G)
+ capa |= ETH_LINK_SPEED_2_5G;
+ if (speed >= ETH_SPEED_NUM_5G)
+ capa |= ETH_LINK_SPEED_5G;
+ if (speed >= ETH_SPEED_NUM_10G)
+ capa |= ETH_LINK_SPEED_10G;
+ if (speed >= ETH_SPEED_NUM_20G)
+ capa |= ETH_LINK_SPEED_20G;
+ if (speed >= ETH_SPEED_NUM_25G)
+ capa |= ETH_LINK_SPEED_25G;
+ if (speed >= ETH_SPEED_NUM_40G)
+ capa |= ETH_LINK_SPEED_40G;
+ if (speed >= ETH_SPEED_NUM_50G)
+ capa |= ETH_LINK_SPEED_50G;
+ if (speed >= ETH_SPEED_NUM_56G)
+ capa |= ETH_LINK_SPEED_56G;
+ if (speed >= ETH_SPEED_NUM_100G)
+ capa |= ETH_LINK_SPEED_100G;
+
+ return capa;
+}
+
+static void
+tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ dev_info->if_index = internals->if_index;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
+ dev_info->max_rx_queues = internals->nb_queues;
+ dev_info->max_tx_queues = internals->nb_queues;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->pci_dev = NULL;
+ dev_info->speed_capa = tap_dev_speed_capa();
+}
+
+static void
+tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
+{
+ unsigned int i, imax;
+ unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
+ unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
+ unsigned long rx_nombuf = 0, ierrors = 0;
+ const struct pmd_internals *pmd = dev->data->dev_private;
+
+ imax = (pmd->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
+ pmd->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
+
+ for (i = 0; i < imax; i++) {
+ tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
+ tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
+ rx_total += tap_stats->q_ipackets[i];
+ rx_bytes_total += tap_stats->q_ibytes[i];
+ rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
+ ierrors += pmd->rxq[i].stats.ierrors;
+
+ tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
+ tap_stats->q_errors[i] = pmd->txq[i].stats.errs;
+ tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
+ tx_total += tap_stats->q_opackets[i];
+ tx_err_total += tap_stats->q_errors[i];
+ tx_bytes_total += tap_stats->q_obytes[i];
+ }
+
+ tap_stats->ipackets = rx_total;
+ tap_stats->ibytes = rx_bytes_total;
+ tap_stats->ierrors = ierrors;
+ tap_stats->rx_nombuf = rx_nombuf;
+ tap_stats->opackets = tx_total;
+ tap_stats->oerrors = tx_err_total;
+ tap_stats->obytes = tx_bytes_total;
+}
+
+static void
+tap_stats_reset(struct rte_eth_dev *dev)
+{
+ int i;
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ for (i = 0; i < pmd->nb_queues; i++) {
+ pmd->rxq[i].stats.ipackets = 0;
+ pmd->rxq[i].stats.ibytes = 0;
+ pmd->rxq[i].stats.ierrors = 0;
+ pmd->rxq[i].stats.rx_nombuf = 0;
+
+ pmd->txq[i].stats.opackets = 0;
+ pmd->txq[i].stats.errs = 0;
+ pmd->txq[i].stats.obytes = 0;
+ }
+}
+
+static void
+tap_dev_close(struct rte_eth_dev *dev __rte_unused)
+{
+ int i;
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ tap_link_set_down(dev);
+ tap_flow_flush(dev, NULL);
+ tap_flow_implicit_flush(internals, NULL);
+
+ for (i = 0; i < internals->nb_queues; i++) {
+ if (internals->rxq[i].fd != -1)
+ close(internals->rxq[i].fd);
+ internals->rxq[i].fd = -1;
+ internals->txq[i].fd = -1;
+ }
+}
+
+static void
+tap_rx_queue_release(void *queue)
+{
+ struct rx_queue *rxq = queue;
+
+ if (rxq && (rxq->fd > 0)) {
+ close(rxq->fd);
+ rxq->fd = -1;
+ rte_pktmbuf_free(rxq->pool);
+ rte_free(rxq->iovecs);
+ rxq->pool = NULL;
+ rxq->iovecs = NULL;
+ }
+}
+
+static void
+tap_tx_queue_release(void *queue)
+{
+ struct tx_queue *txq = queue;
+
+ if (txq && (txq->fd > 0)) {
+ close(txq->fd);
+ txq->fd = -1;
+ }
+}
+
+static int
+tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
+{
+ struct rte_eth_link *dev_link = &dev->data->dev_link;
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = 0 };
+
+ if (pmd->remote_if_index) {
+ tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
+ if (!(ifr.ifr_flags & IFF_UP) ||
+ !(ifr.ifr_flags & IFF_RUNNING)) {
+ dev_link->link_status = ETH_LINK_DOWN;
+ return 0;
+ }
+ }
+ tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
+ dev_link->link_status =
+ ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
+ ETH_LINK_UP :
+ ETH_LINK_DOWN);
+ return 0;
+}
+
+static void
+tap_promisc_enable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
+
+ dev->data->promiscuous = 1;
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
+ if (pmd->remote_if_index)
+ tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
+}
+
+static void
+tap_promisc_disable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
+
+ dev->data->promiscuous = 0;
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
+ if (pmd->remote_if_index)
+ tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
+}
+
+static void
+tap_allmulti_enable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
+
+ dev->data->all_multicast = 1;
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
+ if (pmd->remote_if_index)
+ tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
+}
+
+static void
+tap_allmulti_disable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
+
+ dev->data->all_multicast = 0;
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
+ if (pmd->remote_if_index)
+ tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
+}
+
+
+static void
+tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr;
+
+ if (is_zero_ether_addr(mac_addr)) {
+ RTE_LOG(ERR, PMD, "%s: can't set an empty MAC address\n",
+ dev->data->name);
+ return;
+ }
+ /* Check the actual current MAC address on the tap netdevice */
+ if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) != 0) {
+ RTE_LOG(ERR, PMD,
+ "%s: couldn't check current tap MAC address\n",
+ dev->data->name);
+ return;
+ }
+ if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
+ mac_addr))
+ return;
+
+ ifr.ifr_hwaddr.sa_family = AF_LOCAL;
+ rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, LOCAL_AND_REMOTE) < 0)
+ return;
+ rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
+ if (pmd->remote_if_index) {
+ /* Replace MAC redirection rule after a MAC change */
+ if (tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC) < 0) {
+ RTE_LOG(ERR, PMD,
+ "%s: Couldn't delete MAC redirection rule\n",
+ dev->data->name);
+ return;
+ }
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
+ RTE_LOG(ERR, PMD,
+ "%s: Couldn't add MAC redirection rule\n",
+ dev->data->name);
+ }
+}
+
+static int
+tap_setup_queue(struct rte_eth_dev *dev,
+ struct pmd_internals *internals,
+ uint16_t qid)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct rx_queue *rx = &internals->rxq[qid];
+ struct tx_queue *tx = &internals->txq[qid];
+ int fd;
+
+ fd = rx->fd;
+ if (fd < 0) {
+ fd = tx->fd;
+ if (fd < 0) {
+ RTE_LOG(INFO, PMD, "Add queue to TAP %s for qid %d\n",
+ pmd->name, qid);
+ fd = tun_alloc(pmd, qid);
+ if (fd < 0) {
+ RTE_LOG(ERR, PMD, "tun_alloc(%s, %d) failed\n",
+ pmd->name, qid);
+ return -1;
+ }
+ if (qid == 0) {
+ struct ifreq ifr;
+
+ ifr.ifr_mtu = dev->data->mtu;
+ if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1,
+ LOCAL_AND_REMOTE) < 0) {
+ close(fd);
+ return -1;
+ }
+ }
+ }
+ }
+
+ rx->fd = fd;
+ tx->fd = fd;
+ tx->mtu = &dev->data->mtu;
+ rx->rxmode = &dev->data->dev_conf.rxmode;
+
+ return fd;
+}
+
+static int
+rx_setup_queue(struct rte_eth_dev *dev,
+ struct pmd_internals *internals,
+ uint16_t qid)
+{
+ dev->data->rx_queues[qid] = &internals->rxq[qid];
+
+ return tap_setup_queue(dev, internals, qid);
+}
+
+static int
+tx_setup_queue(struct rte_eth_dev *dev,
+ struct pmd_internals *internals,
+ uint16_t qid)
+{
+ dev->data->tx_queues[qid] = &internals->txq[qid];
+
+ return tap_setup_queue(dev, internals, qid);
+}
+
+static int
+tap_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct rx_queue *rxq = &internals->rxq[rx_queue_id];
+ struct rte_mbuf **tmp = &rxq->pool;
+ long iov_max = sysconf(_SC_IOV_MAX);
+ uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
+ struct iovec (*iovecs)[nb_desc + 1];
+ int data_off = RTE_PKTMBUF_HEADROOM;
+ int ret = 0;
+ int fd;
+ int i;
+
+ if ((rx_queue_id >= internals->nb_queues) || !mp) {
+ RTE_LOG(WARNING, PMD,
+ "nb_queues %d too small or mempool NULL\n",
+ internals->nb_queues);
+ return -1;
+ }
+
+ rxq->mp = mp;
+ rxq->trigger_seen = 1; /* force initial burst */
+ rxq->in_port = dev->data->port_id;
+ rxq->nb_rx_desc = nb_desc;
+ iovecs = rte_zmalloc_socket(dev->data->name, sizeof(*iovecs), 0,
+ socket_id);
+ if (!iovecs) {
+ RTE_LOG(WARNING, PMD,
+ "%s: Couldn't allocate %d RX descriptors\n",
+ dev->data->name, nb_desc);
+ return -ENOMEM;
+ }
+ rxq->iovecs = iovecs;
+
+ fd = rx_setup_queue(dev, internals, rx_queue_id);
+ if (fd == -1) {
+ ret = fd;
+ goto error;
+ }
+
+ (*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
+ (*rxq->iovecs)[0].iov_base = &rxq->pi;
+
+ for (i = 1; i <= nb_desc; i++) {
+ *tmp = rte_pktmbuf_alloc(rxq->mp);
+ if (!*tmp) {
+ RTE_LOG(WARNING, PMD,
+ "%s: couldn't allocate memory for queue %d\n",
+ dev->data->name, rx_queue_id);
+ ret = -ENOMEM;
+ goto error;
+ }
+ (*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
+ (*rxq->iovecs)[i].iov_base =
+ (char *)(*tmp)->buf_addr + data_off;
+ data_off = 0;
+ tmp = &(*tmp)->next;
+ }
+
+ RTE_LOG(DEBUG, PMD, " RX TAP device name %s, qid %d on fd %d\n",
+ internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);
+
+ return 0;
+
+error:
+ rte_pktmbuf_free(rxq->pool);
+ rxq->pool = NULL;
+ rte_free(rxq->iovecs);
+ rxq->iovecs = NULL;
+ return ret;
+}
+
+static int
+tap_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ int ret;
+
+ if (tx_queue_id >= internals->nb_queues)
+ return -1;
+
+ ret = tx_setup_queue(dev, internals, tx_queue_id);
+ if (ret == -1)
+ return -1;
+
+ RTE_LOG(DEBUG, PMD, " TX TAP device name %s, qid %d on fd %d\n",
+ internals->name, tx_queue_id, internals->txq[tx_queue_id].fd);
+
+ return 0;
+}
+
+static int
+tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_mtu = mtu };
+ int err = 0;
+
+ err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
+ if (!err)
+ dev->data->mtu = mtu;
+
+ return err;
+}
+
+static int
+tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
+ struct ether_addr *mc_addr_set __rte_unused,
+ uint32_t nb_mc_addr __rte_unused)
+{
+ /*
+ * Nothing to do actually: the tap has no filtering whatsoever, every
+ * packet is received.
+ */
+ return 0;
+}
+
+static int
+tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
+{
+ struct rte_eth_dev *dev = arg;
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifinfomsg *info = NLMSG_DATA(nh);
+
+ if (nh->nlmsg_type != RTM_NEWLINK ||
+ (info->ifi_index != pmd->if_index &&
+ info->ifi_index != pmd->remote_if_index))
+ return 0;
+ return tap_link_update(dev, 0);
+}
+
+static void
+tap_dev_intr_handler(void *cb_arg)
+{
+ struct rte_eth_dev *dev = cb_arg;
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
+}
+
+static int
+tap_intr_handle_set(struct rte_eth_dev *dev, int set)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ /* In any case, disable interrupt if the conf is no longer there. */
+ if (!dev->data->dev_conf.intr_conf.lsc) {
+ if (pmd->intr_handle.fd != -1)
+ nl_final(pmd->intr_handle.fd);
+ rte_intr_callback_unregister(
+ &pmd->intr_handle, tap_dev_intr_handler, dev);
+ return 0;
+ }
+ if (set) {
+ pmd->intr_handle.fd = nl_init(RTMGRP_LINK);
+ if (unlikely(pmd->intr_handle.fd == -1))
+ return -EBADF;
+ return rte_intr_callback_register(
+ &pmd->intr_handle, tap_dev_intr_handler, dev);
+ }
+ nl_final(pmd->intr_handle.fd);
+ return rte_intr_callback_unregister(&pmd->intr_handle,
+ tap_dev_intr_handler, dev);
+}
+
+static const uint32_t*
+tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ RTE_PTYPE_INNER_L2_ETHER_QINQ,
+ RTE_PTYPE_INNER_L3_IPV4,
+ RTE_PTYPE_INNER_L3_IPV4_EXT,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_SCTP,
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L2_ETHER_QINQ,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_SCTP,
+ };
+
+ return ptypes;
+}
+
+static int
+tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ fc_conf->mode = RTE_FC_NONE;
+ return 0;
+}
+
+static int
+tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ if (fc_conf->mode != RTE_FC_NONE)
+ return -ENOTSUP;
+ return 0;
+}
+
+static const struct eth_dev_ops ops = {
+ .dev_start = tap_dev_start,
+ .dev_stop = tap_dev_stop,
+ .dev_close = tap_dev_close,
+ .dev_configure = tap_dev_configure,
+ .dev_infos_get = tap_dev_info,
+ .rx_queue_setup = tap_rx_queue_setup,
+ .tx_queue_setup = tap_tx_queue_setup,
+ .rx_queue_release = tap_rx_queue_release,
+ .tx_queue_release = tap_tx_queue_release,
+ .flow_ctrl_get = tap_flow_ctrl_get,
+ .flow_ctrl_set = tap_flow_ctrl_set,
+ .link_update = tap_link_update,
+ .dev_set_link_up = tap_link_set_up,
+ .dev_set_link_down = tap_link_set_down,
+ .promiscuous_enable = tap_promisc_enable,
+ .promiscuous_disable = tap_promisc_disable,
+ .allmulticast_enable = tap_allmulti_enable,
+ .allmulticast_disable = tap_allmulti_disable,
+ .mac_addr_set = tap_mac_set,
+ .mtu_set = tap_mtu_set,
+ .set_mc_addr_list = tap_set_mc_addr_list,
+ .stats_get = tap_stats_get,
+ .stats_reset = tap_stats_reset,
+ .dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
+ .filter_ctrl = tap_dev_filter_ctrl,
+};
+
+static int
+tap_kernel_support(struct pmd_internals *pmd)
+{
+ struct utsname utsname;
+ int ver[3];
+
+ if (uname(&utsname) == -1 ||
+ sscanf(utsname.release, "%d.%d.%d",
+ &ver[0], &ver[1], &ver[2]) != 3)
+ return 0;
+ if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >= FLOWER_KERNEL_VERSION)
+ pmd->flower_support = 1;
+ if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >=
+ FLOWER_VLAN_KERNEL_VERSION)
+ pmd->flower_vlan_support = 1;
+ return 1;
+}
+
+static int
+eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
+ char *remote_iface)
+{
+ int numa_node = rte_socket_id();
+ struct rte_eth_dev *dev;
+ struct pmd_internals *pmd;
+ struct rte_eth_dev_data *data;
+ int i;
+
+ RTE_LOG(DEBUG, PMD, " TAP device on numa %u\n", rte_socket_id());
+
+ data = rte_zmalloc_socket(tap_name, sizeof(*data), 0, numa_node);
+ if (!data) {
+ RTE_LOG(ERR, PMD, "TAP Failed to allocate data\n");
+ goto error_exit;
+ }
+
+ dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
+ if (!dev) {
+ RTE_LOG(ERR, PMD, "TAP Unable to allocate device struct\n");
+ goto error_exit;
+ }
+
+ pmd = dev->data->dev_private;
+ snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
+ pmd->nb_queues = RTE_PMD_TAP_MAX_QUEUES;
+
+ pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (pmd->ioctl_sock == -1) {
+ RTE_LOG(ERR, PMD,
+ "TAP Unable to get a socket for management: %s\n",
+ strerror(errno));
+ goto error_exit;
+ }
+
+ /* Setup some default values */
+ rte_memcpy(data, dev->data, sizeof(*data));
+ data->dev_private = pmd;
+ data->dev_flags = RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
+ data->numa_node = numa_node;
+ data->drv_name = pmd_tap_drv.driver.name;
+
+ data->dev_link = pmd_link;
+ data->mac_addrs = &pmd->eth_addr;
+ data->nb_rx_queues = pmd->nb_queues;
+ data->nb_tx_queues = pmd->nb_queues;
+
+ dev->data = data;
+ dev->dev_ops = &ops;
+ dev->rx_pkt_burst = pmd_rx_burst;
+ dev->tx_pkt_burst = pmd_tx_burst;
+
+ pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
+ pmd->intr_handle.fd = -1;
+
+ /* Presetup the fds to -1 as being not valid */
+ for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
+ pmd->rxq[i].fd = -1;
+ pmd->txq[i].fd = -1;
+ }
+
+ tap_kernel_support(pmd);
+ if (!pmd->flower_support)
+ return 0;
+ LIST_INIT(&pmd->flows);
+ /*
+ * If no netlink socket can be created, then it will fail when
+ * creating/destroying flow rules.
+ */
+ pmd->nlsk_fd = nl_init(0);
+ if (strlen(remote_iface)) {
+ struct ifreq ifr;
+
+ pmd->remote_if_index = if_nametoindex(remote_iface);
+ snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
+ "%s", remote_iface);
+ if (!pmd->remote_if_index) {
+ RTE_LOG(ERR, PMD, "Could not find %s ifindex: "
+ "remote interface will remain unconfigured\n",
+ remote_iface);
+ return 0;
+ }
+ if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
+ RTE_LOG(ERR, PMD, "Could not get remote MAC address\n");
+ goto error_exit;
+ }
+ rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
+ ETHER_ADDR_LEN);
+ } else {
+ eth_random_addr((uint8_t *)&pmd->eth_addr);
+ }
+
+ return 0;
+
+error_exit:
+ RTE_LOG(DEBUG, PMD, "TAP Unable to initialize %s\n",
+ rte_vdev_device_name(vdev));
+
+ rte_free(data);
+ return -EINVAL;
+}
+
+static int
+set_interface_name(const char *key __rte_unused,
+ const char *value,
+ void *extra_args)
+{
+ char *name = (char *)extra_args;
+
+ if (value)
+ snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s", value);
+ else
+ snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s%d",
+ DEFAULT_TAP_NAME, (tap_unit - 1));
+
+ return 0;
+}
+
+static int
+set_interface_speed(const char *key __rte_unused,
+ const char *value,
+ void *extra_args)
+{
+ *(int *)extra_args = (value) ? atoi(value) : ETH_SPEED_NUM_10G;
+
+ return 0;
+}
+
+static int
+set_remote_iface(const char *key __rte_unused,
+ const char *value,
+ void *extra_args)
+{
+ char *name = (char *)extra_args;
+
+ if (value)
+ snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s", value);
+
+ return 0;
+}
+
+/* Open a TAP interface device.
+ */
+static int
+rte_pmd_tap_probe(struct rte_vdev_device *dev)
+{
+ const char *name, *params;
+ int ret;
+ struct rte_kvargs *kvlist = NULL;
+ int speed;
+ char tap_name[RTE_ETH_NAME_MAX_LEN];
+ char remote_iface[RTE_ETH_NAME_MAX_LEN];
+
+ name = rte_vdev_device_name(dev);
+ params = rte_vdev_device_args(dev);
+
+ speed = ETH_SPEED_NUM_10G;
+ snprintf(tap_name, sizeof(tap_name), "%s%d",
+ DEFAULT_TAP_NAME, tap_unit++);
+ memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
+
+ if (params && (params[0] != '\0')) {
+ RTE_LOG(DEBUG, PMD, "paramaters (%s)\n", params);
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist) {
+ if (rte_kvargs_count(kvlist, ETH_TAP_SPEED_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_SPEED_ARG,
+ &set_interface_speed,
+ &speed);
+ if (ret == -1)
+ goto leave;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_IFACE_ARG,
+ &set_interface_name,
+ tap_name);
+ if (ret == -1)
+ goto leave;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_REMOTE_ARG,
+ &set_remote_iface,
+ remote_iface);
+ if (ret == -1)
+ goto leave;
+ }
+ }
+ }
+ pmd_link.link_speed = speed;
+
+ RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n",
+ name, tap_name);
+
+ ret = eth_dev_tap_create(dev, tap_name, remote_iface);
+
+leave:
+ if (ret == -1) {
+ RTE_LOG(ERR, PMD, "Failed to create pmd for %s as %s\n",
+ name, tap_name);
+ tap_unit--; /* Restore the unit number */
+ }
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+/* detach a TAP device.
+ */
+static int
+rte_pmd_tap_remove(struct rte_vdev_device *dev)
+{
+ struct rte_eth_dev *eth_dev = NULL;
+ struct pmd_internals *internals;
+ int i;
+
+ RTE_LOG(DEBUG, PMD, "Closing TUN/TAP Ethernet device on numa %u\n",
+ rte_socket_id());
+
+ /* find the ethdev entry */
+ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
+ if (!eth_dev)
+ return 0;
+
+ internals = eth_dev->data->dev_private;
+ if (internals->flower_support && internals->nlsk_fd) {
+ tap_flow_flush(eth_dev, NULL);
+ tap_flow_implicit_flush(internals, NULL);
+ nl_final(internals->nlsk_fd);
+ }
+ for (i = 0; i < internals->nb_queues; i++)
+ if (internals->rxq[i].fd != -1)
+ close(internals->rxq[i].fd);
+
+ close(internals->ioctl_sock);
+ rte_free(eth_dev->data->dev_private);
+ rte_free(eth_dev->data);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_tap_drv = {
+ .probe = rte_pmd_tap_probe,
+ .remove = rte_pmd_tap_remove,
+};
+RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
+RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
+RTE_PMD_REGISTER_PARAM_STRING(net_tap,
+ ETH_TAP_IFACE_ARG "=<string> "
+ ETH_TAP_SPEED_ARG "=<int> "
+ ETH_TAP_REMOTE_ARG "=<string>");
diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h
new file mode 100644
index 00000000..ad497b3d
--- /dev/null
+++ b/drivers/net/tap/rte_eth_tap.h
@@ -0,0 +1,100 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETH_TAP_H_
+#define _RTE_ETH_TAP_H_
+
+#include <sys/queue.h>
+#include <sys/uio.h>
+#include <inttypes.h>
+
+#include <linux/if_tun.h>
+
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+
+#ifdef IFF_MULTI_QUEUE
+#define RTE_PMD_TAP_MAX_QUEUES 16
+#else
+#define RTE_PMD_TAP_MAX_QUEUES 1
+#endif
+
+struct pkt_stats {
+ uint64_t opackets; /* Number of output packets */
+ uint64_t ipackets; /* Number of input packets */
+ uint64_t obytes; /* Number of bytes on output */
+ uint64_t ibytes; /* Number of bytes on input */
+ uint64_t errs; /* Number of TX error packets */
+ uint64_t ierrors; /* Number of RX error packets */
+ uint64_t rx_nombuf; /* Nb of RX mbuf alloc failures */
+};
+
+struct rx_queue {
+ struct rte_mempool *mp; /* Mempool for RX packets */
+ uint32_t trigger_seen; /* Last seen Rx trigger value */
+ uint16_t in_port; /* Port ID */
+ int fd;
+ struct pkt_stats stats; /* Stats for this RX queue */
+ uint16_t nb_rx_desc; /* max number of mbufs available */
+ struct rte_eth_rxmode *rxmode; /* RX features */
+ struct rte_mbuf *pool; /* mbufs pool for this queue */
+ struct iovec (*iovecs)[]; /* descriptors for this queue */
+ struct tun_pi pi; /* packet info for iovecs */
+};
+
+struct tx_queue {
+ int fd;
+ uint16_t *mtu; /* Pointer to MTU from dev_data */
+ struct pkt_stats stats; /* Stats for this TX queue */
+};
+
+struct pmd_internals {
+ char remote_iface[RTE_ETH_NAME_MAX_LEN]; /* Remote netdevice name */
+ char name[RTE_ETH_NAME_MAX_LEN]; /* Internal Tap device name */
+ uint16_t nb_queues; /* Number of queues supported */
+ struct ether_addr eth_addr; /* Mac address of the device port */
+ int remote_if_index; /* remote netdevice IF_INDEX */
+ int if_index; /* IF_INDEX for the port */
+ int ioctl_sock; /* socket for ioctl calls */
+ int nlsk_fd; /* Netlink socket fd */
+ int flower_support; /* 1 if kernel supports, else 0 */
+ int flower_vlan_support; /* 1 if kernel supports, else 0 */
+ LIST_HEAD(tap_flows, rte_flow) flows; /* rte_flow rules */
+ /* implicit rte_flow rules set when a remote device is active */
+ LIST_HEAD(tap_implicit_flows, rte_flow) implicit_flows;
+ struct rx_queue rxq[RTE_PMD_TAP_MAX_QUEUES]; /* List of RX queues */
+ struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES]; /* List of TX queues */
+ struct rte_intr_handle intr_handle; /* LSC interrupt handle. */
+};
+
+#endif /* _RTE_ETH_TAP_H_ */
diff --git a/drivers/net/tap/rte_pmd_tap_version.map b/drivers/net/tap/rte_pmd_tap_version.map
new file mode 100644
index 00000000..31eca32e
--- /dev/null
+++ b/drivers/net/tap/rte_pmd_tap_version.map
@@ -0,0 +1,4 @@
+DPDK_17.02 {
+
+ local: *;
+};
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
new file mode 100644
index 00000000..cf1c8a26
--- /dev/null
+++ b/drivers/net/tap/tap_flow.c
@@ -0,0 +1,1507 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <sys/queue.h>
+
+#include <rte_byteorder.h>
+#include <rte_jhash.h>
+#include <rte_malloc.h>
+#include <rte_eth_tap.h>
+#include <tap_flow.h>
+#include <tap_autoconf.h>
+#include <tap_tcmsgs.h>
+
+#ifndef HAVE_TC_FLOWER
+/*
+ * For kernels < 4.2, this enum is not defined. Runtime checks will be made to
+ * avoid sending TC messages the kernel cannot understand.
+ */
+enum {
+ TCA_FLOWER_UNSPEC,
+ TCA_FLOWER_CLASSID,
+ TCA_FLOWER_INDEV,
+ TCA_FLOWER_ACT,
+ TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_TYPE, /* be16 */
+ TCA_FLOWER_KEY_IP_PROTO, /* u8 */
+ TCA_FLOWER_KEY_IPV4_SRC, /* be32 */
+ TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_TCP_SRC, /* be16 */
+ TCA_FLOWER_KEY_TCP_DST, /* be16 */
+ TCA_FLOWER_KEY_UDP_SRC, /* be16 */
+ TCA_FLOWER_KEY_UDP_DST, /* be16 */
+};
+#endif
+#ifndef HAVE_TC_VLAN_ID
+enum {
+ /* TCA_FLOWER_FLAGS, */
+ TCA_FLOWER_KEY_VLAN_ID = TCA_FLOWER_KEY_UDP_DST + 2, /* be16 */
+ TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
+};
+#endif
+
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
+ struct rte_flow *remote_flow; /* associated remote flow */
+ struct nlmsg msg;
+};
+
+struct convert_data {
+ uint16_t eth_type;
+ uint16_t ip_proto;
+ uint8_t vlan;
+ struct rte_flow *flow;
+};
+
+struct remote_rule {
+ struct rte_flow_attr attr;
+ struct rte_flow_item items[2];
+ int mirred;
+};
+
+static int tap_flow_create_eth(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_vlan(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_ipv4(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_ipv6(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_udp(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_tcp(const struct rte_flow_item *item, void *data);
+static int
+tap_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+static struct rte_flow *
+tap_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+static int
+tap_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+
+static const struct rte_flow_ops tap_flow_ops = {
+ .validate = tap_flow_validate,
+ .create = tap_flow_create,
+ .destroy = tap_flow_destroy,
+ .flush = tap_flow_flush,
+};
+
+/* Static initializer for items. */
+#define ITEMS(...) \
+ (const enum rte_flow_item_type []){ \
+ __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
+ }
+
+/* Structure to generate a simple graph of layers supported by the NIC. */
+struct tap_flow_items {
+ /* Bit-mask corresponding to what is supported for this item. */
+ const void *mask;
+ const unsigned int mask_sz; /* Bit-mask size in bytes. */
+ /*
+ * Bit-mask corresponding to the default mask, if none is provided
+ * along with the item.
+ */
+ const void *default_mask;
+ /**
+ * Conversion function from rte_flow to netlink attributes.
+ *
+ * @param item
+ * rte_flow item to convert.
+ * @param data
+ * Internal structure to store the conversion.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+ int (*convert)(const struct rte_flow_item *item, void *data);
+ /** List of possible following items. */
+ const enum rte_flow_item_type *const items;
+};
+
+/* Graph of supported items and associated actions. */
+static const struct tap_flow_items tap_flow_items[] = {
+ [RTE_FLOW_ITEM_TYPE_END] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+ },
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .items = ITEMS(
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6),
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = -1,
+ },
+ .mask_sz = sizeof(struct rte_flow_item_eth),
+ .default_mask = &rte_flow_item_eth_mask,
+ .convert = tap_flow_create_eth,
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6),
+ .mask = &(const struct rte_flow_item_vlan){
+ .tpid = -1,
+ /* DEI matching is not supported */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ .tci = 0xffef,
+#else
+ .tci = 0xefff,
+#endif
+ },
+ .mask_sz = sizeof(struct rte_flow_item_vlan),
+ .default_mask = &rte_flow_item_vlan_mask,
+ .convert = tap_flow_create_vlan,
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .mask = &(const struct rte_flow_item_ipv4){
+ .hdr = {
+ .src_addr = -1,
+ .dst_addr = -1,
+ .next_proto_id = -1,
+ },
+ },
+ .mask_sz = sizeof(struct rte_flow_item_ipv4),
+ .default_mask = &rte_flow_item_ipv4_mask,
+ .convert = tap_flow_create_ipv4,
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .mask = &(const struct rte_flow_item_ipv6){
+ .hdr = {
+ .src_addr = {
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
+ .dst_addr = {
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
+ .proto = -1,
+ },
+ },
+ .mask_sz = sizeof(struct rte_flow_item_ipv6),
+ .default_mask = &rte_flow_item_ipv6_mask,
+ .convert = tap_flow_create_ipv6,
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .mask = &(const struct rte_flow_item_udp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .mask_sz = sizeof(struct rte_flow_item_udp),
+ .default_mask = &rte_flow_item_udp_mask,
+ .convert = tap_flow_create_udp,
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .mask = &(const struct rte_flow_item_tcp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .mask_sz = sizeof(struct rte_flow_item_tcp),
+ .default_mask = &rte_flow_item_tcp_mask,
+ .convert = tap_flow_create_tcp,
+ },
+};
+
+static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
+ [TAP_REMOTE_LOCAL_MAC] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_LOCAL_MAC,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_REDIR,
+ },
+ [TAP_REMOTE_BROADCAST] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_BROADCAST,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ .spec = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+ [TAP_REMOTE_BROADCASTV6] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_BROADCASTV6,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
+ },
+ .spec = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
+ },
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+ [TAP_REMOTE_PROMISC] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_PROMISC,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_VOID,
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+ [TAP_REMOTE_ALLMULTI] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_ALLMULTI,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ },
+ .spec = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ },
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+ [TAP_REMOTE_TX] = {
+ .attr = {
+ .group = 0,
+ .priority = TAP_REMOTE_TX,
+ .egress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_VOID,
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+};
+
+/**
+ * Make as much checks as possible on an Ethernet item, and if a flow is
+ * provided, fill it appropriately with Ethernet info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_eth(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_ETH].default_mask;
+ /* TC does not support eth_type masking. Only accept if exact match. */
+ if (mask->type && mask->type != 0xffff)
+ return -1;
+ if (!spec)
+ return 0;
+ /* store eth_type for consistency if ipv4/6 pattern item comes next */
+ if (spec->type & mask->type)
+ info->eth_type = spec->type;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ if (spec->type & mask->type)
+ msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info,
+ (spec->type & mask->type));
+ if (!is_zero_ether_addr(&spec->dst)) {
+ nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
+ &spec->dst.addr_bytes);
+ nlattr_add(&msg->nh,
+ TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
+ &mask->dst.addr_bytes);
+ }
+ if (!is_zero_ether_addr(&mask->src)) {
+ nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
+ &spec->src.addr_bytes);
+ nlattr_add(&msg->nh,
+ TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
+ &mask->src.addr_bytes);
+ }
+ return 0;
+}
+
+/**
+ * Make as much checks as possible on a VLAN item, and if a flow is provided,
+ * fill it appropriately with VLAN info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask;
+ /* TC does not support tpid masking. Only accept if exact match. */
+ if (mask->tpid && mask->tpid != 0xffff)
+ return -1;
+ /* Double-tagging not supported. */
+ if (spec && mask->tpid && spec->tpid != htons(ETH_P_8021Q))
+ return -1;
+ info->vlan = 1;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_8021Q));
+#define VLAN_PRIO(tci) ((tci) >> 13)
+#define VLAN_ID(tci) ((tci) & 0xfff)
+ if (!spec)
+ return 0;
+ if (spec->tci) {
+ uint16_t tci = ntohs(spec->tci) & mask->tci;
+ uint16_t prio = VLAN_PRIO(tci);
+ uint8_t vid = VLAN_ID(tci);
+
+ if (prio)
+ nlattr_add8(&msg->nh, TCA_FLOWER_KEY_VLAN_PRIO, prio);
+ if (vid)
+ nlattr_add16(&msg->nh, TCA_FLOWER_KEY_VLAN_ID, vid);
+ }
+ return 0;
+}
+
+/**
+ * Make as much checks as possible on an IPv4 item, and if a flow is provided,
+ * fill it appropriately with IPv4 info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV4].default_mask;
+ /* check that previous eth type is compatible with ipv4 */
+ if (info->eth_type && info->eth_type != htons(ETH_P_IP))
+ return -1;
+ /* store ip_proto for consistency if udp/tcp pattern item comes next */
+ if (spec)
+ info->ip_proto = spec->hdr.next_proto_id;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ if (!info->eth_type)
+ info->eth_type = htons(ETH_P_IP);
+ if (!info->vlan)
+ msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_IP));
+ if (!spec)
+ return 0;
+ if (spec->hdr.dst_addr) {
+ nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
+ spec->hdr.dst_addr);
+ nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
+ mask->hdr.dst_addr);
+ }
+ if (spec->hdr.src_addr) {
+ nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
+ spec->hdr.src_addr);
+ nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
+ mask->hdr.src_addr);
+ }
+ if (spec->hdr.next_proto_id)
+ nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO,
+ spec->hdr.next_proto_id);
+ return 0;
+}
+
+/**
+ * Make as much checks as possible on an IPv6 item, and if a flow is provided,
+ * fill it appropriately with IPv6 info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ uint8_t empty_addr[16] = { 0 };
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV6].default_mask;
+ /* check that previous eth type is compatible with ipv6 */
+ if (info->eth_type && info->eth_type != htons(ETH_P_IPV6))
+ return -1;
+ /* store ip_proto for consistency if udp/tcp pattern item comes next */
+ if (spec)
+ info->ip_proto = spec->hdr.proto;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ if (!info->eth_type)
+ info->eth_type = htons(ETH_P_IPV6);
+ if (!info->vlan)
+ msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_IPV6));
+ if (!spec)
+ return 0;
+ if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
+ nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
+ sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
+ nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
+ sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
+ }
+ if (memcmp(spec->hdr.src_addr, empty_addr, 16)) {
+ nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
+ sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
+ nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
+ sizeof(mask->hdr.src_addr), &mask->hdr.src_addr);
+ }
+ if (spec->hdr.proto)
+ nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, spec->hdr.proto);
+ return 0;
+}
+
+/**
+ * Make as much checks as possible on a UDP item, and if a flow is provided,
+ * fill it appropriately with UDP info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_udp(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_UDP].default_mask;
+ /* check that previous ip_proto is compatible with udp */
+ if (info->ip_proto && info->ip_proto != IPPROTO_UDP)
+ return -1;
+ /* TC does not support UDP port masking. Only accept if exact match. */
+ if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
+ (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
+ return -1;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
+ if (!spec)
+ return 0;
+ if (spec->hdr.dst_port & mask->hdr.dst_port)
+ nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
+ spec->hdr.dst_port);
+ if (spec->hdr.src_port & mask->hdr.src_port)
+ nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
+ spec->hdr.src_port);
+ return 0;
+}
+
+/**
+ * Make as much checks as possible on a TCP item, and if a flow is provided,
+ * fill it appropriately with TCP info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_TCP].default_mask;
+ /* check that previous ip_proto is compatible with tcp */
+ if (info->ip_proto && info->ip_proto != IPPROTO_TCP)
+ return -1;
+ /* TC does not support TCP port masking. Only accept if exact match. */
+ if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
+ (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
+ return -1;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
+ if (!spec)
+ return 0;
+ if (spec->hdr.dst_port & mask->hdr.dst_port)
+ nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
+ spec->hdr.dst_port);
+ if (spec->hdr.src_port & mask->hdr.src_port)
+ nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
+ spec->hdr.src_port);
+ return 0;
+}
+
+/**
+ * Check support for a given item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param size
+ * Bit-Mask size in bytes.
+ * @param[in] supported_mask
+ * Bit-mask covering supported fields to compare with spec, last and mask in
+ * \item.
+ * @param[in] default_mask
+ * Bit-mask default mask if none is provided in \item.
+ *
+ * @return
+ * 0 on success.
+ */
+static int
+tap_flow_item_validate(const struct rte_flow_item *item,
+ unsigned int size,
+ const uint8_t *supported_mask,
+ const uint8_t *default_mask)
+{
+ int ret = 0;
+
+ /* An empty layer is allowed, as long as all fields are NULL */
+ if (!item->spec && (item->mask || item->last))
+ return -1;
+ /* Is the item spec compatible with what the NIC supports? */
+ if (item->spec && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->spec;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | supported_mask[i]) != supported_mask[i])
+ return -1;
+ /* Is the default mask compatible with what the NIC supports? */
+ for (i = 0; i < size; i++)
+ if ((default_mask[i] | supported_mask[i]) !=
+ supported_mask[i])
+ return -1;
+ }
+ /* Is the item last compatible with what the NIC supports? */
+ if (item->last && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->last;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | supported_mask[i]) != supported_mask[i])
+ return -1;
+ }
+ /* Is the item mask compatible with what the NIC supports? */
+ if (item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->mask;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | supported_mask[i]) != supported_mask[i])
+ return -1;
+ }
+ /**
+ * Once masked, Are item spec and item last equal?
+ * TC does not support range so anything else is invalid.
+ */
+ if (item->spec && item->last) {
+ uint8_t spec[size];
+ uint8_t last[size];
+ const uint8_t *apply = default_mask;
+ unsigned int i;
+
+ if (item->mask)
+ apply = item->mask;
+ for (i = 0; i < size; ++i) {
+ spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
+ last[i] = ((const uint8_t *)item->last)[i] & apply[i];
+ }
+ ret = memcmp(spec, last, size);
+ }
+ return ret;
+}
+
+/**
+ * Transform a DROP/PASSTHRU action item in the provided flow for TC.
+ *
+ * @param[in, out] flow
+ * Flow to be filled.
+ * @param[in] action
+ * Appropriate action to be set in the TCA_GACT_PARMS structure.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+add_action_gact(struct rte_flow *flow, int action)
+{
+ struct nlmsg *msg = &flow->msg;
+ size_t act_index = 1;
+ struct tc_gact p = {
+ .action = action
+ };
+
+ if (nlattr_nested_start(msg, TCA_FLOWER_ACT) < 0)
+ return -1;
+ if (nlattr_nested_start(msg, act_index++) < 0)
+ return -1;
+ nlattr_add(&msg->nh, TCA_ACT_KIND, sizeof("gact"), "gact");
+ if (nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
+ return -1;
+ nlattr_add(&msg->nh, TCA_GACT_PARMS, sizeof(p), &p);
+ nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
+ nlattr_nested_finish(msg); /* nested act_index */
+ nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
+ return 0;
+}
+
+/**
+ * Transform a MIRRED action item in the provided flow for TC.
+ *
+ * @param[in, out] flow
+ * Flow to be filled.
+ * @param[in] ifindex
+ * Netdevice ifindex, where to mirror/redirect packet to.
+ * @param[in] action_type
+ * Either TCA_EGRESS_REDIR for redirection or TCA_EGRESS_MIRROR for mirroring.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+add_action_mirred(struct rte_flow *flow, uint16_t ifindex, uint16_t action_type)
+{
+ struct nlmsg *msg = &flow->msg;
+ size_t act_index = 1;
+ struct tc_mirred p = {
+ .eaction = action_type,
+ .ifindex = ifindex,
+ };
+
+ if (nlattr_nested_start(msg, TCA_FLOWER_ACT) < 0)
+ return -1;
+ if (nlattr_nested_start(msg, act_index++) < 0)
+ return -1;
+ nlattr_add(&msg->nh, TCA_ACT_KIND, sizeof("mirred"), "mirred");
+ if (nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
+ return -1;
+ if (action_type == TCA_EGRESS_MIRROR)
+ p.action = TC_ACT_PIPE;
+ else /* REDIRECT */
+ p.action = TC_ACT_STOLEN;
+ nlattr_add(&msg->nh, TCA_MIRRED_PARMS, sizeof(p), &p);
+ nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
+ nlattr_nested_finish(msg); /* nested act_index */
+ nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
+ return 0;
+}
+
+/**
+ * Transform a QUEUE action item in the provided flow for TC.
+ *
+ * @param[in, out] flow
+ * Flow to be filled.
+ * @param[in] queue
+ * Queue id to use.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+add_action_skbedit(struct rte_flow *flow, uint16_t queue)
+{
+ struct nlmsg *msg = &flow->msg;
+ size_t act_index = 1;
+ struct tc_skbedit p = {
+ .action = TC_ACT_PIPE
+ };
+
+ if (nlattr_nested_start(msg, TCA_FLOWER_ACT) < 0)
+ return -1;
+ if (nlattr_nested_start(msg, act_index++) < 0)
+ return -1;
+ nlattr_add(&msg->nh, TCA_ACT_KIND, sizeof("skbedit"), "skbedit");
+ if (nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
+ return -1;
+ nlattr_add(&msg->nh, TCA_SKBEDIT_PARMS, sizeof(p), &p);
+ nlattr_add16(&msg->nh, TCA_SKBEDIT_QUEUE_MAPPING, queue);
+ nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
+ nlattr_nested_finish(msg); /* nested act_index */
+ nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
+ return 0;
+}
+
+/**
+ * Validate a flow supported by TC.
+ * If flow param is not NULL, then also fill the netlink message inside.
+ *
+ * @param pmd
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] flow
+ * Flow structure to update.
+ * @param[in] mirred
+ * If set to TCA_EGRESS_REDIR, provided actions will be replaced with a
+ * redirection to the tap netdevice, and the TC rule will be configured
+ * on the remote netdevice in pmd.
+ * If set to TCA_EGRESS_MIRROR, provided actions will be replaced with a
+ * mirroring to the tap netdevice, and the TC rule will be configured
+ * on the remote netdevice in pmd. Matching packets will thus be duplicated.
+ * If set to 0, the standard behavior is to be used: set correct actions for
+ * the TC rule, and apply it on the tap netdevice.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_process(struct pmd_internals *pmd,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow,
+ int mirred)
+{
+ const struct tap_flow_items *cur_item = tap_flow_items;
+ struct convert_data data = {
+ .eth_type = 0,
+ .ip_proto = 0,
+ .flow = flow,
+ };
+ int action = 0; /* Only one action authorized for now */
+
+ if (attr->group > MAX_GROUP) {
+ rte_flow_error_set(
+ error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL, "group value too big: cannot exceed 15");
+ return -rte_errno;
+ }
+ if (attr->priority > MAX_PRIORITY) {
+ rte_flow_error_set(
+ error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL, "priority value too big");
+ return -rte_errno;
+ } else if (flow) {
+ uint16_t group = attr->group << GROUP_SHIFT;
+ uint16_t prio = group | (attr->priority + PRIORITY_OFFSET);
+ flow->msg.t.tcm_info = TC_H_MAKE(prio << 16,
+ flow->msg.t.tcm_info);
+ }
+ if (flow) {
+ if (mirred) {
+ /*
+ * If attr->ingress, the rule applies on remote ingress
+ * to match incoming packets
+ * If attr->egress, the rule applies on tap ingress (as
+ * seen from the kernel) to deal with packets going out
+ * from the DPDK app.
+ */
+ flow->msg.t.tcm_parent = TC_H_MAKE(TC_H_INGRESS, 0);
+ } else {
+ /* Standard rule on tap egress (kernel standpoint). */
+ flow->msg.t.tcm_parent =
+ TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
+ }
+ /* use flower filter type */
+ nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower");
+ if (nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0)
+ goto exit_item_not_supported;
+ }
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ const struct tap_flow_items *token = NULL;
+ unsigned int i;
+ int err = 0;
+
+ if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ for (i = 0;
+ cur_item->items &&
+ cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
+ ++i) {
+ if (cur_item->items[i] == items->type) {
+ token = &tap_flow_items[items->type];
+ break;
+ }
+ }
+ if (!token)
+ goto exit_item_not_supported;
+ cur_item = token;
+ err = tap_flow_item_validate(
+ items, cur_item->mask_sz,
+ (const uint8_t *)cur_item->mask,
+ (const uint8_t *)cur_item->default_mask);
+ if (err)
+ goto exit_item_not_supported;
+ if (flow && cur_item->convert) {
+ if (!pmd->flower_vlan_support &&
+ cur_item->convert == tap_flow_create_vlan)
+ goto exit_item_not_supported;
+ err = cur_item->convert(items, &data);
+ if (err)
+ goto exit_item_not_supported;
+ }
+ }
+ if (flow) {
+ if (pmd->flower_vlan_support && data.vlan) {
+ nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
+ htons(ETH_P_8021Q));
+ nlattr_add16(&flow->msg.nh,
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+ data.eth_type ?
+ data.eth_type : htons(ETH_P_ALL));
+ } else if (data.eth_type) {
+ nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
+ data.eth_type);
+ }
+ }
+ if (mirred && flow) {
+ uint16_t if_index = pmd->if_index;
+
+ /*
+ * If attr->egress && mirred, then this is a special
+ * case where the rule must be applied on the tap, to
+ * redirect packets coming from the DPDK App, out
+ * through the remote netdevice.
+ */
+ if (attr->egress)
+ if_index = pmd->remote_if_index;
+ if (add_action_mirred(flow, if_index, mirred) < 0)
+ goto exit_action_not_supported;
+ else
+ goto end;
+ }
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+ int err = 0;
+
+ if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ continue;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ if (action)
+ goto exit_action_not_supported;
+ action = 1;
+ if (flow)
+ err = add_action_gact(flow, TC_ACT_SHOT);
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_PASSTHRU) {
+ if (action)
+ goto exit_action_not_supported;
+ action = 1;
+ if (flow)
+ err = add_action_gact(flow, TC_ACT_UNSPEC);
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *queue =
+ (const struct rte_flow_action_queue *)
+ actions->conf;
+ if (action)
+ goto exit_action_not_supported;
+ action = 1;
+ if (!queue || (queue->index >= pmd->nb_queues))
+ goto exit_action_not_supported;
+ if (flow)
+ err = add_action_skbedit(flow, queue->index);
+ } else {
+ goto exit_action_not_supported;
+ }
+ if (err)
+ goto exit_action_not_supported;
+ }
+end:
+ if (flow)
+ nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
+ return 0;
+exit_item_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
+ return -rte_errno;
+exit_action_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "action not supported");
+ return -rte_errno;
+}
+
+
+
+/**
+ * Validate a flow.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+static int
+tap_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ return priv_flow_process(pmd, attr, items, actions, error, NULL, 0);
+}
+
+/**
+ * Set a unique handle in a flow.
+ *
+ * The kernel supports TC rules with equal priority, as long as they use the
+ * same matching fields (e.g.: dst mac and ipv4) with different values (and
+ * full mask to ensure no collision is possible).
+ * In those rules, the handle (uint32_t) is the part that would identify
+ * specifically each rule.
+ *
+ * On 32-bit architectures, the handle can simply be the flow's pointer address.
+ * On 64-bit architectures, we rely on jhash(flow) to find a (sufficiently)
+ * unique handle.
+ *
+ * @param[in, out] flow
+ * The flow that needs its handle set.
+ */
+static void
+tap_flow_set_handle(struct rte_flow *flow)
+{
+ uint32_t handle = 0;
+
+ if (sizeof(flow) > 4)
+ handle = rte_jhash(&flow, sizeof(flow), 1);
+ else
+ handle = (uintptr_t)flow;
+ /* must be at least 1 to avoid letting the kernel choose one for us */
+ if (!handle)
+ handle = 1;
+ flow->msg.t.tcm_handle = handle;
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+static struct rte_flow *
+tap_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct rte_flow *remote_flow = NULL;
+ struct rte_flow *flow = NULL;
+ struct nlmsg *msg = NULL;
+ int err;
+
+ if (!pmd->if_index) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "can't create rule, ifindex not found");
+ goto fail;
+ }
+ /*
+ * No rules configured through standard rte_flow should be set on the
+ * priorities used by implicit rules.
+ */
+ if ((attr->group == MAX_GROUP) &&
+ attr->priority > (MAX_PRIORITY - TAP_REMOTE_MAX_IDX)) {
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL, "priority value too big");
+ goto fail;
+ }
+ flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate memory for rte_flow");
+ goto fail;
+ }
+ msg = &flow->msg;
+ tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
+ tap_flow_set_handle(flow);
+ if (priv_flow_process(pmd, attr, items, actions, error, flow, 0))
+ goto fail;
+ err = nl_send(pmd->nlsk_fd, &msg->nh);
+ if (err < 0) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "couldn't send request to kernel");
+ goto fail;
+ }
+ err = nl_recv_ack(pmd->nlsk_fd);
+ if (err < 0) {
+ RTE_LOG(ERR, PMD,
+ "Kernel refused TC filter rule creation (%d): %s\n",
+ errno, strerror(errno));
+ rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "overlapping rules");
+ goto fail;
+ }
+ LIST_INSERT_HEAD(&pmd->flows, flow, next);
+ /**
+ * If a remote device is configured, a TC rule with identical items for
+ * matching must be set on that device, with a single action: redirect
+ * to the local pmd->if_index.
+ */
+ if (pmd->remote_if_index) {
+ remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ if (!remote_flow) {
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "cannot allocate memory for rte_flow");
+ goto fail;
+ }
+ msg = &remote_flow->msg;
+ /* set the rule if_index for the remote netdevice */
+ tc_init_msg(
+ msg, pmd->remote_if_index, RTM_NEWTFILTER,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
+ tap_flow_set_handle(remote_flow);
+ if (priv_flow_process(pmd, attr, items, NULL,
+ error, remote_flow, TCA_EGRESS_REDIR)) {
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "rte flow rule validation failed");
+ goto fail;
+ }
+ err = nl_send(pmd->nlsk_fd, &msg->nh);
+ if (err < 0) {
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failure sending nl request");
+ goto fail;
+ }
+ err = nl_recv_ack(pmd->nlsk_fd);
+ if (err < 0) {
+ RTE_LOG(ERR, PMD,
+ "Kernel refused TC filter rule creation (%d): %s\n",
+ errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "overlapping rules");
+ goto fail;
+ }
+ flow->remote_flow = remote_flow;
+ }
+ return flow;
+fail:
+ if (remote_flow)
+ rte_free(remote_flow);
+ if (flow)
+ rte_free(flow);
+ return NULL;
+}
+
+/**
+ * Destroy a flow using pointer to pmd_internal.
+ *
+ * @param[in, out] pmd
+ * Pointer to private structure.
+ * @param[in] flow
+ * Pointer to the flow to destroy.
+ * @param[in, out] error
+ * Pointer to the flow error handler
+ *
+ * @return 0 if the flow could be destroyed, -1 otherwise.
+ */
+static int
+tap_flow_destroy_pmd(struct pmd_internals *pmd,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *remote_flow = flow->remote_flow;
+ int ret = 0;
+
+ LIST_REMOVE(flow, next);
+ flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
+
+ ret = nl_send(pmd->nlsk_fd, &flow->msg.nh);
+ if (ret < 0) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "couldn't send request to kernel");
+ goto end;
+ }
+ ret = nl_recv_ack(pmd->nlsk_fd);
+ /* If errno is ENOENT, the rule is already no longer in the kernel. */
+ if (ret < 0 && errno == ENOENT)
+ ret = 0;
+ if (ret < 0) {
+ RTE_LOG(ERR, PMD,
+ "Kernel refused TC filter rule deletion (%d): %s\n",
+ errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "couldn't receive kernel ack to our request");
+ goto end;
+ }
+ if (remote_flow) {
+ remote_flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ remote_flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
+
+ ret = nl_send(pmd->nlsk_fd, &remote_flow->msg.nh);
+ if (ret < 0) {
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failure sending nl request");
+ goto end;
+ }
+ ret = nl_recv_ack(pmd->nlsk_fd);
+ if (ret < 0 && errno == ENOENT)
+ ret = 0;
+ if (ret < 0) {
+ RTE_LOG(ERR, PMD,
+ "Kernel refused TC filter rule deletion (%d): %s\n",
+ errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failure trying to receive nl ack");
+ goto end;
+ }
+ }
+end:
+ if (remote_flow)
+ rte_free(remote_flow);
+ rte_free(flow);
+ return ret;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+static int
+tap_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ return tap_flow_destroy_pmd(pmd, flow, error);
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct rte_flow *flow;
+
+ while (!LIST_EMPTY(&pmd->flows)) {
+ flow = LIST_FIRST(&pmd->flows);
+ if (tap_flow_destroy(dev, flow, error) < 0)
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Add an implicit flow rule on the remote device to make sure traffic gets to
+ * the tap netdevice from there.
+ *
+ * @param pmd
+ * Pointer to private structure.
+ * @param[in] idx
+ * The idx in the implicit_rte_flows array specifying which rule to apply.
+ *
+ * @return -1 if the rule couldn't be applied, 0 otherwise.
+ */
+int tap_flow_implicit_create(struct pmd_internals *pmd,
+ enum implicit_rule_index idx)
+{
+ struct rte_flow_item *items = implicit_rte_flows[idx].items;
+ struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr;
+ struct rte_flow_item_eth eth_local = { .type = 0 };
+ uint16_t if_index = pmd->remote_if_index;
+ struct rte_flow *remote_flow = NULL;
+ struct nlmsg *msg = NULL;
+ int err = 0;
+ struct rte_flow_item items_local[2] = {
+ [0] = {
+ .type = items[0].type,
+ .spec = &eth_local,
+ .mask = items[0].mask,
+ },
+ [1] = {
+ .type = items[1].type,
+ }
+ };
+
+ remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ if (!remote_flow) {
+ RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow");
+ goto fail;
+ }
+ msg = &remote_flow->msg;
+ if (idx == TAP_REMOTE_TX) {
+ if_index = pmd->if_index;
+ } else if (idx == TAP_REMOTE_LOCAL_MAC) {
+ /*
+ * eth addr couldn't be set in implicit_rte_flows[] as it is not
+ * known at compile time.
+ */
+ memcpy(&eth_local.dst, &pmd->eth_addr, sizeof(pmd->eth_addr));
+ items = items_local;
+ }
+ tc_init_msg(msg, if_index, RTM_NEWTFILTER,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
+ tap_flow_set_handle(remote_flow);
+ if (priv_flow_process(pmd, attr, items, NULL, NULL,
+ remote_flow, implicit_rte_flows[idx].mirred)) {
+ RTE_LOG(ERR, PMD, "rte flow rule validation failed\n");
+ goto fail;
+ }
+ err = nl_send(pmd->nlsk_fd, &msg->nh);
+ if (err < 0) {
+ RTE_LOG(ERR, PMD, "Failure sending nl request");
+ goto fail;
+ }
+ err = nl_recv_ack(pmd->nlsk_fd);
+ if (err < 0) {
+ RTE_LOG(ERR, PMD,
+ "Kernel refused TC filter rule creation (%d): %s\n",
+ errno, strerror(errno));
+ goto fail;
+ }
+ LIST_INSERT_HEAD(&pmd->implicit_flows, remote_flow, next);
+ return 0;
+fail:
+ if (remote_flow)
+ rte_free(remote_flow);
+ return -1;
+}
+
+/**
+ * Remove specific implicit flow rule on the remote device.
+ *
+ * @param[in, out] pmd
+ * Pointer to private structure.
+ * @param[in] idx
+ * The idx in the implicit_rte_flows array specifying which rule to remove.
+ *
+ * @return -1 if one of the implicit rules couldn't be created, 0 otherwise.
+ */
+int tap_flow_implicit_destroy(struct pmd_internals *pmd,
+ enum implicit_rule_index idx)
+{
+ struct rte_flow *remote_flow;
+ int cur_prio = -1;
+ int idx_prio = implicit_rte_flows[idx].attr.priority + PRIORITY_OFFSET;
+
+ for (remote_flow = LIST_FIRST(&pmd->implicit_flows);
+ remote_flow;
+ remote_flow = LIST_NEXT(remote_flow, next)) {
+ cur_prio = (remote_flow->msg.t.tcm_info >> 16) & PRIORITY_MASK;
+ if (cur_prio != idx_prio)
+ continue;
+ return tap_flow_destroy_pmd(pmd, remote_flow, NULL);
+ }
+ return 0;
+}
+
+/**
+ * Destroy all implicit flows.
+ *
+ * @see rte_flow_flush()
+ */
+int
+tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
+{
+ struct rte_flow *remote_flow;
+
+ while (!LIST_EMPTY(&pmd->implicit_flows)) {
+ remote_flow = LIST_FIRST(&pmd->implicit_flows);
+ if (tap_flow_destroy_pmd(pmd, remote_flow, error) < 0)
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Manage filter operations.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+tap_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ if (!pmd->flower_support)
+ return -ENOTSUP;
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &tap_flow_ops;
+ return 0;
+ default:
+ RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported",
+ (void *)dev, filter_type);
+ }
+ return -EINVAL;
+}
+
diff --git a/drivers/net/tap/tap_flow.h b/drivers/net/tap/tap_flow.h
new file mode 100644
index 00000000..94414f18
--- /dev/null
+++ b/drivers/net/tap/tap_flow.h
@@ -0,0 +1,82 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TAP_FLOW_H_
+#define _TAP_FLOW_H_
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_eth_tap.h>
+
+/**
+ * In TC, priority 0 means we require the kernel to allocate one for us.
+ * In rte_flow, however, we want the priority 0 to be the most important one.
+ * Use an offset to have the most important priority being 1 in TC.
+ */
+#define PRIORITY_OFFSET 1
+#define PRIORITY_MASK (0xfff)
+#define MAX_PRIORITY (PRIORITY_MASK - PRIORITY_OFFSET)
+#define GROUP_MASK (0xf)
+#define GROUP_SHIFT 12
+#define MAX_GROUP GROUP_MASK
+
+/**
+ * These index are actually in reversed order: their priority is processed
+ * by subtracting their value to the lowest priority (PRIORITY_MASK).
+ * Thus the first one will have the lowest priority in the end
+ * (but biggest value).
+ */
+enum implicit_rule_index {
+ TAP_REMOTE_TX,
+ TAP_REMOTE_BROADCASTV6,
+ TAP_REMOTE_BROADCAST,
+ TAP_REMOTE_ALLMULTI,
+ TAP_REMOTE_PROMISC,
+ TAP_REMOTE_LOCAL_MAC,
+ TAP_REMOTE_MAX_IDX,
+};
+
+int tap_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+int tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+
+int tap_flow_implicit_create(struct pmd_internals *pmd,
+ enum implicit_rule_index idx);
+int tap_flow_implicit_destroy(struct pmd_internals *pmd,
+ enum implicit_rule_index idx);
+int tap_flow_implicit_flush(struct pmd_internals *pmd,
+ struct rte_flow_error *error);
+
+#endif /* _TAP_FLOW_H_ */
diff --git a/drivers/net/tap/tap_netlink.c b/drivers/net/tap/tap_netlink.c
new file mode 100644
index 00000000..ee92e2e7
--- /dev/null
+++ b/drivers/net/tap/tap_netlink.c
@@ -0,0 +1,367 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/netlink.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <rte_malloc.h>
+#include <tap_netlink.h>
+#include <rte_random.h>
+
+/* Must be quite large to support dumping a huge list of QDISC or filters. */
+#define BUF_SIZE (32 * 1024) /* Size of the buffer to receive kernel messages */
+#define SNDBUF_SIZE 32768 /* Send buffer size for the netlink socket */
+#define RCVBUF_SIZE 32768 /* Receive buffer size for the netlink socket */
+
+struct nested_tail {
+ struct rtattr *tail;
+ struct nested_tail *prev;
+};
+
+/**
+ * Initialize a netlink socket for communicating with the kernel.
+ *
+ * @param nl_groups
+ * Set it to a netlink group value (e.g. RTMGRP_LINK) to receive messages for
+ * specific netlink multicast groups. Otherwise, no subscription will be made.
+ *
+ * @return
+ * netlink socket file descriptor on success, -1 otherwise.
+ */
+int
+nl_init(uint32_t nl_groups)
+{
+ int fd, sndbuf_size = SNDBUF_SIZE, rcvbuf_size = RCVBUF_SIZE;
+ struct sockaddr_nl local = {
+ .nl_family = AF_NETLINK,
+ .nl_groups = nl_groups,
+ };
+
+ fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
+ if (fd < 0) {
+ RTE_LOG(ERR, PMD, "Unable to create a netlink socket\n");
+ return -1;
+ }
+ if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int))) {
+ RTE_LOG(ERR, PMD, "Unable to set socket buffer send size\n");
+ return -1;
+ }
+ if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int))) {
+ RTE_LOG(ERR, PMD, "Unable to set socket buffer receive size\n");
+ return -1;
+ }
+ if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) {
+ RTE_LOG(ERR, PMD, "Unable to bind to the netlink socket\n");
+ return -1;
+ }
+ return fd;
+}
+
+/**
+ * Clean up a netlink socket once all communicating with the kernel is finished.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ *
+ * @return
+ * 0 on success, -1 otherwise.
+ */
+int
+nl_final(int nlsk_fd)
+{
+ if (close(nlsk_fd)) {
+ RTE_LOG(ERR, PMD, "Failed to close netlink socket: %s (%d)\n",
+ strerror(errno), errno);
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Send a message to the kernel on the netlink socket.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] nh
+ * The netlink message send to the kernel.
+ *
+ * @return
+ * the number of sent bytes on success, -1 otherwise.
+ */
+int
+nl_send(int nlsk_fd, struct nlmsghdr *nh)
+{
+ /* man 7 netlink EXAMPLE */
+ struct sockaddr_nl sa = {
+ .nl_family = AF_NETLINK,
+ };
+ struct iovec iov = {
+ .iov_base = nh,
+ .iov_len = nh->nlmsg_len,
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ };
+ int send_bytes;
+
+ nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */
+ nh->nlmsg_seq = (uint32_t)rte_rand();
+ send_bytes = sendmsg(nlsk_fd, &msg, 0);
+ if (send_bytes < 0) {
+ RTE_LOG(ERR, PMD, "Failed to send netlink message: %s (%d)\n",
+ strerror(errno), errno);
+ return -1;
+ }
+ return send_bytes;
+}
+
+/**
+ * Check that the kernel sends an appropriate ACK in response to an nl_send().
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+nl_recv_ack(int nlsk_fd)
+{
+ return nl_recv(nlsk_fd, NULL, NULL);
+}
+
+/**
+ * Receive a message from the kernel on the netlink socket, following an
+ * nl_send().
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] cb
+ * The callback function to call for each netlink message received.
+ * @param[in, out] arg
+ * Custom arguments for the callback.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+nl_recv(int nlsk_fd, int (*cb)(struct nlmsghdr *, void *arg), void *arg)
+{
+ /* man 7 netlink EXAMPLE */
+ struct sockaddr_nl sa;
+ char buf[BUF_SIZE];
+ struct iovec iov = {
+ .iov_base = buf,
+ .iov_len = sizeof(buf),
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ /* One message at a time */
+ .msg_iovlen = 1,
+ };
+ int multipart = 0;
+ int ret = 0;
+
+ do {
+ struct nlmsghdr *nh;
+ int recv_bytes = 0;
+
+ recv_bytes = recvmsg(nlsk_fd, &msg, 0);
+ if (recv_bytes < 0)
+ return -1;
+ for (nh = (struct nlmsghdr *)buf;
+ NLMSG_OK(nh, (unsigned int)recv_bytes);
+ nh = NLMSG_NEXT(nh, recv_bytes)) {
+ if (nh->nlmsg_type == NLMSG_ERROR) {
+ struct nlmsgerr *err_data = NLMSG_DATA(nh);
+
+ if (err_data->error < 0) {
+ errno = -err_data->error;
+ return -1;
+ }
+ /* Ack message. */
+ return 0;
+ }
+ /* Multi-part msgs and their trailing DONE message. */
+ if (nh->nlmsg_flags & NLM_F_MULTI) {
+ if (nh->nlmsg_type == NLMSG_DONE)
+ return 0;
+ multipart = 1;
+ }
+ if (cb)
+ ret = cb(nh, arg);
+ }
+ } while (multipart);
+ return ret;
+}
+
+/**
+ * Append a netlink attribute to a message.
+ *
+ * @param[in, out] nh
+ * The netlink message to parse, received from the kernel.
+ * @param[in] type
+ * The type of attribute to append.
+ * @param[in] data_len
+ * The length of the data to append.
+ * @param[in] data
+ * The data to append.
+ */
+void
+nlattr_add(struct nlmsghdr *nh, unsigned short type,
+ unsigned int data_len, const void *data)
+{
+ /* see man 3 rtnetlink */
+ struct rtattr *rta;
+
+ rta = (struct rtattr *)NLMSG_TAIL(nh);
+ rta->rta_len = RTA_LENGTH(data_len);
+ rta->rta_type = type;
+ memcpy(RTA_DATA(rta), data, data_len);
+ nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
+}
+
+/**
+ * Append a uint8_t netlink attribute to a message.
+ *
+ * @param[in, out] nh
+ * The netlink message to parse, received from the kernel.
+ * @param[in] type
+ * The type of attribute to append.
+ * @param[in] data
+ * The data to append.
+ */
+void
+nlattr_add8(struct nlmsghdr *nh, unsigned short type, uint8_t data)
+{
+ nlattr_add(nh, type, sizeof(uint8_t), &data);
+}
+
+/**
+ * Append a uint16_t netlink attribute to a message.
+ *
+ * @param[in, out] nh
+ * The netlink message to parse, received from the kernel.
+ * @param[in] type
+ * The type of attribute to append.
+ * @param[in] data
+ * The data to append.
+ */
+void
+nlattr_add16(struct nlmsghdr *nh, unsigned short type, uint16_t data)
+{
+ nlattr_add(nh, type, sizeof(uint16_t), &data);
+}
+
+/**
+ * Append a uint16_t netlink attribute to a message.
+ *
+ * @param[in, out] nh
+ * The netlink message to parse, received from the kernel.
+ * @param[in] type
+ * The type of attribute to append.
+ * @param[in] data
+ * The data to append.
+ */
+void
+nlattr_add32(struct nlmsghdr *nh, unsigned short type, uint32_t data)
+{
+ nlattr_add(nh, type, sizeof(uint32_t), &data);
+}
+
+/**
+ * Start a nested netlink attribute.
+ * It must be followed later by a call to nlattr_nested_finish().
+ *
+ * @param[in, out] msg
+ * The netlink message where to edit the nested_tails metadata.
+ * @param[in] type
+ * The nested attribute type to append.
+ *
+ * @return
+ * -1 if adding a nested netlink attribute failed, 0 otherwise.
+ */
+int
+nlattr_nested_start(struct nlmsg *msg, uint16_t type)
+{
+ struct nested_tail *tail;
+
+ tail = rte_zmalloc(NULL, sizeof(struct nested_tail), 0);
+ if (!tail) {
+ RTE_LOG(ERR, PMD,
+ "Couldn't allocate memory for nested netlink"
+ " attribute\n");
+ return -1;
+ }
+
+ tail->tail = (struct rtattr *)NLMSG_TAIL(&msg->nh);
+
+ nlattr_add(&msg->nh, type, 0, NULL);
+
+ tail->prev = msg->nested_tails;
+
+ msg->nested_tails = tail;
+
+ return 0;
+}
+
+/**
+ * End a nested netlink attribute.
+ * It follows a call to nlattr_nested_start().
+ * In effect, it will modify the nested attribute length to include every bytes
+ * from the nested attribute start, up to here.
+ *
+ * @param[in, out] msg
+ * The netlink message where to edit the nested_tails metadata.
+ */
+void
+nlattr_nested_finish(struct nlmsg *msg)
+{
+ struct nested_tail *tail = msg->nested_tails;
+
+ tail->tail->rta_len = (char *)NLMSG_TAIL(&msg->nh) - (char *)tail->tail;
+
+ if (tail->prev)
+ msg->nested_tails = tail->prev;
+
+ rte_free(tail);
+}
diff --git a/drivers/net/tap/tap_netlink.h b/drivers/net/tap/tap_netlink.h
new file mode 100644
index 00000000..98e13902
--- /dev/null
+++ b/drivers/net/tap/tap_netlink.h
@@ -0,0 +1,69 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TAP_NETLINK_H_
+#define _TAP_NETLINK_H_
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <linux/rtnetlink.h>
+#include <linux/netlink.h>
+#include <stdio.h>
+
+#include <rte_log.h>
+
+#define NLMSG_BUF 512
+
+struct nlmsg {
+ struct nlmsghdr nh;
+ struct tcmsg t;
+ char buf[NLMSG_BUF];
+ struct nested_tail *nested_tails;
+};
+
+#define NLMSG_TAIL(nlh) (void *)((char *)(nlh) + NLMSG_ALIGN((nlh)->nlmsg_len))
+
+int nl_init(uint32_t nl_groups);
+int nl_final(int nlsk_fd);
+int nl_send(int nlsk_fd, struct nlmsghdr *nh);
+int nl_recv(int nlsk_fd, int (*callback)(struct nlmsghdr *, void *), void *arg);
+int nl_recv_ack(int nlsk_fd);
+void nlattr_add(struct nlmsghdr *nh, unsigned short type,
+ unsigned int data_len, const void *data);
+void nlattr_add8(struct nlmsghdr *nh, unsigned short type, uint8_t data);
+void nlattr_add16(struct nlmsghdr *nh, unsigned short type, uint16_t data);
+void nlattr_add32(struct nlmsghdr *nh, unsigned short type, uint32_t data);
+int nlattr_nested_start(struct nlmsg *msg, uint16_t type);
+void nlattr_nested_finish(struct nlmsg *msg);
+
+#endif /* _TAP_NETLINK_H_ */
diff --git a/drivers/net/tap/tap_tcmsgs.c b/drivers/net/tap/tap_tcmsgs.c
new file mode 100644
index 00000000..d74ac805
--- /dev/null
+++ b/drivers/net/tap/tap_tcmsgs.c
@@ -0,0 +1,323 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <linux/netlink.h>
+#include <net/if.h>
+#include <string.h>
+
+#include <rte_log.h>
+#include <tap_tcmsgs.h>
+
+struct qdisc {
+ uint32_t handle;
+ uint32_t parent;
+};
+
+struct list_args {
+ int nlsk_fd;
+ uint16_t ifindex;
+ void *custom_arg;
+};
+
+struct qdisc_custom_arg {
+ uint32_t handle;
+ uint32_t parent;
+ uint8_t exists;
+};
+
+/**
+ * Initialize a netlink message with a TC header.
+ *
+ * @param[in, out] msg
+ * The netlink message to fill.
+ * @param[in] ifindex
+ * The netdevice ifindex where the rule will be applied.
+ * @param[in] type
+ * The type of TC message to create (RTM_NEWTFILTER, RTM_NEWQDISC, etc.).
+ * @param[in] flags
+ * Overrides the default netlink flags for this msg with those specified.
+ */
+void
+tc_init_msg(struct nlmsg *msg, uint16_t ifindex, uint16_t type, uint16_t flags)
+{
+ struct nlmsghdr *n = &msg->nh;
+
+ n->nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ n->nlmsg_type = type;
+ if (flags)
+ n->nlmsg_flags = flags;
+ else
+ n->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ msg->t.tcm_family = AF_UNSPEC;
+ msg->t.tcm_ifindex = ifindex;
+}
+
+/**
+ * Delete a specific QDISC identified by its iface, and it's handle and parent.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex on whom the deletion will happen.
+ * @param[in] qinfo
+ * Additional info to identify the QDISC (handle and parent).
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+static int
+qdisc_del(int nlsk_fd, uint16_t ifindex, struct qdisc *qinfo)
+{
+ struct nlmsg msg;
+ int fd = 0;
+
+ tc_init_msg(&msg, ifindex, RTM_DELQDISC, 0);
+ msg.t.tcm_handle = qinfo->handle;
+ msg.t.tcm_parent = qinfo->parent;
+ /* if no netlink socket is provided, create one */
+ if (!nlsk_fd) {
+ fd = nl_init(0);
+ if (fd < 0) {
+ RTE_LOG(ERR, PMD,
+ "Could not delete QDISC: null netlink socket\n");
+ return -1;
+ }
+ } else {
+ fd = nlsk_fd;
+ }
+ if (nl_send(fd, &msg.nh) < 0)
+ goto error;
+ if (nl_recv_ack(fd) < 0)
+ goto error;
+ if (!nlsk_fd)
+ return nl_final(fd);
+ return 0;
+error:
+ if (!nlsk_fd)
+ nl_final(fd);
+ return -1;
+}
+
+/**
+ * Add the multiqueue QDISC with MULTIQ_MAJOR_HANDLE handle.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to add the multiqueue QDISC.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+qdisc_add_multiq(int nlsk_fd, uint16_t ifindex)
+{
+ struct tc_multiq_qopt opt;
+ struct nlmsg msg;
+
+ tc_init_msg(&msg, ifindex, RTM_NEWQDISC,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg.t.tcm_handle = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
+ msg.t.tcm_parent = TC_H_ROOT;
+ nlattr_add(&msg.nh, TCA_KIND, sizeof("multiq"), "multiq");
+ nlattr_add(&msg.nh, TCA_OPTIONS, sizeof(opt), &opt);
+ if (nl_send(nlsk_fd, &msg.nh) < 0)
+ return -1;
+ if (nl_recv_ack(nlsk_fd) < 0)
+ return -1;
+ return 0;
+}
+
+/**
+ * Add the ingress QDISC with default ffff: handle.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where the QDISC will be added.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+qdisc_add_ingress(int nlsk_fd, uint16_t ifindex)
+{
+ struct nlmsg msg;
+
+ tc_init_msg(&msg, ifindex, RTM_NEWQDISC,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg.t.tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
+ msg.t.tcm_parent = TC_H_INGRESS;
+ nlattr_add(&msg.nh, TCA_KIND, sizeof("ingress"), "ingress");
+ if (nl_send(nlsk_fd, &msg.nh) < 0)
+ return -1;
+ if (nl_recv_ack(nlsk_fd) < 0)
+ return -1;
+ return 0;
+}
+
+/**
+ * Callback function to delete a QDISC.
+ *
+ * @param[in] nh
+ * The netlink message to parse, received from the kernel.
+ * @param[in] arg
+ * Custom arguments for the callback.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+static int
+qdisc_del_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct tcmsg *t = NLMSG_DATA(nh);
+ struct list_args *args = arg;
+
+ struct qdisc qinfo = {
+ .handle = t->tcm_handle,
+ .parent = t->tcm_parent,
+ };
+
+ /* filter out other ifaces' qdiscs */
+ if (args->ifindex != (unsigned int)t->tcm_ifindex)
+ return 0;
+ /*
+ * Use another nlsk_fd (0) to avoid tampering with the current list
+ * iteration.
+ */
+ return qdisc_del(0, args->ifindex, &qinfo);
+}
+
+/**
+ * Iterate over all QDISC, and call the callback() function for each.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to find QDISCs.
+ * @param[in] callback
+ * The function to call for each QDISC.
+ * @param[in, out] arg
+ * The arguments to provide the callback function with.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+static int
+qdisc_iterate(int nlsk_fd, uint16_t ifindex,
+ int (*callback)(struct nlmsghdr *, void *), void *arg)
+{
+ struct nlmsg msg;
+ struct list_args args = {
+ .nlsk_fd = nlsk_fd,
+ .ifindex = ifindex,
+ .custom_arg = arg,
+ };
+
+ tc_init_msg(&msg, ifindex, RTM_GETQDISC, NLM_F_REQUEST | NLM_F_DUMP);
+ if (nl_send(nlsk_fd, &msg.nh) < 0)
+ return -1;
+ if (nl_recv(nlsk_fd, callback, &args) < 0)
+ return -1;
+ return 0;
+}
+
+/**
+ * Delete all QDISCs for a given netdevice.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to find QDISCs.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+qdisc_flush(int nlsk_fd, uint16_t ifindex)
+{
+ return qdisc_iterate(nlsk_fd, ifindex, qdisc_del_cb, NULL);
+}
+
+/**
+ * Create the multiqueue QDISC, only if it does not exist already.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to add the multiqueue QDISC.
+ *
+ * @return
+ * 0 if the qdisc exists or if has been successfully added.
+ * Return -1 otherwise.
+ */
+int
+qdisc_create_multiq(int nlsk_fd, uint16_t ifindex)
+{
+ int err = 0;
+
+ err = qdisc_add_multiq(nlsk_fd, ifindex);
+ if (err < 0 && errno != -EEXIST) {
+ RTE_LOG(ERR, PMD, "Could not add multiq qdisc (%d): %s\n",
+ errno, strerror(errno));
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Create the ingress QDISC, only if it does not exist already.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to add the ingress QDISC.
+ *
+ * @return
+ * 0 if the qdisc exists or if has been successfully added.
+ * Return -1 otherwise.
+ */
+int
+qdisc_create_ingress(int nlsk_fd, uint16_t ifindex)
+{
+ int err = 0;
+
+ err = qdisc_add_ingress(nlsk_fd, ifindex);
+ if (err < 0 && errno != -EEXIST) {
+ RTE_LOG(ERR, PMD, "Could not add ingress qdisc (%d): %s\n",
+ errno, strerror(errno));
+ return -1;
+ }
+ return 0;
+}
diff --git a/drivers/net/tap/tap_tcmsgs.h b/drivers/net/tap/tap_tcmsgs.h
new file mode 100644
index 00000000..78959577
--- /dev/null
+++ b/drivers/net/tap/tap_tcmsgs.h
@@ -0,0 +1,61 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TAP_TCMSGS_H_
+#define _TAP_TCMSGS_H_
+
+#include <linux/if_ether.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
+#include <linux/pkt_cls.h>
+#include <linux/tc_act/tc_mirred.h>
+#include <linux/tc_act/tc_gact.h>
+#include <linux/tc_act/tc_skbedit.h>
+#include <inttypes.h>
+
+#include <rte_ether.h>
+#include <tap_netlink.h>
+
+#define MULTIQ_MAJOR_HANDLE (1 << 16)
+
+void tc_init_msg(struct nlmsg *msg, uint16_t ifindex, uint16_t type,
+ uint16_t flags);
+int qdisc_list(int nlsk_fd, uint16_t ifindex);
+int qdisc_flush(int nlsk_fd, uint16_t ifindex);
+int qdisc_create_ingress(int nlsk_fd, uint16_t ifindex);
+int qdisc_create_multiq(int nlsk_fd, uint16_t ifindex);
+int qdisc_add_ingress(int nlsk_fd, uint16_t ifindex);
+int qdisc_add_multiq(int nlsk_fd, uint16_t ifindex);
+int filter_list_ingress(int nlsk_fd, uint16_t ifindex);
+
+#endif /* _TAP_TCMSGS_H_ */
diff --git a/drivers/net/thunderx/Makefile b/drivers/net/thunderx/Makefile
index bcab5f93..706250b8 100644
--- a/drivers/net/thunderx/Makefile
+++ b/drivers/net/thunderx/Makefile
@@ -65,8 +65,4 @@ CFLAGS_nicvf_rxtx.o += -fno-prefetch-loop-arrays
endif
CFLAGS_nicvf_rxtx.o += -Ofast
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += lib/librte_mempool lib/librte_mbuf
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.c b/drivers/net/thunderx/base/nicvf_bsvf.c
index 9e028a3a..49a2646d 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.c
+++ b/drivers/net/thunderx/base/nicvf_bsvf.c
@@ -37,7 +37,7 @@
#include "nicvf_bsvf.h"
#include "nicvf_plat.h"
-static SIMPLEQ_HEAD(, svf_entry) head = SIMPLEQ_HEAD_INITIALIZER(head);
+static STAILQ_HEAD(, svf_entry) head = STAILQ_HEAD_INITIALIZER(head);
void
nicvf_bsvf_push(struct svf_entry *entry)
@@ -45,7 +45,7 @@ nicvf_bsvf_push(struct svf_entry *entry)
assert(entry != NULL);
assert(entry->vf != NULL);
- SIMPLEQ_INSERT_TAIL(&head, entry, next);
+ STAILQ_INSERT_TAIL(&head, entry, next);
}
struct svf_entry *
@@ -53,14 +53,14 @@ nicvf_bsvf_pop(void)
{
struct svf_entry *entry;
- assert(!SIMPLEQ_EMPTY(&head));
+ assert(!STAILQ_EMPTY(&head));
- entry = SIMPLEQ_FIRST(&head);
+ entry = STAILQ_FIRST(&head);
assert(entry != NULL);
assert(entry->vf != NULL);
- SIMPLEQ_REMOVE_HEAD(&head, next);
+ STAILQ_REMOVE_HEAD(&head, next);
return entry;
}
@@ -68,5 +68,5 @@ nicvf_bsvf_pop(void)
int
nicvf_bsvf_empty(void)
{
- return SIMPLEQ_EMPTY(&head);
+ return STAILQ_EMPTY(&head);
}
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.h b/drivers/net/thunderx/base/nicvf_bsvf.h
index 5d5a25e2..fb9b2484 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.h
+++ b/drivers/net/thunderx/base/nicvf_bsvf.h
@@ -41,7 +41,7 @@ struct nicvf;
* The base queue structure to hold secondary qsets.
*/
struct svf_entry {
- SIMPLEQ_ENTRY(svf_entry) next; /**< Next element's pointer */
+ STAILQ_ENTRY(svf_entry) next; /**< Next element's pointer */
struct nicvf *vf; /**< Holder of a secondary qset */
};
diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
index 00dd2feb..79f83c8d 100644
--- a/drivers/net/thunderx/base/nicvf_hw_defs.h
+++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -36,6 +36,8 @@
#include <stdint.h>
#include <stdbool.h>
+#include "nicvf_plat.h"
+
/* Virtual function register offsets */
#define NIC_VF_CFG (0x000020)
@@ -213,10 +215,6 @@
typedef uint64_t nicvf_phys_addr_t;
-#ifndef __BYTE_ORDER__
-#error __BYTE_ORDER__ not defined
-#endif
-
/* vNIC HW Enumerations */
enum nic_send_ld_type_e {
@@ -559,7 +557,7 @@ enum nic_stat_vnic_tx_e {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4;
uint64_t stdn_fault:1;
uint64_t rsvd0:1;
@@ -604,7 +602,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t pkt_len:16;
uint64_t l2_ptr:8;
uint64_t l3_ptr:8;
@@ -629,7 +627,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t rss_tag:32;
uint64_t vlan_tci:16;
uint64_t vlan_ptr:8;
@@ -646,7 +644,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint16_t rb3_sz;
uint16_t rb2_sz;
uint16_t rb1_sz;
@@ -663,7 +661,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint16_t rb7_sz;
uint16_t rb6_sz;
uint16_t rb5_sz;
@@ -680,7 +678,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint16_t rb11_sz;
uint16_t rb10_sz;
uint16_t rb9_sz;
@@ -697,7 +695,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t vlan_found:1;
uint64_t vlan_stripped:1;
uint64_t vlan2_found:1;
@@ -742,7 +740,7 @@ struct cqe_rx_t {
};
struct cqe_rx_tcp_err_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4; /* W0 */
uint64_t rsvd0:60;
@@ -764,7 +762,7 @@ struct cqe_rx_tcp_err_t {
};
struct cqe_rx_tcp_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4; /* W0 */
uint64_t rsvd0:52;
uint64_t cq_tcp_status:8;
@@ -786,7 +784,7 @@ struct cqe_rx_tcp_t {
};
struct cqe_send_t {
-#if defined(__BIG_ENDIAN_BITFIELD)
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4; /* W0 */
uint64_t rsvd0:4;
uint64_t sqe_ptr:16;
@@ -798,7 +796,7 @@ struct cqe_send_t {
uint64_t send_status:8;
uint64_t ptp_timestamp:64; /* W1 */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
+#elif NICVF_BYTE_ORDER == NICVF_LITTLE_ENDIAN
uint64_t send_status:8;
uint64_t rsvd3:8;
uint64_t sq_idx:3;
@@ -814,7 +812,7 @@ struct cqe_send_t {
};
struct cq_entry_type_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4;
uint64_t __pad:60;
#else
@@ -835,7 +833,7 @@ union cq_entry_t {
NICVF_STATIC_ASSERT(sizeof(union cq_entry_t) == 512);
struct rbdr_entry_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
union {
struct {
uint64_t rsvd0:15;
@@ -860,7 +858,7 @@ NICVF_STATIC_ASSERT(sizeof(struct rbdr_entry_t) == sizeof(uint64_t));
/* TCP reassembly context */
struct rbe_tcp_cnxt_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t tcp_pkt_cnt:12;
uint64_t rsvd1:4;
uint64_t align_hdr_bytes:4;
@@ -899,7 +897,7 @@ struct rx_hdr_t {
};
struct sq_crc_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t rsvd1:32;
uint64_t crc_ival:32;
uint64_t subdesc_type:4;
@@ -921,7 +919,7 @@ struct sq_crc_subdesc {
};
struct sq_gather_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t subdesc_type:4; /* W0 */
uint64_t ld_type:2;
uint64_t rsvd0:42;
@@ -942,7 +940,7 @@ struct sq_gather_subdesc {
/* SQ immediate subdescriptor */
struct sq_imm_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t subdesc_type:4; /* W0 */
uint64_t rsvd0:46;
uint64_t len:14;
@@ -958,7 +956,7 @@ struct sq_imm_subdesc {
};
struct sq_mem_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t subdesc_type:4; /* W0 */
uint64_t mem_alg:4;
uint64_t mem_dsz:2;
@@ -982,7 +980,7 @@ struct sq_mem_subdesc {
};
struct sq_hdr_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t subdesc_type:4;
uint64_t tso:1;
uint64_t post_cqe:1; /* Post CQE on no error also */
@@ -1045,7 +1043,7 @@ NICVF_STATIC_ASSERT(sizeof(union sq_entry_t) == 16);
/* Queue config register formats */
struct rq_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_2_63:62;
uint64_t ena:1;
uint64_t reserved_0:1;
@@ -1059,7 +1057,7 @@ struct rq_cfg { union { struct {
}; };
struct cq_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_43_63:21;
uint64_t ena:1;
uint64_t reset:1;
@@ -1085,7 +1083,7 @@ struct cq_cfg { union { struct {
}; };
struct sq_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_20_63:44;
uint64_t ena:1;
uint64_t reserved_18_18:1;
@@ -1111,7 +1109,7 @@ struct sq_cfg { union { struct {
}; };
struct rbdr_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_45_63:19;
uint64_t ena:1;
uint64_t reset:1;
@@ -1139,7 +1137,7 @@ struct rbdr_cfg { union { struct {
}; };
struct pf_qs_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_32_63:32;
uint64_t ena:1;
uint64_t reserved_27_30:4;
@@ -1169,7 +1167,7 @@ struct pf_qs_cfg { union { struct {
}; };
struct pf_rq_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved1:1;
uint64_t reserved0:34;
uint64_t strip_pre_l2:1;
@@ -1197,7 +1195,7 @@ struct pf_rq_cfg { union { struct {
}; };
struct pf_rq_drop_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t rbdr_red:1;
uint64_t cq_red:1;
uint64_t reserved3:14;
diff --git a/drivers/net/thunderx/base/nicvf_mbox.c b/drivers/net/thunderx/base/nicvf_mbox.c
index 3b7b8a51..a072f19d 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.c
+++ b/drivers/net/thunderx/base/nicvf_mbox.c
@@ -62,9 +62,6 @@ static const char *mbox_message[NIC_MBOX_MSG_MAX] = {
[NIC_MBOX_MSG_RESET_STAT_COUNTER] = "NIC_MBOX_MSG_RESET_STAT_COUNTER",
[NIC_MBOX_MSG_CFG_DONE] = "NIC_MBOX_MSG_CFG_DONE",
[NIC_MBOX_MSG_SHUTDOWN] = "NIC_MBOX_MSG_SHUTDOWN",
- [NIC_MBOX_MSG_RES_BIT] = "NIC_MBOX_MSG_RES_BIT",
- [NIC_MBOX_MSG_RSS_SIZE_RES_BIT] = "NIC_MBOX_MSG_RSS_SIZE",
- [NIC_MBOX_MSG_ALLOC_SQS_RES_BIT] = "NIC_MBOX_MSG_ALLOC_SQS",
};
static inline const char * __attribute__((unused))
@@ -176,7 +173,7 @@ nicvf_handle_mbx_intr(struct nicvf *nic)
case NIC_MBOX_MSG_NACK:
nic->pf_nacked = true;
break;
- case NIC_MBOX_MSG_RSS_SIZE_RES_BIT:
+ case NIC_MBOX_MSG_RSS_SIZE:
nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
nic->pf_acked = true;
break;
@@ -186,7 +183,7 @@ nicvf_handle_mbx_intr(struct nicvf *nic)
nic->speed = mbx.link_status.speed;
nic->pf_acked = true;
break;
- case NIC_MBOX_MSG_ALLOC_SQS_RES_BIT:
+ case NIC_MBOX_MSG_ALLOC_SQS:
assert_primary(nic);
if (mbx.sqs_alloc.qs_count != nic->sqs_count) {
nicvf_log_error("Received %" PRIu8 "/%" PRIu8
@@ -331,7 +328,7 @@ nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg)
{
struct nic_mbx mbx = { .msg = { 0 } };
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
qs_cfg->be = 1;
#endif
/* Send a mailbox msg to PF to config Qset */
diff --git a/drivers/net/thunderx/base/nicvf_mbox.h b/drivers/net/thunderx/base/nicvf_mbox.h
index 084f3a76..8675fe8f 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.h
+++ b/drivers/net/thunderx/base/nicvf_mbox.h
@@ -68,16 +68,10 @@
#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
-#define NIC_MBOX_MSG_CFG_DONE 0x7E /* VF configuration done */
-#define NIC_MBOX_MSG_SHUTDOWN 0x7F /* VF is being shutdown */
-#define NIC_MBOX_MSG_RES_BIT 0x80 /* Reset bit from PF */
+#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
#define NIC_MBOX_MSG_MAX 0x100 /* Maximum number of messages */
-#define NIC_MBOX_MSG_RSS_SIZE_RES_BIT \
- (NIC_MBOX_MSG_RSS_SIZE | NIC_MBOX_MSG_RES_BIT)
-#define NIC_MBOX_MSG_ALLOC_SQS_RES_BIT \
- (NIC_MBOX_MSG_ALLOC_SQS | NIC_MBOX_MSG_RES_BIT)
-
/* Get vNIC VF configuration */
struct nic_cfg_msg {
uint8_t msg;
@@ -157,6 +151,7 @@ struct rss_cfg_msg {
/* Physical interface link status */
struct bgx_link_status {
uint8_t msg;
+ uint8_t mac_type;
uint8_t link_up;
uint8_t duplex;
uint32_t speed;
diff --git a/drivers/net/thunderx/base/nicvf_plat.h b/drivers/net/thunderx/base/nicvf_plat.h
index 83c1844d..36da1200 100644
--- a/drivers/net/thunderx/base/nicvf_plat.h
+++ b/drivers/net/thunderx/base/nicvf_plat.h
@@ -65,35 +65,23 @@
#define nicvf_cpu_to_be_64(x) rte_cpu_to_be_64(x)
#define nicvf_be_to_cpu_64(x) rte_be_to_cpu_64(x)
+#define NICVF_BYTE_ORDER RTE_BYTE_ORDER
+#define NICVF_BIG_ENDIAN RTE_BIG_ENDIAN
+#define NICVF_LITTLE_ENDIAN RTE_LITTLE_ENDIAN
+
/* Constants */
#include <rte_ether.h>
#define NICVF_MAC_ADDR_SIZE ETHER_ADDR_LEN
+#include <rte_io.h>
+#define nicvf_addr_write(addr, val) rte_write64_relaxed((val), (void *)(addr))
+#define nicvf_addr_read(addr) rte_read64_relaxed((void *)(addr))
+
/* ARM64 specific functions */
#if defined(RTE_ARCH_ARM64)
#define nicvf_prefetch_store_keep(_ptr) ({\
asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); })
-static inline void __attribute__((always_inline))
-nicvf_addr_write(uintptr_t addr, uint64_t val)
-{
- asm volatile(
- "str %x[val], [%x[addr]]"
- :
- : [val] "r" (val), [addr] "r" (addr));
-}
-
-static inline uint64_t __attribute__((always_inline))
-nicvf_addr_read(uintptr_t addr)
-{
- uint64_t val;
-
- asm volatile(
- "ldr %x[val], [%x[addr]]"
- : [val] "=r" (val)
- : [addr] "r" (addr));
- return val;
-}
#define NICVF_LOAD_PAIR(reg1, reg2, addr) ({ \
asm volatile( \
@@ -106,18 +94,6 @@ nicvf_addr_read(uintptr_t addr)
#define nicvf_prefetch_store_keep(_ptr) do {} while (0)
-static inline void __attribute__((always_inline))
-nicvf_addr_write(uintptr_t addr, uint64_t val)
-{
- *(volatile uint64_t *)addr = val;
-}
-
-static inline uint64_t __attribute__((always_inline))
-nicvf_addr_read(uintptr_t addr)
-{
- return *(volatile uint64_t *)addr;
-}
-
#define NICVF_LOAD_PAIR(reg1, reg2, addr) \
do { \
reg1 = nicvf_addr_read((uintptr_t)addr); \
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 466e49ce..e4910c9b 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -41,7 +41,6 @@
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>
-#include <sys/timerfd.h>
#include <rte_alarm.h>
#include <rte_atomic.h>
@@ -54,6 +53,7 @@
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_memory.h>
@@ -145,16 +145,29 @@ nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
* Return 0 means link status changed, -1 means not changed
*/
static int
-nicvf_dev_link_update(struct rte_eth_dev *dev,
- int wait_to_complete __rte_unused)
+nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
struct rte_eth_link link;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ int i;
PMD_INIT_FUNC_TRACE();
- memset(&link, 0, sizeof(link));
- nicvf_set_eth_link_status(nic, &link);
+ if (wait_to_complete) {
+ /* rte_eth_link_get() might need to wait up to 9 seconds */
+ for (i = 0; i < MAX_CHECK_TIME; i++) {
+ memset(&link, 0, sizeof(link));
+ nicvf_set_eth_link_status(nic, &link);
+ if (link.link_status)
+ break;
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+ } else {
+ memset(&link, 0, sizeof(link));
+ nicvf_set_eth_link_status(nic, &link);
+ }
return nicvf_atomic_write_link_status(dev, &link);
}
@@ -245,7 +258,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Reading per RX ring stats */
for (qidx = rx_start; qidx <= rx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
@@ -258,7 +271,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Reading per TX ring stats */
for (qidx = tx_start; qidx <= tx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
@@ -277,7 +290,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Reading per RX ring stats */
for (qidx = rx_start; qidx <= rx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_rx_qstats(snic, &rx_qstats,
@@ -290,7 +303,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
nicvf_tx_range(dev, snic, &tx_start, &tx_end);
/* Reading per TX ring stats */
for (qidx = tx_start; qidx <= tx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_tx_qstats(snic, &tx_qstats,
@@ -1219,6 +1232,23 @@ nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
return nicvf_vf_stop_tx_queue(dev, nic, qidx);
}
+static inline void
+nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def;
+
+ RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* Prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer.value = *(uint64_t *)p;
+}
static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
@@ -1311,6 +1341,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
else
rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
+ nicvf_rxq_mbuf_setup(rxq);
/* Alloc completion queue */
if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
@@ -1335,9 +1366,12 @@ static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
PMD_INIT_FUNC_TRACE();
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
dev_info->max_rx_queues =
@@ -1345,7 +1379,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_tx_queues =
(uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
dev_info->max_mac_addrs = 1;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_offload_capa =
@@ -1407,7 +1441,7 @@ static int
nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
{
int ret;
- uint16_t qidx;
+ uint16_t qidx, data_off;
uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
uint64_t mbuf_phys_off = 0;
struct nicvf_rxq *rxq;
@@ -1448,10 +1482,18 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
nic->vf_id, qidx, rxq->pool->name);
return -ENOMEM;
}
- rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf);
- rxq->mbuf_phys_off -= RTE_PKTMBUF_HEADROOM;
+ data_off = nicvf_mbuff_meta_length(mbuf);
+ data_off += RTE_PKTMBUF_HEADROOM;
rte_pktmbuf_free(mbuf);
+ if (data_off % RTE_CACHE_LINE_SIZE) {
+ PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
+ rxq->pool->name, data_off,
+ data_off % RTE_CACHE_LINE_SIZE);
+ return -EINVAL;
+ }
+ rxq->mbuf_phys_off -= data_off;
+
if (mbuf_phys_off == 0)
mbuf_phys_off = rxq->mbuf_phys_off;
if (mbuf_phys_off != rxq->mbuf_phys_off) {
@@ -1975,7 +2017,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
}
}
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pci_dev);
nic->device_id = pci_dev->id.device_id;
@@ -2108,16 +2150,25 @@ static const struct rte_pci_id pci_id_nicvf_map[] = {
},
};
-static struct eth_driver rte_nicvf_pmd = {
- .pci_drv = {
- .id_table = pci_id_nicvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = nicvf_eth_dev_init,
- .dev_private_size = sizeof(struct nicvf),
+static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
+ nicvf_eth_dev_init);
+}
+
+static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_nicvf_pmd = {
+ .id_table = pci_id_nicvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = nicvf_eth_pci_probe,
+ .remove = nicvf_eth_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index fc43b747..6cae8341 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -430,9 +430,9 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
union cq_entry_t *desc = rxq->desc;
const uint64_t cqe_mask = rxq->qlen_mask;
uint64_t rb0_ptr, mbuf_phys_off = rxq->mbuf_phys_off;
+ const uint64_t mbuf_init = rxq->mbuf_initializer.value;
uint32_t cqe_head = rxq->head & cqe_mask;
int32_t available_space = rxq->available_space;
- uint8_t port_id = rxq->port_id;
const uint8_t rbptr_offset = rxq->rbptr_offset;
to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space);
@@ -448,17 +448,12 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
-
pkt->ol_flags = 0;
- pkt->port = port_id;
pkt->data_len = cqe_rx_w3.rb0_sz;
- pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad;
- pkt->nb_segs = 1;
pkt->pkt_len = cqe_rx_w3.rb0_sz;
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
-
+ nicvf_mbuff_init_update(pkt, mbuf_init, cqe_rx_w1.align_pad);
nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
- rte_mbuf_refcnt_set(pkt, 1);
rx_pkts[i] = pkt;
cqe_head = (cqe_head + 1) & cqe_mask;
nicvf_prefetch_store_keep(pkt);
@@ -469,11 +464,10 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxq->head = cqe_head;
nicvf_addr_write(rxq->cq_door, to_process);
rxq->recv_buffers += to_process;
- if (rxq->recv_buffers > rxq->rx_free_thresh) {
- rxq->recv_buffers -= nicvf_fill_rbdr(rxq,
- rxq->rx_free_thresh);
- NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
- }
+ }
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
}
return to_process;
@@ -481,8 +475,9 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
static inline uint16_t __hot
nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
- uint64_t mbuf_phys_off, uint8_t port_id,
- struct rte_mbuf **rx_pkt, uint8_t rbptr_offset)
+ uint64_t mbuf_phys_off,
+ struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
+ uint64_t mbuf_init)
{
struct rte_mbuf *pkt, *seg, *prev;
cqe_rx_word0_t cqe_rx_w0;
@@ -501,12 +496,10 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
pkt->ol_flags = 0;
- pkt->port = port_id;
- pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad;
- pkt->nb_segs = nb_segs;
pkt->pkt_len = cqe_rx_w1.pkt_len;
pkt->data_len = rb_sz[nicvf_frag_num(0)];
- rte_mbuf_refcnt_set(pkt, 1);
+ nicvf_mbuff_init_mseg_update(
+ pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
@@ -518,9 +511,7 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
prev->next = seg;
seg->data_len = rb_sz[nicvf_frag_num(seg_idx)];
- seg->port = port_id;
- seg->data_off = RTE_PKTMBUF_HEADROOM;
- rte_mbuf_refcnt_set(seg, 1);
+ nicvf_mbuff_init_update(seg, mbuf_init, 0);
prev = seg;
}
@@ -541,7 +532,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
uint32_t i, to_process, cqe_head, buffers_consumed = 0;
int32_t available_space = rxq->available_space;
uint16_t nb_segs;
- const uint8_t port_id = rxq->port_id;
+ const uint64_t mbuf_init = rxq->mbuf_initializer.value;
const uint8_t rbptr_offset = rxq->rbptr_offset;
cqe_head = rxq->head & cqe_mask;
@@ -552,7 +543,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
cq_entry = &desc[cqe_head];
cqe_rx = (struct cqe_rx_t *)cq_entry;
nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
- port_id, rx_pkts + i, rbptr_offset);
+ rx_pkts + i, rbptr_offset, mbuf_init);
buffers_consumed += nb_segs;
cqe_head = (cqe_head + 1) & cqe_mask;
nicvf_prefetch_store_keep(rx_pkts[i]);
@@ -563,11 +554,10 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
rxq->head = cqe_head;
nicvf_addr_write(rxq->cq_door, to_process);
rxq->recv_buffers += buffers_consumed;
- if (rxq->recv_buffers > rxq->rx_free_thresh) {
- rxq->recv_buffers -=
- nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
- NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
- }
+ }
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
}
return to_process;
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 9dad8a5a..3631ff22 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -84,6 +84,33 @@ fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
}
#endif
+static inline void
+nicvf_mbuff_init_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
+ uint16_t apad)
+{
+ union mbuf_initializer init = {.value = mbuf_init};
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ init.fields.data_off += apad;
+#else
+ init.value += apad;
+#endif
+ *(uint64_t *)(&pkt->rearm_data) = init.value;
+}
+
+static inline void
+nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
+ uint16_t apad, uint16_t nb_segs)
+{
+ union mbuf_initializer init = {.value = mbuf_init};
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ init.fields.data_off += apad;
+#else
+ init.value += apad;
+#endif
+ init.fields.nb_segs = nb_segs;
+ *(uint64_t *)(&pkt->rearm_data) = init.value;
+}
+
uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index c900e121..34c41b79 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -43,8 +43,8 @@
#include <rte_memory.h>
struct nicvf_rbdr {
- uint64_t rbdr_status;
- uint64_t rbdr_door;
+ uintptr_t rbdr_status;
+ uintptr_t rbdr_door;
struct rbdr_entry_t *desc;
nicvf_phys_addr_t phys;
uint32_t buffsz;
@@ -58,8 +58,8 @@ struct nicvf_txq {
union sq_entry_t *desc;
nicvf_phys_addr_t phys;
struct rte_mbuf **txbuffs;
- uint64_t sq_head;
- uint64_t sq_door;
+ uintptr_t sq_head;
+ uintptr_t sq_door;
struct rte_mempool *pool;
struct nicvf *nic;
void (*pool_free)(struct nicvf_txq *sq);
@@ -72,10 +72,21 @@ struct nicvf_txq {
uint16_t tx_free_thresh;
} __rte_cache_aligned;
+union mbuf_initializer {
+ struct {
+ uint16_t data_off;
+ uint16_t refcnt;
+ uint16_t nb_segs;
+ uint16_t port;
+ } fields;
+ uint64_t value;
+};
+
struct nicvf_rxq {
uint64_t mbuf_phys_off;
- uint64_t cq_status;
- uint64_t cq_door;
+ uintptr_t cq_status;
+ uintptr_t cq_door;
+ union mbuf_initializer mbuf_initializer;
nicvf_phys_addr_t phys;
union cq_entry_t *desc;
struct nicvf_rbdr *shared_rbdr;
diff --git a/drivers/net/vhost/Makefile b/drivers/net/vhost/Makefile
index 050c5aa5..3ba8ad64 100644
--- a/drivers/net/vhost/Makefile
+++ b/drivers/net/vhost/Makefile
@@ -55,12 +55,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += rte_eth_vhost.c
#
SYMLINK-y-include += rte_eth_vhost.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_kvargs
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_vhost
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 328dde08..257bf6d6 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -33,27 +33,26 @@
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>
-#ifdef RTE_LIBRTE_VHOST_NUMA
-#include <numaif.h>
-#endif
#include <rte_mbuf.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
#include <rte_spinlock.h>
#include "rte_eth_vhost.h"
+enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
+
#define ETH_VHOST_IFACE_ARG "iface"
#define ETH_VHOST_QUEUES_ARG "queues"
#define ETH_VHOST_CLIENT_ARG "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
-
-static const char *drivername = "VHOST PMD";
+#define VHOST_MAX_PKT_BURST 32
static const char *valid_arguments[] = {
ETH_VHOST_IFACE_ARG,
@@ -112,9 +111,11 @@ struct vhost_queue {
};
struct pmd_internal {
+ rte_atomic32_t dev_attached;
char *dev_name;
char *iface_name;
uint16_t max_queues;
+ rte_atomic32_t started;
};
struct internal_list {
@@ -128,9 +129,6 @@ static struct internal_list_head internal_list =
static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
-static rte_atomic16_t nb_started_ports;
-static pthread_t session_th;
-
static struct rte_eth_link pmd_link = {
.link_speed = 10000,
.link_duplex = ETH_LINK_FULL_DUPLEX,
@@ -391,6 +389,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
struct vhost_queue *r = q;
uint16_t i, nb_rx = 0;
+ uint16_t nb_receive = nb_bufs;
if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
return 0;
@@ -401,8 +400,20 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
goto out;
/* Dequeue packets from guest TX queue */
- nb_rx = rte_vhost_dequeue_burst(r->vid,
- r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
+ while (nb_receive) {
+ uint16_t nb_pkts;
+ uint16_t num = (uint16_t)RTE_MIN(nb_receive,
+ VHOST_MAX_PKT_BURST);
+
+ nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
+ r->mb_pool, &bufs[nb_rx],
+ num);
+
+ nb_rx += nb_pkts;
+ nb_receive -= nb_pkts;
+ if (nb_pkts < num)
+ break;
+ }
r->stats.pkts += nb_rx;
@@ -424,6 +435,7 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
struct vhost_queue *r = q;
uint16_t i, nb_tx = 0;
+ uint16_t nb_send = nb_bufs;
if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
return 0;
@@ -434,8 +446,19 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
goto out;
/* Enqueue packets to guest RX queue */
- nb_tx = rte_vhost_enqueue_burst(r->vid,
- r->virtqueue_id, bufs, nb_bufs);
+ while (nb_send) {
+ uint16_t nb_pkts;
+ uint16_t num = (uint16_t)RTE_MIN(nb_send,
+ VHOST_MAX_PKT_BURST);
+
+ nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
+ &bufs[nb_tx], num);
+
+ nb_tx += nb_pkts;
+ nb_send -= nb_pkts;
+ if (nb_pkts < num)
+ break;
+ }
r->stats.pkts += nb_tx;
r->stats.missed_pkts += nb_bufs - nb_tx;
@@ -494,6 +517,38 @@ find_internal_resource(char *ifname)
return list;
}
+static void
+update_queuing_status(struct rte_eth_dev *dev)
+{
+ struct pmd_internal *internal = dev->data->dev_private;
+ struct vhost_queue *vq;
+ unsigned int i;
+ int allow_queuing = 1;
+
+ if (rte_atomic32_read(&internal->started) == 0 ||
+ rte_atomic32_read(&internal->dev_attached) == 0)
+ allow_queuing = 0;
+
+ /* Wait until rx/tx_pkt_burst stops accessing vhost device */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (vq == NULL)
+ continue;
+ rte_atomic32_set(&vq->allow_queuing, allow_queuing);
+ while (rte_atomic32_read(&vq->while_queuing))
+ rte_pause();
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (vq == NULL)
+ continue;
+ rte_atomic32_set(&vq->allow_queuing, allow_queuing);
+ while (rte_atomic32_read(&vq->while_queuing))
+ rte_pause();
+ }
+}
+
static int
new_device(int vid)
{
@@ -540,23 +595,15 @@ new_device(int vid)
vq->port = eth_dev->data->port_id;
}
- for (i = 0; i < rte_vhost_get_queue_num(vid) * VIRTIO_QNUM; i++)
+ for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
rte_vhost_enable_guest_notification(vid, i, 0);
+ rte_vhost_get_mtu(vid, &eth_dev->data->mtu);
+
eth_dev->data->dev_link.link_status = ETH_LINK_UP;
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- vq = eth_dev->data->rx_queues[i];
- if (vq == NULL)
- continue;
- rte_atomic32_set(&vq->allow_queuing, 1);
- }
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- vq = eth_dev->data->tx_queues[i];
- if (vq == NULL)
- continue;
- rte_atomic32_set(&vq->allow_queuing, 1);
- }
+ rte_atomic32_set(&internal->dev_attached, 1);
+ update_queuing_status(eth_dev);
RTE_LOG(INFO, PMD, "New connection established\n");
@@ -569,6 +616,7 @@ static void
destroy_device(int vid)
{
struct rte_eth_dev *eth_dev;
+ struct pmd_internal *internal;
struct vhost_queue *vq;
struct internal_list *list;
char ifname[PATH_MAX];
@@ -582,24 +630,10 @@ destroy_device(int vid)
return;
}
eth_dev = list->eth_dev;
+ internal = eth_dev->data->dev_private;
- /* Wait until rx/tx_pkt_burst stops accessing vhost device */
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- vq = eth_dev->data->rx_queues[i];
- if (vq == NULL)
- continue;
- rte_atomic32_set(&vq->allow_queuing, 0);
- while (rte_atomic32_read(&vq->while_queuing))
- rte_pause();
- }
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- vq = eth_dev->data->tx_queues[i];
- if (vq == NULL)
- continue;
- rte_atomic32_set(&vq->allow_queuing, 0);
- while (rte_atomic32_read(&vq->while_queuing))
- rte_pause();
- }
+ rte_atomic32_set(&internal->dev_attached, 0);
+ update_queuing_status(eth_dev);
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
@@ -661,6 +695,12 @@ vring_state_changed(int vid, uint16_t vring, int enable)
return 0;
}
+static struct vhost_device_ops vhost_ops = {
+ .new_device = new_device,
+ .destroy_device = destroy_device,
+ .vring_state_changed = vring_state_changed,
+};
+
int
rte_eth_vhost_get_queue_event(uint8_t port_id,
struct rte_eth_vhost_queue_event *event)
@@ -727,60 +767,24 @@ rte_eth_vhost_get_vid_from_port_id(uint8_t port_id)
return vid;
}
-static void *
-vhost_driver_session(void *param __rte_unused)
-{
- static struct virtio_net_device_ops vhost_ops;
-
- /* set vhost arguments */
- vhost_ops.new_device = new_device;
- vhost_ops.destroy_device = destroy_device;
- vhost_ops.vring_state_changed = vring_state_changed;
- if (rte_vhost_driver_callback_register(&vhost_ops) < 0)
- RTE_LOG(ERR, PMD, "Can't register callbacks\n");
-
- /* start event handling */
- rte_vhost_driver_session_start();
-
- return NULL;
-}
-
static int
-vhost_driver_session_start(void)
+eth_dev_start(struct rte_eth_dev *dev)
{
- int ret;
+ struct pmd_internal *internal = dev->data->dev_private;
- ret = pthread_create(&session_th,
- NULL, vhost_driver_session, NULL);
- if (ret)
- RTE_LOG(ERR, PMD, "Can't create a thread\n");
+ rte_atomic32_set(&internal->started, 1);
+ update_queuing_status(dev);
- return ret;
-}
-
-static void
-vhost_driver_session_stop(void)
-{
- int ret;
-
- ret = pthread_cancel(session_th);
- if (ret)
- RTE_LOG(ERR, PMD, "Can't cancel the thread\n");
-
- ret = pthread_join(session_th, NULL);
- if (ret)
- RTE_LOG(ERR, PMD, "Can't join the thread\n");
-}
-
-static int
-eth_dev_start(struct rte_eth_dev *dev __rte_unused)
-{
return 0;
}
static void
-eth_dev_stop(struct rte_eth_dev *dev __rte_unused)
+eth_dev_stop(struct rte_eth_dev *dev)
{
+ struct pmd_internal *internal = dev->data->dev_private;
+
+ rte_atomic32_set(&internal->started, 0);
+ update_queuing_status(dev);
}
static void
@@ -788,11 +792,14 @@ eth_dev_close(struct rte_eth_dev *dev)
{
struct pmd_internal *internal;
struct internal_list *list;
+ unsigned int i;
internal = dev->data->dev_private;
if (!internal)
return;
+ eth_dev_stop(dev);
+
rte_vhost_driver_unregister(internal->iface_name);
list = find_internal_resource(internal->iface_name);
@@ -804,9 +811,17 @@ eth_dev_close(struct rte_eth_dev *dev)
pthread_mutex_unlock(&internal_list_lock);
rte_free(list);
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ rte_free(dev->data->rx_queues[i]);
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ rte_free(dev->data->tx_queues[i]);
+
+ rte_free(dev->data->mac_addrs);
free(internal->dev_name);
free(internal->iface_name);
rte_free(internal);
+
+ dev->data->dev_private = NULL;
}
static int
@@ -865,7 +880,6 @@ eth_dev_info(struct rte_eth_dev *dev,
return;
}
- dev_info->driver_name = drivername;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
dev_info->max_rx_queues = internal->max_queues;
@@ -943,35 +957,20 @@ eth_queue_release(void *q)
}
static int
-eth_link_update(struct rte_eth_dev *dev __rte_unused,
- int wait_to_complete __rte_unused)
+eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
+ /*
+ * vHost does not hang onto mbuf. eth_vhost_tx() copies packet data
+ * and releases mbuf, so nothing to cleanup.
+ */
return 0;
}
-/**
- * Disable features in feature_mask. Returns 0 on success.
- */
-int
-rte_eth_vhost_feature_disable(uint64_t feature_mask)
-{
- return rte_vhost_feature_disable(feature_mask);
-}
-
-/**
- * Enable features in feature_mask. Returns 0 on success.
- */
-int
-rte_eth_vhost_feature_enable(uint64_t feature_mask)
-{
- return rte_vhost_feature_enable(feature_mask);
-}
-
-/* Returns currently supported vhost features */
-uint64_t
-rte_eth_vhost_feature_get(void)
+static int
+eth_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
{
- return rte_vhost_feature_get();
+ return 0;
}
static const struct eth_dev_ops ops = {
@@ -984,6 +983,7 @@ static const struct eth_dev_ops ops = {
.tx_queue_setup = eth_tx_queue_setup,
.rx_queue_release = eth_queue_release,
.tx_queue_release = eth_queue_release,
+ .tx_done_cleanup = eth_tx_done_cleanup,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
@@ -992,10 +992,13 @@ static const struct eth_dev_ops ops = {
.xstats_get_names = vhost_dev_xstats_get_names,
};
+static struct rte_vdev_driver pmd_vhost_drv;
+
static int
-eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
- const unsigned numa_node, uint64_t flags)
+eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
+ int16_t queues, const unsigned int numa_node, uint64_t flags)
{
+ const char *name = rte_vdev_device_name(dev);
struct rte_eth_dev_data *data = NULL;
struct pmd_internal *internal = NULL;
struct rte_eth_dev *eth_dev = NULL;
@@ -1006,23 +1009,19 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
numa_node);
- /* now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
+ /* now do all data allocation - for eth_dev structure and internal
+ * (private) data
*/
data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
if (data == NULL)
goto error;
- internal = rte_zmalloc_socket(name, sizeof(*internal), 0, numa_node);
- if (internal == NULL)
- goto error;
-
list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
if (list == NULL)
goto error;
/* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name);
+ eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
if (eth_dev == NULL)
goto error;
@@ -1037,14 +1036,12 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
if (vring_state == NULL)
goto error;
- TAILQ_INIT(&eth_dev->link_intr_cbs);
-
/* now put it all together
* - store queue data in internal,
- * - store numa_node info in ethdev data
* - point eth_dev_data to internals
* - and point eth_dev structure to new eth_dev_data structure
*/
+ internal = eth_dev->data->dev_private;
internal->dev_name = strdup(name);
if (internal->dev_name == NULL)
goto error;
@@ -1060,26 +1057,21 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
rte_spinlock_init(&vring_state->lock);
vring_states[eth_dev->data->port_id] = vring_state;
- data->dev_private = internal;
- data->port_id = eth_dev->data->port_id;
- memmove(data->name, eth_dev->data->name, sizeof(data->name));
+ /* We'll replace the 'data' originally allocated by eth_dev. So the
+ * vhost PMD resources won't be shared between multi processes.
+ */
+ rte_memcpy(data, eth_dev->data, sizeof(*data));
+ eth_dev->data = data;
+
data->nb_rx_queues = queues;
data->nb_tx_queues = queues;
internal->max_queues = queues;
data->dev_link = pmd_link;
data->mac_addrs = eth_addr;
-
- /* We'll replace the 'data' originally allocated by eth_dev. So the
- * vhost PMD resources won't be shared between multi processes.
- */
- eth_dev->data = data;
- eth_dev->dev_ops = &ops;
- eth_dev->driver = NULL;
data->dev_flags =
RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
- data->kdrv = RTE_KDRV_NONE;
- data->drv_name = internal->dev_name;
- data->numa_node = numa_node;
+
+ eth_dev->dev_ops = &ops;
/* finally assign rx and tx ops */
eth_dev->rx_pkt_burst = eth_vhost_rx;
@@ -1088,17 +1080,24 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
if (rte_vhost_driver_register(iface_name, flags))
goto error;
- /* We need only one message handling thread */
- if (rte_atomic16_add_return(&nb_started_ports, 1) == 1) {
- if (vhost_driver_session_start())
- goto error;
+ if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
+ RTE_LOG(ERR, PMD, "Can't register callbacks\n");
+ goto error;
+ }
+
+ if (rte_vhost_driver_start(iface_name) < 0) {
+ RTE_LOG(ERR, PMD, "Failed to start driver for %s\n",
+ iface_name);
+ goto error;
}
return data->port_id;
error:
- if (internal)
+ if (internal) {
+ free(internal->iface_name);
free(internal->dev_name);
+ }
rte_free(vring_state);
rte_free(eth_addr);
if (eth_dev)
@@ -1139,7 +1138,7 @@ open_int(const char *key __rte_unused, const char *value, void *extra_args)
}
static int
-rte_pmd_vhost_probe(const char *name, const char *params)
+rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
struct rte_kvargs *kvlist = NULL;
int ret = 0;
@@ -1149,9 +1148,10 @@ rte_pmd_vhost_probe(const char *name, const char *params)
int client_mode = 0;
int dequeue_zero_copy = 0;
- RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n", name);
+ RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n",
+ rte_vdev_device_name(dev));
- kvlist = rte_kvargs_parse(params, valid_arguments);
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
if (kvlist == NULL)
return -1;
@@ -1194,7 +1194,11 @@ rte_pmd_vhost_probe(const char *name, const char *params)
flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
}
- eth_dev_vhost_create(name, iface_name, queues, rte_socket_id(), flags);
+ if (dev->device.numa_node == SOCKET_ID_ANY)
+ dev->device.numa_node = rte_socket_id();
+
+ eth_dev_vhost_create(dev, iface_name, queues, dev->device.numa_node,
+ flags);
out_free:
rte_kvargs_free(kvlist);
@@ -1202,11 +1206,12 @@ out_free:
}
static int
-rte_pmd_vhost_remove(const char *name)
+rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
+ const char *name;
struct rte_eth_dev *eth_dev = NULL;
- unsigned int i;
+ name = rte_vdev_device_name(dev);
RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);
/* find an ethdev entry */
@@ -1214,22 +1219,11 @@ rte_pmd_vhost_remove(const char *name)
if (eth_dev == NULL)
return -ENODEV;
- eth_dev_stop(eth_dev);
-
eth_dev_close(eth_dev);
- if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
- vhost_driver_session_stop();
-
rte_free(vring_states[eth_dev->data->port_id]);
vring_states[eth_dev->data->port_id] = NULL;
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
- rte_free(eth_dev->data->rx_queues[i]);
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
- rte_free(eth_dev->data->tx_queues[i]);
-
- rte_free(eth_dev->data->mac_addrs);
rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
diff --git a/drivers/net/vhost/rte_eth_vhost.h b/drivers/net/vhost/rte_eth_vhost.h
index 7c98b1ae..39ca7719 100644
--- a/drivers/net/vhost/rte_eth_vhost.h
+++ b/drivers/net/vhost/rte_eth_vhost.h
@@ -41,37 +41,7 @@ extern "C" {
#include <stdint.h>
#include <stdbool.h>
-#include <rte_virtio_net.h>
-
-/**
- * Disable features in feature_mask.
- *
- * @param feature_mask
- * Vhost features defined in "linux/virtio_net.h".
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-int rte_eth_vhost_feature_disable(uint64_t feature_mask);
-
-/**
- * Enable features in feature_mask.
- *
- * @param feature_mask
- * Vhost features defined in "linux/virtio_net.h".
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-int rte_eth_vhost_feature_enable(uint64_t feature_mask);
-
-/**
- * Returns currently supported vhost features.
- *
- * @return
- * Vhost features defined in "linux/virtio_net.h".
- */
-uint64_t rte_eth_vhost_feature_get(void);
+#include <rte_vhost.h>
/*
* Event description.
diff --git a/drivers/net/vhost/rte_pmd_vhost_version.map b/drivers/net/vhost/rte_pmd_vhost_version.map
index 3d44083f..695db857 100644
--- a/drivers/net/vhost/rte_pmd_vhost_version.map
+++ b/drivers/net/vhost/rte_pmd_vhost_version.map
@@ -1,9 +1,6 @@
DPDK_16.04 {
global:
- rte_eth_vhost_feature_disable;
- rte_eth_vhost_feature_enable;
- rte_eth_vhost_feature_get;
rte_eth_vhost_get_queue_event;
local: *;
diff --git a/drivers/net/virtio/Makefile b/drivers/net/virtio/Makefile
index 97972a6c..b21b8781 100644
--- a/drivers/net/virtio/Makefile
+++ b/drivers/net/virtio/Makefile
@@ -60,14 +60,10 @@ endif
ifeq ($(CONFIG_RTE_VIRTIO_USER),y)
SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/vhost_user.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/vhost_kernel.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/vhost_kernel_tap.c
SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/virtio_user_dev.c
SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user_ethdev.c
endif
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_net
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index f5961ab7..983b95f1 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -38,6 +38,7 @@
#include <unistd.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
@@ -86,7 +87,7 @@ static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
-static void virtio_mac_addr_add(struct rte_eth_dev *dev,
+static int virtio_mac_addr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq __rte_unused);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
@@ -485,11 +486,11 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
hw->cvq = cvq;
}
- /* For virtio_user case (that is when dev->pci_dev is NULL), we use
+ /* For virtio_user case (that is when hw->dev is NULL), we use
* virtual address. And we need properly set _offset_, please see
* VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
*/
- if (dev->pci_dev)
+ if (!hw->virtio_user_dev)
vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
else {
vq->vq_ring_mem = (uintptr_t)mz->addr;
@@ -545,6 +546,9 @@ virtio_free_queues(struct virtio_hw *hw)
int queue_type;
uint16_t i;
+ if (hw->vqs == NULL)
+ return;
+
for (i = 0; i < nr_vq; i++) {
vq = hw->vqs[i];
if (!vq)
@@ -563,9 +567,11 @@ virtio_free_queues(struct virtio_hw *hw)
}
rte_free(vq);
+ hw->vqs[i] = NULL;
}
rte_free(hw->vqs);
+ hw->vqs = NULL;
}
static int
@@ -593,16 +599,29 @@ virtio_alloc_queues(struct rte_eth_dev *dev)
return 0;
}
+static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
+
static void
virtio_dev_close(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
+ struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "virtio_dev_close");
/* reset the NIC */
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);
+ VTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
+ if (intr_conf->rxq)
+ virtio_queues_unbind_intr(dev);
+
+ if (intr_conf->lsc || intr_conf->rxq) {
+ rte_intr_disable(dev->intr_handle);
+ rte_intr_efd_disable(dev->intr_handle);
+ rte_free(dev->intr_handle->intr_vec);
+ dev->intr_handle->intr_vec = NULL;
+ }
+
vtpci_reset(hw);
virtio_dev_free_mbufs(dev);
virtio_free_queues(hw);
@@ -617,7 +636,7 @@ virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
@@ -640,7 +659,7 @@ virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
@@ -663,7 +682,7 @@ virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
@@ -686,7 +705,7 @@ virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
@@ -708,15 +727,38 @@ virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
hw->vtnet_hdr_size;
uint32_t frame_size = mtu + ether_hdr_len;
+ uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
+
+ max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
- if (mtu < ETHER_MIN_MTU || frame_size > VIRTIO_MAX_RX_PKTLEN) {
- PMD_INIT_LOG(ERR, "MTU should be between %d and %d\n",
- ETHER_MIN_MTU, VIRTIO_MAX_RX_PKTLEN - ether_hdr_len);
+ if (mtu < ETHER_MIN_MTU || frame_size > max_frame_size) {
+ PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
+ ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
return -EINVAL;
}
return 0;
}
+static int
+virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+ struct virtqueue *vq = rxvq->vq;
+
+ virtqueue_enable_intr(vq);
+ return 0;
+}
+
+static int
+virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+ struct virtqueue *vq = rxvq->vq;
+
+ virtqueue_disable_intr(vq);
+ return 0;
+}
+
/*
* dev_ops for virtio, bare necessities for basic operation
*/
@@ -738,7 +780,10 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.xstats_reset = virtio_dev_stats_reset,
.link_update = virtio_dev_link_update,
.rx_queue_setup = virtio_dev_rx_queue_setup,
+ .rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
.rx_queue_release = virtio_dev_queue_release,
+ .rx_descriptor_done = virtio_dev_rx_queue_done,
.tx_queue_setup = virtio_dev_tx_queue_setup,
.tx_queue_release = virtio_dev_queue_release,
/* collect stats per queue */
@@ -980,7 +1025,7 @@ virtio_get_hwaddr(struct virtio_hw *hw)
}
}
-static void
+static int
virtio_mac_table_set(struct virtio_hw *hw,
const struct virtio_net_ctrl_mac *uc,
const struct virtio_net_ctrl_mac *mc)
@@ -990,7 +1035,7 @@ virtio_mac_table_set(struct virtio_hw *hw,
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
PMD_DRV_LOG(INFO, "host does not support mac table");
- return;
+ return -1;
}
ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
@@ -1005,9 +1050,10 @@ virtio_mac_table_set(struct virtio_hw *hw,
err = virtio_send_command(hw->cvq, &ctrl, len, 2);
if (err != 0)
PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
+ return err;
}
-static void
+static int
virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq __rte_unused)
{
@@ -1018,7 +1064,7 @@ virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
if (index >= VIRTIO_MAX_MAC_ADDRS) {
PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
- return;
+ return -EINVAL;
}
uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
@@ -1035,7 +1081,7 @@ virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
}
- virtio_mac_table_set(hw, uc, mc);
+ return virtio_mac_table_set(hw, uc, mc);
}
static void
@@ -1122,6 +1168,18 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
host_features);
+ /* If supported, ensure MTU value is valid before acknowledging it. */
+ if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
+ struct virtio_net_config config;
+
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, mtu),
+ &config.mtu, sizeof(config.mtu));
+
+ if (config.mtu < ETHER_MIN_MTU)
+ req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
+ }
+
/*
* Negotiate features: Subset of device feature bits are written back
* guest feature bits.
@@ -1154,9 +1212,8 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
* Process Virtio Config changed interrupt and call the callback
* if link state changed.
*/
-static void
-virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+void
+virtio_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = param;
struct virtio_hw *hw = dev->data->dev_private;
@@ -1166,7 +1223,7 @@ virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
isr = vtpci_isr(hw);
PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
- if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0)
+ if (rte_intr_enable(dev->intr_handle) < 0)
PMD_DRV_LOG(ERR, "interrupt enable failed");
if (isr & VIRTIO_PCI_ISR_CONFIG) {
@@ -1187,6 +1244,95 @@ rx_func_get(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}
+/* Only support 1:1 queue/interrupt mapping so far.
+ * TODO: support n:1 queue/interrupt mapping when there are limited number of
+ * interrupt vectors (<N+1).
+ */
+static int
+virtio_queues_bind_intr(struct rte_eth_dev *dev)
+{
+ uint32_t i;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_LOG(INFO, "queue/interrupt binding");
+ for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ dev->intr_handle->intr_vec[i] = i + 1;
+ if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
+ VIRTIO_MSI_NO_VECTOR) {
+ PMD_DRV_LOG(ERR, "failed to set queue vector");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static void
+virtio_queues_unbind_intr(struct rte_eth_dev *dev)
+{
+ uint32_t i;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
+ for (i = 0; i < dev->data->nb_rx_queues; ++i)
+ VTPCI_OPS(hw)->set_queue_irq(hw,
+ hw->vqs[i * VTNET_CQ],
+ VIRTIO_MSI_NO_VECTOR);
+}
+
+static int
+virtio_configure_intr(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (!rte_intr_cap_multiple(dev->intr_handle)) {
+ PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
+ return -ENOTSUP;
+ }
+
+ if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
+ PMD_INIT_LOG(ERR, "Fail to create eventfd");
+ return -1;
+ }
+
+ if (!dev->intr_handle->intr_vec) {
+ dev->intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ hw->max_queue_pairs * sizeof(int), 0);
+ if (!dev->intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
+ hw->max_queue_pairs);
+ return -ENOMEM;
+ }
+ }
+
+ /* Re-register callback to update max_intr */
+ rte_intr_callback_unregister(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+ rte_intr_callback_register(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+
+ /* DO NOT try to remove this! This function will enable msix, or QEMU
+ * will encounter SIGSEGV when DRIVER_OK is sent.
+ * And for legacy devices, this should be done before queue/vec binding
+ * to change the config size from 20 to 24, or VIRTIO_MSI_QUEUE_VECTOR
+ * (22) will be ignored.
+ */
+ if (rte_intr_enable(dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return -1;
+ }
+
+ if (virtio_queues_bind_intr(dev) < 0) {
+ PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
+ return -1;
+ }
+
+ return 0;
+}
+
/* reset device and renegotiate features if needed */
static int
virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
@@ -1194,7 +1340,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
struct virtio_hw *hw = eth_dev->data->dev_private;
struct virtio_net_config *config;
struct virtio_net_config local_config;
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+ struct rte_pci_device *pci_dev = NULL;
int ret;
/* Reset the device although not necessary at startup */
@@ -1208,13 +1354,17 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
if (virtio_negotiate_features(hw, req_features) < 0)
return -1;
- rte_eth_copy_pci_info(eth_dev, pci_dev);
+ if (!hw->virtio_user_dev) {
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ }
- /* If host does not support status then disable LSC */
- if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
- eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
- else
+ eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+ /* If host does not support both status and MSI-X then disable LSC */
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->use_msix)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ else
+ eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
rx_func_get(eth_dev);
@@ -1264,6 +1414,32 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
hw->max_queue_pairs = config->max_virtqueue_pairs;
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, mtu),
+ &config->mtu,
+ sizeof(config->mtu));
+
+ /*
+ * MTU value has already been checked at negotiation
+ * time, but check again in case it has changed since
+ * then, which should not happen.
+ */
+ if (config->mtu < ETHER_MIN_MTU) {
+ PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
+ config->mtu);
+ return -1;
+ }
+
+ hw->max_mtu = config->mtu;
+ /* Set initial MTU to maximum one supported by vhost */
+ eth_dev->data->mtu = config->mtu;
+
+ } else {
+ hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
+ VLAN_TAG_LEN - hw->vtnet_hdr_size;
+ }
+
PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
config->max_virtqueue_pairs);
PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
@@ -1280,6 +1456,14 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
ret = virtio_alloc_queues(eth_dev);
if (ret < 0)
return ret;
+
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ if (virtio_configure_intr(eth_dev) < 0) {
+ PMD_INIT_LOG(ERR, "failed to configure interrupt");
+ return -1;
+ }
+ }
+
vtpci_reinit_complete(hw);
if (pci_dev)
@@ -1301,7 +1485,7 @@ virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
if (hw->modern) {
/*
* We don't have to re-parse the PCI config space, since
- * rte_eal_pci_map_device() makes sure the mapped address
+ * rte_pci_map_device() makes sure the mapped address
* in secondary process would equal to the one mapped in
* the primary process: error will be returned if that
* requirement is not met.
@@ -1310,12 +1494,12 @@ virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
* (such as dev_cfg, common_cfg, etc.) parsed from the
* primary process, which is stored in shared memory.
*/
- if (rte_eal_pci_map_device(pci_dev)) {
+ if (rte_pci_map_device(pci_dev)) {
PMD_INIT_LOG(DEBUG, "failed to map pci device!");
return -1;
}
} else {
- if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
+ if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
return -1;
}
@@ -1344,8 +1528,6 @@ int
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- struct rte_pci_device *pci_dev;
- uint32_t dev_flags = RTE_ETH_DEV_DETACHABLE;
int ret;
RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
@@ -1355,7 +1537,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
if (!hw->virtio_user_dev) {
- ret = virtio_remap_pci(eth_dev->pci_dev, hw);
+ ret = virtio_remap_pci(RTE_DEV_TO_PCI(eth_dev->device),
+ hw);
if (ret)
return ret;
}
@@ -1379,17 +1562,16 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
return -ENOMEM;
}
- pci_dev = eth_dev->pci_dev;
hw->port_id = eth_dev->data->port_id;
-
- if (pci_dev) {
- ret = vtpci_init(pci_dev, hw, &dev_flags);
+ /* For virtio_user case the hw->virtio_user_dev is populated by
+ * virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
+ */
+ if (!hw->virtio_user_dev) {
+ ret = vtpci_init(RTE_DEV_TO_PCI(eth_dev->device), hw);
if (ret)
return ret;
}
- eth_dev->data->dev_flags = dev_flags;
-
/* reset device and negotiate default features */
ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
if (ret < 0)
@@ -1397,7 +1579,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
/* Setup interrupt callback */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- rte_intr_callback_register(&pci_dev->intr_handle,
+ rte_intr_callback_register(eth_dev->intr_handle,
virtio_interrupt_handler, eth_dev);
return 0;
@@ -1406,8 +1588,6 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
-
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
@@ -1415,7 +1595,6 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
virtio_dev_stop(eth_dev);
virtio_dev_close(eth_dev);
- pci_dev = eth_dev->pci_dev;
eth_dev->dev_ops = NULL;
eth_dev->tx_pkt_burst = NULL;
@@ -1426,29 +1605,37 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
/* reset interrupt callback */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- rte_intr_callback_unregister(&pci_dev->intr_handle,
+ rte_intr_callback_unregister(eth_dev->intr_handle,
virtio_interrupt_handler,
eth_dev);
- rte_eal_pci_unmap_device(pci_dev);
+ if (eth_dev->device)
+ rte_pci_unmap_device(RTE_DEV_TO_PCI(eth_dev->device));
PMD_INIT_LOG(DEBUG, "dev_uninit completed");
return 0;
}
-static struct eth_driver rte_virtio_pmd = {
- .pci_drv = {
- .driver = {
- .name = "net_virtio",
- },
- .id_table = pci_id_virtio_map,
- .drv_flags = RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
+static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
+ eth_virtio_dev_init);
+}
+
+static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
+}
+
+static struct rte_pci_driver rte_virtio_pmd = {
+ .driver = {
+ .name = "net_virtio",
},
- .eth_dev_init = eth_virtio_dev_init,
- .eth_dev_uninit = eth_virtio_dev_uninit,
- .dev_private_size = sizeof(struct virtio_hw),
+ .id_table = pci_id_virtio_map,
+ .drv_flags = 0,
+ .probe = eth_virtio_pci_probe,
+ .remove = eth_virtio_pci_remove,
};
RTE_INIT(rte_virtio_pmd_init);
@@ -1460,7 +1647,7 @@ rte_virtio_pmd_init(void)
return;
}
- rte_eal_pci_register(&rte_virtio_pmd.pci_drv);
+ rte_pci_register(&rte_virtio_pmd);
}
/*
@@ -1520,7 +1707,9 @@ virtio_dev_configure(struct rte_eth_dev *dev)
}
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) {
+ /* Enable vector (0) for Link State Intrerrupt */
+ if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
+ VIRTIO_MSI_NO_VECTOR) {
PMD_DRV_LOG(ERR, "failed to set config vector");
return -EBUSY;
}
@@ -1543,16 +1732,22 @@ virtio_dev_start(struct rte_eth_dev *dev)
PMD_DRV_LOG(ERR, "link status not supported by host");
return -ENOTSUP;
}
+ }
- if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0) {
+ /* Enable uio/vfio intr/eventfd mapping: althrough we already did that
+ * in device configure, but it could be unmapped when device is
+ * stopped.
+ */
+ if (dev->data->dev_conf.intr_conf.lsc ||
+ dev->data->dev_conf.intr_conf.rxq) {
+ rte_intr_disable(dev->intr_handle);
+
+ if (rte_intr_enable(dev->intr_handle) < 0) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
return -EIO;
}
}
- /* Initialize Link state */
- virtio_dev_link_update(dev, 0);
-
/*Notify the backend
*Otherwise the tap backend might already stop its queue due to fullness.
*vhost backend will have no chance to be waked up
@@ -1582,6 +1777,11 @@ virtio_dev_start(struct rte_eth_dev *dev)
VIRTQUEUE_DUMP(txvq->vq);
}
+ hw->started = 1;
+
+ /* Initialize Link state */
+ virtio_dev_link_update(dev, 0);
+
return 0;
}
@@ -1636,13 +1836,16 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
+ struct virtio_hw *hw = dev->data->dev_private;
struct rte_eth_link link;
+ struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "stop");
- if (dev->data->dev_conf.intr_conf.lsc)
- rte_intr_disable(&dev->pci_dev->intr_handle);
+ if (intr_conf->lsc || intr_conf->rxq)
+ rte_intr_disable(dev->intr_handle);
+ hw->started = 0;
memset(&link, 0, sizeof(link));
virtio_dev_atomic_write_link_status(dev, &link);
}
@@ -1659,7 +1862,9 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_speed = SPEED_10G;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ if (hw->started == 0) {
+ link.link_status = ETH_LINK_DOWN;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw");
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
@@ -1684,13 +1889,12 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- uint64_t tso_mask;
+ uint64_t tso_mask, host_features;
struct virtio_hw *hw = dev->data->dev_private;
- if (dev->pci_dev)
- dev_info->driver_name = dev->driver->pci_drv.driver.name;
- else
- dev_info->driver_name = "virtio_user PMD";
+ dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
+
+ dev_info->pci_dev = dev->device ? RTE_DEV_TO_PCI(dev->device) : NULL;
dev_info->max_rx_queues =
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
dev_info->max_tx_queues =
@@ -1701,18 +1905,25 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_txconf = (struct rte_eth_txconf) {
.txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
};
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO;
- dev_info->tx_offload_capa = 0;
+ host_features = VTPCI_OPS(hw)->get_features(hw);
+ dev_info->rx_offload_capa = 0;
+ if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
+ dev_info->rx_offload_capa |=
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM;
+ }
+ tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6);
+ if ((host_features & tso_mask) == tso_mask)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ dev_info->tx_offload_capa = 0;
if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
dev_info->tx_offload_capa |=
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
}
-
tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
(1ULL << VIRTIO_NET_F_HOST_TSO6);
if ((hw->guest_features & tso_mask) == tso_mask)
@@ -1732,3 +1943,4 @@ __rte_unused uint8_t is_rx)
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 4feccf93..c3413c6d 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -51,7 +51,7 @@
#define VIRTIO_MAX_TX_QUEUES 128U
#define VIRTIO_MAX_MAC_ADDRS 64
#define VIRTIO_MIN_RX_BUFSIZE 64
-#define VIRTIO_MAX_RX_PKTLEN 9728
+#define VIRTIO_MAX_RX_PKTLEN 9728U
/* Features desired/implemented by this driver. */
#define VIRTIO_PMD_DEFAULT_GUEST_FEATURES \
@@ -66,6 +66,7 @@
1u << VIRTIO_NET_F_HOST_TSO4 | \
1u << VIRTIO_NET_F_HOST_TSO6 | \
1u << VIRTIO_NET_F_MRG_RXBUF | \
+ 1u << VIRTIO_NET_F_MTU | \
1u << VIRTIO_RING_F_INDIRECT_DESC | \
1ULL << VIRTIO_F_VERSION_1 | \
1ULL << VIRTIO_F_IOMMU_PLATFORM)
@@ -83,6 +84,9 @@ void virtio_dev_cq_start(struct rte_eth_dev *dev);
/*
* RX/TX function prototypes
*/
+
+int virtio_dev_rx_queue_done(void *rxq, uint16_t offset);
+
int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
@@ -109,4 +113,6 @@ uint16_t virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
int eth_virtio_dev_init(struct rte_eth_dev *eth_dev);
+void virtio_interrupt_handler(void *param);
+
#endif /* _VIRTIO_ETHDEV_H_ */
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index 8d5355c7..b7b3d615 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -37,6 +37,8 @@
#include <fcntl.h>
#endif
+#include <rte_io.h>
+
#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"
@@ -48,6 +50,7 @@
*/
#define PCI_CAPABILITY_LIST 0x34
#define PCI_CAP_ID_VNDR 0x09
+#define PCI_CAP_ID_MSIX 0x11
/*
* The remaining space is defined by each driver as the per-driver
@@ -92,17 +95,17 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
while (length > 0) {
if (length >= 4) {
size = 4;
- rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
+ rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
VIRTIO_PCI_CONFIG(hw) + offset);
*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
} else if (length >= 2) {
size = 2;
- rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
+ rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
VIRTIO_PCI_CONFIG(hw) + offset);
*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
} else {
size = 1;
- rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
+ rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
VIRTIO_PCI_CONFIG(hw) + offset);
}
@@ -111,8 +114,8 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
length -= size;
}
#else
- rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, length,
- VIRTIO_PCI_CONFIG(hw) + offset);
+ rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
+ VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
@@ -131,16 +134,16 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
if (length >= 4) {
size = 4;
tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
+ rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
VIRTIO_PCI_CONFIG(hw) + offset);
} else if (length >= 2) {
size = 2;
tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
+ rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
VIRTIO_PCI_CONFIG(hw) + offset);
} else {
size = 1;
- rte_eal_pci_ioport_write(VTPCI_IO(hw), src, size,
+ rte_pci_ioport_write(VTPCI_IO(hw), src, size,
VIRTIO_PCI_CONFIG(hw) + offset);
}
@@ -149,8 +152,8 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
length -= size;
}
#else
- rte_eal_pci_ioport_write(VTPCI_IO(hw), src, length,
- VIRTIO_PCI_CONFIG(hw) + offset);
+ rte_pci_ioport_write(VTPCI_IO(hw), src, length,
+ VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
@@ -159,8 +162,7 @@ legacy_get_features(struct virtio_hw *hw)
{
uint32_t dst;
- rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 4,
- VIRTIO_PCI_HOST_FEATURES);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
return dst;
}
@@ -172,8 +174,8 @@ legacy_set_features(struct virtio_hw *hw, uint64_t features)
"only 32 bit features are allowed for legacy virtio!");
return;
}
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &features, 4,
- VIRTIO_PCI_GUEST_FEATURES);
+ rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
+ VIRTIO_PCI_GUEST_FEATURES);
}
static uint8_t
@@ -181,14 +183,14 @@ legacy_get_status(struct virtio_hw *hw)
{
uint8_t dst;
- rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
return dst;
}
static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
+ rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
}
static void
@@ -202,7 +204,7 @@ legacy_get_isr(struct virtio_hw *hw)
{
uint8_t dst;
- rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
return dst;
}
@@ -212,10 +214,20 @@ legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
uint16_t dst;
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &vec, 2,
- VIRTIO_MSI_CONFIG_VECTOR);
- rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2,
- VIRTIO_MSI_CONFIG_VECTOR);
+ rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
+ return dst;
+}
+
+static uint16_t
+legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
+{
+ uint16_t dst;
+
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ VIRTIO_PCI_QUEUE_SEL);
+ rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
return dst;
}
@@ -224,9 +236,8 @@ legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
uint16_t dst;
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2,
- VIRTIO_PCI_QUEUE_SEL);
- rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
+ rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
return dst;
}
@@ -238,10 +249,10 @@ legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
if (!check_vq_phys_addr_ok(vq))
return -1;
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
- VIRTIO_PCI_QUEUE_SEL);
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ VIRTIO_PCI_QUEUE_SEL);
src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
+ rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
return 0;
}
@@ -251,57 +262,16 @@ legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
uint32_t src = 0;
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
- VIRTIO_PCI_QUEUE_SEL);
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ VIRTIO_PCI_QUEUE_SEL);
+ rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}
static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
- VIRTIO_PCI_QUEUE_NOTIFY);
-}
-
-#ifdef RTE_EXEC_ENV_LINUXAPP
-static int
-legacy_virtio_has_msix(const struct rte_pci_addr *loc)
-{
- DIR *d;
- char dirname[PATH_MAX];
-
- snprintf(dirname, sizeof(dirname),
- "%s/" PCI_PRI_FMT "/msi_irqs", pci_get_sysfs_path(),
- loc->domain, loc->bus, loc->devid, loc->function);
-
- d = opendir(dirname);
- if (d)
- closedir(d);
-
- return d != NULL;
-}
-#else
-static int
-legacy_virtio_has_msix(const struct rte_pci_addr *loc __rte_unused)
-{
- /* nic_uio does not enable interrupts, return 0 (false). */
- return 0;
-}
-#endif
-
-static int
-legacy_virtio_resource_init(struct rte_pci_device *pci_dev,
- struct virtio_hw *hw, uint32_t *dev_flags)
-{
- if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
- return -1;
-
- if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UNKNOWN)
- *dev_flags |= RTE_ETH_DEV_INTR_LSC;
- else
- *dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
-
- return 0;
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ VIRTIO_PCI_QUEUE_NOTIFY);
}
const struct virtio_pci_ops legacy_ops = {
@@ -314,54 +284,18 @@ const struct virtio_pci_ops legacy_ops = {
.set_features = legacy_set_features,
.get_isr = legacy_get_isr,
.set_config_irq = legacy_set_config_irq,
+ .set_queue_irq = legacy_set_queue_irq,
.get_queue_num = legacy_get_queue_num,
.setup_queue = legacy_setup_queue,
.del_queue = legacy_del_queue,
.notify_queue = legacy_notify_queue,
};
-
-static inline uint8_t
-io_read8(uint8_t *addr)
-{
- return *(volatile uint8_t *)addr;
-}
-
-static inline void
-io_write8(uint8_t val, uint8_t *addr)
-{
- *(volatile uint8_t *)addr = val;
-}
-
-static inline uint16_t
-io_read16(uint16_t *addr)
-{
- return *(volatile uint16_t *)addr;
-}
-
-static inline void
-io_write16(uint16_t val, uint16_t *addr)
-{
- *(volatile uint16_t *)addr = val;
-}
-
-static inline uint32_t
-io_read32(uint32_t *addr)
-{
- return *(volatile uint32_t *)addr;
-}
-
-static inline void
-io_write32(uint32_t val, uint32_t *addr)
-{
- *(volatile uint32_t *)addr = val;
-}
-
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
- io_write32(val & ((1ULL << 32) - 1), lo);
- io_write32(val >> 32, hi);
+ rte_write32(val & ((1ULL << 32) - 1), lo);
+ rte_write32(val >> 32, hi);
}
static void
@@ -373,13 +307,13 @@ modern_read_dev_config(struct virtio_hw *hw, size_t offset,
uint8_t old_gen, new_gen;
do {
- old_gen = io_read8(&hw->common_cfg->config_generation);
+ old_gen = rte_read8(&hw->common_cfg->config_generation);
p = dst;
for (i = 0; i < length; i++)
- *p++ = io_read8((uint8_t *)hw->dev_cfg + offset + i);
+ *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);
- new_gen = io_read8(&hw->common_cfg->config_generation);
+ new_gen = rte_read8(&hw->common_cfg->config_generation);
} while (old_gen != new_gen);
}
@@ -391,7 +325,7 @@ modern_write_dev_config(struct virtio_hw *hw, size_t offset,
const uint8_t *p = src;
for (i = 0; i < length; i++)
- io_write8(*p++, (uint8_t *)hw->dev_cfg + offset + i);
+ rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}
static uint64_t
@@ -399,11 +333,11 @@ modern_get_features(struct virtio_hw *hw)
{
uint32_t features_lo, features_hi;
- io_write32(0, &hw->common_cfg->device_feature_select);
- features_lo = io_read32(&hw->common_cfg->device_feature);
+ rte_write32(0, &hw->common_cfg->device_feature_select);
+ features_lo = rte_read32(&hw->common_cfg->device_feature);
- io_write32(1, &hw->common_cfg->device_feature_select);
- features_hi = io_read32(&hw->common_cfg->device_feature);
+ rte_write32(1, &hw->common_cfg->device_feature_select);
+ features_hi = rte_read32(&hw->common_cfg->device_feature);
return ((uint64_t)features_hi << 32) | features_lo;
}
@@ -411,25 +345,25 @@ modern_get_features(struct virtio_hw *hw)
static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
{
- io_write32(0, &hw->common_cfg->guest_feature_select);
- io_write32(features & ((1ULL << 32) - 1),
- &hw->common_cfg->guest_feature);
+ rte_write32(0, &hw->common_cfg->guest_feature_select);
+ rte_write32(features & ((1ULL << 32) - 1),
+ &hw->common_cfg->guest_feature);
- io_write32(1, &hw->common_cfg->guest_feature_select);
- io_write32(features >> 32,
- &hw->common_cfg->guest_feature);
+ rte_write32(1, &hw->common_cfg->guest_feature_select);
+ rte_write32(features >> 32,
+ &hw->common_cfg->guest_feature);
}
static uint8_t
modern_get_status(struct virtio_hw *hw)
{
- return io_read8(&hw->common_cfg->device_status);
+ return rte_read8(&hw->common_cfg->device_status);
}
static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
- io_write8(status, &hw->common_cfg->device_status);
+ rte_write8(status, &hw->common_cfg->device_status);
}
static void
@@ -442,21 +376,29 @@ modern_reset(struct virtio_hw *hw)
static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
- return io_read8(hw->isr);
+ return rte_read8(hw->isr);
}
static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
- io_write16(vec, &hw->common_cfg->msix_config);
- return io_read16(&hw->common_cfg->msix_config);
+ rte_write16(vec, &hw->common_cfg->msix_config);
+ return rte_read16(&hw->common_cfg->msix_config);
+}
+
+static uint16_t
+modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vec, &hw->common_cfg->queue_msix_vector);
+ return rte_read16(&hw->common_cfg->queue_msix_vector);
}
static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
- io_write16(queue_id, &hw->common_cfg->queue_select);
- return io_read16(&hw->common_cfg->queue_size);
+ rte_write16(queue_id, &hw->common_cfg->queue_select);
+ return rte_read16(&hw->common_cfg->queue_size);
}
static int
@@ -474,7 +416,7 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
ring[vq->vq_nentries]),
VIRTIO_PCI_VRING_ALIGN);
- io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
&hw->common_cfg->queue_desc_hi);
@@ -483,11 +425,11 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
&hw->common_cfg->queue_used_hi);
- notify_off = io_read16(&hw->common_cfg->queue_notify_off);
+ notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
notify_off * hw->notify_off_multiplier);
- io_write16(1, &hw->common_cfg->queue_enable);
+ rte_write16(1, &hw->common_cfg->queue_enable);
PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
@@ -502,7 +444,7 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
- io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
&hw->common_cfg->queue_desc_hi);
@@ -511,13 +453,13 @@ modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
&hw->common_cfg->queue_used_hi);
- io_write16(0, &hw->common_cfg->queue_enable);
+ rte_write16(0, &hw->common_cfg->queue_enable);
}
static void
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
{
- io_write16(1, vq->notify_addr);
+ rte_write16(vq->vq_queue_index, vq->notify_addr);
}
const struct virtio_pci_ops modern_ops = {
@@ -530,6 +472,7 @@ const struct virtio_pci_ops modern_ops = {
.set_features = modern_set_features,
.get_isr = modern_get_isr,
.set_config_irq = modern_set_config_irq,
+ .set_queue_irq = modern_set_queue_irq,
.get_queue_num = modern_get_queue_num,
.setup_queue = modern_setup_queue,
.del_queue = modern_del_queue,
@@ -601,14 +544,6 @@ vtpci_isr(struct virtio_hw *hw)
return VTPCI_OPS(hw)->get_isr(hw);
}
-
-/* Enable one vector (0) for Link State Intrerrupt */
-uint16_t
-vtpci_irq_config(struct virtio_hw *hw, uint16_t vec)
-{
- return VTPCI_OPS(hw)->set_config_irq(hw, vec);
-}
-
static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
@@ -651,25 +586,28 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
struct virtio_pci_cap cap;
int ret;
- if (rte_eal_pci_map_device(dev)) {
+ if (rte_pci_map_device(dev)) {
PMD_INIT_LOG(DEBUG, "failed to map pci device!");
return -1;
}
- ret = rte_eal_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
+ ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
if (ret < 0) {
PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
return -1;
}
while (pos) {
- ret = rte_eal_pci_read_config(dev, &cap, sizeof(cap), pos);
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
if (ret < 0) {
PMD_INIT_LOG(ERR,
"failed to read pci cap at pos: %x", pos);
break;
}
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX)
+ hw->use_msix = 1;
+
if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
PMD_INIT_LOG(DEBUG,
"[%2x] skipping non VNDR cap id: %02x",
@@ -686,8 +624,8 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
hw->common_cfg = get_cfg_addr(dev, &cap);
break;
case VIRTIO_PCI_CAP_NOTIFY_CFG:
- rte_eal_pci_read_config(dev, &hw->notify_off_multiplier,
- 4, pos + sizeof(cap));
+ rte_pci_read_config(dev, &hw->notify_off_multiplier,
+ 4, pos + sizeof(cap));
hw->notify_base = get_cfg_addr(dev, &cap);
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
@@ -728,11 +666,8 @@ next:
* Return 0 on success.
*/
int
-vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
- uint32_t *dev_flags)
+vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
{
- hw->dev = dev;
-
/*
* Try if we can succeed reading virtio pci caps, which exists
* only on modern pci device. If failed, we fallback to legacy
@@ -742,12 +677,11 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
PMD_INIT_LOG(INFO, "modern virtio pci detected.");
virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
hw->modern = 1;
- *dev_flags |= RTE_ETH_DEV_INTR_LSC;
return 0;
}
PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
- if (legacy_virtio_resource_init(dev, hw, dev_flags) < 0) {
+ if (rte_pci_ioport_map(dev, 0, VTPCI_IO(hw)) < 0) {
if (dev->kdrv == RTE_KDRV_UNKNOWN &&
(!dev->device.devargs ||
dev->device.devargs->type !=
@@ -760,7 +694,6 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
}
virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
- hw->use_msix = legacy_virtio_has_msix(&dev->addr);
hw->modern = 0;
return 0;
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index 511a1c87..18caebdd 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -106,6 +106,7 @@ struct virtnet_ctl;
/* The feature bitmap for virtio net */
#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_MTU 3 /* Initial MTU advice. */
#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
@@ -160,7 +161,8 @@ struct virtnet_ctl;
/*
* Maximum number of virtqueues per device.
*/
-#define VIRTIO_MAX_VIRTQUEUES 8
+#define VIRTIO_MAX_VIRTQUEUE_PAIRS 8
+#define VIRTIO_MAX_VIRTQUEUES (VIRTIO_MAX_VIRTQUEUE_PAIRS * 2 + 1)
/* Common configuration */
#define VIRTIO_PCI_CAP_COMMON_CFG 1
@@ -235,6 +237,9 @@ struct virtio_pci_ops {
uint16_t (*set_config_irq)(struct virtio_hw *hw, uint16_t vec);
+ uint16_t (*set_queue_irq)(struct virtio_hw *hw, struct virtqueue *vq,
+ uint16_t vec);
+
uint16_t (*get_queue_num)(struct virtio_hw *hw, uint16_t queue_id);
int (*setup_queue)(struct virtio_hw *hw, struct virtqueue *vq);
void (*del_queue)(struct virtio_hw *hw, struct virtqueue *vq);
@@ -248,6 +253,8 @@ struct virtio_hw {
uint64_t req_guest_features;
uint64_t guest_features;
uint32_t max_queue_pairs;
+ uint16_t started;
+ uint16_t max_mtu;
uint16_t vtnet_hdr_size;
uint8_t vlan_strip;
uint8_t use_msix;
@@ -258,7 +265,6 @@ struct virtio_hw {
uint32_t notify_off_multiplier;
uint8_t *isr;
uint16_t *notify_base;
- struct rte_pci_device *dev;
struct virtio_pci_common_cfg *common_cfg;
struct virtio_net_config *dev_cfg;
void *virtio_user_dev;
@@ -294,6 +300,7 @@ struct virtio_net_config {
/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
uint16_t status;
uint16_t max_virtqueue_pairs;
+ uint16_t mtu;
} __attribute__((packed));
/*
@@ -314,8 +321,7 @@ vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
/*
* Function declaration from virtio_pci.c
*/
-int vtpci_init(struct rte_pci_device *, struct virtio_hw *,
- uint32_t *dev_flags);
+int vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw);
void vtpci_reset(struct virtio_hw *);
void vtpci_reinit_complete(struct virtio_hw *);
@@ -331,8 +337,6 @@ void vtpci_read_dev_config(struct virtio_hw *, size_t, void *, int);
uint8_t vtpci_isr(struct virtio_hw *);
-uint16_t vtpci_irq_config(struct virtio_hw *, uint16_t);
-
extern const struct virtio_pci_ops legacy_ops;
extern const struct virtio_pci_ops modern_ops;
extern const struct virtio_pci_ops virtio_user_ops;
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index a33ef1a8..fbc96dfb 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -72,6 +72,15 @@
#define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
ETH_TXQ_FLAGS_NOOFFLOADS)
+int
+virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
+{
+ struct virtnet_rx *rxvq = rxq;
+ struct virtqueue *vq = rxvq->vq;
+
+ return VIRTQUEUE_NUSED(vq) >= offset;
+}
+
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
@@ -124,7 +133,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
if (unlikely(cookie == NULL)) {
- PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
vq->vq_used_cons_idx);
break;
}
@@ -716,7 +725,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
- struct virtio_hw *hw;
+ struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm, *new_mbuf;
uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
@@ -727,6 +736,10 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
int offload;
struct virtio_net_hdr *hdr;
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb();
@@ -739,8 +752,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
- hw = vq->hw;
- nb_rx = 0;
nb_enqueued = 0;
hdr_size = hw->vtnet_hdr_size;
offload = rx_offload_enabled(hw);
@@ -763,8 +774,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->ol_flags = 0;
rxm->vlan_tci = 0;
- rxm->nb_segs = 1;
- rxm->next = NULL;
rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
rxm->data_len = (uint16_t)(len[i] - hdr_size);
@@ -784,7 +793,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_pkts[nb_rx++] = rxm;
- rxvq->stats.bytes += rx_pkts[nb_rx - 1]->pkt_len;
+ rxvq->stats.bytes += rxm->pkt_len;
virtio_update_packet_stats(&rxvq->stats, rxm);
}
@@ -827,7 +836,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
{
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
- struct virtio_hw *hw;
+ struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm, *new_mbuf;
uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
@@ -841,14 +850,16 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint32_t hdr_size;
int offload;
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb();
PMD_RX_LOG(DEBUG, "used:%d", nb_used);
- hw = vq->hw;
- nb_rx = 0;
i = 0;
nb_enqueued = 0;
seg_num = 0;
@@ -891,7 +902,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->nb_segs = seg_num;
- rxm->next = NULL;
rxm->ol_flags = 0;
rxm->vlan_tci = 0;
rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
@@ -936,7 +946,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rxm = rcv_pkts[extra_idx];
rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
- rxm->next = NULL;
rxm->pkt_len = (uint32_t)(len[extra_idx]);
rxm->data_len = (uint16_t)(len[extra_idx]);
@@ -1000,9 +1009,12 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
struct virtqueue *vq = txvq->vq;
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
- uint16_t nb_used, nb_tx;
+ uint16_t nb_used, nb_tx = 0;
int error;
+ if (unlikely(hw->started == 0))
+ return nb_tx;
+
if (unlikely(nb_pkts < 1))
return nb_pkts;
@@ -1027,7 +1039,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* optimize ring usage */
- if (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) &&
+ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
rte_mbuf_refcnt_read(txm) == 1 &&
RTE_MBUF_DIRECT(txm) &&
txm->nb_segs == 1 &&
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index b651e53b..542cf805 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -89,12 +89,17 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct virtnet_tx *txvq = tx_queue;
struct virtqueue *vq = txvq->vq;
+ struct virtio_hw *hw = vq->hw;
uint16_t nb_used;
uint16_t desc_idx;
struct vring_desc *start_dp;
uint16_t nb_tail, nb_commit;
int i;
uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
+ uint16_t nb_tx = 0;
+
+ if (unlikely(hw->started == 0))
+ return nb_tx;
nb_used = VIRTQUEUE_NUSED(vq);
rte_compiler_barrier();
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index b08f8594..f531c542 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -98,13 +98,13 @@ virtio_xmit_cleanup(struct virtqueue *vq)
desc_idx = (uint16_t)(vq->vq_used_cons_idx &
((vq->vq_nentries >> 1) - 1));
m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = __rte_pktmbuf_prefree_seg(m);
+ m = rte_pktmbuf_prefree_seg(m);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = __rte_pktmbuf_prefree_seg(m);
+ m = rte_pktmbuf_prefree_seg(m);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool))
free[nb_free++] = m;
@@ -123,7 +123,7 @@ virtio_xmit_cleanup(struct virtqueue *vq)
} else {
for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = __rte_pktmbuf_prefree_seg(m);
+ m = rte_pktmbuf_prefree_seg(m);
if (m != NULL)
rte_mempool_put(m->pool, m);
}
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index 793eefbe..ecc62ada 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -72,12 +72,13 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
{
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
uint16_t nb_used;
uint16_t desc_idx;
struct vring_used_elem *rused;
struct rte_mbuf **sw_ring;
struct rte_mbuf **sw_ring_end;
- uint16_t nb_pkts_received;
+ uint16_t nb_pkts_received = 0;
uint8x16_t shuf_msk1 = {
0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
@@ -106,6 +107,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
0, 0
};
+ if (unlikely(hw->started == 0))
+ return nb_pkts_received;
+
if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
return 0;
diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
index 87bb5c63..7cf0f8b8 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
@@ -74,12 +74,13 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
{
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
uint16_t nb_used;
uint16_t desc_idx;
struct vring_used_elem *rused;
struct rte_mbuf **sw_ring;
struct rte_mbuf **sw_ring_end;
- uint16_t nb_pkts_received;
+ uint16_t nb_pkts_received = 0;
__m128i shuf_msk1, shuf_msk2, len_adjust;
shuf_msk1 = _mm_set_epi8(
@@ -109,6 +110,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
0, (uint16_t)-vq->hw->vtnet_hdr_size,
0, 0);
+ if (unlikely(hw->started == 0))
+ return nb_pkts_received;
+
if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
return 0;
diff --git a/drivers/net/virtio/virtio_user/vhost.h b/drivers/net/virtio/virtio_user/vhost.h
index 7adb55f5..5c983bd4 100644
--- a/drivers/net/virtio/virtio_user/vhost.h
+++ b/drivers/net/virtio/virtio_user/vhost.h
@@ -42,8 +42,6 @@
#include "../virtio_logs.h"
#include "../virtqueue.h"
-#define VHOST_MEMORY_MAX_NREGIONS 8
-
struct vhost_vring_state {
unsigned int index;
unsigned int num;
@@ -98,6 +96,8 @@ enum vhost_user_request {
VHOST_USER_MAX
};
+const char * const vhost_msg_strings[VHOST_USER_MAX];
+
struct vhost_memory_region {
uint64_t guest_phys_addr;
uint64_t memory_size; /* bytes */
@@ -105,42 +105,19 @@ struct vhost_memory_region {
uint64_t mmap_offset;
};
-struct vhost_memory {
- uint32_t nregions;
- uint32_t padding;
- struct vhost_memory_region regions[VHOST_MEMORY_MAX_NREGIONS];
-};
-
-struct vhost_user_msg {
- enum vhost_user_request request;
+struct virtio_user_dev;
-#define VHOST_USER_VERSION_MASK 0x3
-#define VHOST_USER_REPLY_MASK (0x1 << 2)
- uint32_t flags;
- uint32_t size; /* the following payload size */
- union {
-#define VHOST_USER_VRING_IDX_MASK 0xff
-#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
- uint64_t u64;
- struct vhost_vring_state state;
- struct vhost_vring_addr addr;
- struct vhost_memory memory;
- } payload;
- int fds[VHOST_MEMORY_MAX_NREGIONS];
-} __attribute((packed));
-
-#define VHOST_USER_HDR_SIZE offsetof(struct vhost_user_msg, payload.u64)
-#define VHOST_USER_PAYLOAD_SIZE \
- (sizeof(struct vhost_user_msg) - VHOST_USER_HDR_SIZE)
-
-/* The version of the protocol we support */
-#define VHOST_USER_VERSION 0x1
-
-#define VHOST_USER_F_PROTOCOL_FEATURES 30
-#define VHOST_USER_MQ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)
+struct virtio_user_backend_ops {
+ int (*setup)(struct virtio_user_dev *dev);
+ int (*send_request)(struct virtio_user_dev *dev,
+ enum vhost_user_request req,
+ void *arg);
+ int (*enable_qp)(struct virtio_user_dev *dev,
+ uint16_t pair_idx,
+ int enable);
+};
-int vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg);
-int vhost_user_setup(const char *path);
-int vhost_user_enable_queue_pair(int vhostfd, uint16_t pair_idx, int enable);
+struct virtio_user_backend_ops ops_user;
+struct virtio_user_backend_ops ops_kernel;
#endif
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
new file mode 100644
index 00000000..68d28b13
--- /dev/null
+++ b/drivers/net/virtio/virtio_user/vhost_kernel.c
@@ -0,0 +1,403 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <rte_memory.h>
+#include <rte_eal_memconfig.h>
+
+#include "vhost.h"
+#include "virtio_user_dev.h"
+#include "vhost_kernel_tap.h"
+
+struct vhost_memory_kernel {
+ uint32_t nregions;
+ uint32_t padding;
+ struct vhost_memory_region regions[0];
+};
+
+/* vhost kernel ioctls */
+#define VHOST_VIRTIO 0xAF
+#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
+#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
+#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
+#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
+#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, struct vhost_memory_kernel)
+#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
+#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
+#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
+#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
+#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
+#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
+#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
+#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
+#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
+#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
+
+static uint64_t max_regions = 64;
+
+static void
+get_vhost_kernel_max_regions(void)
+{
+ int fd;
+ char buf[20] = {'\0'};
+
+ fd = open("/sys/module/vhost/parameters/max_mem_regions", O_RDONLY);
+ if (fd < 0)
+ return;
+
+ if (read(fd, buf, sizeof(buf) - 1) > 0)
+ max_regions = strtoull(buf, NULL, 10);
+
+ close(fd);
+}
+
+static uint64_t vhost_req_user_to_kernel[] = {
+ [VHOST_USER_SET_OWNER] = VHOST_SET_OWNER,
+ [VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
+ [VHOST_USER_SET_FEATURES] = VHOST_SET_FEATURES,
+ [VHOST_USER_GET_FEATURES] = VHOST_GET_FEATURES,
+ [VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL,
+ [VHOST_USER_SET_VRING_NUM] = VHOST_SET_VRING_NUM,
+ [VHOST_USER_SET_VRING_BASE] = VHOST_SET_VRING_BASE,
+ [VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE,
+ [VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
+ [VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
+ [VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
+};
+
+/* By default, vhost kernel module allows 64 regions, but DPDK allows
+ * 256 segments. As a relief, below function merges those virtually
+ * adjacent memsegs into one region.
+ */
+static struct vhost_memory_kernel *
+prepare_vhost_memory_kernel(void)
+{
+ uint32_t i, j, k = 0;
+ struct rte_memseg *seg;
+ struct vhost_memory_region *mr;
+ struct vhost_memory_kernel *vm;
+
+ vm = malloc(sizeof(struct vhost_memory_kernel) +
+ max_regions *
+ sizeof(struct vhost_memory_region));
+ if (!vm)
+ return NULL;
+
+ for (i = 0; i < RTE_MAX_MEMSEG; ++i) {
+ seg = &rte_eal_get_configuration()->mem_config->memseg[i];
+ if (!seg->addr)
+ break;
+
+ int new_region = 1;
+
+ for (j = 0; j < k; ++j) {
+ mr = &vm->regions[j];
+
+ if (mr->userspace_addr + mr->memory_size ==
+ (uint64_t)(uintptr_t)seg->addr) {
+ mr->memory_size += seg->len;
+ new_region = 0;
+ break;
+ }
+
+ if ((uint64_t)(uintptr_t)seg->addr + seg->len ==
+ mr->userspace_addr) {
+ mr->guest_phys_addr =
+ (uint64_t)(uintptr_t)seg->addr;
+ mr->userspace_addr =
+ (uint64_t)(uintptr_t)seg->addr;
+ mr->memory_size += seg->len;
+ new_region = 0;
+ break;
+ }
+ }
+
+ if (new_region == 0)
+ continue;
+
+ mr = &vm->regions[k++];
+ /* use vaddr here! */
+ mr->guest_phys_addr = (uint64_t)(uintptr_t)seg->addr;
+ mr->userspace_addr = (uint64_t)(uintptr_t)seg->addr;
+ mr->memory_size = seg->len;
+ mr->mmap_offset = 0;
+
+ if (k >= max_regions) {
+ free(vm);
+ return NULL;
+ }
+ }
+
+ vm->nregions = k;
+ vm->padding = 0;
+ return vm;
+}
+
+/* with below features, vhost kernel does not need to do the checksum and TSO,
+ * these info will be passed to virtio_user through virtio net header.
+ */
+#define VHOST_KERNEL_GUEST_OFFLOADS_MASK \
+ ((1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
+ (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
+ (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
+ (1ULL << VIRTIO_NET_F_GUEST_UFO))
+
+/* with below features, when flows from virtio_user to vhost kernel
+ * (1) if flows goes up through the kernel networking stack, it does not need
+ * to verify checksum, which can save CPU cycles;
+ * (2) if flows goes through a Linux bridge and outside from an interface
+ * (kernel driver), checksum and TSO will be done by GSO in kernel or even
+ * offloaded into real physical device.
+ */
+#define VHOST_KERNEL_HOST_OFFLOADS_MASK \
+ ((1ULL << VIRTIO_NET_F_HOST_TSO4) | \
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
+ (1ULL << VIRTIO_NET_F_CSUM))
+
+static int
+tap_supporte_mq(void)
+{
+ int tapfd;
+ unsigned int tap_features;
+
+ tapfd = open(PATH_NET_TUN, O_RDWR);
+ if (tapfd < 0) {
+ PMD_DRV_LOG(ERR, "fail to open %s: %s",
+ PATH_NET_TUN, strerror(errno));
+ return -1;
+ }
+
+ if (ioctl(tapfd, TUNGETFEATURES, &tap_features) == -1) {
+ PMD_DRV_LOG(ERR, "TUNGETFEATURES failed: %s", strerror(errno));
+ close(tapfd);
+ return -1;
+ }
+
+ close(tapfd);
+ return tap_features & IFF_MULTI_QUEUE;
+}
+
+static int
+vhost_kernel_ioctl(struct virtio_user_dev *dev,
+ enum vhost_user_request req,
+ void *arg)
+{
+ int ret = -1;
+ unsigned int i;
+ uint64_t req_kernel;
+ struct vhost_memory_kernel *vm = NULL;
+ int vhostfd;
+ unsigned int queue_sel;
+
+ PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
+
+ req_kernel = vhost_req_user_to_kernel[req];
+
+ if (req_kernel == VHOST_SET_MEM_TABLE) {
+ vm = prepare_vhost_memory_kernel();
+ if (!vm)
+ return -1;
+ arg = (void *)vm;
+ }
+
+ if (req_kernel == VHOST_SET_FEATURES) {
+ /* We don't need memory protection here */
+ *(uint64_t *)arg &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
+
+ /* VHOST kernel does not know about below flags */
+ *(uint64_t *)arg &= ~VHOST_KERNEL_GUEST_OFFLOADS_MASK;
+ *(uint64_t *)arg &= ~VHOST_KERNEL_HOST_OFFLOADS_MASK;
+
+ *(uint64_t *)arg &= ~(1ULL << VIRTIO_NET_F_MQ);
+ }
+
+ switch (req_kernel) {
+ case VHOST_SET_VRING_NUM:
+ case VHOST_SET_VRING_ADDR:
+ case VHOST_SET_VRING_BASE:
+ case VHOST_GET_VRING_BASE:
+ case VHOST_SET_VRING_KICK:
+ case VHOST_SET_VRING_CALL:
+ queue_sel = *(unsigned int *)arg;
+ vhostfd = dev->vhostfds[queue_sel / 2];
+ *(unsigned int *)arg = queue_sel % 2;
+ PMD_DRV_LOG(DEBUG, "vhostfd=%d, index=%u",
+ vhostfd, *(unsigned int *)arg);
+ break;
+ default:
+ vhostfd = -1;
+ }
+ if (vhostfd == -1) {
+ for (i = 0; i < dev->max_queue_pairs; ++i) {
+ if (dev->vhostfds[i] < 0)
+ continue;
+
+ ret = ioctl(dev->vhostfds[i], req_kernel, arg);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ ret = ioctl(vhostfd, req_kernel, arg);
+ }
+
+ if (!ret && req_kernel == VHOST_GET_FEATURES) {
+ /* with tap as the backend, all these features are supported
+ * but not claimed by vhost-net, so we add them back when
+ * reporting to upper layer.
+ */
+ *((uint64_t *)arg) |= VHOST_KERNEL_GUEST_OFFLOADS_MASK;
+ *((uint64_t *)arg) |= VHOST_KERNEL_HOST_OFFLOADS_MASK;
+
+ /* vhost_kernel will not declare this feature, but it does
+ * support multi-queue.
+ */
+ if (tap_supporte_mq())
+ *(uint64_t *)arg |= (1ull << VIRTIO_NET_F_MQ);
+ }
+
+ if (vm)
+ free(vm);
+
+ if (ret < 0)
+ PMD_DRV_LOG(ERR, "%s failed: %s",
+ vhost_msg_strings[req], strerror(errno));
+
+ return ret;
+}
+
+/**
+ * Set up environment to talk with a vhost kernel backend.
+ *
+ * @return
+ * - (-1) if fail to set up;
+ * - (>=0) if successful.
+ */
+static int
+vhost_kernel_setup(struct virtio_user_dev *dev)
+{
+ int vhostfd;
+ uint32_t i;
+
+ get_vhost_kernel_max_regions();
+
+ for (i = 0; i < dev->max_queue_pairs; ++i) {
+ vhostfd = open(dev->path, O_RDWR);
+ if (vhostfd < 0) {
+ PMD_DRV_LOG(ERR, "fail to open %s, %s",
+ dev->path, strerror(errno));
+ return -1;
+ }
+
+ dev->vhostfds[i] = vhostfd;
+ }
+
+ return 0;
+}
+
+static int
+vhost_kernel_set_backend(int vhostfd, int tapfd)
+{
+ struct vhost_vring_file f;
+
+ f.fd = tapfd;
+ f.index = 0;
+ if (ioctl(vhostfd, VHOST_NET_SET_BACKEND, &f) < 0) {
+ PMD_DRV_LOG(ERR, "VHOST_NET_SET_BACKEND fails, %s",
+ strerror(errno));
+ return -1;
+ }
+
+ f.index = 1;
+ if (ioctl(vhostfd, VHOST_NET_SET_BACKEND, &f) < 0) {
+ PMD_DRV_LOG(ERR, "VHOST_NET_SET_BACKEND fails, %s",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
+ uint16_t pair_idx,
+ int enable)
+{
+ int hdr_size;
+ int vhostfd;
+ int tapfd;
+ int req_mq = (dev->max_queue_pairs > 1);
+
+ vhostfd = dev->vhostfds[pair_idx];
+
+ if (!enable) {
+ if (dev->tapfds[pair_idx] >= 0) {
+ close(dev->tapfds[pair_idx]);
+ dev->tapfds[pair_idx] = -1;
+ }
+ return vhost_kernel_set_backend(vhostfd, -1);
+ } else if (dev->tapfds[pair_idx] >= 0) {
+ return 0;
+ }
+
+ if ((dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ||
+ (dev->features & (1ULL << VIRTIO_F_VERSION_1)))
+ hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ else
+ hdr_size = sizeof(struct virtio_net_hdr);
+
+ tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq);
+ if (tapfd < 0) {
+ PMD_DRV_LOG(ERR, "fail to open tap for vhost kernel");
+ return -1;
+ }
+
+ if (vhost_kernel_set_backend(vhostfd, tapfd) < 0) {
+ PMD_DRV_LOG(ERR, "fail to set backend for vhost kernel");
+ close(tapfd);
+ return -1;
+ }
+
+ dev->tapfds[pair_idx] = tapfd;
+ return 0;
+}
+
+struct virtio_user_backend_ops ops_kernel = {
+ .setup = vhost_kernel_setup,
+ .send_request = vhost_kernel_ioctl,
+ .enable_qp = vhost_kernel_enable_queue_pair
+};
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
new file mode 100644
index 00000000..f585de8c
--- /dev/null
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
@@ -0,0 +1,133 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <net/if.h>
+#include <errno.h>
+#include <string.h>
+#include <limits.h>
+
+#include "vhost_kernel_tap.h"
+#include "../virtio_logs.h"
+
+int
+vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq)
+{
+ unsigned int tap_features;
+ int sndbuf = INT_MAX;
+ struct ifreq ifr;
+ int tapfd;
+ unsigned int offload =
+ TUN_F_CSUM |
+ TUN_F_TSO4 |
+ TUN_F_TSO6 |
+ TUN_F_TSO_ECN |
+ TUN_F_UFO;
+
+ /* TODO:
+ * 1. verify we can get/set vnet_hdr_len, tap_probe_vnet_hdr_len
+ * 2. get number of memory regions from vhost module parameter
+ * max_mem_regions, supported in newer version linux kernel
+ */
+ tapfd = open(PATH_NET_TUN, O_RDWR);
+ if (tapfd < 0) {
+ PMD_DRV_LOG(ERR, "fail to open %s: %s",
+ PATH_NET_TUN, strerror(errno));
+ return -1;
+ }
+
+ /* Construct ifr */
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
+
+ if (ioctl(tapfd, TUNGETFEATURES, &tap_features) == -1) {
+ PMD_DRV_LOG(ERR, "TUNGETFEATURES failed: %s", strerror(errno));
+ goto error;
+ }
+ if (tap_features & IFF_ONE_QUEUE)
+ ifr.ifr_flags |= IFF_ONE_QUEUE;
+
+ /* Let tap instead of vhost-net handle vnet header, as the latter does
+ * not support offloading. And in this case, we should not set feature
+ * bit VHOST_NET_F_VIRTIO_NET_HDR.
+ */
+ if (tap_features & IFF_VNET_HDR) {
+ ifr.ifr_flags |= IFF_VNET_HDR;
+ } else {
+ PMD_DRV_LOG(ERR, "TAP does not support IFF_VNET_HDR");
+ goto error;
+ }
+
+ if (req_mq)
+ ifr.ifr_flags |= IFF_MULTI_QUEUE;
+
+ if (*p_ifname)
+ strncpy(ifr.ifr_name, *p_ifname, IFNAMSIZ);
+ else
+ strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ);
+ if (ioctl(tapfd, TUNSETIFF, (void *)&ifr) == -1) {
+ PMD_DRV_LOG(ERR, "TUNSETIFF failed: %s", strerror(errno));
+ goto error;
+ }
+
+ fcntl(tapfd, F_SETFL, O_NONBLOCK);
+
+ if (ioctl(tapfd, TUNSETVNETHDRSZ, &hdr_size) < 0) {
+ PMD_DRV_LOG(ERR, "TUNSETVNETHDRSZ failed: %s", strerror(errno));
+ goto error;
+ }
+
+ if (ioctl(tapfd, TUNSETSNDBUF, &sndbuf) < 0) {
+ PMD_DRV_LOG(ERR, "TUNSETSNDBUF failed: %s", strerror(errno));
+ goto error;
+ }
+
+ /* TODO: before set the offload capabilities, we'd better (1) check
+ * negotiated features to see if necessary to offload; (2) query tap
+ * to see if it supports the offload capabilities.
+ */
+ if (ioctl(tapfd, TUNSETOFFLOAD, offload) != 0)
+ PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s",
+ strerror(errno));
+
+ if (!(*p_ifname))
+ *p_ifname = strdup(ifr.ifr_name);
+
+ return tapfd;
+error:
+ close(tapfd);
+ return -1;
+}
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
new file mode 100644
index 00000000..eae340cc
--- /dev/null
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
@@ -0,0 +1,67 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/ioctl.h>
+
+/* TUN ioctls */
+#define TUNSETIFF _IOW('T', 202, int)
+#define TUNGETFEATURES _IOR('T', 207, unsigned int)
+#define TUNSETOFFLOAD _IOW('T', 208, unsigned int)
+#define TUNGETIFF _IOR('T', 210, unsigned int)
+#define TUNSETSNDBUF _IOW('T', 212, int)
+#define TUNGETVNETHDRSZ _IOR('T', 215, int)
+#define TUNSETVNETHDRSZ _IOW('T', 216, int)
+#define TUNSETQUEUE _IOW('T', 217, int)
+#define TUNSETVNETLE _IOW('T', 220, int)
+#define TUNSETVNETBE _IOW('T', 222, int)
+
+/* TUNSETIFF ifr flags */
+#define IFF_TAP 0x0002
+#define IFF_NO_PI 0x1000
+#define IFF_ONE_QUEUE 0x2000
+#define IFF_VNET_HDR 0x4000
+#define IFF_MULTI_QUEUE 0x0100
+#define IFF_ATTACH_QUEUE 0x0200
+#define IFF_DETACH_QUEUE 0x0400
+
+/* Features for GSO (TUNSETOFFLOAD). */
+#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */
+#define TUN_F_TSO4 0x02 /* I can handle TSO for IPv4 packets */
+#define TUN_F_TSO6 0x04 /* I can handle TSO for IPv6 packets */
+#define TUN_F_TSO_ECN 0x08 /* I can handle TSO with ECN bits. */
+#define TUN_F_UFO 0x10 /* I can handle UFO packets */
+
+/* Constants */
+#define PATH_NET_TUN "/dev/net/tun"
+
+int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq);
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
index 082e8217..4ad7b21b 100644
--- a/drivers/net/virtio/virtio_user/vhost_user.c
+++ b/drivers/net/virtio/virtio_user/vhost_user.c
@@ -41,6 +41,39 @@
#include <errno.h>
#include "vhost.h"
+#include "virtio_user_dev.h"
+
+/* The version of the protocol we support */
+#define VHOST_USER_VERSION 0x1
+
+#define VHOST_MEMORY_MAX_NREGIONS 8
+struct vhost_memory {
+ uint32_t nregions;
+ uint32_t padding;
+ struct vhost_memory_region regions[VHOST_MEMORY_MAX_NREGIONS];
+};
+
+struct vhost_user_msg {
+ enum vhost_user_request request;
+
+#define VHOST_USER_VERSION_MASK 0x3
+#define VHOST_USER_REPLY_MASK (0x1 << 2)
+ uint32_t flags;
+ uint32_t size; /* the following payload size */
+ union {
+#define VHOST_USER_VRING_IDX_MASK 0xff
+#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
+ uint64_t u64;
+ struct vhost_vring_state state;
+ struct vhost_vring_addr addr;
+ struct vhost_memory memory;
+ } payload;
+ int fds[VHOST_MEMORY_MAX_NREGIONS];
+} __attribute((packed));
+
+#define VHOST_USER_HDR_SIZE offsetof(struct vhost_user_msg, payload.u64)
+#define VHOST_USER_PAYLOAD_SIZE \
+ (sizeof(struct vhost_user_msg) - VHOST_USER_HDR_SIZE)
static int
vhost_user_write(int fd, void *buf, int len, int *fds, int fd_num)
@@ -223,24 +256,25 @@ prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[])
static struct vhost_user_msg m;
-static const char * const vhost_msg_strings[] = {
- [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
- [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
- [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
- [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
- [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
- [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
- [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
- [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
- [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
- [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
- [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
- [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
- NULL,
+const char * const vhost_msg_strings[] = {
+ [VHOST_USER_SET_OWNER] = "VHOST_SET_OWNER",
+ [VHOST_USER_RESET_OWNER] = "VHOST_RESET_OWNER",
+ [VHOST_USER_SET_FEATURES] = "VHOST_SET_FEATURES",
+ [VHOST_USER_GET_FEATURES] = "VHOST_GET_FEATURES",
+ [VHOST_USER_SET_VRING_CALL] = "VHOST_SET_VRING_CALL",
+ [VHOST_USER_SET_VRING_NUM] = "VHOST_SET_VRING_NUM",
+ [VHOST_USER_SET_VRING_BASE] = "VHOST_SET_VRING_BASE",
+ [VHOST_USER_GET_VRING_BASE] = "VHOST_GET_VRING_BASE",
+ [VHOST_USER_SET_VRING_ADDR] = "VHOST_SET_VRING_ADDR",
+ [VHOST_USER_SET_VRING_KICK] = "VHOST_SET_VRING_KICK",
+ [VHOST_USER_SET_MEM_TABLE] = "VHOST_SET_MEM_TABLE",
+ [VHOST_USER_SET_VRING_ENABLE] = "VHOST_SET_VRING_ENABLE",
};
-int
-vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg)
+static int
+vhost_user_sock(struct virtio_user_dev *dev,
+ enum vhost_user_request req,
+ void *arg)
{
struct vhost_user_msg msg;
struct vhost_vring_file *file = 0;
@@ -248,9 +282,9 @@ vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg)
int fds[VHOST_MEMORY_MAX_NREGIONS];
int fd_num = 0;
int i, len;
+ int vhostfd = dev->vhostfd;
RTE_SET_USED(m);
- RTE_SET_USED(vhost_msg_strings);
PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
@@ -371,15 +405,13 @@ vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg)
/**
* Set up environment to talk with a vhost user backend.
- * @param path
- * - The path to vhost user unix socket file.
*
* @return
- * - (-1) if fail to set up;
- * - (>=0) if successful, and it is the fd to vhostfd.
+ * - (-1) if fail;
+ * - (0) if succeed.
*/
-int
-vhost_user_setup(const char *path)
+static int
+vhost_user_setup(struct virtio_user_dev *dev)
{
int fd;
int flag;
@@ -397,18 +429,21 @@ vhost_user_setup(const char *path)
memset(&un, 0, sizeof(un));
un.sun_family = AF_UNIX;
- snprintf(un.sun_path, sizeof(un.sun_path), "%s", path);
+ snprintf(un.sun_path, sizeof(un.sun_path), "%s", dev->path);
if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
close(fd);
return -1;
}
- return fd;
+ dev->vhostfd = fd;
+ return 0;
}
-int
-vhost_user_enable_queue_pair(int vhostfd, uint16_t pair_idx, int enable)
+static int
+vhost_user_enable_queue_pair(struct virtio_user_dev *dev,
+ uint16_t pair_idx,
+ int enable)
{
int i;
@@ -418,10 +453,15 @@ vhost_user_enable_queue_pair(int vhostfd, uint16_t pair_idx, int enable)
.num = enable,
};
- if (vhost_user_sock(vhostfd,
- VHOST_USER_SET_VRING_ENABLE, &state))
+ if (vhost_user_sock(dev, VHOST_USER_SET_VRING_ENABLE, &state))
return -1;
}
return 0;
}
+
+struct virtio_user_backend_ops ops_user = {
+ .setup = vhost_user_setup,
+ .send_request = vhost_user_sock,
+ .enable_qp = vhost_user_enable_queue_pair
+};
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index a38398b8..450404ba 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -39,6 +39,9 @@
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
#include "vhost.h"
#include "virtio_user_dev.h"
@@ -51,21 +54,11 @@ virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
* firstly because vhost depends on this msg to allocate virtqueue
* pair.
*/
- int callfd;
struct vhost_vring_file file;
- /* May use invalid flag, but some backend leverages kickfd and callfd as
- * criteria to judge if dev is alive. so finally we use real event_fd.
- */
- callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
- if (callfd < 0) {
- PMD_DRV_LOG(ERR, "callfd error, %s\n", strerror(errno));
- return -1;
- }
file.index = queue_sel;
- file.fd = callfd;
- vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_CALL, &file);
- dev->callfds[queue_sel] = callfd;
+ file.fd = dev->callfds[queue_sel];
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);
return 0;
}
@@ -73,7 +66,6 @@ virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
- int kickfd;
struct vhost_vring_file file;
struct vhost_vring_state state;
struct vring *vring = &dev->vrings[queue_sel];
@@ -88,26 +80,21 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
state.index = queue_sel;
state.num = vring->num;
- vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_NUM, &state);
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);
+ state.index = queue_sel;
state.num = 0; /* no reservation */
- vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_BASE, &state);
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);
- vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_ADDR, &addr);
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);
/* Of all per virtqueue MSGs, make sure VHOST_USER_SET_VRING_KICK comes
* lastly because vhost depends on this msg to judge if
* virtio is ready.
*/
- kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
- if (kickfd < 0) {
- PMD_DRV_LOG(ERR, "kickfd error, %s\n", strerror(errno));
- return -1;
- }
file.index = queue_sel;
- file.fd = kickfd;
- vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_KICK, &file);
- dev->kickfds[queue_sel] = kickfd;
+ file.fd = dev->kickfds[queue_sel];
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);
return 0;
}
@@ -146,22 +133,20 @@ virtio_user_start_device(struct virtio_user_dev *dev)
if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
goto error;
- /* Step 1: set features
- * Make sure VHOST_USER_F_PROTOCOL_FEATURES is added if mq is enabled,
- * VIRTIO_NET_F_MAC and VIRTIO_NET_F_CTRL_VQ is stripped.
- */
+ /* Step 1: set features */
features = dev->features;
- if (dev->max_queue_pairs > 1)
- features |= VHOST_USER_MQ;
+ /* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
features &= ~(1ull << VIRTIO_NET_F_MAC);
+ /* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know */
features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
- ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_FEATURES, &features);
+ features &= ~(1ull << VIRTIO_NET_F_STATUS);
+ ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
if (ret < 0)
goto error;
PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
/* Step 2: share memory regions */
- ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_MEM_TABLE, NULL);
+ ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
if (ret < 0)
goto error;
@@ -172,7 +157,7 @@ virtio_user_start_device(struct virtio_user_dev *dev)
/* Step 4: enable queues
* we enable the 1st queue pair by default.
*/
- vhost_user_enable_queue_pair(dev->vhostfd, 0, 1);
+ dev->ops->enable_qp(dev, 0, 1);
return 0;
error:
@@ -184,13 +169,8 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
{
uint32_t i;
- for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
- close(dev->callfds[i]);
- close(dev->kickfds[i]);
- }
-
for (i = 0; i < dev->max_queue_pairs; ++i)
- vhost_user_enable_queue_pair(dev->vhostfd, i, 0);
+ dev->ops->enable_qp(dev, i, 0);
return 0;
}
@@ -217,35 +197,170 @@ parse_mac(struct virtio_user_dev *dev, const char *mac)
}
int
-virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
- int cq, int queue_size, const char *mac)
+is_vhost_user_by_type(const char *path)
+{
+ struct stat sb;
+
+ if (stat(path, &sb) == -1)
+ return 0;
+
+ return S_ISSOCK(sb.st_mode);
+}
+
+static int
+virtio_user_dev_init_notify(struct virtio_user_dev *dev)
+{
+ uint32_t i, j;
+ int callfd;
+ int kickfd;
+
+ for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
+ if (i >= dev->max_queue_pairs * 2) {
+ dev->kickfds[i] = -1;
+ dev->callfds[i] = -1;
+ continue;
+ }
+
+ /* May use invalid flag, but some backend uses kickfd and
+ * callfd as criteria to judge if dev is alive. so finally we
+ * use real event_fd.
+ */
+ callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (callfd < 0) {
+ PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
+ break;
+ }
+ kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (kickfd < 0) {
+ PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
+ break;
+ }
+ dev->callfds[i] = callfd;
+ dev->kickfds[i] = kickfd;
+ }
+
+ if (i < VIRTIO_MAX_VIRTQUEUES) {
+ for (j = 0; j <= i; ++j) {
+ close(dev->callfds[j]);
+ close(dev->kickfds[j]);
+ }
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
uint32_t i;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+ if (!eth_dev->intr_handle) {
+ eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
+ if (!eth_dev->intr_handle) {
+ PMD_DRV_LOG(ERR, "fail to allocate intr_handle");
+ return -1;
+ }
+ memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
+ }
+
+ for (i = 0; i < dev->max_queue_pairs; ++i)
+ eth_dev->intr_handle->efds[i] = dev->callfds[i];
+ eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
+ eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
+ eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
+ if (dev->vhostfd >= 0)
+ eth_dev->intr_handle->fd = dev->vhostfd;
+
+ return 0;
+}
+
+static int
+virtio_user_dev_setup(struct virtio_user_dev *dev)
+{
+ uint32_t q;
+
+ dev->vhostfd = -1;
+ dev->vhostfds = NULL;
+ dev->tapfds = NULL;
+
+ if (is_vhost_user_by_type(dev->path)) {
+ dev->ops = &ops_user;
+ } else {
+ dev->ops = &ops_kernel;
+
+ dev->vhostfds = malloc(dev->max_queue_pairs * sizeof(int));
+ dev->tapfds = malloc(dev->max_queue_pairs * sizeof(int));
+ if (!dev->vhostfds || !dev->tapfds) {
+ PMD_INIT_LOG(ERR, "Failed to malloc");
+ return -1;
+ }
+
+ for (q = 0; q < dev->max_queue_pairs; ++q) {
+ dev->vhostfds[q] = -1;
+ dev->tapfds[q] = -1;
+ }
+ }
+
+ if (dev->ops->setup(dev) < 0)
+ return -1;
+
+ if (virtio_user_dev_init_notify(dev) < 0)
+ return -1;
+
+ if (virtio_user_fill_intr_handle(dev) < 0)
+ return -1;
+
+ return 0;
+}
+
+/* Use below macro to filter features from vhost backend */
+#define VIRTIO_USER_SUPPORTED_FEATURES \
+ (1ULL << VIRTIO_NET_F_MAC | \
+ 1ULL << VIRTIO_NET_F_STATUS | \
+ 1ULL << VIRTIO_NET_F_MQ | \
+ 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR | \
+ 1ULL << VIRTIO_NET_F_CTRL_VQ | \
+ 1ULL << VIRTIO_NET_F_CTRL_RX | \
+ 1ULL << VIRTIO_NET_F_CTRL_VLAN | \
+ 1ULL << VIRTIO_NET_F_CSUM | \
+ 1ULL << VIRTIO_NET_F_HOST_TSO4 | \
+ 1ULL << VIRTIO_NET_F_HOST_TSO6 | \
+ 1ULL << VIRTIO_NET_F_MRG_RXBUF | \
+ 1ULL << VIRTIO_RING_F_INDIRECT_DESC | \
+ 1ULL << VIRTIO_NET_F_GUEST_CSUM | \
+ 1ULL << VIRTIO_NET_F_GUEST_TSO4 | \
+ 1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
+ 1ULL << VIRTIO_F_VERSION_1)
+
+int
+virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
+ int cq, int queue_size, const char *mac, char **ifname)
+{
snprintf(dev->path, PATH_MAX, "%s", path);
dev->max_queue_pairs = queues;
dev->queue_pairs = 1; /* mq disabled by default */
dev->queue_size = queue_size;
dev->mac_specified = 0;
parse_mac(dev, mac);
- dev->vhostfd = -1;
- for (i = 0; i < VIRTIO_MAX_VIRTQUEUES * 2 + 1; ++i) {
- dev->kickfds[i] = -1;
- dev->callfds[i] = -1;
+ if (*ifname) {
+ dev->ifname = *ifname;
+ *ifname = NULL;
}
- dev->vhostfd = vhost_user_setup(dev->path);
- if (dev->vhostfd < 0) {
+ if (virtio_user_dev_setup(dev) < 0) {
PMD_INIT_LOG(ERR, "backend set up fails");
return -1;
}
- if (vhost_user_sock(dev->vhostfd, VHOST_USER_SET_OWNER, NULL) < 0) {
+ if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) {
PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
return -1;
}
- if (vhost_user_sock(dev->vhostfd, VHOST_USER_GET_FEATURES,
+ if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
&dev->device_features) < 0) {
PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
return -1;
@@ -268,12 +383,11 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
}
- if (dev->max_queue_pairs > 1) {
- if (!(dev->features & VHOST_USER_MQ)) {
- PMD_INIT_LOG(ERR, "MQ not supported by the backend");
- return -1;
- }
- }
+ /* The backend will not report this feature, we add it explicitly */
+ if (is_vhost_user_by_type(dev->path))
+ dev->device_features |= (1ull << VIRTIO_NET_F_STATUS);
+
+ dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
return 0;
}
@@ -281,7 +395,25 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
+ uint32_t i;
+
+ virtio_user_stop_device(dev);
+
+ for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
+ close(dev->callfds[i]);
+ close(dev->kickfds[i]);
+ }
+
close(dev->vhostfd);
+
+ if (dev->vhostfds) {
+ for (i = 0; i < dev->max_queue_pairs; ++i)
+ close(dev->vhostfds[i]);
+ free(dev->vhostfds);
+ free(dev->tapfds);
+ }
+
+ free(dev->ifname);
}
static uint8_t
@@ -297,9 +429,9 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
}
for (i = 0; i < q_pairs; ++i)
- ret |= vhost_user_enable_queue_pair(dev->vhostfd, i, 1);
+ ret |= dev->ops->enable_qp(dev, i, 1);
for (i = q_pairs; i < dev->max_queue_pairs; ++i)
- ret |= vhost_user_enable_queue_pair(dev->vhostfd, i, 0);
+ ret |= dev->ops->enable_qp(dev, i, 0);
dev->queue_pairs = q_pairs;
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 28fc788e..8361b6bd 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -37,11 +37,20 @@
#include <limits.h>
#include "../virtio_pci.h"
#include "../virtio_ring.h"
+#include "vhost.h"
struct virtio_user_dev {
+ /* for vhost_user backend */
int vhostfd;
- int callfds[VIRTIO_MAX_VIRTQUEUES * 2 + 1];
- int kickfds[VIRTIO_MAX_VIRTQUEUES * 2 + 1];
+
+ /* for vhost_kernel backend */
+ char *ifname;
+ int *vhostfds;
+ int *tapfds;
+
+ /* for both vhost_user and vhost_kernel */
+ int callfds[VIRTIO_MAX_VIRTQUEUES];
+ int kickfds[VIRTIO_MAX_VIRTQUEUES];
int mac_specified;
uint32_t max_queue_pairs;
uint32_t queue_pairs;
@@ -51,15 +60,18 @@ struct virtio_user_dev {
*/
uint64_t device_features; /* supported features by device */
uint8_t status;
+ uint8_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
char path[PATH_MAX];
- struct vring vrings[VIRTIO_MAX_VIRTQUEUES * 2 + 1];
+ struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
+ struct virtio_user_backend_ops *ops;
};
+int is_vhost_user_by_type(const char *path);
int virtio_user_start_device(struct virtio_user_dev *dev);
int virtio_user_stop_device(struct virtio_user_dev *dev);
int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
- int cq, int queue_size, const char *mac);
+ int cq, int queue_size, const char *mac, char **ifname);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
#endif
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 013600e4..280406c0 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -34,10 +34,15 @@
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/socket.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
+#include <rte_ethdev_vdev.h>
#include <rte_vdev.h>
+#include <rte_alarm.h>
#include "virtio_ethdev.h"
#include "virtio_logs.h"
@@ -50,6 +55,17 @@
((struct virtio_user_dev *)(hw)->virtio_user_dev)
static void
+virtio_user_delayed_handler(void *param)
+{
+ struct virtio_hw *hw = (struct virtio_hw *)param;
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
+
+ rte_intr_callback_unregister(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+}
+
+static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
void *dst, int length)
{
@@ -63,8 +79,37 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
return;
}
- if (offset == offsetof(struct virtio_net_config, status))
+ if (offset == offsetof(struct virtio_net_config, status)) {
+ char buf[128];
+
+ if (dev->vhostfd >= 0) {
+ int r;
+ int flags;
+
+ flags = fcntl(dev->vhostfd, F_GETFL);
+ fcntl(dev->vhostfd, F_SETFL, flags | O_NONBLOCK);
+ r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
+ if (r == 0 || (r < 0 && errno != EAGAIN)) {
+ dev->status &= (~VIRTIO_NET_S_LINK_UP);
+ PMD_DRV_LOG(ERR, "virtio-user port %u is down",
+ hw->port_id);
+ /* Only client mode is available now. Once the
+ * connection is broken, it can never be up
+ * again. Besides, this function could be called
+ * in the process of interrupt handling,
+ * callback cannot be unregistered here, set an
+ * alarm to do it.
+ */
+ rte_eal_alarm_set(1,
+ virtio_user_delayed_handler,
+ (void *)hw);
+ } else {
+ dev->status |= VIRTIO_NET_S_LINK_UP;
+ }
+ fcntl(dev->vhostfd, F_SETFL, flags & (~O_NONBLOCK));
+ }
*(uint16_t *)dst = dev->status;
+ }
if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
*(uint16_t *)dst = dev->max_queue_pairs;
@@ -82,7 +127,7 @@ virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
for (i = 0; i < ETHER_ADDR_LEN; ++i)
dev->mac_addr[i] = ((const uint8_t *)src)[i];
else
- PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d\n",
+ PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
offset, length);
}
@@ -135,17 +180,26 @@ virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
- /* When config interrupt happens, driver calls this function to query
- * what kinds of change happen. Interrupt mode not supported for now.
+ /* rxq interrupts and config interrupt are separated in virtio-user,
+ * here we only report config change.
*/
- return 0;
+ return VIRTIO_PCI_ISR_CONFIG;
}
static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
uint16_t vec __rte_unused)
{
- return VIRTIO_MSI_NO_VECTOR;
+ return 0;
+}
+
+static uint16_t
+virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
+ struct virtqueue *vq __rte_unused,
+ uint16_t vec)
+{
+ /* pretend we have done that */
+ return vec;
}
/* This function is to get the queue size, aka, number of descs, of a specified
@@ -212,7 +266,7 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
}
if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
- PMD_DRV_LOG(ERR, "failed to kick backend: %s\n",
+ PMD_DRV_LOG(ERR, "failed to kick backend: %s",
strerror(errno));
}
@@ -226,6 +280,7 @@ const struct virtio_pci_ops virtio_user_ops = {
.set_features = virtio_user_set_features,
.get_isr = virtio_user_get_isr,
.set_config_irq = virtio_user_set_config_irq,
+ .set_queue_irq = virtio_user_set_queue_irq,
.get_queue_num = virtio_user_get_queue_num,
.setup_queue = virtio_user_setup_queue,
.del_queue = virtio_user_del_queue,
@@ -243,6 +298,8 @@ static const char *valid_args[] = {
VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size"
VIRTIO_USER_ARG_QUEUE_SIZE,
+#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
+ VIRTIO_USER_ARG_INTERFACE_NAME,
NULL
};
@@ -259,6 +316,9 @@ get_string_arg(const char *key __rte_unused,
*(char **)extra_args = strdup(value);
+ if (!*(char **)extra_args)
+ return -ENOMEM;
+
return 0;
}
@@ -274,28 +334,24 @@ get_integer_arg(const char *key __rte_unused,
return 0;
}
+static struct rte_vdev_driver virtio_user_driver;
+
static struct rte_eth_dev *
-virtio_user_eth_dev_alloc(const char *name)
+virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
struct rte_eth_dev *eth_dev;
struct rte_eth_dev_data *data;
struct virtio_hw *hw;
struct virtio_user_dev *dev;
- eth_dev = rte_eth_dev_allocate(name);
+ eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*hw));
if (!eth_dev) {
PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
return NULL;
}
data = eth_dev->data;
-
- hw = rte_zmalloc(NULL, sizeof(*hw), 0);
- if (!hw) {
- PMD_INIT_LOG(ERR, "malloc virtio_hw failed");
- rte_eth_dev_release_port(eth_dev);
- return NULL;
- }
+ hw = eth_dev->data->dev_private;
dev = rte_zmalloc(NULL, sizeof(*dev), 0);
if (!dev) {
@@ -306,17 +362,17 @@ virtio_user_eth_dev_alloc(const char *name)
}
hw->port_id = data->port_id;
+ dev->port_id = data->port_id;
virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
- hw->use_msix = 0;
+ /*
+ * MSIX is required to enable LSC (see virtio_init_device).
+ * Here just pretend that we support msix.
+ */
+ hw->use_msix = 1;
hw->modern = 0;
hw->use_simple_rxtx = 0;
hw->virtio_user_dev = dev;
- data->dev_private = hw;
- data->numa_node = SOCKET_ID_ANY;
- data->kdrv = RTE_KDRV_NONE;
data->dev_flags = RTE_ETH_DEV_DETACHABLE;
- eth_dev->pci_dev = NULL;
- eth_dev->driver = NULL;
return eth_dev;
}
@@ -336,7 +392,7 @@ virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
* Returns 0 on success.
*/
static int
-virtio_user_pmd_probe(const char *name, const char *params)
+virtio_user_pmd_probe(struct rte_vdev_device *dev)
{
struct rte_kvargs *kvlist = NULL;
struct rte_eth_dev *eth_dev;
@@ -345,16 +401,11 @@ virtio_user_pmd_probe(const char *name, const char *params)
uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
char *path = NULL;
+ char *ifname = NULL;
char *mac_addr = NULL;
int ret = -1;
- if (!params || params[0] == '\0') {
- PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
- VIRTIO_USER_ARG_QUEUE_SIZE);
- goto end;
- }
-
- kvlist = rte_kvargs_parse(params, valid_args);
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args);
if (!kvlist) {
PMD_INIT_LOG(ERR, "error when parsing param");
goto end;
@@ -368,11 +419,27 @@ virtio_user_pmd_probe(const char *name, const char *params)
goto end;
}
} else {
- PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user\n",
+ PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
VIRTIO_USER_ARG_QUEUE_SIZE);
goto end;
}
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
+ if (is_vhost_user_by_type(path)) {
+ PMD_INIT_LOG(ERR,
+ "arg %s applies only to vhost-kernel backend",
+ VIRTIO_USER_ARG_INTERFACE_NAME);
+ goto end;
+ }
+
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
+ &get_string_arg, &ifname) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_INTERFACE_NAME);
+ goto end;
+ }
+ }
+
if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
&get_string_arg, &mac_addr) < 0) {
@@ -416,21 +483,34 @@ virtio_user_pmd_probe(const char *name, const char *params)
goto end;
}
- eth_dev = virtio_user_eth_dev_alloc(name);
- if (!eth_dev) {
- PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
+ if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
+ PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
+ VIRTIO_USER_ARG_QUEUES_NUM, queues,
+ VIRTIO_MAX_VIRTQUEUE_PAIRS);
goto end;
}
- hw = eth_dev->data->dev_private;
- if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
- queue_size, mac_addr) < 0) {
- PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
- virtio_user_eth_dev_free(eth_dev);
- goto end;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev = virtio_user_eth_dev_alloc(dev);
+ if (!eth_dev) {
+ PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
+ goto end;
+ }
+
+ hw = eth_dev->data->dev_private;
+ if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
+ queue_size, mac_addr, &ifname) < 0) {
+ PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
+ virtio_user_eth_dev_free(eth_dev);
+ goto end;
+ }
+ } else {
+ eth_dev = rte_eth_dev_attach_secondary(rte_vdev_device_name(dev));
+ if (!eth_dev)
+ goto end;
}
- /* previously called by rte_eal_pci_probe() for physical dev */
+ /* previously called by rte_pci_probe() for physical dev */
if (eth_virtio_dev_init(eth_dev) < 0) {
PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
virtio_user_eth_dev_free(eth_dev);
@@ -445,21 +525,25 @@ end:
free(path);
if (mac_addr)
free(mac_addr);
+ if (ifname)
+ free(ifname);
return ret;
}
/** Called by rte_eth_dev_detach() */
static int
-virtio_user_pmd_remove(const char *name)
+virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
+ const char *name;
struct rte_eth_dev *eth_dev;
struct virtio_hw *hw;
struct virtio_user_dev *dev;
- if (!name)
+ if (!vdev)
return -EINVAL;
- PMD_DRV_LOG(INFO, "Un-Initializing %s\n", name);
+ name = rte_vdev_device_name(vdev);
+ PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
eth_dev = rte_eth_dev_allocated(name);
if (!eth_dev)
return -ENODEV;
@@ -490,4 +574,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
"mac=<mac addr> "
"cq=<int> "
"queue_size=<int> "
- "queues=<int>");
+ "queues=<int> "
+ "iface=<string>");
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 7f60e3ef..9ad77b8a 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -38,17 +38,6 @@
#include "virtio_logs.h"
#include "virtio_pci.h"
-void
-virtqueue_disable_intr(struct virtqueue *vq)
-{
- /*
- * Set VRING_AVAIL_F_NO_INTERRUPT to hint host
- * not to interrupt when it consumes packets
- * Note: this is only considered a hint to the host
- */
- vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
-}
-
/*
* Two types of mbuf to be cleaned:
* 1) mbuf that has been consumed by backend but not used by virtio.
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index b1070e05..2e120861 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -71,8 +71,14 @@ struct rte_mbuf;
/**
* Return the physical address (or virtual address in case of
* virtio-user) of mbuf data buffer.
+ *
+ * The address is firstly casted to the word size (sizeof(uintptr_t))
+ * before casting it to uint64_t. This is to make it work with different
+ * combination of word size (64 bit and 32 bit) and virtio device
+ * (virtio-pci and virtio-user).
*/
-#define VIRTIO_MBUF_ADDR(mb, vq) (*(uint64_t *)((uintptr_t)(mb) + (vq)->offset))
+#define VIRTIO_MBUF_ADDR(mb, vq) \
+ ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
#else
#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_physaddr)
#endif
@@ -274,7 +280,21 @@ vring_desc_init(struct vring_desc *dp, uint16_t n)
/**
* Tell the backend not to interrupt us.
*/
-void virtqueue_disable_intr(struct virtqueue *vq);
+static inline void
+virtqueue_disable_intr(struct virtqueue *vq)
+{
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+/**
+ * Tell the backend to interrupt us.
+ */
+static inline void
+virtqueue_enable_intr(struct virtqueue *vq)
+{
+ vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+}
+
/**
* Dump virtqueue internal structures, for debug purpose only.
*/
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 23ff1da2..84356ae2 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -76,9 +76,4 @@ LIBABIVER := 1
SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_ethdev.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_net
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/vmxnet3/base/vmxnet3_defs.h b/drivers/net/vmxnet3/base/vmxnet3_defs.h
index 68ae8b6d..bfa9622d 100644
--- a/drivers/net/vmxnet3/base/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/base/vmxnet3_defs.h
@@ -109,6 +109,9 @@ typedef enum {
VMXNET3_CMD_STOP_EMULATION,
VMXNET3_CMD_LOAD_PLUGIN,
VMXNET3_CMD_ACTIVATE_VF,
+ VMXNET3_CMD_RESERVED3,
+ VMXNET3_CMD_RESERVED4,
+ VMXNET3_CMD_REGISTER_MEMREGS,
VMXNET3_CMD_FIRST_GET = 0xF00D0000,
VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
@@ -120,7 +123,9 @@ typedef enum {
VMXNET3_CMD_GET_DID_HI,
VMXNET3_CMD_GET_DEV_EXTRA_INFO,
VMXNET3_CMD_GET_CONF_INTR,
- VMXNET3_CMD_GET_ADAPTIVE_RING_INFO
+ VMXNET3_CMD_GET_ADAPTIVE_RING_INFO,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE,
+ VMXNET3_CMD_RESERVED5,
} Vmxnet3_Cmd;
/* Adaptive Ring Info Flags */
@@ -402,12 +407,25 @@ typedef union Vmxnet3_GenericDesc {
#define VMXNET3_RING_SIZE_ALIGN 32
#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
+/* Tx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_TXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_TXDATA_DESC_SIZE_MASK (VMXNET3_TXDATA_DESC_SIZE_ALIGN - 1)
+
+/* Rx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_RXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_RXDATA_DESC_SIZE_MASK (VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1)
+
/* Max ring size */
#define VMXNET3_TX_RING_MAX_SIZE 4096
#define VMXNET3_TC_RING_MAX_SIZE 4096
#define VMXNET3_RX_RING_MAX_SIZE 4096
#define VMXNET3_RC_RING_MAX_SIZE 8192
+#define VMXNET3_TXDATA_DESC_MIN_SIZE 128
+#define VMXNET3_TXDATA_DESC_MAX_SIZE 2048
+
+#define VMXNET3_RXDATA_DESC_MAX_SIZE 2048
+
/* a list of reasons for queue stop */
#define VMXNET3_ERR_NOEOP 0x80000000 /* cannot find the EOP desc of a pkt */
@@ -507,7 +525,9 @@ struct Vmxnet3_TxQueueConf {
__le32 compRingSize; /* # of comp desc */
__le32 ddLen; /* size of driver data */
uint8 intrIdx;
- uint8 _pad[7];
+ uint8 _pad[1];
+ __le16 txDataRingDescSize;
+ uint8 _pad2[4];
}
#include "vmware_pack_end.h"
Vmxnet3_TxQueueConf;
@@ -518,12 +538,14 @@ struct Vmxnet3_RxQueueConf {
__le64 rxRingBasePA[2];
__le64 compRingBasePA;
__le64 ddPA; /* driver data */
- __le64 reserved;
+ __le64 rxDataRingBasePA;
__le32 rxRingSize[2]; /* # of rx desc */
__le32 compRingSize; /* # of rx comp desc */
__le32 ddLen; /* size of driver data */
uint8 intrIdx;
- uint8 _pad[7];
+ uint8 _pad1[1];
+ __le16 rxDataRingDescSize; /* size of rx data ring buffer */
+ uint8 _pad2[4];
}
#include "vmware_pack_end.h"
Vmxnet3_RxQueueConf;
@@ -695,12 +717,65 @@ Vmxnet3_RxQueueDesc;
typedef
#include "vmware_pack_begin.h"
+struct Vmxnet3_SetPolling {
+ uint8 enablePolling;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_SetPolling;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_MemoryRegion {
+ __le64 startPA;
+ __le32 length;
+ __le16 txQueueBits; /* bit n corresponding to tx queue n */
+ __le16 rxQueueBits; /* bit n corresponding to rx queue n */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_MemoryRegion;
+
+#define MAX_MEMORY_REGION_PER_QUEUE 16
+#define MAX_MEMORY_REGION_PER_DEVICE 256
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_MemRegs {
+ __le16 numRegs;
+ __le16 pad[3];
+ Vmxnet3_MemoryRegion memRegs[1];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_MemRegs;
+
+/*
+ * If the command data <= 16 bytes, use the shared memory direcly.
+ * Otherwise, use the variable length configuration descriptor.
+ */
+typedef
+#include "vmware_pack_begin.h"
+union Vmxnet3_CmdInfo {
+ Vmxnet3_VariableLenConfDesc varConf;
+ Vmxnet3_SetPolling setPolling;
+ __le64 data[2];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_CmdInfo;
+
+typedef
+#include "vmware_pack_begin.h"
struct Vmxnet3_DriverShared {
__le32 magic;
__le32 pad; /* make devRead start at 64-bit boundaries */
Vmxnet3_DSDevRead devRead;
__le32 ecr;
- __le32 reserved[5];
+ __le32 reserved;
+
+ union {
+ __le32 reserved1[4];
+ Vmxnet3_CmdInfo cmdInfo; /* only valid in the context of executing the
+ * relevant command
+ */
+ } cu;
}
#include "vmware_pack_end.h"
Vmxnet3_DriverShared;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 8bb13e52..98252bb6 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -56,6 +56,7 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_atomic.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
@@ -69,6 +70,8 @@
#define PROCESS_SYS_EVENTS 0
+#define VMXNET3_TX_MAX_SEG UINT8_MAX
+
static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
@@ -138,7 +141,7 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%d_%s",
- dev->driver->pci_drv.driver.name, dev->data->port_id, post_string);
+ dev->data->drv_name, dev->data->port_id, post_string);
mz = rte_memzone_lookup(z_name);
if (!reuse) {
@@ -223,6 +226,24 @@ vmxnet3_disable_intr(struct vmxnet3_hw *hw)
}
/*
+ * Gets tx data ring descriptor size.
+ */
+static uint16_t
+eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
+{
+ uint16 txdata_desc_size;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
+ txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
+ return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
+ txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
+ txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
+ sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
+}
+
+/*
* It returns 0 on success.
*/
static int
@@ -237,7 +258,8 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
- pci_dev = eth_dev->pci_dev;
+ eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
/*
* for secondary processes, we don't initialize any further as primary
@@ -247,6 +269,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
return 0;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
@@ -261,13 +284,26 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
/* Check h/w version compatibility with driver. */
ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
- if (ver & 0x1)
- VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1);
- else {
- PMD_INIT_LOG(ERR, "Incompatible h/w version, should be 0x1");
+
+ if (ver & (1 << VMXNET3_REV_3)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_3);
+ hw->version = VMXNET3_REV_3 + 1;
+ } else if (ver & (1 << VMXNET3_REV_2)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_2);
+ hw->version = VMXNET3_REV_2 + 1;
+ } else if (ver & (1 << VMXNET3_REV_1)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_1);
+ hw->version = VMXNET3_REV_1 + 1;
+ } else {
+ PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
return -EIO;
}
+ PMD_INIT_LOG(DEBUG, "Using device version %d\n", hw->version);
+
/* Check UPT version compatibility with driver. */
ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
@@ -307,6 +343,14 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
/* allow untagged pkts */
VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
+ hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
+ eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);
+
+ hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
+ VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
+ RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
+ hw->rxdata_desc_size);
+
return 0;
}
@@ -326,6 +370,7 @@ eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
+ eth_dev->tx_pkt_prepare = NULL;
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
@@ -333,16 +378,23 @@ eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
-static struct eth_driver rte_vmxnet3_pmd = {
- .pci_drv = {
- .id_table = pci_id_vmxnet3_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_vmxnet3_dev_init,
- .eth_dev_uninit = eth_vmxnet3_dev_uninit,
- .dev_private_size = sizeof(struct vmxnet3_hw),
+static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
+}
+
+static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
+}
+
+static struct rte_pci_driver rte_vmxnet3_pmd = {
+ .id_table = pci_id_vmxnet3_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_vmxnet3_pci_probe,
+ .remove = eth_vmxnet3_pci_remove,
};
static int
@@ -449,6 +501,92 @@ vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
}
static int
+vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ Vmxnet3_DriverShared *shared = hw->shared;
+ Vmxnet3_CmdInfo *cmdInfo;
+ struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
+ uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
+ uint32_t num, i, j, size;
+
+ if (hw->memRegsPA == 0) {
+ const struct rte_memzone *mz;
+
+ size = sizeof(Vmxnet3_MemRegs) +
+ (VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
+ sizeof(Vmxnet3_MemoryRegion);
+
+ mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
+ 1);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ hw->memRegs = mz->addr;
+ hw->memRegsPA = mz->phys_addr;
+ }
+
+ num = hw->num_rx_queues;
+
+ for (i = 0; i < num; i++) {
+ vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
+
+ mp[i] = rxq->mp;
+ index[i] = 1 << i;
+ }
+
+ /*
+ * The same mempool could be used by multiple queues. In such a case,
+ * remove duplicate mempool entries. Only one entry is kept with
+ * bitmask indicating queues that are using this mempool.
+ */
+ for (i = 1; i < num; i++) {
+ for (j = 0; j < i; j++) {
+ if (mp[i] == mp[j]) {
+ mp[i] = NULL;
+ index[j] |= 1 << i;
+ break;
+ }
+ }
+ }
+
+ j = 0;
+ for (i = 0; i < num; i++) {
+ if (mp[i] == NULL)
+ continue;
+
+ Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];
+
+ mr->startPA =
+ (uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->phys_addr;
+ mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
+ STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
+ mr->txQueueBits = index[i];
+ mr->rxQueueBits = index[i];
+
+ PMD_INIT_LOG(INFO,
+ "index: %u startPA: %" PRIu64 " length: %u, "
+ "rxBits: %x",
+ j, mr->startPA, mr->length, mr->rxQueueBits);
+ j++;
+ }
+ hw->memRegs->numRegs = j;
+ PMD_INIT_LOG(INFO, "numRegs: %u", j);
+
+ size = sizeof(Vmxnet3_MemRegs) +
+ (j - 1) * sizeof(Vmxnet3_MemoryRegion);
+
+ cmdInfo = &shared->cu.cmdInfo;
+ cmdInfo->varConf.confVer = 1;
+ cmdInfo->varConf.confLen = size;
+ cmdInfo->varConf.confPA = hw->memRegsPA;
+
+ return 0;
+}
+
+static int
vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
{
struct rte_eth_conf port_conf = dev->data->dev_conf;
@@ -497,6 +635,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
tqd->conf.txRingSize = txq->cmd_ring.size;
tqd->conf.compRingSize = txq->comp_ring.size;
tqd->conf.dataRingSize = txq->data_ring.size;
+ tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
tqd->conf.intrIdx = txq->comp_ring.intr_idx;
tqd->status.stopped = TRUE;
tqd->status.error = 0;
@@ -515,6 +654,10 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
rqd->conf.compRingSize = rxq->comp_ring.size;
rqd->conf.intrIdx = rxq->comp_ring.intr_idx;
+ if (VMXNET3_VERSION_GE_3(hw)) {
+ rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
+ rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
+ }
rqd->status.stopped = TRUE;
rqd->status.error = 0;
memset(&rqd->stats, 0, sizeof(rqd->stats));
@@ -583,6 +726,20 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
return -EINVAL;
}
+ /* Setup memory region for rx buffers */
+ ret = vmxnet3_dev_setup_memreg(dev);
+ if (ret == 0) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_REGISTER_MEMREGS);
+ ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+ if (ret != 0)
+ PMD_INIT_LOG(DEBUG,
+ "Failed in setup memory region cmd\n");
+ ret = 0;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
+ }
+
/* Disable interrupts */
vmxnet3_disable_intr(hw);
@@ -596,6 +753,8 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
return ret;
}
+ hw->adapter_stopped = FALSE;
+
/* Setting proper Rx Mode and issue Rx Mode Update command */
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
@@ -706,13 +865,16 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
}
static void
-vmxnet3_dev_info_get(__rte_unused struct rte_eth_dev *dev,
+vmxnet3_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
+ dev_info->speed_capa = ETH_LINK_SPEED_10G;
dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
@@ -728,6 +890,8 @@ vmxnet3_dev_info_get(__rte_unused struct rte_eth_dev *dev,
.nb_max = VMXNET3_TX_RING_MAX_SIZE,
.nb_min = VMXNET3_DEF_TX_RING_SIZE,
.nb_align = 1,
+ .nb_seg_max = VMXNET3_TX_MAX_SEG,
+ .nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
};
dev_info->rx_offload_capa =
@@ -771,7 +935,7 @@ vmxnet3_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
- struct rte_eth_link old, link;
+ struct rte_eth_link old = { 0 }, link;
uint32_t ret;
/* Link status doesn't change for stopped dev */
@@ -960,5 +1124,6 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
}
#endif
-RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 7d3b11ee..7a032629 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -34,6 +34,8 @@
#ifndef _VMXNET3_ETHDEV_H_
#define _VMXNET3_ETHDEV_H_
+#include <rte_io.h>
+
#define VMXNET3_MAX_MAC_ADDRS 1
/* UPT feature to negotiate */
@@ -99,6 +101,11 @@ struct vmxnet3_hw {
uint8_t num_rx_queues;
uint8_t bufs_per_pkt;
+ uint8_t version;
+
+ uint16_t txdata_desc_size; /* tx data ring buffer size */
+ uint16_t rxdata_desc_size; /* rx data ring buffer size */
+
Vmxnet3_TxQueueDesc *tqd_start; /* start address of all tx queue desc */
Vmxnet3_RxQueueDesc *rqd_start; /* start address of all rx queue desc */
@@ -112,15 +119,24 @@ struct vmxnet3_hw {
uint64_t rss_confPA;
vmxnet3_mf_table_t *mf_table;
uint32_t shadow_vfta[VMXNET3_VFT_SIZE];
+ Vmxnet3_MemRegs *memRegs;
+ uint64_t memRegsPA;
#define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t))
};
+#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
+#define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */
+#define VMXNET3_REV_1 0 /* Vmxnet3 Rev. 1 */
+
+#define VMXNET3_VERSION_GE_3(hw) ((hw)->version >= VMXNET3_REV_3 + 1)
+#define VMXNET3_VERSION_GE_2(hw) ((hw)->version >= VMXNET3_REV_2 + 1)
+
#define VMXNET3_GET_ADDR_LO(reg) ((uint32_t)(reg))
#define VMXNET3_GET_ADDR_HI(reg) ((uint32_t)(((uint64_t)(reg)) >> 32))
/* Config space read/writes */
-#define VMXNET3_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define VMXNET3_PCI_REG(reg) rte_read32(reg)
static inline uint32_t
vmxnet3_read_addr(volatile void *addr)
@@ -128,9 +144,7 @@ vmxnet3_read_addr(volatile void *addr)
return VMXNET3_PCI_REG(addr);
}
-#define VMXNET3_PCI_REG_WRITE(reg, value) do { \
- VMXNET3_PCI_REG((reg)) = (value); \
-} while(0)
+#define VMXNET3_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))
#define VMXNET3_PCI_BAR0_REG_ADDR(hw, reg) \
((volatile uint32_t *)((char *)(hw)->hw_addr0 + (reg)))
@@ -146,6 +160,20 @@ vmxnet3_read_addr(volatile void *addr)
#define VMXNET3_WRITE_BAR1_REG(hw, reg, value) \
VMXNET3_PCI_REG_WRITE(VMXNET3_PCI_BAR1_REG_ADDR((hw), (reg)), (value))
+static inline uint8_t
+vmxnet3_get_ring_idx(struct vmxnet3_hw *hw, uint32 rqID)
+{
+ return (rqID >= hw->num_rx_queues &&
+ rqID < 2 * hw->num_rx_queues) ? 1 : 0;
+}
+
+static inline bool
+vmxnet3_rx_data_ring(struct vmxnet3_hw *hw, uint32 rqID)
+{
+ return (rqID >= 2 * hw->num_rx_queues &&
+ rqID < 3 * hw->num_rx_queues);
+}
+
/*
* RX/TX function prototypes
*/
@@ -171,5 +199,7 @@ uint16_t vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t vmxnet3_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
#endif /* _VMXNET3_ETHDEV_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h
index b50d2b00..d2e8323b 100644
--- a/drivers/net/vmxnet3/vmxnet3_ring.h
+++ b/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -42,6 +42,9 @@
#define VMXNET3_DEF_TX_RING_SIZE 512
#define VMXNET3_DEF_RX_RING_SIZE 128
+/* Default rx data ring desc size */
+#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
+
#define VMXNET3_SUCCESS 0
#define VMXNET3_FAIL -1
@@ -138,9 +141,11 @@ typedef struct vmxnet3_tx_queue {
uint32_t qid;
struct Vmxnet3_TxQueueDesc *shared;
struct vmxnet3_txq_stats stats;
+ const struct rte_memzone *mz;
bool stopped;
uint16_t queue_id; /**< Device TX queue index. */
uint8_t port_id; /**< Device port identifier. */
+ uint16_t txdata_desc_size;
} vmxnet3_tx_queue_t;
struct vmxnet3_rxq_stats {
@@ -150,17 +155,28 @@ struct vmxnet3_rxq_stats {
uint64_t rx_buf_alloc_failure;
};
+struct vmxnet3_rx_data_ring {
+ uint8_t *base;
+ uint64_t basePA;
+ uint32_t size;
+};
+
typedef struct vmxnet3_rx_queue {
struct rte_mempool *mp;
struct vmxnet3_hw *hw;
struct vmxnet3_cmd_ring cmd_ring[VMXNET3_RX_CMDRING_SIZE];
struct vmxnet3_comp_ring comp_ring;
+ struct vmxnet3_rx_data_ring data_ring;
+ uint16_t data_desc_size;
uint32_t qid1;
uint32_t qid2;
+ /* rqID in RCD for buffer from data ring */
+ uint32_t data_ring_qid;
Vmxnet3_RxQueueDesc *shared;
struct rte_mbuf *start_seg;
struct rte_mbuf *last_seg;
struct vmxnet3_rxq_stats stats;
+ const struct rte_memzone *mz;
bool stopped;
uint16_t queue_id; /**< Device RX queue index. */
uint8_t port_id; /**< Device port identifier. */
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 93db10fb..e865c675 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -69,6 +69,7 @@
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
+#include <rte_net.h>
#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"
@@ -76,6 +77,14 @@
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"
+#define VMXNET3_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define VMXNET3_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
+
static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
@@ -192,6 +201,8 @@ vmxnet3_dev_tx_queue_release(void *txq)
vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
/* Release the cmd_ring */
vmxnet3_cmd_ring_release(&tq->cmd_ring);
+ /* Release the memzone */
+ rte_memzone_free(tq->mz);
}
}
@@ -209,6 +220,9 @@ vmxnet3_dev_rx_queue_release(void *rxq)
/* Release both the cmd_rings */
for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
+
+ /* Release the memzone */
+ rte_memzone_free(rq->mz);
}
}
@@ -235,7 +249,7 @@ vmxnet3_dev_tx_queue_reset(void *txq)
size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
- size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
+ size += tq->txdata_desc_size * data_ring->size;
memset(ring->base, 0, size);
}
@@ -245,8 +259,10 @@ vmxnet3_dev_rx_queue_reset(void *rxq)
{
int i;
vmxnet3_rx_queue_t *rq = rxq;
+ struct vmxnet3_hw *hw = rq->hw;
struct vmxnet3_cmd_ring *ring0, *ring1;
struct vmxnet3_comp_ring *comp_ring;
+ struct vmxnet3_rx_data_ring *data_ring = &rq->data_ring;
int size;
if (rq != NULL) {
@@ -271,6 +287,8 @@ vmxnet3_dev_rx_queue_reset(void *rxq)
size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ if (VMXNET3_VERSION_GE_3(hw) && rq->data_desc_size)
+ size += rq->data_desc_size * data_ring->size;
memset(ring0->base, 0, size);
}
@@ -350,6 +368,53 @@ vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
}
uint16_t
+vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int32_t ret;
+ uint32_t i;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i != nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /* Non-TSO packet cannot occupy more than
+ * VMXNET3_MAX_TXD_PER_PKT TX descriptors.
+ */
+ if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
+ m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ /* check that only supported TX offloads are requested. */
+ if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
+ (ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_SCTP_CKSUM) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -415,10 +480,13 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
if (txm->nb_segs == 1 &&
- rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
+ rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
struct Vmxnet3_TxDataDesc *tdd;
- tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
+ tdd = (struct Vmxnet3_TxDataDesc *)
+ ((uint8 *)txq->data_ring.base +
+ txq->cmd_ring.next2fill *
+ txq->txdata_desc_size);
copy_size = rte_pktmbuf_pkt_len(txm);
rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
}
@@ -435,12 +503,15 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* maximum size of mbuf segment size.
*/
gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
- if (copy_size)
- gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
- txq->cmd_ring.next2fill *
- sizeof(struct Vmxnet3_TxDataDesc));
- else
+ if (copy_size) {
+ uint64 offset = txq->cmd_ring.next2fill *
+ txq->txdata_desc_size;
+ gdesc->txd.addr =
+ rte_cpu_to_le_64(txq->data_ring.basePA +
+ offset);
+ } else {
gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
+ }
gdesc->dword[2] = dw2 | m_seg->data_len;
gdesc->dword[3] = 0;
@@ -696,7 +767,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
idx = rcd->rxdIdx;
- ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
+ ring_idx = vmxnet3_get_ring_idx(hw, rcd->rqID);
rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
RTE_SET_USED(rxd); /* used only for assert when enabled */
rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
@@ -762,6 +833,15 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
goto rcd_done;
}
+ if (vmxnet3_rx_data_ring(hw, rcd->rqID)) {
+ uint8_t *rdd = rxq->data_ring.base +
+ idx * rxq->data_desc_size;
+
+ RTE_ASSERT(VMXNET3_VERSION_GE_3(hw));
+ rte_memcpy(rte_pktmbuf_mtod(rxm, char *),
+ rdd, rcd->len);
+ }
+
rxq->start_seg = rxm;
vmxnet3_rx_offload(rcd, rxm);
} else {
@@ -816,30 +896,6 @@ rcd_done:
return nb_rx;
}
-/*
- * Create memzone for device rings. malloc can't be used as the physical address is
- * needed. If the memzone is already created, then this function returns a ptr
- * to the old one.
- */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.driver.name, ring_name,
- dev->data->port_id, queue_id);
-
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, VMXNET3_RING_BA_ALIGN);
-}
-
int
vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -876,6 +932,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->hw = hw;
txq->qid = queue_idx;
txq->stopped = TRUE;
+ txq->txdata_desc_size = hw->txdata_desc_size;
ring = &txq->cmd_ring;
comp_ring = &txq->comp_ring;
@@ -905,13 +962,15 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
- size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
+ size += txq->txdata_desc_size * data_ring->size;
- mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
+ mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
+ VMXNET3_RING_BA_ALIGN, socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
return -ENOMEM;
}
+ txq->mz = mz;
memset(mz->addr, 0, mz->len);
/* cmd_ring initialization */
@@ -955,6 +1014,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct vmxnet3_hw *hw = dev->data->dev_private;
struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
struct vmxnet3_comp_ring *comp_ring;
+ struct vmxnet3_rx_data_ring *data_ring;
int size;
uint8_t i;
char mem_name[32];
@@ -975,11 +1035,14 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->hw = hw;
rxq->qid1 = queue_idx;
rxq->qid2 = queue_idx + hw->num_rx_queues;
+ rxq->data_ring_qid = queue_idx + 2 * hw->num_rx_queues;
+ rxq->data_desc_size = hw->rxdata_desc_size;
rxq->stopped = TRUE;
ring0 = &rxq->cmd_ring[0];
ring1 = &rxq->cmd_ring[1];
comp_ring = &rxq->comp_ring;
+ data_ring = &rxq->data_ring;
/* Rx vmxnet rings length should be between 256-4096 */
if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
@@ -995,6 +1058,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
comp_ring->size = ring0->size + ring1->size;
+ data_ring->size = ring0->size;
/* Rx vmxnet rings structure initialization */
ring0->next2fill = 0;
@@ -1008,12 +1072,16 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size)
+ size += rxq->data_desc_size * data_ring->size;
- mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
+ mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
+ VMXNET3_RING_BA_ALIGN, socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
return -ENOMEM;
}
+ rxq->mz = mz;
memset(mz->addr, 0, mz->len);
/* cmd_ring0 initialization */
@@ -1029,6 +1097,14 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
ring1->size;
+ /* data_ring initialization */
+ if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size) {
+ data_ring->base =
+ (uint8_t *)(comp_ring->base + comp_ring->size);
+ data_ring->basePA = comp_ring->basePA +
+ sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ }
+
/* cmd_ring0-cmd_ring1 buf_info allocation */
for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
diff --git a/drivers/net/xenvirt/Makefile b/drivers/net/xenvirt/Makefile
index 1d05b71b..8b4b8f03 100644
--- a/drivers/net/xenvirt/Makefile
+++ b/drivers/net/xenvirt/Makefile
@@ -54,10 +54,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += rte_eth_xenvirt.c rte_mempool_gntalloc.
#
SYMLINK-y-include += rte_eth_xenvirt.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_net
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_cmdline
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.c b/drivers/net/xenvirt/rte_eth_xenvirt.c
index c08a0568..7bd29fae 100644
--- a/drivers/net/xenvirt/rte_eth_xenvirt.c
+++ b/drivers/net/xenvirt/rte_eth_xenvirt.c
@@ -70,8 +70,6 @@
/* virtio_idx is increased after new device is created.*/
static int virtio_idx = 0;
-static const char *drivername = "xen virtio PMD";
-
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
@@ -331,13 +329,11 @@ eth_dev_info(struct rte_eth_dev *dev,
struct pmd_internals *internals = dev->data->dev_private;
RTE_SET_USED(internals);
- dev_info->driver_name = drivername;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)2048;
dev_info->max_rx_queues = (uint16_t)1;
dev_info->max_tx_queues = (uint16_t)1;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
}
static void
@@ -620,6 +616,7 @@ enum dev_action {
DEV_ATTACH
};
+static struct rte_vdev_driver pmd_xenvirt_drv;
static int
eth_dev_xenvirt_create(const char *name, const char *params,
@@ -673,10 +670,9 @@ eth_dev_xenvirt_create(const char *name, const char *params,
eth_dev->data = data;
eth_dev->dev_ops = &ops;
- eth_dev->data->dev_flags = RTE_PCI_DRV_DETACHABLE;
+ eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
eth_dev->data->kdrv = RTE_KDRV_NONE;
- eth_dev->data->drv_name = drivername;
- eth_dev->driver = NULL;
+ eth_dev->data->drv_name = pmd_xenvirt_drv.driver.name;
eth_dev->data->numa_node = numa_node;
eth_dev->rx_pkt_burst = eth_xenvirt_rx;
@@ -729,7 +725,7 @@ eth_dev_xenvirt_free(const char *name, const unsigned numa_node)
/*TODO: Support multiple process model */
static int
-rte_pmd_xenvirt_probe(const char *name, const char *params)
+rte_pmd_xenvirt_probe(struct rte_vdev_device *dev)
{
if (virtio_idx == 0) {
if (xenstore_init() != 0) {
@@ -741,14 +737,15 @@ rte_pmd_xenvirt_probe(const char *name, const char *params)
return -1;
}
}
- eth_dev_xenvirt_create(name, params, rte_socket_id(), DEV_CREATE);
+ eth_dev_xenvirt_create(rte_vdev_device_name(dev),
+ rte_vdev_device_args(dev), rte_socket_id(), DEV_CREATE);
return 0;
}
static int
-rte_pmd_xenvirt_remove(const char *name)
+rte_pmd_xenvirt_remove(struct rte_vdev_device *dev)
{
- eth_dev_xenvirt_free(name, rte_socket_id());
+ eth_dev_xenvirt_free(rte_vdev_device_name(dev), rte_socket_id());
if (virtio_idx == 0) {
if (xenstore_uninit() != 0)
diff --git a/examples/Makefile b/examples/Makefile
index d49c7f29..6298626b 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -40,11 +40,9 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bond
DIRS-y += cmdline
DIRS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += distributor
-ifneq ($(ICP_ROOT),)
-DIRS-y += dpdk_qat
-endif
DIRS-y += ethtool
DIRS-y += exception_path
+DIRS-$(CONFIG_RTE_LIBRTE_EFD) += server_node_efd
DIRS-y += helloworld
DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += ip_pipeline
ifeq ($(CONFIG_RTE_LIBRTE_LPM),y)
@@ -77,6 +75,9 @@ DIRS-$(CONFIG_RTE_LIBRTE_LPM) += load_balancer
DIRS-y += multi_process
DIRS-y += netmap_compat/bridge
DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += packet_ordering
+ifeq ($(CONFIG_RTE_ARCH_X86_64),y)
+DIRS-y += performance-thread
+endif
DIRS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ptpclient
DIRS-$(CONFIG_RTE_LIBRTE_METER) += qos_meter
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched
diff --git a/examples/bond/Makefile b/examples/bond/Makefile
index 626d79d9..ae4cb6e1 100644
--- a/examples/bond/Makefile
+++ b/examples/bond/Makefile
@@ -54,4 +54,8 @@ endif
CFLAGS += -O3
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
+LDLIBS += -lrte_pmd_bond
+endif
+
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/bond/main.c b/examples/bond/main.c
index 6402c6b3..9a4ec807 100644
--- a/examples/bond/main.c
+++ b/examples/bond/main.c
@@ -160,7 +160,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 537cee1f..8071f919 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -1,8 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -45,33 +44,65 @@
#include <rte_prefetch.h>
#include <rte_distributor.h>
-#define RX_RING_SIZE 256
+#define RX_RING_SIZE 512
#define TX_RING_SIZE 512
#define NUM_MBUFS ((64*1024)-1)
-#define MBUF_CACHE_SIZE 250
-#define BURST_SIZE 32
-#define RTE_RING_SZ 1024
+#define MBUF_CACHE_SIZE 128
+#define BURST_SIZE 64
+#define SCHED_RX_RING_SZ 8192
+#define SCHED_TX_RING_SZ 65536
+#define BURST_SIZE_TX 32
#define RTE_LOGTYPE_DISTRAPP RTE_LOGTYPE_USER1
+#define ANSI_COLOR_RED "\x1b[31m"
+#define ANSI_COLOR_RESET "\x1b[0m"
+
/* mask of enabled ports */
static uint32_t enabled_port_mask;
volatile uint8_t quit_signal;
volatile uint8_t quit_signal_rx;
+volatile uint8_t quit_signal_dist;
+volatile uint8_t quit_signal_work;
static volatile struct app_stats {
struct {
uint64_t rx_pkts;
uint64_t returned_pkts;
uint64_t enqueued_pkts;
+ uint64_t enqdrop_pkts;
} rx __rte_cache_aligned;
+ int pad1 __rte_cache_aligned;
+
+ struct {
+ uint64_t in_pkts;
+ uint64_t ret_pkts;
+ uint64_t sent_pkts;
+ uint64_t enqdrop_pkts;
+ } dist __rte_cache_aligned;
+ int pad2 __rte_cache_aligned;
struct {
uint64_t dequeue_pkts;
uint64_t tx_pkts;
+ uint64_t enqdrop_pkts;
} tx __rte_cache_aligned;
+ int pad3 __rte_cache_aligned;
+
+ uint64_t worker_pkts[64] __rte_cache_aligned;
+
+ int pad4 __rte_cache_aligned;
+
+ uint64_t worker_bursts[64][8] __rte_cache_aligned;
+
+ int pad5 __rte_cache_aligned;
+
+ uint64_t port_rx_pkts[64] __rte_cache_aligned;
+ uint64_t port_tx_pkts[64] __rte_cache_aligned;
} app_stats;
+struct app_stats prev_app_stats;
+
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
@@ -93,6 +124,8 @@ struct output_buffer {
struct rte_mbuf *mbufs[BURST_SIZE];
};
+static void print_stats(void);
+
/*
* Initialises a given port using global settings and with the rx buffers
* coming from the mbuf_pool passed as parameter
@@ -134,7 +167,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_link link;
rte_eth_link_get_nowait(port, &link);
- if (!link.link_status) {
+ while (!link.link_status) {
+ printf("Waiting for Link up on port %"PRIu8"\n", port);
sleep(1);
rte_eth_link_get_nowait(port, &link);
}
@@ -161,40 +195,18 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
struct lcore_params {
unsigned worker_id;
struct rte_distributor *d;
- struct rte_ring *r;
+ struct rte_ring *rx_dist_ring;
+ struct rte_ring *dist_tx_ring;
struct rte_mempool *mem_pool;
};
static int
-quit_workers(struct rte_distributor *d, struct rte_mempool *p)
-{
- const unsigned num_workers = rte_lcore_count() - 2;
- unsigned i;
- struct rte_mbuf *bufs[num_workers];
-
- if (rte_mempool_get_bulk(p, (void *)bufs, num_workers) != 0) {
- printf("line %d: Error getting mbufs from pool\n", __LINE__);
- return -1;
- }
-
- for (i = 0; i < num_workers; i++)
- bufs[i]->hash.rss = i << 1;
-
- rte_distributor_process(d, bufs, num_workers);
- rte_mempool_put_bulk(p, (void *)bufs, num_workers);
-
- return 0;
-}
-
-static int
lcore_rx(struct lcore_params *p)
{
- struct rte_distributor *d = p->d;
- struct rte_mempool *mem_pool = p->mem_pool;
- struct rte_ring *r = p->r;
const uint8_t nb_ports = rte_eth_dev_count();
const int socket_id = rte_socket_id();
uint8_t port;
+ struct rte_mbuf *bufs[BURST_SIZE*2];
for (port = 0; port < nb_ports; port++) {
/* skip ports that are not enabled */
@@ -218,7 +230,6 @@ lcore_rx(struct lcore_params *p)
port = 0;
continue;
}
- struct rte_mbuf *bufs[BURST_SIZE*2];
const uint16_t nb_rx = rte_eth_rx_burst(port, 0, bufs,
BURST_SIZE);
if (unlikely(nb_rx == 0)) {
@@ -228,9 +239,15 @@ lcore_rx(struct lcore_params *p)
}
app_stats.rx.rx_pkts += nb_rx;
- rte_distributor_process(d, bufs, nb_rx);
- const uint16_t nb_ret = rte_distributor_returned_pkts(d,
- bufs, BURST_SIZE*2);
+/*
+ * You can run the distributor on the rx core with this code. Returned
+ * packets are then send straight to the tx core.
+ */
+#if 0
+ rte_distributor_process(d, bufs, nb_rx);
+ const uint16_t nb_ret = rte_distributor_returned_pktsd,
+ bufs, BURST_SIZE*2);
+
app_stats.rx.returned_pkts += nb_ret;
if (unlikely(nb_ret == 0)) {
if (++port == nb_ports)
@@ -238,10 +255,26 @@ lcore_rx(struct lcore_params *p)
continue;
}
- uint16_t sent = rte_ring_enqueue_burst(r, (void *)bufs, nb_ret);
+ struct rte_ring *tx_ring = p->dist_tx_ring;
+ uint16_t sent = rte_ring_enqueue_burst(tx_ring,
+ (void *)bufs, nb_ret, NULL);
+#else
+ uint16_t nb_ret = nb_rx;
+ /*
+ * Swap the following two lines if you want the rx traffic
+ * to go directly to tx, no distribution.
+ */
+ struct rte_ring *out_ring = p->rx_dist_ring;
+ /* struct rte_ring *out_ring = p->dist_tx_ring; */
+
+ uint16_t sent = rte_ring_enqueue_burst(out_ring,
+ (void *)bufs, nb_ret, NULL);
+#endif
+
app_stats.rx.enqueued_pkts += sent;
if (unlikely(sent < nb_ret)) {
- RTE_LOG(DEBUG, DISTRAPP,
+ app_stats.rx.enqdrop_pkts += nb_ret - sent;
+ RTE_LOG_DP(DEBUG, DISTRAPP,
"%s:Packet loss due to full ring\n", __func__);
while (sent < nb_ret)
rte_pktmbuf_free(bufs[sent++]);
@@ -249,33 +282,21 @@ lcore_rx(struct lcore_params *p)
if (++port == nb_ports)
port = 0;
}
- rte_distributor_process(d, NULL, 0);
- /* flush distributor to bring to known state */
- rte_distributor_flush(d);
/* set worker & tx threads quit flag */
+ printf("\nCore %u exiting rx task.\n", rte_lcore_id());
quit_signal = 1;
- /*
- * worker threads may hang in get packet as
- * distributor process is not running, just make sure workers
- * get packets till quit_signal is actually been
- * received and they gracefully shutdown
- */
- if (quit_workers(d, mem_pool) != 0)
- return -1;
- /* rx thread should quit at last */
return 0;
}
static inline void
flush_one_port(struct output_buffer *outbuf, uint8_t outp)
{
- unsigned nb_tx = rte_eth_tx_burst(outp, 0, outbuf->mbufs,
- outbuf->count);
- app_stats.tx.tx_pkts += nb_tx;
+ unsigned int nb_tx = rte_eth_tx_burst(outp, 0,
+ outbuf->mbufs, outbuf->count);
+ app_stats.tx.tx_pkts += outbuf->count;
if (unlikely(nb_tx < outbuf->count)) {
- RTE_LOG(DEBUG, DISTRAPP,
- "%s:Packet loss with tx_burst\n", __func__);
+ app_stats.tx.enqdrop_pkts += outbuf->count - nb_tx;
do {
rte_pktmbuf_free(outbuf->mbufs[nb_tx]);
} while (++nb_tx < outbuf->count);
@@ -287,6 +308,7 @@ static inline void
flush_all_ports(struct output_buffer *tx_buffers, uint8_t nb_ports)
{
uint8_t outp;
+
for (outp = 0; outp < nb_ports; outp++) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << outp)) == 0)
@@ -299,6 +321,58 @@ flush_all_ports(struct output_buffer *tx_buffers, uint8_t nb_ports)
}
}
+
+
+static int
+lcore_distributor(struct lcore_params *p)
+{
+ struct rte_ring *in_r = p->rx_dist_ring;
+ struct rte_ring *out_r = p->dist_tx_ring;
+ struct rte_mbuf *bufs[BURST_SIZE * 4];
+ struct rte_distributor *d = p->d;
+
+ printf("\nCore %u acting as distributor core.\n", rte_lcore_id());
+ while (!quit_signal_dist) {
+ const uint16_t nb_rx = rte_ring_dequeue_burst(in_r,
+ (void *)bufs, BURST_SIZE*1, NULL);
+ if (nb_rx) {
+ app_stats.dist.in_pkts += nb_rx;
+
+ /* Distribute the packets */
+ rte_distributor_process(d, bufs, nb_rx);
+ /* Handle Returns */
+ const uint16_t nb_ret =
+ rte_distributor_returned_pkts(d,
+ bufs, BURST_SIZE*2);
+
+ if (unlikely(nb_ret == 0))
+ continue;
+ app_stats.dist.ret_pkts += nb_ret;
+
+ uint16_t sent = rte_ring_enqueue_burst(out_r,
+ (void *)bufs, nb_ret, NULL);
+ app_stats.dist.sent_pkts += sent;
+ if (unlikely(sent < nb_ret)) {
+ app_stats.dist.enqdrop_pkts += nb_ret - sent;
+ RTE_LOG(DEBUG, DISTRAPP,
+ "%s:Packet loss due to full out ring\n",
+ __func__);
+ while (sent < nb_ret)
+ rte_pktmbuf_free(bufs[sent++]);
+ }
+ }
+ }
+ printf("\nCore %u exiting distributor task.\n", rte_lcore_id());
+ quit_signal_work = 1;
+
+ rte_distributor_flush(d);
+ /* Unblock any returns so workers can exit */
+ rte_distributor_clear_returns(d);
+ quit_signal_rx = 1;
+ return 0;
+}
+
+
static int
lcore_tx(struct rte_ring *in_r)
{
@@ -327,9 +401,9 @@ lcore_tx(struct rte_ring *in_r)
if ((enabled_port_mask & (1 << port)) == 0)
continue;
- struct rte_mbuf *bufs[BURST_SIZE];
+ struct rte_mbuf *bufs[BURST_SIZE_TX];
const uint16_t nb_rx = rte_ring_dequeue_burst(in_r,
- (void *)bufs, BURST_SIZE);
+ (void *)bufs, BURST_SIZE_TX, NULL);
app_stats.tx.dequeue_pkts += nb_rx;
/* if we get no traffic, flush anything we have */
@@ -358,11 +432,12 @@ lcore_tx(struct rte_ring *in_r)
outbuf = &tx_buffers[outp];
outbuf->mbufs[outbuf->count++] = bufs[i];
- if (outbuf->count == BURST_SIZE)
+ if (outbuf->count == BURST_SIZE_TX)
flush_one_port(outbuf, outp);
}
}
}
+ printf("\nCore %u exiting tx task.\n", rte_lcore_id());
return 0;
}
@@ -371,32 +446,98 @@ int_handler(int sig_num)
{
printf("Exiting on signal %d\n", sig_num);
/* set quit flag for rx thread to exit */
- quit_signal_rx = 1;
+ quit_signal_dist = 1;
}
static void
print_stats(void)
{
struct rte_eth_stats eth_stats;
- unsigned i;
-
- printf("\nRX thread stats:\n");
- printf(" - Received: %"PRIu64"\n", app_stats.rx.rx_pkts);
- printf(" - Processed: %"PRIu64"\n", app_stats.rx.returned_pkts);
- printf(" - Enqueued: %"PRIu64"\n", app_stats.rx.enqueued_pkts);
-
- printf("\nTX thread stats:\n");
- printf(" - Dequeued: %"PRIu64"\n", app_stats.tx.dequeue_pkts);
- printf(" - Transmitted: %"PRIu64"\n", app_stats.tx.tx_pkts);
+ unsigned int i, j;
+ const unsigned int num_workers = rte_lcore_count() - 4;
for (i = 0; i < rte_eth_dev_count(); i++) {
rte_eth_stats_get(i, &eth_stats);
- printf("\nPort %u stats:\n", i);
- printf(" - Pkts in: %"PRIu64"\n", eth_stats.ipackets);
- printf(" - Pkts out: %"PRIu64"\n", eth_stats.opackets);
- printf(" - In Errs: %"PRIu64"\n", eth_stats.ierrors);
- printf(" - Out Errs: %"PRIu64"\n", eth_stats.oerrors);
- printf(" - Mbuf Errs: %"PRIu64"\n", eth_stats.rx_nombuf);
+ app_stats.port_rx_pkts[i] = eth_stats.ipackets;
+ app_stats.port_tx_pkts[i] = eth_stats.opackets;
+ }
+
+ printf("\n\nRX Thread:\n");
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ printf("Port %u Pktsin : %5.2f\n", i,
+ (app_stats.port_rx_pkts[i] -
+ prev_app_stats.port_rx_pkts[i])/1000000.0);
+ prev_app_stats.port_rx_pkts[i] = app_stats.port_rx_pkts[i];
+ }
+ printf(" - Received: %5.2f\n",
+ (app_stats.rx.rx_pkts -
+ prev_app_stats.rx.rx_pkts)/1000000.0);
+ printf(" - Returned: %5.2f\n",
+ (app_stats.rx.returned_pkts -
+ prev_app_stats.rx.returned_pkts)/1000000.0);
+ printf(" - Enqueued: %5.2f\n",
+ (app_stats.rx.enqueued_pkts -
+ prev_app_stats.rx.enqueued_pkts)/1000000.0);
+ printf(" - Dropped: %s%5.2f%s\n", ANSI_COLOR_RED,
+ (app_stats.rx.enqdrop_pkts -
+ prev_app_stats.rx.enqdrop_pkts)/1000000.0,
+ ANSI_COLOR_RESET);
+
+ printf("Distributor thread:\n");
+ printf(" - In: %5.2f\n",
+ (app_stats.dist.in_pkts -
+ prev_app_stats.dist.in_pkts)/1000000.0);
+ printf(" - Returned: %5.2f\n",
+ (app_stats.dist.ret_pkts -
+ prev_app_stats.dist.ret_pkts)/1000000.0);
+ printf(" - Sent: %5.2f\n",
+ (app_stats.dist.sent_pkts -
+ prev_app_stats.dist.sent_pkts)/1000000.0);
+ printf(" - Dropped %s%5.2f%s\n", ANSI_COLOR_RED,
+ (app_stats.dist.enqdrop_pkts -
+ prev_app_stats.dist.enqdrop_pkts)/1000000.0,
+ ANSI_COLOR_RESET);
+
+ printf("TX thread:\n");
+ printf(" - Dequeued: %5.2f\n",
+ (app_stats.tx.dequeue_pkts -
+ prev_app_stats.tx.dequeue_pkts)/1000000.0);
+ for (i = 0; i < rte_eth_dev_count(); i++) {
+ printf("Port %u Pktsout: %5.2f\n",
+ i, (app_stats.port_tx_pkts[i] -
+ prev_app_stats.port_tx_pkts[i])/1000000.0);
+ prev_app_stats.port_tx_pkts[i] = app_stats.port_tx_pkts[i];
+ }
+ printf(" - Transmitted: %5.2f\n",
+ (app_stats.tx.tx_pkts -
+ prev_app_stats.tx.tx_pkts)/1000000.0);
+ printf(" - Dropped: %s%5.2f%s\n", ANSI_COLOR_RED,
+ (app_stats.tx.enqdrop_pkts -
+ prev_app_stats.tx.enqdrop_pkts)/1000000.0,
+ ANSI_COLOR_RESET);
+
+ prev_app_stats.rx.rx_pkts = app_stats.rx.rx_pkts;
+ prev_app_stats.rx.returned_pkts = app_stats.rx.returned_pkts;
+ prev_app_stats.rx.enqueued_pkts = app_stats.rx.enqueued_pkts;
+ prev_app_stats.rx.enqdrop_pkts = app_stats.rx.enqdrop_pkts;
+ prev_app_stats.dist.in_pkts = app_stats.dist.in_pkts;
+ prev_app_stats.dist.ret_pkts = app_stats.dist.ret_pkts;
+ prev_app_stats.dist.sent_pkts = app_stats.dist.sent_pkts;
+ prev_app_stats.dist.enqdrop_pkts = app_stats.dist.enqdrop_pkts;
+ prev_app_stats.tx.dequeue_pkts = app_stats.tx.dequeue_pkts;
+ prev_app_stats.tx.tx_pkts = app_stats.tx.tx_pkts;
+ prev_app_stats.tx.enqdrop_pkts = app_stats.tx.enqdrop_pkts;
+
+ for (i = 0; i < num_workers; i++) {
+ printf("Worker %02u Pkts: %5.2f. Bursts(1-8): ", i,
+ (app_stats.worker_pkts[i] -
+ prev_app_stats.worker_pkts[i])/1000000.0);
+ for (j = 0; j < 8; j++) {
+ printf("%"PRIu64" ", app_stats.worker_bursts[i][j]);
+ app_stats.worker_bursts[i][j] = 0;
+ }
+ printf("\n");
+ prev_app_stats.worker_pkts[i] = app_stats.worker_pkts[i];
}
}
@@ -405,17 +546,36 @@ lcore_worker(struct lcore_params *p)
{
struct rte_distributor *d = p->d;
const unsigned id = p->worker_id;
+ unsigned int num = 0;
+ unsigned int i;
+
/*
* for single port, xor_val will be zero so we won't modify the output
* port, otherwise we send traffic from 0 to 1, 2 to 3, and vice versa
*/
const unsigned xor_val = (rte_eth_dev_count() > 1);
- struct rte_mbuf *buf = NULL;
+ struct rte_mbuf *buf[8] __rte_cache_aligned;
+
+ for (i = 0; i < 8; i++)
+ buf[i] = NULL;
+
+ app_stats.worker_pkts[p->worker_id] = 1;
printf("\nCore %u acting as worker core.\n", rte_lcore_id());
- while (!quit_signal) {
- buf = rte_distributor_get_pkt(d, id, buf);
- buf->port ^= xor_val;
+ while (!quit_signal_work) {
+ num = rte_distributor_get_pkt(d, id, buf, buf, num);
+ /* Do a little bit of work for each packet */
+ for (i = 0; i < num; i++) {
+ uint64_t t = rte_rdtsc()+100;
+
+ while (rte_rdtsc() < t)
+ rte_pause();
+ buf[i]->port ^= xor_val;
+ }
+
+ app_stats.worker_pkts[p->worker_id] += num;
+ if (num > 0)
+ app_stats.worker_bursts[p->worker_id][num-1]++;
}
return 0;
}
@@ -487,7 +647,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return 0;
}
@@ -497,11 +657,13 @@ main(int argc, char *argv[])
{
struct rte_mempool *mbuf_pool;
struct rte_distributor *d;
- struct rte_ring *output_ring;
+ struct rte_ring *dist_tx_ring;
+ struct rte_ring *rx_dist_ring;
unsigned lcore_id, worker_id = 0;
unsigned nb_ports;
uint8_t portid;
uint8_t nb_ports_available;
+ uint64_t t, freq;
/* catch ctrl-c so we can print on exit */
signal(SIGINT, int_handler);
@@ -518,10 +680,12 @@ main(int argc, char *argv[])
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid distributor parameters\n");
- if (rte_lcore_count() < 3)
+ if (rte_lcore_count() < 5)
rte_exit(EXIT_FAILURE, "Error, This application needs at "
- "least 3 logical cores to run:\n"
- "1 lcore for packet RX and distribution\n"
+ "least 5 logical cores to run:\n"
+ "1 lcore for stats (can be core 0)\n"
+ "1 lcore for packet RX\n"
+ "1 lcore for distribution\n"
"1 lcore for packet TX\n"
"and at least 1 lcore for worker threads\n");
@@ -561,40 +725,82 @@ main(int argc, char *argv[])
}
d = rte_distributor_create("PKT_DIST", rte_socket_id(),
- rte_lcore_count() - 2);
+ rte_lcore_count() - 4,
+ RTE_DIST_ALG_BURST);
if (d == NULL)
rte_exit(EXIT_FAILURE, "Cannot create distributor\n");
/*
- * scheduler ring is read only by the transmitter core, but written to
- * by multiple threads
+ * scheduler ring is read by the transmitter core, and written to
+ * by scheduler core
*/
- output_ring = rte_ring_create("Output_ring", RTE_RING_SZ,
- rte_socket_id(), RING_F_SC_DEQ);
- if (output_ring == NULL)
+ dist_tx_ring = rte_ring_create("Output_ring", SCHED_TX_RING_SZ,
+ rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (dist_tx_ring == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create output ring\n");
+
+ rx_dist_ring = rte_ring_create("Input_ring", SCHED_RX_RING_SZ,
+ rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (rx_dist_ring == NULL)
rte_exit(EXIT_FAILURE, "Cannot create output ring\n");
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- if (worker_id == rte_lcore_count() - 2)
+ if (worker_id == rte_lcore_count() - 3) {
+ printf("Starting distributor on lcore_id %d\n",
+ lcore_id);
+ /* distributor core */
+ struct lcore_params *p =
+ rte_malloc(NULL, sizeof(*p), 0);
+ if (!p)
+ rte_panic("malloc failure\n");
+ *p = (struct lcore_params){worker_id, d,
+ rx_dist_ring, dist_tx_ring, mbuf_pool};
+ rte_eal_remote_launch(
+ (lcore_function_t *)lcore_distributor,
+ p, lcore_id);
+ } else if (worker_id == rte_lcore_count() - 4) {
+ printf("Starting tx on worker_id %d, lcore_id %d\n",
+ worker_id, lcore_id);
+ /* tx core */
rte_eal_remote_launch((lcore_function_t *)lcore_tx,
- output_ring, lcore_id);
- else {
+ dist_tx_ring, lcore_id);
+ } else if (worker_id == rte_lcore_count() - 2) {
+ printf("Starting rx on worker_id %d, lcore_id %d\n",
+ worker_id, lcore_id);
+ /* rx core */
struct lcore_params *p =
rte_malloc(NULL, sizeof(*p), 0);
if (!p)
rte_panic("malloc failure\n");
- *p = (struct lcore_params){worker_id, d, output_ring, mbuf_pool};
+ *p = (struct lcore_params){worker_id, d, rx_dist_ring,
+ dist_tx_ring, mbuf_pool};
+ rte_eal_remote_launch((lcore_function_t *)lcore_rx,
+ p, lcore_id);
+ } else {
+ printf("Starting worker on worker_id %d, lcore_id %d\n",
+ worker_id, lcore_id);
+ struct lcore_params *p =
+ rte_malloc(NULL, sizeof(*p), 0);
+ if (!p)
+ rte_panic("malloc failure\n");
+ *p = (struct lcore_params){worker_id, d, rx_dist_ring,
+ dist_tx_ring, mbuf_pool};
rte_eal_remote_launch((lcore_function_t *)lcore_worker,
p, lcore_id);
}
worker_id++;
}
- /* call lcore_main on master core only */
- struct lcore_params p = { 0, d, output_ring, mbuf_pool};
- if (lcore_rx(&p) != 0)
- return -1;
+ freq = rte_get_timer_hz();
+ t = rte_rdtsc() + freq;
+ while (!quit_signal_dist) {
+ if (t < rte_rdtsc()) {
+ print_stats();
+ t = rte_rdtsc() + freq;
+ }
+ usleep(1000);
+ }
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
if (rte_eal_wait_lcore(lcore_id) < 0)
diff --git a/examples/dpdk_qat/Makefile b/examples/dpdk_qat/Makefile
deleted file mode 100644
index 01d61bcf..00000000
--- a/examples/dpdk_qat/Makefile
+++ /dev/null
@@ -1,93 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-ifeq ($(RTE_SDK),)
-$(error "Please define RTE_SDK environment variable")
-endif
-
-ifeq ($(ICP_ROOT),)
-$(error "Please define ICP_ROOT environment variable")
-endif
-
-# Default target, can be overriden by command line or environment
-RTE_TARGET ?= x86_64-native-linuxapp-gcc
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
-$(error This application can only operate in a linuxapp environment, \
-please change the definition of the RTE_TARGET environment variable)
-endif
-
-LBITS := $(shell uname -p)
-ifeq ($(CROSS_COMPILE),)
- ifneq ($(CONFIG_RTE_ARCH),"x86_64")
- ifneq ($(LBITS),i686)
- $(error The RTE_TARGET chosen is not compatible with this environment \
- (x86_64), for this application. Please change the definition of the \
- RTE_TARGET environment variable, or run the application on a i686 OS)
- endif
- endif
-endif
-
-# binary name
-APP = dpdk_qat
-
-# all source are stored in SRCS-y
-SRCS-y := main.c crypto.c
-
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -I$(ICP_ROOT)/quickassist/include \
- -I$(ICP_ROOT)/quickassist/include/lac \
- -I$(ICP_ROOT)/quickassist/lookaside/access_layer/include
-
-# From CRF 1.2 driver, library was renamed to libicp_qa_al.a
-ifneq ($(wildcard $(ICP_ROOT)/build/icp_qa_al.a),)
-ICP_LIBRARY_PATH = $(ICP_ROOT)/build/icp_qa_al.a
-else
-ICP_LIBRARY_PATH = $(ICP_ROOT)/build/libicp_qa_al.a
-endif
-
-LDLIBS += -L$(ICP_ROOT)/build
-LDLIBS += $(ICP_LIBRARY_PATH) \
- -lz \
- -losal \
- -ladf_proxy \
- -lcrypto
-
-# workaround for a gcc bug with noreturn attribute
-# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
-ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
-CFLAGS_main.o += -Wno-return-type
-endif
-
-include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/dpdk_qat/config_files/coleto/dh895xcc_qa_dev0.conf b/examples/dpdk_qat/config_files/coleto/dh895xcc_qa_dev0.conf
deleted file mode 100644
index fd139e2f..00000000
--- a/examples/dpdk_qat/config_files/coleto/dh895xcc_qa_dev0.conf
+++ /dev/null
@@ -1,65 +0,0 @@
-[GENERAL]
-ServicesEnabled = cy;dc
-ConfigVersion = 2
-cyHmacAuthMode = 1
-dcTotalSRAMAvailable = 0
-Firmware_MofPath = dh895xcc/mof_firmware.bin
-Firmware_MmpPath = dh895xcc/mmp_firmware.bin
-statsGeneral = 1
-statsDc = 1
-statsDh = 1
-statsDrbg = 1
-statsDsa = 1
-statsEcc = 1
-statsKeyGen = 1
-statsLn = 1
-statsPrime = 1
-statsRsa = 1
-statsSym = 1
-SRIOV_Enabled = 0
-ProcDebug = 1
-
-[KERNEL]
-NumberCyInstances = 0
-NumberDcInstances = 0
-
-[SSL]
-NumberCyInstances = 8
-NumberDcInstances = 0
-NumProcesses = 1
-LimitDevAccess = 0
-
-Cy0Name = "SSL0"
-Cy0IsPolled = 1
-Cy0CoreAffinity = 0
-
-Cy1Name = "SSL1"
-Cy1IsPolled = 1
-Cy1CoreAffinity = 1
-
-Cy2Name = "SSL2"
-Cy2IsPolled = 1
-Cy2CoreAffinity = 2
-
-Cy3Name = "SSL3"
-Cy3IsPolled = 1
-Cy3CoreAffinity = 3
-
-
-Cy4Name = "SSL4"
-Cy4IsPolled = 1
-Cy4CoreAffinity = 4
-
-
-Cy5Name = "SSL5"
-Cy5IsPolled = 1
-Cy5CoreAffinity = 5
-
-Cy6Name = "SSL6"
-Cy6IsPolled = 1
-Cy6CoreAffinity = 6
-
-
-Cy7Name = "SSL7"
-Cy7IsPolled = 1
-Cy7CoreAffinity = 7
diff --git a/examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev0.conf b/examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev0.conf
deleted file mode 100644
index 9e1c1d11..00000000
--- a/examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev0.conf
+++ /dev/null
@@ -1,293 +0,0 @@
-#########################################################################
-#
-# @par
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# #########################################################################
-# ########################################################
-#
-# This file is the configuration for a single dh89xxcc_qa
-# device.
-#
-# Each device has up to two accelerators.
-# - The client may load balance between these
-# accelerators.
-# Each accelerator has 8 independent ring banks.
-# - The interrupt for each can be directed to a
-# specific core.
-# Each ring bank as 16 rings (hardware assisted queues).
-#
-#########################################################
-# General Section
-##############################################
-
-[GENERAL]
-ServicesEnabled = cy0;cy1
-
-# Use version 2 of the config file
-ConfigVersion = 2
-# Look Aside Cryptographic Configuration
-cyHmacAuthMode = 1
-
-# Look Aside Compression Configuration
-dcTotalSRAMAvailable = 0
-
-# Firmware Location Configuration
-Firmware_MofPath = mof_firmware.bin
-Firmware_MmpPath = mmp_firmware.bin
-
-#Default values for number of concurrent requests*/
-CyNumConcurrentSymRequests = 512
-CyNumConcurrentAsymRequests = 64
-DcNumConcurrentRequests = 512
-
-#Statistics, valid values: 1,0
-statsGeneral = 1
-statsDc = 1
-statsDh = 1
-statsDrbg = 1
-statsDsa = 1
-statsEcc = 1
-statsKeyGen = 1
-statsLn = 1
-statsPrime = 1
-statsRsa = 1
-statsSym = 1
-
-# Enables or disables Single Root Complex IO Virtualization.
-# If this is enabled (1) then SRIOV and VT-d need to be enabled in
-# BIOS and there can be no Cy or Dc instances created in PF (Dom0).
-# If this i disabled (0) then SRIOV and VT-d need to be disabled
-# in BIOS and Cy and/or Dc instances can be used in PF (Dom0)
-SRIOV_Enabled = 0
-
-#Debug feature, if set to 1 it enables additional entries in /proc filesystem
-ProcDebug = 1
-
-#######################################################
-#
-# Logical Instances Section
-# A logical instance allows each address domain
-# (kernel space and individual user space processes)
-# to configure rings (i.e. hardware assisted queues)
-# to be used by that address domain and to define the
-# behavior of that ring.
-#
-# The address domains are in the following format
-# - For kernel address domains
-# [KERNEL]
-# - For user process address domains
-# [xxxxx]
-# Where xxxxx may be any ascii value which uniquely identifies
-# the user mode process.
-# To allow the driver correctly configure the
-# logical instances associated with this user process,
-# the process must call the icp_sal_userStartMultiProcess(...)
-# passing the xxxxx string during process initialisation.
-# When the user space process is finished it must call
-# icp_sal_userStop(...) to free resources.
-# NumProcesses will indicate the maximum number of processes
-# that can call icp_sal_userStartMultiProcess on this instance.
-# Warning: the resources are preallocated: if NumProcesses
-# is too high, the driver will fail to load
-#
-# Items configurable by a logical instance are:
-# - Name of the logical instance
-# - The accelerator associated with this logical
-# instance
-# - The core the instance is affinitized to (optional)
-#
-# Note: Logical instances may not share the same ring, but
-# may share a ring bank.
-#
-# The format of the logical instances are:
-# - For crypto:
-# Cy<n>Name = "xxxx"
-# Cy<n>AcceleratorNumber = 0-3
-# Cy<n>CoreAffinity = 0-7
-#
-# - For Data Compression
-# Dc<n>Name = "xxxx"
-# Dc<n>AcceleratorNumber = 0-1
-# Dc<n>CoreAffinity = 0-7
-#
-# Where:
-# - n is the number of this logical instance starting at 0.
-# - xxxx may be any ascii value which identifies the logical instance.
-#
-# Note: for user space processes, a list of values can be specified for
-# the accelerator number and the core affinity: for example
-# Cy0AcceleratorNumber = 0,2
-# Cy0CoreAffinity = 0,2,4
-# These comma-separated lists will allow the multiple processes to use
-# different accelerators and cores, and will wrap around the numbers
-# in the list. In the above example, process 0 will use accelerator 0,
-# and process 1 will use accelerator 2
-#
-########################################################
-
-##############################################
-# Kernel Instances Section
-##############################################
-[KERNEL]
-NumberCyInstances = 0
-NumberDcInstances = 0
-
-##############################################
-# User Process Instance Section
-##############################################
-[SSL]
-NumberCyInstances = 16
-NumberDcInstances = 0
-NumProcesses = 1
-LimitDevAccess = 0
-
-# Crypto - User instance #0
-Cy0Name = "SSL0"
-Cy0IsPolled = 1
-Cy0AcceleratorNumber = 0
-# List of core affinities
-Cy0CoreAffinity = 0
-
-# Crypto - User instance #1
-Cy1Name = "SSL1"
-Cy1IsPolled = 1
-Cy1AcceleratorNumber = 1
-# List of core affinities
-Cy1CoreAffinity = 1
-
-# Crypto - User instance #2
-Cy2Name = "SSL2"
-Cy2IsPolled = 1
-Cy2AcceleratorNumber = 2
-# List of core affinities
-Cy2CoreAffinity = 2
-
-# Crypto - User instance #3
-Cy3Name = "SSL3"
-Cy3IsPolled = 1
-Cy3AcceleratorNumber = 3
-# List of core affinities
-Cy3CoreAffinity = 3
-
-# Crypto - User instance #4
-Cy4Name = "SSL4"
-Cy4IsPolled = 1
-Cy4AcceleratorNumber = 0
-# List of core affinities
-Cy4CoreAffinity = 4
-
-# Crypto - User instance #5
-Cy5Name = "SSL5"
-Cy5IsPolled = 1
-Cy5AcceleratorNumber = 1
-# List of core affinities
-Cy5CoreAffinity = 5
-
-# Crypto - User instance #6
-Cy6Name = "SSL6"
-Cy6IsPolled = 1
-Cy6AcceleratorNumber = 2
-# List of core affinities
-Cy6CoreAffinity = 6
-
-# Crypto - User instance #7
-Cy7Name = "SSL7"
-Cy7IsPolled = 1
-Cy7AcceleratorNumber = 3
-# List of core affinities
-Cy7CoreAffinity = 7
-
-# Crypto - User instance #8
-Cy8Name = "SSL8"
-Cy8IsPolled = 1
-Cy8AcceleratorNumber = 0
-# List of core affinities
-Cy8CoreAffinity = 16
-
-# Crypto - User instance #9
-Cy9Name = "SSL9"
-Cy9IsPolled = 1
-Cy9AcceleratorNumber = 1
-# List of core affinities
-Cy9CoreAffinity = 17
-
-# Crypto - User instance #10
-Cy10Name = "SSL10"
-Cy10IsPolled = 1
-Cy10AcceleratorNumber = 2
-# List of core affinities
-Cy10CoreAffinity = 18
-
-# Crypto - User instance #11
-Cy11Name = "SSL11"
-Cy11IsPolled = 1
-Cy11AcceleratorNumber = 3
-# List of core affinities
-Cy11CoreAffinity = 19
-
-# Crypto - User instance #12
-Cy12Name = "SSL12"
-Cy12IsPolled = 1
-Cy12AcceleratorNumber = 0
-# List of core affinities
-Cy12CoreAffinity = 20
-
-# Crypto - User instance #13
-Cy13Name = "SSL13"
-Cy13IsPolled = 1
-Cy13AcceleratorNumber = 1
-# List of core affinities
-Cy13CoreAffinity = 21
-
-# Crypto - User instance #14
-Cy14Name = "SSL14"
-Cy14IsPolled = 1
-Cy14AcceleratorNumber = 2
-# List of core affinities
-Cy14CoreAffinity = 22
-
-# Crypto - User instance #15
-Cy15Name = "SSL15"
-Cy15IsPolled = 1
-Cy15AcceleratorNumber = 3
-# List of core affinities
-Cy15CoreAffinity = 23
-
-
-
-##############################################
-# Wireless Process Instance Section
-##############################################
-[WIRELESS]
-NumberCyInstances = 0
-NumberDcInstances = 0
-NumProcesses = 0
diff --git a/examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev1.conf b/examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev1.conf
deleted file mode 100644
index 3e8d8b6b..00000000
--- a/examples/dpdk_qat/config_files/shumway/dh89xxcc_qa_dev1.conf
+++ /dev/null
@@ -1,292 +0,0 @@
-#########################################################################
-#
-# @par
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# #########################################################################
-# ########################################################
-#
-# This file is the configuration for a single dh89xxcc_qa
-# device.
-#
-# Each device has up to two accelerators.
-# - The client may load balance between these
-# accelerators.
-# Each accelerator has 8 independent ring banks.
-# - The interrupt for each can be directed to a
-# specific core.
-# Each ring bank as 16 rings (hardware assisted queues).
-#
-#########################################################
-# General Section
-##############################################
-
-[GENERAL]
-ServicesEnabled = cy0;cy1
-
-# Use version 2 of the config file
-ConfigVersion = 2
-# Look Aside Cryptographic Configuration
-cyHmacAuthMode = 1
-
-# Look Aside Compression Configuration
-dcTotalSRAMAvailable = 0
-
-# Firmware Location Configuration
-Firmware_MofPath = mof_firmware.bin
-Firmware_MmpPath = mmp_firmware.bin
-
-#Default values for number of concurrent requests*/
-CyNumConcurrentSymRequests = 512
-CyNumConcurrentAsymRequests = 64
-DcNumConcurrentRequests = 512
-
-#Statistics, valid values: 1,0
-statsGeneral = 1
-statsDc = 1
-statsDh = 1
-statsDrbg = 1
-statsDsa = 1
-statsEcc = 1
-statsKeyGen = 1
-statsLn = 1
-statsPrime = 1
-statsRsa = 1
-statsSym = 1
-
-# Enables or disables Single Root Complex IO Virtualization.
-# If this is enabled (1) then SRIOV and VT-d need to be enabled in
-# BIOS and there can be no Cy or Dc instances created in PF (Dom0).
-# If this i disabled (0) then SRIOV and VT-d need to be disabled
-# in BIOS and Cy and/or Dc instances can be used in PF (Dom0)
-SRIOV_Enabled = 0
-
-#Debug feature, if set to 1 it enables additional entries in /proc filesystem
-ProcDebug = 1
-
-#######################################################
-#
-# Logical Instances Section
-# A logical instance allows each address domain
-# (kernel space and individual user space processes)
-# to configure rings (i.e. hardware assisted queues)
-# to be used by that address domain and to define the
-# behavior of that ring.
-#
-# The address domains are in the following format
-# - For kernel address domains
-# [KERNEL]
-# - For user process address domains
-# [xxxxx]
-# Where xxxxx may be any ascii value which uniquely identifies
-# the user mode process.
-# To allow the driver correctly configure the
-# logical instances associated with this user process,
-# the process must call the icp_sal_userStartMultiProcess(...)
-# passing the xxxxx string during process initialisation.
-# When the user space process is finished it must call
-# icp_sal_userStop(...) to free resources.
-# NumProcesses will indicate the maximum number of processes
-# that can call icp_sal_userStartMultiProcess on this instance.
-# Warning: the resources are preallocated: if NumProcesses
-# is too high, the driver will fail to load
-#
-# Items configurable by a logical instance are:
-# - Name of the logical instance
-# - The accelerator associated with this logical
-# instance
-# - The core the instance is affinitized to (optional)
-#
-# Note: Logical instances may not share the same ring, but
-# may share a ring bank.
-#
-# The format of the logical instances are:
-# - For crypto:
-# Cy<n>Name = "xxxx"
-# Cy<n>AcceleratorNumber = 0-3
-# Cy<n>CoreAffinity = 0-7
-#
-# - For Data Compression
-# Dc<n>Name = "xxxx"
-# Dc<n>AcceleratorNumber = 0-1
-# Dc<n>CoreAffinity = 0-7
-#
-# Where:
-# - n is the number of this logical instance starting at 0.
-# - xxxx may be any ascii value which identifies the logical instance.
-#
-# Note: for user space processes, a list of values can be specified for
-# the accelerator number and the core affinity: for example
-# Cy0AcceleratorNumber = 0,2
-# Cy0CoreAffinity = 0,2,4
-# These comma-separated lists will allow the multiple processes to use
-# different accelerators and cores, and will wrap around the numbers
-# in the list. In the above example, process 0 will use accelerator 0,
-# and process 1 will use accelerator 2
-#
-########################################################
-
-##############################################
-# Kernel Instances Section
-##############################################
-[KERNEL]
-NumberCyInstances = 0
-NumberDcInstances = 0
-
-##############################################
-# User Process Instance Section
-##############################################
-[SSL]
-NumberCyInstances = 16
-NumberDcInstances = 0
-NumProcesses = 1
-LimitDevAccess = 0
-
-# Crypto - User instance #0
-Cy0Name = "SSL0"
-Cy0IsPolled = 1
-Cy0AcceleratorNumber = 0
-# List of core affinities
-Cy0CoreAffinity = 8
-
-# Crypto - User instance #1
-Cy1Name = "SSL1"
-Cy1IsPolled = 1
-Cy1AcceleratorNumber = 1
-# List of core affinities
-Cy1CoreAffinity = 9
-
-# Crypto - User instance #2
-Cy2Name = "SSL2"
-Cy2IsPolled = 1
-Cy2AcceleratorNumber = 2
-# List of core affinities
-Cy2CoreAffinity = 10
-
-# Crypto - User instance #3
-Cy3Name = "SSL3"
-Cy3IsPolled = 1
-Cy3AcceleratorNumber = 3
-# List of core affinities
-Cy3CoreAffinity = 11
-
-# Crypto - User instance #4
-Cy4Name = "SSL4"
-Cy4IsPolled = 1
-Cy4AcceleratorNumber = 0
-# List of core affinities
-Cy4CoreAffinity = 12
-
-# Crypto - User instance #5
-Cy5Name = "SSL5"
-Cy5IsPolled = 1
-Cy5AcceleratorNumber = 1
-# List of core affinities
-Cy5CoreAffinity = 13
-
-# Crypto - User instance #6
-Cy6Name = "SSL6"
-Cy6IsPolled = 1
-Cy6AcceleratorNumber = 2
-# List of core affinities
-Cy6CoreAffinity = 14
-
-# Crypto - User instance #7
-Cy7Name = "SSL7"
-Cy7IsPolled = 1
-Cy7AcceleratorNumber = 3
-# List of core affinities
-Cy7CoreAffinity = 15
-
-# Crypto - User instance #8
-Cy8Name = "SSL8"
-Cy8IsPolled = 1
-Cy8AcceleratorNumber = 0
-# List of core affinities
-Cy8CoreAffinity = 24
-
-# Crypto - User instance #9
-Cy9Name = "SSL9"
-Cy9IsPolled = 1
-Cy9AcceleratorNumber = 1
-# List of core affinities
-Cy9CoreAffinity = 25
-
-# Crypto - User instance #10
-Cy10Name = "SSL10"
-Cy10IsPolled = 1
-Cy10AcceleratorNumber = 2
-# List of core affinities
-Cy10CoreAffinity = 26
-
-# Crypto - User instance #11
-Cy11Name = "SSL11"
-Cy11IsPolled = 1
-Cy11AcceleratorNumber = 3
-# List of core affinities
-Cy11CoreAffinity = 27
-
-# Crypto - User instance #12
-Cy12Name = "SSL12"
-Cy12IsPolled = 1
-Cy12AcceleratorNumber = 0
-# List of core affinities
-Cy12CoreAffinity = 28
-
-# Crypto - User instance #13
-Cy13Name = "SSL13"
-Cy13IsPolled = 1
-Cy13AcceleratorNumber = 1
-# List of core affinities
-Cy13CoreAffinity = 29
-
-# Crypto - User instance #14
-Cy14Name = "SSL14"
-Cy14IsPolled = 1
-Cy14AcceleratorNumber = 2
-# List of core affinities
-Cy14CoreAffinity = 30
-
-# Crypto - User instance #15
-Cy15Name = "SSL15"
-Cy15IsPolled = 1
-Cy15AcceleratorNumber = 3
-# List of core affinities
-Cy15CoreAffinity = 31
-
-
-##############################################
-# Wireless Process Instance Section
-##############################################
-[WIRELESS]
-NumberCyInstances = 0
-NumberDcInstances = 0
-NumProcesses = 0
diff --git a/examples/dpdk_qat/config_files/stargo/dh89xxcc_qa_dev0.conf b/examples/dpdk_qat/config_files/stargo/dh89xxcc_qa_dev0.conf
deleted file mode 100644
index c3a85dea..00000000
--- a/examples/dpdk_qat/config_files/stargo/dh89xxcc_qa_dev0.conf
+++ /dev/null
@@ -1,235 +0,0 @@
-#########################################################################
-#
-# @par
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# #########################################################################
-# ########################################################
-#
-# This file is the configuration for a single dh89xxcc_qa
-# device.
-#
-# Each device has up to two accelerators.
-# - The client may load balance between these
-# accelerators.
-# Each accelerator has 8 independent ring banks.
-# - The interrupt for each can be directed to a
-# specific core.
-# Each ring bank as 16 rings (hardware assisted queues).
-#
-#########################################################
-# General Section
-##############################################
-
-[GENERAL]
-ServicesEnabled = cy0;cy1
-
-# Use version 2 of the config file
-ConfigVersion = 2
-# Look Aside Cryptographic Configuration
-cyHmacAuthMode = 1
-
-# Look Aside Compression Configuration
-dcTotalSRAMAvailable = 0
-
-# Firmware Location Configuration
-Firmware_MofPath = mof_firmware.bin
-Firmware_MmpPath = mmp_firmware.bin
-
-#Default values for number of concurrent requests*/
-CyNumConcurrentSymRequests = 512
-CyNumConcurrentAsymRequests = 64
-DcNumConcurrentRequests = 512
-
-#Statistics, valid values: 1,0
-statsGeneral = 1
-statsDc = 1
-statsDh = 1
-statsDrbg = 1
-statsDsa = 1
-statsEcc = 1
-statsKeyGen = 1
-statsLn = 1
-statsPrime = 1
-statsRsa = 1
-statsSym = 1
-
-# Enables or disables Single Root Complex IO Virtualization.
-# If this is enabled (1) then SRIOV and VT-d need to be enabled in
-# BIOS and there can be no Cy or Dc instances created in PF (Dom0).
-# If this i disabled (0) then SRIOV and VT-d need to be disabled
-# in BIOS and Cy and/or Dc instances can be used in PF (Dom0)
-SRIOV_Enabled = 0
-
-#Debug feature, if set to 1 it enables additional entries in /proc filesystem
-ProcDebug = 1
-
-#######################################################
-#
-# Logical Instances Section
-# A logical instance allows each address domain
-# (kernel space and individual user space processes)
-# to configure rings (i.e. hardware assisted queues)
-# to be used by that address domain and to define the
-# behavior of that ring.
-#
-# The address domains are in the following format
-# - For kernel address domains
-# [KERNEL]
-# - For user process address domains
-# [xxxxx]
-# Where xxxxx may be any ascii value which uniquely identifies
-# the user mode process.
-# To allow the driver correctly configure the
-# logical instances associated with this user process,
-# the process must call the icp_sal_userStartMultiProcess(...)
-# passing the xxxxx string during process initialisation.
-# When the user space process is finished it must call
-# icp_sal_userStop(...) to free resources.
-# NumProcesses will indicate the maximum number of processes
-# that can call icp_sal_userStartMultiProcess on this instance.
-# Warning: the resources are preallocated: if NumProcesses
-# is too high, the driver will fail to load
-#
-# Items configurable by a logical instance are:
-# - Name of the logical instance
-# - The accelerator associated with this logical
-# instance
-# - The core the instance is affinitized to (optional)
-#
-# Note: Logical instances may not share the same ring, but
-# may share a ring bank.
-#
-# The format of the logical instances are:
-# - For crypto:
-# Cy<n>Name = "xxxx"
-# Cy<n>AcceleratorNumber = 0-3
-# Cy<n>CoreAffinity = 0-7
-#
-# - For Data Compression
-# Dc<n>Name = "xxxx"
-# Dc<n>AcceleratorNumber = 0-1
-# Dc<n>CoreAffinity = 0-7
-#
-# Where:
-# - n is the number of this logical instance starting at 0.
-# - xxxx may be any ascii value which identifies the logical instance.
-#
-# Note: for user space processes, a list of values can be specified for
-# the accelerator number and the core affinity: for example
-# Cy0AcceleratorNumber = 0,2
-# Cy0CoreAffinity = 0,2,4
-# These comma-separated lists will allow the multiple processes to use
-# different accelerators and cores, and will wrap around the numbers
-# in the list. In the above example, process 0 will use accelerator 0,
-# and process 1 will use accelerator 2
-#
-########################################################
-
-##############################################
-# Kernel Instances Section
-##############################################
-[KERNEL]
-NumberCyInstances = 0
-NumberDcInstances = 0
-
-##############################################
-# User Process Instance Section
-##############################################
-[SSL]
-NumberCyInstances = 8
-NumberDcInstances = 0
-NumProcesses = 1
-LimitDevAccess = 0
-
-# Crypto - User instance #0
-Cy0Name = "SSL0"
-Cy0IsPolled = 1
-Cy0AcceleratorNumber = 0
-# List of core affinities
-Cy0CoreAffinity = 0
-
-# Crypto - User instance #1
-Cy1Name = "SSL1"
-Cy1IsPolled = 1
-Cy1AcceleratorNumber = 1
-# List of core affinities
-Cy1CoreAffinity = 1
-
-# Crypto - User instance #2
-Cy2Name = "SSL2"
-Cy2IsPolled = 1
-Cy2AcceleratorNumber = 2
-# List of core affinities
-Cy2CoreAffinity = 2
-
-# Crypto - User instance #3
-Cy3Name = "SSL3"
-Cy3IsPolled = 1
-Cy3AcceleratorNumber = 3
-# List of core affinities
-Cy3CoreAffinity = 3
-
-# Crypto - User instance #4
-Cy4Name = "SSL4"
-Cy4IsPolled = 1
-Cy4AcceleratorNumber = 0
-# List of core affinities
-Cy4CoreAffinity = 4
-
-# Crypto - User instance #5
-Cy5Name = "SSL5"
-Cy5IsPolled = 1
-Cy5AcceleratorNumber = 1
-# List of core affinities
-Cy5CoreAffinity = 5
-
-# Crypto - User instance #6
-Cy6Name = "SSL6"
-Cy6IsPolled = 1
-Cy6AcceleratorNumber = 2
-# List of core affinities
-Cy6CoreAffinity = 6
-
-# Crypto - User instance #7
-Cy7Name = "SSL7"
-Cy7IsPolled = 1
-Cy7AcceleratorNumber = 3
-# List of core affinities
-Cy7CoreAffinity = 7
-
-##############################################
-# Wireless Process Instance Section
-##############################################
-[WIRELESS]
-NumberCyInstances = 0
-NumberDcInstances = 0
-NumProcesses = 0
diff --git a/examples/dpdk_qat/crypto.c b/examples/dpdk_qat/crypto.c
deleted file mode 100644
index 02032f30..00000000
--- a/examples/dpdk_qat/crypto.c
+++ /dev/null
@@ -1,943 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <strings.h>
-#include <string.h>
-#include <inttypes.h>
-#include <errno.h>
-#include <sys/queue.h>
-#include <stdarg.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_ether.h>
-#include <rte_malloc.h>
-#include <rte_launch.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_atomic.h>
-#include <rte_branch_prediction.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_string_fns.h>
-
-#define CPA_CY_SYM_DP_TMP_WORKAROUND 1
-
-#include "cpa.h"
-#include "cpa_types.h"
-#include "cpa_cy_sym_dp.h"
-#include "cpa_cy_common.h"
-#include "cpa_cy_im.h"
-#include "icp_sal_user.h"
-#include "icp_sal_poll.h"
-
-#include "crypto.h"
-
-/* CIPHER KEY LENGTHS */
-#define KEY_SIZE_64_IN_BYTES (64 / 8)
-#define KEY_SIZE_56_IN_BYTES (56 / 8)
-#define KEY_SIZE_128_IN_BYTES (128 / 8)
-#define KEY_SIZE_168_IN_BYTES (168 / 8)
-#define KEY_SIZE_192_IN_BYTES (192 / 8)
-#define KEY_SIZE_256_IN_BYTES (256 / 8)
-
-/* HMAC AUTH KEY LENGTHS */
-#define AES_XCBC_AUTH_KEY_LENGTH_IN_BYTES (128 / 8)
-#define SHA1_AUTH_KEY_LENGTH_IN_BYTES (160 / 8)
-#define SHA224_AUTH_KEY_LENGTH_IN_BYTES (224 / 8)
-#define SHA256_AUTH_KEY_LENGTH_IN_BYTES (256 / 8)
-#define SHA384_AUTH_KEY_LENGTH_IN_BYTES (384 / 8)
-#define SHA512_AUTH_KEY_LENGTH_IN_BYTES (512 / 8)
-#define MD5_AUTH_KEY_LENGTH_IN_BYTES (128 / 8)
-#define KASUMI_AUTH_KEY_LENGTH_IN_BYTES (128 / 8)
-
-/* HASH DIGEST LENGHTS */
-#define AES_XCBC_DIGEST_LENGTH_IN_BYTES (128 / 8)
-#define AES_XCBC_96_DIGEST_LENGTH_IN_BYTES (96 / 8)
-#define MD5_DIGEST_LENGTH_IN_BYTES (128 / 8)
-#define SHA1_DIGEST_LENGTH_IN_BYTES (160 / 8)
-#define SHA1_96_DIGEST_LENGTH_IN_BYTES (96 / 8)
-#define SHA224_DIGEST_LENGTH_IN_BYTES (224 / 8)
-#define SHA256_DIGEST_LENGTH_IN_BYTES (256 / 8)
-#define SHA384_DIGEST_LENGTH_IN_BYTES (384 / 8)
-#define SHA512_DIGEST_LENGTH_IN_BYTES (512 / 8)
-#define KASUMI_DIGEST_LENGTH_IN_BYTES (32 / 8)
-
-#define IV_LENGTH_16_BYTES (16)
-#define IV_LENGTH_8_BYTES (8)
-
-
-/*
- * rte_memzone is used to allocate physically contiguous virtual memory.
- * In this application we allocate a single block and divide between variables
- * which require a virtual to physical mapping for use by the QAT driver.
- * Virt2phys is only performed during initialisation and not on the data-path.
- */
-
-#define LCORE_MEMZONE_SIZE (1 << 22)
-
-struct lcore_memzone
-{
- const struct rte_memzone *memzone;
- void *next_free_address;
-};
-
-/*
- * Size the qa software response queue.
- * Note: Head and Tail are 8 bit, therefore, the queue is
- * fixed to 256 entries.
- */
-#define CRYPTO_SOFTWARE_QUEUE_SIZE 256
-
-struct qa_callbackQueue {
- uint8_t head;
- uint8_t tail;
- uint16_t numEntries;
- struct rte_mbuf *qaCallbackRing[CRYPTO_SOFTWARE_QUEUE_SIZE];
-};
-
-struct qa_core_conf {
- CpaCySymDpSessionCtx *encryptSessionHandleTbl[NUM_CRYPTO][NUM_HMAC];
- CpaCySymDpSessionCtx *decryptSessionHandleTbl[NUM_CRYPTO][NUM_HMAC];
- CpaInstanceHandle instanceHandle;
- struct qa_callbackQueue callbackQueue;
- uint64_t qaOutstandingRequests;
- uint64_t numResponseAttempts;
- uint8_t kickFreq;
- void *pPacketIV;
- CpaPhysicalAddr packetIVPhy;
- struct lcore_memzone lcoreMemzone;
-} __rte_cache_aligned;
-
-#define MAX_CORES (RTE_MAX_LCORE)
-
-static struct qa_core_conf qaCoreConf[MAX_CORES];
-
-/*
- *Create maximum possible key size,
- *One for cipher and one for hash
- */
-struct glob_keys {
- uint8_t cipher_key[32];
- uint8_t hash_key[64];
- uint8_t iv[16];
-};
-
-struct glob_keys g_crypto_hash_keys = {
- .cipher_key = {0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,
- 0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10,
- 0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,
- 0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f,0x20},
- .hash_key = {0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,
- 0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10,
- 0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,
- 0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f,0x20,
- 0x21,0x22,0x23,0x24,0x25,0x26,0x27,0x28,
- 0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f,0x30,
- 0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,
- 0x39,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f,0x50},
- .iv = {0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,
- 0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10}
-};
-
-/*
- * Offsets from the start of the packet.
- *
- */
-#define PACKET_DATA_START_PHYS(p) \
- ((p)->buf_physaddr + (p)->data_off)
-
-/*
- * A fixed offset to where the crypto is to be performed, which is the first
- * byte after the Ethernet(14 bytes) and IPv4 headers(20 bytes)
- */
-#define CRYPTO_START_OFFSET (14+20)
-#define HASH_START_OFFSET (14+20)
-#define CIPHER_BLOCK_DEFAULT_SIZE (16)
-#define HASH_BLOCK_DEFAULT_SIZE (16)
-
-/*
- * Offset to the opdata from the start of the data portion of packet.
- * Assumption: The buffer is physically contiguous.
- * +18 takes this to the next cache line.
- */
-
-#define CRYPTO_OFFSET_TO_OPDATA (ETHER_MAX_LEN+18)
-
-/*
- * Default number of requests to place on the hardware ring before kicking the
- * ring pointers.
- */
-#define CRYPTO_BURST_TX (16)
-
-/*
- * Only call the qa poll function when the number responses in the software
- * queue drops below this number.
- */
-#define CRYPTO_QUEUED_RESP_POLL_THRESHOLD (32)
-
-/*
- * Limit the number of polls per call to get_next_response.
- */
-#define GET_NEXT_RESPONSE_FREQ (32)
-
-/*
- * Max number of responses to pull from the qa in one poll.
- */
-#define CRYPTO_MAX_RESPONSE_QUOTA \
- (CRYPTO_SOFTWARE_QUEUE_SIZE-CRYPTO_QUEUED_RESP_POLL_THRESHOLD-1)
-
-#if (CRYPTO_QUEUED_RESP_POLL_THRESHOLD + CRYPTO_MAX_RESPONSE_QUOTA >= \
- CRYPTO_SOFTWARE_QUEUE_SIZE)
-#error Its possible to overflow the qa response Q with current poll and \
- response quota.
-#endif
-
-static void
-crypto_callback(CpaCySymDpOpData *pOpData,
- __rte_unused CpaStatus status,
- __rte_unused CpaBoolean verifyResult)
-{
- uint32_t lcore_id;
- lcore_id = rte_lcore_id();
- struct qa_callbackQueue *callbackQ = &(qaCoreConf[lcore_id].callbackQueue);
-
- /*
- * Received a completion from the QA hardware.
- * Place the response on the return queue.
- */
- callbackQ->qaCallbackRing[callbackQ->head] = pOpData->pCallbackTag;
- callbackQ->head++;
- callbackQ->numEntries++;
- qaCoreConf[lcore_id].qaOutstandingRequests--;
-}
-
-static void
-qa_crypto_callback(CpaCySymDpOpData *pOpData, CpaStatus status,
- CpaBoolean verifyResult)
-{
- crypto_callback(pOpData, status, verifyResult);
-}
-
-/*
- * Each allocation from a particular memzone lasts for the life-time of
- * the application. No freeing of previous allocations will occur.
- */
-static void *
-alloc_memzone_region(uint32_t length, uint32_t lcore_id)
-{
- char *current_free_addr_ptr = NULL;
- struct lcore_memzone *lcore_memzone = &(qaCoreConf[lcore_id].lcoreMemzone);
-
- current_free_addr_ptr = lcore_memzone->next_free_address;
-
- if (current_free_addr_ptr + length >=
- (char *)lcore_memzone->memzone->addr + lcore_memzone->memzone->len) {
- printf("Crypto: No memory available in memzone\n");
- return NULL;
- }
- lcore_memzone->next_free_address = current_free_addr_ptr + length;
-
- return (void *)current_free_addr_ptr;
-}
-
-/*
- * Virtual to Physical Address translation is only executed during initialization
- * and not on the data-path.
- */
-static CpaPhysicalAddr
-qa_v2p(void *ptr)
-{
- const struct rte_memzone *memzone = NULL;
- uint32_t lcore_id = 0;
- RTE_LCORE_FOREACH(lcore_id) {
- memzone = qaCoreConf[lcore_id].lcoreMemzone.memzone;
-
- if ((char*) ptr >= (char *) memzone->addr &&
- (char*) ptr < ((char*) memzone->addr + memzone->len)) {
- return (CpaPhysicalAddr)
- (memzone->phys_addr + ((char *) ptr - (char*) memzone->addr));
- }
- }
- printf("Crypto: Corresponding physical address not found in memzone\n");
- return (CpaPhysicalAddr) 0;
-}
-
-static CpaStatus
-getCoreAffinity(Cpa32U *coreAffinity, const CpaInstanceHandle instanceHandle)
-{
- CpaInstanceInfo2 info;
- Cpa16U i = 0;
- CpaStatus status = CPA_STATUS_SUCCESS;
-
- memset(&info, 0, sizeof(CpaInstanceInfo2));
-
- status = cpaCyInstanceGetInfo2(instanceHandle, &info);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: Error getting instance info\n");
- return CPA_STATUS_FAIL;
- }
- for (i = 0; i < MAX_CORES; i++) {
- if (CPA_BITMAP_BIT_TEST(info.coreAffinity, i)) {
- *coreAffinity = i;
- return CPA_STATUS_SUCCESS;
- }
- }
- return CPA_STATUS_FAIL;
-}
-
-static CpaStatus
-get_crypto_instance_on_core(CpaInstanceHandle *pInstanceHandle,
- uint32_t lcore_id)
-{
- Cpa16U numInstances = 0, i = 0;
- CpaStatus status = CPA_STATUS_FAIL;
- CpaInstanceHandle *pLocalInstanceHandles = NULL;
- Cpa32U coreAffinity = 0;
-
- status = cpaCyGetNumInstances(&numInstances);
- if (CPA_STATUS_SUCCESS != status || numInstances == 0) {
- return CPA_STATUS_FAIL;
- }
-
- pLocalInstanceHandles = rte_malloc("pLocalInstanceHandles",
- sizeof(CpaInstanceHandle) * numInstances, RTE_CACHE_LINE_SIZE);
-
- if (NULL == pLocalInstanceHandles) {
- return CPA_STATUS_FAIL;
- }
- status = cpaCyGetInstances(numInstances, pLocalInstanceHandles);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: cpaCyGetInstances failed with status: %"PRId32"\n", status);
- rte_free((void *) pLocalInstanceHandles);
- return CPA_STATUS_FAIL;
- }
-
- for (i = 0; i < numInstances; i++) {
- status = getCoreAffinity(&coreAffinity, pLocalInstanceHandles[i]);
- if (CPA_STATUS_SUCCESS != status) {
- rte_free((void *) pLocalInstanceHandles);
- return CPA_STATUS_FAIL;
- }
- if (coreAffinity == lcore_id) {
- printf("Crypto: instance found on core %d\n", i);
- *pInstanceHandle = pLocalInstanceHandles[i];
- return CPA_STATUS_SUCCESS;
- }
- }
- /* core affinity not found */
- rte_free((void *) pLocalInstanceHandles);
- return CPA_STATUS_FAIL;
-}
-
-static CpaStatus
-initCySymSession(const int pkt_cipher_alg,
- const int pkt_hash_alg, const CpaCySymHashMode hashMode,
- const CpaCySymCipherDirection crypto_direction,
- CpaCySymSessionCtx **ppSessionCtx,
- const CpaInstanceHandle cyInstanceHandle,
- const uint32_t lcore_id)
-{
- Cpa32U sessionCtxSizeInBytes = 0;
- CpaStatus status = CPA_STATUS_FAIL;
- CpaBoolean isCrypto = CPA_TRUE, isHmac = CPA_TRUE;
- CpaCySymSessionSetupData sessionSetupData;
-
- memset(&sessionSetupData, 0, sizeof(CpaCySymSessionSetupData));
-
- /* Assumption: key length is set to each algorithm's max length */
- switch (pkt_cipher_alg) {
- case NO_CIPHER:
- isCrypto = CPA_FALSE;
- break;
- case CIPHER_DES:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_DES_ECB;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_64_IN_BYTES;
- break;
- case CIPHER_DES_CBC:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_DES_CBC;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_64_IN_BYTES;
- break;
- case CIPHER_DES3:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_3DES_ECB;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_192_IN_BYTES;
- break;
- case CIPHER_DES3_CBC:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_3DES_CBC;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_192_IN_BYTES;
- break;
- case CIPHER_AES:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_AES_ECB;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_128_IN_BYTES;
- break;
- case CIPHER_AES_CBC_128:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_AES_CBC;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_128_IN_BYTES;
- break;
- case CIPHER_KASUMI_F8:
- sessionSetupData.cipherSetupData.cipherAlgorithm =
- CPA_CY_SYM_CIPHER_KASUMI_F8;
- sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
- KEY_SIZE_128_IN_BYTES;
- break;
- default:
- printf("Crypto: Undefined Cipher specified\n");
- break;
- }
- /* Set the cipher direction */
- if (isCrypto) {
- sessionSetupData.cipherSetupData.cipherDirection = crypto_direction;
- sessionSetupData.cipherSetupData.pCipherKey =
- g_crypto_hash_keys.cipher_key;
- sessionSetupData.symOperation = CPA_CY_SYM_OP_CIPHER;
- }
-
- /* Setup Hash common fields */
- switch (pkt_hash_alg) {
- case NO_HASH:
- isHmac = CPA_FALSE;
- break;
- case HASH_AES_XCBC:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_AES_XCBC;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- AES_XCBC_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_AES_XCBC_96:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_AES_XCBC;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- AES_XCBC_96_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_MD5:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- MD5_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_SHA1:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- SHA1_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_SHA1_96:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- SHA1_96_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_SHA224:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA224;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- SHA224_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_SHA256:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA256;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- SHA256_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_SHA384:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA384;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- SHA384_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_SHA512:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA512;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- SHA512_DIGEST_LENGTH_IN_BYTES;
- break;
- case HASH_KASUMI_F9:
- sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_KASUMI_F9;
- sessionSetupData.hashSetupData.digestResultLenInBytes =
- KASUMI_DIGEST_LENGTH_IN_BYTES;
- break;
- default:
- printf("Crypto: Undefined Hash specified\n");
- break;
- }
- if (isHmac) {
- sessionSetupData.hashSetupData.hashMode = hashMode;
- sessionSetupData.symOperation = CPA_CY_SYM_OP_HASH;
- /* If using authenticated hash setup key lengths */
- if (CPA_CY_SYM_HASH_MODE_AUTH == hashMode) {
- /* Use a common max length key */
- sessionSetupData.hashSetupData.authModeSetupData.authKey =
- g_crypto_hash_keys.hash_key;
- switch (pkt_hash_alg) {
- case HASH_AES_XCBC:
- case HASH_AES_XCBC_96:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- AES_XCBC_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_MD5:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- SHA1_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_SHA1:
- case HASH_SHA1_96:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- SHA1_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_SHA224:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- SHA224_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_SHA256:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- SHA256_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_SHA384:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- SHA384_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_SHA512:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- SHA512_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- case HASH_KASUMI_F9:
- sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
- KASUMI_AUTH_KEY_LENGTH_IN_BYTES;
- break;
- default:
- printf("Crypto: Undefined Hash specified\n");
- return CPA_STATUS_FAIL;
- }
- }
- }
-
- /* Only high priority supported */
- sessionSetupData.sessionPriority = CPA_CY_PRIORITY_HIGH;
-
- /* If chaining algorithms */
- if (isCrypto && isHmac) {
- sessionSetupData.symOperation = CPA_CY_SYM_OP_ALGORITHM_CHAINING;
- /* @assumption Alg Chain order is cipher then hash for encrypt
- * and hash then cipher then has for decrypt*/
- if (CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT == crypto_direction) {
- sessionSetupData.algChainOrder =
- CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
- } else {
- sessionSetupData.algChainOrder =
- CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
- }
- }
- if (!isCrypto && !isHmac) {
- *ppSessionCtx = NULL;
- return CPA_STATUS_SUCCESS;
- }
-
- /* Set flags for digest operations */
- sessionSetupData.digestIsAppended = CPA_FALSE;
- sessionSetupData.verifyDigest = CPA_TRUE;
-
- /* Get the session context size based on the crypto and/or hash operations*/
- status = cpaCySymDpSessionCtxGetSize(cyInstanceHandle, &sessionSetupData,
- &sessionCtxSizeInBytes);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: cpaCySymDpSessionCtxGetSize error, status: %"PRId32"\n",
- status);
- return CPA_STATUS_FAIL;
- }
-
- *ppSessionCtx = alloc_memzone_region(sessionCtxSizeInBytes, lcore_id);
- if (NULL == *ppSessionCtx) {
- printf("Crypto: Failed to allocate memory for Session Context\n");
- return CPA_STATUS_FAIL;
- }
-
- status = cpaCySymDpInitSession(cyInstanceHandle, &sessionSetupData,
- *ppSessionCtx);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: cpaCySymDpInitSession failed with status %"PRId32"\n", status);
- return CPA_STATUS_FAIL;
- }
- return CPA_STATUS_SUCCESS;
-}
-
-static CpaStatus
-initSessionDataTables(struct qa_core_conf *qaCoreConf,uint32_t lcore_id)
-{
- Cpa32U i = 0, j = 0;
- CpaStatus status = CPA_STATUS_FAIL;
- for (i = 0; i < NUM_CRYPTO; i++) {
- for (j = 0; j < NUM_HMAC; j++) {
- if (((i == CIPHER_KASUMI_F8) && (j != NO_HASH) && (j != HASH_KASUMI_F9)) ||
- ((i != NO_CIPHER) && (i != CIPHER_KASUMI_F8) && (j == HASH_KASUMI_F9)))
- continue;
- status = initCySymSession(i, j, CPA_CY_SYM_HASH_MODE_AUTH,
- CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT,
- &qaCoreConf->encryptSessionHandleTbl[i][j],
- qaCoreConf->instanceHandle,
- lcore_id);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: Failed to initialize Encrypt sessions\n");
- return CPA_STATUS_FAIL;
- }
- status = initCySymSession(i, j, CPA_CY_SYM_HASH_MODE_AUTH,
- CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT,
- &qaCoreConf->decryptSessionHandleTbl[i][j],
- qaCoreConf->instanceHandle,
- lcore_id);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: Failed to initialize Decrypt sessions\n");
- return CPA_STATUS_FAIL;
- }
- }
- }
- return CPA_STATUS_SUCCESS;
-}
-
-int
-crypto_init(void)
-{
- if (CPA_STATUS_SUCCESS != icp_sal_userStartMultiProcess("SSL",CPA_FALSE)) {
- printf("Crypto: Could not start sal for user space\n");
- return CPA_STATUS_FAIL;
- }
- printf("Crypto: icp_sal_userStartMultiProcess(\"SSL\",CPA_FALSE)\n");
- return 0;
-}
-
-/*
- * Per core initialisation
- */
-int
-per_core_crypto_init(uint32_t lcore_id)
-{
- CpaStatus status = CPA_STATUS_FAIL;
- char memzone_name[RTE_MEMZONE_NAMESIZE];
-
- int socketID = rte_lcore_to_socket_id(lcore_id);
-
- /* Allocate software ring for response messages. */
-
- qaCoreConf[lcore_id].callbackQueue.head = 0;
- qaCoreConf[lcore_id].callbackQueue.tail = 0;
- qaCoreConf[lcore_id].callbackQueue.numEntries = 0;
- qaCoreConf[lcore_id].kickFreq = 0;
- qaCoreConf[lcore_id].qaOutstandingRequests = 0;
- qaCoreConf[lcore_id].numResponseAttempts = 0;
-
- /* Initialise and reserve lcore memzone for virt2phys translation */
- snprintf(memzone_name,
- RTE_MEMZONE_NAMESIZE,
- "lcore_%u",
- lcore_id);
-
- qaCoreConf[lcore_id].lcoreMemzone.memzone = rte_memzone_reserve(
- memzone_name,
- LCORE_MEMZONE_SIZE,
- socketID,
- 0);
- if (NULL == qaCoreConf[lcore_id].lcoreMemzone.memzone) {
- printf("Crypto: Error allocating memzone on lcore %u\n",lcore_id);
- return -1;
- }
- qaCoreConf[lcore_id].lcoreMemzone.next_free_address =
- qaCoreConf[lcore_id].lcoreMemzone.memzone->addr;
-
- qaCoreConf[lcore_id].pPacketIV = alloc_memzone_region(IV_LENGTH_16_BYTES,
- lcore_id);
-
- if (NULL == qaCoreConf[lcore_id].pPacketIV ) {
- printf("Crypto: Failed to allocate memory for Initialization Vector\n");
- return -1;
- }
-
- memcpy(qaCoreConf[lcore_id].pPacketIV, &g_crypto_hash_keys.iv,
- IV_LENGTH_16_BYTES);
-
- qaCoreConf[lcore_id].packetIVPhy = qa_v2p(qaCoreConf[lcore_id].pPacketIV);
- if (0 == qaCoreConf[lcore_id].packetIVPhy) {
- printf("Crypto: Invalid physical address for Initialization Vector\n");
- return -1;
- }
-
- /*
- * Obtain the instance handle that is mapped to the current lcore.
- * This can fail if an instance is not mapped to a bank which has been
- * affinitized to the current lcore.
- */
- status = get_crypto_instance_on_core(&(qaCoreConf[lcore_id].instanceHandle),
- lcore_id);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: get_crypto_instance_on_core failed with status: %"PRId32"\n",
- status);
- return -1;
- }
-
- status = cpaCySymDpRegCbFunc(qaCoreConf[lcore_id].instanceHandle,
- (CpaCySymDpCbFunc) qa_crypto_callback);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: cpaCySymDpRegCbFunc failed with status: %"PRId32"\n", status);
- return -1;
- }
-
- /*
- * Set the address translation callback for virtual to physcial address
- * mapping. This will be called by the QAT driver during initialisation only.
- */
- status = cpaCySetAddressTranslation(qaCoreConf[lcore_id].instanceHandle,
- (CpaVirtualToPhysical) qa_v2p);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: cpaCySetAddressTranslation failed with status: %"PRId32"\n",
- status);
- return -1;
- }
-
- status = initSessionDataTables(&qaCoreConf[lcore_id],lcore_id);
- if (CPA_STATUS_SUCCESS != status) {
- printf("Crypto: Failed to allocate all session tables.");
- return -1;
- }
- return 0;
-}
-
-static CpaStatus
-enqueueOp(CpaCySymDpOpData *opData, uint32_t lcore_id)
-{
-
- CpaStatus status;
-
- /*
- * Assumption is there is no requirement to do load balancing between
- * acceleration units - that is one acceleration unit is tied to a core.
- */
- opData->instanceHandle = qaCoreConf[lcore_id].instanceHandle;
-
- if ((++qaCoreConf[lcore_id].kickFreq) % CRYPTO_BURST_TX == 0) {
- status = cpaCySymDpEnqueueOp(opData, CPA_TRUE);
- } else {
- status = cpaCySymDpEnqueueOp(opData, CPA_FALSE);
- }
-
- qaCoreConf[lcore_id].qaOutstandingRequests++;
-
- return status;
-}
-
-void
-crypto_flush_tx_queue(uint32_t lcore_id)
-{
-
- cpaCySymDpPerformOpNow(qaCoreConf[lcore_id].instanceHandle);
-}
-
-enum crypto_result
-crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
-{
- CpaCySymDpOpData *opData =
- rte_pktmbuf_mtod_offset(rte_buff, CpaCySymDpOpData *,
- CRYPTO_OFFSET_TO_OPDATA);
- uint32_t lcore_id;
-
- if (unlikely(c >= NUM_CRYPTO || h >= NUM_HMAC))
- return CRYPTO_RESULT_FAIL;
-
- lcore_id = rte_lcore_id();
-
- memset(opData, 0, sizeof(CpaCySymDpOpData));
-
- opData->srcBuffer = opData->dstBuffer = PACKET_DATA_START_PHYS(rte_buff);
- opData->srcBufferLen = opData->dstBufferLen = rte_buff->data_len;
- opData->sessionCtx = qaCoreConf[lcore_id].encryptSessionHandleTbl[c][h];
- opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
- + CRYPTO_OFFSET_TO_OPDATA;
- opData->pCallbackTag = rte_buff;
-
- /* if no crypto or hash operations are specified return fail */
- if (NO_CIPHER == c && NO_HASH == h)
- return CRYPTO_RESULT_FAIL;
-
- if (NO_CIPHER != c) {
- opData->pIv = qaCoreConf[lcore_id].pPacketIV;
- opData->iv = qaCoreConf[lcore_id].packetIVPhy;
-
- if (CIPHER_AES_CBC_128 == c)
- opData->ivLenInBytes = IV_LENGTH_16_BYTES;
- else
- opData->ivLenInBytes = IV_LENGTH_8_BYTES;
-
- opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
- opData->messageLenToCipherInBytes = rte_buff->data_len
- - CRYPTO_START_OFFSET;
- /*
- * Work around for padding, message length has to be a multiple of
- * block size.
- */
- opData->messageLenToCipherInBytes -= opData->messageLenToCipherInBytes
- % CIPHER_BLOCK_DEFAULT_SIZE;
- }
-
- if (NO_HASH != h) {
-
- opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
- opData->messageLenToHashInBytes = rte_buff->data_len
- - HASH_START_OFFSET;
- /*
- * Work around for padding, message length has to be a multiple of block
- * size.
- */
- opData->messageLenToHashInBytes -= opData->messageLenToHashInBytes
- % HASH_BLOCK_DEFAULT_SIZE;
-
- /*
- * Assumption: Ok ignore the passed digest pointer and place HMAC at end
- * of packet.
- */
- opData->digestResult = rte_buff->buf_physaddr + rte_buff->data_len;
- }
-
- if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
- /*
- * Failed to place a packet on the hardware queue.
- * Most likely because the QA hardware is busy.
- */
- return CRYPTO_RESULT_FAIL;
- }
- return CRYPTO_RESULT_IN_PROGRESS;
-}
-
-enum crypto_result
-crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
-{
-
- CpaCySymDpOpData *opData = rte_pktmbuf_mtod_offset(rte_buff, void *,
- CRYPTO_OFFSET_TO_OPDATA);
- uint32_t lcore_id;
-
- if (unlikely(c >= NUM_CRYPTO || h >= NUM_HMAC))
- return CRYPTO_RESULT_FAIL;
-
- lcore_id = rte_lcore_id();
-
- memset(opData, 0, sizeof(CpaCySymDpOpData));
-
- opData->dstBuffer = opData->srcBuffer = PACKET_DATA_START_PHYS(rte_buff);
- opData->dstBufferLen = opData->srcBufferLen = rte_buff->data_len;
- opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
- + CRYPTO_OFFSET_TO_OPDATA;
- opData->sessionCtx = qaCoreConf[lcore_id].decryptSessionHandleTbl[c][h];
- opData->pCallbackTag = rte_buff;
-
- /* if no crypto or hmac operations are specified return fail */
- if (NO_CIPHER == c && NO_HASH == h)
- return CRYPTO_RESULT_FAIL;
-
- if (NO_CIPHER != c) {
- opData->pIv = qaCoreConf[lcore_id].pPacketIV;
- opData->iv = qaCoreConf[lcore_id].packetIVPhy;
-
- if (CIPHER_AES_CBC_128 == c)
- opData->ivLenInBytes = IV_LENGTH_16_BYTES;
- else
- opData->ivLenInBytes = IV_LENGTH_8_BYTES;
-
- opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
- opData->messageLenToCipherInBytes = rte_buff->data_len
- - CRYPTO_START_OFFSET;
-
- /*
- * Work around for padding, message length has to be a multiple of block
- * size.
- */
- opData->messageLenToCipherInBytes -= opData->messageLenToCipherInBytes
- % CIPHER_BLOCK_DEFAULT_SIZE;
- }
- if (NO_HASH != h) {
- opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
- opData->messageLenToHashInBytes = rte_buff->data_len
- - HASH_START_OFFSET;
- /*
- * Work around for padding, message length has to be a multiple of block
- * size.
- */
- opData->messageLenToHashInBytes -= opData->messageLenToHashInBytes
- % HASH_BLOCK_DEFAULT_SIZE;
- opData->digestResult = rte_buff->buf_physaddr + rte_buff->data_len;
- }
-
- if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
- /*
- * Failed to place a packet on the hardware queue.
- * Most likely because the QA hardware is busy.
- */
- return CRYPTO_RESULT_FAIL;
- }
- return CRYPTO_RESULT_IN_PROGRESS;
-}
-
-void *
-crypto_get_next_response(void)
-{
- uint32_t lcore_id;
- lcore_id = rte_lcore_id();
- struct qa_callbackQueue *callbackQ = &(qaCoreConf[lcore_id].callbackQueue);
- void *entry = NULL;
-
- if (callbackQ->numEntries) {
- entry = callbackQ->qaCallbackRing[callbackQ->tail];
- callbackQ->tail++;
- callbackQ->numEntries--;
- }
-
- /* If there are no outstanding requests no need to poll, return entry */
- if (qaCoreConf[lcore_id].qaOutstandingRequests == 0)
- return entry;
-
- if (callbackQ->numEntries < CRYPTO_QUEUED_RESP_POLL_THRESHOLD
- && qaCoreConf[lcore_id].numResponseAttempts++
- % GET_NEXT_RESPONSE_FREQ == 0) {
- /*
- * Only poll the hardware when there is less than
- * CRYPTO_QUEUED_RESP_POLL_THRESHOLD elements in the software queue
- */
- icp_sal_CyPollDpInstance(qaCoreConf[lcore_id].instanceHandle,
- CRYPTO_MAX_RESPONSE_QUOTA);
- }
- return entry;
-}
diff --git a/examples/dpdk_qat/main.c b/examples/dpdk_qat/main.c
deleted file mode 100644
index aa9b1d5c..00000000
--- a/examples/dpdk_qat/main.c
+++ /dev/null
@@ -1,821 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <sys/types.h>
-#include <string.h>
-#include <sys/queue.h>
-#include <stdarg.h>
-#include <errno.h>
-#include <getopt.h>
-
-#include <rte_common.h>
-#include <rte_byteorder.h>
-#include <rte_log.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_launch.h>
-#include <rte_atomic.h>
-#include <rte_cycles.h>
-#include <rte_prefetch.h>
-#include <rte_lcore.h>
-#include <rte_per_lcore.h>
-#include <rte_branch_prediction.h>
-#include <rte_interrupts.h>
-#include <rte_pci.h>
-#include <rte_random.h>
-#include <rte_debug.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_ip.h>
-#include <rte_string_fns.h>
-
-#include "crypto.h"
-
-#define NB_MBUF (32 * 1024)
-
-#define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-
-#define TX_QUEUE_FLUSH_MASK 0xFFFFFFFF
-#define TSC_COUNT_LIMIT 1000
-
-#define ACTION_ENCRYPT 1
-#define ACTION_DECRYPT 2
-
-/*
- * Configurable number of RX/TX ring descriptors
- */
-#define RTE_TEST_RX_DESC_DEFAULT 128
-#define RTE_TEST_TX_DESC_DEFAULT 512
-static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
-static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
-
-/* ethernet addresses of ports */
-static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
-
-/* mask of enabled ports */
-static unsigned enabled_port_mask = 0;
-static int promiscuous_on = 1; /**< Ports set in promiscuous mode on by default. */
-
-/* list of enabled ports */
-static uint32_t dst_ports[RTE_MAX_ETHPORTS];
-
-struct mbuf_table {
- uint16_t len;
- struct rte_mbuf *m_table[MAX_PKT_BURST];
-};
-
-struct lcore_rx_queue {
- uint8_t port_id;
- uint8_t queue_id;
-};
-
-#define MAX_RX_QUEUE_PER_LCORE 16
-
-#define MAX_LCORE_PARAMS 1024
-struct lcore_params {
- uint8_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
-};
-
-static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
-static struct lcore_params lcore_params_array_default[] = {
- {0, 0, 2},
- {0, 1, 2},
- {0, 2, 2},
- {1, 0, 2},
- {1, 1, 2},
- {1, 2, 2},
- {2, 0, 2},
- {3, 0, 3},
- {3, 1, 3},
-};
-
-static struct lcore_params * lcore_params = lcore_params_array_default;
-static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
- sizeof(lcore_params_array_default[0]);
-
-static struct rte_eth_conf port_conf = {
- .rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
- .split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 1, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
- },
- .rx_adv_conf = {
- .rss_conf = {
- .rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
- },
- },
- .txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- },
-};
-
-static struct rte_mempool * pktmbuf_pool[RTE_MAX_NUMA_NODES];
-
-struct lcore_conf {
- uint64_t tsc;
- uint64_t tsc_count;
- uint32_t tx_mask;
- uint16_t n_rx_queue;
- uint16_t rx_queue_list_pos;
- struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
- uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
- struct mbuf_table rx_mbuf;
- uint32_t rx_mbuf_pos;
- uint32_t rx_curr_queue;
- struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS];
-} __rte_cache_aligned;
-
-static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
-
-static inline struct rte_mbuf *
-nic_rx_get_packet(struct lcore_conf *qconf)
-{
- struct rte_mbuf *pkt;
-
- if (unlikely(qconf->n_rx_queue == 0))
- return NULL;
-
- /* Look for the next queue with packets; return if none */
- if (unlikely(qconf->rx_mbuf_pos == qconf->rx_mbuf.len)) {
- uint32_t i;
-
- qconf->rx_mbuf_pos = 0;
- for (i = 0; i < qconf->n_rx_queue; i++) {
- qconf->rx_mbuf.len = rte_eth_rx_burst(
- qconf->rx_queue_list[qconf->rx_curr_queue].port_id,
- qconf->rx_queue_list[qconf->rx_curr_queue].queue_id,
- qconf->rx_mbuf.m_table, MAX_PKT_BURST);
-
- qconf->rx_curr_queue++;
- if (unlikely(qconf->rx_curr_queue == qconf->n_rx_queue))
- qconf->rx_curr_queue = 0;
- if (likely(qconf->rx_mbuf.len > 0))
- break;
- }
- if (unlikely(i == qconf->n_rx_queue))
- return NULL;
- }
-
- /* Get the next packet from the current queue; if last packet, go to next queue */
- pkt = qconf->rx_mbuf.m_table[qconf->rx_mbuf_pos];
- qconf->rx_mbuf_pos++;
-
- return pkt;
-}
-
-static inline void
-nic_tx_flush_queues(struct lcore_conf *qconf)
-{
- uint8_t portid;
-
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- struct rte_mbuf **m_table = NULL;
- uint16_t queueid, len;
- uint32_t n, i;
-
- if (likely((qconf->tx_mask & (1 << portid)) == 0))
- continue;
-
- len = qconf->tx_mbufs[portid].len;
- if (likely(len == 0))
- continue;
-
- queueid = qconf->tx_queue_id[portid];
- m_table = qconf->tx_mbufs[portid].m_table;
-
- n = rte_eth_tx_burst(portid, queueid, m_table, len);
- for (i = n; i < len; i++){
- rte_pktmbuf_free(m_table[i]);
- }
-
- qconf->tx_mbufs[portid].len = 0;
- }
-
- qconf->tx_mask = TX_QUEUE_FLUSH_MASK;
-}
-
-static inline void
-nic_tx_send_packet(struct rte_mbuf *pkt, uint8_t port)
-{
- struct lcore_conf *qconf;
- uint32_t lcoreid;
- uint16_t len;
-
- if (unlikely(pkt == NULL)) {
- return;
- }
-
- lcoreid = rte_lcore_id();
- qconf = &lcore_conf[lcoreid];
-
- len = qconf->tx_mbufs[port].len;
- qconf->tx_mbufs[port].m_table[len] = pkt;
- len++;
-
- /* enough pkts to be sent */
- if (unlikely(len == MAX_PKT_BURST)) {
- uint32_t n, i;
- uint16_t queueid;
-
- queueid = qconf->tx_queue_id[port];
- n = rte_eth_tx_burst(port, queueid, qconf->tx_mbufs[port].m_table, MAX_PKT_BURST);
- for (i = n; i < MAX_PKT_BURST; i++){
- rte_pktmbuf_free(qconf->tx_mbufs[port].m_table[i]);
- }
-
- qconf->tx_mask &= ~(1 << port);
- len = 0;
- }
-
- qconf->tx_mbufs[port].len = len;
-}
-
-/* main processing loop */
-static __attribute__((noreturn)) int
-main_loop(__attribute__((unused)) void *dummy)
-{
- uint32_t lcoreid;
- struct lcore_conf *qconf;
- const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
-
- lcoreid = rte_lcore_id();
- qconf = &lcore_conf[lcoreid];
-
- printf("Thread %u starting...\n", lcoreid);
-
- for (;;) {
- struct rte_mbuf *pkt;
- uint32_t pkt_from_nic_rx = 0;
- uint8_t port;
-
- /* Flush TX queues */
- qconf->tsc_count++;
- if (unlikely(qconf->tsc_count == TSC_COUNT_LIMIT)) {
- uint64_t tsc, diff_tsc;
-
- tsc = rte_rdtsc();
-
- diff_tsc = tsc - qconf->tsc;
- if (unlikely(diff_tsc > drain_tsc)) {
- nic_tx_flush_queues(qconf);
- crypto_flush_tx_queue(lcoreid);
- qconf->tsc = tsc;
- }
-
- qconf->tsc_count = 0;
- }
-
- /*
- * Check the Intel QuickAssist queues first
- *
- ***/
- pkt = (struct rte_mbuf *) crypto_get_next_response();
- if (pkt == NULL) {
- pkt = nic_rx_get_packet(qconf);
- pkt_from_nic_rx = 1;
- }
- if (pkt == NULL)
- continue;
- /* Send packet to either QAT encrypt, QAT decrypt or NIC TX */
- if (pkt_from_nic_rx) {
- struct ipv4_hdr *ip = rte_pktmbuf_mtod_offset(pkt,
- struct ipv4_hdr *,
- sizeof(struct ether_hdr));
- if (ip->src_addr & rte_cpu_to_be_32(ACTION_ENCRYPT)) {
- if (CRYPTO_RESULT_FAIL == crypto_encrypt(pkt,
- (enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
- (enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
- rte_pktmbuf_free(pkt);
- continue;
- }
-
- if (ip->src_addr & rte_cpu_to_be_32(ACTION_DECRYPT)) {
- if(CRYPTO_RESULT_FAIL == crypto_decrypt(pkt,
- (enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
- (enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
- rte_pktmbuf_free(pkt);
- continue;
- }
- }
-
- port = dst_ports[pkt->port];
-
- /* Transmit the packet */
- nic_tx_send_packet(pkt, (uint8_t)port);
- }
-}
-
-static inline unsigned
-get_port_max_rx_queues(uint8_t port_id)
-{
- struct rte_eth_dev_info dev_info;
-
- rte_eth_dev_info_get(port_id, &dev_info);
- return dev_info.max_rx_queues;
-}
-
-static inline unsigned
-get_port_max_tx_queues(uint8_t port_id)
-{
- struct rte_eth_dev_info dev_info;
-
- rte_eth_dev_info_get(port_id, &dev_info);
- return dev_info.max_tx_queues;
-}
-
-static int
-check_lcore_params(void)
-{
- uint16_t i;
-
- for (i = 0; i < nb_lcore_params; ++i) {
- if (lcore_params[i].queue_id >= get_port_max_rx_queues(lcore_params[i].port_id)) {
- printf("invalid queue number: %hhu\n", lcore_params[i].queue_id);
- return -1;
- }
- if (!rte_lcore_is_enabled(lcore_params[i].lcore_id)) {
- printf("error: lcore %hhu is not enabled in lcore mask\n",
- lcore_params[i].lcore_id);
- return -1;
- }
- }
- return 0;
-}
-
-static int
-check_port_config(const unsigned nb_ports)
-{
- unsigned portid;
- uint16_t i;
-
- for (i = 0; i < nb_lcore_params; ++i) {
- portid = lcore_params[i].port_id;
- if ((enabled_port_mask & (1 << portid)) == 0) {
- printf("port %u is not enabled in port mask\n", portid);
- return -1;
- }
- if (portid >= nb_ports) {
- printf("port %u is not present on the board\n", portid);
- return -1;
- }
- }
- return 0;
-}
-
-static uint8_t
-get_port_n_rx_queues(const uint8_t port)
-{
- int queue = -1;
- uint16_t i;
-
- for (i = 0; i < nb_lcore_params; ++i) {
- if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
- queue = lcore_params[i].queue_id;
- }
- return (uint8_t)(++queue);
-}
-
-static int
-init_lcore_rx_queues(void)
-{
- uint16_t i, nb_rx_queue;
- uint8_t lcore;
-
- for (i = 0; i < nb_lcore_params; ++i) {
- lcore = lcore_params[i].lcore_id;
- nb_rx_queue = lcore_conf[lcore].n_rx_queue;
- if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
- printf("error: too many queues (%u) for lcore: %u\n",
- (unsigned)nb_rx_queue + 1, (unsigned)lcore);
- return -1;
- }
- lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
- lcore_params[i].port_id;
- lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
- lcore_params[i].queue_id;
- lcore_conf[lcore].n_rx_queue++;
- }
- return 0;
-}
-
-/* display usage */
-static void
-print_usage(const char *prgname)
-{
- printf ("%s [EAL options] -- -p PORTMASK [--no-promisc]"
- " [--config '(port,queue,lcore)[,(port,queue,lcore)]'\n"
- " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
- " --no-promisc: disable promiscuous mode (default is ON)\n"
- " --config '(port,queue,lcore)': rx queues configuration\n",
- prgname);
-}
-
-static unsigned
-parse_portmask(const char *portmask)
-{
- char *end = NULL;
- unsigned pm;
-
- /* parse hexadecimal string */
- pm = strtoul(portmask, &end, 16);
- if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
- return 0;
-
- return pm;
-}
-
-static int
-parse_config(const char *q_arg)
-{
- char s[256];
- const char *p, *p_end = q_arg;
- char *end;
- enum fieldnames {
- FLD_PORT = 0,
- FLD_QUEUE,
- FLD_LCORE,
- _NUM_FLD
- };
- unsigned long int_fld[_NUM_FLD];
- char *str_fld[_NUM_FLD];
- int i;
- unsigned size;
-
- nb_lcore_params = 0;
-
- while ((p = strchr(p_end,'(')) != NULL) {
- if (nb_lcore_params >= MAX_LCORE_PARAMS) {
- printf("exceeded max number of lcore params: %hu\n",
- nb_lcore_params);
- return -1;
- }
- ++p;
- if((p_end = strchr(p,')')) == NULL)
- return -1;
-
- size = p_end - p;
- if(size >= sizeof(s))
- return -1;
-
- snprintf(s, sizeof(s), "%.*s", size, p);
- if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
- return -1;
- for (i = 0; i < _NUM_FLD; i++) {
- errno = 0;
- int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
- return -1;
- }
- lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
- lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
- lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
- ++nb_lcore_params;
- }
- lcore_params = lcore_params_array;
- return 0;
-}
-
-/* Parse the argument given in the command line of the application */
-static int
-parse_args(int argc, char **argv)
-{
- int opt, ret;
- char **argvopt;
- int option_index;
- char *prgname = argv[0];
- static struct option lgopts[] = {
- {"config", 1, 0, 0},
- {"no-promisc", 0, 0, 0},
- {NULL, 0, 0, 0}
- };
-
- argvopt = argv;
-
- while ((opt = getopt_long(argc, argvopt, "p:",
- lgopts, &option_index)) != EOF) {
-
- switch (opt) {
- /* portmask */
- case 'p':
- enabled_port_mask = parse_portmask(optarg);
- if (enabled_port_mask == 0) {
- printf("invalid portmask\n");
- print_usage(prgname);
- return -1;
- }
- break;
-
- /* long options */
- case 0:
- if (strcmp(lgopts[option_index].name, "config") == 0) {
- ret = parse_config(optarg);
- if (ret) {
- printf("invalid config\n");
- print_usage(prgname);
- return -1;
- }
- }
- if (strcmp(lgopts[option_index].name, "no-promisc") == 0) {
- printf("Promiscuous mode disabled\n");
- promiscuous_on = 0;
- }
- break;
- default:
- print_usage(prgname);
- return -1;
- }
- }
-
- if (enabled_port_mask == 0) {
- printf("portmask not specified\n");
- print_usage(prgname);
- return -1;
- }
-
- if (optind >= 0)
- argv[optind-1] = prgname;
-
- ret = optind-1;
- optind = 0; /* reset getopt lib */
- return ret;
-}
-
-static void
-print_ethaddr(const char *name, const struct ether_addr *eth_addr)
-{
- char buf[ETHER_ADDR_FMT_SIZE];
- ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
- printf("%s%s", name, buf);
-}
-
-static int
-init_mem(void)
-{
- int socketid;
- unsigned lcoreid;
- char s[64];
-
- RTE_LCORE_FOREACH(lcoreid) {
- socketid = rte_lcore_to_socket_id(lcoreid);
- if (socketid >= RTE_MAX_NUMA_NODES) {
- printf("Socket %d of lcore %u is out of range %d\n",
- socketid, lcoreid, RTE_MAX_NUMA_NODES);
- return -1;
- }
- if (pktmbuf_pool[socketid] == NULL) {
- snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
- pktmbuf_pool[socketid] =
- rte_pktmbuf_pool_create(s, NB_MBUF, 32, 0,
- RTE_MBUF_DEFAULT_BUF_SIZE, socketid);
- if (pktmbuf_pool[socketid] == NULL) {
- printf("Cannot init mbuf pool on socket %d\n", socketid);
- return -1;
- }
- printf("Allocated mbuf pool on socket %d\n", socketid);
- }
- }
- return 0;
-}
-
-int
-main(int argc, char **argv)
-{
- struct lcore_conf *qconf;
- struct rte_eth_link link;
- int ret;
- unsigned nb_ports;
- uint16_t queueid;
- unsigned lcoreid;
- uint32_t nb_tx_queue;
- uint8_t portid, nb_rx_queue, queue, socketid, last_port;
- unsigned nb_ports_in_mask = 0;
-
- /* init EAL */
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- return -1;
- argc -= ret;
- argv += ret;
-
- /* parse application arguments (after the EAL ones) */
- ret = parse_args(argc, argv);
- if (ret < 0)
- return -1;
-
- if (check_lcore_params() < 0)
- rte_panic("check_lcore_params failed\n");
-
- ret = init_lcore_rx_queues();
- if (ret < 0)
- return -1;
-
- ret = init_mem();
- if (ret < 0)
- return -1;
-
- nb_ports = rte_eth_dev_count();
-
- if (check_port_config(nb_ports) < 0)
- rte_panic("check_port_config failed\n");
-
- /* reset dst_ports */
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
- dst_ports[portid] = 0;
- last_port = 0;
-
- /*
- * Each logical core is assigned a dedicated TX queue on each port.
- */
- for (portid = 0; portid < nb_ports; portid++) {
- /* skip ports that are not enabled */
- if ((enabled_port_mask & (1 << portid)) == 0)
- continue;
-
- if (nb_ports_in_mask % 2) {
- dst_ports[portid] = last_port;
- dst_ports[last_port] = portid;
- }
- else
- last_port = portid;
-
- nb_ports_in_mask++;
- }
- if (nb_ports_in_mask % 2) {
- printf("Notice: odd number of ports in portmask.\n");
- dst_ports[last_port] = last_port;
- }
-
- /* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
- /* skip ports that are not enabled */
- if ((enabled_port_mask & (1 << portid)) == 0) {
- printf("\nSkipping disabled port %d\n", portid);
- continue;
- }
-
- /* init port */
- printf("Initializing port %d ... ", portid );
- fflush(stdout);
-
- nb_rx_queue = get_port_n_rx_queues(portid);
- if (nb_rx_queue > get_port_max_rx_queues(portid))
- rte_panic("Number of rx queues %d exceeds max number of rx queues %u"
- " for port %d\n", nb_rx_queue, get_port_max_rx_queues(portid),
- portid);
- nb_tx_queue = rte_lcore_count();
- if (nb_tx_queue > get_port_max_tx_queues(portid))
- rte_panic("Number of lcores %u exceeds max number of tx queues %u"
- " for port %d\n", nb_tx_queue, get_port_max_tx_queues(portid),
- portid);
- printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
- nb_rx_queue, (unsigned)nb_tx_queue );
- ret = rte_eth_dev_configure(portid, nb_rx_queue,
- (uint16_t)nb_tx_queue, &port_conf);
- if (ret < 0)
- rte_panic("Cannot configure device: err=%d, port=%d\n",
- ret, portid);
-
- rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
- print_ethaddr(" Address:", &ports_eth_addr[portid]);
- printf(", ");
-
- /* init one TX queue per couple (lcore,port) */
- queueid = 0;
- RTE_LCORE_FOREACH(lcoreid) {
- socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid);
- printf("txq=%u,%d,%d ", lcoreid, queueid, socketid);
- fflush(stdout);
- ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
- socketid,
- NULL);
- if (ret < 0)
- rte_panic("rte_eth_tx_queue_setup: err=%d, "
- "port=%d\n", ret, portid);
-
- qconf = &lcore_conf[lcoreid];
- qconf->tx_queue_id[portid] = queueid;
- queueid++;
- }
- printf("\n");
- }
-
- RTE_LCORE_FOREACH(lcoreid) {
- qconf = &lcore_conf[lcoreid];
- printf("\nInitializing rx queues on lcore %u ... ", lcoreid );
- fflush(stdout);
- /* init RX queues */
- for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
- portid = qconf->rx_queue_list[queue].port_id;
- queueid = qconf->rx_queue_list[queue].queue_id;
- socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid);
- printf("rxq=%d,%d,%d ", portid, queueid, socketid);
- fflush(stdout);
-
- ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
- socketid,
- NULL,
- pktmbuf_pool[socketid]);
- if (ret < 0)
- rte_panic("rte_eth_rx_queue_setup: err=%d,"
- "port=%d\n", ret, portid);
- }
- }
-
- printf("\n");
-
- /* start ports */
- for (portid = 0; portid < nb_ports; portid++) {
- if ((enabled_port_mask & (1 << portid)) == 0)
- continue;
- /* Start device */
- ret = rte_eth_dev_start(portid);
- if (ret < 0)
- rte_panic("rte_eth_dev_start: err=%d, port=%d\n",
- ret, portid);
-
- printf("done: Port %d ", portid);
-
- /* get link status */
- rte_eth_link_get(portid, &link);
- if (link.link_status)
- printf(" Link Up - speed %u Mbps - %s\n",
- (unsigned) link.link_speed,
- (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
- ("full-duplex") : ("half-duplex\n"));
- else
- printf(" Link Down\n");
- /*
- * If enabled, put device in promiscuous mode.
- * This allows IO forwarding mode to forward packets
- * to itself through 2 cross-connected ports of the
- * target machine.
- */
- if (promiscuous_on)
- rte_eth_promiscuous_enable(portid);
- }
- printf("Crypto: Initializing Crypto...\n");
- if (crypto_init() != 0)
- return -1;
-
- RTE_LCORE_FOREACH(lcoreid) {
- if (per_core_crypto_init(lcoreid) != 0) {
- printf("Crypto: Cannot init lcore crypto on lcore %u\n", (unsigned)lcoreid);
- return -1;
- }
- }
- printf("Crypto: Initialization complete\n");
- /* launch per-lcore init on every lcore */
- rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
- RTE_LCORE_FOREACH_SLAVE(lcoreid) {
- if (rte_eal_wait_lcore(lcoreid) < 0)
- return -1;
- }
-
- return 0;
-}
diff --git a/examples/ethtool/Makefile b/examples/ethtool/Makefile
index 995cd25b..30b42b70 100644
--- a/examples/ethtool/Makefile
+++ b/examples/ethtool/Makefile
@@ -46,4 +46,7 @@ else
DIRS-y += lib ethtool-app
endif
+DEPDIRS-ethtool-app := lib
+DEPDIRS-lib := librte_eal librte_ether
+
include $(RTE_SDK)/mk/rte.extsubdir.mk
diff --git a/examples/ethtool/ethtool-app/Makefile b/examples/ethtool/ethtool-app/Makefile
index 09c66ad1..96abf53b 100644
--- a/examples/ethtool/ethtool-app/Makefile
+++ b/examples/ethtool/ethtool-app/Makefile
@@ -50,5 +50,10 @@ CFLAGS += $(WERROR_FLAGS)
LDLIBS += -L$(subst ethtool-app,lib,$(RTE_OUTPUT))/lib
LDLIBS += -lrte_ethtool
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
+ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
+LDLIBS += -lrte_pmd_ixgbe
+endif
+endif
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/ethtool/ethtool-app/ethapp.c b/examples/ethtool/ethtool-app/ethapp.c
index 6aeaa061..35269ea2 100644
--- a/examples/ethtool/ethtool-app/ethapp.c
+++ b/examples/ethtool/ethtool-app/ethapp.c
@@ -185,6 +185,8 @@ pcmd_drvinfo_callback(__rte_unused void *ptr_params,
printf("Port %i driver: %s (ver: %s)\n",
id_port, info.driver, info.version
);
+ printf("firmware-version: %s\n", info.fw_version);
+ printf("bus-info: %s\n", info.bus_info);
}
}
diff --git a/examples/ethtool/ethtool-app/main.c b/examples/ethtool/ethtool-app/main.c
index 2c655d83..6d50d463 100644
--- a/examples/ethtool/ethtool-app/main.c
+++ b/examples/ethtool/ethtool-app/main.c
@@ -172,7 +172,6 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports)
"%s:%i: rte_eth_dev_start failed",
__FILE__, __LINE__
);
- rte_eth_promiscuous_enable(idx_port);
rte_eth_macaddr_get(idx_port, &ptr_port->mac_addr);
rte_spinlock_init(&ptr_port->lock);
}
diff --git a/examples/ethtool/lib/Makefile b/examples/ethtool/lib/Makefile
index 5b4991e2..266babad 100644
--- a/examples/ethtool/lib/Makefile
+++ b/examples/ethtool/lib/Makefile
@@ -54,8 +54,10 @@ SRCS-y := rte_ethtool.c
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-# internal dependencies
-DEPDIRS-y += lib/librte_eal
-DEPDIRS-y += lib/librte_ether
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
+ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
+LDLIBS += -lrte_pmd_ixgbe
+endif
+endif
include $(RTE_SDK)/mk/rte.extlib.mk
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 6f0ce848..7e465206 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,6 +36,9 @@
#include <rte_version.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
+#ifdef RTE_LIBRTE_IXGBE_PMD
+#include <rte_pmd_ixgbe.h>
+#endif
#include "rte_ethtool.h"
#define PKTPOOL_SIZE 512
@@ -48,12 +51,21 @@ rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo)
struct rte_eth_dev_info dev_info;
struct rte_dev_reg_info reg_info;
int n;
+ int ret;
if (drvinfo == NULL)
return -EINVAL;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ ret = rte_eth_dev_fw_version_get(port_id, drvinfo->fw_version,
+ sizeof(drvinfo->fw_version));
+ if (ret < 0)
+ printf("firmware version get error: (%s)\n", strerror(-ret));
+ else if (ret > 0)
+ printf("Insufficient fw version buffer size, "
+ "the minimun size should be %d\n", ret);
+
memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(port_id, &dev_info);
@@ -358,9 +370,12 @@ rte_ethtool_net_set_rx_mode(uint8_t port_id)
num_vfs = dev_info.max_vfs;
/* Set VF vf_rx_mode, VF unsupport status is discard */
- for (vf = 0; vf < num_vfs; vf++)
- rte_eth_dev_set_vf_rxmode(port_id, vf,
+ for (vf = 0; vf < num_vfs; vf++) {
+#ifdef RTE_LIBRTE_IXGBE_PMD
+ rte_pmd_ixgbe_set_vf_rxmode(port_id, vf,
ETH_VMDQ_ACCEPT_UNTAG, 0);
+#endif
+ }
/* Enable Rx vlan filter, VF unspport status is discard */
rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_FILTER_MASK);
diff --git a/examples/exception_path/Makefile b/examples/exception_path/Makefile
index 959914a2..4b6e0717 100644
--- a/examples/exception_path/Makefile
+++ b/examples/exception_path/Makefile
@@ -42,6 +42,7 @@ ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
$(info This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
all:
+clean:
else
# binary name
diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c
index 73d50b69..89bf1cc0 100644
--- a/examples/exception_path/main.c
+++ b/examples/exception_path/main.c
@@ -114,7 +114,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /* IP checksum offload disabled */
.hw_vlan_filter = 0, /* VLAN filtering disabled */
.jumbo_frame = 0, /* Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /* CRC stripped by hardware */
+ .hw_strip_crc = 1, /* CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index e1e32c66..71c1d12f 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -168,7 +168,7 @@ struct lcore_queue_conf {
} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
-static const struct rte_eth_conf port_conf = {
+static struct rte_eth_conf port_conf = {
.rxmode = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
@@ -176,7 +176,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 1, /**< Jumbo Frame Support enabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -265,8 +265,8 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
uint8_t queueid, uint8_t port_in)
{
struct rx_queue *rxq;
- uint32_t i, len, next_hop_ipv4;
- uint8_t next_hop_ipv6, port_out, ipv6;
+ uint32_t i, len, next_hop;
+ uint8_t port_out, ipv6;
int32_t len2;
ipv6 = 0;
@@ -290,9 +290,9 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);
/* Find destination port */
- if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop_ipv4) == 0 &&
- (enabled_port_mask & 1 << next_hop_ipv4) != 0) {
- port_out = next_hop_ipv4;
+ if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop) == 0 &&
+ (enabled_port_mask & 1 << next_hop) != 0) {
+ port_out = next_hop;
/* Build transmission burst for new port */
len = qconf->tx_mbufs[port_out].len;
@@ -326,9 +326,10 @@ l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
ip_hdr = rte_pktmbuf_mtod(m, struct ipv6_hdr *);
/* Find destination port */
- if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr, &next_hop_ipv6) == 0 &&
- (enabled_port_mask & 1 << next_hop_ipv6) != 0) {
- port_out = next_hop_ipv6;
+ if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
+ &next_hop) == 0 &&
+ (enabled_port_mask & 1 << next_hop) != 0) {
+ port_out = next_hop;
/* Build transmission burst for new port */
len = qconf->tx_mbufs[port_out].len;
@@ -586,7 +587,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -653,6 +654,74 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
}
}
+/* Check L3 packet type detection capablity of the NIC port */
+static int
+check_ptype(int portid)
+{
+ int i, ret;
+ int ptype_l3_ipv4 = 0, ptype_l3_ipv6 = 0;
+ uint32_t ptype_mask = RTE_PTYPE_L3_MASK;
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
+ if (ret <= 0)
+ return 0;
+
+ uint32_t ptypes[ret];
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
+ for (i = 0; i < ret; ++i) {
+ if (ptypes[i] & RTE_PTYPE_L3_IPV4)
+ ptype_l3_ipv4 = 1;
+ if (ptypes[i] & RTE_PTYPE_L3_IPV6)
+ ptype_l3_ipv6 = 1;
+ }
+
+ if (ptype_l3_ipv4 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
+
+ if (ptype_l3_ipv6 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
+
+ if (ptype_l3_ipv4 && ptype_l3_ipv6)
+ return 1;
+
+ return 0;
+
+}
+
+/* Parse packet type of a packet by SW */
+static inline void
+parse_ptype(struct rte_mbuf *m)
+{
+ struct ether_hdr *eth_hdr;
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ uint16_t ether_type;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ ether_type = eth_hdr->ether_type;
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+ m->packet_type = packet_type;
+}
+
+/* callback function to detect packet type for a queue of a port */
+static uint16_t
+cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ uint16_t max_pkts __rte_unused,
+ void *user_param __rte_unused)
+{
+ uint16_t i;
+
+ for (i = 0; i < nb_pkts; ++i)
+ parse_ptype(pkts[i]);
+
+ return nb_pkts;
+}
+
static int
init_routing_table(void)
{
@@ -846,6 +915,11 @@ main(int argc, char **argv)
qconf = &lcore_queue_conf[rx_lcore_id];
+ /* limit the frame size to the maximum supported by NIC */
+ rte_eth_dev_info_get(portid, &dev_info);
+ port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
+ dev_info.max_rx_pktlen, port_conf.rxmode.max_rx_pkt_len);
+
/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {
@@ -911,7 +985,6 @@ main(int argc, char **argv)
printf("txq=%u,%d ", lcore_id, queueid);
fflush(stdout);
- rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
@@ -944,6 +1017,12 @@ main(int argc, char **argv)
ret, portid);
rte_eth_promiscuous_enable(portid);
+
+ if (check_ptype(portid) == 0) {
+ rte_eth_add_rx_callback(portid, 0, cb_parse_ptype, NULL);
+ printf("Add Rx callback funciton to detect L3 packet type by SW :"
+ " port = %d\n", portid);
+ }
}
if (init_routing_table() < 0)
diff --git a/examples/ip_pipeline/Makefile b/examples/ip_pipeline/Makefile
index 58271173..dc7e0ddd 100644
--- a/examples/ip_pipeline/Makefile
+++ b/examples/ip_pipeline/Makefile
@@ -36,8 +36,6 @@ endif
# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
-DIRS-(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline
-
include $(RTE_SDK)/mk/rte.vars.mk
# binary name
diff --git a/examples/ip_pipeline/config/diagram-generator.py b/examples/ip_pipeline/config/diagram-generator.py
index 6b7170b0..17488330 100755
--- a/examples/ip_pipeline/config/diagram-generator.py
+++ b/examples/ip_pipeline/config/diagram-generator.py
@@ -36,7 +36,8 @@
# the DPDK ip_pipeline application.
#
# The input configuration file is translated to an output file in DOT syntax,
-# which is then used to create the image file using graphviz (www.graphviz.org).
+# which is then used to create the image file using graphviz
+# (www.graphviz.org).
#
from __future__ import print_function
@@ -94,6 +95,7 @@ DOT_GRAPH_END = \
# SOURCEx | SOURCEx | SOURCEx | PIPELINEy | SOURCEx
# SINKx | SINKx | PIPELINEy | SINKx | SINKx
+
#
# Parse the input configuration file to detect the graph nodes and edges
#
@@ -321,16 +323,17 @@ def process_config_file(cfgfile):
#
print('Creating image file "%s" ...' % imgfile)
if os.system('which dot > /dev/null'):
- print('Error: Unable to locate "dot" executable.' \
- 'Please install the "graphviz" package (www.graphviz.org).')
+ print('Error: Unable to locate "dot" executable.'
+ 'Please install the "graphviz" package (www.graphviz.org).')
return
os.system(dot_cmd)
if __name__ == '__main__':
- parser = argparse.ArgumentParser(description=\
- 'Create diagram for IP pipeline configuration file.')
+ parser = argparse.ArgumentParser(description='Create diagram for IP '
+ 'pipeline configuration '
+ 'file.')
parser.add_argument(
'-f',
diff --git a/examples/ip_pipeline/config/pipeline-to-core-mapping.py b/examples/ip_pipeline/config/pipeline-to-core-mapping.py
index c2050b82..7a4eaa20 100755
--- a/examples/ip_pipeline/config/pipeline-to-core-mapping.py
+++ b/examples/ip_pipeline/config/pipeline-to-core-mapping.py
@@ -39,15 +39,14 @@
#
from __future__ import print_function
-import sys
-import errno
-import os
-import re
+from collections import namedtuple
+import argparse
import array
+import errno
import itertools
+import os
import re
-import argparse
-from collections import namedtuple
+import sys
# default values
enable_stage0_traceout = 1
diff --git a/examples/ip_pipeline/config_parse.c b/examples/ip_pipeline/config_parse.c
index 8b372e94..0b761346 100644
--- a/examples/ip_pipeline/config_parse.c
+++ b/examples/ip_pipeline/config_parse.c
@@ -1,4 +1,4 @@
-/*-
+/*-
* BSD LICENSE
*
* Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
@@ -103,7 +103,7 @@ static const struct app_link_params link_params_default = {
.hw_vlan_strip = 0, /* VLAN strip */
.hw_vlan_extend = 0, /* Extended VLAN */
.jumbo_frame = 0, /* Jumbo frame support */
- .hw_strip_crc = 0, /* CRC strip by HW */
+ .hw_strip_crc = 1, /* CRC strip by HW */
.enable_scatter = 0, /* Scattered packets RX handler */
.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
@@ -3407,7 +3407,7 @@ app_config_args(struct app_params *app, int argc, char **argv)
app_print_usage(argv[0]);
}
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
/* Check dependencies between args */
if (preproc_params_present && (preproc_present == 0))
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
index d46bd365..be148fca 100644
--- a/examples/ip_pipeline/init.c
+++ b/examples/ip_pipeline/init.c
@@ -69,7 +69,8 @@ static void
app_init_core_map(struct app_params *app)
{
APP_LOG(app, HIGH, "Initializing CPU core map ...");
- app->core_map = cpu_core_map_init(4, 32, 4, 0);
+ app->core_map = cpu_core_map_init(RTE_MAX_NUMA_NODES, RTE_MAX_LCORE,
+ 4, 0);
if (app->core_map == NULL)
rte_panic("Cannot create CPU core map\n");
@@ -329,16 +330,14 @@ app_init_mempool(struct app_params *app)
struct app_mempool_params *p = &app->mempool_params[i];
APP_LOG(app, HIGH, "Initializing %s ...", p->name);
- app->mempool[i] = rte_mempool_create(
- p->name,
- p->pool_size,
- p->buffer_size,
- p->cache_size,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- p->cpu_socket_id,
- 0);
+ app->mempool[i] = rte_pktmbuf_pool_create(
+ p->name,
+ p->pool_size,
+ p->cache_size,
+ 0, /* priv_size */
+ p->buffer_size -
+ sizeof(struct rte_mbuf), /* mbuf data size */
+ p->cpu_socket_id);
if (app->mempool[i] == NULL)
rte_panic("%s init error\n", p->name);
@@ -718,7 +717,8 @@ app_link_up_internal(struct app_params *app, struct app_link_params *cp)
/* PMD link up */
status = rte_eth_dev_set_link_up(cp->pmd_id);
- if (status < 0)
+ /* Do not panic if PMD does not provide link up functionality */
+ if (status < 0 && status != -ENOTSUP)
rte_panic("%s (%" PRIu32 "): PMD set link up error %"
PRId32 "\n", cp->name, cp->pmd_id, status);
@@ -734,7 +734,8 @@ app_link_down_internal(struct app_params *app, struct app_link_params *cp)
/* PMD link down */
status = rte_eth_dev_set_link_down(cp->pmd_id);
- if (status < 0)
+ /* Do not panic if PMD does not provide link down functionality */
+ if (status < 0 && status != -ENOTSUP)
rte_panic("%s (%" PRIu32 "): PMD set link down error %"
PRId32 "\n", cp->name, cp->pmd_id, status);
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall_be.c b/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
index b61f3034..2980492b 100644
--- a/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
@@ -161,7 +161,7 @@ static struct rte_acl_field_def field_format_ipv4[] = {
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = 4,
- .input_index = 4,
+ .input_index = 3,
.offset = sizeof(struct ether_hdr) +
sizeof(struct ipv4_hdr) +
offsetof(struct tcp_hdr, dst_port),
@@ -221,7 +221,7 @@ static struct rte_acl_field_def field_format_vlan_ipv4[] = {
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = 4,
- .input_index = 4,
+ .input_index = 3,
.offset = sizeof(struct ether_hdr) +
SIZEOF_VLAN_HDR +
sizeof(struct ipv4_hdr) +
@@ -282,7 +282,7 @@ static struct rte_acl_field_def field_format_qinq_ipv4[] = {
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = 4,
- .input_index = 4,
+ .input_index = 3,
.offset = sizeof(struct ether_hdr) +
SIZEOF_QINQ_HEADER +
sizeof(struct ipv4_hdr) +
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing.c b/examples/ip_pipeline/pipeline/pipeline_routing.c
index 3aadbf91..3deaff9c 100644
--- a/examples/ip_pipeline/pipeline/pipeline_routing.c
+++ b/examples/ip_pipeline/pipeline/pipeline_routing.c
@@ -494,6 +494,26 @@ app_pipeline_routing_add_route(struct app_params *app,
/* data */
if (data->port_id >= p->n_ports_out)
return -1;
+
+ /* Valid range of VLAN tags 12 bits */
+ if (data->flags & PIPELINE_ROUTING_ROUTE_QINQ)
+ if ((data->l2.qinq.svlan & 0xF000) ||
+ (data->l2.qinq.cvlan & 0xF000))
+ return -1;
+
+ /* Max number of MPLS labels supported */
+ if (data->flags & PIPELINE_ROUTING_ROUTE_MPLS) {
+ uint32_t i;
+
+ if (data->l2.mpls.n_labels >
+ PIPELINE_ROUTING_MPLS_LABELS_MAX)
+ return -1;
+
+ /* Max MPLS label value 20 bits */
+ for (i = 0; i < data->l2.mpls.n_labels; i++)
+ if (data->l2.mpls.labels[i] & 0xFFF00000)
+ return -1;
+ }
}
break;
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 50fe4228..c0f3ced6 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -200,7 +200,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 1, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -346,8 +346,8 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
struct rte_ip_frag_death_row *dr;
struct rx_queue *rxq;
void *d_addr_bytes;
- uint32_t next_hop_ipv4;
- uint8_t next_hop_ipv6, dst_port;
+ uint32_t next_hop;
+ uint8_t dst_port;
rxq = &qconf->rx_queue_list[queue];
@@ -390,9 +390,9 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);
/* Find destination port */
- if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop_ipv4) == 0 &&
- (enabled_port_mask & 1 << next_hop_ipv4) != 0) {
- dst_port = next_hop_ipv4;
+ if (rte_lpm_lookup(rxq->lpm, ip_dst, &next_hop) == 0 &&
+ (enabled_port_mask & 1 << next_hop) != 0) {
+ dst_port = next_hop;
}
eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);
@@ -427,9 +427,10 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
}
/* Find destination port */
- if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr, &next_hop_ipv6) == 0 &&
- (enabled_port_mask & 1 << next_hop_ipv6) != 0) {
- dst_port = next_hop_ipv6;
+ if (rte_lpm6_lookup(rxq->lpm6, ip_hdr->dst_addr,
+ &next_hop) == 0 &&
+ (enabled_port_mask & 1 << next_hop) != 0) {
+ dst_port = next_hop;
}
eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv6);
@@ -718,7 +719,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -1062,6 +1063,11 @@ main(int argc, char **argv)
qconf = &lcore_queue_conf[rx_lcore_id];
+ /* limit the frame size to the maximum supported by NIC */
+ rte_eth_dev_info_get(portid, &dev_info);
+ port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
+ dev_info.max_rx_pktlen, port_conf.rxmode.max_rx_pkt_len);
+
/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {
@@ -1128,7 +1134,6 @@ main(int argc, char **argv)
printf("txq=%u,%d,%d ", lcore_id, queueid, socket);
fflush(stdout);
- rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
txconf->txq_flags = 0;
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index ec5a2e62..e77afa0e 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -78,7 +78,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;
if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
- RTE_LOG(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
+ RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
payload_len, sa->block_size);
return -EINVAL;
}
@@ -122,6 +122,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
switch (sa->auth_algo) {
case RTE_CRYPTO_AUTH_NULL:
case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
sym_cop->auth.data.offset = ip_hdr_len;
sym_cop->auth.data.length = sizeof(struct esp_hdr) +
sa->iv_len + payload_len;
@@ -354,6 +355,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
switch (sa->auth_algo) {
case RTE_CRYPTO_AUTH_NULL:
case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
sym_cop->auth.data.offset = ip_hdr_len;
sym_cop->auth.data.length = sizeof(struct esp_hdr) +
sa->iv_len + pad_payload_len;
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 5a4c9b71..8cbf6ac4 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -208,7 +208,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -618,7 +618,7 @@ route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
static inline void
route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
{
- int16_t hop[MAX_PKT_BURST * 2];
+ int32_t hop[MAX_PKT_BURST * 2];
uint8_t dst_ip[MAX_PKT_BURST * 2][16];
uint8_t *ip6_dst;
uint16_t i, offset;
@@ -1039,7 +1039,7 @@ parse_args(int32_t argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index f49143b9..edca5f02 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -47,6 +47,7 @@
static inline int
create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
{
+ struct rte_cryptodev_info cdev_info;
unsigned long cdev_id_qp = 0;
int32_t ret;
struct cdev_key key = { 0 };
@@ -65,7 +66,7 @@ create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
return -1;
}
- RTE_LOG(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
+ RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
"%u qp %u\n", sa->spi,
ipsec_ctx->tbl[cdev_id_qp].id,
ipsec_ctx->tbl[cdev_id_qp].qp);
@@ -73,6 +74,18 @@ create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
sa->crypto_session = rte_cryptodev_sym_session_create(
ipsec_ctx->tbl[cdev_id_qp].id, sa->xforms);
+ rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, &cdev_info);
+ if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
+ ret = rte_cryptodev_queue_pair_attach_sym_session(
+ ipsec_ctx->tbl[cdev_id_qp].qp,
+ sa->crypto_session);
+ if (ret < 0) {
+ RTE_LOG(ERR, IPSEC,
+ "Session cannot be attached to qp %u ",
+ ipsec_ctx->tbl[cdev_id_qp].qp);
+ return -1;
+ }
+ }
sa->cdev_id_qp = cdev_id_qp;
return 0;
@@ -89,7 +102,7 @@ enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
cqp->buf, cqp->len);
if (ret < cqp->len) {
- RTE_LOG(DEBUG, IPSEC, "Cryptodev %u queue %u:"
+ RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
" enqueued %u crypto ops out of %u\n",
cqp->id, cqp->qp,
ret, cqp->len);
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index dbc8c2cb..fe426614 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -90,7 +90,7 @@ struct ip_addr {
} ip;
};
-#define MAX_KEY_SIZE 20
+#define MAX_KEY_SIZE 32
struct ipsec_sa {
uint32_t spi;
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 8c4406cf..39624c49 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -114,6 +114,12 @@ const struct supported_auth_algo auth_algos[] = {
.key_len = 20
},
{
+ .keyword = "sha256-hmac",
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .digest_len = 12,
+ .key_len = 32
+ },
+ {
.keyword = "aes-128-gcm",
.algo = RTE_CRYPTO_AUTH_AES_GCM,
.digest_len = 16,
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 708d76e9..96a4ab6e 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -137,7 +137,7 @@ struct lcore_queue_conf {
} __rte_cache_aligned;
static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
-static const struct rte_eth_conf port_conf = {
+static struct rte_eth_conf port_conf = {
.rxmode = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
@@ -145,7 +145,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 1, /**< Jumbo Frame Support enabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -575,7 +575,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -725,6 +725,11 @@ main(int argc, char **argv)
qconf = &lcore_queue_conf[rx_lcore_id];
+ /* limit the frame size to the maximum supported by NIC */
+ rte_eth_dev_info_get(portid, &dev_info);
+ port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
+ dev_info.max_rx_pktlen, port_conf.rxmode.max_rx_pkt_len);
+
/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {
@@ -777,7 +782,6 @@ main(int argc, char **argv)
printf("txq=%u,%hu ", lcore_id, queueid);
fflush(stdout);
- rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
txconf->txq_flags = 0;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
diff --git a/examples/kni/main.c b/examples/kni/main.c
index 57313d11..0be57d83 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -130,7 +130,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /* IP checksum offload disabled */
.hw_vlan_filter = 0, /* VLAN filtering disabled */
.jumbo_frame = 0, /* Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /* CRC stripped by hardware */
+ .hw_strip_crc = 1, /* CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/l2fwd-cat/cat.c b/examples/l2fwd-cat/cat.c
index bad39305..6133bf5b 100644
--- a/examples/l2fwd-cat/cat.c
+++ b/examples/l2fwd-cat/cat.c
@@ -686,7 +686,7 @@ parse_args(int argc, char **argv)
exit:
/* reset getopt lib */
- optind = 0;
+ optind = 1;
/* Restore opterr value */
opterr = oldopterr;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index bc88be5e..94921935 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -135,9 +135,6 @@ struct l2fwd_key {
phys_addr_t phys_addr;
};
-char supported_auth_algo[RTE_CRYPTO_AUTH_LIST_END][MAX_STR_LEN];
-char supported_cipher_algo[RTE_CRYPTO_CIPHER_LIST_END][MAX_STR_LEN];
-
/** l2fwd crypto application command line options */
struct l2fwd_crypto_options {
unsigned portmask;
@@ -170,6 +167,8 @@ struct l2fwd_crypto_options {
uint16_t block_size;
char string_type[MAX_STR_LEN];
+
+ uint64_t cryptodev_mask;
};
/** l2fwd crypto lcore params */
@@ -215,7 +214,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -331,50 +330,6 @@ print_stats(void)
printf("\n====================================================\n");
}
-static void
-fill_supported_algorithm_tables(void)
-{
- unsigned i;
-
- for (i = 0; i < RTE_CRYPTO_AUTH_LIST_END; i++)
- strcpy(supported_auth_algo[i], "NOT_SUPPORTED");
-
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GCM], "AES_GCM");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_GMAC], "AES_GMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5_HMAC], "MD5_HMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_MD5], "MD5");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_NULL], "NULL");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_AES_XCBC_MAC],
- "AES_XCBC_MAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1_HMAC], "SHA1_HMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA1], "SHA1");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224_HMAC], "SHA224_HMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA224], "SHA224");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256_HMAC], "SHA256_HMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA256], "SHA256");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384_HMAC], "SHA384_HMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA384], "SHA384");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512_HMAC], "SHA512_HMAC");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SHA512], "SHA512");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_SNOW3G_UIA2], "SNOW3G_UIA2");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_ZUC_EIA3], "ZUC_EIA3");
- strcpy(supported_auth_algo[RTE_CRYPTO_AUTH_KASUMI_F9], "KASUMI_F9");
-
- for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++)
- strcpy(supported_cipher_algo[i], "NOT_SUPPORTED");
-
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_CBC], "AES_CBC");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_CTR], "AES_CTR");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_AES_GCM], "AES_GCM");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_NULL], "NULL");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_SNOW3G_UEA2], "SNOW3G_UEA2");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_ZUC_EEA3], "ZUC_EEA3");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_KASUMI_F8], "KASUMI_F8");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_3DES_CTR], "3DES_CTR");
- strcpy(supported_cipher_algo[RTE_CRYPTO_CIPHER_3DES_CBC], "3DES_CBC");
-}
-
-
static int
l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
struct l2fwd_crypto_params *cparams)
@@ -432,7 +387,8 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ip_hdr;
- unsigned ipdata_offset, pad_len, data_len;
+ uint32_t ipdata_offset, data_len;
+ uint32_t pad_len = 0;
char *padding;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
@@ -455,16 +411,33 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
if (cparams->do_hash && cparams->hash_verify)
data_len -= cparams->digest_length;
- pad_len = data_len % cparams->block_size ? cparams->block_size -
- (data_len % cparams->block_size) : 0;
+ if (cparams->do_cipher) {
+ /*
+ * Following algorithms are block cipher algorithms,
+ * and might need padding
+ */
+ switch (cparams->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ if (data_len % cparams->block_size)
+ pad_len = cparams->block_size -
+ (data_len % cparams->block_size);
+ break;
+ default:
+ pad_len = 0;
+ }
- if (pad_len) {
- padding = rte_pktmbuf_append(m, pad_len);
- if (unlikely(!padding))
- return -1;
+ if (pad_len) {
+ padding = rte_pktmbuf_append(m, pad_len);
+ if (unlikely(!padding))
+ return -1;
- data_len += pad_len;
- memset(padding, 0, pad_len);
+ data_len += pad_len;
+ memset(padding, 0, pad_len);
+ }
}
/* Set crypto operation data parameters */
@@ -499,6 +472,10 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
op->sym->auth.aad.data = cparams->aad.data;
op->sym->auth.aad.phys_addr = cparams->aad.phys_addr;
op->sym->auth.aad.length = cparams->aad.length;
+ } else {
+ op->sym->auth.aad.data = NULL;
+ op->sym->auth.aad.phys_addr = 0;
+ op->sym->auth.aad.length = 0;
}
}
@@ -699,7 +676,8 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
generate_random_key(port_cparams[i].aad.data,
port_cparams[i].aad.length);
- }
+ } else
+ port_cparams[i].aad.length = 0;
if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
port_cparams[i].hash_verify = 1;
@@ -810,7 +788,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
ops_burst, nb_rx) !=
nb_rx) {
for (j = 0; j < nb_rx; j++)
- rte_pktmbuf_free(pkts_burst[i]);
+ rte_pktmbuf_free(pkts_burst[j]);
nb_rx = 0;
}
@@ -881,7 +859,8 @@ l2fwd_crypto_usage(const char *prgname)
" --aad_random_size SIZE: size of AAD when generated randomly\n"
" --digest_size SIZE: size of digest to be generated/verified\n"
- " --sessionless\n",
+ " --sessionless\n"
+ " --cryptodev_mask MASK: hexadecimal bitmask of crypto devices to configure\n",
prgname);
}
@@ -928,17 +907,14 @@ parse_crypto_opt_chain(struct l2fwd_crypto_options *options, char *optarg)
static int
parse_cipher_algo(enum rte_crypto_cipher_algorithm *algo, char *optarg)
{
- unsigned i;
- for (i = 0; i < RTE_CRYPTO_CIPHER_LIST_END; i++) {
- if (!strcmp(supported_cipher_algo[i], optarg)) {
- *algo = (enum rte_crypto_cipher_algorithm)i;
- return 0;
- }
+ if (rte_cryptodev_get_cipher_algo_enum(algo, optarg) < 0) {
+ RTE_LOG(ERR, USER1, "Cipher algorithm specified "
+ "not supported!\n");
+ return -1;
}
- printf("Cipher algorithm not supported!\n");
- return -1;
+ return 0;
}
/** Parse crypto cipher operation command line argument */
@@ -1004,17 +980,13 @@ parse_size(int *size, const char *q_arg)
static int
parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
{
- unsigned i;
-
- for (i = 0; i < RTE_CRYPTO_AUTH_LIST_END; i++) {
- if (!strcmp(supported_auth_algo[i], optarg)) {
- *algo = (enum rte_crypto_auth_algorithm)i;
- return 0;
- }
+ if (rte_cryptodev_get_auth_algo_enum(algo, optarg) < 0) {
+ RTE_LOG(ERR, USER1, "Authentication algorithm specified "
+ "not supported!\n");
+ return -1;
}
- printf("Authentication algorithm specified not supported!\n");
- return -1;
+ return 0;
}
static int
@@ -1032,6 +1004,27 @@ parse_auth_op(enum rte_crypto_auth_operation *op, char *optarg)
return -1;
}
+static int
+parse_cryptodev_mask(struct l2fwd_crypto_options *options,
+ const char *q_arg)
+{
+ char *end = NULL;
+ uint64_t pm;
+
+ /* parse hexadecimal string */
+ pm = strtoul(q_arg, &end, 16);
+ if ((pm == '\0') || (end == NULL) || (*end != '\0'))
+ pm = 0;
+
+ options->cryptodev_mask = pm;
+ if (options->cryptodev_mask == 0) {
+ printf("invalid cryptodev_mask specified\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/** Parse long options */
static int
l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
@@ -1132,6 +1125,9 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
return 0;
}
+ else if (strcmp(lgopts[option_index].name, "cryptodev_mask") == 0)
+ return parse_cryptodev_mask(options, optarg);
+
return -1;
}
@@ -1246,6 +1242,7 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
options->type = CDEV_TYPE_ANY;
+ options->cryptodev_mask = UINT64_MAX;
}
static void
@@ -1253,7 +1250,7 @@ display_cipher_info(struct l2fwd_crypto_options *options)
{
printf("\n---- Cipher information ---\n");
printf("Algorithm: %s\n",
- supported_cipher_algo[options->cipher_xform.cipher.algo]);
+ rte_crypto_cipher_algorithm_strings[options->cipher_xform.cipher.algo]);
rte_hexdump(stdout, "Cipher key:",
options->cipher_xform.cipher.key.data,
options->cipher_xform.cipher.key.length);
@@ -1265,7 +1262,7 @@ display_auth_info(struct l2fwd_crypto_options *options)
{
printf("\n---- Authentication information ---\n");
printf("Algorithm: %s\n",
- supported_auth_algo[options->auth_xform.auth.algo]);
+ rte_crypto_auth_algorithm_strings[options->auth_xform.cipher.algo]);
rte_hexdump(stdout, "Auth key:",
options->auth_xform.auth.key.data,
options->auth_xform.auth.key.length);
@@ -1368,6 +1365,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
{ "digest_size", required_argument, 0, 0 },
{ "sessionless", no_argument, 0, 0 },
+ { "cryptodev_mask", required_argument, 0, 0},
{ NULL, 0, 0, 0 }
};
@@ -1432,7 +1430,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
argv[optind-1] = prgname;
retval = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return retval;
}
@@ -1508,6 +1506,17 @@ check_type(struct l2fwd_crypto_options *options, struct rte_cryptodev_info *dev_
return -1;
}
+/* Check if the device is enabled by cryptodev_mask */
+static int
+check_cryptodev_mask(struct l2fwd_crypto_options *options,
+ uint8_t cdev_id)
+{
+ if (options->cryptodev_mask & (1 << cdev_id))
+ return 0;
+
+ return -1;
+}
+
static inline int
check_supported_size(uint16_t length, uint16_t min, uint16_t max,
uint16_t increment)
@@ -1562,6 +1571,9 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
}
};
+ if (check_cryptodev_mask(options, (uint8_t)cdev_id))
+ continue;
+
rte_cryptodev_info_get(cdev_id, &dev_info);
/* Set cipher parameters */
@@ -1587,7 +1599,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
printf("Algorithm %s not supported by cryptodev %u"
" or device not of preferred type (%s)\n",
- supported_cipher_algo[opt_cipher_algo],
+ rte_crypto_cipher_algorithm_strings[opt_cipher_algo],
cdev_id,
options->string_type);
continue;
@@ -1687,13 +1699,12 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
printf("Algorithm %s not supported by cryptodev %u"
" or device not of preferred type (%s)\n",
- supported_auth_algo[opt_auth_algo],
+ rte_crypto_auth_algorithm_strings[opt_auth_algo],
cdev_id,
options->string_type);
continue;
}
- options->block_size = cap->sym.auth.block_size;
/*
* Check if length of provided AAD is supported
* by the algorithm chosen.
@@ -1967,9 +1978,6 @@ main(int argc, char **argv)
/* reserve memory for Cipher/Auth key and IV */
reserve_key_memory(&options);
- /* fill out the supported algorithm tables */
- fill_supported_algorithm_tables();
-
/* parse application arguments (after the EAL ones) */
ret = l2fwd_crypto_parse_args(&options, argc, argv);
if (ret < 0)
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index dd9201b2..e6e6c228 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -126,7 +126,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -709,7 +709,7 @@ l2fwd_parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 60cccdb1..37453483 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -44,6 +44,7 @@
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
+#include <signal.h>
#include <rte_common.h>
#include <rte_log.h>
@@ -116,7 +117,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -142,6 +143,15 @@ static int64_t check_period = 5; /* default check cycle is 5ms */
/* Keepalive structure */
struct rte_keepalive *rte_global_keepalive_info;
+/* Termination signalling */
+static int terminate_signal_received;
+
+/* Termination signal handler */
+static void handle_sigterm(__rte_unused int value)
+{
+ terminate_signal_received = 1;
+}
+
/* Print out statistics on packets dropped */
static void
print_stats(__attribute__((unused)) struct rte_timer *ptr_timer,
@@ -251,7 +261,7 @@ l2fwd_main_loop(void)
uint64_t tsc_initial = rte_rdtsc();
uint64_t tsc_lifetime = (rand()&0x07) * rte_get_tsc_hz();
- while (1) {
+ while (!terminate_signal_received) {
/* Keepalive heartbeat */
rte_keepalive_mark_alive(rte_global_keepalive_info);
@@ -464,7 +474,7 @@ l2fwd_parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -526,6 +536,8 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
static void
dead_core(__rte_unused void *ptr_data, const int id_core)
{
+ if (terminate_signal_received)
+ return;
printf("Dead core %i - restarting..\n", id_core);
if (rte_eal_get_lcore_state(id_core) == FINISHED) {
rte_eal_wait_lcore(id_core);
@@ -554,6 +566,16 @@ main(int argc, char **argv)
uint8_t portid, last_port;
unsigned lcore_id, rx_lcore_id;
unsigned nb_ports_in_mask = 0;
+ struct sigaction signal_handler;
+ struct rte_keepalive_shm *ka_shm;
+
+ memset(&signal_handler, 0, sizeof(signal_handler));
+ terminate_signal_received = 0;
+ signal_handler.sa_handler = &handle_sigterm;
+ if (sigaction(SIGINT, &signal_handler, NULL) == -1 ||
+ sigaction(SIGTERM, &signal_handler, NULL) == -1)
+ rte_exit(EXIT_FAILURE, "SIGNAL\n");
+
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -730,9 +752,8 @@ main(int argc, char **argv)
rte_timer_subsystem_init();
rte_timer_init(&stats_timer);
+ ka_shm = NULL;
if (check_period > 0) {
- struct rte_keepalive_shm *ka_shm;
-
ka_shm = rte_keepalive_shm_create();
if (ka_shm == NULL)
rte_exit(EXIT_FAILURE,
@@ -782,7 +803,7 @@ main(int argc, char **argv)
lcore_id);
}
}
- for (;;) {
+ while (!terminate_signal_received) {
rte_timer_manage();
rte_delay_ms(5);
}
@@ -792,5 +813,7 @@ main(int argc, char **argv)
return -1;
}
+ if (ka_shm != NULL)
+ rte_keepalive_shm_cleanup(ka_shm);
return 0;
}
diff --git a/examples/l2fwd-keepalive/shm.c b/examples/l2fwd-keepalive/shm.c
index 177aa5b8..fbf5bd79 100644
--- a/examples/l2fwd-keepalive/shm.c
+++ b/examples/l2fwd-keepalive/shm.c
@@ -129,3 +129,13 @@ void rte_keepalive_relayed_state(struct rte_keepalive_shm *shm,
strerror(errno));
}
}
+
+void rte_keepalive_shm_cleanup(struct rte_keepalive_shm *ka_shm)
+{
+ if (shm_unlink(RTE_KEEPALIVE_SHM_NAME) == -1 && errno != ENOENT)
+ printf("Warning: Error unlinking %s (%s)\n",
+ RTE_KEEPALIVE_SHM_NAME, strerror(errno));
+
+ if (ka_shm && munmap(ka_shm, sizeof(struct rte_keepalive_shm)) != 0)
+ printf("Warning: munmap() failed\n");
+}
diff --git a/examples/l2fwd-keepalive/shm.h b/examples/l2fwd-keepalive/shm.h
index 25e1b61d..66a60600 100644
--- a/examples/l2fwd-keepalive/shm.h
+++ b/examples/l2fwd-keepalive/shm.h
@@ -87,3 +87,12 @@ struct rte_keepalive_shm *rte_keepalive_shm_create(void);
void rte_keepalive_relayed_state(struct rte_keepalive_shm *shm,
const int id_core, const enum rte_keepalive_state core_state,
uint64_t last_alive);
+
+/** Shutdown cleanup of shared host memory keepalive object.
+ * @param *shm
+ * Pointer to SHM keepalive structure. May be NULL.
+ *
+ * If *shm is NULL, this function will only attempt to remove the
+ * shared host memory handle and not unmap the underlying memory.
+ */
+void rte_keepalive_shm_cleanup(struct rte_keepalive_shm *ka_shm);
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index b2f58519..f9667272 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -120,7 +120,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -392,6 +392,29 @@ l2fwd_parse_timer_period(const char *q_arg)
return n;
}
+static const char short_options[] =
+ "p:" /* portmask */
+ "q:" /* number of queues */
+ "T:" /* timer period */
+ ;
+
+#define CMD_LINE_OPT_MAC_UPDATING "mac-updating"
+#define CMD_LINE_OPT_NO_MAC_UPDATING "no-mac-updating"
+
+enum {
+ /* long options mapped to a short option */
+
+ /* first long only option value must be >= 256, so that we won't
+ * conflict with short options */
+ CMD_LINE_OPT_MIN_NUM = 256,
+};
+
+static const struct option lgopts[] = {
+ { CMD_LINE_OPT_MAC_UPDATING, no_argument, &mac_updating, 1},
+ { CMD_LINE_OPT_NO_MAC_UPDATING, no_argument, &mac_updating, 0},
+ {NULL, 0, 0, 0}
+};
+
/* Parse the argument given in the command line of the application */
static int
l2fwd_parse_args(int argc, char **argv)
@@ -400,15 +423,10 @@ l2fwd_parse_args(int argc, char **argv)
char **argvopt;
int option_index;
char *prgname = argv[0];
- static struct option lgopts[] = {
- { "mac-updating", no_argument, &mac_updating, 1},
- { "no-mac-updating", no_argument, &mac_updating, 0},
- {NULL, 0, 0, 0}
- };
argvopt = argv;
- while ((opt = getopt_long(argc, argvopt, "p:q:T:",
+ while ((opt = getopt_long(argc, argvopt, short_options,
lgopts, &option_index)) != EOF) {
switch (opt) {
@@ -457,7 +475,7 @@ l2fwd_parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 3cfbb40e..ea0b5b1e 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -163,7 +163,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -1776,7 +1776,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index b65d683c..9d57fdef 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -147,7 +147,7 @@
/*
* Configurable number of RX/TX ring descriptors
*/
-#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_RX_DESC_DEFAULT 512
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
@@ -164,6 +164,8 @@ static uint32_t enabled_port_mask = 0;
static int promiscuous_on = 0;
/* NUMA is enabled by default. */
static int numa_on = 1;
+static int parse_ptype; /**< Parse packet type using rx callback, and */
+ /**< disabled by default */
enum freq_scale_hint_t
{
@@ -221,7 +223,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -377,6 +379,7 @@ static void
signal_exit_now(int sigtype)
{
unsigned lcore_id;
+ unsigned int portid, nb_ports;
int ret;
if (sigtype == SIGINT) {
@@ -391,6 +394,15 @@ signal_exit_now(int sigtype)
"library de-initialization failed on "
"core%u\n", lcore_id);
}
+
+ nb_ports = rte_eth_dev_count();
+ for (portid = 0; portid < nb_ports; portid++) {
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ rte_eth_dev_stop(portid);
+ rte_eth_dev_close(portid);
+ }
}
rte_exit(EXIT_SUCCESS, "User forced exit\n");
@@ -607,6 +619,48 @@ get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
#endif
static inline void
+parse_ptype_one(struct rte_mbuf *m)
+{
+ struct ether_hdr *eth_hdr;
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ uint16_t ether_type;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ ether_type = eth_hdr->ether_type;
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+ m->packet_type = packet_type;
+}
+
+static uint16_t
+cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ uint16_t max_pkts __rte_unused,
+ void *user_param __rte_unused)
+{
+ unsigned int i;
+
+ for (i = 0; i < nb_pkts; ++i)
+ parse_ptype_one(pkts[i]);
+
+ return nb_pkts;
+}
+
+static int
+add_cb_parse_ptype(uint8_t portid, uint16_t queueid)
+{
+ printf("Port %d: softly parse packet type info\n", portid);
+ if (rte_eth_add_rx_callback(portid, queueid, cb_parse_ptype, NULL))
+ return 0;
+
+ printf("Failed to add rx callback: port=%d\n", portid);
+ return -1;
+}
+
+static inline void
l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
struct lcore_conf *qconf)
{
@@ -1108,7 +1162,8 @@ print_usage(const char *prgname)
" --config (port,queue,lcore): rx queues configuration\n"
" --no-numa: optional, disable numa awareness\n"
" --enable-jumbo: enable jumbo frame"
- " which max packet len is PKTLEN in decimal (64-9600)\n",
+ " which max packet len is PKTLEN in decimal (64-9600)\n"
+ " --parse-ptype: parse packet type by software\n",
prgname);
}
@@ -1202,6 +1257,8 @@ parse_config(const char *q_arg)
return 0;
}
+#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
+
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
@@ -1214,6 +1271,7 @@ parse_args(int argc, char **argv)
{"config", 1, 0, 0},
{"no-numa", 0, 0, 0},
{"enable-jumbo", 0, 0, 0},
+ {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
{NULL, 0, 0, 0}
};
@@ -1284,6 +1342,13 @@ parse_args(int argc, char **argv)
(unsigned int)port_conf.rxmode.max_rx_pkt_len);
}
+ if (!strncmp(lgopts[option_index].name,
+ CMD_LINE_OPT_PARSE_PTYPE,
+ sizeof(CMD_LINE_OPT_PARSE_PTYPE))) {
+ printf("soft parse-ptype is enabled\n");
+ parse_ptype = 1;
+ }
+
break;
default:
@@ -1296,7 +1361,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -1531,6 +1596,50 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
}
}
+static int check_ptype(uint8_t portid)
+{
+ int i, ret;
+ int ptype_l3_ipv4 = 0;
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+ int ptype_l3_ipv6 = 0;
+#endif
+ uint32_t ptype_mask = RTE_PTYPE_L3_MASK;
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
+ if (ret <= 0)
+ return 0;
+
+ uint32_t ptypes[ret];
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
+ for (i = 0; i < ret; ++i) {
+ if (ptypes[i] & RTE_PTYPE_L3_IPV4)
+ ptype_l3_ipv4 = 1;
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+ if (ptypes[i] & RTE_PTYPE_L3_IPV6)
+ ptype_l3_ipv6 = 1;
+#endif
+ }
+
+ if (ptype_l3_ipv4 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+ if (ptype_l3_ipv6 == 0)
+ printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);
+#endif
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+ if (ptype_l3_ipv4)
+#else /* APP_LOOKUP_EXACT_MATCH */
+ if (ptype_l3_ipv4 && ptype_l3_ipv6)
+#endif
+ return 1;
+
+ return 0;
+
+}
+
int
main(int argc, char **argv)
{
@@ -1545,6 +1654,7 @@ main(int argc, char **argv)
uint32_t n_tx_queue, nb_lcores;
uint32_t dev_rxq_num, dev_txq_num;
uint8_t portid, nb_rx_queue, queue, socketid;
+ uint16_t org_rxq_intr = port_conf.intr_conf.rxq;
/* catch SIGINT and restore cpufreq governor to ondemand */
signal(SIGINT, signal_exit_now);
@@ -1605,8 +1715,13 @@ main(int argc, char **argv)
n_tx_queue = dev_txq_num;
printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
nb_rx_queue, (unsigned)n_tx_queue );
+ /* If number of Rx queue is 0, no need to enable Rx interrupt */
+ if (nb_rx_queue == 0)
+ port_conf.intr_conf.rxq = 0;
ret = rte_eth_dev_configure(portid, nb_rx_queue,
(uint16_t)n_tx_queue, &port_conf);
+ /* Revert to original value */
+ port_conf.intr_conf.rxq = org_rxq_intr;
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: "
"err=%d, port=%d\n", ret, portid);
@@ -1716,6 +1831,14 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup: err=%d, "
"port=%d\n", ret, portid);
+
+ if (parse_ptype) {
+ if (add_cb_parse_ptype(portid, queueid) < 0)
+ rte_exit(EXIT_FAILURE,
+ "Fail to add ptype cb\n");
+ } else if (!check_ptype(portid))
+ rte_exit(EXIT_FAILURE,
+ "PMD can not provide needed ptypes\n");
}
}
diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c
index f56e8db9..797f722a 100644
--- a/examples/l3fwd-vf/main.c
+++ b/examples/l3fwd-vf/main.c
@@ -197,7 +197,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -816,7 +816,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/l3fwd/l3fwd_lpm.h b/examples/l3fwd/l3fwd_lpm.h
index a43c5070..258a82fe 100644
--- a/examples/l3fwd/l3fwd_lpm.h
+++ b/examples/l3fwd/l3fwd_lpm.h
@@ -49,7 +49,7 @@ lpm_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
static inline uint8_t
lpm_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
{
- uint8_t next_hop;
+ uint32_t next_hop;
struct rte_lpm6 *ipv6_l3fwd_lookup_struct =
(struct rte_lpm6 *)lookup_struct;
diff --git a/examples/l3fwd/l3fwd_lpm_sse.h b/examples/l3fwd/l3fwd_lpm_sse.h
index 538fe3d7..aa06b6d3 100644
--- a/examples/l3fwd/l3fwd_lpm_sse.h
+++ b/examples/l3fwd/l3fwd_lpm_sse.h
@@ -40,8 +40,7 @@ static inline __attribute__((always_inline)) uint16_t
lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
uint8_t portid)
{
- uint32_t next_hop_ipv4;
- uint8_t next_hop_ipv6;
+ uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
struct ipv4_hdr *ipv4_hdr;
struct ether_hdr *eth_hdr;
@@ -51,9 +50,11 @@ lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
- return (uint16_t) ((rte_lpm_lookup(qconf->ipv4_lookup_struct,
- rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop_ipv4) == 0) ?
- next_hop_ipv4 : portid);
+ return (uint16_t) (
+ (rte_lpm_lookup(qconf->ipv4_lookup_struct,
+ rte_be_to_cpu_32(ipv4_hdr->dst_addr),
+ &next_hop) == 0) ?
+ next_hop : portid);
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
@@ -61,8 +62,8 @@ lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
- ipv6_hdr->dst_addr, &next_hop_ipv6) == 0)
- ? next_hop_ipv6 : portid);
+ ipv6_hdr->dst_addr, &next_hop) == 0)
+ ? next_hop : portid);
}
@@ -78,14 +79,13 @@ static inline __attribute__((always_inline)) uint16_t
lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
uint32_t dst_ipv4, uint8_t portid)
{
- uint32_t next_hop_ipv4;
- uint8_t next_hop_ipv6;
+ uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
struct ether_hdr *eth_hdr;
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
return (uint16_t) ((rte_lpm_lookup(qconf->ipv4_lookup_struct, dst_ipv4,
- &next_hop_ipv4) == 0) ? next_hop_ipv4 : portid);
+ &next_hop) == 0) ? next_hop : portid);
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
@@ -93,8 +93,8 @@ lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
- ipv6_hdr->dst_addr, &next_hop_ipv6) == 0)
- ? next_hop_ipv6 : portid);
+ ipv6_hdr->dst_addr, &next_hop) == 0)
+ ? next_hop : portid);
}
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 7223e773..fd6605bf 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -156,7 +156,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -474,6 +474,13 @@ parse_eth_dest(const char *optarg)
#define MAX_JUMBO_PKT_LEN 9600
#define MEMPOOL_CACHE_SIZE 256
+static const char short_options[] =
+ "p:" /* portmask */
+ "P" /* promiscuous */
+ "L" /* enable long prefix match */
+ "E" /* enable exact match */
+ ;
+
#define CMD_LINE_OPT_CONFIG "config"
#define CMD_LINE_OPT_ETH_DEST "eth-dest"
#define CMD_LINE_OPT_NO_NUMA "no-numa"
@@ -481,6 +488,31 @@ parse_eth_dest(const char *optarg)
#define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
#define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
+enum {
+ /* long options mapped to a short option */
+
+ /* first long only option value must be >= 256, so that we won't
+ * conflict with short options */
+ CMD_LINE_OPT_MIN_NUM = 256,
+ CMD_LINE_OPT_CONFIG_NUM,
+ CMD_LINE_OPT_ETH_DEST_NUM,
+ CMD_LINE_OPT_NO_NUMA_NUM,
+ CMD_LINE_OPT_IPV6_NUM,
+ CMD_LINE_OPT_ENABLE_JUMBO_NUM,
+ CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
+ CMD_LINE_OPT_PARSE_PTYPE_NUM,
+};
+
+static const struct option lgopts[] = {
+ {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
+ {CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
+ {CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
+ {CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
+ {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, CMD_LINE_OPT_ENABLE_JUMBO_NUM},
+ {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
+ {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
+ {NULL, 0, 0, 0}
+};
/*
* This expression is used to calculate the number of mbufs needed
@@ -504,16 +536,6 @@ parse_args(int argc, char **argv)
char **argvopt;
int option_index;
char *prgname = argv[0];
- static struct option lgopts[] = {
- {CMD_LINE_OPT_CONFIG, 1, 0, 0},
- {CMD_LINE_OPT_ETH_DEST, 1, 0, 0},
- {CMD_LINE_OPT_NO_NUMA, 0, 0, 0},
- {CMD_LINE_OPT_IPV6, 0, 0, 0},
- {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, 0},
- {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, 0},
- {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
- {NULL, 0, 0, 0}
- };
argvopt = argv;
@@ -534,7 +556,7 @@ parse_args(int argc, char **argv)
"L3FWD: LPM and EM are mutually exclusive, select only one";
const char *str13 = "L3FWD: LPM or EM none selected, default LPM on";
- while ((opt = getopt_long(argc, argvopt, "p:PLE",
+ while ((opt = getopt_long(argc, argvopt, short_options,
lgopts, &option_index)) != EOF) {
switch (opt) {
@@ -547,6 +569,7 @@ parse_args(int argc, char **argv)
return -1;
}
break;
+
case 'P':
printf("%s\n", str2);
promiscuous_on = 1;
@@ -563,89 +586,71 @@ parse_args(int argc, char **argv)
break;
/* long options */
- case 0:
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_CONFIG,
- sizeof(CMD_LINE_OPT_CONFIG))) {
-
- ret = parse_config(optarg);
- if (ret) {
- printf("%s\n", str5);
- print_usage(prgname);
- return -1;
- }
- }
-
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_ETH_DEST,
- sizeof(CMD_LINE_OPT_ETH_DEST))) {
- parse_eth_dest(optarg);
- }
-
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_NO_NUMA,
- sizeof(CMD_LINE_OPT_NO_NUMA))) {
- printf("%s\n", str6);
- numa_on = 0;
+ case CMD_LINE_OPT_CONFIG_NUM:
+ ret = parse_config(optarg);
+ if (ret) {
+ printf("%s\n", str5);
+ print_usage(prgname);
+ return -1;
}
+ break;
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_IPV6,
- sizeof(CMD_LINE_OPT_IPV6))) {
- printf("%sn", str7);
- ipv6 = 1;
- }
+ case CMD_LINE_OPT_ETH_DEST_NUM:
+ parse_eth_dest(optarg);
+ break;
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_ENABLE_JUMBO,
- sizeof(CMD_LINE_OPT_ENABLE_JUMBO))) {
- struct option lenopts = {
- "max-pkt-len", required_argument, 0, 0
- };
-
- printf("%s\n", str8);
- port_conf.rxmode.jumbo_frame = 1;
-
- /*
- * if no max-pkt-len set, use the default
- * value ETHER_MAX_LEN.
- */
- if (0 == getopt_long(argc, argvopt, "",
- &lenopts, &option_index)) {
- ret = parse_max_pkt_len(optarg);
- if ((ret < 64) ||
- (ret > MAX_JUMBO_PKT_LEN)) {
- printf("%s\n", str9);
- print_usage(prgname);
- return -1;
- }
- port_conf.rxmode.max_rx_pkt_len = ret;
- }
- printf("%s %u\n", str10,
- (unsigned int)port_conf.rxmode.max_rx_pkt_len);
- }
+ case CMD_LINE_OPT_NO_NUMA_NUM:
+ printf("%s\n", str6);
+ numa_on = 0;
+ break;
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_HASH_ENTRY_NUM,
- sizeof(CMD_LINE_OPT_HASH_ENTRY_NUM))) {
+ case CMD_LINE_OPT_IPV6_NUM:
+ printf("%sn", str7);
+ ipv6 = 1;
+ break;
- ret = parse_hash_entry_number(optarg);
- if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
- hash_entry_number = ret;
- } else {
- printf("%s\n", str11);
+ case CMD_LINE_OPT_ENABLE_JUMBO_NUM: {
+ struct option lenopts = {
+ "max-pkt-len", required_argument, 0, 0
+ };
+
+ printf("%s\n", str8);
+ port_conf.rxmode.jumbo_frame = 1;
+
+ /*
+ * if no max-pkt-len set, use the default
+ * value ETHER_MAX_LEN.
+ */
+ if (getopt_long(argc, argvopt, "",
+ &lenopts, &option_index) == 0) {
+ ret = parse_max_pkt_len(optarg);
+ if ((ret < 64) ||
+ (ret > MAX_JUMBO_PKT_LEN)) {
+ printf("%s\n", str9);
print_usage(prgname);
return -1;
}
+ port_conf.rxmode.max_rx_pkt_len = ret;
}
+ printf("%s %u\n", str10,
+ (unsigned int)port_conf.rxmode.max_rx_pkt_len);
+ break;
+ }
- if (!strncmp(lgopts[option_index].name,
- CMD_LINE_OPT_PARSE_PTYPE,
- sizeof(CMD_LINE_OPT_PARSE_PTYPE))) {
- printf("soft parse-ptype is enabled\n");
- parse_ptype = 1;
+ case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
+ ret = parse_hash_entry_number(optarg);
+ if ((ret > 0) && (ret <= L3FWD_HASH_ENTRIES)) {
+ hash_entry_number = ret;
+ } else {
+ printf("%s\n", str11);
+ print_usage(prgname);
+ return -1;
}
+ break;
+ case CMD_LINE_OPT_PARSE_PTYPE_NUM:
+ printf("soft parse-ptype is enabled\n");
+ parse_ptype = 1;
break;
default:
@@ -683,7 +688,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index 14a038b7..25da28eb 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -116,7 +116,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -451,7 +451,7 @@ lsi_parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/load_balancer/config.c b/examples/load_balancer/config.c
index 157fd528..07f92a1a 100644
--- a/examples/load_balancer/config.c
+++ b/examples/load_balancer/config.c
@@ -758,7 +758,7 @@ app_parse_args(int argc, char **argv)
argv[optind - 1] = prgname;
ret = optind - 1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/examples/load_balancer/init.c b/examples/load_balancer/init.c
index e07850be..abd05a31 100644
--- a/examples/load_balancer/init.c
+++ b/examples/load_balancer/init.c
@@ -81,7 +81,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index 6944325d..7f918aa4 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -144,9 +144,10 @@ app_lcore_io_rx_buffer_to_send (
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- bsz);
+ bsz,
+ NULL);
- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz; k ++) {
struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
@@ -310,9 +311,10 @@ app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
ret = rte_ring_sp_enqueue_bulk(
lp->rx.rings[worker],
(void **) lp->rx.mbuf_out[worker].array,
- lp->rx.mbuf_out[worker].n_mbufs);
+ lp->rx.mbuf_out[worker].n_mbufs,
+ NULL);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k];
@@ -347,11 +349,11 @@ app_lcore_io_tx(
ret = rte_ring_sc_dequeue_bulk(
ring,
(void **) &lp->tx.mbuf_out[port].array[n_mbufs],
- bsz_rd);
+ bsz_rd,
+ NULL);
- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }
n_mbufs += bsz_rd;
@@ -418,10 +420,12 @@ static inline void
app_lcore_io_tx_flush(struct app_lcore_params_io *lp)
{
uint8_t port;
+ uint32_t i;
- for (port = 0; port < lp->tx.n_nic_ports; port ++) {
+ for (i = 0; i < lp->tx.n_nic_ports; i++) {
uint32_t n_pkts;
+ port = lp->tx.nic_ports[i];
if (likely((lp->tx.mbuf_out_flush[port] == 0) ||
(lp->tx.mbuf_out[port].n_mbufs == 0))) {
lp->tx.mbuf_out_flush[port] = 1;
@@ -503,11 +507,11 @@ app_lcore_worker(
ret = rte_ring_sc_dequeue_bulk(
ring_in,
(void **) lp->mbuf_in.array,
- bsz_rd);
+ bsz_rd,
+ NULL);
- if (unlikely(ret == -ENOENT)) {
+ if (unlikely(ret == 0))
continue;
- }
#if APP_WORKER_DROP_ALL_PACKETS
for (j = 0; j < bsz_rd; j ++) {
@@ -555,11 +559,12 @@ app_lcore_worker(
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- bsz_wr);
+ bsz_wr,
+ NULL);
#if APP_STATS
lp->rings_out_iters[port] ++;
- if (ret == 0) {
+ if (ret > 0) {
lp->rings_out_count[port] += 1;
}
if (lp->rings_out_iters[port] == APP_STATS){
@@ -572,7 +577,7 @@ app_lcore_worker(
}
#endif
- if (unlikely(ret == -ENOBUFS)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < bsz_wr; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
@@ -607,9 +612,10 @@ app_lcore_worker_flush(struct app_lcore_params_worker *lp)
ret = rte_ring_sp_enqueue_bulk(
lp->rings_out[port],
(void **) lp->mbuf_out[port].array,
- lp->mbuf_out[port].n_mbufs);
+ lp->mbuf_out[port].n_mbufs,
+ NULL);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret == 0)) {
uint32_t k;
for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) {
struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index d4f9ca37..01b535c2 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -276,14 +276,11 @@ main(int argc, char *argv[])
printf("[Press Ctrl-C to quit ...]\n");
for (;;) {
- uint16_t i, rx_pkts = PKT_READ_SIZE;
+ uint16_t i, rx_pkts;
uint8_t port;
- /* try dequeuing max possible packets first, if that fails, get the
- * most we can. Loop body should only execute once, maximum */
- while (rx_pkts > 0 &&
- unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))
- rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);
+ rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts,
+ PKT_READ_SIZE, NULL);
if (unlikely(rx_pkts == 0)){
if (need_flush)
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index a6dc12d5..c2b0261d 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -227,7 +227,7 @@ flush_rx_queue(uint16_t client)
cl = &clients[client];
if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
- cl_rx_buf[client].count) != 0){
+ cl_rx_buf[client].count, NULL) == 0){
for (j = 0; j < cl_rx_buf[client].count; j++)
rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
cl->stats.rx_drop += cl_rx_buf[client].count;
diff --git a/examples/multi_process/l2fwd_fork/main.c b/examples/multi_process/l2fwd_fork/main.c
index 2d951d93..d922522f 100644
--- a/examples/multi_process/l2fwd_fork/main.c
+++ b/examples/multi_process/l2fwd_fork/main.c
@@ -77,8 +77,7 @@
#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
#define MBUF_NAME "mbuf_pool_%d"
-#define MBUF_SIZE \
-(RTE_MBUF_DEFAULT_DATAROOM + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
#define NB_MBUF 8192
#define RING_MASTER_NAME "l2fwd_ring_m2s_"
#define RING_SLAVE_NAME "l2fwd_ring_s2m_"
@@ -163,7 +162,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -672,6 +671,8 @@ l2fwd_main_loop(void)
port_statistics[portid].tx += sent;
}
+
+ prev_tsc = cur_tsc;
}
/*
@@ -865,7 +866,7 @@ l2fwd_parse_args(int argc, char **argv)
return -1;
}
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -989,14 +990,10 @@ main(int argc, char **argv)
flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
snprintf(buf_name, RTE_MEMPOOL_NAMESIZE, MBUF_NAME, portid);
l2fwd_pktmbuf_pool[portid] =
- rte_mempool_create(buf_name, NB_MBUF,
- MBUF_SIZE, 32,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- rte_socket_id(), flags);
+ rte_pktmbuf_pool_create(buf_name, NB_MBUF, 32,
+ 0, MBUF_DATA_SIZE, rte_socket_id());
if (l2fwd_pktmbuf_pool[portid] == NULL)
- rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
printf("Create mbuf %s\n", buf_name);
}
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
index d30ff4a4..0990d965 100644
--- a/examples/multi_process/symmetric_mp/main.c
+++ b/examples/multi_process/symmetric_mp/main.c
@@ -193,7 +193,7 @@ smp_parse_args(int argc, char **argv)
ports[num_ports++] = (uint8_t)i;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -213,7 +213,7 @@ smp_port_init(uint8_t port, struct rte_mempool *mbuf_pool, uint16_t num_queues)
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/examples/netmap_compat/bridge/Makefile b/examples/netmap_compat/bridge/Makefile
index 50d96e81..1d4ddfff 100644
--- a/examples/netmap_compat/bridge/Makefile
+++ b/examples/netmap_compat/bridge/Makefile
@@ -42,6 +42,7 @@ ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
$(info This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
all:
+clean:
else
# binary name
diff --git a/examples/netmap_compat/bridge/bridge.c b/examples/netmap_compat/bridge/bridge.c
index 53f5fdb6..2f2b6baa 100644
--- a/examples/netmap_compat/bridge/bridge.c
+++ b/examples/netmap_compat/bridge/bridge.c
@@ -59,7 +59,7 @@ struct rte_eth_conf eth_conf = {
.hw_ip_checksum = 0,
.hw_vlan_filter = 0,
.jumbo_frame = 0,
- .hw_strip_crc = 0,
+ .hw_strip_crc = 1,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 3c88b86e..49ae35b8 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -216,7 +216,7 @@ parse_args(int argc, char **argv)
}
argv[optind-1] = prgname;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return 0;
}
@@ -229,7 +229,7 @@ flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
/* free the mbufs which failed from transmit */
app_stats.tx.ro_tx_failed_pkts += count;
- RTE_LOG(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
+ RTE_LOG_DP(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
pktmbuf_free_bulk(unsent, count);
}
@@ -410,7 +410,7 @@ rx_thread(struct rte_ring *ring_out)
nb_rx_pkts = rte_eth_rx_burst(port_id, 0,
pkts, MAX_PKTS_BURST);
if (nb_rx_pkts == 0) {
- RTE_LOG(DEBUG, REORDERAPP,
+ RTE_LOG_DP(DEBUG, REORDERAPP,
"%s():Received zero packets\n", __func__);
continue;
}
@@ -421,8 +421,8 @@ rx_thread(struct rte_ring *ring_out)
pkts[i++]->seqn = seqn++;
/* enqueue to rx_to_workers ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *) pkts,
- nb_rx_pkts);
+ ret = rte_ring_enqueue_burst(ring_out,
+ (void *)pkts, nb_rx_pkts, NULL);
app_stats.rx.enqueue_pkts += ret;
if (unlikely(ret < nb_rx_pkts)) {
app_stats.rx.enqueue_failed_pkts +=
@@ -462,7 +462,7 @@ worker_thread(void *args_ptr)
/* dequeue the mbufs from rx_to_workers ring */
burst_size = rte_ring_dequeue_burst(ring_in,
- (void *)burst_buffer, MAX_PKTS_BURST);
+ (void *)burst_buffer, MAX_PKTS_BURST, NULL);
if (unlikely(burst_size == 0))
continue;
@@ -473,7 +473,8 @@ worker_thread(void *args_ptr)
burst_buffer[i++]->port ^= xor_val;
/* enqueue the modified mbufs to workers_to_tx ring */
- ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer, burst_size);
+ ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
+ burst_size, NULL);
__sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
if (unlikely(ret < burst_size)) {
/* Return the mbufs to their respective pool, dropping packets */
@@ -509,7 +510,7 @@ send_thread(struct send_thread_args *args)
/* deque the mbufs from workers_to_tx ring */
nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);
if (unlikely(nb_dq_mbufs == 0))
continue;
@@ -522,7 +523,7 @@ send_thread(struct send_thread_args *args)
if (ret == -1 && rte_errno == ERANGE) {
/* Too early pkts should be transmitted out directly */
- RTE_LOG(DEBUG, REORDERAPP,
+ RTE_LOG_DP(DEBUG, REORDERAPP,
"%s():Cannot reorder early packet "
"direct enqueuing to TX\n", __func__);
outp = mbufs[i]->port;
@@ -594,7 +595,7 @@ tx_thread(struct rte_ring *ring_in)
/* deque the mbufs from workers_to_tx ring */
dqnum = rte_ring_dequeue_burst(ring_in,
- (void *)mbufs, MAX_PKTS_BURST);
+ (void *)mbufs, MAX_PKTS_BURST, NULL);
if (unlikely(dqnum == 0))
continue;
diff --git a/examples/performance-thread/common/arch/x86/ctx.h b/examples/performance-thread/common/arch/x86/ctx.h
index 03860508..a41ce05a 100644
--- a/examples/performance-thread/common/arch/x86/ctx.h
+++ b/examples/performance-thread/common/arch/x86/ctx.h
@@ -35,6 +35,10 @@
#ifndef CTX_H
#define CTX_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/*
* CPU context registers
*/
@@ -54,4 +58,8 @@ void
ctx_switch(struct ctx *new_ctx, struct ctx *curr_ctx);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* RTE_CTX_H_ */
diff --git a/examples/performance-thread/common/common.mk b/examples/performance-thread/common/common.mk
index d3de5fc6..f6cab771 100644
--- a/examples/performance-thread/common/common.mk
+++ b/examples/performance-thread/common/common.mk
@@ -30,13 +30,15 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# list the C files belonhing to the lthread subsystem, these are common to all lthread apps
-SRCS-y += ../common/lthread.c \
- ../common/lthread_sched.c \
- ../common/lthread_cond.c \
- ../common/lthread_tls.c \
- ../common/lthread_mutex.c \
- ../common/lthread_diag.c \
- ../common/arch/x86/ctx.c
+# list the C files belonging to the lthread subsystem, these are common to all
+# lthread apps. Any makefile including this should set VPATH to include this
+# directory path
+#
+
+MKFILE_PATH=$(abspath $(dir $(lastword $(MAKEFILE_LIST))))
+
+VPATH := $(MKFILE_PATH) $(MKFILE_PATH)/arch/x86
+
+SRCS-y += lthread.c lthread_sched.c lthread_cond.c lthread_tls.c lthread_mutex.c lthread_diag.c ctx.c
-INCLUDES += -I$(RTE_SDK)/examples/performance-thread/common/ -I$(RTE_SDK)/examples/performance-thread/common/arch/x86/
+INCLUDES += -I$(MKFILE_PATH) -I$(MKFILE_PATH)/arch/x86/
diff --git a/examples/performance-thread/common/lthread.h b/examples/performance-thread/common/lthread.h
index 8c77af82..5c2c1a5f 100644
--- a/examples/performance-thread/common/lthread.h
+++ b/examples/performance-thread/common/lthread.h
@@ -62,6 +62,10 @@
#ifndef LTHREAD_H_
#define LTHREAD_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <rte_per_lcore.h>
#include "lthread_api.h"
@@ -96,4 +100,8 @@ _lthread_init(struct lthread *lt,
void _lthread_set_stack(struct lthread *lt, void *stack, size_t stack_size);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_H_ */
diff --git a/examples/performance-thread/common/lthread_api.h b/examples/performance-thread/common/lthread_api.h
index ec976103..ff245a08 100644
--- a/examples/performance-thread/common/lthread_api.h
+++ b/examples/performance-thread/common/lthread_api.h
@@ -124,6 +124,10 @@
#ifndef LTHREAD_H
#define LTHREAD_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <stdint.h>
#include <sys/socket.h>
#include <fcntl.h>
@@ -829,4 +833,8 @@ int lthread_cond_signal(struct lthread_cond *c);
*/
int lthread_cond_broadcast(struct lthread_cond *c);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_H */
diff --git a/examples/performance-thread/common/lthread_cond.h b/examples/performance-thread/common/lthread_cond.h
index 5bd02a7d..5e5f14be 100644
--- a/examples/performance-thread/common/lthread_cond.h
+++ b/examples/performance-thread/common/lthread_cond.h
@@ -62,6 +62,10 @@
#ifndef LTHREAD_COND_H_
#define LTHREAD_COND_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "lthread_queue.h"
#define MAX_COND_NAME_SIZE 64
@@ -74,4 +78,8 @@ struct lthread_cond {
uint64_t diag_ref; /* optional ref to user diag data */
} __rte_cache_aligned;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_COND_H_ */
diff --git a/examples/performance-thread/common/lthread_diag.h b/examples/performance-thread/common/lthread_diag.h
index 2877d311..3dce8e0e 100644
--- a/examples/performance-thread/common/lthread_diag.h
+++ b/examples/performance-thread/common/lthread_diag.h
@@ -34,6 +34,10 @@
#ifndef LTHREAD_DIAG_H_
#define LTHREAD_DIAG_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <stdint.h>
#include <inttypes.h>
@@ -129,4 +133,9 @@ extern uint64_t diag_mask;
#define DIAG_USED __rte_unused
#endif /* LTHREAD_DIAG */
+
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_DIAG_H_ */
diff --git a/examples/performance-thread/common/lthread_diag_api.h b/examples/performance-thread/common/lthread_diag_api.h
index 7ee514f8..2fda0951 100644
--- a/examples/performance-thread/common/lthread_diag_api.h
+++ b/examples/performance-thread/common/lthread_diag_api.h
@@ -33,6 +33,10 @@
#ifndef LTHREAD_DIAG_API_H_
#define LTHREAD_DIAG_API_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <stdint.h>
#include <inttypes.h>
@@ -322,4 +326,8 @@ lthread_cond_diag_ref(struct lthread_cond *c);
uint64_t
lthread_mutex_diag_ref(struct lthread_mutex *m);
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_DIAG_API_H_ */
diff --git a/examples/performance-thread/common/lthread_int.h b/examples/performance-thread/common/lthread_int.h
index 031d8afc..3f7fb92d 100644
--- a/examples/performance-thread/common/lthread_int.h
+++ b/examples/performance-thread/common/lthread_int.h
@@ -62,6 +62,10 @@
#include <lthread_api.h>
#define LTHREAD_INT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <stdint.h>
#include <sys/time.h>
#include <sys/types.h>
@@ -197,4 +201,8 @@ struct lthread {
uint64_t diag_ref; /* ref to user diag data */
} __rte_cache_aligned;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_INT_H */
diff --git a/examples/performance-thread/common/lthread_mutex.h b/examples/performance-thread/common/lthread_mutex.h
index 4d30b2e7..e78db91d 100644
--- a/examples/performance-thread/common/lthread_mutex.h
+++ b/examples/performance-thread/common/lthread_mutex.h
@@ -35,6 +35,10 @@
#ifndef LTHREAD_MUTEX_H_
#define LTHREAD_MUTEX_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "lthread_queue.h"
@@ -49,4 +53,8 @@ struct lthread_mutex {
uint64_t diag_ref; /* optional ref to user diag data */
} __rte_cache_aligned;
+#ifdef __cplusplus
+}
+#endif
+
#endif /* LTHREAD_MUTEX_H_ */
diff --git a/examples/performance-thread/common/lthread_objcache.h b/examples/performance-thread/common/lthread_objcache.h
index d7e35825..6e5195ba 100644
--- a/examples/performance-thread/common/lthread_objcache.h
+++ b/examples/performance-thread/common/lthread_objcache.h
@@ -33,6 +33,10 @@
#ifndef LTHREAD_OBJCACHE_H_
#define LTHREAD_OBJCACHE_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <string.h>
#include <rte_per_lcore.h>
@@ -154,5 +158,8 @@ _lthread_objcache_free(struct lthread_objcache *c, void *obj)
}
+#ifdef __cplusplus
+}
+#endif
#endif /* LTHREAD_OBJCACHE_H_ */
diff --git a/examples/performance-thread/common/lthread_pool.h b/examples/performance-thread/common/lthread_pool.h
index 27680eab..fb0c578b 100644
--- a/examples/performance-thread/common/lthread_pool.h
+++ b/examples/performance-thread/common/lthread_pool.h
@@ -69,6 +69,10 @@
#ifndef LTHREAD_POOL_H_
#define LTHREAD_POOL_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <rte_malloc.h>
#include <rte_per_lcore.h>
#include <rte_log.h>
@@ -328,5 +332,8 @@ _qnode_pool_destroy(struct qnode_pool *p)
return 0;
}
+#ifdef __cplusplus
+}
+#endif
#endif /* LTHREAD_POOL_H_ */
diff --git a/examples/performance-thread/common/lthread_queue.h b/examples/performance-thread/common/lthread_queue.h
index 2c55fcec..4fc2074e 100644
--- a/examples/performance-thread/common/lthread_queue.h
+++ b/examples/performance-thread/common/lthread_queue.h
@@ -69,6 +69,10 @@
#ifndef LTHREAD_QUEUE_H_
#define LTHREAD_QUEUE_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <string.h>
#include <rte_prefetch.h>
@@ -298,5 +302,8 @@ _lthread_queue_remove(struct lthread_queue *q)
return NULL;
}
+#ifdef __cplusplus
+}
+#endif
#endif /* LTHREAD_QUEUE_H_ */
diff --git a/examples/performance-thread/common/lthread_sched.h b/examples/performance-thread/common/lthread_sched.h
index 4ce56c27..7cddda9c 100644
--- a/examples/performance-thread/common/lthread_sched.h
+++ b/examples/performance-thread/common/lthread_sched.h
@@ -62,6 +62,10 @@
#ifndef LTHREAD_SCHED_H_
#define LTHREAD_SCHED_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "lthread_int.h"
#include "lthread_queue.h"
#include "lthread_objcache.h"
@@ -148,5 +152,8 @@ extern struct lthread_sched *schedcore[];
void _sched_timer_cb(struct rte_timer *tim, void *arg);
void _sched_shutdown(__rte_unused void *arg);
+#ifdef __cplusplus
+}
+#endif
#endif /* LTHREAD_SCHED_H_ */
diff --git a/examples/performance-thread/common/lthread_timer.h b/examples/performance-thread/common/lthread_timer.h
index b5e6fb0e..7c03d673 100644
--- a/examples/performance-thread/common/lthread_timer.h
+++ b/examples/performance-thread/common/lthread_timer.h
@@ -35,6 +35,10 @@
#ifndef LTHREAD_TIMER_H_
#define LTHREAD_TIMER_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "lthread_int.h"
#include "lthread_sched.h"
@@ -42,11 +46,22 @@
static inline uint64_t
_ns_to_clks(uint64_t ns)
{
- unsigned __int128 clkns = rte_get_tsc_hz();
+ /*
+ * clkns needs to be divided by 1E9 to get ns clocks. However,
+ * dividing by this first would lose a lot of accuracy.
+ * Dividing after a multiply by ns, could cause overflow of
+ * uint64_t if ns is about 5 seconds [if we assume a max tsc
+ * rate of 4GHz]. Therefore we first divide by 1E4, then
+ * multiply and finally divide by 1E5. This allows ns to be
+ * values many hours long, without overflow, while still keeping
+ * reasonable accuracy.
+ */
+ uint64_t clkns = rte_get_tsc_hz() / 1e4;
clkns *= ns;
- clkns /= 1000000000;
- return (uint64_t) clkns;
+ clkns /= 1e5;
+
+ return clkns;
}
@@ -75,5 +90,8 @@ _timer_stop(struct lthread *lt)
}
}
+#ifdef __cplusplus
+}
+#endif
#endif /* LTHREAD_TIMER_H_ */
diff --git a/examples/performance-thread/common/lthread_tls.c b/examples/performance-thread/common/lthread_tls.c
index 6876f831..47505f2d 100644
--- a/examples/performance-thread/common/lthread_tls.c
+++ b/examples/performance-thread/common/lthread_tls.c
@@ -42,7 +42,6 @@
#include <fcntl.h>
#include <sys/time.h>
#include <sys/mman.h>
-#include <execinfo.h>
#include <sched.h>
#include <rte_malloc.h>
diff --git a/examples/performance-thread/common/lthread_tls.h b/examples/performance-thread/common/lthread_tls.h
index 86cbfadc..fff3c0db 100644
--- a/examples/performance-thread/common/lthread_tls.h
+++ b/examples/performance-thread/common/lthread_tls.h
@@ -34,6 +34,10 @@
#ifndef LTHREAD_TLS_H_
#define LTHREAD_TLS_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include "lthread_api.h"
#define RTE_PER_LTHREAD_SECTION_SIZE \
@@ -53,5 +57,8 @@ void _lthread_tls_destroy(struct lthread *lt);
void _lthread_key_pool_init(void);
void _lthread_tls_alloc(struct lthread *lt);
+#ifdef __cplusplus
+}
+#endif
#endif /* LTHREAD_TLS_H_ */
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index fdc90b28..2d98473e 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -90,6 +90,68 @@
#define APP_LOOKUP_METHOD APP_LOOKUP_LPM
#endif
+#ifndef __GLIBC__ /* sched_getcpu() is glibc specific */
+#define sched_getcpu() rte_lcore_id()
+#endif
+
+static int
+check_ptype(int portid)
+{
+ int i, ret;
+ int ipv4 = 0, ipv6 = 0;
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, RTE_PTYPE_L3_MASK, NULL,
+ 0);
+ if (ret <= 0)
+ return 0;
+
+ uint32_t ptypes[ret];
+
+ ret = rte_eth_dev_get_supported_ptypes(portid, RTE_PTYPE_L3_MASK,
+ ptypes, ret);
+ for (i = 0; i < ret; ++i) {
+ if (ptypes[i] & RTE_PTYPE_L3_IPV4)
+ ipv4 = 1;
+ if (ptypes[i] & RTE_PTYPE_L3_IPV6)
+ ipv6 = 1;
+ }
+
+ if (ipv4 && ipv6)
+ return 1;
+
+ return 0;
+}
+
+static inline void
+parse_ptype(struct rte_mbuf *m)
+{
+ struct ether_hdr *eth_hdr;
+ uint32_t packet_type = RTE_PTYPE_UNKNOWN;
+ uint16_t ether_type;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ ether_type = eth_hdr->ether_type;
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+ m->packet_type = packet_type;
+}
+
+static uint16_t
+cb_parse_ptype(__rte_unused uint8_t port, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkts[], uint16_t nb_pkts,
+ __rte_unused uint16_t max_pkts, __rte_unused void *user_param)
+{
+ unsigned int i;
+
+ for (i = 0; i < nb_pkts; i++)
+ parse_ptype(pkts[i]);
+
+ return nb_pkts;
+}
+
/*
* When set to zero, simple forwaring path is eanbled.
* When set to one, optimized forwarding path is enabled.
@@ -170,8 +232,9 @@ static __m128i val_eth[RTE_MAX_ETHPORTS];
/* mask of enabled ports */
static uint32_t enabled_port_mask;
-static int promiscuous_on; /**< $et in promiscuous mode off by default. */
+static int promiscuous_on; /**< Set in promiscuous mode off by default. */
static int numa_on = 1; /**< NUMA is enabled by default. */
+static int parse_ptype_on;
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
static int ipv6; /**< ipv6 is false by default. */
@@ -282,7 +345,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
@@ -850,7 +913,7 @@ static inline uint8_t
get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid,
lookup6_struct_t *ipv6_l3fwd_lookup_struct)
{
- uint8_t next_hop;
+ uint32_t next_hop;
return (uint8_t) ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
((struct ipv6_hdr *)ipv6_hdr)->dst_addr, &next_hop) == 0) ?
@@ -1337,15 +1400,14 @@ rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
static inline __attribute__((always_inline)) uint16_t
get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint8_t portid)
{
- uint32_t next_hop_ipv4;
- uint8_t next_hop_ipv6;
+ uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
struct ether_hdr *eth_hdr;
if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
return (uint16_t) ((rte_lpm_lookup(
RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct, dst_ipv4,
- &next_hop_ipv4) == 0) ? next_hop_ipv4 : portid);
+ &next_hop) == 0) ? next_hop : portid);
} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
@@ -1354,8 +1416,8 @@ get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint8_t portid)
return (uint16_t) ((rte_lpm6_lookup(
RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
- ipv6_hdr->dst_addr, &next_hop_ipv6) == 0) ? next_hop_ipv6 :
- portid);
+ ipv6_hdr->dst_addr, &next_hop) == 0) ?
+ next_hop : portid);
}
@@ -2019,7 +2081,7 @@ lthread_tx_per_ring(void *dummy)
*/
SET_CPU_BUSY(tx_conf, CPU_POLL);
nb_rx = rte_ring_sc_dequeue_burst(ring, (void **)pkts_burst,
- MAX_PKT_BURST);
+ MAX_PKT_BURST, NULL);
SET_CPU_IDLE(tx_conf, CPU_POLL);
if (nb_rx > 0) {
@@ -2155,7 +2217,7 @@ lthread_rx(void *dummy)
ret = rte_ring_sp_enqueue_burst(
rx_conf->ring[worker_id],
(void **) pkts_burst,
- nb_rx);
+ nb_rx, NULL);
new_len = old_len + ret;
@@ -2323,7 +2385,7 @@ pthread_tx(void *dummy)
*/
SET_CPU_BUSY(tx_conf, CPU_POLL);
nb_rx = rte_ring_sc_dequeue_burst(tx_conf->ring,
- (void **)pkts_burst, MAX_PKT_BURST);
+ (void **)pkts_burst, MAX_PKT_BURST, NULL);
SET_CPU_IDLE(tx_conf, CPU_POLL);
if (unlikely(nb_rx == 0)) {
@@ -2395,7 +2457,7 @@ pthread_rx(void *dummy)
SET_CPU_BUSY(rx_conf, CPU_PROCESS);
worker_id = (worker_id + 1) % rx_conf->n_ring;
n = rte_ring_sp_enqueue_burst(rx_conf->ring[worker_id],
- (void **)pkts_burst, nb_rx);
+ (void **)pkts_burst, nb_rx, NULL);
if (unlikely(n != nb_rx)) {
uint32_t k;
@@ -2610,6 +2672,7 @@ print_usage(const char *prgname)
" [--rx (port,queue,lcore,thread)[,(port,queue,lcore,thread]]"
" [--tx (lcore,thread)[,(lcore,thread]]"
" [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
+ " [--parse-ptype]\n\n"
" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
" -P : enable promiscuous mode\n"
" --rx (port,queue,lcore,thread): rx queues configuration\n"
@@ -2621,7 +2684,8 @@ print_usage(const char *prgname)
" --enable-jumbo: enable jumbo frame"
" which max packet len is PKTLEN in decimal (64-9600)\n"
" --hash-entry-num: specify the hash entry number in hexadecimal to be setup\n"
- " --no-lthreads: turn off lthread model\n",
+ " --no-lthreads: turn off lthread model\n"
+ " --parse-ptype: set to use software to analyze packet type\n\n",
prgname);
}
@@ -2840,6 +2904,7 @@ parse_eth_dest(const char *optarg)
#define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
#define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
#define CMD_LINE_OPT_NO_LTHREADS "no-lthreads"
+#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
/* Parse the argument given in the command line of the application */
static int
@@ -2859,6 +2924,7 @@ parse_args(int argc, char **argv)
{CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, 0},
{CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, 0},
{CMD_LINE_OPT_NO_LTHREADS, 0, 0, 0},
+ {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
{NULL, 0, 0, 0}
};
@@ -2935,6 +3001,12 @@ parse_args(int argc, char **argv)
lthreads_on = 0;
}
+ if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_PARSE_PTYPE,
+ sizeof(CMD_LINE_OPT_PARSE_PTYPE))) {
+ printf("software packet type parsing enabled\n");
+ parse_ptype_on = 1;
+ }
+
if (!strncmp(lgopts[option_index].name, CMD_LINE_OPT_ENABLE_JUMBO,
sizeof(CMD_LINE_OPT_ENABLE_JUMBO))) {
struct option lenopts = {"max-pkt-len", required_argument, 0,
@@ -2983,7 +3055,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
ret = optind-1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
@@ -3623,6 +3695,31 @@ main(int argc, char **argv)
rte_eth_promiscuous_enable(portid);
}
+ for (i = 0; i < n_rx_thread; i++) {
+ lcore_id = rx_thread[i].conf.lcore_id;
+ if (rte_lcore_is_enabled(lcore_id) == 0)
+ continue;
+
+ /* check if hw packet type is supported */
+ for (queue = 0; queue < rx_thread[i].n_rx_queue; ++queue) {
+ portid = rx_thread[i].rx_queue_list[queue].port_id;
+ queueid = rx_thread[i].rx_queue_list[queue].queue_id;
+
+ if (parse_ptype_on) {
+ if (!rte_eth_add_rx_callback(portid, queueid,
+ cb_parse_ptype, NULL))
+ rte_exit(EXIT_FAILURE,
+ "Failed to add rx callback: "
+ "port=%d\n", portid);
+ } else if (!check_ptype(portid))
+ rte_exit(EXIT_FAILURE,
+ "Port %d cannot parse packet type.\n\n"
+ "Please add --parse-ptype to use sw "
+ "packet type analyzer.\n\n",
+ portid);
+ }
+ }
+
check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
if (lthreads_on) {
diff --git a/examples/performance-thread/pthread_shim/main.c b/examples/performance-thread/pthread_shim/main.c
index f0357218..850b009d 100644
--- a/examples/performance-thread/pthread_shim/main.c
+++ b/examples/performance-thread/pthread_shim/main.c
@@ -59,6 +59,10 @@
#define DEBUG_APP 0
#define HELLOW_WORLD_MAX_LTHREADS 10
+#ifndef __GLIBC__ /* sched_getcpu() is glibc-specific */
+#define sched_getcpu() rte_lcore_id()
+#endif
+
__thread int print_count;
__thread pthread_mutex_t print_lock;
@@ -175,12 +179,12 @@ static void initial_lthread(void *args __attribute__((unused)))
* use an attribute to pass the desired lcore
*/
pthread_attr_t attr;
- cpu_set_t cpuset;
+ rte_cpuset_t cpuset;
CPU_ZERO(&cpuset);
CPU_SET(lcore, &cpuset);
pthread_attr_init(&attr);
- pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
+ pthread_attr_setaffinity_np(&attr, sizeof(rte_cpuset_t), &cpuset);
/* create the thread */
pthread_create(&tid[i], &attr, helloworld_pthread, (void *) i);
diff --git a/examples/performance-thread/pthread_shim/pthread_shim.c b/examples/performance-thread/pthread_shim/pthread_shim.c
index 0d6100c9..113bafa0 100644
--- a/examples/performance-thread/pthread_shim/pthread_shim.c
+++ b/examples/performance-thread/pthread_shim/pthread_shim.c
@@ -48,6 +48,21 @@
#define POSIX_ERRNO(x) (x)
+/* some releases of FreeBSD 10, e.g. 10.0, don't have CPU_COUNT macro */
+#ifndef CPU_COUNT
+#define CPU_COUNT(x) __cpu_count(x)
+
+static inline unsigned int
+__cpu_count(const rte_cpuset_t *cpuset)
+{
+ unsigned int i, count = 0;
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if (CPU_ISSET(i, cpuset))
+ count++;
+ return count;
+}
+#endif
+
/*
* this flag determines at run time if we override pthread
* calls and map then to equivalent lthread calls
@@ -159,7 +174,7 @@ int (*f_pthread_setschedparam)
int (*f_pthread_yield)
(void);
int (*f_pthread_setaffinity_np)
- (pthread_t thread, size_t cpusetsize, const cpu_set_t *cpuset);
+ (pthread_t thread, size_t cpusetsize, const rte_cpuset_t *cpuset);
int (*f_nanosleep)
(const struct timespec *req, struct timespec *rem);
} _sys_pthread_funcs = {
@@ -390,11 +405,11 @@ pthread_create(pthread_t *__restrict tid,
if (attr != NULL) {
/* determine CPU being requested */
- cpu_set_t cpuset;
+ rte_cpuset_t cpuset;
CPU_ZERO(&cpuset);
pthread_attr_getaffinity_np(attr,
- sizeof(cpu_set_t),
+ sizeof(rte_cpuset_t),
&cpuset);
if (CPU_COUNT(&cpuset) != 1)
@@ -576,15 +591,26 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *a)
return _sys_pthread_funcs.f_pthread_rwlock_wrlock(a);
}
-int pthread_yield(void)
+#ifdef RTE_EXEC_ENV_LINUXAPP
+int
+pthread_yield(void)
{
if (override) {
lthread_yield();
return 0;
}
return _sys_pthread_funcs.f_pthread_yield();
-
}
+#else
+void
+pthread_yield(void)
+{
+ if (override)
+ lthread_yield();
+ else
+ _sys_pthread_funcs.f_pthread_yield();
+}
+#endif
pthread_t pthread_self(void)
{
@@ -686,7 +712,7 @@ int nanosleep(const struct timespec *req, struct timespec *rem)
int
pthread_setaffinity_np(pthread_t thread, size_t cpusetsize,
- const cpu_set_t *cpuset)
+ const rte_cpuset_t *cpuset)
{
if (override) {
/* we only allow affinity with a single CPU */
diff --git a/examples/performance-thread/pthread_shim/pthread_shim.h b/examples/performance-thread/pthread_shim/pthread_shim.h
index 78bbb5ac..10f87894 100644
--- a/examples/performance-thread/pthread_shim/pthread_shim.h
+++ b/examples/performance-thread/pthread_shim/pthread_shim.h
@@ -33,7 +33,8 @@
#ifndef _PTHREAD_SHIM_H_
#define _PTHREAD_SHIM_H_
-#include <pthread.h>
+
+#include <rte_lcore.h>
/*
* This pthread shim is an example that demonstrates how legacy code
diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c
index 0af4f3b6..a80961d3 100644
--- a/examples/ptpclient/ptpclient.c
+++ b/examples/ptpclient/ptpclient.c
@@ -708,7 +708,7 @@ ptp_parse_args(int argc, char **argv)
argv[optind-1] = prgname;
- optind = 0; /* Reset getopt lib. */
+ optind = 1; /* Reset getopt lib. */
return 0;
}
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index 15656155..d8a2107d 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -89,7 +89,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1,
.hw_vlan_filter = 0,
.jumbo_frame = 0,
- .hw_strip_crc = 0,
+ .hw_strip_crc = 1,
},
.rx_adv_conf = {
.rss_conf = {
@@ -300,7 +300,7 @@ parse_args(int argc, char **argv)
argv[optind-1] = prgname;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return 0;
}
diff --git a/examples/qos_sched/Makefile b/examples/qos_sched/Makefile
index f59645f5..e41ac500 100644
--- a/examples/qos_sched/Makefile
+++ b/examples/qos_sched/Makefile
@@ -42,6 +42,7 @@ ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
$(info This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
all:
+clean:
else
# binary name
diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index 70fdcdb2..15f117f5 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -107,7 +107,7 @@ app_rx_thread(struct thread_conf **confs)
}
if (unlikely(rte_ring_sp_enqueue_bulk(conf->rx_ring,
- (void **)rx_mbufs, nb_rx) != 0)) {
+ (void **)rx_mbufs, nb_rx, NULL) == 0)) {
for(i = 0; i < nb_rx; i++) {
rte_pktmbuf_free(rx_mbufs[i]);
@@ -179,8 +179,8 @@ app_tx_thread(struct thread_conf **confs)
while ((conf = confs[conf_idx])) {
retval = rte_ring_sc_dequeue_bulk(conf->tx_ring, (void **)mbufs,
- burst_conf.qos_dequeue);
- if (likely(retval == 0)) {
+ burst_conf.qos_dequeue, NULL);
+ if (likely(retval != 0)) {
app_send_packets(conf, mbufs, burst_conf.qos_dequeue);
conf->counter = 0; /* reset empty read loop counter */
@@ -218,7 +218,7 @@ app_worker_thread(struct thread_conf **confs)
/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
@@ -230,7 +230,9 @@ app_worker_thread(struct thread_conf **confs)
nb_pkt = rte_sched_port_dequeue(conf->sched_port, mbufs,
burst_conf.qos_dequeue);
if (likely(nb_pkt > 0))
- while (rte_ring_sp_enqueue_bulk(conf->tx_ring, (void **)mbufs, nb_pkt) != 0);
+ while (rte_ring_sp_enqueue_bulk(conf->tx_ring,
+ (void **)mbufs, nb_pkt, NULL) == 0)
+ ; /* empty body */
conf_idx++;
if (confs[conf_idx] == NULL)
@@ -252,7 +254,7 @@ app_mixed_thread(struct thread_conf **confs)
/* Read packet from the ring */
nb_pkt = rte_ring_sc_dequeue_burst(conf->rx_ring, (void **)mbufs,
- burst_conf.ring_burst);
+ burst_conf.ring_burst, NULL);
if (likely(nb_pkt)) {
int nb_sent = rte_sched_port_enqueue(conf->sched_port, mbufs,
nb_pkt);
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 70e12bb4..fe0221c6 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -92,7 +92,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_DCB_NONE,
diff --git a/examples/quota_watermark/qw/args.c b/examples/quota_watermark/qw/args.c
index 408b54d1..6ba77bc0 100644
--- a/examples/quota_watermark/qw/args.c
+++ b/examples/quota_watermark/qw/args.c
@@ -47,9 +47,9 @@ unsigned int portmask = 0;
static void
usage(const char *prgname)
{
- fprintf(stderr, "Usage: %s [EAL args] -- -p <portmask>\n"
- "-p PORTMASK: hexadecimal bitmask of NIC ports to configure\n",
- prgname);
+ fprintf(stderr, "Usage: %s [EAL args] -- -p <portmask>\n"
+ "-p PORTMASK: hexadecimal bitmask of NIC ports to configure\n",
+ prgname);
}
static unsigned long
@@ -61,44 +61,47 @@ parse_portmask(const char *portmask_str)
static void
check_core_count(void)
{
- if (rte_lcore_count() < 3)
- rte_exit(EXIT_FAILURE, "At least 3 cores need to be passed in the coremask\n");
+ if (rte_lcore_count() < 3)
+ rte_exit(EXIT_FAILURE,
+ "At least 3 cores need to be passed in the coremask\n");
}
static void
check_portmask_value(unsigned int portmask)
{
- unsigned int port_nb = 0;
+ unsigned int port_nb = 0;
- port_nb = __builtin_popcount(portmask);
+ port_nb = __builtin_popcount(portmask);
- if (port_nb == 0)
- rte_exit(EXIT_FAILURE, "At least 2 ports need to be passed in the portmask\n");
+ if (port_nb == 0)
+ rte_exit(EXIT_FAILURE,
+ "At least 2 ports need to be passed in the portmask\n");
- if (port_nb % 2 != 0)
- rte_exit(EXIT_FAILURE, "An even number of ports is required in the portmask\n");
+ if (port_nb % 2 != 0)
+ rte_exit(EXIT_FAILURE,
+ "An even number of ports is required in the portmask\n");
}
int
parse_qw_args(int argc, char **argv)
{
- int opt;
-
- while ((opt = getopt(argc, argv, "h:p:")) != -1) {
- switch (opt) {
- case 'h':
- usage(argv[0]);
- break;
- case 'p':
- portmask = parse_portmask(optarg);
- break;
- default:
- usage(argv[0]);
- }
- }
-
- check_core_count();
- check_portmask_value(portmask);
-
- return 0;
+ int opt;
+
+ while ((opt = getopt(argc, argv, "h:p:")) != -1) {
+ switch (opt) {
+ case 'h':
+ usage(argv[0]);
+ break;
+ case 'p':
+ portmask = parse_portmask(optarg);
+ break;
+ default:
+ usage(argv[0]);
+ }
+ }
+
+ check_core_count();
+ check_portmask_value(portmask);
+
+ return 0;
}
diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c
index c2087218..b6264fcf 100644
--- a/examples/quota_watermark/qw/init.c
+++ b/examples/quota_watermark/qw/init.c
@@ -51,124 +51,128 @@
static const struct rte_eth_conf port_conf = {
- .rxmode = {
- .split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload disabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
- },
- .txmode = {
- .mq_mode = ETH_DCB_NONE,
- },
+ .rxmode = {
+ .split_hdr_size = 0,
+ .header_split = 0, /**< Header Split disabled */
+ .hw_ip_checksum = 0, /**< IP csum offload disabled */
+ .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+ .jumbo_frame = 0, /**< Jumbo Frame disabled */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
+ },
+ .txmode = {
+ .mq_mode = ETH_DCB_NONE,
+ },
};
static struct rte_eth_fc_conf fc_conf = {
- .mode = RTE_FC_TX_PAUSE,
- .high_water = 80 * 510 / 100,
- .low_water = 60 * 510 / 100,
- .pause_time = 1337,
- .send_xon = 0,
+ .mode = RTE_FC_TX_PAUSE,
+ .high_water = 80 * 510 / 100,
+ .low_water = 60 * 510 / 100,
+ .pause_time = 1337,
+ .send_xon = 0,
};
void configure_eth_port(uint8_t port_id)
{
- int ret;
-
- rte_eth_dev_stop(port_id);
-
- ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot configure port %u (error %d)\n",
- (unsigned) port_id, ret);
-
- /* Initialize the port's RX queue */
- ret = rte_eth_rx_queue_setup(port_id, 0, RX_DESC_PER_QUEUE,
- rte_eth_dev_socket_id(port_id),
- NULL,
- mbuf_pool);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Failed to setup RX queue on "
- "port %u (error %d)\n", (unsigned) port_id, ret);
-
- /* Initialize the port's TX queue */
- ret = rte_eth_tx_queue_setup(port_id, 0, TX_DESC_PER_QUEUE,
- rte_eth_dev_socket_id(port_id),
- NULL);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Failed to setup TX queue on "
- "port %u (error %d)\n", (unsigned) port_id, ret);
-
- /* Initialize the port's flow control */
- ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Failed to setup hardware flow control on "
- "port %u (error %d)\n", (unsigned) port_id, ret);
-
- /* Start the port */
- ret = rte_eth_dev_start(port_id);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Failed to start port %u (error %d)\n",
- (unsigned) port_id, ret);
-
- /* Put it in promiscuous mode */
- rte_eth_promiscuous_enable(port_id);
+ int ret;
+
+ rte_eth_dev_stop(port_id);
+
+ ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot configure port %u (error %d)\n",
+ (unsigned int) port_id, ret);
+
+ /* Initialize the port's RX queue */
+ ret = rte_eth_rx_queue_setup(port_id, 0, RX_DESC_PER_QUEUE,
+ rte_eth_dev_socket_id(port_id),
+ NULL,
+ mbuf_pool);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to setup RX queue on port %u (error %d)\n",
+ (unsigned int) port_id, ret);
+
+ /* Initialize the port's TX queue */
+ ret = rte_eth_tx_queue_setup(port_id, 0, TX_DESC_PER_QUEUE,
+ rte_eth_dev_socket_id(port_id),
+ NULL);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to setup TX queue on port %u (error %d)\n",
+ (unsigned int) port_id, ret);
+
+ /* Initialize the port's flow control */
+ ret = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Failed to setup hardware flow control on port %u (error %d)\n",
+ (unsigned int) port_id, ret);
+
+ /* Start the port */
+ ret = rte_eth_dev_start(port_id);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Failed to start port %u (error %d)\n",
+ (unsigned int) port_id, ret);
+
+ /* Put it in promiscuous mode */
+ rte_eth_promiscuous_enable(port_id);
}
void
init_dpdk(void)
{
- if (rte_eth_dev_count() < 2)
- rte_exit(EXIT_FAILURE, "Not enough ethernet port available\n");
+ if (rte_eth_dev_count() < 2)
+ rte_exit(EXIT_FAILURE, "Not enough ethernet port available\n");
}
void init_ring(int lcore_id, uint8_t port_id)
{
- struct rte_ring *ring;
- char ring_name[RTE_RING_NAMESIZE];
+ struct rte_ring *ring;
+ char ring_name[RTE_RING_NAMESIZE];
- snprintf(ring_name, RTE_RING_NAMESIZE,
- "core%d_port%d", lcore_id, port_id);
- ring = rte_ring_create(ring_name, RING_SIZE, rte_socket_id(),
- RING_F_SP_ENQ | RING_F_SC_DEQ);
+ snprintf(ring_name, RTE_RING_NAMESIZE,
+ "core%d_port%d", lcore_id, port_id);
+ ring = rte_ring_create(ring_name, RING_SIZE, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
- if (ring == NULL)
- rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+ if (ring == NULL)
+ rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
- rte_ring_set_water_mark(ring, 80 * RING_SIZE / 100);
+ *high_watermark = 80 * RING_SIZE / 100;
- rings[lcore_id][port_id] = ring;
+ rings[lcore_id][port_id] = ring;
}
void
pair_ports(void)
{
- uint8_t i, j;
-
- /* Pair ports with their "closest neighbour" in the portmask */
- for (i = 0; i < RTE_MAX_ETHPORTS; i++)
- if (is_bit_set(i, portmask))
- for (j = (uint8_t) (i + 1); j < RTE_MAX_ETHPORTS; j++)
- if (is_bit_set(j, portmask)) {
- port_pairs[i] = j;
- port_pairs[j] = i;
- i = j;
- break;
- }
+ uint8_t i, j;
+
+ /* Pair ports with their "closest neighbour" in the portmask */
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++)
+ if (is_bit_set(i, portmask))
+ for (j = (uint8_t) (i + 1); j < RTE_MAX_ETHPORTS; j++)
+ if (is_bit_set(j, portmask)) {
+ port_pairs[i] = j;
+ port_pairs[j] = i;
+ i = j;
+ break;
+ }
}
void
setup_shared_variables(void)
{
- const struct rte_memzone *qw_memzone;
+ const struct rte_memzone *qw_memzone;
- qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME, 2 * sizeof(int),
- rte_socket_id(), RTE_MEMZONE_2MB);
- if (qw_memzone == NULL)
- rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+ qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME,
+ 3 * sizeof(int), rte_socket_id(), 0);
+ if (qw_memzone == NULL)
+ rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
- quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ quota = qw_memzone->addr;
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index 8ed02148..d4fcfde4 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -60,13 +60,14 @@
#define ETHER_TYPE_FLOW_CONTROL 0x8808
struct ether_fc_frame {
- uint16_t opcode;
- uint16_t param;
+ uint16_t opcode;
+ uint16_t param;
} __attribute__((__packed__));
int *quota;
unsigned int *low_watermark;
+unsigned int *high_watermark;
uint8_t port_pairs[RTE_MAX_ETHPORTS];
@@ -76,38 +77,39 @@ struct rte_mempool *mbuf_pool;
static void send_pause_frame(uint8_t port_id, uint16_t duration)
{
- struct rte_mbuf *mbuf;
- struct ether_fc_frame *pause_frame;
- struct ether_hdr *hdr;
- struct ether_addr mac_addr;
+ struct rte_mbuf *mbuf;
+ struct ether_fc_frame *pause_frame;
+ struct ether_hdr *hdr;
+ struct ether_addr mac_addr;
- RTE_LOG(DEBUG, USER1, "Sending PAUSE frame (duration=%d) on port %d\n",
- duration, port_id);
+ RTE_LOG_DP(DEBUG, USER1,
+ "Sending PAUSE frame (duration=%d) on port %d\n",
+ duration, port_id);
- /* Get a mbuf from the pool */
- mbuf = rte_pktmbuf_alloc(mbuf_pool);
- if (unlikely(mbuf == NULL))
- return;
+ /* Get a mbuf from the pool */
+ mbuf = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(mbuf == NULL))
+ return;
- /* Prepare a PAUSE frame */
- hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- pause_frame = (struct ether_fc_frame *) &hdr[1];
+ /* Prepare a PAUSE frame */
+ hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ pause_frame = (struct ether_fc_frame *) &hdr[1];
- rte_eth_macaddr_get(port_id, &mac_addr);
- ether_addr_copy(&mac_addr, &hdr->s_addr);
+ rte_eth_macaddr_get(port_id, &mac_addr);
+ ether_addr_copy(&mac_addr, &hdr->s_addr);
- void *tmp = &hdr->d_addr.addr_bytes[0];
- *((uint64_t *)tmp) = 0x010000C28001ULL;
+ void *tmp = &hdr->d_addr.addr_bytes[0];
+ *((uint64_t *)tmp) = 0x010000C28001ULL;
- hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_FLOW_CONTROL);
+ hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_FLOW_CONTROL);
- pause_frame->opcode = rte_cpu_to_be_16(0x0001);
- pause_frame->param = rte_cpu_to_be_16(duration);
+ pause_frame->opcode = rte_cpu_to_be_16(0x0001);
+ pause_frame->param = rte_cpu_to_be_16(duration);
- mbuf->pkt_len = 60;
- mbuf->data_len = 60;
+ mbuf->pkt_len = 60;
+ mbuf->data_len = 60;
- rte_eth_tx_burst(port_id, 0, &mbuf, 1);
+ rte_eth_tx_burst(port_id, 0, &mbuf, 1);
}
/**
@@ -121,13 +123,13 @@ static void send_pause_frame(uint8_t port_id, uint16_t duration)
static unsigned int
get_previous_lcore_id(unsigned int lcore_id)
{
- int i;
+ int i;
- for (i = lcore_id - 1; i >= 0; i--)
- if (rte_lcore_is_enabled(i))
- return i;
+ for (i = lcore_id - 1; i >= 0; i--)
+ if (rte_lcore_is_enabled(i))
+ return i;
- return -1;
+ return -1;
}
/**
@@ -139,125 +141,137 @@ get_previous_lcore_id(unsigned int lcore_id)
static unsigned int
get_last_lcore_id(void)
{
- int i;
+ int i;
- for (i = RTE_MAX_LCORE; i >= 0; i--)
- if (rte_lcore_is_enabled(i))
- return i;
+ for (i = RTE_MAX_LCORE; i >= 0; i--)
+ if (rte_lcore_is_enabled(i))
+ return i;
- return 0;
+ return 0;
}
static void
receive_stage(__attribute__((unused)) void *args)
{
- int i, ret;
+ int i, ret;
- uint8_t port_id;
- uint16_t nb_rx_pkts;
+ uint8_t port_id;
+ uint16_t nb_rx_pkts;
- unsigned int lcore_id;
+ unsigned int lcore_id;
+ unsigned int free;
- struct rte_mbuf *pkts[MAX_PKT_QUOTA];
- struct rte_ring *ring;
- enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };
+ struct rte_mbuf *pkts[MAX_PKT_QUOTA];
+ struct rte_ring *ring;
+ enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };
- lcore_id = rte_lcore_id();
+ lcore_id = rte_lcore_id();
- RTE_LOG(INFO, USER1,
- "%s() started on core %u\n", __func__, lcore_id);
+ RTE_LOG(INFO, USER1,
+ "%s() started on core %u\n", __func__, lcore_id);
- while (1) {
+ while (1) {
- /* Process each port round robin style */
- for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+ /* Process each port round robin style */
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
- if (!is_bit_set(port_id, portmask))
- continue;
+ if (!is_bit_set(port_id, portmask))
+ continue;
- ring = rings[lcore_id][port_id];
+ ring = rings[lcore_id][port_id];
- if (ring_state[port_id] != RING_READY) {
- if (rte_ring_count(ring) > *low_watermark)
- continue;
- else
- ring_state[port_id] = RING_READY;
- }
+ if (ring_state[port_id] != RING_READY) {
+ if (rte_ring_count(ring) > *low_watermark)
+ continue;
+ else
+ ring_state[port_id] = RING_READY;
+ }
- /* Enqueue received packets on the RX ring */
- nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts, (uint16_t) *quota);
- ret = rte_ring_enqueue_bulk(ring, (void *) pkts, nb_rx_pkts);
- if (ret == -EDQUOT) {
- ring_state[port_id] = RING_OVERLOADED;
- send_pause_frame(port_id, 1337);
- }
+ /* Enqueue received packets on the RX ring */
+ nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
+ (uint16_t) *quota);
+ ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
+ nb_rx_pkts, &free);
+ if (RING_SIZE - free > *high_watermark) {
+ ring_state[port_id] = RING_OVERLOADED;
+ send_pause_frame(port_id, 1337);
+ }
- else if (ret == -ENOBUFS) {
+ if (ret == 0) {
- /* Return mbufs to the pool, effectively dropping packets */
- for (i = 0; i < nb_rx_pkts; i++)
- rte_pktmbuf_free(pkts[i]);
- }
- }
- }
+ /*
+ * Return mbufs to the pool,
+ * effectively dropping packets
+ */
+ for (i = 0; i < nb_rx_pkts; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
+ }
+ }
}
static void
pipeline_stage(__attribute__((unused)) void *args)
{
- int i, ret;
- int nb_dq_pkts;
+ int i, ret;
+ int nb_dq_pkts;
- uint8_t port_id;
+ uint8_t port_id;
- unsigned int lcore_id, previous_lcore_id;
+ unsigned int lcore_id, previous_lcore_id;
+ unsigned int free;
- void *pkts[MAX_PKT_QUOTA];
- struct rte_ring *rx, *tx;
- enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };
+ void *pkts[MAX_PKT_QUOTA];
+ struct rte_ring *rx, *tx;
+ enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };
- lcore_id = rte_lcore_id();
- previous_lcore_id = get_previous_lcore_id(lcore_id);
+ lcore_id = rte_lcore_id();
+ previous_lcore_id = get_previous_lcore_id(lcore_id);
- RTE_LOG(INFO, USER1,
- "%s() started on core %u - processing packets from core %u\n",
- __func__, lcore_id, previous_lcore_id);
+ RTE_LOG(INFO, USER1,
+ "%s() started on core %u - processing packets from core %u\n",
+ __func__, lcore_id, previous_lcore_id);
- while (1) {
+ while (1) {
- for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
- if (!is_bit_set(port_id, portmask))
- continue;
+ if (!is_bit_set(port_id, portmask))
+ continue;
- tx = rings[lcore_id][port_id];
- rx = rings[previous_lcore_id][port_id];
+ tx = rings[lcore_id][port_id];
+ rx = rings[previous_lcore_id][port_id];
- if (ring_state[port_id] != RING_READY) {
- if (rte_ring_count(tx) > *low_watermark)
- continue;
- else
- ring_state[port_id] = RING_READY;
- }
+ if (ring_state[port_id] != RING_READY) {
+ if (rte_ring_count(tx) > *low_watermark)
+ continue;
+ else
+ ring_state[port_id] = RING_READY;
+ }
- /* Dequeue up to quota mbuf from rx */
- nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts, *quota);
- if (unlikely(nb_dq_pkts < 0))
- continue;
+ /* Dequeue up to quota mbuf from rx */
+ nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
+ *quota, NULL);
+ if (unlikely(nb_dq_pkts < 0))
+ continue;
- /* Enqueue them on tx */
- ret = rte_ring_enqueue_bulk(tx, pkts, nb_dq_pkts);
- if (ret == -EDQUOT)
- ring_state[port_id] = RING_OVERLOADED;
+ /* Enqueue them on tx */
+ ret = rte_ring_enqueue_bulk(tx, pkts,
+ nb_dq_pkts, &free);
+ if (RING_SIZE - free > *high_watermark)
+ ring_state[port_id] = RING_OVERLOADED;
- else if (ret == -ENOBUFS) {
+ if (ret == 0) {
- /* Return mbufs to the pool, effectively dropping packets */
- for (i = 0; i < nb_dq_pkts; i++)
- rte_pktmbuf_free(pkts[i]);
- }
- }
- }
+ /*
+ * Return mbufs to the pool,
+ * effectively dropping packets
+ */
+ for (i = 0; i < nb_dq_pkts; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
+ }
+ }
}
static void
@@ -265,108 +279,114 @@ send_stage(__attribute__((unused)) void *args)
{
uint16_t nb_dq_pkts;
- uint8_t port_id;
- uint8_t dest_port_id;
+ uint8_t port_id;
+ uint8_t dest_port_id;
- unsigned int lcore_id, previous_lcore_id;
+ unsigned int lcore_id, previous_lcore_id;
- struct rte_ring *tx;
- struct rte_mbuf *tx_pkts[MAX_PKT_QUOTA];
+ struct rte_ring *tx;
+ struct rte_mbuf *tx_pkts[MAX_PKT_QUOTA];
- lcore_id = rte_lcore_id();
- previous_lcore_id = get_previous_lcore_id(lcore_id);
+ lcore_id = rte_lcore_id();
+ previous_lcore_id = get_previous_lcore_id(lcore_id);
- RTE_LOG(INFO, USER1,
- "%s() started on core %u - processing packets from core %u\n",
- __func__, lcore_id, previous_lcore_id);
+ RTE_LOG(INFO, USER1,
+ "%s() started on core %u - processing packets from core %u\n",
+ __func__, lcore_id, previous_lcore_id);
- while (1) {
+ while (1) {
- /* Process each ring round robin style */
- for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+ /* Process each ring round robin style */
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
- if (!is_bit_set(port_id, portmask))
- continue;
+ if (!is_bit_set(port_id, portmask))
+ continue;
- dest_port_id = port_pairs[port_id];
- tx = rings[previous_lcore_id][port_id];
+ dest_port_id = port_pairs[port_id];
+ tx = rings[previous_lcore_id][port_id];
- if (rte_ring_empty(tx))
- continue;
+ if (rte_ring_empty(tx))
+ continue;
- /* Dequeue packets from tx and send them */
- nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx, (void *) tx_pkts, *quota);
- rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);
+ /* Dequeue packets from tx and send them */
+ nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx,
+ (void *) tx_pkts, *quota, NULL);
+ rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);
- /* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
- }
- }
+ /* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
+ }
+ }
}
int
main(int argc, char **argv)
{
- int ret;
- unsigned int lcore_id, master_lcore_id, last_lcore_id;
+ int ret;
+ unsigned int lcore_id, master_lcore_id, last_lcore_id;
- uint8_t port_id;
+ uint8_t port_id;
- rte_set_log_level(RTE_LOG_INFO);
+ rte_log_set_global_level(RTE_LOG_INFO);
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");
- argc -= ret;
- argv += ret;
+ argc -= ret;
+ argv += ret;
- init_dpdk();
- setup_shared_variables();
+ init_dpdk();
+ setup_shared_variables();
- *quota = 32;
- *low_watermark = 60 * RING_SIZE / 100;
+ *quota = 32;
+ *low_watermark = 60 * RING_SIZE / 100;
- last_lcore_id = get_last_lcore_id();
- master_lcore_id = rte_get_master_lcore();
+ last_lcore_id = get_last_lcore_id();
+ master_lcore_id = rte_get_master_lcore();
- /* Parse the application's arguments */
- ret = parse_qw_args(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid quota/watermark argument(s)\n");
+ /* Parse the application's arguments */
+ ret = parse_qw_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid quota/watermark argument(s)\n");
- /* Create a pool of mbuf to store packets */
- mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
- MBUF_DATA_SIZE, rte_socket_id());
- if (mbuf_pool == NULL)
- rte_panic("%s\n", rte_strerror(rte_errno));
+ /* Create a pool of mbuf to store packets */
+ mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
+ MBUF_DATA_SIZE, rte_socket_id());
+ if (mbuf_pool == NULL)
+ rte_panic("%s\n", rte_strerror(rte_errno));
- for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
- if (is_bit_set(port_id, portmask)) {
- configure_eth_port(port_id);
- init_ring(master_lcore_id, port_id);
- }
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
+ if (is_bit_set(port_id, portmask)) {
+ configure_eth_port(port_id);
+ init_ring(master_lcore_id, port_id);
+ }
- pair_ports();
+ pair_ports();
- /* Start pipeline_connect() on all the available slave lcore but the last */
- for (lcore_id = 0 ; lcore_id < last_lcore_id; lcore_id++) {
- if (rte_lcore_is_enabled(lcore_id) && lcore_id != master_lcore_id) {
+ /*
+ * Start pipeline_connect() on all the available slave lcores
+ * but the last
+ */
+ for (lcore_id = 0 ; lcore_id < last_lcore_id; lcore_id++) {
+ if (rte_lcore_is_enabled(lcore_id) &&
+ lcore_id != master_lcore_id) {
- for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
- if (is_bit_set(port_id, portmask))
- init_ring(lcore_id, port_id);
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
+ if (is_bit_set(port_id, portmask))
+ init_ring(lcore_id, port_id);
- /* typecast is a workaround for GCC 4.3 bug */
- rte_eal_remote_launch((int (*)(void *))pipeline_stage, NULL, lcore_id);
- }
- }
+ /* typecast is a workaround for GCC 4.3 bug */
+ rte_eal_remote_launch((int (*)(void *))pipeline_stage,
+ NULL, lcore_id);
+ }
+ }
- /* Start send_stage() on the last slave core */
- /* typecast is a workaround for GCC 4.3 bug */
- rte_eal_remote_launch((int (*)(void *))send_stage, NULL, last_lcore_id);
+ /* Start send_stage() on the last slave core */
+ /* typecast is a workaround for GCC 4.3 bug */
+ rte_eal_remote_launch((int (*)(void *))send_stage, NULL, last_lcore_id);
- /* Start receive_stage() on the master core */
- receive_stage(NULL);
+ /* Start receive_stage() on the master core */
+ receive_stage(NULL);
- return 0;
+ return 0;
}
diff --git a/examples/quota_watermark/qw/main.h b/examples/quota_watermark/qw/main.h
index 6b364898..8c8e3116 100644
--- a/examples/quota_watermark/qw/main.h
+++ b/examples/quota_watermark/qw/main.h
@@ -37,12 +37,13 @@
#include "../include/conf.h"
enum ring_state {
- RING_READY,
- RING_OVERLOADED,
+ RING_READY,
+ RING_OVERLOADED,
};
extern int *quota;
extern unsigned int *low_watermark;
+extern unsigned int *high_watermark;
extern uint8_t port_pairs[RTE_MAX_ETHPORTS];
@@ -53,7 +54,7 @@ extern struct rte_mempool *mbuf_pool;
static inline int
is_bit_set(int i, unsigned int mask)
{
- return (1 << i) & mask;
+ return (1 << i) & mask;
}
#endif /* _MAIN_H_ */
diff --git a/examples/quota_watermark/qwctl/commands.c b/examples/quota_watermark/qwctl/commands.c
index 5348dd3d..5cac0e17 100644
--- a/examples/quota_watermark/qwctl/commands.c
+++ b/examples/quota_watermark/qwctl/commands.c
@@ -53,36 +53,36 @@
*/
struct cmd_help_tokens {
- cmdline_fixed_string_t verb;
+ cmdline_fixed_string_t verb;
};
cmdline_parse_token_string_t cmd_help_verb =
- TOKEN_STRING_INITIALIZER(struct cmd_help_tokens, verb, "help");
+ TOKEN_STRING_INITIALIZER(struct cmd_help_tokens, verb, "help");
static void
cmd_help_handler(__attribute__((unused)) void *parsed_result,
- struct cmdline *cl,
- __attribute__((unused)) void *data)
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
{
- cmdline_printf(cl, "Available commands:\n"
- "- help\n"
- "- set [ring_name|variable] <value>\n"
- "- show [ring_name|variable]\n"
- "\n"
- "Available variables:\n"
- "- low_watermark\n"
- "- quota\n"
- "- ring names follow the core%%u_port%%u format\n");
+ cmdline_printf(cl, "Available commands:\n"
+ "- help\n"
+ "- set [ring_name|variable] <value>\n"
+ "- show [ring_name|variable]\n"
+ "\n"
+ "Available variables:\n"
+ "- low_watermark\n"
+ "- quota\n"
+ "- ring names follow the core%%u_port%%u format\n");
}
cmdline_parse_inst_t cmd_help = {
- .f = cmd_help_handler,
- .data = NULL,
- .help_str = "show help",
- .tokens = {
- (void *) &cmd_help_verb,
- NULL,
- },
+ .f = cmd_help_handler,
+ .data = NULL,
+ .help_str = "show help",
+ .tokens = {
+ (void *) &cmd_help_verb,
+ NULL,
+ },
};
@@ -91,69 +91,74 @@ cmdline_parse_inst_t cmd_help = {
*/
struct cmd_set_tokens {
- cmdline_fixed_string_t verb;
- cmdline_fixed_string_t variable;
- uint32_t value;
+ cmdline_fixed_string_t verb;
+ cmdline_fixed_string_t variable;
+ uint32_t value;
};
cmdline_parse_token_string_t cmd_set_verb =
- TOKEN_STRING_INITIALIZER(struct cmd_set_tokens, verb, "set");
+ TOKEN_STRING_INITIALIZER(struct cmd_set_tokens, verb, "set");
cmdline_parse_token_string_t cmd_set_variable =
- TOKEN_STRING_INITIALIZER(struct cmd_set_tokens, variable, NULL);
+ TOKEN_STRING_INITIALIZER(struct cmd_set_tokens, variable, NULL);
cmdline_parse_token_num_t cmd_set_value =
- TOKEN_NUM_INITIALIZER(struct cmd_set_tokens, value, UINT32);
+ TOKEN_NUM_INITIALIZER(struct cmd_set_tokens, value, UINT32);
static void
cmd_set_handler(__attribute__((unused)) void *parsed_result,
- struct cmdline *cl,
- __attribute__((unused)) void *data)
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
{
- struct cmd_set_tokens *tokens = parsed_result;
- struct rte_ring *ring;
-
- if (!strcmp(tokens->variable, "quota")) {
-
- if (tokens->value > 0 && tokens->value <= MAX_PKT_QUOTA)
- *quota = tokens->value;
- else
- cmdline_printf(cl, "quota must be between 1 and %u\n", MAX_PKT_QUOTA);
- }
-
- else if (!strcmp(tokens->variable, "low_watermark")) {
-
- if (tokens->value <= 100)
- *low_watermark = tokens->value * RING_SIZE / 100;
- else
- cmdline_printf(cl, "low_watermark must be between 0%% and 100%%\n");
- }
-
- else {
-
- ring = rte_ring_lookup(tokens->variable);
- if (ring == NULL)
- cmdline_printf(cl, "Cannot find ring \"%s\"\n", tokens->variable);
- else
- if (tokens->value >= *low_watermark * 100 / RING_SIZE
- && tokens->value <= 100)
- rte_ring_set_water_mark(ring, tokens->value * RING_SIZE / 100);
- else
- cmdline_printf(cl, "ring high watermark must be between %u%% "
- "and 100%%\n", *low_watermark * 100 / RING_SIZE);
- }
+ struct cmd_set_tokens *tokens = parsed_result;
+ struct rte_ring *ring;
+
+ if (!strcmp(tokens->variable, "quota")) {
+
+ if (tokens->value > 0 && tokens->value <= MAX_PKT_QUOTA)
+ *quota = tokens->value;
+ else
+ cmdline_printf(cl, "quota must be between 1 and %u\n",
+ MAX_PKT_QUOTA);
+ }
+
+ else if (!strcmp(tokens->variable, "low_watermark")) {
+
+ if (tokens->value <= 100)
+ *low_watermark = tokens->value * RING_SIZE / 100;
+ else
+ cmdline_printf(cl,
+ "low_watermark must be between 0%% and 100%%\n");
+ }
+
+ else {
+
+ ring = rte_ring_lookup(tokens->variable);
+ if (ring == NULL)
+ cmdline_printf(cl, "Cannot find ring \"%s\"\n",
+ tokens->variable);
+ else
+ if (tokens->value >= *low_watermark * 100 / RING_SIZE
+ && tokens->value <= 100)
+ *high_watermark = tokens->value *
+ RING_SIZE / 100;
+ else
+ cmdline_printf(cl,
+ "ring high watermark must be between %u%% and 100%%\n",
+ *low_watermark * 100 / RING_SIZE);
+ }
}
cmdline_parse_inst_t cmd_set = {
- .f = cmd_set_handler,
- .data = NULL,
- .help_str = "Set a variable value",
- .tokens = {
- (void *) &cmd_set_verb,
- (void *) &cmd_set_variable,
- (void *) &cmd_set_value,
- NULL,
- },
+ .f = cmd_set_handler,
+ .data = NULL,
+ .help_str = "Set a variable value",
+ .tokens = {
+ (void *) &cmd_set_verb,
+ (void *) &cmd_set_variable,
+ (void *) &cmd_set_value,
+ NULL,
+ },
};
@@ -162,56 +167,59 @@ cmdline_parse_inst_t cmd_set = {
*/
struct cmd_show_tokens {
- cmdline_fixed_string_t verb;
- cmdline_fixed_string_t variable;
+ cmdline_fixed_string_t verb;
+ cmdline_fixed_string_t variable;
};
cmdline_parse_token_string_t cmd_show_verb =
- TOKEN_STRING_INITIALIZER(struct cmd_show_tokens, verb, "show");
+ TOKEN_STRING_INITIALIZER(struct cmd_show_tokens, verb, "show");
cmdline_parse_token_string_t cmd_show_variable =
- TOKEN_STRING_INITIALIZER(struct cmd_show_tokens, variable, NULL);
+ TOKEN_STRING_INITIALIZER(struct cmd_show_tokens,
+ variable, NULL);
static void
cmd_show_handler(__attribute__((unused)) void *parsed_result,
- struct cmdline *cl,
- __attribute__((unused)) void *data)
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
{
- struct cmd_show_tokens *tokens = parsed_result;
- struct rte_ring *ring;
+ struct cmd_show_tokens *tokens = parsed_result;
+ struct rte_ring *ring;
- if (!strcmp(tokens->variable, "quota"))
- cmdline_printf(cl, "Global quota: %d\n", *quota);
+ if (!strcmp(tokens->variable, "quota"))
+ cmdline_printf(cl, "Global quota: %d\n", *quota);
- else if (!strcmp(tokens->variable, "low_watermark"))
- cmdline_printf(cl, "Global low_watermark: %u\n", *low_watermark);
+ else if (!strcmp(tokens->variable, "low_watermark"))
+ cmdline_printf(cl, "Global low_watermark: %u\n",
+ *low_watermark);
- else {
+ else {
- ring = rte_ring_lookup(tokens->variable);
- if (ring == NULL)
- cmdline_printf(cl, "Cannot find ring \"%s\"\n", tokens->variable);
- else
- rte_ring_dump(stdout, ring);
- }
+ ring = rte_ring_lookup(tokens->variable);
+ if (ring == NULL)
+ cmdline_printf(cl, "Cannot find ring \"%s\"\n",
+ tokens->variable);
+ else
+ rte_ring_dump(stdout, ring);
+ }
}
cmdline_parse_inst_t cmd_show = {
- .f = cmd_show_handler,
- .data = NULL,
- .help_str = "Show a variable value",
- .tokens = {
- (void *) &cmd_show_verb,
- (void *) &cmd_show_variable,
- NULL,
- },
+ .f = cmd_show_handler,
+ .data = NULL,
+ .help_str = "Show a variable value",
+ .tokens = {
+ (void *) &cmd_show_verb,
+ (void *) &cmd_show_variable,
+ NULL,
+ },
};
cmdline_parse_ctx_t qwctl_ctx[] = {
- (cmdline_parse_inst_t *)&cmd_help,
- (cmdline_parse_inst_t *)&cmd_set,
- (cmdline_parse_inst_t *)&cmd_show,
- NULL,
+ (cmdline_parse_inst_t *)&cmd_help,
+ (cmdline_parse_inst_t *)&cmd_set,
+ (cmdline_parse_inst_t *)&cmd_show,
+ NULL,
};
diff --git a/examples/quota_watermark/qwctl/qwctl.c b/examples/quota_watermark/qwctl/qwctl.c
index 29c501ca..18ec17a1 100644
--- a/examples/quota_watermark/qwctl/qwctl.c
+++ b/examples/quota_watermark/qwctl/qwctl.c
@@ -55,40 +55,42 @@
int *quota;
unsigned int *low_watermark;
+unsigned int *high_watermark;
static void
setup_shared_variables(void)
{
- const struct rte_memzone *qw_memzone;
+ const struct rte_memzone *qw_memzone;
- qw_memzone = rte_memzone_lookup(QUOTA_WATERMARK_MEMZONE_NAME);
- if (qw_memzone == NULL)
- rte_exit(EXIT_FAILURE, "Couldn't find memzone\n");
+ qw_memzone = rte_memzone_lookup(QUOTA_WATERMARK_MEMZONE_NAME);
+ if (qw_memzone == NULL)
+ rte_exit(EXIT_FAILURE, "Couldn't find memzone\n");
- quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ quota = qw_memzone->addr;
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}
int main(int argc, char **argv)
{
- int ret;
- struct cmdline *cl;
+ int ret;
+ struct cmdline *cl;
- rte_set_log_level(RTE_LOG_INFO);
+ rte_log_set_global_level(RTE_LOG_INFO);
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");
- setup_shared_variables();
+ setup_shared_variables();
- cl = cmdline_stdin_new(qwctl_ctx, "qwctl> ");
- if (cl == NULL)
- rte_exit(EXIT_FAILURE, "Cannot create cmdline instance\n");
+ cl = cmdline_stdin_new(qwctl_ctx, "qwctl> ");
+ if (cl == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create cmdline instance\n");
- cmdline_interact(cl);
- cmdline_stdin_exit(cl);
+ cmdline_interact(cl);
+ cmdline_stdin_exit(cl);
- return 0;
+ return 0;
}
diff --git a/examples/quota_watermark/qwctl/qwctl.h b/examples/quota_watermark/qwctl/qwctl.h
index 8d146e57..545914b3 100644
--- a/examples/quota_watermark/qwctl/qwctl.h
+++ b/examples/quota_watermark/qwctl/qwctl.h
@@ -36,5 +36,6 @@
extern int *quota;
extern unsigned int *low_watermark;
+extern unsigned int *high_watermark;
#endif /* _MAIN_H_ */
diff --git a/examples/server_node_efd/Makefile b/examples/server_node_efd/Makefile
new file mode 100644
index 00000000..6977ef93
--- /dev/null
+++ b/examples/server_node_efd/Makefile
@@ -0,0 +1,44 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += server
+DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += node
+
+include $(RTE_SDK)/mk/rte.extsubdir.mk
diff --git a/examples/server_node_efd/node/Makefile b/examples/server_node_efd/node/Makefile
new file mode 100644
index 00000000..8cf7b650
--- /dev/null
+++ b/examples/server_node_efd/node/Makefile
@@ -0,0 +1,48 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = node
+
+# all source are stored in SRCS-y
+SRCS-y := node.c
+
+CFLAGS += $(WERROR_FLAGS) -O3
+CFLAGS += -I$(SRCDIR)/../shared
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
new file mode 100644
index 00000000..f780b926
--- /dev/null
+++ b/examples/server_node_efd/node/node.c
@@ -0,0 +1,417 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdlib.h>
+#include <getopt.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_log.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_ring.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#include <rte_ip.h>
+
+#include "common.h"
+
+/* Number of packets to attempt to read from queue */
+#define PKT_READ_SIZE ((uint16_t)32)
+
+/*
+ * Our node id number - tells us which rx queue to read, and NIC TX
+ * queue to write to.
+ */
+static uint8_t node_id;
+
+#define MBQ_CAPACITY 32
+
+/* maps input ports to output ports for packets */
+static uint8_t output_ports[RTE_MAX_ETHPORTS];
+
+/* buffers up a set of packet that are ready to send */
+struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
+
+/* shared data from server. We update statistics here */
+static struct tx_stats *tx_stats;
+
+static struct filter_stats *filter_stats;
+
+/*
+ * print a usage message
+ */
+static void
+usage(const char *progname)
+{
+ printf("Usage: %s [EAL args] -- -n <node_id>\n\n", progname);
+}
+
+/*
+ * Convert the node id number from a string to an int.
+ */
+static int
+parse_node_num(const char *node)
+{
+ char *end = NULL;
+ unsigned long temp;
+
+ if (node == NULL || *node == '\0')
+ return -1;
+
+ temp = strtoul(node, &end, 10);
+ if (end == NULL || *end != '\0')
+ return -1;
+
+ node_id = (uint8_t)temp;
+ return 0;
+}
+
+/*
+ * Parse the application arguments to the node app.
+ */
+static int
+parse_app_args(int argc, char *argv[])
+{
+ int option_index, opt;
+ char **argvopt = argv;
+ const char *progname = NULL;
+ static struct option lgopts[] = { /* no long options */
+ {NULL, 0, 0, 0 }
+ };
+ progname = argv[0];
+
+ while ((opt = getopt_long(argc, argvopt, "n:", lgopts,
+ &option_index)) != EOF) {
+ switch (opt) {
+ case 'n':
+ if (parse_node_num(optarg) != 0) {
+ usage(progname);
+ return -1;
+ }
+ break;
+ default:
+ usage(progname);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Tx buffer error callback
+ */
+static void
+flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
+ void *userdata) {
+ int i;
+ uint8_t port_id = (uintptr_t)userdata;
+
+ tx_stats->tx_drop[port_id] += count;
+
+ /* free the mbufs which failed from transmit */
+ for (i = 0; i < count; i++)
+ rte_pktmbuf_free(unsent[i]);
+
+}
+
+static void
+configure_tx_buffer(uint8_t port_id, uint16_t size)
+{
+ int ret;
+
+ /* Initialize TX buffers */
+ tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
+ RTE_ETH_TX_BUFFER_SIZE(size), 0,
+ rte_eth_dev_socket_id(port_id));
+ if (tx_buffer[port_id] == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx "
+ "on port %u\n", (unsigned int) port_id);
+
+ rte_eth_tx_buffer_init(tx_buffer[port_id], size);
+
+ ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
+ flush_tx_error_callback, (void *)(intptr_t)port_id);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot set error callback for "
+ "tx buffer on port %u\n", (unsigned int) port_id);
+}
+
+/*
+ * set up output ports so that all traffic on port gets sent out
+ * its paired port. Index using actual port numbers since that is
+ * what comes in the mbuf structure.
+ */
+static void
+configure_output_ports(const struct shared_info *info)
+{
+ int i;
+
+ if (info->num_ports > RTE_MAX_ETHPORTS)
+ rte_exit(EXIT_FAILURE, "Too many ethernet ports. "
+ "RTE_MAX_ETHPORTS = %u\n",
+ (unsigned int)RTE_MAX_ETHPORTS);
+ for (i = 0; i < info->num_ports - 1; i += 2) {
+ uint8_t p1 = info->id[i];
+ uint8_t p2 = info->id[i+1];
+
+ output_ports[p1] = p2;
+ output_ports[p2] = p1;
+
+ configure_tx_buffer(p1, MBQ_CAPACITY);
+ configure_tx_buffer(p2, MBQ_CAPACITY);
+
+ }
+}
+
+/*
+ * Create the hash table that will contain the flows that
+ * the node will handle, which will be used to decide if packet
+ * is transmitted or dropped.
+ */
+static struct rte_hash *
+create_hash_table(const struct shared_info *info)
+{
+ uint32_t num_flows_node = info->num_flows / info->num_nodes;
+ char name[RTE_HASH_NAMESIZE];
+ struct rte_hash *h;
+
+ /* create table */
+ struct rte_hash_parameters hash_params = {
+ .entries = num_flows_node * 2, /* table load = 50% */
+ .key_len = sizeof(uint32_t), /* Store IPv4 dest IP address */
+ .socket_id = rte_socket_id(),
+ .hash_func_init_val = 0,
+ };
+
+ snprintf(name, sizeof(name), "hash_table_%d", node_id);
+ hash_params.name = name;
+ h = rte_hash_create(&hash_params);
+
+ if (h == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Problem creating the hash table for node %d\n",
+ node_id);
+ return h;
+}
+
+static void
+populate_hash_table(const struct rte_hash *h, const struct shared_info *info)
+{
+ unsigned int i;
+ int32_t ret;
+ uint32_t ip_dst;
+ uint32_t num_flows_node = 0;
+ uint64_t target_node;
+
+ /* Add flows in table */
+ for (i = 0; i < info->num_flows; i++) {
+ target_node = i % info->num_nodes;
+ if (target_node != node_id)
+ continue;
+
+ ip_dst = rte_cpu_to_be_32(i);
+
+ ret = rte_hash_add_key(h, (void *) &ip_dst);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Unable to add entry %u "
+ "in hash table\n", i);
+ else
+ num_flows_node++;
+
+ }
+
+ printf("Hash table: Adding 0x%x keys\n", num_flows_node);
+}
+
+/*
+ * This function performs routing of packets
+ * Just sends each input packet out an output port based solely on the input
+ * port it arrived on.
+ */
+static inline void
+transmit_packet(struct rte_mbuf *buf)
+{
+ int sent;
+ const uint8_t in_port = buf->port;
+ const uint8_t out_port = output_ports[in_port];
+ struct rte_eth_dev_tx_buffer *buffer = tx_buffer[out_port];
+
+ sent = rte_eth_tx_buffer(out_port, node_id, buffer, buf);
+ if (sent)
+ tx_stats->tx[out_port] += sent;
+
+}
+
+static inline void
+handle_packets(struct rte_hash *h, struct rte_mbuf **bufs, uint16_t num_packets)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ uint32_t ipv4_dst_ip[PKT_READ_SIZE];
+ const void *key_ptrs[PKT_READ_SIZE];
+ unsigned int i;
+ int32_t positions[PKT_READ_SIZE] = {0};
+
+ for (i = 0; i < num_packets; i++) {
+ /* Handle IPv4 header.*/
+ ipv4_hdr = rte_pktmbuf_mtod_offset(bufs[i], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
+ key_ptrs[i] = &ipv4_dst_ip[i];
+ }
+ /* Check if packets belongs to any flows handled by this node */
+ rte_hash_lookup_bulk(h, key_ptrs, num_packets, positions);
+
+ for (i = 0; i < num_packets; i++) {
+ if (likely(positions[i] >= 0)) {
+ filter_stats->passed++;
+ transmit_packet(bufs[i]);
+ } else {
+ filter_stats->drop++;
+ /* Drop packet, as flow is not handled by this node */
+ rte_pktmbuf_free(bufs[i]);
+ }
+ }
+}
+
+/*
+ * Application main function - loops through
+ * receiving and processing packets. Never returns
+ */
+int
+main(int argc, char *argv[])
+{
+ const struct rte_memzone *mz;
+ struct rte_ring *rx_ring;
+ struct rte_hash *h;
+ struct rte_mempool *mp;
+ struct shared_info *info;
+ int need_flush = 0; /* indicates whether we have unsent packets */
+ int retval;
+ void *pkts[PKT_READ_SIZE];
+ uint16_t sent;
+
+ retval = rte_eal_init(argc, argv);
+ if (retval < 0)
+ return -1;
+ argc -= retval;
+ argv += retval;
+
+ if (parse_app_args(argc, argv) < 0)
+ rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");
+
+ if (rte_eth_dev_count() == 0)
+ rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+
+ rx_ring = rte_ring_lookup(get_rx_queue_name(node_id));
+ if (rx_ring == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot get RX ring - "
+ "is server process running?\n");
+
+ mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
+ if (mp == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");
+
+ mz = rte_memzone_lookup(MZ_SHARED_INFO);
+ if (mz == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
+ info = mz->addr;
+ tx_stats = &(info->tx_stats[node_id]);
+ filter_stats = &(info->filter_stats[node_id]);
+
+ configure_output_ports(info);
+
+ h = create_hash_table(info);
+
+ populate_hash_table(h, info);
+
+ RTE_LOG(INFO, APP, "Finished Process Init.\n");
+
+ printf("\nNode process %d handling packets\n", node_id);
+ printf("[Press Ctrl-C to quit ...]\n");
+
+ for (;;) {
+ uint16_t rx_pkts = PKT_READ_SIZE;
+ uint8_t port;
+
+ /*
+ * Try dequeuing max possible packets first, if that fails,
+ * get the most we can. Loop body should only execute once,
+ * maximum
+ */
+ while (rx_pkts > 0 &&
+ unlikely(rte_ring_dequeue_bulk(rx_ring, pkts,
+ rx_pkts, NULL) == 0))
+ rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring),
+ PKT_READ_SIZE);
+
+ if (unlikely(rx_pkts == 0)) {
+ if (need_flush)
+ for (port = 0; port < info->num_ports; port++) {
+ sent = rte_eth_tx_buffer_flush(
+ info->id[port],
+ node_id,
+ tx_buffer[port]);
+ if (unlikely(sent))
+ tx_stats->tx[port] += sent;
+ }
+ need_flush = 0;
+ continue;
+ }
+
+ handle_packets(h, (struct rte_mbuf **)pkts, rx_pkts);
+
+ need_flush = 1;
+ }
+}
diff --git a/examples/server_node_efd/server/Makefile b/examples/server_node_efd/server/Makefile
new file mode 100644
index 00000000..a2f2f361
--- /dev/null
+++ b/examples/server_node_efd/server/Makefile
@@ -0,0 +1,57 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(CONFIG_RTE_EXEC_ENV), "linuxapp")
+$(error This application can only operate in a linuxapp environment, \
+please change the definition of the RTE_TARGET environment variable)
+endif
+
+# binary name
+APP = server
+
+# all source are stored in SRCS-y
+SRCS-y := main.c init.c args.c
+
+INC := $(wildcard *.h)
+
+CFLAGS += $(WERROR_FLAGS) -O3
+CFLAGS += -I$(SRCDIR)/../shared
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/server_node_efd/server/args.c b/examples/server_node_efd/server/args.c
new file mode 100644
index 00000000..ee292038
--- /dev/null
+++ b/examples/server_node_efd/server/args.c
@@ -0,0 +1,200 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <getopt.h>
+#include <stdarg.h>
+#include <errno.h>
+
+#include <rte_memory.h>
+#include <rte_string_fns.h>
+
+#include "common.h"
+#include "args.h"
+#include "init.h"
+
+/* 1M flows by default */
+#define DEFAULT_NUM_FLOWS 0x100000
+
+/* global var for number of nodes - extern in header */
+uint8_t num_nodes;
+/* global var for number of flows - extern in header */
+uint32_t num_flows = DEFAULT_NUM_FLOWS;
+
+static const char *progname;
+
+/**
+ * Prints out usage information to stdout
+ */
+static void
+usage(void)
+{
+ printf("%s [EAL options] -- -p PORTMASK -n NUM_NODES -f NUM_FLOWS\n"
+ " -p PORTMASK: hexadecimal bitmask of ports to use\n"
+ " -n NUM_NODES: number of node processes to use\n"
+ " -f NUM_FLOWS: number of flows to be added in the EFD table\n",
+ progname);
+}
+
+/**
+ * The ports to be used by the application are passed in
+ * the form of a bitmask. This function parses the bitmask
+ * and places the port numbers to be used into the port[]
+ * array variable
+ */
+static int
+parse_portmask(uint8_t max_ports, const char *portmask)
+{
+ char *end = NULL;
+ unsigned long pm;
+ uint8_t count = 0;
+
+ if (portmask == NULL || *portmask == '\0')
+ return -1;
+
+ /* convert parameter to a number and verify */
+ pm = strtoul(portmask, &end, 16);
+ if (end == NULL || *end != '\0' || pm == 0)
+ return -1;
+
+ /* loop through bits of the mask and mark ports */
+ while (pm != 0) {
+ if (pm & 0x01) { /* bit is set in mask, use port */
+ if (count >= max_ports)
+ printf("WARNING: requested port %u not present"
+ " - ignoring\n", (unsigned int)count);
+ else
+ info->id[info->num_ports++] = count;
+ }
+ pm = (pm >> 1);
+ count++;
+ }
+
+ return 0;
+}
+
+/**
+ * Take the number of nodes parameter passed to the app
+ * and convert to a number to store in the num_nodes variable
+ */
+static int
+parse_num_nodes(const char *nodes)
+{
+ char *end = NULL;
+ unsigned long temp;
+
+ if (nodes == NULL || *nodes == '\0')
+ return -1;
+
+ temp = strtoul(nodes, &end, 10);
+ if (end == NULL || *end != '\0' || temp == 0)
+ return -1;
+
+ num_nodes = (uint8_t)temp;
+ return 0;
+}
+
+static int
+parse_num_flows(const char *flows)
+{
+ char *end = NULL;
+
+ /* parse hexadecimal string */
+ num_flows = strtoul(flows, &end, 16);
+ if ((flows[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+
+ if (num_flows == 0)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * The application specific arguments follow the DPDK-specific
+ * arguments which are stripped by the DPDK init. This function
+ * processes these application arguments, printing usage info
+ * on error.
+ */
+int
+parse_app_args(uint8_t max_ports, int argc, char *argv[])
+{
+ int option_index, opt;
+ char **argvopt = argv;
+ static struct option lgopts[] = { /* no long options */
+ {NULL, 0, 0, 0 }
+ };
+ progname = argv[0];
+
+ while ((opt = getopt_long(argc, argvopt, "n:f:p:", lgopts,
+ &option_index)) != EOF) {
+ switch (opt) {
+ case 'p':
+ if (parse_portmask(max_ports, optarg) != 0) {
+ usage();
+ return -1;
+ }
+ break;
+ case 'n':
+ if (parse_num_nodes(optarg) != 0) {
+ usage();
+ return -1;
+ }
+ break;
+ case 'f':
+ if (parse_num_flows(optarg) != 0) {
+ usage();
+ return -1;
+ }
+ break;
+ default:
+ printf("ERROR: Unknown option '%c'\n", opt);
+ usage();
+ return -1;
+ }
+ }
+
+ if (info->num_ports == 0 || num_nodes == 0) {
+ usage();
+ return -1;
+ }
+
+ if (info->num_ports % 2 != 0) {
+ printf("ERROR: application requires an even "
+ "number of ports to use\n");
+ return -1;
+ }
+ return 0;
+}
diff --git a/drivers/net/null/rte_eth_null.h b/examples/server_node_efd/server/args.h
index abada8c2..cacf3957 100644
--- a/drivers/net/null/rte_eth_null.h
+++ b/examples/server_node_efd/server/args.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,10 +31,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef RTE_ETH_NULL_H_
-#define RTE_ETH_NULL_H_
+#ifndef _ARGS_H_
+#define _ARGS_H_
-int eth_dev_null_create(const char *name, const unsigned numa_node,
- unsigned packet_size, unsigned packet_copy);
+int parse_app_args(uint8_t max_ports, int argc, char *argv[]);
-#endif /* RTE_ETH_NULL_H_ */
+#endif /* ifndef _ARGS_H_ */
diff --git a/examples/server_node_efd/server/init.c b/examples/server_node_efd/server/init.c
new file mode 100644
index 00000000..82457b44
--- /dev/null
+++ b/examples/server_node_efd/server/init.c
@@ -0,0 +1,371 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_byteorder.h>
+#include <rte_atomic.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_debug.h>
+#include <rte_ring.h>
+#include <rte_log.h>
+#include <rte_mempool.h>
+#include <rte_memcpy.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_efd.h>
+#include <rte_hash.h>
+
+#include "common.h"
+#include "args.h"
+#include "init.h"
+
+#define MBUFS_PER_NODE 1536
+#define MBUFS_PER_PORT 1536
+#define MBUF_CACHE_SIZE 512
+
+#define RTE_MP_RX_DESC_DEFAULT 512
+#define RTE_MP_TX_DESC_DEFAULT 512
+#define NODE_QUEUE_RINGSIZE 128
+
+#define NO_FLAGS 0
+
+/* The mbuf pool for packet rx */
+struct rte_mempool *pktmbuf_pool;
+
+/* array of info/queues for nodes */
+struct node *nodes;
+
+/* EFD table */
+struct rte_efd_table *efd_table;
+
+/* Shared info between server and nodes */
+struct shared_info *info;
+
+/**
+ * Initialise the mbuf pool for packet reception for the NIC, and any other
+ * buffer pools needed by the app - currently none.
+ */
+static int
+init_mbuf_pools(void)
+{
+ const unsigned int num_mbufs = (num_nodes * MBUFS_PER_NODE) +
+ (info->num_ports * MBUFS_PER_PORT);
+
+ /*
+ * Don't pass single-producer/single-consumer flags to mbuf create as it
+ * seems faster to use a cache instead
+ */
+ printf("Creating mbuf pool '%s' [%u mbufs] ...\n",
+ PKTMBUF_POOL_NAME, num_mbufs);
+ pktmbuf_pool = rte_pktmbuf_pool_create(PKTMBUF_POOL_NAME, num_mbufs,
+ MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+
+ return pktmbuf_pool == NULL; /* 0 on success */
+}
+
+/**
+ * Initialise an individual port:
+ * - configure number of rx and tx rings
+ * - set up each rx ring, to pull from the main mbuf pool
+ * - set up each tx ring
+ * - start the port and report its status to stdout
+ */
+static int
+init_port(uint8_t port_num)
+{
+ /* for port configuration all features are off by default */
+ const struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS
+ }
+ };
+ const uint16_t rx_rings = 1, tx_rings = num_nodes;
+ const uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT;
+ const uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT;
+
+ uint16_t q;
+ int retval;
+
+ printf("Port %u init ... ", (unsigned int)port_num);
+ fflush(stdout);
+
+ /*
+ * Standard DPDK port initialisation - config port, then set up
+ * rx and tx rings.
+ */
+ retval = rte_eth_dev_configure(port_num, rx_rings, tx_rings, &port_conf);
+ if (retval != 0)
+ return retval;
+
+ for (q = 0; q < rx_rings; q++) {
+ retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size,
+ rte_eth_dev_socket_id(port_num),
+ NULL, pktmbuf_pool);
+ if (retval < 0)
+ return retval;
+ }
+
+ for (q = 0; q < tx_rings; q++) {
+ retval = rte_eth_tx_queue_setup(port_num, q, tx_ring_size,
+ rte_eth_dev_socket_id(port_num),
+ NULL);
+ if (retval < 0)
+ return retval;
+ }
+
+ rte_eth_promiscuous_enable(port_num);
+
+ retval = rte_eth_dev_start(port_num);
+ if (retval < 0)
+ return retval;
+
+ printf("done:\n");
+
+ return 0;
+}
+
+/**
+ * Set up the DPDK rings which will be used to pass packets, via
+ * pointers, between the multi-process server and node processes.
+ * Each node needs one RX queue.
+ */
+static int
+init_shm_rings(void)
+{
+ unsigned int i;
+ unsigned int socket_id;
+ const char *q_name;
+ const unsigned int ringsize = NODE_QUEUE_RINGSIZE;
+
+ nodes = rte_malloc("node details",
+ sizeof(*nodes) * num_nodes, 0);
+ if (nodes == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot allocate memory for "
+ "node program details\n");
+
+ for (i = 0; i < num_nodes; i++) {
+ /* Create an RX queue for each node */
+ socket_id = rte_socket_id();
+ q_name = get_rx_queue_name(i);
+ nodes[i].rx_q = rte_ring_create(q_name,
+ ringsize, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (nodes[i].rx_q == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create rx ring queue "
+ "for node %u\n", i);
+ }
+ return 0;
+}
+
+/*
+ * Create EFD table which will contain all the flows
+ * that will be distributed among the nodes
+ */
+static void
+create_efd_table(void)
+{
+ uint8_t socket_id = rte_socket_id();
+
+ /* create table */
+ efd_table = rte_efd_create("flow table", num_flows * 2, sizeof(uint32_t),
+ 1 << socket_id, socket_id);
+
+ if (efd_table == NULL)
+ rte_exit(EXIT_FAILURE, "Problem creating the flow table\n");
+}
+
+static void
+populate_efd_table(void)
+{
+ unsigned int i;
+ int32_t ret;
+ uint32_t ip_dst;
+ uint8_t socket_id = rte_socket_id();
+ uint64_t node_id;
+
+ /* Add flows in table */
+ for (i = 0; i < num_flows; i++) {
+ node_id = i % num_nodes;
+
+ ip_dst = rte_cpu_to_be_32(i);
+ ret = rte_efd_update(efd_table, socket_id,
+ (void *)&ip_dst, (efd_value_t)node_id);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Unable to add entry %u in "
+ "EFD table\n", i);
+ }
+
+ printf("EFD table: Adding 0x%x keys\n", num_flows);
+}
+
+/* Check the link status of all ports in up to 9s, and print them finally */
+static void
+check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint8_t portid, count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+
+ printf("\nChecking link status");
+ fflush(stdout);
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ all_ports_up = 1;
+ for (portid = 0; portid < port_num; portid++) {
+ if ((port_mask & (1 << info->id[portid])) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ rte_eth_link_get_nowait(info->id[portid], &link);
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status)
+ printf("Port %d Link Up - speed %u "
+ "Mbps - %s\n", info->id[portid],
+ (unsigned int)link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex\n"));
+ else
+ printf("Port %d Link Down\n",
+ (uint8_t)info->id[portid]);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == ETH_LINK_DOWN) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ printf(".");
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ printf("done\n");
+ }
+ }
+}
+
+/**
+ * Main init function for the multi-process server app,
+ * calls subfunctions to do each stage of the initialisation.
+ */
+int
+init(int argc, char *argv[])
+{
+ int retval;
+ const struct rte_memzone *mz;
+ uint8_t i, total_ports;
+
+ /* init EAL, parsing EAL args */
+ retval = rte_eal_init(argc, argv);
+ if (retval < 0)
+ return -1;
+ argc -= retval;
+ argv += retval;
+
+ /* get total number of ports */
+ total_ports = rte_eth_dev_count();
+
+ /* set up array for port data */
+ mz = rte_memzone_reserve(MZ_SHARED_INFO, sizeof(*info),
+ rte_socket_id(), NO_FLAGS);
+ if (mz == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot reserve memory zone "
+ "for port information\n");
+ memset(mz->addr, 0, sizeof(*info));
+ info = mz->addr;
+
+ /* parse additional, application arguments */
+ retval = parse_app_args(total_ports, argc, argv);
+ if (retval != 0)
+ return -1;
+
+ /* initialise mbuf pools */
+ retval = init_mbuf_pools();
+ if (retval != 0)
+ rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");
+
+ /* now initialise the ports we will use */
+ for (i = 0; i < info->num_ports; i++) {
+ retval = init_port(info->id[i]);
+ if (retval != 0)
+ rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n",
+ (unsigned int) i);
+ }
+
+ check_all_ports_link_status(info->num_ports, (~0x0));
+
+ /* initialise the node queues/rings for inter-eu comms */
+ init_shm_rings();
+
+ /* Create the EFD table */
+ create_efd_table();
+
+ /* Populate the EFD table */
+ populate_efd_table();
+
+ /* Share the total number of nodes */
+ info->num_nodes = num_nodes;
+
+ /* Share the total number of flows */
+ info->num_flows = num_flows;
+ return 0;
+}
diff --git a/lib/librte_eal/common/include/rte_warnings.h b/examples/server_node_efd/server/init.h
index 54b545c9..8dc5885b 100644
--- a/lib/librte_eal/common/include/rte_warnings.h
+++ b/examples/server_node_efd/server/init.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,54 +31,46 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/**
- * @file
- * Definitions of warnings for use of various insecure functions
- */
-
-#ifndef _RTE_WARNINGS_H_
-#define _RTE_WARNINGS_H_
-
-#ifdef RTE_INSECURE_FUNCTION_WARNING
+#ifndef _INIT_H_
+#define _INIT_H_
-/* we need to include all used standard header files so that they appear
- * _before_ we poison the function names.
+/*
+ * #include <rte_ring.h>
+ * #include "args.h"
*/
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdarg.h>
-#include <errno.h>
-#ifdef RTE_EXEC_ENV_LINUXAPP
-#include <dirent.h>
-#endif
-
-/* the following function are deemed not fully secure for use e.g. they
- * do not always null-terminate arguments */
-#pragma GCC poison sprintf strtok snprintf vsnprintf
-#pragma GCC poison strlen strcpy strcat
-#pragma GCC poison sscanf
+/*
+ * Define a node structure with all needed info, including
+ * stats from the nodes.
+ */
+struct node {
+ struct rte_ring *rx_q;
+ unsigned int node_id;
+ /* these stats hold how many packets the node will actually receive,
+ * and how many packets were dropped because the node's queue was full.
+ * The port-info stats, in contrast, record how many packets were received
+ * or transmitted on an actual NIC port.
+ */
+ struct {
+ uint64_t rx;
+ uint64_t rx_drop;
+ } stats;
+};
-/* other unsafe functions may be implemented as macros so just undef them */
-#ifdef strsep
-#undef strsep
-#else
-#pragma GCC poison strsep
-#endif
+extern struct rte_efd_table *efd_table;
+extern struct node *nodes;
-#ifdef strncpy
-#undef strncpy
-#else
-#pragma GCC poison strncpy
-#endif
+/*
+ * shared information between server and nodes: number of nodes,
+ * port numbers, rx and tx stats etc.
+ */
+extern struct shared_info *info;
-#ifdef strncat
-#undef strncat
-#else
-#pragma GCC poison strncat
-#endif
+extern struct rte_mempool *pktmbuf_pool;
+extern uint8_t num_nodes;
+extern unsigned int num_sockets;
+extern uint32_t num_flows;
-#endif
+int init(int argc, char *argv[]);
-#endif /* RTE_WARNINGS_H */
+#endif /* ifndef _INIT_H_ */
diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
new file mode 100644
index 00000000..597b4c25
--- /dev/null
+++ b/examples/server_node_efd/server/main.c
@@ -0,0 +1,362 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+#include <errno.h>
+#include <netinet/ip.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_byteorder.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_atomic.h>
+#include <rte_ring.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_mempool.h>
+#include <rte_memcpy.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ethdev.h>
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+#include <rte_efd.h>
+#include <rte_ip.h>
+
+#include "common.h"
+#include "args.h"
+#include "init.h"
+
+/*
+ * When doing reads from the NIC or the node queues,
+ * use this batch size
+ */
+#define PACKET_READ_SIZE 32
+
+/*
+ * Local buffers to put packets in, used to send packets in bursts to the
+ * nodes
+ */
+struct node_rx_buf {
+ struct rte_mbuf *buffer[PACKET_READ_SIZE];
+ uint16_t count;
+};
+
+struct efd_stats {
+ uint64_t distributed;
+ uint64_t drop;
+} flow_dist_stats;
+
+/* One buffer per node rx queue - dynamically allocate array */
+static struct node_rx_buf *cl_rx_buf;
+
+static const char *
+get_printable_mac_addr(uint8_t port)
+{
+ static const char err_address[] = "00:00:00:00:00:00";
+ static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
+ struct ether_addr mac;
+
+ if (unlikely(port >= RTE_MAX_ETHPORTS))
+ return err_address;
+ if (unlikely(addresses[port][0] == '\0')) {
+ rte_eth_macaddr_get(port, &mac);
+ snprintf(addresses[port], sizeof(addresses[port]),
+ "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac.addr_bytes[0], mac.addr_bytes[1],
+ mac.addr_bytes[2], mac.addr_bytes[3],
+ mac.addr_bytes[4], mac.addr_bytes[5]);
+ }
+ return addresses[port];
+}
+
+/*
+ * This function displays the recorded statistics for each port
+ * and for each node. It uses ANSI terminal codes to clear
+ * screen when called. It is called from a single non-master
+ * thread in the server process, when the process is run with more
+ * than one lcore enabled.
+ */
+static void
+do_stats_display(void)
+{
+ unsigned int i, j;
+ const char clr[] = {27, '[', '2', 'J', '\0'};
+ const char topLeft[] = {27, '[', '1', ';', '1', 'H', '\0'};
+ uint64_t port_tx[RTE_MAX_ETHPORTS], port_tx_drop[RTE_MAX_ETHPORTS];
+ uint64_t node_tx[MAX_NODES], node_tx_drop[MAX_NODES];
+
+ /* to get TX stats, we need to do some summing calculations */
+ memset(port_tx, 0, sizeof(port_tx));
+ memset(port_tx_drop, 0, sizeof(port_tx_drop));
+ memset(node_tx, 0, sizeof(node_tx));
+ memset(node_tx_drop, 0, sizeof(node_tx_drop));
+
+ for (i = 0; i < num_nodes; i++) {
+ const struct tx_stats *tx = &info->tx_stats[i];
+
+ for (j = 0; j < info->num_ports; j++) {
+ const uint64_t tx_val = tx->tx[info->id[j]];
+ const uint64_t drop_val = tx->tx_drop[info->id[j]];
+
+ port_tx[j] += tx_val;
+ port_tx_drop[j] += drop_val;
+ node_tx[i] += tx_val;
+ node_tx_drop[i] += drop_val;
+ }
+ }
+
+ /* Clear screen and move to top left */
+ printf("%s%s", clr, topLeft);
+
+ printf("PORTS\n");
+ printf("-----\n");
+ for (i = 0; i < info->num_ports; i++)
+ printf("Port %u: '%s'\t", (unsigned int)info->id[i],
+ get_printable_mac_addr(info->id[i]));
+ printf("\n\n");
+ for (i = 0; i < info->num_ports; i++) {
+ printf("Port %u - rx: %9"PRIu64"\t"
+ "tx: %9"PRIu64"\n",
+ (unsigned int)info->id[i], info->rx_stats.rx[i],
+ port_tx[i]);
+ }
+
+ printf("\nSERVER\n");
+ printf("-----\n");
+ printf("distributed: %9"PRIu64", drop: %9"PRIu64"\n",
+ flow_dist_stats.distributed, flow_dist_stats.drop);
+
+ printf("\nNODES\n");
+ printf("-------\n");
+ for (i = 0; i < num_nodes; i++) {
+ const unsigned long long rx = nodes[i].stats.rx;
+ const unsigned long long rx_drop = nodes[i].stats.rx_drop;
+ const struct filter_stats *filter = &info->filter_stats[i];
+
+ printf("Node %2u - rx: %9llu, rx_drop: %9llu\n"
+ " tx: %9"PRIu64", tx_drop: %9"PRIu64"\n"
+ " filter_passed: %9"PRIu64", "
+ "filter_drop: %9"PRIu64"\n",
+ i, rx, rx_drop, node_tx[i], node_tx_drop[i],
+ filter->passed, filter->drop);
+ }
+
+ printf("\n");
+}
+
+/*
+ * The function called from each non-master lcore used by the process.
+ * The test_and_set function is used to randomly pick a single lcore on which
+ * the code to display the statistics will run. Otherwise, the code just
+ * repeatedly sleeps.
+ */
+static int
+sleep_lcore(__attribute__((unused)) void *dummy)
+{
+ /* Used to pick a display thread - static, so zero-initialised */
+ static rte_atomic32_t display_stats;
+
+ /* Only one core should display stats */
+ if (rte_atomic32_test_and_set(&display_stats)) {
+ const unsigned int sleeptime = 1;
+
+ printf("Core %u displaying statistics\n", rte_lcore_id());
+
+ /* Longer initial pause so above printf is seen */
+ sleep(sleeptime * 3);
+
+ /* Loop forever: sleep always returns 0 or <= param */
+ while (sleep(sleeptime) <= sleeptime)
+ do_stats_display();
+ }
+ return 0;
+}
+
+/*
+ * Function to set all the node statistic values to zero.
+ * Called at program startup.
+ */
+static void
+clear_stats(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_nodes; i++)
+ nodes[i].stats.rx = nodes[i].stats.rx_drop = 0;
+}
+
+/*
+ * send a burst of traffic to a node, assuming there are packets
+ * available to be sent to this node
+ */
+static void
+flush_rx_queue(uint16_t node)
+{
+ uint16_t j;
+ struct node *cl;
+
+ if (cl_rx_buf[node].count == 0)
+ return;
+
+ cl = &nodes[node];
+ if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[node].buffer,
+ cl_rx_buf[node].count, NULL) != cl_rx_buf[node].count){
+ for (j = 0; j < cl_rx_buf[node].count; j++)
+ rte_pktmbuf_free(cl_rx_buf[node].buffer[j]);
+ cl->stats.rx_drop += cl_rx_buf[node].count;
+ } else
+ cl->stats.rx += cl_rx_buf[node].count;
+
+ cl_rx_buf[node].count = 0;
+}
+
+/*
+ * marks a packet down to be sent to a particular node process
+ */
+static inline void
+enqueue_rx_packet(uint8_t node, struct rte_mbuf *buf)
+{
+ cl_rx_buf[node].buffer[cl_rx_buf[node].count++] = buf;
+}
+
+/*
+ * This function takes a group of packets and routes them
+ * individually to the node process. Very simply round-robins the packets
+ * without checking any of the packet contents.
+ */
+static void
+process_packets(uint32_t port_num __rte_unused, struct rte_mbuf *pkts[],
+ uint16_t rx_count, unsigned int socket_id)
+{
+ uint16_t i;
+ uint8_t node;
+ efd_value_t data[RTE_EFD_BURST_MAX];
+ const void *key_ptrs[RTE_EFD_BURST_MAX];
+
+ struct ipv4_hdr *ipv4_hdr;
+ uint32_t ipv4_dst_ip[RTE_EFD_BURST_MAX];
+
+ for (i = 0; i < rx_count; i++) {
+ /* Handle IPv4 header.*/
+ ipv4_hdr = rte_pktmbuf_mtod_offset(pkts[i], struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ ipv4_dst_ip[i] = ipv4_hdr->dst_addr;
+ key_ptrs[i] = (void *)&ipv4_dst_ip[i];
+ }
+
+ rte_efd_lookup_bulk(efd_table, socket_id, rx_count,
+ (const void **) key_ptrs, data);
+ for (i = 0; i < rx_count; i++) {
+ node = (uint8_t) ((uintptr_t)data[i]);
+
+ if (node >= num_nodes) {
+ /*
+ * Node is out of range, which means that
+ * flow has not been inserted
+ */
+ flow_dist_stats.drop++;
+ rte_pktmbuf_free(pkts[i]);
+ } else {
+ flow_dist_stats.distributed++;
+ enqueue_rx_packet(node, pkts[i]);
+ }
+ }
+
+ for (i = 0; i < num_nodes; i++)
+ flush_rx_queue(i);
+}
+
+/*
+ * Function called by the master lcore of the DPDK process.
+ */
+static void
+do_packet_forwarding(void)
+{
+ unsigned int port_num = 0; /* indexes the port[] array */
+ unsigned int socket_id = rte_socket_id();
+
+ for (;;) {
+ struct rte_mbuf *buf[PACKET_READ_SIZE];
+ uint16_t rx_count;
+
+ /* read a port */
+ rx_count = rte_eth_rx_burst(info->id[port_num], 0,
+ buf, PACKET_READ_SIZE);
+ info->rx_stats.rx[port_num] += rx_count;
+
+ /* Now process the NIC packets read */
+ if (likely(rx_count > 0))
+ process_packets(port_num, buf, rx_count, socket_id);
+
+ /* move to next port */
+ if (++port_num == info->num_ports)
+ port_num = 0;
+ }
+}
+
+int
+main(int argc, char *argv[])
+{
+ /* initialise the system */
+ if (init(argc, argv) < 0)
+ return -1;
+ RTE_LOG(INFO, APP, "Finished Process Init.\n");
+
+ cl_rx_buf = calloc(num_nodes, sizeof(cl_rx_buf[0]));
+
+ /* clear statistics */
+ clear_stats();
+
+ /* put all other cores to sleep bar master */
+ rte_eal_mp_remote_launch(sleep_lcore, NULL, SKIP_MASTER);
+
+ do_packet_forwarding();
+ return 0;
+}
diff --git a/examples/server_node_efd/shared/common.h b/examples/server_node_efd/shared/common.h
new file mode 100644
index 00000000..8a134799
--- /dev/null
+++ b/examples/server_node_efd/shared/common.h
@@ -0,0 +1,99 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _COMMON_H_
+#define _COMMON_H_
+
+#include <rte_hash_crc.h>
+#include <rte_hash.h>
+
+#define MAX_NODES 16
+/*
+ * Shared port info, including statistics information for display by server.
+ * Structure will be put in a memzone.
+ * - All port id values share one cache line as this data will be read-only
+ * during operation.
+ * - All rx statistic values share cache lines, as this data is written only
+ * by the server process. (rare reads by stats display)
+ * - The tx statistics have values for all ports per cache line, but the stats
+ * themselves are written by the nodes, so we have a distinct set, on different
+ * cache lines for each node to use.
+ */
+struct rx_stats {
+ uint64_t rx[RTE_MAX_ETHPORTS];
+} __rte_cache_aligned;
+
+struct tx_stats {
+ uint64_t tx[RTE_MAX_ETHPORTS];
+ uint64_t tx_drop[RTE_MAX_ETHPORTS];
+} __rte_cache_aligned;
+
+struct filter_stats {
+ uint64_t drop;
+ uint64_t passed;
+} __rte_cache_aligned;
+
+struct shared_info {
+ uint8_t num_nodes;
+ uint8_t num_ports;
+ uint32_t num_flows;
+ uint8_t id[RTE_MAX_ETHPORTS];
+ struct rx_stats rx_stats;
+ struct tx_stats tx_stats[MAX_NODES];
+ struct filter_stats filter_stats[MAX_NODES];
+};
+
+/* define common names for structures shared between server and node */
+#define MP_NODE_RXQ_NAME "MProc_Node_%u_RX"
+#define PKTMBUF_POOL_NAME "MProc_pktmbuf_pool"
+#define MZ_SHARED_INFO "MProc_shared_info"
+
+/*
+ * Given the rx queue name template above, get the queue name
+ */
+static inline const char *
+get_rx_queue_name(unsigned int id)
+{
+ /*
+ * Buffer for return value. Size calculated by %u being replaced
+ * by maximum 3 digits (plus an extra byte for safety)
+ */
+ static char buffer[sizeof(MP_NODE_RXQ_NAME) + 2];
+
+ snprintf(buffer, sizeof(buffer) - 1, MP_NODE_RXQ_NAME, id);
+ return buffer;
+}
+
+#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
+
+#endif
diff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c
index 1d6d4635..cd6e3f1c 100644
--- a/examples/tep_termination/main.c
+++ b/examples/tep_termination/main.c
@@ -49,7 +49,7 @@
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
#include "main.h"
#include "vxlan.h"
@@ -68,7 +68,7 @@
(nb_switching_cores * MBUF_CACHE_SIZE))
#define MBUF_CACHE_SIZE 128
-#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
@@ -567,7 +567,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m)
unsigned len, ret = 0;
const uint16_t lcore_id = rte_lcore_id();
- RTE_LOG(DEBUG, VHOST_DATA, "(%d) TX: MAC address is external\n",
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%d) TX: MAC address is external\n",
vdev->vid);
/* Add packet to the port tx queue */
@@ -649,7 +649,7 @@ switch_worker(__rte_unused void *arg)
if (unlikely(diff_tsc > drain_tsc)) {
if (tx_q->len) {
- RTE_LOG(DEBUG, VHOST_DATA, "TX queue drained after "
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "TX queue drained after "
"timeout with burst size %u\n",
tx_q->len);
ret = overlay_options.tx_handle(ports[0],
@@ -1081,7 +1081,7 @@ new_device(int vid)
* These callback allow devices to be added to the data core when configuration
* has been fully complete.
*/
-static const struct virtio_net_device_ops virtio_net_device_ops = {
+static const struct vhost_device_ops virtio_net_device_ops = {
.new_device = new_device,
.destroy_device = destroy_device,
};
@@ -1199,15 +1199,13 @@ main(int argc, char *argv[])
MAX_SUP_PORTS);
}
/* Create the mbuf pool. */
- mbuf_pool = rte_mempool_create(
+ mbuf_pool = rte_pktmbuf_pool_create(
"MBUF_POOL",
- NUM_MBUFS_PER_PORT
- * valid_nb_ports,
- MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ NUM_MBUFS_PER_PORT * valid_nb_ports,
+ MBUF_CACHE_SIZE,
+ 0,
+ MBUF_DATA_SIZE,
+ rte_socket_id());
if (mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
@@ -1250,15 +1248,28 @@ main(int argc, char *argv[])
rte_eal_remote_launch(switch_worker,
mbuf_pool, lcore_id);
}
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
ret = rte_vhost_driver_register((char *)&dev_basename, 0);
if (ret != 0)
rte_exit(EXIT_FAILURE, "failed to register vhost driver.\n");
- rte_vhost_driver_callback_register(&virtio_net_device_ops);
+ rte_vhost_driver_disable_features(dev_basename,
+ 1ULL << VIRTIO_NET_F_MRG_RXBUF);
- rte_vhost_driver_session_start();
+ ret = rte_vhost_driver_callback_register(dev_basename,
+ &virtio_net_device_ops);
+ if (ret != 0) {
+ rte_exit(EXIT_FAILURE,
+ "failed to register vhost driver callbacks.\n");
+ }
+
+ if (rte_vhost_driver_start(dev_basename) < 0) {
+ rte_exit(EXIT_FAILURE,
+ "failed to start vhost driver.\n");
+ }
+
+ RTE_LCORE_FOREACH_SLAVE(lcore_id)
+ rte_eal_wait_lcore(lcore_id);
return 0;
}
diff --git a/examples/tep_termination/main.h b/examples/tep_termination/main.h
index c0ea7667..8ed817d4 100644
--- a/examples/tep_termination/main.h
+++ b/examples/tep_termination/main.h
@@ -54,6 +54,8 @@
/* Max number of devices. Limited by the application. */
#define MAX_DEVICES 64
+enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
+
/* Per-device statistics struct */
struct device_statistics {
uint64_t tx_total;
diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c
index 8f1f15bb..b57c0451 100644
--- a/examples/tep_termination/vxlan_setup.c
+++ b/examples/tep_termination/vxlan_setup.c
@@ -49,7 +49,7 @@
#include <rte_tcp.h>
#include "main.h"
-#include "rte_virtio_net.h"
+#include "rte_vhost.h"
#include "vxlan.h"
#include "vxlan_setup.h"
@@ -102,7 +102,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload disabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/examples/vhost/Makefile b/examples/vhost/Makefile
index e95c68ae..af7be99a 100644
--- a/examples/vhost/Makefile
+++ b/examples/vhost/Makefile
@@ -48,7 +48,7 @@ else
APP = vhost-switch
# all source are stored in SRCS-y
-SRCS-y := main.c
+SRCS-y := main.c virtio_net.c
CFLAGS += -O2 -D_FILE_OFFSET_BITS=64
CFLAGS += $(WERROR_FLAGS)
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index eddaf926..e07f8669 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,7 +49,7 @@
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
#include <rte_ip.h>
#include <rte_tcp.h>
@@ -65,7 +65,6 @@
#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
-#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define BURST_RX_WAIT_US 15 /* Defines how long we wait between retries on RX */
@@ -129,6 +128,8 @@ static uint32_t enable_tso;
static int client_mode;
static int dequeue_zero_copy;
+static int builtin_net_driver;
+
/* Specify timeout (in useconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
@@ -153,7 +154,7 @@ static struct rte_eth_conf vmdq_conf_default = {
*/
.hw_vlan_strip = 1, /**< VLAN strip enabled. */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
@@ -328,16 +329,6 @@ port_init(uint8_t port)
if (port >= rte_eth_dev_count()) return -1;
- if (enable_tx_csum == 0)
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);
-
- if (enable_tso == 0) {
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO4);
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO6);
- }
-
rx_rings = (uint16_t)dev_info.max_rx_queues;
/* Configure ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
@@ -405,7 +396,7 @@ static int
us_vhost_parse_socket_path(const char *q_arg)
{
/* parse number string */
- if (strnlen(q_arg, PATH_MAX) > PATH_MAX)
+ if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
return -1;
socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
@@ -509,6 +500,7 @@ us_vhost_parse_args(int argc, char **argv)
{"tso", required_argument, NULL, 0},
{"client", no_argument, &client_mode, 1},
{"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
+ {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
{NULL, 0, 0, 0},
};
@@ -531,7 +523,6 @@ us_vhost_parse_args(int argc, char **argv)
vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
ETH_VMDQ_ACCEPT_BROADCAST |
ETH_VMDQ_ACCEPT_MULTICAST;
- rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
break;
@@ -806,7 +797,12 @@ virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
{
uint16_t ret;
- ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
+ if (builtin_net_driver) {
+ ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
+ } else {
+ ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
+ }
+
if (enable_stats) {
rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
@@ -832,17 +828,17 @@ virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
return -1;
if (vdev->vid == dst_vdev->vid) {
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"(%d) TX: src and dst MAC is same. Dropping packet.\n",
vdev->vid);
return 0;
}
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"(%d) TX: MAC address is local\n", dst_vdev->vid);
if (unlikely(dst_vdev->remove)) {
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"(%d) device is marked for removal\n", dst_vdev->vid);
return 0;
}
@@ -867,7 +863,7 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
return 0;
if (vdev->vid == dst_vdev->vid) {
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"(%d) TX: src and dst MAC is same. Dropping packet.\n",
vdev->vid);
return -1;
@@ -881,7 +877,7 @@ find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
*offset = VLAN_HLEN;
*vlan_tag = vlan_tags[vdev->vid];
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
vdev->vid, dst_vdev->vid, *vlan_tag);
@@ -973,7 +969,7 @@ virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
}
}
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"(%d) TX: MAC address is external\n", vdev->vid);
queue2nic:
@@ -1041,7 +1037,7 @@ drain_mbuf_table(struct mbuf_table *tx_q)
if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
prev_tsc = cur_tsc;
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"TX queue drained after timeout with burst size %u\n",
tx_q->len);
do_drain_mbuf_table(tx_q);
@@ -1077,8 +1073,13 @@ drain_eth_rx(struct vhost_dev *vdev)
}
}
- enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
+ if (builtin_net_driver) {
+ enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
+ pkts, rx_count);
+ } else {
+ enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
pkts, rx_count);
+ }
if (enable_stats) {
rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
@@ -1094,8 +1095,13 @@ drain_virtio_tx(struct vhost_dev *vdev)
uint16_t count;
uint16_t i;
- count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
+ if (builtin_net_driver) {
+ count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
pkts, MAX_PKT_BURST);
+ } else {
+ count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
+ mbuf_pool, pkts, MAX_PKT_BURST);
+ }
/* setup VMDq for the first packet */
if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
@@ -1198,6 +1204,9 @@ destroy_device(int vid)
rte_pause();
}
+ if (builtin_net_driver)
+ vs_vhost_net_remove(vdev);
+
TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
lcore_vdev_entry);
TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
@@ -1246,6 +1255,9 @@ new_device(int vid)
}
vdev->vid = vid;
+ if (builtin_net_driver)
+ vs_vhost_net_setup(vdev);
+
TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
@@ -1281,7 +1293,7 @@ new_device(int vid)
* These callback allow devices to be added to the data core when configuration
* has been fully complete.
*/
-static const struct virtio_net_device_ops virtio_net_device_ops =
+static const struct vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,
@@ -1509,9 +1521,6 @@ main(int argc, char *argv[])
RTE_LCORE_FOREACH_SLAVE(lcore_id)
rte_eal_remote_launch(switch_worker, NULL, lcore_id);
- if (mergeable == 0)
- rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
-
if (client_mode)
flags |= RTE_VHOST_USER_CLIENT;
@@ -1520,18 +1529,59 @@ main(int argc, char *argv[])
/* Register vhost user driver to handle vhost messages. */
for (i = 0; i < nb_sockets; i++) {
- ret = rte_vhost_driver_register
- (socket_files + i * PATH_MAX, flags);
+ char *file = socket_files + i * PATH_MAX;
+ ret = rte_vhost_driver_register(file, flags);
if (ret != 0) {
unregister_drivers(i);
rte_exit(EXIT_FAILURE,
"vhost driver register failure.\n");
}
+
+ if (builtin_net_driver)
+ rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);
+
+ if (mergeable == 0) {
+ rte_vhost_driver_disable_features(file,
+ 1ULL << VIRTIO_NET_F_MRG_RXBUF);
+ }
+
+ if (enable_tx_csum == 0) {
+ rte_vhost_driver_disable_features(file,
+ 1ULL << VIRTIO_NET_F_CSUM);
+ }
+
+ if (enable_tso == 0) {
+ rte_vhost_driver_disable_features(file,
+ 1ULL << VIRTIO_NET_F_HOST_TSO4);
+ rte_vhost_driver_disable_features(file,
+ 1ULL << VIRTIO_NET_F_HOST_TSO6);
+ rte_vhost_driver_disable_features(file,
+ 1ULL << VIRTIO_NET_F_GUEST_TSO4);
+ rte_vhost_driver_disable_features(file,
+ 1ULL << VIRTIO_NET_F_GUEST_TSO6);
+ }
+
+ if (promiscuous) {
+ rte_vhost_driver_enable_features(file,
+ 1ULL << VIRTIO_NET_F_CTRL_RX);
+ }
+
+ ret = rte_vhost_driver_callback_register(file,
+ &virtio_net_device_ops);
+ if (ret != 0) {
+ rte_exit(EXIT_FAILURE,
+ "failed to register vhost driver callbacks.\n");
+ }
+
+ if (rte_vhost_driver_start(file) < 0) {
+ rte_exit(EXIT_FAILURE,
+ "failed to start vhost driver.\n");
+ }
}
- rte_vhost_driver_callback_register(&virtio_net_device_ops);
+ RTE_LCORE_FOREACH_SLAVE(lcore_id)
+ rte_eal_wait_lcore(lcore_id);
- rte_vhost_driver_session_start();
return 0;
}
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index 6bb42e89..9a2aca37 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,11 +36,17 @@
#include <sys/queue.h>
+#include <rte_ether.h>
+
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
#define RTE_LOGTYPE_VHOST_PORT RTE_LOGTYPE_USER3
+enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
+
+#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
+
struct device_statistics {
uint64_t tx;
uint64_t tx_total;
@@ -48,6 +54,12 @@ struct device_statistics {
rte_atomic64_t rx_total_atomic;
};
+struct vhost_queue {
+ struct rte_vhost_vring vr;
+ uint16_t last_avail_idx;
+ uint16_t last_used_idx;
+};
+
struct vhost_dev {
/**< Number of memory regions for gpa to hpa translation. */
uint32_t nregions_hpa;
@@ -65,9 +77,16 @@ struct vhost_dev {
volatile uint8_t remove;
int vid;
+ uint64_t features;
+ size_t hdr_len;
+ uint16_t nr_vrings;
+ struct rte_vhost_memory *mem;
struct device_statistics stats;
TAILQ_ENTRY(vhost_dev) global_vdev_entry;
TAILQ_ENTRY(vhost_dev) lcore_vdev_entry;
+
+#define MAX_QUEUE_PAIRS 4
+ struct vhost_queue queues[MAX_QUEUE_PAIRS * 2];
} __rte_cache_aligned;
TAILQ_HEAD(vhost_dev_tailq_list, vhost_dev);
@@ -88,4 +107,15 @@ struct lcore_info {
struct vhost_dev_tailq_list vdev_list;
};
+/* we implement non-extra virtio net features */
+#define VIRTIO_NET_FEATURES 0
+
+void vs_vhost_net_setup(struct vhost_dev *dev);
+void vs_vhost_net_remove(struct vhost_dev *dev);
+uint16_t vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count);
+
+uint16_t vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool,
+ struct rte_mbuf **pkts, uint16_t count);
#endif /* _MAIN_H_ */
diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
new file mode 100644
index 00000000..cc2c3d88
--- /dev/null
+++ b/examples/vhost/virtio_net.c
@@ -0,0 +1,403 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <linux/virtio_net.h>
+
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_vhost.h>
+
+#include "main.h"
+
+/*
+ * A very simple vhost-user net driver implementation, without
+ * any extra features being enabled, such as TSO and mrg-Rx.
+ */
+
+void
+vs_vhost_net_setup(struct vhost_dev *dev)
+{
+ uint16_t i;
+ int vid = dev->vid;
+ struct vhost_queue *queue;
+
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "setting builtin vhost-user net driver\n");
+
+ rte_vhost_get_negotiated_features(vid, &dev->features);
+ if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
+ dev->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ else
+ dev->hdr_len = sizeof(struct virtio_net_hdr);
+
+ rte_vhost_get_mem_table(vid, &dev->mem);
+
+ dev->nr_vrings = rte_vhost_get_vring_num(vid);
+ for (i = 0; i < dev->nr_vrings; i++) {
+ queue = &dev->queues[i];
+
+ queue->last_used_idx = 0;
+ queue->last_avail_idx = 0;
+ rte_vhost_get_vhost_vring(vid, i, &queue->vr);
+ }
+}
+
+void
+vs_vhost_net_remove(struct vhost_dev *dev)
+{
+ free(dev->mem);
+}
+
+static inline int __attribute__((always_inline))
+enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
+ struct rte_mbuf *m, uint16_t desc_idx)
+{
+ uint32_t desc_avail, desc_offset;
+ uint32_t mbuf_avail, mbuf_offset;
+ uint32_t cpy_len;
+ struct vring_desc *desc;
+ uint64_t desc_addr;
+ struct virtio_net_hdr virtio_hdr = {0, 0, 0, 0, 0, 0};
+ /* A counter to avoid desc dead loop chain */
+ uint16_t nr_desc = 1;
+
+ desc = &vr->desc[desc_idx];
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ /*
+ * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
+ * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
+ * otherwise stores offset on the stack instead of in a register.
+ */
+ if (unlikely(desc->len < dev->hdr_len) || !desc_addr)
+ return -1;
+
+ rte_prefetch0((void *)(uintptr_t)desc_addr);
+
+ /* write virtio-net header */
+ *(struct virtio_net_hdr *)(uintptr_t)desc_addr = virtio_hdr;
+
+ desc_offset = dev->hdr_len;
+ desc_avail = desc->len - dev->hdr_len;
+
+ mbuf_avail = rte_pktmbuf_data_len(m);
+ mbuf_offset = 0;
+ while (mbuf_avail != 0 || m->next != NULL) {
+ /* done with current mbuf, fetch next */
+ if (mbuf_avail == 0) {
+ m = m->next;
+
+ mbuf_offset = 0;
+ mbuf_avail = rte_pktmbuf_data_len(m);
+ }
+
+ /* done with current desc buf, fetch next */
+ if (desc_avail == 0) {
+ if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
+ /* Room in vring buffer is not enough */
+ return -1;
+ }
+ if (unlikely(desc->next >= vr->size ||
+ ++nr_desc > vr->size))
+ return -1;
+
+ desc = &vr->desc[desc->next];
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ if (unlikely(!desc_addr))
+ return -1;
+
+ desc_offset = 0;
+ desc_avail = desc->len;
+ }
+
+ cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+ rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
+ cpy_len);
+
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ desc_avail -= cpy_len;
+ desc_offset += cpy_len;
+ }
+
+ return 0;
+}
+
+uint16_t
+vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ struct vhost_queue *queue;
+ struct rte_vhost_vring *vr;
+ uint16_t avail_idx, free_entries, start_idx;
+ uint16_t desc_indexes[MAX_PKT_BURST];
+ uint16_t used_idx;
+ uint32_t i;
+
+ queue = &dev->queues[queue_id];
+ vr = &queue->vr;
+
+ avail_idx = *((volatile uint16_t *)&vr->avail->idx);
+ start_idx = queue->last_used_idx;
+ free_entries = avail_idx - start_idx;
+ count = RTE_MIN(count, free_entries);
+ count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
+ if (count == 0)
+ return 0;
+
+ /* Retrieve all of the desc indexes first to avoid caching issues. */
+ rte_prefetch0(&vr->avail->ring[start_idx & (vr->size - 1)]);
+ for (i = 0; i < count; i++) {
+ used_idx = (start_idx + i) & (vr->size - 1);
+ desc_indexes[i] = vr->avail->ring[used_idx];
+ vr->used->ring[used_idx].id = desc_indexes[i];
+ vr->used->ring[used_idx].len = pkts[i]->pkt_len +
+ dev->hdr_len;
+ }
+
+ rte_prefetch0(&vr->desc[desc_indexes[0]]);
+ for (i = 0; i < count; i++) {
+ uint16_t desc_idx = desc_indexes[i];
+ int err;
+
+ err = enqueue_pkt(dev, vr, pkts[i], desc_idx);
+ if (unlikely(err)) {
+ used_idx = (start_idx + i) & (vr->size - 1);
+ vr->used->ring[used_idx].len = dev->hdr_len;
+ }
+
+ if (i + 1 < count)
+ rte_prefetch0(&vr->desc[desc_indexes[i+1]]);
+ }
+
+ rte_smp_wmb();
+
+ *(volatile uint16_t *)&vr->used->idx += count;
+ queue->last_used_idx += count;
+
+ /* flush used->idx update before we read avail->flags. */
+ rte_mb();
+
+ /* Kick the guest if necessary. */
+ if (!(vr->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
+ && (vr->callfd >= 0))
+ eventfd_write(vr->callfd, (eventfd_t)1);
+ return count;
+}
+
+static inline int __attribute__((always_inline))
+dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
+ struct rte_mbuf *m, uint16_t desc_idx,
+ struct rte_mempool *mbuf_pool)
+{
+ struct vring_desc *desc;
+ uint64_t desc_addr;
+ uint32_t desc_avail, desc_offset;
+ uint32_t mbuf_avail, mbuf_offset;
+ uint32_t cpy_len;
+ struct rte_mbuf *cur = m, *prev = m;
+ /* A counter to avoid desc dead loop chain */
+ uint32_t nr_desc = 1;
+
+ desc = &vr->desc[desc_idx];
+ if (unlikely((desc->len < dev->hdr_len)) ||
+ (desc->flags & VRING_DESC_F_INDIRECT))
+ return -1;
+
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ if (unlikely(!desc_addr))
+ return -1;
+
+ /*
+ * We don't support ANY_LAYOUT, neither VERSION_1, meaning
+ * a Tx packet from guest must have 2 desc buffers at least:
+ * the first for storing the header and the others for
+ * storing the data.
+ *
+ * And since we don't support TSO, we could simply skip the
+ * header.
+ */
+ desc = &vr->desc[desc->next];
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ if (unlikely(!desc_addr))
+ return -1;
+ rte_prefetch0((void *)(uintptr_t)desc_addr);
+
+ desc_offset = 0;
+ desc_avail = desc->len;
+ nr_desc += 1;
+
+ mbuf_offset = 0;
+ mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
+ while (1) {
+ cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+ rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
+ mbuf_offset),
+ (void *)((uintptr_t)(desc_addr + desc_offset)),
+ cpy_len);
+
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ desc_avail -= cpy_len;
+ desc_offset += cpy_len;
+
+ /* This desc reaches to its end, get the next one */
+ if (desc_avail == 0) {
+ if ((desc->flags & VRING_DESC_F_NEXT) == 0)
+ break;
+
+ if (unlikely(desc->next >= vr->size ||
+ ++nr_desc > vr->size))
+ return -1;
+ desc = &vr->desc[desc->next];
+
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ if (unlikely(!desc_addr))
+ return -1;
+ rte_prefetch0((void *)(uintptr_t)desc_addr);
+
+ desc_offset = 0;
+ desc_avail = desc->len;
+ }
+
+ /*
+ * This mbuf reaches to its end, get a new one
+ * to hold more data.
+ */
+ if (mbuf_avail == 0) {
+ cur = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(cur == NULL)) {
+ RTE_LOG(ERR, VHOST_DATA, "Failed to "
+ "allocate memory for mbuf.\n");
+ return -1;
+ }
+
+ prev->next = cur;
+ prev->data_len = mbuf_offset;
+ m->nb_segs += 1;
+ m->pkt_len += mbuf_offset;
+ prev = cur;
+
+ mbuf_offset = 0;
+ mbuf_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
+ }
+ }
+
+ prev->data_len = mbuf_offset;
+ m->pkt_len += mbuf_offset;
+
+ return 0;
+}
+
+uint16_t
+vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
+{
+ struct vhost_queue *queue;
+ struct rte_vhost_vring *vr;
+ uint32_t desc_indexes[MAX_PKT_BURST];
+ uint32_t used_idx;
+ uint32_t i = 0;
+ uint16_t free_entries;
+ uint16_t avail_idx;
+
+ queue = &dev->queues[queue_id];
+ vr = &queue->vr;
+
+ free_entries = *((volatile uint16_t *)&vr->avail->idx) -
+ queue->last_avail_idx;
+ if (free_entries == 0)
+ return 0;
+
+ /* Prefetch available and used ring */
+ avail_idx = queue->last_avail_idx & (vr->size - 1);
+ used_idx = queue->last_used_idx & (vr->size - 1);
+ rte_prefetch0(&vr->avail->ring[avail_idx]);
+ rte_prefetch0(&vr->used->ring[used_idx]);
+
+ count = RTE_MIN(count, MAX_PKT_BURST);
+ count = RTE_MIN(count, free_entries);
+
+ /*
+ * Retrieve all of the head indexes first and pre-update used entries
+ * to avoid caching issues.
+ */
+ for (i = 0; i < count; i++) {
+ avail_idx = (queue->last_avail_idx + i) & (vr->size - 1);
+ used_idx = (queue->last_used_idx + i) & (vr->size - 1);
+ desc_indexes[i] = vr->avail->ring[avail_idx];
+
+ vr->used->ring[used_idx].id = desc_indexes[i];
+ vr->used->ring[used_idx].len = 0;
+ }
+
+ /* Prefetch descriptor index. */
+ rte_prefetch0(&vr->desc[desc_indexes[0]]);
+ for (i = 0; i < count; i++) {
+ int err;
+
+ if (likely(i + 1 < count))
+ rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);
+
+ pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(pkts[i] == NULL)) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "Failed to allocate memory for mbuf.\n");
+ break;
+ }
+
+ err = dequeue_pkt(dev, vr, pkts[i], desc_indexes[i], mbuf_pool);
+ if (unlikely(err)) {
+ rte_pktmbuf_free(pkts[i]);
+ break;
+ }
+
+ }
+ if (!i)
+ return 0;
+
+ queue->last_avail_idx += i;
+ queue->last_used_idx += i;
+ rte_smp_wmb();
+ rte_smp_rmb();
+
+ vr->used->idx += i;
+
+ if (!(vr->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
+ && (vr->callfd >= 0))
+ eventfd_write(vr->callfd, (eventfd_t)1);
+
+ return i;
+}
diff --git a/examples/vhost_xen/main.c b/examples/vhost_xen/main.c
index f4dbaa48..d9ef140f 100644
--- a/examples/vhost_xen/main.c
+++ b/examples/vhost_xen/main.c
@@ -149,7 +149,7 @@ static const struct rte_eth_conf vmdq_conf_default = {
*/
.hw_vlan_strip = 1, /**< VLAN strip enabled. */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
@@ -525,7 +525,7 @@ gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
break;
}
}
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") GPA %p| VVA %p\n",
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") GPA %p| VVA %p\n",
dev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va);
return vhost_va;
@@ -555,7 +555,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
uint8_t success = 0;
void *userdata;
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_rx()\n", dev->device_fh);
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_rx()\n", dev->device_fh);
vq = dev->virtqueue_rx;
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
/* As many data cores may want access to available buffers, they need to be reserved. */
@@ -580,7 +580,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
res_end_idx);
} while (unlikely(success == 0));
res_cur_idx = res_base_idx;
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Current Index %d| End Index %d\n",
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") Current Index %d| End Index %d\n",
dev->device_fh, res_cur_idx, res_end_idx);
/* Prefetch available ring to retrieve indexes. */
@@ -775,7 +775,7 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
/* Drop the packet if the TX packet is destined for the TX device. */
if (dev_ll->dev->device_fh == dev->device_fh) {
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
"Source and destination MAC addresses are the same. "
"Dropping packet.\n",
dev_ll->dev->device_fh);
@@ -783,12 +783,12 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
}
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
"MAC address is local\n", dev_ll->dev->device_fh);
if (dev_ll->dev->remove) {
/*drop the packet if the device is marked for removal*/
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") "
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") "
"Device is marked for removal\n",
dev_ll->dev->device_fh);
} else {
@@ -829,7 +829,7 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *
return;
}
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
"MAC address is external\n", dev->device_fh);
/*Add packet to the port tx queue*/
@@ -903,7 +903,7 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
if (vq->last_used_idx == avail_idx)
return;
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_tx()\n",
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_tx()\n",
dev->device_fh);
/* Prefetch available ring to retrieve head indexes. */
@@ -913,7 +913,7 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
free_entries = avail_idx - vq->last_used_idx;
free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST;
- RTE_LOG(DEBUG, VHOST_DATA, "(%" PRIu64 ") Buffers available %d\n",
+ RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") Buffers available %d\n",
dev->device_fh, free_entries);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < free_entries; i++)
@@ -1003,7 +1003,7 @@ switch_worker(__attribute__((unused)) void *arg)
if (unlikely(diff_tsc > drain_tsc)) {
if (tx_q->len) {
- RTE_LOG(DEBUG, VHOST_DATA,
+ RTE_LOG_DP(DEBUG, VHOST_DATA,
"TX queue drained after timeout with burst size %u\n",
tx_q->len);
diff --git a/lib/Makefile b/lib/Makefile
index 990f23a4..07e1fd0c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -34,33 +34,82 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-y += librte_compat
DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
+DEPDIRS-librte_ring := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
+DEPDIRS-librte_mempool := librte_eal librte_ring
DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf
+DEPDIRS-librte_mbuf := librte_eal librte_mempool
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
+DEPDIRS-librte_timer := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
+DEPDIRS-librte_cfgfile := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
+DEPDIRS-librte_cmdline := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_ETHER) += librte_ether
+DEPDIRS-librte_ether := librte_net librte_eal librte_mempool librte_ring
+DEPDIRS-librte_ether += librte_mbuf
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev
+DEPDIRS-librte_cryptodev := librte_eal librte_mempool librte_ring librte_mbuf
+DEPDIRS-librte_cryptodev += librte_kvargs
+DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += librte_eventdev
+DEPDIRS-librte_eventdev := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost
+DEPDIRS-librte_vhost := librte_eal librte_mempool librte_mbuf librte_ether
DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash
+DEPDIRS-librte_hash := librte_eal librte_ring
+DIRS-$(CONFIG_RTE_LIBRTE_EFD) += librte_efd
+DEPDIRS-librte_efd := librte_eal librte_ring librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_LPM) += librte_lpm
+DEPDIRS-librte_lpm := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_ACL) += librte_acl
+DEPDIRS-librte_acl := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_NET) += librte_net
+DEPDIRS-librte_net := librte_mbuf librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += librte_ip_frag
+DEPDIRS-librte_ip_frag := librte_eal librte_mempool librte_mbuf librte_ether
+DEPDIRS-librte_ip_frag += librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_JOBSTATS) += librte_jobstats
+DEPDIRS-librte_jobstats := librte_eal
+DIRS-$(CONFIG_RTE_LIBRTE_METRICS) += librte_metrics
+DEPDIRS-librte_metrics := librte_eal
+DIRS-$(CONFIG_RTE_LIBRTE_BITRATE) += librte_bitratestats
+DEPDIRS-librte_bitratestats := librte_eal librte_metrics librte_ether
+DIRS-$(CONFIG_RTE_LIBRTE_LATENCY_STATS) += librte_latencystats
+DEPDIRS-librte_latencystats := librte_eal librte_metrics librte_ether librte_mbuf
DIRS-$(CONFIG_RTE_LIBRTE_POWER) += librte_power
+DEPDIRS-librte_power := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_METER) += librte_meter
+DEPDIRS-librte_meter := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += librte_sched
+DEPDIRS-librte_sched := librte_eal librte_mempool librte_mbuf librte_net
+DEPDIRS-librte_sched += librte_timer
DIRS-$(CONFIG_RTE_LIBRTE_KVARGS) += librte_kvargs
+DEPDIRS-librte_kvargs := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += librte_distributor
+DEPDIRS-librte_distributor := librte_eal librte_mbuf librte_ether
DIRS-$(CONFIG_RTE_LIBRTE_PORT) += librte_port
+DEPDIRS-librte_port := librte_eal librte_mempool librte_mbuf librte_ether
+DEPDIRS-librte_port += librte_ip_frag librte_sched
+ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
+DEPDIRS-librte_port += librte_kni
+endif
DIRS-$(CONFIG_RTE_LIBRTE_TABLE) += librte_table
+DEPDIRS-librte_table := librte_eal librte_mempool librte_mbuf
+DEPDIRS-librte_table += librte_port librte_lpm librte_hash
+ifeq ($(CONFIG_RTE_LIBRTE_ACL),y)
+DEPDIRS-librte_table += librte_acl
+endif
DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += librte_pipeline
+DEPDIRS-librte_pipeline := librte_eal librte_mempool librte_mbuf
+DEPDIRS-librte_pipeline += librte_table librte_port
DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += librte_reorder
+DEPDIRS-librte_reorder := librte_eal librte_mempool librte_mbuf
DIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += librte_pdump
+DEPDIRS-librte_pdump := librte_eal librte_mempool librte_mbuf librte_ether
ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
DIRS-$(CONFIG_RTE_LIBRTE_KNI) += librte_kni
endif
+DEPDIRS-librte_kni:= librte_eal librte_mempool librte_mbuf librte_ether
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_acl/Makefile b/lib/librte_acl/Makefile
index d05be665..e2dacd60 100644
--- a/lib/librte_acl/Makefile
+++ b/lib/librte_acl/Makefile
@@ -92,7 +92,4 @@ endif
SYMLINK-$(CONFIG_RTE_LIBRTE_ACL)-include := rte_acl_osdep.h
SYMLINK-$(CONFIG_RTE_LIBRTE_ACL)-include += rte_acl.h
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ACL) += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_acl/acl_bld.c b/lib/librte_acl/acl_bld.c
index 9e5ad1c6..0768cd3b 100644
--- a/lib/librte_acl/acl_bld.c
+++ b/lib/librte_acl/acl_bld.c
@@ -830,8 +830,8 @@ acl_gen_range_trie(struct acl_build_context *context,
{
int32_t n;
struct rte_acl_node *root;
- const uint8_t *lo = (const uint8_t *)min;
- const uint8_t *hi = (const uint8_t *)max;
+ const uint8_t *lo = min;
+ const uint8_t *hi = max;
*pend = acl_alloc_node(context, level+size);
root = acl_alloc_node(context, level++);
@@ -886,8 +886,8 @@ acl_gen_mask_trie(struct acl_build_context *context,
struct rte_acl_node *root;
struct rte_acl_node *node, *prev;
struct rte_acl_bitset bits;
- const uint8_t *val = (const uint8_t *)value;
- const uint8_t *msk = (const uint8_t *)mask;
+ const uint8_t *val = value;
+ const uint8_t *msk = mask;
root = acl_alloc_node(context, level++);
prev = root;
diff --git a/lib/librte_acl/acl_run.h b/lib/librte_acl/acl_run.h
index 024f3931..a862ff6e 100644
--- a/lib/librte_acl/acl_run.h
+++ b/lib/librte_acl/acl_run.h
@@ -69,10 +69,10 @@ struct acl_flow_data {
uint32_t trie;
/* current trie index (0 to N-1) */
uint32_t cmplt_size;
+ /* maximum number of packets to process */
uint32_t total_packets;
- uint32_t categories;
/* number of result categories per packet. */
- /* maximum number of packets to process */
+ uint32_t categories;
const uint64_t *trans;
const uint8_t **data;
uint32_t *results;
diff --git a/lib/librte_acl/rte_acl.c b/lib/librte_acl/rte_acl.c
index 8b7e92ce..d1f40bef 100644
--- a/lib/librte_acl/rte_acl.c
+++ b/lib/librte_acl/rte_acl.c
@@ -313,8 +313,7 @@ acl_check_rule(const struct rte_acl_rule_data *rd)
if ((RTE_LEN2MASK(RTE_ACL_MAX_CATEGORIES, typeof(rd->category_mask)) &
rd->category_mask) == 0 ||
rd->priority > RTE_ACL_MAX_PRIORITY ||
- rd->priority < RTE_ACL_MIN_PRIORITY ||
- rd->userdata == RTE_ACL_INVALID_USERDATA)
+ rd->priority < RTE_ACL_MIN_PRIORITY)
return -EINVAL;
return 0;
}
diff --git a/lib/librte_acl/rte_acl.h b/lib/librte_acl/rte_acl.h
index caa91f7e..b53179a8 100644
--- a/lib/librte_acl/rte_acl.h
+++ b/lib/librte_acl/rte_acl.h
@@ -120,8 +120,6 @@ enum {
RTE_ACL_MIN_PRIORITY = 0,
};
-#define RTE_ACL_INVALID_USERDATA 0
-
#define RTE_ACL_MASKLEN_TO_BITMASK(v, s) \
((v) == 0 ? (v) : (typeof(v))((uint64_t)-1 << ((s) * CHAR_BIT - (v))))
diff --git a/lib/librte_bitratestats/Makefile b/lib/librte_bitratestats/Makefile
new file mode 100644
index 00000000..58a20ea0
--- /dev/null
+++ b/lib/librte_bitratestats/Makefile
@@ -0,0 +1,49 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_bitratestats.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+EXPORT_MAP := rte_bitratestats_version.map
+
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_BITRATE) := rte_bitrate.c
+
+# Install header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_BITRATE)-include += rte_bitrate.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_bitratestats/rte_bitrate.c b/lib/librte_bitratestats/rte_bitrate.c
new file mode 100644
index 00000000..193aa690
--- /dev/null
+++ b/lib/librte_bitratestats/rte_bitrate.c
@@ -0,0 +1,153 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_metrics.h>
+#include <rte_bitrate.h>
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+/*
+ * Persistent bit-rate data.
+ * @internal
+ */
+struct rte_stats_bitrate {
+ uint64_t last_ibytes;
+ uint64_t last_obytes;
+ uint64_t peak_ibits;
+ uint64_t peak_obits;
+ uint64_t mean_ibits;
+ uint64_t mean_obits;
+ uint64_t ewma_ibits;
+ uint64_t ewma_obits;
+};
+
+struct rte_stats_bitrates {
+ struct rte_stats_bitrate port_stats[RTE_MAX_ETHPORTS];
+ uint16_t id_stats_set;
+};
+
+struct rte_stats_bitrates *
+rte_stats_bitrate_create(void)
+{
+ return rte_zmalloc(NULL, sizeof(struct rte_stats_bitrates),
+ RTE_CACHE_LINE_SIZE);
+}
+
+int
+rte_stats_bitrate_reg(struct rte_stats_bitrates *bitrate_data)
+{
+ const char * const names[] = {
+ "ewma_bits_in", "ewma_bits_out",
+ "mean_bits_in", "mean_bits_out",
+ "peak_bits_in", "peak_bits_out",
+ };
+ int return_value;
+
+ return_value = rte_metrics_reg_names(&names[0], ARRAY_SIZE(names));
+ if (return_value >= 0)
+ bitrate_data->id_stats_set = return_value;
+ return return_value;
+}
+
+int
+rte_stats_bitrate_calc(struct rte_stats_bitrates *bitrate_data,
+ uint8_t port_id)
+{
+ struct rte_stats_bitrate *port_data;
+ struct rte_eth_stats eth_stats;
+ int ret_code;
+ uint64_t cnt_bits;
+ int64_t delta;
+ const int64_t alpha_percent = 20;
+ uint64_t values[6];
+
+ ret_code = rte_eth_stats_get(port_id, &eth_stats);
+ if (ret_code != 0)
+ return ret_code;
+
+ port_data = &bitrate_data->port_stats[port_id];
+
+ /* Incoming bitrate. This is an iteratively calculated EWMA
+ * (Exponentially Weighted Moving Average) that uses a
+ * weighting factor of alpha_percent. An unsmoothed mean
+ * for just the current time delta is also calculated for the
+ * benefit of people who don't understand signal processing.
+ */
+ cnt_bits = (eth_stats.ibytes - port_data->last_ibytes) << 3;
+ port_data->last_ibytes = eth_stats.ibytes;
+ if (cnt_bits > port_data->peak_ibits)
+ port_data->peak_ibits = cnt_bits;
+ delta = cnt_bits;
+ delta -= port_data->ewma_ibits;
+ /* The +-50 fixes integer rounding during divison */
+ if (delta > 0)
+ delta = (delta * alpha_percent + 50) / 100;
+ else
+ delta = (delta * alpha_percent - 50) / 100;
+ port_data->ewma_ibits += delta;
+ /* Integer roundoff prevents EWMA between 0 and (100/alpha_percent)
+ * ever reaching zero in no-traffic conditions
+ */
+ if (cnt_bits == 0 && delta == 0)
+ port_data->ewma_ibits = 0;
+ port_data->mean_ibits = cnt_bits;
+
+ /* Outgoing bitrate (also EWMA) */
+ cnt_bits = (eth_stats.obytes - port_data->last_obytes) << 3;
+ port_data->last_obytes = eth_stats.obytes;
+ if (cnt_bits > port_data->peak_obits)
+ port_data->peak_obits = cnt_bits;
+ delta = cnt_bits;
+ delta -= port_data->ewma_obits;
+ if (delta > 0)
+ delta = (delta * alpha_percent + 50) / 100;
+ else
+ delta = (delta * alpha_percent - 50) / 100;
+ port_data->ewma_obits += delta;
+ if (cnt_bits == 0 && delta == 0)
+ port_data->ewma_obits = 0;
+ port_data->mean_obits = cnt_bits;
+
+ values[0] = port_data->ewma_ibits;
+ values[1] = port_data->ewma_obits;
+ values[2] = port_data->mean_ibits;
+ values[3] = port_data->mean_obits;
+ values[4] = port_data->peak_ibits;
+ values[5] = port_data->peak_obits;
+ rte_metrics_update_values(port_id, bitrate_data->id_stats_set,
+ values, ARRAY_SIZE(values));
+ return 0;
+}
diff --git a/lib/librte_eal/common/include/arch/tile/rte_atomic.h b/lib/librte_bitratestats/rte_bitrate.h
index 28825ff6..15fc270a 100644
--- a/lib/librte_eal/common/include/arch/tile/rte_atomic.h
+++ b/lib/librte_bitratestats/rte_bitrate.h
@@ -1,7 +1,8 @@
-/*
+/*-
* BSD LICENSE
*
- * Copyright (C) EZchip Semiconductor Ltd. 2015.
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of EZchip Semiconductor nor the names of its
+ * * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -28,65 +29,66 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+ */
-#ifndef _RTE_ATOMIC_TILE_H_
-#define _RTE_ATOMIC_TILE_H_
+#ifndef _RTE_BITRATE_H_
+#define _RTE_BITRATE_H_
-#ifndef RTE_FORCE_INTRINSICS
-# error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
-#endif
+#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
-#include "generic/rte_atomic.h"
-
/**
- * General memory barrier.
- *
- * Guarantees that the LOAD and STORE operations generated before the
- * barrier occur before the LOAD and STORE operations generated after.
- * This function is architecture dependent.
+ * Bitrate statistics data structure.
+ * This data structure is intentionally opaque.
*/
-static inline void rte_mb(void)
-{
- __sync_synchronize();
-}
+struct rte_stats_bitrates;
+
/**
- * Write memory barrier.
+ * Allocate a bitrate statistics structure
*
- * Guarantees that the STORE operations generated before the barrier
- * occur before the STORE operations generated after.
- * This function is architecture dependent.
+ * @return
+ * - Pointer to structure on success
+ * - NULL on error (zmalloc failure)
*/
-static inline void rte_wmb(void)
-{
- __sync_synchronize();
-}
+struct rte_stats_bitrates *rte_stats_bitrate_create(void);
+
/**
- * Read memory barrier.
+ * Register bitrate statistics with the metric library.
*
- * Guarantees that the LOAD operations generated before the barrier
- * occur before the LOAD operations generated after.
- * This function is architecture dependent.
+ * @param bitrate_data
+ * Pointer allocated by rte_stats_create()
+ *
+ * @return
+ * Zero on success
+ * Negative on error
*/
-static inline void rte_rmb(void)
-{
- __sync_synchronize();
-}
+int rte_stats_bitrate_reg(struct rte_stats_bitrates *bitrate_data);
-#define rte_smp_mb() rte_mb()
-#define rte_smp_wmb() rte_compiler_barrier()
-
-#define rte_smp_rmb() rte_compiler_barrier()
+/**
+ * Calculate statistics for current time window. The period with which
+ * this function is called should be the intended sampling window width.
+ *
+ * @param bitrate_data
+ * Bitrate statistics data pointer
+ *
+ * @param port_id
+ * Port id to calculate statistics for
+ *
+ * @return
+ * - Zero on success
+ * - Negative value on error
+ */
+int rte_stats_bitrate_calc(struct rte_stats_bitrates *bitrate_data,
+ uint8_t port_id);
#ifdef __cplusplus
}
#endif
-#endif /* _RTE_ATOMIC_TILE_H_ */
+#endif /* _RTE_BITRATE_H_ */
diff --git a/lib/librte_bitratestats/rte_bitratestats_version.map b/lib/librte_bitratestats/rte_bitratestats_version.map
new file mode 100644
index 00000000..fe745445
--- /dev/null
+++ b/lib/librte_bitratestats/rte_bitratestats_version.map
@@ -0,0 +1,9 @@
+DPDK_17.05 {
+ global:
+
+ rte_stats_bitrate_calc;
+ rte_stats_bitrate_create;
+ rte_stats_bitrate_reg;
+
+ local: *;
+};
diff --git a/lib/librte_cfgfile/Makefile b/lib/librte_cfgfile/Makefile
index 616aef09..755ef11f 100644
--- a/lib/librte_cfgfile/Makefile
+++ b/lib/librte_cfgfile/Makefile
@@ -51,7 +51,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_CFGFILE) += rte_cfgfile.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_CFGFILE)-include += rte_cfgfile.h
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_cfgfile/rte_cfgfile.c b/lib/librte_cfgfile/rte_cfgfile.c
index d72052a0..b54a523d 100644
--- a/lib/librte_cfgfile/rte_cfgfile.c
+++ b/lib/librte_cfgfile/rte_cfgfile.c
@@ -35,6 +35,7 @@
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
+#include <rte_common.h>
#include <rte_string_fns.h>
#include "rte_cfgfile.h"
@@ -58,6 +59,25 @@ struct rte_cfgfile {
* for new entries do we add in */
#define CFG_ALLOC_ENTRY_BATCH 16
+/**
+ * Default cfgfile load parameters.
+ */
+static const struct rte_cfgfile_parameters default_cfgfile_params = {
+ .comment_character = CFG_DEFAULT_COMMENT_CHARACTER,
+};
+
+/**
+ * Defines the list of acceptable comment characters supported by this
+ * library.
+ */
+static const char valid_comment_chars[] = {
+ '!',
+ '#',
+ '%',
+ ';',
+ '@'
+};
+
static unsigned
_strip(char *str, unsigned len)
{
@@ -85,17 +105,56 @@ _strip(char *str, unsigned len)
return newlen;
}
+static int
+rte_cfgfile_check_params(const struct rte_cfgfile_parameters *params)
+{
+ unsigned int valid_comment;
+ unsigned int i;
+
+ if (!params) {
+ printf("Error - missing cfgfile parameters\n");
+ return -EINVAL;
+ }
+
+ valid_comment = 0;
+ for (i = 0; i < RTE_DIM(valid_comment_chars); i++) {
+ if (params->comment_character == valid_comment_chars[i]) {
+ valid_comment = 1;
+ break;
+ }
+ }
+
+ if (valid_comment == 0) {
+ printf("Error - invalid comment characters %c\n",
+ params->comment_character);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
struct rte_cfgfile *
rte_cfgfile_load(const char *filename, int flags)
{
+ return rte_cfgfile_load_with_params(filename, flags,
+ &default_cfgfile_params);
+}
+
+struct rte_cfgfile *
+rte_cfgfile_load_with_params(const char *filename, int flags,
+ const struct rte_cfgfile_parameters *params)
+{
int allocated_sections = CFG_ALLOC_SECTION_BATCH;
int allocated_entries = 0;
int curr_section = -1;
int curr_entry = -1;
- char buffer[256] = {0};
+ char buffer[CFG_NAME_LEN + CFG_VALUE_LEN + 4] = {0};
int lineno = 0;
struct rte_cfgfile *cfg = NULL;
+ if (rte_cfgfile_check_params(params))
+ return NULL;
+
FILE *f = fopen(filename, "r");
if (f == NULL)
return NULL;
@@ -107,6 +166,22 @@ rte_cfgfile_load(const char *filename, int flags)
memset(cfg->sections, 0, sizeof(cfg->sections[0]) * allocated_sections);
+ if (flags & CFG_FLAG_GLOBAL_SECTION) {
+ curr_section = 0;
+ allocated_entries = CFG_ALLOC_ENTRY_BATCH;
+ cfg->sections[curr_section] = malloc(
+ sizeof(*cfg->sections[0]) +
+ sizeof(cfg->sections[0]->entries[0]) *
+ allocated_entries);
+ if (cfg->sections[curr_section] == NULL) {
+ printf("Error - no memory for global section\n");
+ goto error1;
+ }
+
+ snprintf(cfg->sections[curr_section]->name,
+ sizeof(cfg->sections[0]->name), "GLOBAL");
+ }
+
while (fgets(buffer, sizeof(buffer), f) != NULL) {
char *pos = NULL;
size_t len = strnlen(buffer, sizeof(buffer));
@@ -116,7 +191,7 @@ rte_cfgfile_load(const char *filename, int flags)
"Check if line too long\n", lineno);
goto error1;
}
- pos = memchr(buffer, ';', sizeof(buffer));
+ pos = memchr(buffer, params->comment_character, len);
if (pos != NULL) {
*pos = '\0';
len = pos - buffer;
@@ -151,6 +226,7 @@ rte_cfgfile_load(const char *filename, int flags)
sizeof(*cfg) + sizeof(cfg->sections[0])
* allocated_sections);
if (n_cfg == NULL) {
+ curr_section--;
printf("Error - no more memory\n");
goto error1;
}
@@ -182,12 +258,21 @@ rte_cfgfile_load(const char *filename, int flags)
struct rte_cfgfile_section *sect =
cfg->sections[curr_section];
- char *split[2];
- if (rte_strsplit(buffer, sizeof(buffer), split, 2, '=')
- != 2) {
- printf("Error at line %d - cannot split "
- "string\n", lineno);
- goto error1;
+ int n;
+ char *split[2] = {NULL};
+ n = rte_strsplit(buffer, sizeof(buffer), split, 2, '=');
+ if (flags & CFG_FLAG_EMPTY_VALUES) {
+ if ((n < 1) || (n > 2)) {
+ printf("Error at line %d - cannot split string, n=%d\n",
+ lineno, n);
+ goto error1;
+ }
+ } else {
+ if (n != 2) {
+ printf("Error at line %d - cannot split string, n=%d\n",
+ lineno, n);
+ goto error1;
+ }
}
curr_entry++;
@@ -198,6 +283,7 @@ rte_cfgfile_load(const char *filename, int flags)
sizeof(sect->entries[0]) *
allocated_entries);
if (n_sect == NULL) {
+ curr_entry--;
printf("Error - no more memory\n");
goto error1;
}
@@ -216,7 +302,7 @@ rte_cfgfile_load(const char *filename, int flags)
snprintf(entry->name, sizeof(entry->name), "%s",
split[0]);
snprintf(entry->value, sizeof(entry->value), "%s",
- split[1]);
+ split[1] ? split[1] : "");
_strip(entry->name, strnlen(entry->name,
sizeof(entry->name)));
_strip(entry->value, strnlen(entry->value,
@@ -233,6 +319,8 @@ rte_cfgfile_load(const char *filename, int flags)
error1:
cfg->num_sections = curr_section + 1;
+ if (curr_section >= 0)
+ cfg->sections[curr_section]->num_entries = curr_entry + 1;
rte_cfgfile_close(cfg);
error2:
fclose(f);
diff --git a/lib/librte_cfgfile/rte_cfgfile.h b/lib/librte_cfgfile/rte_cfgfile.h
index b40e6a13..fa10d408 100644
--- a/lib/librte_cfgfile/rte_cfgfile.h
+++ b/lib/librte_cfgfile/rte_cfgfile.h
@@ -66,19 +66,61 @@ struct rte_cfgfile_entry {
char value[CFG_VALUE_LEN]; /**< Value */
};
+/** Configuration file operation optional arguments */
+struct rte_cfgfile_parameters {
+ /** Config file comment character; one of '!', '#', '%', ';', '@' */
+ char comment_character;
+};
+
+/**@{ cfgfile load operation flags */
+enum {
+ /**
+ * Indicates that the file supports key value entries before the first
+ * defined section. These entries can be accessed in the "GLOBAL"
+ * section.
+ */
+ CFG_FLAG_GLOBAL_SECTION = 1,
+
+ /**
+ * Indicates that file supports key value entries where the value can
+ * be zero length (e.g., "key=").
+ */
+ CFG_FLAG_EMPTY_VALUES = 2,
+};
+/**@} */
+
+/** Defines the default comment character used for parsing config files. */
+#define CFG_DEFAULT_COMMENT_CHARACTER ';'
+
/**
* Open config file
*
* @param filename
* Config file name
* @param flags
-* Config file flags, Reserved for future use. Must be set to 0.
+* Config file flags
* @return
* Handle to configuration file on success, NULL otherwise
*/
struct rte_cfgfile *rte_cfgfile_load(const char *filename, int flags);
/**
+ * Open config file with specified optional parameters.
+ *
+ * @param filename
+ * Config file name
+ * @param flags
+ * Config file flags
+ * @param params
+ * Additional configuration attributes. Must be configured with desired
+ * values prior to invoking this API.
+ * @return
+ * Handle to configuration file on success, NULL otherwise
+ */
+struct rte_cfgfile *rte_cfgfile_load_with_params(const char *filename,
+ int flags, const struct rte_cfgfile_parameters *params);
+
+/**
* Get number of sections in config file
*
* @param cfg
diff --git a/lib/librte_cfgfile/rte_cfgfile_version.map b/lib/librte_cfgfile/rte_cfgfile_version.map
index 3c2f0dbf..5fe60f72 100644
--- a/lib/librte_cfgfile/rte_cfgfile_version.map
+++ b/lib/librte_cfgfile/rte_cfgfile_version.map
@@ -20,3 +20,10 @@ DPDK_16.04 {
rte_cfgfile_section_entries_by_index;
} DPDK_2.0;
+
+DPDK_17.05 {
+ global:
+
+ rte_cfgfile_load_with_params;
+
+} DPDK_16.04;
diff --git a/lib/librte_cmdline/Makefile b/lib/librte_cmdline/Makefile
index 7d2d148c..644f68e4 100644
--- a/lib/librte_cmdline/Makefile
+++ b/lib/librte_cmdline/Makefile
@@ -61,7 +61,4 @@ INCS += cmdline_parse_etheraddr.h cmdline_parse_string.h cmdline_rdline.h
INCS += cmdline_vt100.h cmdline_socket.h cmdline_cirbuf.h cmdline_parse_portlist.h
SYMLINK-$(CONFIG_RTE_LIBRTE_CMDLINE)-include := $(INCS)
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_cmdline/cmdline_parse.c b/lib/librte_cmdline/cmdline_parse.c
index b496067a..b8148808 100644
--- a/lib/librte_cmdline/cmdline_parse.c
+++ b/lib/librte_cmdline/cmdline_parse.c
@@ -146,7 +146,9 @@ nb_common_chars(const char * s1, const char * s2)
*/
static int
match_inst(cmdline_parse_inst_t *inst, const char *buf,
- unsigned int nb_match_token, void *resbuf, unsigned resbuf_size)
+ unsigned int nb_match_token, void *resbuf, unsigned resbuf_size,
+ cmdline_parse_token_hdr_t
+ *(*dyn_tokens)[CMDLINE_PARSE_DYNAMIC_TOKENS])
{
unsigned int token_num=0;
cmdline_parse_token_hdr_t * token_p;
@@ -155,6 +157,11 @@ match_inst(cmdline_parse_inst_t *inst, const char *buf,
struct cmdline_token_hdr token_hdr;
token_p = inst->tokens[token_num];
+ if (!token_p && dyn_tokens && inst->f) {
+ if (!(*dyn_tokens)[0])
+ inst->f(&(*dyn_tokens)[0], NULL, dyn_tokens);
+ token_p = (*dyn_tokens)[0];
+ }
if (token_p)
memcpy(&token_hdr, token_p, sizeof(token_hdr));
@@ -196,7 +203,17 @@ match_inst(cmdline_parse_inst_t *inst, const char *buf,
buf += n;
token_num ++;
- token_p = inst->tokens[token_num];
+ if (!inst->tokens[0]) {
+ if (token_num < (CMDLINE_PARSE_DYNAMIC_TOKENS - 1)) {
+ if (!(*dyn_tokens)[token_num])
+ inst->f(&(*dyn_tokens)[token_num],
+ NULL,
+ dyn_tokens);
+ token_p = (*dyn_tokens)[token_num];
+ } else
+ token_p = NULL;
+ } else
+ token_p = inst->tokens[token_num];
if (token_p)
memcpy(&token_hdr, token_p, sizeof(token_hdr));
}
@@ -238,7 +255,11 @@ cmdline_parse(struct cmdline *cl, const char * buf)
unsigned int inst_num=0;
cmdline_parse_inst_t *inst;
const char *curbuf;
- char result_buf[CMDLINE_PARSE_RESULT_BUFSIZE];
+ union {
+ char buf[CMDLINE_PARSE_RESULT_BUFSIZE];
+ long double align; /* strong alignment constraint for buf */
+ } result, tmp_result;
+ cmdline_parse_token_hdr_t *dyn_tokens[CMDLINE_PARSE_DYNAMIC_TOKENS];
void (*f)(void *, struct cmdline *, void *) = NULL;
void *data = NULL;
int comment = 0;
@@ -255,6 +276,7 @@ cmdline_parse(struct cmdline *cl, const char * buf)
return CMDLINE_PARSE_BAD_ARGS;
ctx = cl->ctx;
+ memset(&dyn_tokens, 0, sizeof(dyn_tokens));
/*
* - look if the buffer contains at least one line
@@ -299,13 +321,16 @@ cmdline_parse(struct cmdline *cl, const char * buf)
debug_printf("INST %d\n", inst_num);
/* fully parsed */
- tok = match_inst(inst, buf, 0, result_buf, sizeof(result_buf));
+ tok = match_inst(inst, buf, 0, tmp_result.buf,
+ sizeof(tmp_result.buf), &dyn_tokens);
if (tok > 0) /* we matched at least one token */
err = CMDLINE_PARSE_BAD_ARGS;
else if (!tok) {
debug_printf("INST fully parsed\n");
+ memcpy(&result, &tmp_result,
+ sizeof(result));
/* skip spaces */
while (isblank2(*curbuf)) {
curbuf++;
@@ -333,7 +358,7 @@ cmdline_parse(struct cmdline *cl, const char * buf)
/* call func */
if (f) {
- f(result_buf, cl, data);
+ f(result.buf, cl, data);
}
/* no match */
@@ -355,6 +380,7 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
cmdline_parse_token_hdr_t *token_p;
struct cmdline_token_hdr token_hdr;
char tmpbuf[CMDLINE_BUFFER_SIZE], comp_buf[CMDLINE_BUFFER_SIZE];
+ cmdline_parse_token_hdr_t *dyn_tokens[CMDLINE_PARSE_DYNAMIC_TOKENS];
unsigned int partial_tok_len;
int comp_len = -1;
int tmp_len = -1;
@@ -374,6 +400,7 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
debug_printf("%s called\n", __func__);
memset(&token_hdr, 0, sizeof(token_hdr));
+ memset(&dyn_tokens, 0, sizeof(dyn_tokens));
/* count the number of complete token to parse */
for (i=0 ; buf[i] ; i++) {
@@ -396,11 +423,24 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
inst = ctx[inst_num];
while (inst) {
/* parse the first tokens of the inst */
- if (nb_token && match_inst(inst, buf, nb_token, NULL, 0))
+ if (nb_token &&
+ match_inst(inst, buf, nb_token, NULL, 0,
+ &dyn_tokens))
goto next;
debug_printf("instruction match\n");
- token_p = inst->tokens[nb_token];
+ if (!inst->tokens[0]) {
+ if (nb_token <
+ (CMDLINE_PARSE_DYNAMIC_TOKENS - 1)) {
+ if (!dyn_tokens[nb_token])
+ inst->f(&dyn_tokens[nb_token],
+ NULL,
+ &dyn_tokens);
+ token_p = dyn_tokens[nb_token];
+ } else
+ token_p = NULL;
+ } else
+ token_p = inst->tokens[nb_token];
if (token_p)
memcpy(&token_hdr, token_p, sizeof(token_hdr));
@@ -490,10 +530,21 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
/* we need to redo it */
inst = ctx[inst_num];
- if (nb_token && match_inst(inst, buf, nb_token, NULL, 0))
+ if (nb_token &&
+ match_inst(inst, buf, nb_token, NULL, 0, &dyn_tokens))
goto next2;
- token_p = inst->tokens[nb_token];
+ if (!inst->tokens[0]) {
+ if (nb_token < (CMDLINE_PARSE_DYNAMIC_TOKENS - 1)) {
+ if (!dyn_tokens[nb_token])
+ inst->f(&dyn_tokens[nb_token],
+ NULL,
+ &dyn_tokens);
+ token_p = dyn_tokens[nb_token];
+ } else
+ token_p = NULL;
+ } else
+ token_p = inst->tokens[nb_token];
if (token_p)
memcpy(&token_hdr, token_p, sizeof(token_hdr));
diff --git a/lib/librte_cmdline/cmdline_parse.h b/lib/librte_cmdline/cmdline_parse.h
index 4ac05d6b..65b18d4f 100644
--- a/lib/librte_cmdline/cmdline_parse.h
+++ b/lib/librte_cmdline/cmdline_parse.h
@@ -83,6 +83,9 @@ extern "C" {
/* maximum buffer size for parsed result */
#define CMDLINE_PARSE_RESULT_BUFSIZE 8192
+/* maximum number of dynamic tokens */
+#define CMDLINE_PARSE_DYNAMIC_TOKENS 128
+
/**
* Stores a pointer to the ops struct, and the offset: the place to
* write the parsed result in the destination structure.
@@ -130,6 +133,24 @@ struct cmdline;
* Store a instruction, which is a pointer to a callback function and
* its parameter that is called when the instruction is parsed, a help
* string, and a list of token composing this instruction.
+ *
+ * When no tokens are defined (tokens[0] == NULL), they are retrieved
+ * dynamically by calling f() as follows:
+ *
+ * f((struct cmdline_token_hdr **)&token_hdr,
+ * NULL,
+ * (struct cmdline_token_hdr *[])tokens));
+ *
+ * The address of the resulting token is expected at the location pointed by
+ * the first argument. Can be set to NULL to end the list.
+ *
+ * The cmdline argument (struct cmdline *) is always NULL.
+ *
+ * The last argument points to the NULL-terminated list of dynamic tokens
+ * defined so far. Since token_hdr points to an index of that list, the
+ * current index can be derived as follows:
+ *
+ * int index = token_hdr - &(*tokens)[0];
*/
struct cmdline_inst {
/* f(parsed_struct, data) */
diff --git a/lib/librte_cmdline/cmdline_parse_num.c b/lib/librte_cmdline/cmdline_parse_num.c
index b0f9a35d..e507ec4f 100644
--- a/lib/librte_cmdline/cmdline_parse_num.c
+++ b/lib/librte_cmdline/cmdline_parse_num.c
@@ -250,7 +250,7 @@ cmdline_parse_num(cmdline_parse_token_hdr_t *tk, const char *srcbuf, void *res,
case HEX:
st = HEX_OK;
- /* no break */
+ /* fall-through no break */
case HEX_OK:
if (c >= '0' && c <= '9') {
if (add_to_res(c - '0', &res1, 16) < 0)
@@ -282,7 +282,7 @@ cmdline_parse_num(cmdline_parse_token_hdr_t *tk, const char *srcbuf, void *res,
case BIN:
st = BIN_OK;
- /* no break */
+ /* fall-through */
case BIN_OK:
if (c >= '0' && c <= '1') {
if (add_to_res(c - '0', &res1, 2) < 0)
diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
index aebf5d9f..18f5e8c5 100644
--- a/lib/librte_cryptodev/Makefile
+++ b/lib/librte_cryptodev/Makefile
@@ -52,11 +52,4 @@ SYMLINK-y-include += rte_cryptodev_pmd.h
# versioning export map
EXPORT_MAP := rte_cryptodev_version.map
-# library dependencies
-DEPDIRS-y += lib/librte_eal
-DEPDIRS-y += lib/librte_mempool
-DEPDIRS-y += lib/librte_ring
-DEPDIRS-y += lib/librte_mbuf
-DEPDIRS-y += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index d3d38e4f..3a408448 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -105,9 +105,31 @@ enum rte_crypto_cipher_algorithm {
RTE_CRYPTO_CIPHER_ZUC_EEA3,
/**< ZUC algorithm in EEA3 mode */
+ RTE_CRYPTO_CIPHER_DES_CBC,
+ /**< DES algorithm in CBC mode */
+
+ RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+ /**< AES algorithm using modes required by
+ * DOCSIS Baseline Privacy Plus Spec.
+ * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
+ * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
+ */
+
+ RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+ /**< DES algorithm using modes required by
+ * DOCSIS Baseline Privacy Plus Spec.
+ * Chained mbufs are not supported in this mode, i.e. rte_mbuf.next
+ * for m_src and m_dst in the rte_crypto_sym_op must be NULL.
+ */
+
RTE_CRYPTO_CIPHER_LIST_END
+
};
+/** Cipher algorithm name strings */
+extern const char *
+rte_crypto_cipher_algorithm_strings[];
+
/** Symmetric Cipher Direction */
enum rte_crypto_cipher_operation {
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
@@ -116,6 +138,10 @@ enum rte_crypto_cipher_operation {
/**< Decrypt cipher operation */
};
+/** Cipher operation name strings */
+extern const char *
+rte_crypto_cipher_operation_strings[];
+
/**
* Symmetric Cipher Setup Data.
*
@@ -241,12 +267,20 @@ enum rte_crypto_auth_algorithm {
RTE_CRYPTO_AUTH_LIST_END
};
+/** Authentication algorithm name strings */
+extern const char *
+rte_crypto_auth_algorithm_strings[];
+
/** Symmetric Authentication / Hash Operations */
enum rte_crypto_auth_operation {
RTE_CRYPTO_AUTH_OP_VERIFY, /**< Verify authentication digest */
RTE_CRYPTO_AUTH_OP_GENERATE /**< Generate authentication digest */
};
+/** Authentication operation name strings */
+extern const char *
+rte_crypto_auth_operation_strings[];
+
/**
* Authentication / Hash transform data.
*
@@ -276,17 +310,16 @@ struct rte_crypto_auth_xform {
* this specifies the length of the digest to be compared for the
* session.
*
+ * It is the caller's responsibility to ensure that the
+ * digest length is compliant with the hash algorithm being used.
* If the value is less than the maximum length allowed by the hash,
- * the result shall be truncated. If the value is greater than the
- * maximum length allowed by the hash then an error will be generated
- * by *rte_cryptodev_sym_session_create* or by the
- * *rte_cryptodev_sym_enqueue_burst* if using session-less APIs.
+ * the result shall be truncated.
*/
uint32_t add_auth_data_length;
/**< The length of the additional authenticated data (AAD) in bytes.
- * The maximum permitted value is 240 bytes, unless otherwise specified
- * below.
+ * The maximum permitted value is 65535 (2^16 - 1) bytes, unless
+ * otherwise specified below.
*
* This field must be specified when the hash algorithm is one of the
* following:
@@ -541,8 +574,7 @@ struct rte_crypto_sym_op {
struct {
uint8_t *data;
- /**< If this member of this structure is set this is a
- * pointer to the location where the digest result
+ /**< This points to the location where the digest result
* should be inserted (in the case of digest generation)
* or where the purported digest exists (in the case of
* digest verification).
@@ -560,18 +592,13 @@ struct rte_crypto_sym_op {
* @note
* For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for
* "digest result" read "authentication tag T".
- *
- * If this member is not set the digest result is
- * understood to be in the destination buffer for
- * digest generation, and in the source buffer for
- * digest verification. The location of the digest
- * result in this case is immediately following the
- * region over which the digest is computed.
*/
phys_addr_t phys_addr;
/**< Physical address of digest */
uint16_t length;
- /**< Length of digest */
+ /**< Length of digest. This must be the same value as
+ * @ref rte_crypto_auth_xform.digest_length.
+ */
} digest; /**< Digest parameters */
struct {
@@ -586,7 +613,7 @@ struct rte_crypto_sym_op {
* set up for the session in the @ref
* rte_crypto_auth_xform structure as part of the @ref
* rte_cryptodev_sym_session_create function call.
- * This length must not exceed 240 bytes.
+ * This length must not exceed 65535 (2^16-1) bytes.
*
* Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM),
* the caller should setup this field as follows:
@@ -619,7 +646,10 @@ struct rte_crypto_sym_op {
* operation, this field is used to pass plaintext.
*/
phys_addr_t phys_addr; /**< physical address */
- uint16_t length; /**< Length of digest */
+ uint16_t length;
+ /**< Length of additional authenticated data (AAD)
+ * in bytes
+ */
} aad;
/**< Additional authentication parameters */
} auth;
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 54e95d5c..b65cd9ce 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -101,16 +101,138 @@ struct rte_cryptodev_callback {
uint32_t active; /**< Callback is executing */
};
+#define RTE_CRYPTODEV_VDEV_NAME ("name")
#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
static const char *cryptodev_vdev_valid_params[] = {
+ RTE_CRYPTODEV_VDEV_NAME,
RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
RTE_CRYPTODEV_VDEV_SOCKET_ID
};
+/**
+ * The crypto cipher algorithm strings identifiers.
+ * It could be used in application command line.
+ */
+const char *
+rte_crypto_cipher_algorithm_strings[] = {
+ [RTE_CRYPTO_CIPHER_3DES_CBC] = "3des-cbc",
+ [RTE_CRYPTO_CIPHER_3DES_ECB] = "3des-ecb",
+ [RTE_CRYPTO_CIPHER_3DES_CTR] = "3des-ctr",
+
+ [RTE_CRYPTO_CIPHER_AES_CBC] = "aes-cbc",
+ [RTE_CRYPTO_CIPHER_AES_CCM] = "aes-ccm",
+ [RTE_CRYPTO_CIPHER_AES_CTR] = "aes-ctr",
+ [RTE_CRYPTO_CIPHER_AES_DOCSISBPI] = "aes-docsisbpi",
+ [RTE_CRYPTO_CIPHER_AES_ECB] = "aes-ecb",
+ [RTE_CRYPTO_CIPHER_AES_GCM] = "aes-gcm",
+ [RTE_CRYPTO_CIPHER_AES_F8] = "aes-f8",
+ [RTE_CRYPTO_CIPHER_AES_XTS] = "aes-xts",
+
+ [RTE_CRYPTO_CIPHER_ARC4] = "arc4",
+
+ [RTE_CRYPTO_CIPHER_DES_CBC] = "des-cbc",
+ [RTE_CRYPTO_CIPHER_DES_DOCSISBPI] = "des-docsisbpi",
+
+ [RTE_CRYPTO_CIPHER_NULL] = "null",
+
+ [RTE_CRYPTO_CIPHER_KASUMI_F8] = "kasumi-f8",
+ [RTE_CRYPTO_CIPHER_SNOW3G_UEA2] = "snow3g-uea2",
+ [RTE_CRYPTO_CIPHER_ZUC_EEA3] = "zuc-eea3"
+};
+
+/**
+ * The crypto cipher operation strings identifiers.
+ * It could be used in application command line.
+ */
+const char *
+rte_crypto_cipher_operation_strings[] = {
+ [RTE_CRYPTO_CIPHER_OP_ENCRYPT] = "encrypt",
+ [RTE_CRYPTO_CIPHER_OP_DECRYPT] = "decrypt"
+};
+
+/**
+ * The crypto auth algorithm strings identifiers.
+ * It could be used in application command line.
+ */
+const char *
+rte_crypto_auth_algorithm_strings[] = {
+ [RTE_CRYPTO_AUTH_AES_CBC_MAC] = "aes-cbc-mac",
+ [RTE_CRYPTO_AUTH_AES_CCM] = "aes-ccm",
+ [RTE_CRYPTO_AUTH_AES_CMAC] = "aes-cmac",
+ [RTE_CRYPTO_AUTH_AES_GCM] = "aes-gcm",
+ [RTE_CRYPTO_AUTH_AES_GMAC] = "aes-gmac",
+ [RTE_CRYPTO_AUTH_AES_XCBC_MAC] = "aes-xcbc-mac",
+
+ [RTE_CRYPTO_AUTH_MD5] = "md5",
+ [RTE_CRYPTO_AUTH_MD5_HMAC] = "md5-hmac",
+
+ [RTE_CRYPTO_AUTH_NULL] = "null",
+
+ [RTE_CRYPTO_AUTH_SHA1] = "sha1",
+ [RTE_CRYPTO_AUTH_SHA1_HMAC] = "sha1-hmac",
+
+ [RTE_CRYPTO_AUTH_SHA224] = "sha2-224",
+ [RTE_CRYPTO_AUTH_SHA224_HMAC] = "sha2-224-hmac",
+ [RTE_CRYPTO_AUTH_SHA256] = "sha2-256",
+ [RTE_CRYPTO_AUTH_SHA256_HMAC] = "sha2-256-hmac",
+ [RTE_CRYPTO_AUTH_SHA384] = "sha2-384",
+ [RTE_CRYPTO_AUTH_SHA384_HMAC] = "sha2-384-hmac",
+ [RTE_CRYPTO_AUTH_SHA512] = "sha2-512",
+ [RTE_CRYPTO_AUTH_SHA512_HMAC] = "sha2-512-hmac",
+
+ [RTE_CRYPTO_AUTH_KASUMI_F9] = "kasumi-f9",
+ [RTE_CRYPTO_AUTH_SNOW3G_UIA2] = "snow3g-uia2",
+ [RTE_CRYPTO_AUTH_ZUC_EIA3] = "zuc-eia3"
+};
+
+int
+rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
+ const char *algo_string)
+{
+ unsigned int i;
+
+ for (i = 1; i < RTE_DIM(rte_crypto_cipher_algorithm_strings); i++) {
+ if (strcmp(algo_string, rte_crypto_cipher_algorithm_strings[i]) == 0) {
+ *algo_enum = (enum rte_crypto_cipher_algorithm) i;
+ return 0;
+ }
+ }
+
+ /* Invalid string */
+ return -1;
+}
+
+int
+rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
+ const char *algo_string)
+{
+ unsigned int i;
+
+ for (i = 1; i < RTE_DIM(rte_crypto_auth_algorithm_strings); i++) {
+ if (strcmp(algo_string, rte_crypto_auth_algorithm_strings[i]) == 0) {
+ *algo_enum = (enum rte_crypto_auth_algorithm) i;
+ return 0;
+ }
+ }
+
+ /* Invalid string */
+ return -1;
+}
+
+/**
+ * The crypto auth operation strings identifiers.
+ * It could be used in application command line.
+ */
+const char *
+rte_crypto_auth_operation_strings[] = {
+ [RTE_CRYPTO_AUTH_OP_VERIFY] = "verify",
+ [RTE_CRYPTO_AUTH_OP_GENERATE] = "generate"
+};
+
static uint8_t
number_of_sockets(void)
{
@@ -132,7 +254,7 @@ static int
parse_integer_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
- int *i = (int *) extra_args;
+ int *i = extra_args;
*i = atoi(value);
if (*i < 0) {
@@ -143,6 +265,25 @@ parse_integer_arg(const char *key __rte_unused,
return 0;
}
+/** Parse name */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_crypto_vdev_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CDEV_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -1;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
int
rte_cryptodev_parse_vdev_init_params(struct rte_crypto_vdev_init_params *params,
const char *input_args)
@@ -179,6 +320,12 @@ rte_cryptodev_parse_vdev_init_params(struct rte_crypto_vdev_init_params *params,
if (ret < 0)
goto free_kvlist;
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
+ &parse_name_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
if (params->socket_id >= number_of_sockets()) {
CDEV_LOG_ERR("Invalid socket id specified to create "
"the virtual crypto device on");
@@ -191,6 +338,73 @@ free_kvlist:
return ret;
}
+const struct rte_cryptodev_symmetric_capability *
+rte_cryptodev_sym_capability_get(uint8_t dev_id,
+ const struct rte_cryptodev_sym_capability_idx *idx)
+{
+ const struct rte_cryptodev_capabilities *capability;
+ struct rte_cryptodev_info dev_info;
+ int i = 0;
+
+ rte_cryptodev_info_get(dev_id, &dev_info);
+
+ while ((capability = &dev_info.capabilities[i++])->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != idx->type)
+ continue;
+
+ if (idx->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ capability->sym.auth.algo == idx->algo.auth)
+ return &capability->sym;
+
+ if (idx->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ capability->sym.cipher.algo == idx->algo.cipher)
+ return &capability->sym;
+ }
+
+ return NULL;
+
+}
+
+#define param_range_check(x, y) \
+ (((x < y.min) || (x > y.max)) || \
+ (y.increment != 0 && (x % y.increment) != 0))
+
+int
+rte_cryptodev_sym_capability_check_cipher(
+ const struct rte_cryptodev_symmetric_capability *capability,
+ uint16_t key_size, uint16_t iv_size)
+{
+ if (param_range_check(key_size, capability->cipher.key_size))
+ return -1;
+
+ if (param_range_check(iv_size, capability->cipher.iv_size))
+ return -1;
+
+ return 0;
+}
+
+int
+rte_cryptodev_sym_capability_check_auth(
+ const struct rte_cryptodev_symmetric_capability *capability,
+ uint16_t key_size, uint16_t digest_size, uint16_t aad_size)
+{
+ if (param_range_check(key_size, capability->auth.key_size))
+ return -1;
+
+ if (param_range_check(digest_size, capability->auth.digest_size))
+ return -1;
+
+ if (param_range_check(aad_size, capability->auth.aad_size))
+ return -1;
+
+ return 0;
+}
+
+
const char *
rte_cryptodev_get_feature_name(uint64_t flag)
{
@@ -211,19 +425,65 @@ rte_cryptodev_get_feature_name(uint64_t flag)
return "CPU_AESNI";
case RTE_CRYPTODEV_FF_HW_ACCELERATED:
return "HW_ACCELERATED";
-
+ case RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER:
+ return "MBUF_SCATTER_GATHER";
+ case RTE_CRYPTODEV_FF_CPU_NEON:
+ return "CPU_NEON";
+ case RTE_CRYPTODEV_FF_CPU_ARM_CE:
+ return "CPU_ARM_CE";
default:
return NULL;
}
}
-
int
rte_cryptodev_create_vdev(const char *name, const char *args)
{
- return rte_eal_vdev_init(name, args);
+ return rte_vdev_init(name, args);
+}
+
+struct rte_cryptodev *
+rte_cryptodev_pmd_get_dev(uint8_t dev_id)
+{
+ return &rte_cryptodev_globals->devs[dev_id];
+}
+
+struct rte_cryptodev *
+rte_cryptodev_pmd_get_named_dev(const char *name)
+{
+ struct rte_cryptodev *dev;
+ unsigned int i;
+
+ if (name == NULL)
+ return NULL;
+
+ for (i = 0; i < rte_cryptodev_globals->max_devs; i++) {
+ dev = &rte_cryptodev_globals->devs[i];
+
+ if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
+ (strcmp(dev->data->name, name) == 0))
+ return dev;
+ }
+
+ return NULL;
+}
+
+unsigned int
+rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev = NULL;
+
+ if (dev_id >= rte_cryptodev_globals->nb_devs)
+ return 0;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+ if (dev->attached != RTE_CRYPTODEV_ATTACHED)
+ return 0;
+ else
+ return 1;
}
+
int
rte_cryptodev_get_dev_id(const char *name)
{
@@ -262,6 +522,35 @@ rte_cryptodev_count_devtype(enum rte_cryptodev_type type)
return dev_count;
}
+uint8_t
+rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices,
+ uint8_t nb_devices)
+{
+ uint8_t i, count = 0;
+ struct rte_cryptodev *devs = rte_cryptodev_globals->devs;
+ uint8_t max_devs = rte_cryptodev_globals->max_devs;
+
+ for (i = 0; i < max_devs && count < nb_devices; i++) {
+
+ if (devs[i].attached == RTE_CRYPTODEV_ATTACHED) {
+ const struct rte_cryptodev_driver *drv = devs[i].driver;
+ int cmp;
+
+ if (drv)
+ cmp = strncmp(drv->pci_drv.driver.name,
+ dev_name, strlen(dev_name));
+ else
+ cmp = strncmp(devs[i].data->name,
+ dev_name, strlen(dev_name));
+
+ if (cmp == 0)
+ devices[count++] = devs[i].data->dev_id;
+ }
+ }
+
+ return count;
+}
+
int
rte_cryptodev_socket_id(uint8_t dev_id)
{
@@ -427,7 +716,7 @@ rte_cryptodev_pci_probe(struct rte_pci_driver *pci_drv,
if (cryptodrv == NULL)
return -ENODEV;
- rte_eal_pci_device_name(&pci_dev->addr, cryptodev_name,
+ rte_pci_device_name(&pci_dev->addr, cryptodev_name,
sizeof(cryptodev_name));
cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
@@ -447,7 +736,7 @@ rte_cryptodev_pci_probe(struct rte_pci_driver *pci_drv,
"device data");
}
- cryptodev->pci_dev = pci_dev;
+ cryptodev->device = &pci_dev->device;
cryptodev->driver = cryptodrv;
/* init user callbacks */
@@ -483,7 +772,7 @@ rte_cryptodev_pci_remove(struct rte_pci_device *pci_dev)
if (pci_dev == NULL)
return -EINVAL;
- rte_eal_pci_device_name(&pci_dev->addr, cryptodev_name,
+ rte_pci_device_name(&pci_dev->addr, cryptodev_name,
sizeof(cryptodev_name));
cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
@@ -507,7 +796,7 @@ rte_cryptodev_pci_remove(struct rte_pci_device *pci_dev)
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
rte_free(cryptodev->data->dev_private);
- cryptodev->pci_dev = NULL;
+ cryptodev->device = NULL;
cryptodev->driver = NULL;
cryptodev->data = NULL;
@@ -668,6 +957,8 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
return -EBUSY;
}
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
/* Setup new number of queue pairs and reconfigure device. */
diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
config->socket_id);
@@ -678,10 +969,14 @@ rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
}
/* Setup Session mempool for device */
- return rte_cryptodev_sym_session_pool_create(dev,
+ diag = rte_cryptodev_sym_session_pool_create(dev,
config->session_mp.nb_objs,
config->session_mp.cache_size,
config->socket_id);
+ if (diag != 0)
+ return diag;
+
+ return (*dev->dev_ops->dev_configure)(dev, config);
}
@@ -868,7 +1163,7 @@ rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
- dev_info->pci_dev = dev->pci_dev;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
if (dev->driver)
dev_info->driver_name = dev->driver->pci_drv.driver.name;
}
@@ -1088,7 +1383,7 @@ rte_cryptodev_sym_session_create(uint8_t dev_id,
return NULL;
}
- sess = (struct rte_cryptodev_sym_session *)_sess;
+ sess = _sess;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
@@ -1104,6 +1399,53 @@ rte_cryptodev_sym_session_create(uint8_t dev_id,
return sess;
}
+int
+rte_cryptodev_queue_pair_attach_sym_session(uint16_t qp_id,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(sess->dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%d", sess->dev_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_crypto_devices[sess->dev_id];
+
+ /* The API is optional, not returning error if driver do not suuport */
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_attach_session, 0);
+ if (dev->dev_ops->qp_attach_session(dev, qp_id, sess->_private)) {
+ CDEV_LOG_ERR("dev_id %d failed to attach qp: %d with session",
+ sess->dev_id, qp_id);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+int
+rte_cryptodev_queue_pair_detach_sym_session(uint16_t qp_id,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct rte_cryptodev *dev;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(sess->dev_id)) {
+ CDEV_LOG_ERR("Invalid dev_id=%d", sess->dev_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_crypto_devices[sess->dev_id];
+
+ /* The API is optional, not returning error if driver do not suuport */
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_detach_session, 0);
+ if (dev->dev_ops->qp_detach_session(dev, qp_id, sess->_private)) {
+ CDEV_LOG_ERR("dev_id %d failed to detach qp: %d from session",
+ sess->dev_id, qp_id);
+ return -EPERM;
+ }
+
+ return 0;
+}
struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_free(uint8_t dev_id,
struct rte_cryptodev_sym_session *sess)
@@ -1206,3 +1548,27 @@ rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
return mp;
}
+
+int
+rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix)
+{
+ struct rte_cryptodev *dev = NULL;
+ uint32_t i = 0;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < RTE_CRYPTO_MAX_DEVS; i++) {
+ int ret = snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "%s_%u", dev_name_prefix, i);
+
+ if (ret < 0)
+ return ret;
+
+ dev = rte_cryptodev_pmd_get_named_dev(name);
+ if (!dev)
+ return 0;
+ }
+
+ return -1;
+}
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 8f63e8f6..88aeb873 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -1,6 +1,6 @@
/*-
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -66,6 +66,12 @@ extern "C" {
/**< KASUMI PMD device name */
#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
/**< KASUMI PMD device name */
+#define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8
+/**< ARMv8 Crypto PMD device name */
+#define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler
+/**< Scheduler Crypto PMD device name */
+#define CRYPTODEV_NAME_DPAA2_SEC_PMD cryptodev_dpaa2_sec_pmd
+/**< NXP DPAA2 - SEC PMD device name */
/** Crypto device type */
enum rte_cryptodev_type {
@@ -77,6 +83,9 @@ enum rte_cryptodev_type {
RTE_CRYPTODEV_KASUMI_PMD, /**< KASUMI PMD */
RTE_CRYPTODEV_ZUC_PMD, /**< ZUC PMD */
RTE_CRYPTODEV_OPENSSL_PMD, /**< OpenSSL PMD */
+ RTE_CRYPTODEV_ARMV8_PMD, /**< ARMv8 crypto PMD */
+ RTE_CRYPTODEV_SCHEDULER_PMD, /**< Crypto Scheduler PMD */
+ RTE_CRYPTODEV_DPAA2_SEC_PMD, /**< NXP DPAA2 - SEC PMD */
};
extern const char **rte_cyptodev_names;
@@ -110,6 +119,20 @@ extern const char **rte_cyptodev_names;
#endif
/**
+ * Crypto parameters range description
+ */
+struct rte_crypto_param_range {
+ uint16_t min; /**< minimum size */
+ uint16_t max; /**< maximum size */
+ uint16_t increment;
+ /**< if a range of sizes are supported,
+ * this parameter is used to indicate
+ * increments in byte size that are supported
+ * between the minimum and maximum
+ */
+};
+
+/**
* Symmetric Crypto Capability
*/
struct rte_cryptodev_symmetric_capability {
@@ -122,35 +145,11 @@ struct rte_cryptodev_symmetric_capability {
/**< authentication algorithm */
uint16_t block_size;
/**< algorithm block size */
- struct {
- uint16_t min; /**< minimum key size */
- uint16_t max; /**< maximum key size */
- uint16_t increment;
- /**< if a range of sizes are supported,
- * this parameter is used to indicate
- * increments in byte size that are supported
- * between the minimum and maximum */
- } key_size;
+ struct rte_crypto_param_range key_size;
/**< auth key size range */
- struct {
- uint16_t min; /**< minimum digest size */
- uint16_t max; /**< maximum digest size */
- uint16_t increment;
- /**< if a range of sizes are supported,
- * this parameter is used to indicate
- * increments in byte size that are supported
- * between the minimum and maximum */
- } digest_size;
+ struct rte_crypto_param_range digest_size;
/**< digest size range */
- struct {
- uint16_t min; /**< minimum aad size */
- uint16_t max; /**< maximum aad size */
- uint16_t increment;
- /**< if a range of sizes are supported,
- * this parameter is used to indicate
- * increments in byte size that are supported
- * between the minimum and maximum */
- } aad_size;
+ struct rte_crypto_param_range aad_size;
/**< Additional authentication data size range */
} auth;
/**< Symmetric Authentication transform capabilities */
@@ -159,25 +158,9 @@ struct rte_cryptodev_symmetric_capability {
/**< cipher algorithm */
uint16_t block_size;
/**< algorithm block size */
- struct {
- uint16_t min; /**< minimum key size */
- uint16_t max; /**< maximum key size */
- uint16_t increment;
- /**< if a range of sizes are supported,
- * this parameter is used to indicate
- * increments in byte size that are supported
- * between the minimum and maximum */
- } key_size;
+ struct rte_crypto_param_range key_size;
/**< cipher key size range */
- struct {
- uint16_t min; /**< minimum iv size */
- uint16_t max; /**< maximum iv size */
- uint16_t increment;
- /**< if a range of sizes are supported,
- * this parameter is used to indicate
- * increments in byte size that are supported
- * between the minimum and maximum */
- } iv_size;
+ struct rte_crypto_param_range iv_size;
/**< Initialisation vector data size range */
} cipher;
/**< Symmetric Cipher transform capabilities */
@@ -196,6 +179,94 @@ struct rte_cryptodev_capabilities {
};
};
+/** Structure used to describe crypto algorithms */
+struct rte_cryptodev_sym_capability_idx {
+ enum rte_crypto_sym_xform_type type;
+ union {
+ enum rte_crypto_cipher_algorithm cipher;
+ enum rte_crypto_auth_algorithm auth;
+ } algo;
+};
+
+/**
+ * Provide capabilities available for defined device and algorithm
+ *
+ * @param dev_id The identifier of the device.
+ * @param idx Description of crypto algorithms.
+ *
+ * @return
+ * - Return description of the symmetric crypto capability if exist.
+ * - Return NULL if the capability not exist.
+ */
+const struct rte_cryptodev_symmetric_capability *
+rte_cryptodev_sym_capability_get(uint8_t dev_id,
+ const struct rte_cryptodev_sym_capability_idx *idx);
+
+/**
+ * Check if key size and initial vector are supported
+ * in crypto cipher capability
+ *
+ * @param capability Description of the symmetric crypto capability.
+ * @param key_size Cipher key size.
+ * @param iv_size Cipher initial vector size.
+ *
+ * @return
+ * - Return 0 if the parameters are in range of the capability.
+ * - Return -1 if the parameters are out of range of the capability.
+ */
+int
+rte_cryptodev_sym_capability_check_cipher(
+ const struct rte_cryptodev_symmetric_capability *capability,
+ uint16_t key_size, uint16_t iv_size);
+
+/**
+ * Check if key size and initial vector are supported
+ * in crypto auth capability
+ *
+ * @param capability Description of the symmetric crypto capability.
+ * @param key_size Auth key size.
+ * @param digest_size Auth digest size.
+ * @param aad_size Auth aad size.
+ *
+ * @return
+ * - Return 0 if the parameters are in range of the capability.
+ * - Return -1 if the parameters are out of range of the capability.
+ */
+int
+rte_cryptodev_sym_capability_check_auth(
+ const struct rte_cryptodev_symmetric_capability *capability,
+ uint16_t key_size, uint16_t digest_size, uint16_t aad_size);
+
+/**
+ * Provide the cipher algorithm enum, given an algorithm string
+ *
+ * @param algo_enum A pointer to the cipher algorithm
+ * enum to be filled
+ * @param algo_string Authentication algo string
+ *
+ * @return
+ * - Return -1 if string is not valid
+ * - Return 0 is the string is valid
+ */
+int
+rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
+ const char *algo_string);
+
+/**
+ * Provide the authentication algorithm enum, given an algorithm string
+ *
+ * @param algo_enum A pointer to the authentication algorithm
+ * enum to be filled
+ * @param algo_string Authentication algo string
+ *
+ * @return
+ * - Return -1 if string is not valid
+ * - Return 0 is the string is valid
+ */
+int
+rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum,
+ const char *algo_string);
+
/** Macro used at end of crypto PMD list */
#define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
@@ -225,6 +296,14 @@ struct rte_cryptodev_capabilities {
/**< Utilises CPU AES-NI instructions */
#define RTE_CRYPTODEV_FF_HW_ACCELERATED (1ULL << 7)
/**< Operations are off-loaded to an external hardware accelerator */
+#define RTE_CRYPTODEV_FF_CPU_AVX512 (1ULL << 8)
+/**< Utilises CPU SIMD AVX512 instructions */
+#define RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER (1ULL << 9)
+/**< Scatter-gather mbufs are supported */
+#define RTE_CRYPTODEV_FF_CPU_NEON (1ULL << 10)
+/**< Utilises CPU NEON instructions */
+#define RTE_CRYPTODEV_FF_CPU_ARM_CE (1ULL << 11)
+/**< Utilises ARM CPU Cryptographic Extensions */
/**
@@ -256,6 +335,10 @@ struct rte_cryptodev_info {
struct {
unsigned max_nb_sessions;
/**< Maximum number of sessions supported by device. */
+ unsigned int max_nb_sessions_per_qp;
+ /**< Maximum number of sessions per queue pair.
+ * Default 0 for infinite sessions
+ */
} sym;
};
@@ -300,6 +383,8 @@ struct rte_cryptodev_stats {
/**< Total error count on operations dequeued */
};
+#define RTE_CRYPTODEV_NAME_MAX_LEN (64)
+/**< Max length of name of crypto PMD */
#define RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS 8
#define RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS 2048
@@ -311,6 +396,7 @@ struct rte_crypto_vdev_init_params {
unsigned max_nb_queue_pairs;
unsigned max_nb_sessions;
uint8_t socket_id;
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
};
/**
@@ -365,8 +451,30 @@ rte_cryptodev_get_dev_id(const char *name);
extern uint8_t
rte_cryptodev_count(void);
+/**
+ * Get number of crypto device defined type.
+ *
+ * @param type type of device.
+ *
+ * @return
+ * Returns number of crypto device.
+ */
extern uint8_t
rte_cryptodev_count_devtype(enum rte_cryptodev_type type);
+
+/**
+ * Get number and identifiers of attached crypto device.
+ *
+ * @param dev_name device name.
+ * @param devices output devices identifiers.
+ * @param nb_devices maximal number of devices.
+ *
+ * @return
+ * Returns number of attached crypto device.
+ */
+uint8_t
+rte_cryptodev_devices_get(const char *dev_name, uint8_t *devices,
+ uint8_t nb_devices);
/*
* Return the NUMA socket to which a device is connected
*
@@ -621,8 +729,8 @@ struct rte_cryptodev {
/**< Functions exported by PMD */
uint64_t feature_flags;
/**< Supported features */
- struct rte_pci_device *pci_dev;
- /**< PCI info. supplied by probing */
+ struct rte_device *device;
+ /**< Backing device */
enum rte_cryptodev_type dev_type;
/**< Crypto device type */
@@ -635,10 +743,6 @@ struct rte_cryptodev {
/**< Flag indicating the device is attached */
} __rte_cache_aligned;
-
-#define RTE_CRYPTODEV_NAME_MAX_LEN (64)
-/**< Max length of name of crypto PMD */
-
/**
*
* The data part, with no function pointers, associated with each device.
@@ -818,6 +922,36 @@ extern struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_free(uint8_t dev_id,
struct rte_cryptodev_sym_session *session);
+/**
+ * Attach queue pair with sym session.
+ *
+ * @param qp_id Queue pair to which session will be attached.
+ * @param session Session pointer previously allocated by
+ * *rte_cryptodev_sym_session_create*.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+rte_cryptodev_queue_pair_attach_sym_session(uint16_t qp_id,
+ struct rte_cryptodev_sym_session *session);
+
+/**
+ * Detach queue pair with sym session.
+ *
+ * @param qp_id Queue pair to which session is attached.
+ * @param session Session pointer previously allocated by
+ * *rte_cryptodev_sym_session_create*.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+rte_cryptodev_queue_pair_detach_sym_session(uint16_t qp_id,
+ struct rte_cryptodev_sym_session *session);
+
#ifdef __cplusplus
}
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index c6a57945..17ef37c7 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -57,14 +57,6 @@ extern "C" {
#include "rte_crypto.h"
#include "rte_cryptodev.h"
-
-#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
-#define RTE_PMD_DEBUG_TRACE(...) \
- rte_pmd_debug_trace(__func__, __VA_ARGS__)
-#else
-#define RTE_PMD_DEBUG_TRACE(...)
-#endif
-
struct rte_cryptodev_session {
RTE_STD_C11
struct {
@@ -160,11 +152,8 @@ extern struct rte_cryptodev_global *rte_cryptodev_globals;
* @return
* - The rte_cryptodev structure pointer for the given device ID.
*/
-static inline struct rte_cryptodev *
-rte_cryptodev_pmd_get_dev(uint8_t dev_id)
-{
- return &rte_cryptodev_globals->devs[dev_id];
-}
+struct rte_cryptodev *
+rte_cryptodev_pmd_get_dev(uint8_t dev_id);
/**
* Get the rte_cryptodev structure device pointer for the named device.
@@ -174,25 +163,8 @@ rte_cryptodev_pmd_get_dev(uint8_t dev_id)
* @return
* - The rte_cryptodev structure pointer for the given device ID.
*/
-static inline struct rte_cryptodev *
-rte_cryptodev_pmd_get_named_dev(const char *name)
-{
- struct rte_cryptodev *dev;
- unsigned i;
-
- if (name == NULL)
- return NULL;
-
- for (i = 0; i < rte_cryptodev_globals->max_devs; i++) {
- dev = &rte_cryptodev_globals->devs[i];
-
- if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
- (strcmp(dev->data->name, name) == 0))
- return dev;
- }
-
- return NULL;
-}
+struct rte_cryptodev *
+rte_cryptodev_pmd_get_named_dev(const char *name);
/**
* Validate if the crypto device index is valid attached crypto device.
@@ -202,20 +174,8 @@ rte_cryptodev_pmd_get_named_dev(const char *name)
* @return
* - If the device index is valid (1) or not (0).
*/
-static inline unsigned
-rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
-{
- struct rte_cryptodev *dev = NULL;
-
- if (dev_id >= rte_cryptodev_globals->nb_devs)
- return 0;
-
- dev = rte_cryptodev_pmd_get_dev(dev_id);
- if (dev->attached != RTE_CRYPTODEV_ATTACHED)
- return 0;
- else
- return 1;
-}
+unsigned int
+rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id);
/**
* The pool of rte_cryptodev structures.
@@ -233,10 +193,12 @@ extern struct rte_cryptodev *rte_cryptodevs;
* Function used to configure device.
*
* @param dev Crypto device pointer
+ * config Crypto device configurations
*
* @return Returns 0 on success
*/
-typedef int (*cryptodev_configure_t)(struct rte_cryptodev *dev);
+typedef int (*cryptodev_configure_t)(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config);
/**
* Function used to start a configured device.
@@ -413,6 +375,31 @@ typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
void *session_private);
+/**
+ * Optional API for drivers to attach sessions with queue pair.
+ * @param dev Crypto device pointer
+ * @param qp_id queue pair id for attaching session
+ * @param priv_sess Pointer to cryptodev's private session structure
+ * @return
+ * - Return 0 on success
+ */
+typedef int (*cryptodev_sym_queue_pair_attach_session_t)(
+ struct rte_cryptodev *dev,
+ uint16_t qp_id,
+ void *session_private);
+
+/**
+ * Optional API for drivers to detach sessions from queue pair.
+ * @param dev Crypto device pointer
+ * @param qp_id queue pair id for detaching session
+ * @param priv_sess Pointer to cryptodev's private session structure
+ * @return
+ * - Return 0 on success
+ */
+typedef int (*cryptodev_sym_queue_pair_detach_session_t)(
+ struct rte_cryptodev *dev,
+ uint16_t qp_id,
+ void *session_private);
/** Crypto device operations function pointer table */
struct rte_cryptodev_ops {
@@ -447,6 +434,10 @@ struct rte_cryptodev_ops {
/**< Configure a Crypto session. */
cryptodev_sym_free_session_t session_clear;
/**< Clear a Crypto sessions private data. */
+ cryptodev_sym_queue_pair_attach_session_t qp_attach_session;
+ /**< Attach session to queue pair. */
+ cryptodev_sym_queue_pair_attach_session_t qp_detach_session;
+ /**< Detach session from queue pair. */
};
@@ -520,6 +511,13 @@ int rte_cryptodev_pci_probe(struct rte_pci_driver *pci_drv,
*/
int rte_cryptodev_pci_remove(struct rte_pci_device *pci_dev);
+/**
+ * @internal
+ * Create unique device name
+ */
+int
+rte_cryptodev_pmd_create_dev_name(char *name, const char *dev_name_prefix);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index 9dde0e72..9ac510ec 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -46,3 +46,31 @@ DPDK_16.11 {
rte_cryptodev_pci_remove;
} DPDK_16.07;
+
+DPDK_17.02 {
+ global:
+
+ rte_cryptodev_devices_get;
+ rte_cryptodev_pmd_create_dev_name;
+ rte_cryptodev_pmd_get_dev;
+ rte_cryptodev_pmd_get_named_dev;
+ rte_cryptodev_pmd_is_valid_dev;
+ rte_cryptodev_sym_capability_check_auth;
+ rte_cryptodev_sym_capability_check_cipher;
+ rte_cryptodev_sym_capability_get;
+ rte_crypto_auth_algorithm_strings;
+ rte_crypto_auth_operation_strings;
+ rte_crypto_cipher_algorithm_strings;
+ rte_crypto_cipher_operation_strings;
+
+} DPDK_16.11;
+
+DPDK_17.05 {
+ global:
+
+ rte_cryptodev_get_auth_algo_enum;
+ rte_cryptodev_get_cipher_algo_enum;
+ rte_cryptodev_queue_pair_attach_sym_session;
+ rte_cryptodev_queue_pair_detach_sym_session;
+
+} DPDK_17.02;
diff --git a/lib/librte_distributor/Makefile b/lib/librte_distributor/Makefile
index 4c9af172..3ffb911c 100644
--- a/lib/librte_distributor/Makefile
+++ b/lib/librte_distributor/Makefile
@@ -42,13 +42,20 @@ EXPORT_MAP := rte_distributor_version.map
LIBABIVER := 1
# all source are stored in SRCS-y
-SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) := rte_distributor.c
+SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) := rte_distributor_v20.c
+SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += rte_distributor.c
+ifeq ($(CONFIG_RTE_ARCH_X86),y)
+SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += rte_distributor_match_sse.c
+# distributor SIMD algo needs SSE4.2 support
+ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_2,$(CFLAGS)),)
+CFLAGS_rte_distributor_match_sse.o += -msse4.2
+endif
+else
+SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += rte_distributor_match_generic.c
+endif
+
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR)-include := rte_distributor.h
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += lib/librte_mbuf
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_distributor/rte_distributor.c b/lib/librte_distributor/rte_distributor.c
index f3f778c9..e4dfa7f0 100644
--- a/lib/librte_distributor/rte_distributor.c
+++ b/lib/librte_distributor/rte_distributor.c
@@ -1,8 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -36,367 +35,492 @@
#include <string.h>
#include <rte_mbuf.h>
#include <rte_memory.h>
+#include <rte_cycles.h>
+#include <rte_compat.h>
#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_eal_memconfig.h>
+#include <rte_compat.h>
+#include "rte_distributor_private.h"
#include "rte_distributor.h"
+#include "rte_distributor_v20.h"
+#include "rte_distributor_v1705.h"
-#define NO_FLAGS 0
-#define RTE_DISTRIB_PREFIX "DT_"
-
-/* we will use the bottom four bits of pointer for flags, shifting out
- * the top four bits to make room (since a 64-bit pointer actually only uses
- * 48 bits). An arithmetic-right-shift will then appropriately restore the
- * original pointer value with proper sign extension into the top bits. */
-#define RTE_DISTRIB_FLAG_BITS 4
-#define RTE_DISTRIB_FLAGS_MASK (0x0F)
-#define RTE_DISTRIB_NO_BUF 0 /**< empty flags: no buffer requested */
-#define RTE_DISTRIB_GET_BUF (1) /**< worker requests a buffer, returns old */
-#define RTE_DISTRIB_RETURN_BUF (2) /**< worker returns a buffer, no request */
-
-#define RTE_DISTRIB_BACKLOG_SIZE 8
-#define RTE_DISTRIB_BACKLOG_MASK (RTE_DISTRIB_BACKLOG_SIZE - 1)
-
-#define RTE_DISTRIB_MAX_RETURNS 128
-#define RTE_DISTRIB_RETURNS_MASK (RTE_DISTRIB_MAX_RETURNS - 1)
-
-/**
- * Maximum number of workers allowed.
- * Be aware of increasing the limit, becaus it is limited by how we track
- * in-flight tags. See @in_flight_bitmask and @rte_distributor_process
- */
-#define RTE_DISTRIB_MAX_WORKERS 64
-
-/**
- * Buffer structure used to pass the pointer data between cores. This is cache
- * line aligned, but to improve performance and prevent adjacent cache-line
- * prefetches of buffers for other workers, e.g. when worker 1's buffer is on
- * the next cache line to worker 0, we pad this out to three cache lines.
- * Only 64-bits of the memory is actually used though.
- */
-union rte_distributor_buffer {
- volatile int64_t bufptr64;
- char pad[RTE_CACHE_LINE_SIZE*3];
-} __rte_cache_aligned;
-
-struct rte_distributor_backlog {
- unsigned start;
- unsigned count;
- int64_t pkts[RTE_DISTRIB_BACKLOG_SIZE];
-};
+TAILQ_HEAD(rte_dist_burst_list, rte_distributor);
-struct rte_distributor_returned_pkts {
- unsigned start;
- unsigned count;
- struct rte_mbuf *mbufs[RTE_DISTRIB_MAX_RETURNS];
+static struct rte_tailq_elem rte_dist_burst_tailq = {
+ .name = "RTE_DIST_BURST",
};
+EAL_REGISTER_TAILQ(rte_dist_burst_tailq)
-struct rte_distributor {
- TAILQ_ENTRY(rte_distributor) next; /**< Next in list. */
-
- char name[RTE_DISTRIBUTOR_NAMESIZE]; /**< Name of the ring. */
- unsigned num_workers; /**< Number of workers polling */
-
- uint32_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS];
- /**< Tracks the tag being processed per core */
- uint64_t in_flight_bitmask;
- /**< on/off bits for in-flight tags.
- * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64 then
- * the bitmask has to expand.
- */
+/**** APIs called by workers ****/
- struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS];
+/**** Burst Packet APIs called by workers ****/
- union rte_distributor_buffer bufs[RTE_DISTRIB_MAX_WORKERS];
+void
+rte_distributor_request_pkt_v1705(struct rte_distributor *d,
+ unsigned int worker_id, struct rte_mbuf **oldpkt,
+ unsigned int count)
+{
+ struct rte_distributor_buffer *buf = &(d->bufs[worker_id]);
+ unsigned int i;
- struct rte_distributor_returned_pkts returns;
-};
+ volatile int64_t *retptr64;
-TAILQ_HEAD(rte_distributor_list, rte_distributor);
+ if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
+ rte_distributor_request_pkt_v20(d->d_v20,
+ worker_id, oldpkt[0]);
+ return;
+ }
-static struct rte_tailq_elem rte_distributor_tailq = {
- .name = "RTE_DISTRIBUTOR",
-};
-EAL_REGISTER_TAILQ(rte_distributor_tailq)
+ retptr64 = &(buf->retptr64[0]);
+ /* Spin while handshake bits are set (scheduler clears it) */
+ while (unlikely(*retptr64 & RTE_DISTRIB_GET_BUF)) {
+ rte_pause();
+ uint64_t t = rte_rdtsc()+100;
-/**** APIs called by workers ****/
+ while (rte_rdtsc() < t)
+ rte_pause();
+ }
-void
-rte_distributor_request_pkt(struct rte_distributor *d,
- unsigned worker_id, struct rte_mbuf *oldpkt)
-{
- union rte_distributor_buffer *buf = &d->bufs[worker_id];
- int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
- | RTE_DISTRIB_GET_BUF;
- while (unlikely(buf->bufptr64 & RTE_DISTRIB_FLAGS_MASK))
- rte_pause();
- buf->bufptr64 = req;
+ /*
+ * OK, if we've got here, then the scheduler has just cleared the
+ * handshake bits. Populate the retptrs with returning packets.
+ */
+
+ for (i = count; i < RTE_DIST_BURST_SIZE; i++)
+ buf->retptr64[i] = 0;
+
+ /* Set Return bit for each packet returned */
+ for (i = count; i-- > 0; )
+ buf->retptr64[i] =
+ (((int64_t)(uintptr_t)(oldpkt[i])) <<
+ RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;
+
+ /*
+ * Finally, set the GET_BUF to signal to distributor that cache
+ * line is ready for processing
+ */
+ *retptr64 |= RTE_DISTRIB_GET_BUF;
}
+BIND_DEFAULT_SYMBOL(rte_distributor_request_pkt, _v1705, 17.05);
+MAP_STATIC_SYMBOL(void rte_distributor_request_pkt(struct rte_distributor *d,
+ unsigned int worker_id, struct rte_mbuf **oldpkt,
+ unsigned int count),
+ rte_distributor_request_pkt_v1705);
-struct rte_mbuf *
-rte_distributor_poll_pkt(struct rte_distributor *d,
- unsigned worker_id)
+int
+rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
+ unsigned int worker_id, struct rte_mbuf **pkts)
{
- union rte_distributor_buffer *buf = &d->bufs[worker_id];
- if (buf->bufptr64 & RTE_DISTRIB_GET_BUF)
- return NULL;
+ struct rte_distributor_buffer *buf = &d->bufs[worker_id];
+ uint64_t ret;
+ int count = 0;
+ unsigned int i;
+
+ if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
+ pkts[0] = rte_distributor_poll_pkt_v20(d->d_v20, worker_id);
+ return (pkts[0]) ? 1 : 0;
+ }
+
+ /* If bit is set, return */
+ if (buf->bufptr64[0] & RTE_DISTRIB_GET_BUF)
+ return -1;
/* since bufptr64 is signed, this should be an arithmetic shift */
- int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
- return (struct rte_mbuf *)((uintptr_t)ret);
+ for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
+ if (likely(buf->bufptr64[i] & RTE_DISTRIB_VALID_BUF)) {
+ ret = buf->bufptr64[i] >> RTE_DISTRIB_FLAG_BITS;
+ pkts[count++] = (struct rte_mbuf *)((uintptr_t)(ret));
+ }
+ }
+
+ /*
+ * so now we've got the contents of the cacheline into an array of
+ * mbuf pointers, so toggle the bit so scheduler can start working
+ * on the next cacheline while we're working.
+ */
+ buf->bufptr64[0] |= RTE_DISTRIB_GET_BUF;
+
+ return count;
}
+BIND_DEFAULT_SYMBOL(rte_distributor_poll_pkt, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_distributor_poll_pkt(struct rte_distributor *d,
+ unsigned int worker_id, struct rte_mbuf **pkts),
+ rte_distributor_poll_pkt_v1705);
-struct rte_mbuf *
-rte_distributor_get_pkt(struct rte_distributor *d,
- unsigned worker_id, struct rte_mbuf *oldpkt)
+int
+rte_distributor_get_pkt_v1705(struct rte_distributor *d,
+ unsigned int worker_id, struct rte_mbuf **pkts,
+ struct rte_mbuf **oldpkt, unsigned int return_count)
{
- struct rte_mbuf *ret;
- rte_distributor_request_pkt(d, worker_id, oldpkt);
- while ((ret = rte_distributor_poll_pkt(d, worker_id)) == NULL)
- rte_pause();
- return ret;
+ int count;
+
+ if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
+ if (return_count <= 1) {
+ pkts[0] = rte_distributor_get_pkt_v20(d->d_v20,
+ worker_id, oldpkt[0]);
+ return (pkts[0]) ? 1 : 0;
+ } else
+ return -EINVAL;
+ }
+
+ rte_distributor_request_pkt(d, worker_id, oldpkt, return_count);
+
+ count = rte_distributor_poll_pkt(d, worker_id, pkts);
+ while (count == -1) {
+ uint64_t t = rte_rdtsc() + 100;
+
+ while (rte_rdtsc() < t)
+ rte_pause();
+
+ count = rte_distributor_poll_pkt(d, worker_id, pkts);
+ }
+ return count;
}
+BIND_DEFAULT_SYMBOL(rte_distributor_get_pkt, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_distributor_get_pkt(struct rte_distributor *d,
+ unsigned int worker_id, struct rte_mbuf **pkts,
+ struct rte_mbuf **oldpkt, unsigned int return_count),
+ rte_distributor_get_pkt_v1705);
int
-rte_distributor_return_pkt(struct rte_distributor *d,
- unsigned worker_id, struct rte_mbuf *oldpkt)
+rte_distributor_return_pkt_v1705(struct rte_distributor *d,
+ unsigned int worker_id, struct rte_mbuf **oldpkt, int num)
{
- union rte_distributor_buffer *buf = &d->bufs[worker_id];
- uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
- | RTE_DISTRIB_RETURN_BUF;
- buf->bufptr64 = req;
- return 0;
-}
+ struct rte_distributor_buffer *buf = &d->bufs[worker_id];
+ unsigned int i;
+
+ if (unlikely(d->alg_type == RTE_DIST_ALG_SINGLE)) {
+ if (num == 1)
+ return rte_distributor_return_pkt_v20(d->d_v20,
+ worker_id, oldpkt[0]);
+ else
+ return -EINVAL;
+ }
-/**** APIs called on distributor core ***/
+ for (i = 0; i < RTE_DIST_BURST_SIZE; i++)
+ /* Switch off the return bit first */
+ buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
-/* as name suggests, adds a packet to the backlog for a particular worker */
-static int
-add_to_backlog(struct rte_distributor_backlog *bl, int64_t item)
-{
- if (bl->count == RTE_DISTRIB_BACKLOG_SIZE)
- return -1;
+ for (i = num; i-- > 0; )
+ buf->retptr64[i] = (((int64_t)(uintptr_t)oldpkt[i]) <<
+ RTE_DISTRIB_FLAG_BITS) | RTE_DISTRIB_RETURN_BUF;
+
+ /* set the GET_BUF but even if we got no returns */
+ buf->retptr64[0] |= RTE_DISTRIB_GET_BUF;
- bl->pkts[(bl->start + bl->count++) & (RTE_DISTRIB_BACKLOG_MASK)]
- = item;
return 0;
}
+BIND_DEFAULT_SYMBOL(rte_distributor_return_pkt, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_distributor_return_pkt(struct rte_distributor *d,
+ unsigned int worker_id, struct rte_mbuf **oldpkt, int num),
+ rte_distributor_return_pkt_v1705);
-/* takes the next packet for a worker off the backlog */
-static int64_t
-backlog_pop(struct rte_distributor_backlog *bl)
-{
- bl->count--;
- return bl->pkts[bl->start++ & RTE_DISTRIB_BACKLOG_MASK];
-}
+/**** APIs called on distributor core ***/
/* stores a packet returned from a worker inside the returns array */
static inline void
store_return(uintptr_t oldbuf, struct rte_distributor *d,
- unsigned *ret_start, unsigned *ret_count)
+ unsigned int *ret_start, unsigned int *ret_count)
{
- /* store returns in a circular buffer - code is branch-free */
+ if (!oldbuf)
+ return;
+ /* store returns in a circular buffer */
d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
= (void *)oldbuf;
- *ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
- *ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
+ *ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK);
+ *ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK);
}
-static inline void
-handle_worker_shutdown(struct rte_distributor *d, unsigned wkr)
+/*
+ * Match then flow_ids (tags) of the incoming packets to the flow_ids
+ * of the inflight packets (both inflight on the workers and in each worker
+ * backlog). This will then allow us to pin those packets to the relevant
+ * workers to give us our atomic flow pinning.
+ */
+void
+find_match_scalar(struct rte_distributor *d,
+ uint16_t *data_ptr,
+ uint16_t *output_ptr)
{
- d->in_flight_tags[wkr] = 0;
- d->in_flight_bitmask &= ~(1UL << wkr);
- d->bufs[wkr].bufptr64 = 0;
- if (unlikely(d->backlog[wkr].count != 0)) {
- /* On return of a packet, we need to move the
- * queued packets for this core elsewhere.
- * Easiest solution is to set things up for
- * a recursive call. That will cause those
- * packets to be queued up for the next free
- * core, i.e. it will return as soon as a
- * core becomes free to accept the first
- * packet, as subsequent ones will be added to
- * the backlog for that core.
- */
- struct rte_mbuf *pkts[RTE_DISTRIB_BACKLOG_SIZE];
- unsigned i;
- struct rte_distributor_backlog *bl = &d->backlog[wkr];
-
- for (i = 0; i < bl->count; i++) {
- unsigned idx = (bl->start + i) &
- RTE_DISTRIB_BACKLOG_MASK;
- pkts[i] = (void *)((uintptr_t)(bl->pkts[idx] >>
+ struct rte_distributor_backlog *bl;
+ uint16_t i, j, w;
+
+ /*
+ * Function overview:
+ * 1. Loop through all worker ID's
+ * 2. Compare the current inflights to the incoming tags
+ * 3. Compare the current backlog to the incoming tags
+ * 4. Add any matches to the output
+ */
+
+ for (j = 0 ; j < RTE_DIST_BURST_SIZE; j++)
+ output_ptr[j] = 0;
+
+ for (i = 0; i < d->num_workers; i++) {
+ bl = &d->backlog[i];
+
+ for (j = 0; j < RTE_DIST_BURST_SIZE ; j++)
+ for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
+ if (d->in_flight_tags[i][j] == data_ptr[w]) {
+ output_ptr[j] = i+1;
+ break;
+ }
+ for (j = 0; j < RTE_DIST_BURST_SIZE; j++)
+ for (w = 0; w < RTE_DIST_BURST_SIZE; w++)
+ if (bl->tags[j] == data_ptr[w]) {
+ output_ptr[j] = i+1;
+ break;
+ }
+ }
+
+ /*
+ * At this stage, the output contains 8 16-bit values, with
+ * each non-zero value containing the worker ID on which the
+ * corresponding flow is pinned to.
+ */
+}
+
+
+/*
+ * When the handshake bits indicate that there are packets coming
+ * back from the worker, this function is called to copy and store
+ * the valid returned pointers (store_return).
+ */
+static unsigned int
+handle_returns(struct rte_distributor *d, unsigned int wkr)
+{
+ struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
+ uintptr_t oldbuf;
+ unsigned int ret_start = d->returns.start,
+ ret_count = d->returns.count;
+ unsigned int count = 0;
+ unsigned int i;
+
+ if (buf->retptr64[0] & RTE_DISTRIB_GET_BUF) {
+ for (i = 0; i < RTE_DIST_BURST_SIZE; i++) {
+ if (buf->retptr64[i] & RTE_DISTRIB_RETURN_BUF) {
+ oldbuf = ((uintptr_t)(buf->retptr64[i] >>
RTE_DISTRIB_FLAG_BITS));
+ /* store returns in a circular buffer */
+ store_return(oldbuf, d, &ret_start, &ret_count);
+ count++;
+ buf->retptr64[i] &= ~RTE_DISTRIB_RETURN_BUF;
+ }
}
- /* recursive call.
- * Note that the tags were set before first level call
- * to rte_distributor_process.
- */
- rte_distributor_process(d, pkts, i);
- bl->count = bl->start = 0;
+ d->returns.start = ret_start;
+ d->returns.count = ret_count;
+ /* Clear for the worker to populate with more returns */
+ buf->retptr64[0] = 0;
}
+ return count;
}
-/* this function is called when process() fn is called without any new
- * packets. It goes through all the workers and clears any returned packets
- * to do a partial flush.
+/*
+ * This function releases a burst (cache line) to a worker.
+ * It is called from the process function when a cacheline is
+ * full to make room for more packets for that worker, or when
+ * all packets have been assigned to bursts and need to be flushed
+ * to the workers.
+ * It also needs to wait for any outstanding packets from the worker
+ * before sending out new packets.
*/
-static int
-process_returns(struct rte_distributor *d)
+static unsigned int
+release(struct rte_distributor *d, unsigned int wkr)
{
- unsigned wkr;
- unsigned flushed = 0;
- unsigned ret_start = d->returns.start,
- ret_count = d->returns.count;
+ struct rte_distributor_buffer *buf = &(d->bufs[wkr]);
+ unsigned int i;
- for (wkr = 0; wkr < d->num_workers; wkr++) {
+ while (!(d->bufs[wkr].bufptr64[0] & RTE_DISTRIB_GET_BUF))
+ rte_pause();
- const int64_t data = d->bufs[wkr].bufptr64;
- uintptr_t oldbuf = 0;
+ handle_returns(d, wkr);
- if (data & RTE_DISTRIB_GET_BUF) {
- flushed++;
- if (d->backlog[wkr].count)
- d->bufs[wkr].bufptr64 =
- backlog_pop(&d->backlog[wkr]);
- else {
- d->bufs[wkr].bufptr64 = RTE_DISTRIB_GET_BUF;
- d->in_flight_tags[wkr] = 0;
- d->in_flight_bitmask &= ~(1UL << wkr);
- }
- oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
- } else if (data & RTE_DISTRIB_RETURN_BUF) {
- handle_worker_shutdown(d, wkr);
- oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
- }
+ buf->count = 0;
- store_return(oldbuf, d, &ret_start, &ret_count);
+ for (i = 0; i < d->backlog[wkr].count; i++) {
+ d->bufs[wkr].bufptr64[i] = d->backlog[wkr].pkts[i] |
+ RTE_DISTRIB_GET_BUF | RTE_DISTRIB_VALID_BUF;
+ d->in_flight_tags[wkr][i] = d->backlog[wkr].tags[i];
+ }
+ buf->count = i;
+ for ( ; i < RTE_DIST_BURST_SIZE ; i++) {
+ buf->bufptr64[i] = RTE_DISTRIB_GET_BUF;
+ d->in_flight_tags[wkr][i] = 0;
}
- d->returns.start = ret_start;
- d->returns.count = ret_count;
+ d->backlog[wkr].count = 0;
+
+ /* Clear the GET bit */
+ buf->bufptr64[0] &= ~RTE_DISTRIB_GET_BUF;
+ return buf->count;
- return flushed;
}
+
/* process a set of packets to distribute them to workers */
int
-rte_distributor_process(struct rte_distributor *d,
- struct rte_mbuf **mbufs, unsigned num_mbufs)
+rte_distributor_process_v1705(struct rte_distributor *d,
+ struct rte_mbuf **mbufs, unsigned int num_mbufs)
{
- unsigned next_idx = 0;
- unsigned wkr = 0;
+ unsigned int next_idx = 0;
+ static unsigned int wkr;
struct rte_mbuf *next_mb = NULL;
int64_t next_value = 0;
- uint32_t new_tag = 0;
- unsigned ret_start = d->returns.start,
- ret_count = d->returns.count;
+ uint16_t new_tag = 0;
+ uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
+ unsigned int i, j, w, wid;
+
+ if (d->alg_type == RTE_DIST_ALG_SINGLE) {
+ /* Call the old API */
+ return rte_distributor_process_v20(d->d_v20, mbufs, num_mbufs);
+ }
+
+ if (unlikely(num_mbufs == 0)) {
+ /* Flush out all non-full cache-lines to workers. */
+ for (wid = 0 ; wid < d->num_workers; wid++) {
+ if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF)) {
+ release(d, wid);
+ handle_returns(d, wid);
+ }
+ }
+ return 0;
+ }
- if (unlikely(num_mbufs == 0))
- return process_returns(d);
+ while (next_idx < num_mbufs) {
+ uint16_t matches[RTE_DIST_BURST_SIZE];
+ unsigned int pkts;
- while (next_idx < num_mbufs || next_mb != NULL) {
+ if (d->bufs[wkr].bufptr64[0] & RTE_DISTRIB_GET_BUF)
+ d->bufs[wkr].count = 0;
- int64_t data = d->bufs[wkr].bufptr64;
- uintptr_t oldbuf = 0;
+ if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
+ pkts = num_mbufs - next_idx;
+ else
+ pkts = RTE_DIST_BURST_SIZE;
+
+ for (i = 0; i < pkts; i++) {
+ if (mbufs[next_idx + i]) {
+ /* flows have to be non-zero */
+ flows[i] = mbufs[next_idx + i]->hash.usr | 1;
+ } else
+ flows[i] = 0;
+ }
+ for (; i < RTE_DIST_BURST_SIZE; i++)
+ flows[i] = 0;
+
+ switch (d->dist_match_fn) {
+ case RTE_DIST_MATCH_VECTOR:
+ find_match_vec(d, &flows[0], &matches[0]);
+ break;
+ default:
+ find_match_scalar(d, &flows[0], &matches[0]);
+ }
+
+ /*
+ * Matches array now contain the intended worker ID (+1) of
+ * the incoming packets. Any zeroes need to be assigned
+ * workers.
+ */
+
+ for (j = 0; j < pkts; j++) {
- if (!next_mb) {
next_mb = mbufs[next_idx++];
- next_value = (((int64_t)(uintptr_t)next_mb)
- << RTE_DISTRIB_FLAG_BITS);
+ next_value = (((int64_t)(uintptr_t)next_mb) <<
+ RTE_DISTRIB_FLAG_BITS);
/*
* User is advocated to set tag vaue for each
* mbuf before calling rte_distributor_process.
* User defined tags are used to identify flows,
* or sessions.
*/
- new_tag = next_mb->hash.usr;
+ /* flows MUST be non-zero */
+ new_tag = (uint16_t)(next_mb->hash.usr) | 1;
/*
- * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64
- * then the size of match has to be expanded.
- */
- uint64_t match = 0;
- unsigned i;
- /*
- * to scan for a match use "xor" and "not" to get a 0/1
- * value, then use shifting to merge to single "match"
- * variable, where a one-bit indicates a match for the
- * worker given by the bit-position
+ * Uncommenting the next line will cause the find_match
+ * function to be optimised out, making this function
+ * do parallel (non-atomic) distribution
*/
- for (i = 0; i < d->num_workers; i++)
- match |= (!(d->in_flight_tags[i] ^ new_tag)
- << i);
-
- /* Only turned-on bits are considered as match */
- match &= d->in_flight_bitmask;
-
- if (match) {
- next_mb = NULL;
- unsigned worker = __builtin_ctzl(match);
- if (add_to_backlog(&d->backlog[worker],
- next_value) < 0)
- next_idx--;
+ /* matches[j] = 0; */
+
+ if (matches[j]) {
+ struct rte_distributor_backlog *bl =
+ &d->backlog[matches[j]-1];
+ if (unlikely(bl->count ==
+ RTE_DIST_BURST_SIZE)) {
+ release(d, matches[j]-1);
+ }
+
+ /* Add to worker that already has flow */
+ unsigned int idx = bl->count++;
+
+ bl->tags[idx] = new_tag;
+ bl->pkts[idx] = next_value;
+
+ } else {
+ struct rte_distributor_backlog *bl =
+ &d->backlog[wkr];
+ if (unlikely(bl->count ==
+ RTE_DIST_BURST_SIZE)) {
+ release(d, wkr);
+ }
+
+ /* Add to current worker worker */
+ unsigned int idx = bl->count++;
+
+ bl->tags[idx] = new_tag;
+ bl->pkts[idx] = next_value;
+ /*
+ * Now that we've just added an unpinned flow
+ * to a worker, we need to ensure that all
+ * other packets with that same flow will go
+ * to the same worker in this burst.
+ */
+ for (w = j; w < pkts; w++)
+ if (flows[w] == new_tag)
+ matches[w] = wkr+1;
}
}
-
- if ((data & RTE_DISTRIB_GET_BUF) &&
- (d->backlog[wkr].count || next_mb)) {
-
- if (d->backlog[wkr].count)
- d->bufs[wkr].bufptr64 =
- backlog_pop(&d->backlog[wkr]);
-
- else {
- d->bufs[wkr].bufptr64 = next_value;
- d->in_flight_tags[wkr] = new_tag;
- d->in_flight_bitmask |= (1UL << wkr);
- next_mb = NULL;
- }
- oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
- } else if (data & RTE_DISTRIB_RETURN_BUF) {
- handle_worker_shutdown(d, wkr);
- oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
- }
-
- /* store returns in a circular buffer */
- store_return(oldbuf, d, &ret_start, &ret_count);
-
- if (++wkr == d->num_workers)
+ wkr++;
+ if (wkr >= d->num_workers)
wkr = 0;
}
- /* to finish, check all workers for backlog and schedule work for them
- * if they are ready */
- for (wkr = 0; wkr < d->num_workers; wkr++)
- if (d->backlog[wkr].count &&
- (d->bufs[wkr].bufptr64 & RTE_DISTRIB_GET_BUF)) {
-
- int64_t oldbuf = d->bufs[wkr].bufptr64 >>
- RTE_DISTRIB_FLAG_BITS;
- store_return(oldbuf, d, &ret_start, &ret_count);
- d->bufs[wkr].bufptr64 = backlog_pop(&d->backlog[wkr]);
- }
+ /* Flush out all non-full cache-lines to workers. */
+ for (wid = 0 ; wid < d->num_workers; wid++)
+ if ((d->bufs[wid].bufptr64[0] & RTE_DISTRIB_GET_BUF))
+ release(d, wid);
- d->returns.start = ret_start;
- d->returns.count = ret_count;
return num_mbufs;
}
+BIND_DEFAULT_SYMBOL(rte_distributor_process, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_distributor_process(struct rte_distributor *d,
+ struct rte_mbuf **mbufs, unsigned int num_mbufs),
+ rte_distributor_process_v1705);
/* return to the caller, packets returned from workers */
int
-rte_distributor_returned_pkts(struct rte_distributor *d,
- struct rte_mbuf **mbufs, unsigned max_mbufs)
+rte_distributor_returned_pkts_v1705(struct rte_distributor *d,
+ struct rte_mbuf **mbufs, unsigned int max_mbufs)
{
struct rte_distributor_returned_pkts *returns = &d->returns;
- unsigned retval = (max_mbufs < returns->count) ?
+ unsigned int retval = (max_mbufs < returns->count) ?
max_mbufs : returns->count;
- unsigned i;
+ unsigned int i;
+
+ if (d->alg_type == RTE_DIST_ALG_SINGLE) {
+ /* Call the old API */
+ return rte_distributor_returned_pkts_v20(d->d_v20,
+ mbufs, max_mbufs);
+ }
for (i = 0; i < retval; i++) {
- unsigned idx = (returns->start + i) & RTE_DISTRIB_RETURNS_MASK;
+ unsigned int idx = (returns->start + i) &
+ RTE_DISTRIB_RETURNS_MASK;
+
mbufs[i] = returns->mbufs[idx];
}
returns->start += i;
@@ -404,15 +528,19 @@ rte_distributor_returned_pkts(struct rte_distributor *d,
return retval;
}
-
-/* return the number of packets in-flight in a distributor, i.e. packets
- * being workered on or queued up in a backlog. */
-static inline unsigned
+BIND_DEFAULT_SYMBOL(rte_distributor_returned_pkts, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_distributor_returned_pkts(struct rte_distributor *d,
+ struct rte_mbuf **mbufs, unsigned int max_mbufs),
+ rte_distributor_returned_pkts_v1705);
+
+/*
+ * Return the number of packets in-flight in a distributor, i.e. packets
+ * being workered on or queued up in a backlog.
+ */
+static inline unsigned int
total_outstanding(const struct rte_distributor *d)
{
- unsigned wkr, total_outstanding;
-
- total_outstanding = __builtin_popcountl(d->in_flight_bitmask);
+ unsigned int wkr, total_outstanding = 0;
for (wkr = 0; wkr < d->num_workers; wkr++)
total_outstanding += d->backlog[wkr].count;
@@ -420,45 +548,96 @@ total_outstanding(const struct rte_distributor *d)
return total_outstanding;
}
-/* flush the distributor, so that there are no outstanding packets in flight or
- * queued up. */
+/*
+ * Flush the distributor, so that there are no outstanding packets in flight or
+ * queued up.
+ */
int
-rte_distributor_flush(struct rte_distributor *d)
+rte_distributor_flush_v1705(struct rte_distributor *d)
{
- const unsigned flushed = total_outstanding(d);
+ unsigned int flushed;
+ unsigned int wkr;
+
+ if (d->alg_type == RTE_DIST_ALG_SINGLE) {
+ /* Call the old API */
+ return rte_distributor_flush_v20(d->d_v20);
+ }
+
+ flushed = total_outstanding(d);
while (total_outstanding(d) > 0)
rte_distributor_process(d, NULL, 0);
+ /*
+ * Send empty burst to all workers to allow them to exit
+ * gracefully, should they need to.
+ */
+ rte_distributor_process(d, NULL, 0);
+
+ for (wkr = 0; wkr < d->num_workers; wkr++)
+ handle_returns(d, wkr);
+
return flushed;
}
+BIND_DEFAULT_SYMBOL(rte_distributor_flush, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_distributor_flush(struct rte_distributor *d),
+ rte_distributor_flush_v1705);
/* clears the internal returns array in the distributor */
void
-rte_distributor_clear_returns(struct rte_distributor *d)
+rte_distributor_clear_returns_v1705(struct rte_distributor *d)
{
- d->returns.start = d->returns.count = 0;
-#ifndef __OPTIMIZE__
- memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
-#endif
+ unsigned int wkr;
+
+ if (d->alg_type == RTE_DIST_ALG_SINGLE) {
+ /* Call the old API */
+ rte_distributor_clear_returns_v20(d->d_v20);
+ return;
+ }
+
+ /* throw away returns, so workers can exit */
+ for (wkr = 0; wkr < d->num_workers; wkr++)
+ d->bufs[wkr].retptr64[0] = 0;
}
+BIND_DEFAULT_SYMBOL(rte_distributor_clear_returns, _v1705, 17.05);
+MAP_STATIC_SYMBOL(void rte_distributor_clear_returns(struct rte_distributor *d),
+ rte_distributor_clear_returns_v1705);
/* creates a distributor instance */
struct rte_distributor *
-rte_distributor_create(const char *name,
- unsigned socket_id,
- unsigned num_workers)
+rte_distributor_create_v1705(const char *name,
+ unsigned int socket_id,
+ unsigned int num_workers,
+ unsigned int alg_type)
{
struct rte_distributor *d;
- struct rte_distributor_list *distributor_list;
+ struct rte_dist_burst_list *dist_burst_list;
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
+ unsigned int i;
+
+ /* TODO Reorganise function properly around RTE_DIST_ALG_SINGLE/BURST */
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
- RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
- sizeof(d->in_flight_bitmask) * CHAR_BIT);
+
+ if (alg_type == RTE_DIST_ALG_SINGLE) {
+ d = malloc(sizeof(struct rte_distributor));
+ if (d == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ d->d_v20 = rte_distributor_create_v20(name,
+ socket_id, num_workers);
+ if (d->d_v20 == NULL) {
+ free(d);
+ /* rte_errno will have been set */
+ return NULL;
+ }
+ d->alg_type = alg_type;
+ return d;
+ }
if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
rte_errno = EINVAL;
@@ -475,13 +654,34 @@ rte_distributor_create(const char *name,
d = mz->addr;
snprintf(d->name, sizeof(d->name), "%s", name);
d->num_workers = num_workers;
+ d->alg_type = alg_type;
+
+#if defined(RTE_ARCH_X86)
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_2))
+ d->dist_match_fn = RTE_DIST_MATCH_VECTOR;
+ else
+#endif
+ d->dist_match_fn = RTE_DIST_MATCH_SCALAR;
+
+ /*
+ * Set up the backog tags so they're pointing at the second cache
+ * line for performance during flow matching
+ */
+ for (i = 0 ; i < num_workers ; i++)
+ d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];
+
+ dist_burst_list = RTE_TAILQ_CAST(rte_dist_burst_tailq.head,
+ rte_dist_burst_list);
- distributor_list = RTE_TAILQ_CAST(rte_distributor_tailq.head,
- rte_distributor_list);
rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
- TAILQ_INSERT_TAIL(distributor_list, d, next);
+ TAILQ_INSERT_TAIL(dist_burst_list, d, next);
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
return d;
}
+BIND_DEFAULT_SYMBOL(rte_distributor_create, _v1705, 17.05);
+MAP_STATIC_SYMBOL(struct rte_distributor *rte_distributor_create(
+ const char *name, unsigned int socket_id,
+ unsigned int num_workers, unsigned int alg_type),
+ rte_distributor_create_v1705);
diff --git a/lib/librte_distributor/rte_distributor.h b/lib/librte_distributor/rte_distributor.h
index 7d36bc8a..9b9efdbe 100644
--- a/lib/librte_distributor/rte_distributor.h
+++ b/lib/librte_distributor/rte_distributor.h
@@ -1,8 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,8 +30,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _RTE_DISTRIBUTE_H_
-#define _RTE_DISTRIBUTE_H_
+#ifndef _RTE_DISTRIBUTOR_H_
+#define _RTE_DISTRIBUTOR_H_
/**
* @file
@@ -46,7 +45,12 @@
extern "C" {
#endif
-#define RTE_DISTRIBUTOR_NAMESIZE 32 /**< Length of name for instance */
+/* Type of distribution (burst/single) */
+enum rte_distributor_alg_type {
+ RTE_DIST_ALG_BURST = 0,
+ RTE_DIST_ALG_SINGLE,
+ RTE_DIST_NUM_ALG_TYPES
+};
struct rte_distributor;
struct rte_mbuf;
@@ -64,12 +68,17 @@ struct rte_mbuf;
* @param num_workers
* The maximum number of workers that will request packets from this
* distributor
+ * @param alg_type
+ * Call the legacy API, or use the new burst API. legacy uses 32-bit
+ * flow ID, and works on a single packet at a time. Latest uses 15-
+ * bit flow ID and works on up to 8 packets at a time to worers.
* @return
* The newly created distributor instance
*/
struct rte_distributor *
-rte_distributor_create(const char *name, unsigned socket_id,
- unsigned num_workers);
+rte_distributor_create(const char *name, unsigned int socket_id,
+ unsigned int num_workers,
+ unsigned int alg_type);
/* *** APIS to be called on the distributor lcore *** */
/*
@@ -85,7 +94,8 @@ rte_distributor_create(const char *name, unsigned socket_id,
/**
* Process a set of packets by distributing them among workers that request
* packets. The distributor will ensure that no two packets that have the
- * same flow id, or tag, in the mbuf will be procesed at the same time.
+ * same flow id, or tag, in the mbuf will be processed on different cores at
+ * the same time.
*
* The user is advocated to set tag for each mbuf before calling this function.
* If user doesn't set the tag, the tag value can be various values depending on
@@ -104,7 +114,7 @@ rte_distributor_create(const char *name, unsigned socket_id,
*/
int
rte_distributor_process(struct rte_distributor *d,
- struct rte_mbuf **mbufs, unsigned num_mbufs);
+ struct rte_mbuf **mbufs, unsigned int num_mbufs);
/**
* Get a set of mbufs that have been returned to the distributor by workers
@@ -122,7 +132,7 @@ rte_distributor_process(struct rte_distributor *d,
*/
int
rte_distributor_returned_pkts(struct rte_distributor *d,
- struct rte_mbuf **mbufs, unsigned max_mbufs);
+ struct rte_mbuf **mbufs, unsigned int max_mbufs);
/**
* Flush the distributor component, so that there are no in-flight or
@@ -161,7 +171,7 @@ rte_distributor_clear_returns(struct rte_distributor *d);
*/
/**
- * API called by a worker to get a new packet to process. Any previous packet
+ * API called by a worker to get new packets to process. Any previous packets
* given to the worker is assumed to have completed processing, and may be
* optionally returned to the distributor via the oldpkt parameter.
*
@@ -170,15 +180,20 @@ rte_distributor_clear_returns(struct rte_distributor *d);
* @param worker_id
* The worker instance number to use - must be less that num_workers passed
* at distributor creation time.
+ * @param pkts
+ * The mbufs pointer array to be filled in (up to 8 packets)
* @param oldpkt
* The previous packet, if any, being processed by the worker
+ * @param retcount
+ * The number of packets being returned
*
* @return
- * A new packet to be processed by the worker thread.
+ * The number of packets in the pkts array
*/
-struct rte_mbuf *
+int
rte_distributor_get_pkt(struct rte_distributor *d,
- unsigned worker_id, struct rte_mbuf *oldpkt);
+ unsigned int worker_id, struct rte_mbuf **pkts,
+ struct rte_mbuf **oldpkt, unsigned int retcount);
/**
* API called by a worker to return a completed packet without requesting a
@@ -189,23 +204,25 @@ rte_distributor_get_pkt(struct rte_distributor *d,
* @param worker_id
* The worker instance number to use - must be less that num_workers passed
* at distributor creation time.
- * @param mbuf
- * The previous packet being processed by the worker
+ * @param oldpkt
+ * The previous packets being processed by the worker
+ * @param num
+ * The number of packets in the oldpkt array
*/
int
-rte_distributor_return_pkt(struct rte_distributor *d, unsigned worker_id,
- struct rte_mbuf *mbuf);
+rte_distributor_return_pkt(struct rte_distributor *d,
+ unsigned int worker_id, struct rte_mbuf **oldpkt, int num);
/**
* API called by a worker to request a new packet to process.
* Any previous packet given to the worker is assumed to have completed
* processing, and may be optionally returned to the distributor via
* the oldpkt parameter.
- * Unlike rte_distributor_get_pkt(), this function does not wait for a new
- * packet to be provided by the distributor.
+ * Unlike rte_distributor_get_pkt_burst(), this function does not wait for a
+ * new packet to be provided by the distributor.
*
- * NOTE: after calling this function, rte_distributor_poll_pkt() should
- * be used to poll for the packet requested. The rte_distributor_get_pkt()
+ * NOTE: after calling this function, rte_distributor_poll_pkt_burst() should
+ * be used to poll for the packet requested. The rte_distributor_get_pkt_burst()
* API should *not* be used to try and retrieve the new packet.
*
* @param d
@@ -214,11 +231,14 @@ rte_distributor_return_pkt(struct rte_distributor *d, unsigned worker_id,
* The worker instance number to use - must be less that num_workers passed
* at distributor creation time.
* @param oldpkt
- * The previous packet, if any, being processed by the worker
+ * The returning packets, if any, processed by the worker
+ * @param count
+ * The number of returning packets
*/
void
rte_distributor_request_pkt(struct rte_distributor *d,
- unsigned worker_id, struct rte_mbuf *oldpkt);
+ unsigned int worker_id, struct rte_mbuf **oldpkt,
+ unsigned int count);
/**
* API called by a worker to check for a new packet that was previously
@@ -231,14 +251,16 @@ rte_distributor_request_pkt(struct rte_distributor *d,
* @param worker_id
* The worker instance number to use - must be less that num_workers passed
* at distributor creation time.
+ * @param mbufs
+ * The array of mbufs being given to the worker
*
* @return
- * A new packet to be processed by the worker thread, or NULL if no
+ * The number of packets being given to the worker thread, zero if no
* packet is yet available.
*/
-struct rte_mbuf *
+int
rte_distributor_poll_pkt(struct rte_distributor *d,
- unsigned worker_id);
+ unsigned int worker_id, struct rte_mbuf **mbufs);
#ifdef __cplusplus
}
diff --git a/lib/librte_distributor/rte_distributor_match_generic.c b/lib/librte_distributor/rte_distributor_match_generic.c
new file mode 100644
index 00000000..4925a788
--- /dev/null
+++ b/lib/librte_distributor/rte_distributor_match_generic.c
@@ -0,0 +1,43 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_mbuf.h>
+#include "rte_distributor_private.h"
+#include "rte_distributor.h"
+
+void
+find_match_vec(struct rte_distributor *d,
+ uint16_t *data_ptr,
+ uint16_t *output_ptr)
+{
+ find_match_scalar(d, data_ptr, output_ptr);
+}
diff --git a/lib/librte_distributor/rte_distributor_match_sse.c b/lib/librte_distributor/rte_distributor_match_sse.c
new file mode 100644
index 00000000..44935a69
--- /dev/null
+++ b/lib/librte_distributor/rte_distributor_match_sse.c
@@ -0,0 +1,114 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_mbuf.h>
+#include "rte_distributor_private.h"
+#include "rte_distributor.h"
+#include "smmintrin.h"
+#include "nmmintrin.h"
+
+
+void
+find_match_vec(struct rte_distributor *d,
+ uint16_t *data_ptr,
+ uint16_t *output_ptr)
+{
+ /* Setup */
+ __m128i incoming_fids;
+ __m128i inflight_fids;
+ __m128i preflight_fids;
+ __m128i wkr;
+ __m128i mask1;
+ __m128i mask2;
+ __m128i output;
+ struct rte_distributor_backlog *bl;
+ uint16_t i;
+
+ /*
+ * Function overview:
+ * 2. Loop through all worker ID's
+ * 2a. Load the current inflights for that worker into an xmm reg
+ * 2b. Load the current backlog for that worker into an xmm reg
+ * 2c. use cmpestrm to intersect flow_ids with backlog and inflights
+ * 2d. Add any matches to the output
+ * 3. Write the output xmm (matching worker ids).
+ */
+
+
+ output = _mm_set1_epi16(0);
+ incoming_fids = _mm_load_si128((__m128i *)data_ptr);
+
+ for (i = 0; i < d->num_workers; i++) {
+ bl = &d->backlog[i];
+
+ inflight_fids =
+ _mm_load_si128((__m128i *)&(d->in_flight_tags[i]));
+ preflight_fids =
+ _mm_load_si128((__m128i *)(bl->tags));
+
+ /*
+ * Any incoming_fid that exists anywhere in inflight_fids will
+ * have 0xffff in same position of the mask as the incoming fid
+ * Example (shortened to bytes for brevity):
+ * incoming_fids 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08
+ * inflight_fids 0x03 0x05 0x07 0x00 0x00 0x00 0x00 0x00
+ * mask 0x00 0x00 0xff 0x00 0xff 0x00 0xff 0x00
+ */
+
+ mask1 = _mm_cmpestrm(inflight_fids, 8, incoming_fids, 8,
+ _SIDD_UWORD_OPS |
+ _SIDD_CMP_EQUAL_ANY |
+ _SIDD_UNIT_MASK);
+ mask2 = _mm_cmpestrm(preflight_fids, 8, incoming_fids, 8,
+ _SIDD_UWORD_OPS |
+ _SIDD_CMP_EQUAL_ANY |
+ _SIDD_UNIT_MASK);
+
+ mask1 = _mm_or_si128(mask1, mask2);
+ /*
+ * Now mask contains 0xffff where there's a match.
+ * Next we need to store the worker_id in the relevant position
+ * in the output.
+ */
+
+ wkr = _mm_set1_epi16(i+1);
+ mask1 = _mm_and_si128(mask1, wkr);
+ output = _mm_or_si128(mask1, output);
+ }
+
+ /*
+ * At this stage, the output 128-bit contains 8 16-bit values, with
+ * each non-zero value containing the worker ID on which the
+ * corresponding flow is pinned to.
+ */
+ _mm_store_si128((__m128i *)output_ptr, output);
+}
diff --git a/lib/librte_distributor/rte_distributor_private.h b/lib/librte_distributor/rte_distributor_private.h
new file mode 100644
index 00000000..250b23e1
--- /dev/null
+++ b/lib/librte_distributor/rte_distributor_private.h
@@ -0,0 +1,202 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_DIST_PRIV_H_
+#define _RTE_DIST_PRIV_H_
+
+/**
+ * @file
+ * RTE distributor
+ *
+ * The distributor is a component which is designed to pass packets
+ * one-at-a-time to workers, with dynamic load balancing.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define NO_FLAGS 0
+#define RTE_DISTRIB_PREFIX "DT_"
+
+/*
+ * We will use the bottom four bits of pointer for flags, shifting out
+ * the top four bits to make room (since a 64-bit pointer actually only uses
+ * 48 bits). An arithmetic-right-shift will then appropriately restore the
+ * original pointer value with proper sign extension into the top bits.
+ */
+#define RTE_DISTRIB_FLAG_BITS 4
+#define RTE_DISTRIB_FLAGS_MASK (0x0F)
+#define RTE_DISTRIB_NO_BUF 0 /**< empty flags: no buffer requested */
+#define RTE_DISTRIB_GET_BUF (1) /**< worker requests a buffer, returns old */
+#define RTE_DISTRIB_RETURN_BUF (2) /**< worker returns a buffer, no request */
+#define RTE_DISTRIB_VALID_BUF (4) /**< set if bufptr contains ptr */
+
+#define RTE_DISTRIB_BACKLOG_SIZE 8
+#define RTE_DISTRIB_BACKLOG_MASK (RTE_DISTRIB_BACKLOG_SIZE - 1)
+
+#define RTE_DISTRIB_MAX_RETURNS 128
+#define RTE_DISTRIB_RETURNS_MASK (RTE_DISTRIB_MAX_RETURNS - 1)
+
+/**
+ * Maximum number of workers allowed.
+ * Be aware of increasing the limit, becaus it is limited by how we track
+ * in-flight tags. See in_flight_bitmask and rte_distributor_process
+ */
+#define RTE_DISTRIB_MAX_WORKERS 64
+
+#define RTE_DISTRIBUTOR_NAMESIZE 32 /**< Length of name for instance */
+
+/**
+ * Buffer structure used to pass the pointer data between cores. This is cache
+ * line aligned, but to improve performance and prevent adjacent cache-line
+ * prefetches of buffers for other workers, e.g. when worker 1's buffer is on
+ * the next cache line to worker 0, we pad this out to three cache lines.
+ * Only 64-bits of the memory is actually used though.
+ */
+union rte_distributor_buffer_v20 {
+ volatile int64_t bufptr64;
+ char pad[RTE_CACHE_LINE_SIZE*3];
+} __rte_cache_aligned;
+
+/*
+ * Transfer up to 8 mbufs at a time to/from workers, and
+ * flow matching algorithm optimised for 8 flow IDs at a time
+ */
+#define RTE_DIST_BURST_SIZE 8
+
+struct rte_distributor_backlog {
+ unsigned int start;
+ unsigned int count;
+ int64_t pkts[RTE_DIST_BURST_SIZE] __rte_cache_aligned;
+ uint16_t *tags; /* will point to second cacheline of inflights */
+} __rte_cache_aligned;
+
+
+struct rte_distributor_returned_pkts {
+ unsigned int start;
+ unsigned int count;
+ struct rte_mbuf *mbufs[RTE_DISTRIB_MAX_RETURNS];
+};
+
+struct rte_distributor_v20 {
+ TAILQ_ENTRY(rte_distributor_v20) next; /**< Next in list. */
+
+ char name[RTE_DISTRIBUTOR_NAMESIZE]; /**< Name of the ring. */
+ unsigned int num_workers; /**< Number of workers polling */
+
+ uint32_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS];
+ /**< Tracks the tag being processed per core */
+ uint64_t in_flight_bitmask;
+ /**< on/off bits for in-flight tags.
+ * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64 then
+ * the bitmask has to expand.
+ */
+
+ struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS];
+
+ union rte_distributor_buffer_v20 bufs[RTE_DISTRIB_MAX_WORKERS];
+
+ struct rte_distributor_returned_pkts returns;
+};
+
+/* All different signature compare functions */
+enum rte_distributor_match_function {
+ RTE_DIST_MATCH_SCALAR = 0,
+ RTE_DIST_MATCH_VECTOR,
+ RTE_DIST_NUM_MATCH_FNS
+};
+
+/**
+ * Buffer structure used to pass the pointer data between cores. This is cache
+ * line aligned, but to improve performance and prevent adjacent cache-line
+ * prefetches of buffers for other workers, e.g. when worker 1's buffer is on
+ * the next cache line to worker 0, we pad this out to two cache lines.
+ * We can pass up to 8 mbufs at a time in one cacheline.
+ * There is a separate cacheline for returns in the burst API.
+ */
+struct rte_distributor_buffer {
+ volatile int64_t bufptr64[RTE_DIST_BURST_SIZE]
+ __rte_cache_aligned; /* <= outgoing to worker */
+
+ int64_t pad1 __rte_cache_aligned; /* <= one cache line */
+
+ volatile int64_t retptr64[RTE_DIST_BURST_SIZE]
+ __rte_cache_aligned; /* <= incoming from worker */
+
+ int64_t pad2 __rte_cache_aligned; /* <= one cache line */
+
+ int count __rte_cache_aligned; /* <= number of current mbufs */
+};
+
+struct rte_distributor {
+ TAILQ_ENTRY(rte_distributor) next; /**< Next in list. */
+
+ char name[RTE_DISTRIBUTOR_NAMESIZE]; /**< Name of the ring. */
+ unsigned int num_workers; /**< Number of workers polling */
+ unsigned int alg_type; /**< Number of alg types */
+
+ /**>
+ * First cache line in the this array are the tags inflight
+ * on the worker core. Second cache line are the backlog
+ * that are going to go to the worker core.
+ */
+ uint16_t in_flight_tags[RTE_DISTRIB_MAX_WORKERS][RTE_DIST_BURST_SIZE*2]
+ __rte_cache_aligned;
+
+ struct rte_distributor_backlog backlog[RTE_DISTRIB_MAX_WORKERS]
+ __rte_cache_aligned;
+
+ struct rte_distributor_buffer bufs[RTE_DISTRIB_MAX_WORKERS];
+
+ struct rte_distributor_returned_pkts returns;
+
+ enum rte_distributor_match_function dist_match_fn;
+
+ struct rte_distributor_v20 *d_v20;
+};
+
+void
+find_match_scalar(struct rte_distributor *d,
+ uint16_t *data_ptr,
+ uint16_t *output_ptr);
+
+void
+find_match_vec(struct rte_distributor *d,
+ uint16_t *data_ptr,
+ uint16_t *output_ptr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/librte_distributor/rte_distributor_v1705.h b/lib/librte_distributor/rte_distributor_v1705.h
new file mode 100644
index 00000000..81b26915
--- /dev/null
+++ b/lib/librte_distributor/rte_distributor_v1705.h
@@ -0,0 +1,89 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_DISTRIB_V1705_H_
+#define _RTE_DISTRIB_V1705_H_
+
+/**
+ * @file
+ * RTE distributor
+ *
+ * The distributor is a component which is designed to pass packets
+ * one-at-a-time to workers, with dynamic load balancing.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct rte_distributor *
+rte_distributor_create_v1705(const char *name, unsigned int socket_id,
+ unsigned int num_workers,
+ unsigned int alg_type);
+
+int
+rte_distributor_process_v1705(struct rte_distributor *d,
+ struct rte_mbuf **mbufs, unsigned int num_mbufs);
+
+int
+rte_distributor_returned_pkts_v1705(struct rte_distributor *d,
+ struct rte_mbuf **mbufs, unsigned int max_mbufs);
+
+int
+rte_distributor_flush_v1705(struct rte_distributor *d);
+
+void
+rte_distributor_clear_returns_v1705(struct rte_distributor *d);
+
+int
+rte_distributor_get_pkt_v1705(struct rte_distributor *d,
+ unsigned int worker_id, struct rte_mbuf **pkts,
+ struct rte_mbuf **oldpkt, unsigned int retcount);
+
+int
+rte_distributor_return_pkt_v1705(struct rte_distributor *d,
+ unsigned int worker_id, struct rte_mbuf **oldpkt, int num);
+
+void
+rte_distributor_request_pkt_v1705(struct rte_distributor *d,
+ unsigned int worker_id, struct rte_mbuf **oldpkt,
+ unsigned int count);
+
+int
+rte_distributor_poll_pkt_v1705(struct rte_distributor *d,
+ unsigned int worker_id, struct rte_mbuf **mbufs);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/librte_distributor/rte_distributor_v20.c b/lib/librte_distributor/rte_distributor_v20.c
new file mode 100644
index 00000000..bb6c5d70
--- /dev/null
+++ b/lib/librte_distributor/rte_distributor_v20.c
@@ -0,0 +1,427 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <sys/queue.h>
+#include <string.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_errno.h>
+#include <rte_compat.h>
+#include <rte_string_fns.h>
+#include <rte_eal_memconfig.h>
+#include "rte_distributor_v20.h"
+#include "rte_distributor_private.h"
+
+TAILQ_HEAD(rte_distributor_list, rte_distributor_v20);
+
+static struct rte_tailq_elem rte_distributor_tailq = {
+ .name = "RTE_DISTRIBUTOR",
+};
+EAL_REGISTER_TAILQ(rte_distributor_tailq)
+
+/**** APIs called by workers ****/
+
+void
+rte_distributor_request_pkt_v20(struct rte_distributor_v20 *d,
+ unsigned worker_id, struct rte_mbuf *oldpkt)
+{
+ union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
+ int64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
+ | RTE_DISTRIB_GET_BUF;
+ while (unlikely(buf->bufptr64 & RTE_DISTRIB_FLAGS_MASK))
+ rte_pause();
+ buf->bufptr64 = req;
+}
+VERSION_SYMBOL(rte_distributor_request_pkt, _v20, 2.0);
+
+struct rte_mbuf *
+rte_distributor_poll_pkt_v20(struct rte_distributor_v20 *d,
+ unsigned worker_id)
+{
+ union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
+ if (buf->bufptr64 & RTE_DISTRIB_GET_BUF)
+ return NULL;
+
+ /* since bufptr64 is signed, this should be an arithmetic shift */
+ int64_t ret = buf->bufptr64 >> RTE_DISTRIB_FLAG_BITS;
+ return (struct rte_mbuf *)((uintptr_t)ret);
+}
+VERSION_SYMBOL(rte_distributor_poll_pkt, _v20, 2.0);
+
+struct rte_mbuf *
+rte_distributor_get_pkt_v20(struct rte_distributor_v20 *d,
+ unsigned worker_id, struct rte_mbuf *oldpkt)
+{
+ struct rte_mbuf *ret;
+ rte_distributor_request_pkt_v20(d, worker_id, oldpkt);
+ while ((ret = rte_distributor_poll_pkt_v20(d, worker_id)) == NULL)
+ rte_pause();
+ return ret;
+}
+VERSION_SYMBOL(rte_distributor_get_pkt, _v20, 2.0);
+
+int
+rte_distributor_return_pkt_v20(struct rte_distributor_v20 *d,
+ unsigned worker_id, struct rte_mbuf *oldpkt)
+{
+ union rte_distributor_buffer_v20 *buf = &d->bufs[worker_id];
+ uint64_t req = (((int64_t)(uintptr_t)oldpkt) << RTE_DISTRIB_FLAG_BITS)
+ | RTE_DISTRIB_RETURN_BUF;
+ buf->bufptr64 = req;
+ return 0;
+}
+VERSION_SYMBOL(rte_distributor_return_pkt, _v20, 2.0);
+
+/**** APIs called on distributor core ***/
+
+/* as name suggests, adds a packet to the backlog for a particular worker */
+static int
+add_to_backlog(struct rte_distributor_backlog *bl, int64_t item)
+{
+ if (bl->count == RTE_DISTRIB_BACKLOG_SIZE)
+ return -1;
+
+ bl->pkts[(bl->start + bl->count++) & (RTE_DISTRIB_BACKLOG_MASK)]
+ = item;
+ return 0;
+}
+
+/* takes the next packet for a worker off the backlog */
+static int64_t
+backlog_pop(struct rte_distributor_backlog *bl)
+{
+ bl->count--;
+ return bl->pkts[bl->start++ & RTE_DISTRIB_BACKLOG_MASK];
+}
+
+/* stores a packet returned from a worker inside the returns array */
+static inline void
+store_return(uintptr_t oldbuf, struct rte_distributor_v20 *d,
+ unsigned *ret_start, unsigned *ret_count)
+{
+ /* store returns in a circular buffer - code is branch-free */
+ d->returns.mbufs[(*ret_start + *ret_count) & RTE_DISTRIB_RETURNS_MASK]
+ = (void *)oldbuf;
+ *ret_start += (*ret_count == RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
+ *ret_count += (*ret_count != RTE_DISTRIB_RETURNS_MASK) & !!(oldbuf);
+}
+
+static inline void
+handle_worker_shutdown(struct rte_distributor_v20 *d, unsigned int wkr)
+{
+ d->in_flight_tags[wkr] = 0;
+ d->in_flight_bitmask &= ~(1UL << wkr);
+ d->bufs[wkr].bufptr64 = 0;
+ if (unlikely(d->backlog[wkr].count != 0)) {
+ /* On return of a packet, we need to move the
+ * queued packets for this core elsewhere.
+ * Easiest solution is to set things up for
+ * a recursive call. That will cause those
+ * packets to be queued up for the next free
+ * core, i.e. it will return as soon as a
+ * core becomes free to accept the first
+ * packet, as subsequent ones will be added to
+ * the backlog for that core.
+ */
+ struct rte_mbuf *pkts[RTE_DISTRIB_BACKLOG_SIZE];
+ unsigned i;
+ struct rte_distributor_backlog *bl = &d->backlog[wkr];
+
+ for (i = 0; i < bl->count; i++) {
+ unsigned idx = (bl->start + i) &
+ RTE_DISTRIB_BACKLOG_MASK;
+ pkts[i] = (void *)((uintptr_t)(bl->pkts[idx] >>
+ RTE_DISTRIB_FLAG_BITS));
+ }
+ /* recursive call.
+ * Note that the tags were set before first level call
+ * to rte_distributor_process.
+ */
+ rte_distributor_process_v20(d, pkts, i);
+ bl->count = bl->start = 0;
+ }
+}
+
+/* this function is called when process() fn is called without any new
+ * packets. It goes through all the workers and clears any returned packets
+ * to do a partial flush.
+ */
+static int
+process_returns(struct rte_distributor_v20 *d)
+{
+ unsigned wkr;
+ unsigned flushed = 0;
+ unsigned ret_start = d->returns.start,
+ ret_count = d->returns.count;
+
+ for (wkr = 0; wkr < d->num_workers; wkr++) {
+
+ const int64_t data = d->bufs[wkr].bufptr64;
+ uintptr_t oldbuf = 0;
+
+ if (data & RTE_DISTRIB_GET_BUF) {
+ flushed++;
+ if (d->backlog[wkr].count)
+ d->bufs[wkr].bufptr64 =
+ backlog_pop(&d->backlog[wkr]);
+ else {
+ d->bufs[wkr].bufptr64 = RTE_DISTRIB_GET_BUF;
+ d->in_flight_tags[wkr] = 0;
+ d->in_flight_bitmask &= ~(1UL << wkr);
+ }
+ oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
+ } else if (data & RTE_DISTRIB_RETURN_BUF) {
+ handle_worker_shutdown(d, wkr);
+ oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
+ }
+
+ store_return(oldbuf, d, &ret_start, &ret_count);
+ }
+
+ d->returns.start = ret_start;
+ d->returns.count = ret_count;
+
+ return flushed;
+}
+
+/* process a set of packets to distribute them to workers */
+int
+rte_distributor_process_v20(struct rte_distributor_v20 *d,
+ struct rte_mbuf **mbufs, unsigned num_mbufs)
+{
+ unsigned next_idx = 0;
+ unsigned wkr = 0;
+ struct rte_mbuf *next_mb = NULL;
+ int64_t next_value = 0;
+ uint32_t new_tag = 0;
+ unsigned ret_start = d->returns.start,
+ ret_count = d->returns.count;
+
+ if (unlikely(num_mbufs == 0))
+ return process_returns(d);
+
+ while (next_idx < num_mbufs || next_mb != NULL) {
+
+ int64_t data = d->bufs[wkr].bufptr64;
+ uintptr_t oldbuf = 0;
+
+ if (!next_mb) {
+ next_mb = mbufs[next_idx++];
+ next_value = (((int64_t)(uintptr_t)next_mb)
+ << RTE_DISTRIB_FLAG_BITS);
+ /*
+ * User is advocated to set tag vaue for each
+ * mbuf before calling rte_distributor_process.
+ * User defined tags are used to identify flows,
+ * or sessions.
+ */
+ new_tag = next_mb->hash.usr;
+
+ /*
+ * Note that if RTE_DISTRIB_MAX_WORKERS is larger than 64
+ * then the size of match has to be expanded.
+ */
+ uint64_t match = 0;
+ unsigned i;
+ /*
+ * to scan for a match use "xor" and "not" to get a 0/1
+ * value, then use shifting to merge to single "match"
+ * variable, where a one-bit indicates a match for the
+ * worker given by the bit-position
+ */
+ for (i = 0; i < d->num_workers; i++)
+ match |= (!(d->in_flight_tags[i] ^ new_tag)
+ << i);
+
+ /* Only turned-on bits are considered as match */
+ match &= d->in_flight_bitmask;
+
+ if (match) {
+ next_mb = NULL;
+ unsigned worker = __builtin_ctzl(match);
+ if (add_to_backlog(&d->backlog[worker],
+ next_value) < 0)
+ next_idx--;
+ }
+ }
+
+ if ((data & RTE_DISTRIB_GET_BUF) &&
+ (d->backlog[wkr].count || next_mb)) {
+
+ if (d->backlog[wkr].count)
+ d->bufs[wkr].bufptr64 =
+ backlog_pop(&d->backlog[wkr]);
+
+ else {
+ d->bufs[wkr].bufptr64 = next_value;
+ d->in_flight_tags[wkr] = new_tag;
+ d->in_flight_bitmask |= (1UL << wkr);
+ next_mb = NULL;
+ }
+ oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
+ } else if (data & RTE_DISTRIB_RETURN_BUF) {
+ handle_worker_shutdown(d, wkr);
+ oldbuf = data >> RTE_DISTRIB_FLAG_BITS;
+ }
+
+ /* store returns in a circular buffer */
+ store_return(oldbuf, d, &ret_start, &ret_count);
+
+ if (++wkr == d->num_workers)
+ wkr = 0;
+ }
+ /* to finish, check all workers for backlog and schedule work for them
+ * if they are ready */
+ for (wkr = 0; wkr < d->num_workers; wkr++)
+ if (d->backlog[wkr].count &&
+ (d->bufs[wkr].bufptr64 & RTE_DISTRIB_GET_BUF)) {
+
+ int64_t oldbuf = d->bufs[wkr].bufptr64 >>
+ RTE_DISTRIB_FLAG_BITS;
+ store_return(oldbuf, d, &ret_start, &ret_count);
+
+ d->bufs[wkr].bufptr64 = backlog_pop(&d->backlog[wkr]);
+ }
+
+ d->returns.start = ret_start;
+ d->returns.count = ret_count;
+ return num_mbufs;
+}
+VERSION_SYMBOL(rte_distributor_process, _v20, 2.0);
+
+/* return to the caller, packets returned from workers */
+int
+rte_distributor_returned_pkts_v20(struct rte_distributor_v20 *d,
+ struct rte_mbuf **mbufs, unsigned max_mbufs)
+{
+ struct rte_distributor_returned_pkts *returns = &d->returns;
+ unsigned retval = (max_mbufs < returns->count) ?
+ max_mbufs : returns->count;
+ unsigned i;
+
+ for (i = 0; i < retval; i++) {
+ unsigned idx = (returns->start + i) & RTE_DISTRIB_RETURNS_MASK;
+ mbufs[i] = returns->mbufs[idx];
+ }
+ returns->start += i;
+ returns->count -= i;
+
+ return retval;
+}
+VERSION_SYMBOL(rte_distributor_returned_pkts, _v20, 2.0);
+
+/* return the number of packets in-flight in a distributor, i.e. packets
+ * being workered on or queued up in a backlog. */
+static inline unsigned
+total_outstanding(const struct rte_distributor_v20 *d)
+{
+ unsigned wkr, total_outstanding;
+
+ total_outstanding = __builtin_popcountl(d->in_flight_bitmask);
+
+ for (wkr = 0; wkr < d->num_workers; wkr++)
+ total_outstanding += d->backlog[wkr].count;
+
+ return total_outstanding;
+}
+
+/* flush the distributor, so that there are no outstanding packets in flight or
+ * queued up. */
+int
+rte_distributor_flush_v20(struct rte_distributor_v20 *d)
+{
+ const unsigned flushed = total_outstanding(d);
+
+ while (total_outstanding(d) > 0)
+ rte_distributor_process_v20(d, NULL, 0);
+
+ return flushed;
+}
+VERSION_SYMBOL(rte_distributor_flush, _v20, 2.0);
+
+/* clears the internal returns array in the distributor */
+void
+rte_distributor_clear_returns_v20(struct rte_distributor_v20 *d)
+{
+ d->returns.start = d->returns.count = 0;
+#ifndef __OPTIMIZE__
+ memset(d->returns.mbufs, 0, sizeof(d->returns.mbufs));
+#endif
+}
+VERSION_SYMBOL(rte_distributor_clear_returns, _v20, 2.0);
+
+/* creates a distributor instance */
+struct rte_distributor_v20 *
+rte_distributor_create_v20(const char *name,
+ unsigned socket_id,
+ unsigned num_workers)
+{
+ struct rte_distributor_v20 *d;
+ struct rte_distributor_list *distributor_list;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ /* compilation-time checks */
+ RTE_BUILD_BUG_ON((sizeof(*d) & RTE_CACHE_LINE_MASK) != 0);
+ RTE_BUILD_BUG_ON((RTE_DISTRIB_MAX_WORKERS & 7) != 0);
+ RTE_BUILD_BUG_ON(RTE_DISTRIB_MAX_WORKERS >
+ sizeof(d->in_flight_bitmask) * CHAR_BIT);
+
+ if (name == NULL || num_workers >= RTE_DISTRIB_MAX_WORKERS) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ snprintf(mz_name, sizeof(mz_name), RTE_DISTRIB_PREFIX"%s", name);
+ mz = rte_memzone_reserve(mz_name, sizeof(*d), socket_id, NO_FLAGS);
+ if (mz == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ d = mz->addr;
+ snprintf(d->name, sizeof(d->name), "%s", name);
+ d->num_workers = num_workers;
+
+ distributor_list = RTE_TAILQ_CAST(rte_distributor_tailq.head,
+ rte_distributor_list);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ TAILQ_INSERT_TAIL(distributor_list, d, next);
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return d;
+}
+VERSION_SYMBOL(rte_distributor_create, _v20, 2.0);
diff --git a/lib/librte_distributor/rte_distributor_v20.h b/lib/librte_distributor/rte_distributor_v20.h
new file mode 100644
index 00000000..f02e6aac
--- /dev/null
+++ b/lib/librte_distributor/rte_distributor_v20.h
@@ -0,0 +1,247 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_DISTRIB_V20_H_
+#define _RTE_DISTRIB_V20_H_
+
+/**
+ * @file
+ * RTE distributor
+ *
+ * The distributor is a component which is designed to pass packets
+ * one-at-a-time to workers, with dynamic load balancing.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DISTRIBUTOR_NAMESIZE 32 /**< Length of name for instance */
+
+struct rte_distributor_v20;
+struct rte_mbuf;
+
+/**
+ * Function to create a new distributor instance
+ *
+ * Reserves the memory needed for the distributor operation and
+ * initializes the distributor to work with the configured number of workers.
+ *
+ * @param name
+ * The name to be given to the distributor instance.
+ * @param socket_id
+ * The NUMA node on which the memory is to be allocated
+ * @param num_workers
+ * The maximum number of workers that will request packets from this
+ * distributor
+ * @return
+ * The newly created distributor instance
+ */
+struct rte_distributor_v20 *
+rte_distributor_create_v20(const char *name, unsigned int socket_id,
+ unsigned int num_workers);
+
+/* *** APIS to be called on the distributor lcore *** */
+/*
+ * The following APIs are the public APIs which are designed for use on a
+ * single lcore which acts as the distributor lcore for a given distributor
+ * instance. These functions cannot be called on multiple cores simultaneously
+ * without using locking to protect access to the internals of the distributor.
+ *
+ * NOTE: a given lcore cannot act as both a distributor lcore and a worker lcore
+ * for the same distributor instance, otherwise deadlock will result.
+ */
+
+/**
+ * Process a set of packets by distributing them among workers that request
+ * packets. The distributor will ensure that no two packets that have the
+ * same flow id, or tag, in the mbuf will be processed at the same time.
+ *
+ * The user is advocated to set tag for each mbuf before calling this function.
+ * If user doesn't set the tag, the tag value can be various values depending on
+ * driver implementation and configuration.
+ *
+ * This is not multi-thread safe and should only be called on a single lcore.
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param mbufs
+ * The mbufs to be distributed
+ * @param num_mbufs
+ * The number of mbufs in the mbufs array
+ * @return
+ * The number of mbufs processed.
+ */
+int
+rte_distributor_process_v20(struct rte_distributor_v20 *d,
+ struct rte_mbuf **mbufs, unsigned int num_mbufs);
+
+/**
+ * Get a set of mbufs that have been returned to the distributor by workers
+ *
+ * This should only be called on the same lcore as rte_distributor_process()
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param mbufs
+ * The mbufs pointer array to be filled in
+ * @param max_mbufs
+ * The size of the mbufs array
+ * @return
+ * The number of mbufs returned in the mbufs array.
+ */
+int
+rte_distributor_returned_pkts_v20(struct rte_distributor_v20 *d,
+ struct rte_mbuf **mbufs, unsigned int max_mbufs);
+
+/**
+ * Flush the distributor component, so that there are no in-flight or
+ * backlogged packets awaiting processing
+ *
+ * This should only be called on the same lcore as rte_distributor_process()
+ *
+ * @param d
+ * The distributor instance to be used
+ * @return
+ * The number of queued/in-flight packets that were completed by this call.
+ */
+int
+rte_distributor_flush_v20(struct rte_distributor_v20 *d);
+
+/**
+ * Clears the array of returned packets used as the source for the
+ * rte_distributor_returned_pkts() API call.
+ *
+ * This should only be called on the same lcore as rte_distributor_process()
+ *
+ * @param d
+ * The distributor instance to be used
+ */
+void
+rte_distributor_clear_returns_v20(struct rte_distributor_v20 *d);
+
+/* *** APIS to be called on the worker lcores *** */
+/*
+ * The following APIs are the public APIs which are designed for use on
+ * multiple lcores which act as workers for a distributor. Each lcore should use
+ * a unique worker id when requesting packets.
+ *
+ * NOTE: a given lcore cannot act as both a distributor lcore and a worker lcore
+ * for the same distributor instance, otherwise deadlock will result.
+ */
+
+/**
+ * API called by a worker to get a new packet to process. Any previous packet
+ * given to the worker is assumed to have completed processing, and may be
+ * optionally returned to the distributor via the oldpkt parameter.
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param worker_id
+ * The worker instance number to use - must be less that num_workers passed
+ * at distributor creation time.
+ * @param oldpkt
+ * The previous packet, if any, being processed by the worker
+ *
+ * @return
+ * A new packet to be processed by the worker thread.
+ */
+struct rte_mbuf *
+rte_distributor_get_pkt_v20(struct rte_distributor_v20 *d,
+ unsigned int worker_id, struct rte_mbuf *oldpkt);
+
+/**
+ * API called by a worker to return a completed packet without requesting a
+ * new packet, for example, because a worker thread is shutting down
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param worker_id
+ * The worker instance number to use - must be less that num_workers passed
+ * at distributor creation time.
+ * @param mbuf
+ * The previous packet being processed by the worker
+ */
+int
+rte_distributor_return_pkt_v20(struct rte_distributor_v20 *d,
+ unsigned int worker_id, struct rte_mbuf *mbuf);
+
+/**
+ * API called by a worker to request a new packet to process.
+ * Any previous packet given to the worker is assumed to have completed
+ * processing, and may be optionally returned to the distributor via
+ * the oldpkt parameter.
+ * Unlike rte_distributor_get_pkt(), this function does not wait for a new
+ * packet to be provided by the distributor.
+ *
+ * NOTE: after calling this function, rte_distributor_poll_pkt() should
+ * be used to poll for the packet requested. The rte_distributor_get_pkt()
+ * API should *not* be used to try and retrieve the new packet.
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param worker_id
+ * The worker instance number to use - must be less that num_workers passed
+ * at distributor creation time.
+ * @param oldpkt
+ * The previous packet, if any, being processed by the worker
+ */
+void
+rte_distributor_request_pkt_v20(struct rte_distributor_v20 *d,
+ unsigned int worker_id, struct rte_mbuf *oldpkt);
+
+/**
+ * API called by a worker to check for a new packet that was previously
+ * requested by a call to rte_distributor_request_pkt(). It does not wait
+ * for the new packet to be available, but returns NULL if the request has
+ * not yet been fulfilled by the distributor.
+ *
+ * @param d
+ * The distributor instance to be used
+ * @param worker_id
+ * The worker instance number to use - must be less that num_workers passed
+ * at distributor creation time.
+ *
+ * @return
+ * A new packet to be processed by the worker thread, or NULL if no
+ * packet is yet available.
+ */
+struct rte_mbuf *
+rte_distributor_poll_pkt_v20(struct rte_distributor_v20 *d,
+ unsigned int worker_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/librte_distributor/rte_distributor_version.map b/lib/librte_distributor/rte_distributor_version.map
index 73fdc437..3a285b39 100644
--- a/lib/librte_distributor/rte_distributor_version.map
+++ b/lib/librte_distributor/rte_distributor_version.map
@@ -13,3 +13,17 @@ DPDK_2.0 {
local: *;
};
+
+DPDK_17.05 {
+ global:
+
+ rte_distributor_clear_returns;
+ rte_distributor_create;
+ rte_distributor_flush;
+ rte_distributor_get_pkt;
+ rte_distributor_poll_pkt;
+ rte_distributor_process;
+ rte_distributor_request_pkt;
+ rte_distributor_return_pkt;
+ rte_distributor_returned_pkts;
+} DPDK_2.0;
diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
index cf11a099..5690bb49 100644
--- a/lib/librte_eal/Makefile
+++ b/lib/librte_eal/Makefile
@@ -33,6 +33,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-y += common
DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += linuxapp
+DEPDIRS-linuxapp := common
DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += bsdapp
+DEPDIRS-bsdapp := common
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/bsdapp/eal/Makefile b/lib/librte_eal/bsdapp/eal/Makefile
index a15b762b..a0f99502 100644
--- a/lib/librte_eal/bsdapp/eal/Makefile
+++ b/lib/librte_eal/bsdapp/eal/Makefile
@@ -48,7 +48,7 @@ LDLIBS += -lgcc_s
EXPORT_MAP := rte_eal_version.map
-LIBABIVER := 3
+LIBABIVER := 4
# specific to bsdapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) := eal.c
@@ -78,6 +78,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_cpuflags.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_string_fns.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_hexdump.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_devargs.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_bus.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_dev.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_options.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_thread.c
@@ -110,7 +111,4 @@ INC := rte_interrupts.h
SYMLINK-$(CONFIG_RTE_EXEC_ENV_BSDAPP)-include/exec-env := \
$(addprefix include/exec-env/,$(INC))
-DEPDIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += lib/librte_eal/common
-DEPDIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += lib/librte_eal/common/arch/$(ARCH_DIR)
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_eal/bsdapp/eal/eal.c b/lib/librte_eal/bsdapp/eal/eal.c
index 35e3117a..05f0c1f9 100644
--- a/lib/librte_eal/bsdapp/eal/eal.c
+++ b/lib/librte_eal/bsdapp/eal/eal.c
@@ -56,6 +56,7 @@
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
+#include <rte_errno.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_log.h>
@@ -64,6 +65,7 @@
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_interrupts.h>
+#include <rte_bus.h>
#include <rte_pci.h>
#include <rte_dev.h>
#include <rte_devargs.h>
@@ -193,7 +195,7 @@ rte_eal_config_create(void)
rte_panic("Cannot mmap memory for rte_config\n");
}
memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
- rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
+ rte_config.mem_config = rte_mem_cfg_addr;
}
/* attach to an existing shared memory config */
@@ -218,7 +220,7 @@ rte_eal_config_attach(void)
if (rte_mem_cfg_addr == MAP_FAILED)
rte_panic("Cannot mmap memory for rte_config\n");
- rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
+ rte_config.mem_config = rte_mem_cfg_addr;
}
/* Detect if we are a primary or a secondary process */
@@ -321,8 +323,6 @@ eal_log_level_parse(int argc, char **argv)
optind = 1;
optreset = 1;
- eal_reset_internal_config(&internal_config);
-
while ((opt = getopt_long(argc, argvopt, eal_short_options,
eal_long_options, &option_index)) != EOF) {
@@ -486,6 +486,12 @@ rte_eal_iopl_init(void)
return 0;
}
+static void rte_eal_init_alert(const char *msg)
+{
+ fprintf(stderr, "EAL: FATAL: %s\n", msg);
+ RTE_LOG(ERR, EAL, "%s\n", msg);
+}
+
/* Launch threads, called at application init(). */
int
rte_eal_init(int argc, char **argv)
@@ -497,29 +503,47 @@ rte_eal_init(int argc, char **argv)
char thread_name[RTE_MAX_THREAD_NAME_LEN];
/* checks if the machine is adequate */
- rte_cpu_check_supported();
+ if (!rte_cpu_is_supported()) {
+ rte_eal_init_alert("unsupported cpu type.");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
- if (!rte_atomic32_test_and_set(&run_once))
+ if (!rte_atomic32_test_and_set(&run_once)) {
+ rte_eal_init_alert("already called initialization.");
+ rte_errno = EALREADY;
return -1;
+ }
thread_id = pthread_self();
- eal_log_level_parse(argc, argv);
+ eal_reset_internal_config(&internal_config);
/* set log level as early as possible */
- rte_set_log_level(internal_config.log_level);
+ eal_log_level_parse(argc, argv);
- if (rte_eal_cpu_init() < 0)
- rte_panic("Cannot detect lcores\n");
+ if (rte_eal_cpu_init() < 0) {
+ rte_eal_init_alert("Cannot detect lcores.");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
fctret = eal_parse_args(argc, argv);
- if (fctret < 0)
- exit(1);
+ if (fctret < 0) {
+ rte_eal_init_alert("Invalid 'command line' arguments.");
+ rte_errno = EINVAL;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
if (internal_config.no_hugetlbfs == 0 &&
internal_config.process_type != RTE_PROC_SECONDARY &&
- eal_hugepage_info_init() < 0)
- rte_panic("Cannot get hugepage information\n");
+ eal_hugepage_info_init() < 0) {
+ rte_eal_init_alert("Cannot get hugepage information.");
+ rte_errno = EACCES;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
if (internal_config.no_hugetlbfs)
@@ -543,31 +567,45 @@ rte_eal_init(int argc, char **argv)
rte_config_init();
- if (rte_eal_memory_init() < 0)
- rte_panic("Cannot init memory\n");
-
- if (rte_eal_memzone_init() < 0)
- rte_panic("Cannot init memzone\n");
+ if (rte_eal_memory_init() < 0) {
+ rte_eal_init_alert("Cannot init memory\n");
+ rte_errno = ENOMEM;
+ return -1;
+ }
- if (rte_eal_tailqs_init() < 0)
- rte_panic("Cannot init tail queues for objects\n");
+ if (rte_eal_memzone_init() < 0) {
+ rte_eal_init_alert("Cannot init memzone\n");
+ rte_errno = ENODEV;
+ return -1;
+ }
- if (rte_eal_alarm_init() < 0)
- rte_panic("Cannot init interrupt-handling thread\n");
+ if (rte_eal_tailqs_init() < 0) {
+ rte_eal_init_alert("Cannot init tail queues for objects\n");
+ rte_errno = EFAULT;
+ return -1;
+ }
- if (rte_eal_intr_init() < 0)
- rte_panic("Cannot init interrupt-handling thread\n");
+ if (rte_eal_alarm_init() < 0) {
+ rte_eal_init_alert("Cannot init interrupt-handling thread\n");
+ /* rte_eal_alarm_init sets rte_errno on failure. */
+ return -1;
+ }
- if (rte_eal_timer_init() < 0)
- rte_panic("Cannot init HPET or TSC timers\n");
+ if (rte_eal_intr_init() < 0) {
+ rte_eal_init_alert("Cannot init interrupt-handling thread\n");
+ return -1;
+ }
- if (rte_eal_pci_init() < 0)
- rte_panic("Cannot init PCI\n");
+ if (rte_eal_timer_init() < 0) {
+ rte_eal_init_alert("Cannot init HPET or TSC timers\n");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
eal_check_mem_on_local_socket();
if (eal_plugins_init() < 0)
- rte_panic("Cannot init plugins\n");
+ rte_eal_init_alert("Cannot init plugins\n");
eal_thread_init_master(rte_config.master_lcore);
@@ -577,8 +615,11 @@ rte_eal_init(int argc, char **argv)
rte_config.master_lcore, thread_id, cpuset,
ret == 0 ? "" : "...");
- if (rte_eal_dev_init() < 0)
- rte_panic("Cannot init pmd devices\n");
+ if (rte_bus_scan()) {
+ rte_eal_init_alert("Cannot scan the buses for devices\n");
+ rte_errno = ENODEV;
+ return -1;
+ }
RTE_LCORE_FOREACH_SLAVE(i) {
@@ -612,9 +653,12 @@ rte_eal_init(int argc, char **argv)
rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
rte_eal_mp_wait_lcore();
- /* Probe & Initialize PCI devices */
- if (rte_eal_pci_probe())
- rte_panic("Cannot probe PCI\n");
+ /* Probe all the buses and devices/drivers on them */
+ if (rte_bus_probe()) {
+ rte_eal_init_alert("Cannot probe devices\n");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
rte_eal_mcfg_complete();
diff --git a/lib/librte_eal/bsdapp/eal/eal_debug.c b/lib/librte_eal/bsdapp/eal/eal_debug.c
index 5fbc17c5..e1c75548 100644
--- a/lib/librte_eal/bsdapp/eal/eal_debug.c
+++ b/lib/librte_eal/bsdapp/eal/eal_debug.c
@@ -31,7 +31,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#ifdef RTE_BACKTRACE
#include <execinfo.h>
+#endif
#include <stdarg.h>
#include <signal.h>
#include <stdlib.h>
@@ -47,6 +49,7 @@
/* dump the stack of the calling core */
void rte_dump_stack(void)
{
+#ifdef RTE_BACKTRACE
void *func[BACKTRACE_SIZE];
char **symb = NULL;
int size;
@@ -64,6 +67,7 @@ void rte_dump_stack(void)
}
free(symb);
+#endif /* RTE_BACKTRACE */
}
/* not implemented in this environment */
diff --git a/lib/librte_eal/bsdapp/eal/eal_interrupts.c b/lib/librte_eal/bsdapp/eal/eal_interrupts.c
index 836e4836..ea2afff4 100644
--- a/lib/librte_eal/bsdapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/bsdapp/eal/eal_interrupts.c
@@ -36,29 +36,37 @@
#include "eal_private.h"
int
-rte_intr_callback_register(struct rte_intr_handle *intr_handle __rte_unused,
- rte_intr_callback_fn cb __rte_unused,
- void *cb_arg __rte_unused)
+rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb,
+ void *cb_arg)
{
+ RTE_SET_USED(intr_handle);
+ RTE_SET_USED(cb);
+ RTE_SET_USED(cb_arg);
+
return -ENOTSUP;
}
int
-rte_intr_callback_unregister(struct rte_intr_handle *intr_handle __rte_unused,
- rte_intr_callback_fn cb_fn __rte_unused,
- void *cb_arg __rte_unused)
+rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
+ rte_intr_callback_fn cb,
+ void *cb_arg)
{
+ RTE_SET_USED(intr_handle);
+ RTE_SET_USED(cb);
+ RTE_SET_USED(cb_arg);
+
return -ENOTSUP;
}
int
-rte_intr_enable(struct rte_intr_handle *intr_handle __rte_unused)
+rte_intr_enable(const struct rte_intr_handle *intr_handle __rte_unused)
{
return -ENOTSUP;
}
int
-rte_intr_disable(struct rte_intr_handle *intr_handle __rte_unused)
+rte_intr_disable(const struct rte_intr_handle *intr_handle __rte_unused)
{
return -ENOTSUP;
}
diff --git a/lib/librte_eal/bsdapp/eal/eal_lcore.c b/lib/librte_eal/bsdapp/eal/eal_lcore.c
index b8bfafde..bc584dd5 100644
--- a/lib/librte_eal/bsdapp/eal/eal_lcore.c
+++ b/lib/librte_eal/bsdapp/eal/eal_lcore.c
@@ -53,12 +53,14 @@ eal_cpu_core_id(__rte_unused unsigned lcore_id)
static int
eal_get_ncpus(void)
{
+ static int ncpu = -1;
int mib[2] = {CTL_HW, HW_NCPU};
- int ncpu;
size_t len = sizeof(ncpu);
- sysctl(mib, 2, &ncpu, &len, NULL, 0);
- RTE_LOG(INFO, EAL, "Sysctl reports %d cpus\n", ncpu);
+ if (ncpu < 0) {
+ sysctl(mib, 2, &ncpu, &len, NULL, 0);
+ RTE_LOG(INFO, EAL, "Sysctl reports %d cpus\n", ncpu);
+ }
return ncpu;
}
diff --git a/lib/librte_eal/bsdapp/eal/eal_pci.c b/lib/librte_eal/bsdapp/eal/eal_pci.c
index 8b3ed881..e321461d 100644
--- a/lib/librte_eal/bsdapp/eal/eal_pci.c
+++ b/lib/librte_eal/bsdapp/eal/eal_pci.c
@@ -87,18 +87,11 @@
* enabling bus master.
*/
-/* unbind kernel driver for this device */
-int
-pci_unbind_kernel_driver(struct rte_pci_device *dev __rte_unused)
-{
- RTE_LOG(ERR, EAL, "RTE_PCI_DRV_FORCE_UNBIND flag is not implemented "
- "for BSD\n");
- return -ENOTSUP;
-}
+extern struct rte_pci_bus rte_pci_bus;
/* Map pci device */
int
-rte_eal_pci_map_device(struct rte_pci_device *dev)
+rte_pci_map_device(struct rte_pci_device *dev)
{
int ret = -1;
@@ -120,7 +113,7 @@ rte_eal_pci_map_device(struct rte_pci_device *dev)
/* Unmap pci device */
void
-rte_eal_pci_unmap_device(struct rte_pci_device *dev)
+rte_pci_unmap_device(struct rte_pci_device *dev)
{
/* try unmapping the NIC resources */
switch (dev->kdrv) {
@@ -289,6 +282,9 @@ pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
/* FreeBSD has no NUMA support (yet) */
dev->device.numa_node = 0;
+ rte_pci_device_name(&dev->addr, dev->name, sizeof(dev->name));
+ dev->device.name = dev->name;
+
/* FreeBSD has only one pass through driver */
dev->kdrv = RTE_KDRV_NIC_UIO;
@@ -322,20 +318,19 @@ pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
}
/* device is valid, add in list (sorted) */
- if (TAILQ_EMPTY(&pci_device_list)) {
- TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
+ if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
+ rte_pci_add_device(dev);
}
else {
struct rte_pci_device *dev2 = NULL;
int ret;
- TAILQ_FOREACH(dev2, &pci_device_list, next) {
+ TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
ret = rte_eal_compare_pci_addr(&dev->addr, &dev2->addr);
if (ret > 0)
continue;
else if (ret < 0) {
- TAILQ_INSERT_BEFORE(dev2, dev, next);
- return 0;
+ rte_pci_insert_device(dev2, dev);
} else { /* already registered */
dev2->kdrv = dev->kdrv;
dev2->max_vfs = dev->max_vfs;
@@ -343,10 +338,10 @@ pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
dev->mem_resource,
sizeof(dev->mem_resource));
free(dev);
- return 0;
}
+ return 0;
}
- TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
+ rte_pci_add_device(dev);
}
return 0;
@@ -361,7 +356,7 @@ skipdev:
* list. Call pci_scan_one() for each pci entry found.
*/
int
-rte_eal_pci_scan(void)
+rte_pci_scan(void)
{
int fd;
unsigned dev_count = 0;
@@ -374,6 +369,10 @@ rte_eal_pci_scan(void)
.matches = &matches[0],
};
+ /* for debug purposes, PCI can be disabled */
+ if (internal_config.no_pci)
+ return 0;
+
fd = open("/dev/pci", O_RDONLY);
if (fd < 0) {
RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
@@ -456,10 +455,11 @@ error:
}
/* Read PCI config space. */
-int rte_eal_pci_read_config(const struct rte_pci_device *dev,
- void *buf, size_t len, off_t offset)
+int rte_pci_read_config(const struct rte_pci_device *dev,
+ void *buf, size_t len, off_t offset)
{
int fd = -1;
+ int size;
struct pci_io pi = {
.pi_sel = {
.pc_domain = dev->addr.domain,
@@ -468,25 +468,28 @@ int rte_eal_pci_read_config(const struct rte_pci_device *dev,
.pc_func = dev->addr.function,
},
.pi_reg = offset,
- .pi_width = len,
};
- if (len == 3 || len > sizeof(pi.pi_data)) {
- RTE_LOG(ERR, EAL, "%s(): invalid pci read length\n", __func__);
- goto error;
- }
-
fd = open("/dev/pci", O_RDWR);
if (fd < 0) {
RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
goto error;
}
- if (ioctl(fd, PCIOCREAD, &pi) < 0)
- goto error;
+ while (len > 0) {
+ size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
+ pi.pi_width = size;
+
+ if (ioctl(fd, PCIOCREAD, &pi) < 0)
+ goto error;
+ memcpy(buf, &pi.pi_data, size);
+
+ buf = (char *)buf + size;
+ pi.pi_reg += size;
+ len -= size;
+ }
close(fd);
- memcpy(buf, &pi.pi_data, len);
return 0;
error:
@@ -496,8 +499,8 @@ int rte_eal_pci_read_config(const struct rte_pci_device *dev,
}
/* Write PCI config space. */
-int rte_eal_pci_write_config(const struct rte_pci_device *dev,
- const void *buf, size_t len, off_t offset)
+int rte_pci_write_config(const struct rte_pci_device *dev,
+ const void *buf, size_t len, off_t offset)
{
int fd = -1;
@@ -539,8 +542,8 @@ int rte_eal_pci_write_config(const struct rte_pci_device *dev,
}
int
-rte_eal_pci_ioport_map(struct rte_pci_device *dev, int bar,
- struct rte_pci_ioport *p)
+rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p)
{
int ret;
@@ -567,7 +570,7 @@ rte_eal_pci_ioport_map(struct rte_pci_device *dev, int bar,
static void
pci_uio_ioport_read(struct rte_pci_ioport *p,
- void *data, size_t len, off_t offset)
+ void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
uint8_t *d;
@@ -595,8 +598,8 @@ pci_uio_ioport_read(struct rte_pci_ioport *p,
}
void
-rte_eal_pci_ioport_read(struct rte_pci_ioport *p,
- void *data, size_t len, off_t offset)
+rte_pci_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset)
{
switch (p->dev->kdrv) {
case RTE_KDRV_NIC_UIO:
@@ -609,7 +612,7 @@ rte_eal_pci_ioport_read(struct rte_pci_ioport *p,
static void
pci_uio_ioport_write(struct rte_pci_ioport *p,
- const void *data, size_t len, off_t offset)
+ const void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
const uint8_t *s;
@@ -619,13 +622,13 @@ pci_uio_ioport_write(struct rte_pci_ioport *p,
for (s = data; len > 0; s += size, reg += size, len -= size) {
if (len >= 4) {
size = 4;
- outl(*(const uint32_t *)s, reg);
+ outl(reg, *(const uint32_t *)s);
} else if (len >= 2) {
size = 2;
- outw(*(const uint16_t *)s, reg);
+ outw(reg, *(const uint16_t *)s);
} else {
size = 1;
- outb(*s, reg);
+ outb(reg, *s);
}
}
#else
@@ -637,8 +640,8 @@ pci_uio_ioport_write(struct rte_pci_ioport *p,
}
void
-rte_eal_pci_ioport_write(struct rte_pci_ioport *p,
- const void *data, size_t len, off_t offset)
+rte_pci_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset)
{
switch (p->dev->kdrv) {
case RTE_KDRV_NIC_UIO:
@@ -650,7 +653,7 @@ rte_eal_pci_ioport_write(struct rte_pci_ioport *p,
}
int
-rte_eal_pci_ioport_unmap(struct rte_pci_ioport *p)
+rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
int ret;
@@ -667,18 +670,3 @@ rte_eal_pci_ioport_unmap(struct rte_pci_ioport *p)
return ret;
}
-
-/* Init the PCI EAL subsystem */
-int
-rte_eal_pci_init(void)
-{
- /* for debug purposes, PCI can be disabled */
- if (internal_config.no_pci)
- return 0;
-
- if (rte_eal_pci_scan() < 0) {
- RTE_LOG(ERR, EAL, "%s(): Cannot scan PCI bus\n", __func__);
- return -1;
- }
- return 0;
-}
diff --git a/lib/librte_eal/bsdapp/eal/rte_eal_version.map b/lib/librte_eal/bsdapp/eal/rte_eal_version.map
index 2f81f7c0..2e48a736 100644
--- a/lib/librte_eal/bsdapp/eal/rte_eal_version.map
+++ b/lib/librte_eal/bsdapp/eal/rte_eal_version.map
@@ -6,8 +6,6 @@ DPDK_2.0 {
eal_parse_sysfs_value;
eal_timer_source;
lcore_config;
- pci_device_list;
- pci_driver_list;
per_lcore__lcore_id;
per_lcore__rte_errno;
rte_calloc;
@@ -22,12 +20,9 @@ DPDK_2.0 {
rte_dump_tailq;
rte_eal_alarm_cancel;
rte_eal_alarm_set;
- rte_eal_dev_init;
rte_eal_devargs_add;
rte_eal_devargs_dump;
rte_eal_devargs_type_count;
- rte_eal_driver_register;
- rte_eal_driver_unregister;
rte_eal_get_configuration;
rte_eal_get_lcore_state;
rte_eal_get_physmem_layout;
@@ -40,18 +35,10 @@ DPDK_2.0 {
rte_eal_mp_remote_launch;
rte_eal_mp_wait_lcore;
rte_eal_parse_devargs_str;
- rte_eal_pci_dump;
- rte_eal_pci_probe;
- rte_eal_pci_probe_one;
- rte_eal_pci_register;
- rte_eal_pci_scan;
- rte_eal_pci_unregister;
rte_eal_process_type;
rte_eal_remote_launch;
rte_eal_tailq_lookup;
rte_eal_tailq_register;
- rte_eal_vdev_init;
- rte_eal_vdev_uninit;
rte_eal_wait_lcore;
rte_exit;
rte_free;
@@ -66,11 +53,8 @@ DPDK_2.0 {
rte_intr_disable;
rte_intr_enable;
rte_log;
- rte_log_add_in_history;
rte_log_cur_msg_loglevel;
rte_log_cur_msg_logtype;
- rte_log_dump_history;
- rte_log_set_history;
rte_logs;
rte_malloc;
rte_malloc_dump_stats;
@@ -114,9 +98,6 @@ DPDK_2.0 {
DPDK_2.1 {
global:
- rte_eal_pci_detach;
- rte_eal_pci_read_config;
- rte_eal_pci_write_config;
rte_intr_allow_others;
rte_intr_dp_is_en;
rte_intr_efd_disable;
@@ -142,12 +123,6 @@ DPDK_16.04 {
global:
rte_cpu_get_flag_name;
- rte_eal_pci_ioport_map;
- rte_eal_pci_ioport_read;
- rte_eal_pci_ioport_unmap;
- rte_eal_pci_ioport_write;
- rte_eal_pci_map_device;
- rte_eal_pci_unmap_device;
rte_eal_primary_proc_alive;
} DPDK_2.2;
@@ -170,7 +145,51 @@ DPDK_16.11 {
rte_delay_us_callback_register;
rte_eal_dev_attach;
rte_eal_dev_detach;
- rte_eal_vdrv_register;
- rte_eal_vdrv_unregister;
} DPDK_16.07;
+
+DPDK_17.02 {
+ global:
+
+ rte_bus_dump;
+ rte_bus_probe;
+ rte_bus_register;
+ rte_bus_scan;
+ rte_bus_unregister;
+
+} DPDK_16.11;
+
+DPDK_17.05 {
+ global:
+
+ rte_cpu_is_supported;
+ rte_log_dump;
+ rte_log_register;
+ rte_log_get_global_level;
+ rte_log_set_global_level;
+ rte_log_set_level;
+ rte_log_set_level_regexp;
+ rte_pci_detach;
+ rte_pci_dump;
+ rte_pci_ioport_map;
+ rte_pci_ioport_read;
+ rte_pci_ioport_unmap;
+ rte_pci_ioport_write;
+ rte_pci_map_device;
+ rte_pci_probe;
+ rte_pci_probe_one;
+ rte_pci_read_config;
+ rte_pci_register;
+ rte_pci_scan;
+ rte_pci_unmap_device;
+ rte_pci_unregister;
+ rte_pci_write_config;
+ rte_vdev_init;
+ rte_vdev_register;
+ rte_vdev_uninit;
+ rte_vdev_unregister;
+ vfio_get_container_fd;
+ vfio_get_group_fd;
+ vfio_get_group_no;
+
+} DPDK_17.02;
diff --git a/lib/librte_eal/bsdapp/nic_uio/nic_uio.c b/lib/librte_eal/bsdapp/nic_uio/nic_uio.c
index 99a4975c..4bd7545a 100644
--- a/lib/librte_eal/bsdapp/nic_uio/nic_uio.c
+++ b/lib/librte_eal/bsdapp/nic_uio/nic_uio.c
@@ -180,6 +180,10 @@ nic_uio_probe (device_t dev)
unsigned int device = pci_get_slot(dev);
unsigned int function = pci_get_function(dev);
+ char bdf_str[256];
+ char *token, *remaining;
+
+ /* First check if we found this on load */
for (i = 0; i < num_detached; i++)
if (bus == pci_get_bus(detached_devices[i]) &&
device == pci_get_slot(detached_devices[i]) &&
@@ -188,6 +192,45 @@ nic_uio_probe (device_t dev)
return BUS_PROBE_SPECIFIC;
}
+ /* otherwise check if it's a new device and if it matches the BDF */
+ memset(bdf_str, 0, sizeof(bdf_str));
+ TUNABLE_STR_FETCH("hw.nic_uio.bdfs", bdf_str, sizeof(bdf_str));
+ remaining = bdf_str;
+ while (1) {
+ if (remaining == NULL || remaining[0] == '\0')
+ break;
+ token = strsep(&remaining, ",:");
+ if (token == NULL)
+ break;
+ bus = strtol(token, NULL, 10);
+ token = strsep(&remaining, ",:");
+ if (token == NULL)
+ break;
+ device = strtol(token, NULL, 10);
+ token = strsep(&remaining, ",:");
+ if (token == NULL)
+ break;
+ function = strtol(token, NULL, 10);
+
+ if (bus == pci_get_bus(dev) &&
+ device == pci_get_slot(dev) &&
+ function == pci_get_function(dev)) {
+
+ if (num_detached < MAX_DETACHED_DEVICES) {
+ printf("%s: probed dev=%p\n",
+ __func__, dev);
+ detached_devices[num_detached++] = dev;
+ device_set_desc(dev, "DPDK PCI Device");
+ return BUS_PROBE_SPECIFIC;
+ } else {
+ printf("%s: reached MAX_DETACHED_DEVICES=%d. dev=%p won't be reattached\n",
+ __func__, MAX_DETACHED_DEVICES,
+ dev);
+ break;
+ }
+ }
+ }
+
return ENXIO;
}
@@ -248,6 +291,7 @@ nic_uio_load(void)
memset(bdf_str, 0, sizeof(bdf_str));
TUNABLE_STR_FETCH("hw.nic_uio.bdfs", bdf_str, sizeof(bdf_str));
remaining = bdf_str;
+ printf("nic_uio: hw.nic_uio.bdfs = '%s'\n", bdf_str);
/*
* Users should specify PCI BDFs in the format "b:d:f,b:d:f,b:d:f".
* But the code below does not try differentiate between : and ,
diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile
index dfd64aa5..a5bd1089 100644
--- a/lib/librte_eal/common/Makefile
+++ b/lib/librte_eal/common/Makefile
@@ -38,16 +38,14 @@ INC += rte_per_lcore.h rte_random.h
INC += rte_tailq.h rte_interrupts.h rte_alarm.h
INC += rte_string_fns.h rte_version.h
INC += rte_eal_memconfig.h rte_malloc_heap.h
-INC += rte_hexdump.h rte_devargs.h rte_dev.h rte_vdev.h
+INC += rte_hexdump.h rte_devargs.h rte_bus.h rte_dev.h rte_vdev.h
INC += rte_pci_dev_feature_defs.h rte_pci_dev_features.h
INC += rte_malloc.h rte_keepalive.h rte_time.h
-ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y)
-INC += rte_warnings.h
-endif
-
GENERIC_INC := rte_atomic.h rte_byteorder.h rte_cycles.h rte_prefetch.h
GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h rte_rwlock.h
+GENERIC_INC += rte_vect.h rte_io.h
+
# defined in mk/arch/$(RTE_ARCH)/rte.vars.mk
ARCH_DIR ?= $(RTE_ARCH)
ARCH_INC := $(notdir $(wildcard $(RTE_SDK)/lib/librte_eal/common/include/arch/$(ARCH_DIR)/*.h))
diff --git a/lib/librte_eal/common/eal_common_bus.c b/lib/librte_eal/common/eal_common_bus.c
new file mode 100644
index 00000000..8f9baf8b
--- /dev/null
+++ b/lib/librte_eal/common/eal_common_bus.c
@@ -0,0 +1,147 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 NXP
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+
+#include <rte_bus.h>
+
+#include "eal_private.h"
+
+struct rte_bus_list rte_bus_list =
+ TAILQ_HEAD_INITIALIZER(rte_bus_list);
+
+void
+rte_bus_register(struct rte_bus *bus)
+{
+ RTE_VERIFY(bus);
+ RTE_VERIFY(bus->name && strlen(bus->name));
+ /* A bus should mandatorily have the scan implemented */
+ RTE_VERIFY(bus->scan);
+ RTE_VERIFY(bus->probe);
+
+ TAILQ_INSERT_TAIL(&rte_bus_list, bus, next);
+ RTE_LOG(DEBUG, EAL, "Registered [%s] bus.\n", bus->name);
+}
+
+void
+rte_bus_unregister(struct rte_bus *bus)
+{
+ TAILQ_REMOVE(&rte_bus_list, bus, next);
+ RTE_LOG(DEBUG, EAL, "Unregistered [%s] bus.\n", bus->name);
+}
+
+/* Scan all the buses for registered devices */
+int
+rte_bus_scan(void)
+{
+ int ret;
+ struct rte_bus *bus = NULL;
+
+ TAILQ_FOREACH(bus, &rte_bus_list, next) {
+ ret = bus->scan();
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Scan for (%s) bus failed.\n",
+ bus->name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Probe all devices of all buses */
+int
+rte_bus_probe(void)
+{
+ int ret;
+ struct rte_bus *bus, *vbus = NULL;
+
+ TAILQ_FOREACH(bus, &rte_bus_list, next) {
+ if (!strcmp(bus->name, "virtual")) {
+ vbus = bus;
+ continue;
+ }
+
+ ret = bus->probe();
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Bus (%s) probe failed.\n",
+ bus->name);
+ return ret;
+ }
+ }
+
+ if (vbus) {
+ ret = vbus->probe();
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Bus (%s) probe failed.\n",
+ vbus->name);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Dump information of a single bus */
+static int
+bus_dump_one(FILE *f, struct rte_bus *bus)
+{
+ int ret;
+
+ /* For now, dump only the bus name */
+ ret = fprintf(f, " %s\n", bus->name);
+
+ /* Error in case of inability in writing to stream */
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+void
+rte_bus_dump(FILE *f)
+{
+ int ret;
+ struct rte_bus *bus;
+
+ TAILQ_FOREACH(bus, &rte_bus_list, next) {
+ ret = bus_dump_one(f, bus);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Unable to write to stream (%d)\n",
+ ret);
+ break;
+ }
+ }
+}
diff --git a/lib/librte_eal/common/eal_common_cpuflags.c b/lib/librte_eal/common/eal_common_cpuflags.c
index b5f76f7f..9a2d080a 100644
--- a/lib/librte_eal/common/eal_common_cpuflags.c
+++ b/lib/librte_eal/common/eal_common_cpuflags.c
@@ -43,6 +43,13 @@
void
rte_cpu_check_supported(void)
{
+ if (!rte_cpu_is_supported())
+ exit(1);
+}
+
+int
+rte_cpu_is_supported(void)
+{
/* This is generated at compile-time by the build system */
static const enum rte_cpu_flag_t compile_time_flags[] = {
RTE_COMPILE_TIME_CPUFLAGS
@@ -57,14 +64,16 @@ rte_cpu_check_supported(void)
fprintf(stderr,
"ERROR: CPU feature flag lookup failed with error %d\n",
ret);
- exit(1);
+ return 0;
}
if (!ret) {
fprintf(stderr,
"ERROR: This system does not support \"%s\".\n"
"Please check that RTE_MACHINE is set correctly.\n",
rte_cpu_get_flag_name(compile_time_flags[i]));
- exit(1);
+ return 0;
}
}
+
+ return 1;
}
diff --git a/lib/librte_eal/common/eal_common_dev.c b/lib/librte_eal/common/eal_common_dev.c
index 4f3b4934..a400ddd0 100644
--- a/lib/librte_eal/common/eal_common_dev.c
+++ b/lib/librte_eal/common/eal_common_dev.c
@@ -45,65 +45,6 @@
#include "eal_private.h"
-/** Global list of device drivers. */
-static struct rte_driver_list dev_driver_list =
- TAILQ_HEAD_INITIALIZER(dev_driver_list);
-/** Global list of device drivers. */
-static struct rte_device_list dev_device_list =
- TAILQ_HEAD_INITIALIZER(dev_device_list);
-
-/* register a driver */
-void
-rte_eal_driver_register(struct rte_driver *driver)
-{
- TAILQ_INSERT_TAIL(&dev_driver_list, driver, next);
-}
-
-/* unregister a driver */
-void
-rte_eal_driver_unregister(struct rte_driver *driver)
-{
- TAILQ_REMOVE(&dev_driver_list, driver, next);
-}
-
-void rte_eal_device_insert(struct rte_device *dev)
-{
- TAILQ_INSERT_TAIL(&dev_device_list, dev, next);
-}
-
-void rte_eal_device_remove(struct rte_device *dev)
-{
- TAILQ_REMOVE(&dev_device_list, dev, next);
-}
-
-int
-rte_eal_dev_init(void)
-{
- struct rte_devargs *devargs;
-
- /*
- * Note that the dev_driver_list is populated here
- * from calls made to rte_eal_driver_register from constructor functions
- * embedded into PMD modules via the RTE_PMD_REGISTER_VDEV macro
- */
-
- /* call the init function for each virtual device */
- TAILQ_FOREACH(devargs, &devargs_list, next) {
-
- if (devargs->type != RTE_DEVTYPE_VIRTUAL)
- continue;
-
- if (rte_eal_vdev_init(devargs->virt.drv_name,
- devargs->args)) {
- RTE_LOG(ERR, EAL, "failed to initialize %s device\n",
- devargs->virt.drv_name);
- return -1;
- }
- }
-
- return 0;
-}
-
int rte_eal_dev_attach(const char *name, const char *devargs)
{
struct rte_pci_addr addr;
@@ -114,11 +55,11 @@ int rte_eal_dev_attach(const char *name, const char *devargs)
}
if (eal_parse_pci_DomBDF(name, &addr) == 0) {
- if (rte_eal_pci_probe_one(&addr) < 0)
+ if (rte_pci_probe_one(&addr) < 0)
goto err;
} else {
- if (rte_eal_vdev_init(name, devargs))
+ if (rte_vdev_init(name, devargs))
goto err;
}
@@ -139,10 +80,10 @@ int rte_eal_dev_detach(const char *name)
}
if (eal_parse_pci_DomBDF(name, &addr) == 0) {
- if (rte_eal_pci_detach(&addr) < 0)
+ if (rte_pci_detach(&addr) < 0)
goto err;
} else {
- if (rte_eal_vdev_uninit(name))
+ if (rte_vdev_uninit(name))
goto err;
}
return 0;
diff --git a/lib/librte_eal/common/eal_common_lcore.c b/lib/librte_eal/common/eal_common_lcore.c
index 2cd41320..84fa0cb5 100644
--- a/lib/librte_eal/common/eal_common_lcore.c
+++ b/lib/librte_eal/common/eal_common_lcore.c
@@ -83,16 +83,17 @@ rte_eal_cpu_init(void)
config->lcore_role[lcore_id] = ROLE_RTE;
lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
lcore_config[lcore_id].socket_id = eal_cpu_socket_id(lcore_id);
- if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES)
+ if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES) {
#ifdef RTE_EAL_ALLOW_INV_SOCKET_ID
lcore_config[lcore_id].socket_id = 0;
#else
- rte_panic("Socket ID (%u) is greater than "
+ RTE_LOG(ERR, EAL, "Socket ID (%u) is greater than "
"RTE_MAX_NUMA_NODES (%d)\n",
lcore_config[lcore_id].socket_id,
RTE_MAX_NUMA_NODES);
+ return -1;
#endif
-
+ }
RTE_LOG(DEBUG, EAL, "Detected lcore %u as "
"core %u on socket %u\n",
lcore_id, lcore_config[lcore_id].core_id,
diff --git a/lib/librte_eal/common/eal_common_log.c b/lib/librte_eal/common/eal_common_log.c
index e45d3269..ddf65b7f 100644
--- a/lib/librte_eal/common/eal_common_log.c
+++ b/lib/librte_eal/common/eal_common_log.c
@@ -35,7 +35,11 @@
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <regex.h>
+#include <rte_eal.h>
#include <rte_log.h>
#include <rte_per_lcore.h>
@@ -60,6 +64,11 @@ struct log_cur_msg {
uint32_t logtype; /**< log type - see rte_log.h */
};
+struct rte_log_dynamic_type {
+ const char *name;
+ uint32_t loglevel;
+};
+
/* per core log */
static RTE_DEFINE_PER_LCORE(struct log_cur_msg, log_cur_msg);
@@ -75,35 +84,95 @@ rte_openlog_stream(FILE *f)
/* Set global log level */
void
-rte_set_log_level(uint32_t level)
+rte_log_set_global_level(uint32_t level)
{
rte_logs.level = (uint32_t)level;
}
+/* Set global log level */
+/* replaced by rte_log_set_global_level */
+__rte_deprecated void
+rte_set_log_level(uint32_t level)
+{
+ rte_log_set_global_level(level);
+}
+
/* Get global log level */
uint32_t
-rte_get_log_level(void)
+rte_log_get_global_level(void)
{
return rte_logs.level;
}
+/* Get global log level */
+/* replaced by rte_log_get_global_level */
+uint32_t
+rte_get_log_level(void)
+{
+ return rte_log_get_global_level();
+}
+
/* Set global log type */
-void
+__rte_deprecated void
rte_set_log_type(uint32_t type, int enable)
{
+ if (type < RTE_LOGTYPE_FIRST_EXT_ID) {
+ if (enable)
+ rte_logs.type |= 1 << type;
+ else
+ rte_logs.type &= ~(1 << type);
+ }
+
if (enable)
- rte_logs.type |= type;
+ rte_log_set_level(type, 0);
else
- rte_logs.type &= (~type);
+ rte_log_set_level(type, RTE_LOG_DEBUG);
}
/* Get global log type */
-uint32_t
+__rte_deprecated uint32_t
rte_get_log_type(void)
{
return rte_logs.type;
}
+int
+rte_log_set_level(uint32_t type, uint32_t level)
+{
+ if (type >= rte_logs.dynamic_types_len)
+ return -1;
+ if (level > RTE_LOG_DEBUG)
+ return -1;
+
+ rte_logs.dynamic_types[type].loglevel = level;
+
+ return 0;
+}
+
+/* set level */
+int
+rte_log_set_level_regexp(const char *pattern, uint32_t level)
+{
+ regex_t r;
+ size_t i;
+
+ if (level > RTE_LOG_DEBUG)
+ return -1;
+
+ if (regcomp(&r, pattern, 0) != 0)
+ return -1;
+
+ for (i = 0; i < rte_logs.dynamic_types_len; i++) {
+ if (rte_logs.dynamic_types[i].name == NULL)
+ continue;
+ if (regexec(&r, rte_logs.dynamic_types[i].name, 0,
+ NULL, 0) == 0)
+ rte_logs.dynamic_types[i].loglevel = level;
+ }
+
+ return 0;
+}
+
/* get the current loglevel for the message beeing processed */
int rte_log_cur_msg_loglevel(void)
{
@@ -116,6 +185,161 @@ int rte_log_cur_msg_logtype(void)
return RTE_PER_LCORE(log_cur_msg).logtype;
}
+static int
+rte_log_lookup(const char *name)
+{
+ size_t i;
+
+ for (i = 0; i < rte_logs.dynamic_types_len; i++) {
+ if (rte_logs.dynamic_types[i].name == NULL)
+ continue;
+ if (strcmp(name, rte_logs.dynamic_types[i].name) == 0)
+ return i;
+ }
+
+ return -1;
+}
+
+/* register an extended log type, assuming table is large enough, and id
+ * is not yet registered.
+ */
+static int
+__rte_log_register(const char *name, int id)
+{
+ char *dup_name = strdup(name);
+
+ if (dup_name == NULL)
+ return -ENOMEM;
+
+ rte_logs.dynamic_types[id].name = dup_name;
+ rte_logs.dynamic_types[id].loglevel = RTE_LOG_DEBUG;
+
+ return id;
+}
+
+/* register an extended log type */
+int
+rte_log_register(const char *name)
+{
+ struct rte_log_dynamic_type *new_dynamic_types;
+ int id, ret;
+
+ id = rte_log_lookup(name);
+ if (id >= 0)
+ return id;
+
+ new_dynamic_types = realloc(rte_logs.dynamic_types,
+ sizeof(struct rte_log_dynamic_type) *
+ (rte_logs.dynamic_types_len + 1));
+ if (new_dynamic_types == NULL)
+ return -ENOMEM;
+ rte_logs.dynamic_types = new_dynamic_types;
+
+ ret = __rte_log_register(name, rte_logs.dynamic_types_len);
+ if (ret < 0)
+ return ret;
+
+ rte_logs.dynamic_types_len++;
+
+ return ret;
+}
+
+struct logtype {
+ uint32_t log_id;
+ const char *logtype;
+};
+
+static const struct logtype logtype_strings[] = {
+ {RTE_LOGTYPE_EAL, "eal"},
+ {RTE_LOGTYPE_MALLOC, "malloc"},
+ {RTE_LOGTYPE_RING, "ring"},
+ {RTE_LOGTYPE_MEMPOOL, "mempool"},
+ {RTE_LOGTYPE_TIMER, "timer"},
+ {RTE_LOGTYPE_PMD, "pmd"},
+ {RTE_LOGTYPE_HASH, "hash"},
+ {RTE_LOGTYPE_LPM, "lpm"},
+ {RTE_LOGTYPE_KNI, "kni"},
+ {RTE_LOGTYPE_ACL, "acl"},
+ {RTE_LOGTYPE_POWER, "power"},
+ {RTE_LOGTYPE_METER, "meter"},
+ {RTE_LOGTYPE_SCHED, "sched"},
+ {RTE_LOGTYPE_PORT, "port"},
+ {RTE_LOGTYPE_TABLE, "table"},
+ {RTE_LOGTYPE_PIPELINE, "pipeline"},
+ {RTE_LOGTYPE_MBUF, "mbuf"},
+ {RTE_LOGTYPE_CRYPTODEV, "cryptodev"},
+ {RTE_LOGTYPE_EFD, "efd"},
+ {RTE_LOGTYPE_EVENTDEV, "eventdev"},
+ {RTE_LOGTYPE_USER1, "user1"},
+ {RTE_LOGTYPE_USER2, "user2"},
+ {RTE_LOGTYPE_USER3, "user3"},
+ {RTE_LOGTYPE_USER4, "user4"},
+ {RTE_LOGTYPE_USER5, "user5"},
+ {RTE_LOGTYPE_USER6, "user6"},
+ {RTE_LOGTYPE_USER7, "user7"},
+ {RTE_LOGTYPE_USER8, "user8"}
+};
+
+RTE_INIT(rte_log_init);
+static void
+rte_log_init(void)
+{
+ uint32_t i;
+
+#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
+ rte_log_set_global_level(RTE_LOG_INFO);
+#else
+ rte_log_set_global_level(RTE_LOG_LEVEL);
+#endif
+
+ rte_logs.dynamic_types = calloc(RTE_LOGTYPE_FIRST_EXT_ID,
+ sizeof(struct rte_log_dynamic_type));
+ if (rte_logs.dynamic_types == NULL)
+ return;
+
+ /* register legacy log types */
+ for (i = 0; i < RTE_DIM(logtype_strings); i++)
+ __rte_log_register(logtype_strings[i].logtype,
+ logtype_strings[i].log_id);
+
+ rte_logs.dynamic_types_len = RTE_LOGTYPE_FIRST_EXT_ID;
+}
+
+static const char *
+loglevel_to_string(uint32_t level)
+{
+ switch (level) {
+ case 0: return "disabled";
+ case RTE_LOG_EMERG: return "emerg";
+ case RTE_LOG_ALERT: return "alert";
+ case RTE_LOG_CRIT: return "critical";
+ case RTE_LOG_ERR: return "error";
+ case RTE_LOG_WARNING: return "warning";
+ case RTE_LOG_NOTICE: return "notice";
+ case RTE_LOG_INFO: return "info";
+ case RTE_LOG_DEBUG: return "debug";
+ default: return "unknown";
+ }
+}
+
+/* dump global level and registered log types */
+void
+rte_log_dump(FILE *f)
+{
+ size_t i;
+
+ fprintf(f, "global log level is %s\n",
+ loglevel_to_string(rte_log_get_global_level()));
+
+ for (i = 0; i < rte_logs.dynamic_types_len; i++) {
+ if (rte_logs.dynamic_types[i].name == NULL)
+ continue;
+ fprintf(f, "id %zu: %s, level is %s\n",
+ i, rte_logs.dynamic_types[i].name,
+ loglevel_to_string(rte_logs.dynamic_types[i].loglevel));
+ }
+}
+
/*
* Generates a log message The message will be sent in the stream
* defined by the previous call to rte_openlog_stream().
@@ -139,7 +363,11 @@ rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap)
}
}
- if ((level > rte_logs.level) || !(logtype & rte_logs.type))
+ if (level > rte_logs.level)
+ return 0;
+ if (logtype >= rte_logs.dynamic_types_len)
+ return -1;
+ if (level > rte_logs.dynamic_types[logtype].loglevel)
return 0;
/* save loglevel and logtype in a global per-lcore variable */
@@ -176,7 +404,8 @@ eal_log_set_default(FILE *default_log)
{
default_log_stream = default_log;
-#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
- RTE_LOG(NOTICE, EAL, "Debug logs available - lower performance\n");
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ RTE_LOG(NOTICE, EAL,
+ "Debug dataplane logs available - lower performance\n");
#endif
}
diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
index 6ca8af17..f470195f 100644
--- a/lib/librte_eal/common/eal_common_options.c
+++ b/lib/librte_eal/common/eal_common_options.c
@@ -118,7 +118,7 @@ static const char *default_solib_dir = RTE_EAL_PMD_PATH;
/*
* Stringified version of solib path used by dpdk-pmdinfo.py
* Note: PLEASE DO NOT ALTER THIS without making a corresponding
- * change to tools/dpdk-pmdinfo.py
+ * change to usertools/dpdk-pmdinfo.py
*/
static const char dpdk_solib_path[] __attribute__((used)) =
"DPDK_PLUGIN_PATH=" RTE_EAL_PMD_PATH;
@@ -126,6 +126,7 @@ static const char dpdk_solib_path[] __attribute__((used)) =
static int master_lcore_parsed;
static int mem_parsed;
+static int core_parsed;
void
eal_reset_internal_config(struct internal_config *internal_cfg)
@@ -147,12 +148,6 @@ eal_reset_internal_config(struct internal_config *internal_cfg)
internal_cfg->base_virtaddr = 0;
internal_cfg->syslog_facility = LOG_DAEMON;
- /* default value from build option */
-#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
- internal_cfg->log_level = RTE_LOG_INFO;
-#else
- internal_cfg->log_level = RTE_LOG_LEVEL;
-#endif
internal_cfg->xen_dom0_support = 0;
@@ -738,25 +733,49 @@ eal_parse_syslog(const char *facility, struct internal_config *conf)
}
static int
-eal_parse_log_level(const char *level, uint32_t *log_level)
+eal_parse_log_level(const char *arg)
{
- char *end;
+ char *end, *str, *type, *level;
unsigned long tmp;
+ str = strdup(arg);
+ if (str == NULL)
+ return -1;
+
+ if (strchr(str, ',') == NULL) {
+ type = NULL;
+ level = str;
+ } else {
+ type = strsep(&str, ",");
+ level = strsep(&str, ",");
+ }
+
errno = 0;
tmp = strtoul(level, &end, 0);
/* check for errors */
if ((errno != 0) || (level[0] == '\0') ||
- end == NULL || (*end != '\0'))
- return -1;
+ end == NULL || (*end != '\0'))
+ goto fail;
/* log_level is a uint32_t */
if (tmp >= UINT32_MAX)
- return -1;
+ goto fail;
+
+ if (type == NULL) {
+ rte_log_set_global_level(tmp);
+ } else if (rte_log_set_level_regexp(type, tmp) < 0) {
+ printf("cannot set log level %s,%lu\n",
+ type, tmp);
+ goto fail;
+ }
- *log_level = tmp;
+ free(str);
return 0;
+
+fail:
+ free(str);
+ return -1;
}
static enum rte_proc_type_t
@@ -797,6 +816,7 @@ eal_parse_common_option(int opt, const char *optarg,
RTE_LOG(ERR, EAL, "invalid coremask\n");
return -1;
}
+ core_parsed = 1;
break;
/* corelist */
case 'l':
@@ -804,6 +824,7 @@ eal_parse_common_option(int opt, const char *optarg,
RTE_LOG(ERR, EAL, "invalid core list\n");
return -1;
}
+ core_parsed = 1;
break;
/* size of memory */
case 'm':
@@ -895,15 +916,12 @@ eal_parse_common_option(int opt, const char *optarg,
break;
case OPT_LOG_LEVEL_NUM: {
- uint32_t log;
-
- if (eal_parse_log_level(optarg, &log) < 0) {
+ if (eal_parse_log_level(optarg) < 0) {
RTE_LOG(ERR, EAL,
"invalid parameters for --"
OPT_LOG_LEVEL "\n");
return -1;
}
- conf->log_level = log;
break;
}
case OPT_LCORES_NUM:
@@ -912,6 +930,7 @@ eal_parse_common_option(int opt, const char *optarg,
OPT_LCORES "\n");
return -1;
}
+ core_parsed = 1;
break;
/* don't know what to do, leave this to caller */
@@ -923,12 +942,38 @@ eal_parse_common_option(int opt, const char *optarg,
return 0;
}
+static void
+eal_auto_detect_cores(struct rte_config *cfg)
+{
+ unsigned int lcore_id;
+ unsigned int removed = 0;
+ rte_cpuset_t affinity_set;
+ pthread_t tid = pthread_self();
+
+ if (pthread_getaffinity_np(tid, sizeof(rte_cpuset_t),
+ &affinity_set) < 0)
+ CPU_ZERO(&affinity_set);
+
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ if (cfg->lcore_role[lcore_id] == ROLE_RTE &&
+ !CPU_ISSET(lcore_id, &affinity_set)) {
+ cfg->lcore_role[lcore_id] = ROLE_OFF;
+ removed++;
+ }
+ }
+
+ cfg->lcore_count -= removed;
+}
+
int
eal_adjust_config(struct internal_config *internal_cfg)
{
int i;
struct rte_config *cfg = rte_eal_get_configuration();
+ if (!core_parsed)
+ eal_auto_detect_cores(cfg);
+
if (internal_config.process_type == RTE_PROC_AUTO)
internal_config.process_type = eal_proc_type_detect();
@@ -1027,7 +1072,9 @@ eal_common_usage(void)
" --"OPT_VMWARE_TSC_MAP" Use VMware TSC map instead of native RDTSC\n"
" --"OPT_PROC_TYPE" Type of this process (primary|secondary|auto)\n"
" --"OPT_SYSLOG" Set syslog facility\n"
- " --"OPT_LOG_LEVEL" Set default log level\n"
+ " --"OPT_LOG_LEVEL"=<int> Set global log level\n"
+ " --"OPT_LOG_LEVEL"=<type-regexp>,<int>\n"
+ " Set specific log level\n"
" -v Display version information on startup\n"
" -h, --help This help\n"
"\nEAL options for DEBUG use only:\n"
diff --git a/lib/librte_eal/common/eal_common_pci.c b/lib/librte_eal/common/eal_common_pci.c
index 6bff6752..b7499913 100644
--- a/lib/librte_eal/common/eal_common_pci.c
+++ b/lib/librte_eal/common/eal_common_pci.c
@@ -69,8 +69,10 @@
#include <sys/queue.h>
#include <sys/mman.h>
+#include <rte_errno.h>
#include <rte_interrupts.h>
#include <rte_log.h>
+#include <rte_bus.h>
#include <rte_pci.h>
#include <rte_per_lcore.h>
#include <rte_memory.h>
@@ -82,10 +84,7 @@
#include "eal_private.h"
-struct pci_driver_list pci_driver_list =
- TAILQ_HEAD_INITIALIZER(pci_driver_list);
-struct pci_device_list pci_device_list =
- TAILQ_HEAD_INITIALIZER(pci_device_list);
+extern struct rte_pci_bus rte_pci_bus;
#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
@@ -153,170 +152,154 @@ pci_unmap_resource(void *requested_addr, size_t size)
}
/*
- * If vendor/device ID match, call the probe() function of the
- * driver.
+ * Match the PCI Driver and Device using the ID Table
+ *
+ * @param pci_drv
+ * PCI driver from which ID table would be extracted
+ * @param pci_dev
+ * PCI device to match against the driver
+ * @return
+ * 1 for successful match
+ * 0 for unsuccessful match
*/
static int
-rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr, struct rte_pci_device *dev)
+rte_pci_match(const struct rte_pci_driver *pci_drv,
+ const struct rte_pci_device *pci_dev)
{
- int ret;
const struct rte_pci_id *id_table;
- for (id_table = dr->id_table; id_table->vendor_id != 0; id_table++) {
-
+ for (id_table = pci_drv->id_table; id_table->vendor_id != 0;
+ id_table++) {
/* check if device's identifiers match the driver's ones */
- if (id_table->vendor_id != dev->id.vendor_id &&
+ if (id_table->vendor_id != pci_dev->id.vendor_id &&
id_table->vendor_id != PCI_ANY_ID)
continue;
- if (id_table->device_id != dev->id.device_id &&
+ if (id_table->device_id != pci_dev->id.device_id &&
id_table->device_id != PCI_ANY_ID)
continue;
- if (id_table->subsystem_vendor_id != dev->id.subsystem_vendor_id &&
- id_table->subsystem_vendor_id != PCI_ANY_ID)
+ if (id_table->subsystem_vendor_id !=
+ pci_dev->id.subsystem_vendor_id &&
+ id_table->subsystem_vendor_id != PCI_ANY_ID)
continue;
- if (id_table->subsystem_device_id != dev->id.subsystem_device_id &&
- id_table->subsystem_device_id != PCI_ANY_ID)
+ if (id_table->subsystem_device_id !=
+ pci_dev->id.subsystem_device_id &&
+ id_table->subsystem_device_id != PCI_ANY_ID)
continue;
- if (id_table->class_id != dev->id.class_id &&
+ if (id_table->class_id != pci_dev->id.class_id &&
id_table->class_id != RTE_CLASS_ANY_ID)
continue;
- struct rte_pci_addr *loc = &dev->addr;
-
- RTE_LOG(INFO, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
- loc->domain, loc->bus, loc->devid, loc->function,
- dev->device.numa_node);
-
- /* no initialization when blacklisted, return without error */
- if (dev->device.devargs != NULL &&
- dev->device.devargs->type ==
- RTE_DEVTYPE_BLACKLISTED_PCI) {
- RTE_LOG(INFO, EAL, " Device is blacklisted, not initializing\n");
- return 1;
- }
-
- RTE_LOG(INFO, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
- dev->id.device_id, dr->driver.name);
-
- if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
- /* map resources for devices that use igb_uio */
- ret = rte_eal_pci_map_device(dev);
- if (ret != 0)
- return ret;
- } else if (dr->drv_flags & RTE_PCI_DRV_FORCE_UNBIND &&
- rte_eal_process_type() == RTE_PROC_PRIMARY) {
- /* unbind current driver */
- if (pci_unbind_kernel_driver(dev) < 0)
- return -1;
- }
-
- /* reference driver structure */
- dev->driver = dr;
-
- /* call the driver probe() function */
- ret = dr->probe(dr, dev);
- if (ret)
- dev->driver = NULL;
-
- return ret;
+ return 1;
}
- /* return positive value if driver doesn't support this device */
- return 1;
+
+ return 0;
}
/*
- * If vendor/device ID match, call the remove() function of the
+ * If vendor/device ID match, call the probe() function of the
* driver.
*/
static int
-rte_eal_pci_detach_dev(struct rte_pci_driver *dr,
- struct rte_pci_device *dev)
+rte_pci_probe_one_driver(struct rte_pci_driver *dr,
+ struct rte_pci_device *dev)
{
- const struct rte_pci_id *id_table;
+ int ret;
+ struct rte_pci_addr *loc;
if ((dr == NULL) || (dev == NULL))
return -EINVAL;
- for (id_table = dr->id_table; id_table->vendor_id != 0; id_table++) {
+ loc = &dev->addr;
- /* check if device's identifiers match the driver's ones */
- if (id_table->vendor_id != dev->id.vendor_id &&
- id_table->vendor_id != PCI_ANY_ID)
- continue;
- if (id_table->device_id != dev->id.device_id &&
- id_table->device_id != PCI_ANY_ID)
- continue;
- if (id_table->subsystem_vendor_id != dev->id.subsystem_vendor_id &&
- id_table->subsystem_vendor_id != PCI_ANY_ID)
- continue;
- if (id_table->subsystem_device_id != dev->id.subsystem_device_id &&
- id_table->subsystem_device_id != PCI_ANY_ID)
- continue;
+ /* The device is not blacklisted; Check if driver supports it */
+ if (!rte_pci_match(dr, dev)) {
+ /* Match of device and driver failed */
+ RTE_LOG(DEBUG, EAL, "Driver (%s) doesn't match the device\n",
+ dr->driver.name);
+ return 1;
+ }
- struct rte_pci_addr *loc = &dev->addr;
+ RTE_LOG(INFO, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
+ loc->domain, loc->bus, loc->devid, loc->function,
+ dev->device.numa_node);
+
+ /* no initialization when blacklisted, return without error */
+ if (dev->device.devargs != NULL &&
+ dev->device.devargs->type ==
+ RTE_DEVTYPE_BLACKLISTED_PCI) {
+ RTE_LOG(INFO, EAL, " Device is blacklisted, not"
+ " initializing\n");
+ return 1;
+ }
- RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
- loc->domain, loc->bus, loc->devid,
- loc->function, dev->device.numa_node);
+ RTE_LOG(INFO, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
+ dev->id.device_id, dr->driver.name);
- RTE_LOG(DEBUG, EAL, " remove driver: %x:%x %s\n", dev->id.vendor_id,
- dev->id.device_id, dr->driver.name);
+ if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
+ /* map resources for devices that use igb_uio */
+ ret = rte_pci_map_device(dev);
+ if (ret != 0)
+ return ret;
+ }
- if (dr->remove && (dr->remove(dev) < 0))
- return -1; /* negative value is an error */
+ /* reference driver structure */
+ dev->driver = dr;
+ dev->device.driver = &dr->driver;
- /* clear driver structure */
+ /* call the driver probe() function */
+ ret = dr->probe(dr, dev);
+ if (ret) {
dev->driver = NULL;
-
if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)
- /* unmap resources for devices that use igb_uio */
- rte_eal_pci_unmap_device(dev);
-
- return 0;
+ rte_pci_unmap_device(dev);
}
- /* return positive value if driver doesn't support this device */
- return 1;
+ return ret;
}
/*
- * If vendor/device ID match, call the probe() function of all
- * registered driver for the given device. Return -1 if initialization
- * failed, return 1 if no driver is found for this device.
+ * If vendor/device ID match, call the remove() function of the
+ * driver.
*/
static int
-pci_probe_all_drivers(struct rte_pci_device *dev)
+rte_pci_detach_dev(struct rte_pci_device *dev)
{
- struct rte_pci_driver *dr = NULL;
- int rc = 0;
+ struct rte_pci_addr *loc;
+ struct rte_pci_driver *dr;
if (dev == NULL)
- return -1;
+ return -EINVAL;
- /* Check if a driver is already loaded */
- if (dev->driver != NULL)
- return 0;
+ dr = dev->driver;
+ loc = &dev->addr;
- TAILQ_FOREACH(dr, &pci_driver_list, next) {
- rc = rte_eal_pci_probe_one_driver(dr, dev);
- if (rc < 0)
- /* negative value is an error */
- return -1;
- if (rc > 0)
- /* positive value means driver doesn't support it */
- continue;
- return 0;
- }
- return 1;
+ RTE_LOG(DEBUG, EAL, "PCI device "PCI_PRI_FMT" on NUMA socket %i\n",
+ loc->domain, loc->bus, loc->devid,
+ loc->function, dev->device.numa_node);
+
+ RTE_LOG(DEBUG, EAL, " remove driver: %x:%x %s\n", dev->id.vendor_id,
+ dev->id.device_id, dr->driver.name);
+
+ if (dr->remove && (dr->remove(dev) < 0))
+ return -1; /* negative value is an error */
+
+ /* clear driver structure */
+ dev->driver = NULL;
+
+ if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING)
+ /* unmap resources for devices that use igb_uio */
+ rte_pci_unmap_device(dev);
+
+ return 0;
}
/*
- * If vendor/device ID match, call the remove() function of all
+ * If vendor/device ID match, call the probe() function of all
* registered driver for the given device. Return -1 if initialization
* failed, return 1 if no driver is found for this device.
*/
static int
-pci_detach_all_drivers(struct rte_pci_device *dev)
+pci_probe_all_drivers(struct rte_pci_device *dev)
{
struct rte_pci_driver *dr = NULL;
int rc = 0;
@@ -324,8 +307,12 @@ pci_detach_all_drivers(struct rte_pci_device *dev)
if (dev == NULL)
return -1;
- TAILQ_FOREACH(dr, &pci_driver_list, next) {
- rc = rte_eal_pci_detach_dev(dr, dev);
+ /* Check if a driver is already loaded */
+ if (dev->driver != NULL)
+ return 0;
+
+ FOREACH_DRIVER_ON_PCIBUS(dr) {
+ rc = rte_pci_probe_one_driver(dr, dev);
if (rc < 0)
/* negative value is an error */
return -1;
@@ -342,9 +329,10 @@ pci_detach_all_drivers(struct rte_pci_device *dev)
* the driver of the devive.
*/
int
-rte_eal_pci_probe_one(const struct rte_pci_addr *addr)
+rte_pci_probe_one(const struct rte_pci_addr *addr)
{
struct rte_pci_device *dev = NULL;
+
int ret = 0;
if (addr == NULL)
@@ -356,7 +344,7 @@ rte_eal_pci_probe_one(const struct rte_pci_addr *addr)
if (pci_update_device(addr) < 0)
goto err_return;
- TAILQ_FOREACH(dev, &pci_device_list, next) {
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
if (rte_eal_compare_pci_addr(&dev->addr, addr))
continue;
@@ -378,7 +366,7 @@ err_return:
* Detach device specified by its pci address.
*/
int
-rte_eal_pci_detach(const struct rte_pci_addr *addr)
+rte_pci_detach(const struct rte_pci_addr *addr)
{
struct rte_pci_device *dev = NULL;
int ret = 0;
@@ -386,15 +374,19 @@ rte_eal_pci_detach(const struct rte_pci_addr *addr)
if (addr == NULL)
return -1;
- TAILQ_FOREACH(dev, &pci_device_list, next) {
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
if (rte_eal_compare_pci_addr(&dev->addr, addr))
continue;
- ret = pci_detach_all_drivers(dev);
+ ret = rte_pci_detach_dev(dev);
if (ret < 0)
+ /* negative value is an error */
goto err_return;
+ if (ret > 0)
+ /* positive value means driver doesn't support it */
+ continue;
- TAILQ_REMOVE(&pci_device_list, dev, next);
+ rte_pci_remove_device(dev);
free(dev);
return 0;
}
@@ -413,9 +405,10 @@ err_return:
* for discovered devices.
*/
int
-rte_eal_pci_probe(void)
+rte_pci_probe(void)
{
struct rte_pci_device *dev = NULL;
+ size_t probed = 0, failed = 0;
struct rte_devargs *devargs;
int probe_all = 0;
int ret = 0;
@@ -423,7 +416,8 @@ rte_eal_pci_probe(void)
if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) == 0)
probe_all = 1;
- TAILQ_FOREACH(dev, &pci_device_list, next) {
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
+ probed++;
/* set devargs in PCI structure */
devargs = pci_devargs_lookup(dev);
@@ -436,13 +430,17 @@ rte_eal_pci_probe(void)
else if (devargs != NULL &&
devargs->type == RTE_DEVTYPE_WHITELISTED_PCI)
ret = pci_probe_all_drivers(dev);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Requested device " PCI_PRI_FMT
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Requested device " PCI_PRI_FMT
" cannot be used\n", dev->addr.domain, dev->addr.bus,
dev->addr.devid, dev->addr.function);
+ rte_errno = errno;
+ failed++;
+ ret = 0;
+ }
}
- return 0;
+ return (probed && probed == failed) ? -1 : 0;
}
/* dump one device */
@@ -467,27 +465,60 @@ pci_dump_one_device(FILE *f, struct rte_pci_device *dev)
/* dump devices on the bus */
void
-rte_eal_pci_dump(FILE *f)
+rte_pci_dump(FILE *f)
{
struct rte_pci_device *dev = NULL;
- TAILQ_FOREACH(dev, &pci_device_list, next) {
+ FOREACH_DEVICE_ON_PCIBUS(dev) {
pci_dump_one_device(f, dev);
}
}
/* register a driver */
void
-rte_eal_pci_register(struct rte_pci_driver *driver)
+rte_pci_register(struct rte_pci_driver *driver)
{
- TAILQ_INSERT_TAIL(&pci_driver_list, driver, next);
- rte_eal_driver_register(&driver->driver);
+ TAILQ_INSERT_TAIL(&rte_pci_bus.driver_list, driver, next);
+ driver->bus = &rte_pci_bus;
}
/* unregister a driver */
void
-rte_eal_pci_unregister(struct rte_pci_driver *driver)
+rte_pci_unregister(struct rte_pci_driver *driver)
+{
+ TAILQ_REMOVE(&rte_pci_bus.driver_list, driver, next);
+ driver->bus = NULL;
+}
+
+/* Add a device to PCI bus */
+void
+rte_pci_add_device(struct rte_pci_device *pci_dev)
+{
+ TAILQ_INSERT_TAIL(&rte_pci_bus.device_list, pci_dev, next);
+}
+
+/* Insert a device into a predefined position in PCI bus */
+void
+rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
+ struct rte_pci_device *new_pci_dev)
{
- rte_eal_driver_unregister(&driver->driver);
- TAILQ_REMOVE(&pci_driver_list, driver, next);
+ TAILQ_INSERT_BEFORE(exist_pci_dev, new_pci_dev, next);
}
+
+/* Remove a device from PCI bus */
+void
+rte_pci_remove_device(struct rte_pci_device *pci_dev)
+{
+ TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next);
+}
+
+struct rte_pci_bus rte_pci_bus = {
+ .bus = {
+ .scan = rte_pci_scan,
+ .probe = rte_pci_probe,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_pci_bus.driver_list),
+};
+
+RTE_REGISTER_BUS(PCI_BUS_NAME, rte_pci_bus.bus);
diff --git a/lib/librte_eal/common/eal_common_tailqs.c b/lib/librte_eal/common/eal_common_tailqs.c
index bb08ec8b..4f698288 100644
--- a/lib/librte_eal/common/eal_common_tailqs.c
+++ b/lib/librte_eal/common/eal_common_tailqs.c
@@ -188,8 +188,7 @@ rte_eal_tailqs_init(void)
if (t->head == NULL) {
RTE_LOG(ERR, EAL,
"Cannot initialize tailq: %s\n", t->name);
- /* no need to TAILQ_REMOVE, we are going to panic in
- * rte_eal_init() */
+ /* TAILQ_REMOVE not needed, error is already fatal */
goto fail;
}
}
diff --git a/lib/librte_eal/common/eal_common_vdev.c b/lib/librte_eal/common/eal_common_vdev.c
index 7d6e54f4..0037a641 100644
--- a/lib/librte_eal/common/eal_common_vdev.c
+++ b/lib/librte_eal/common/eal_common_vdev.c
@@ -37,35 +37,84 @@
#include <stdint.h>
#include <sys/queue.h>
+#include <rte_eal.h>
+#include <rte_bus.h>
#include <rte_vdev.h>
#include <rte_common.h>
+#include <rte_devargs.h>
+#include <rte_memory.h>
+/** Double linked list of virtual device drivers. */
+TAILQ_HEAD(vdev_device_list, rte_vdev_device);
+
+static struct vdev_device_list vdev_device_list =
+ TAILQ_HEAD_INITIALIZER(vdev_device_list);
struct vdev_driver_list vdev_driver_list =
TAILQ_HEAD_INITIALIZER(vdev_driver_list);
+static void rte_vdev_bus_register(void);
+
/* register a driver */
void
-rte_eal_vdrv_register(struct rte_vdev_driver *driver)
+rte_vdev_register(struct rte_vdev_driver *driver)
{
+ rte_vdev_bus_register();
+
TAILQ_INSERT_TAIL(&vdev_driver_list, driver, next);
- rte_eal_driver_register(&driver->driver);
}
/* unregister a driver */
void
-rte_eal_vdrv_unregister(struct rte_vdev_driver *driver)
+rte_vdev_unregister(struct rte_vdev_driver *driver)
{
- rte_eal_driver_unregister(&driver->driver);
TAILQ_REMOVE(&vdev_driver_list, driver, next);
}
-int
-rte_eal_vdev_init(const char *name, const char *args)
+/*
+ * Parse "driver" devargs without adding a dependency on rte_kvargs.h
+ */
+static char *parse_driver_arg(const char *args)
+{
+ const char *c;
+ char *str;
+
+ if (!args || args[0] == '\0')
+ return NULL;
+
+ c = args;
+
+ do {
+ if (strncmp(c, "driver=", 7) == 0) {
+ c += 7;
+ break;
+ }
+
+ c = strchr(c, ',');
+ if (c)
+ c++;
+ } while (c);
+
+ if (c)
+ str = strdup(c);
+ else
+ str = NULL;
+
+ return str;
+}
+
+static int
+vdev_probe_all_drivers(struct rte_vdev_device *dev)
{
+ const char *name;
+ char *drv_name;
struct rte_vdev_driver *driver;
+ int ret = 1;
- if (name == NULL)
- return -EINVAL;
+ drv_name = parse_driver_arg(rte_vdev_device_args(dev));
+ name = drv_name ? drv_name : rte_vdev_device_name(dev);
+
+ RTE_LOG(DEBUG, EAL, "Search driver %s to probe device %s\n", name,
+ rte_vdev_device_name(dev));
TAILQ_FOREACH(driver, &vdev_driver_list, next) {
/*
@@ -75,50 +124,235 @@ rte_eal_vdev_init(const char *name, const char *args)
* So use strncmp to compare.
*/
if (!strncmp(driver->driver.name, name,
- strlen(driver->driver.name)))
- return driver->probe(name, args);
+ strlen(driver->driver.name))) {
+ dev->device.driver = &driver->driver;
+ ret = driver->probe(dev);
+ if (ret)
+ dev->device.driver = NULL;
+ goto out;
+ }
}
/* Give new names precedence over aliases. */
TAILQ_FOREACH(driver, &vdev_driver_list, next) {
if (driver->driver.alias &&
!strncmp(driver->driver.alias, name,
- strlen(driver->driver.alias)))
- return driver->probe(name, args);
+ strlen(driver->driver.alias))) {
+ dev->device.driver = &driver->driver;
+ ret = driver->probe(dev);
+ if (ret)
+ dev->device.driver = NULL;
+ break;
+ }
+ }
+
+out:
+ free(drv_name);
+ return ret;
+}
+
+static struct rte_vdev_device *
+find_vdev(const char *name)
+{
+ struct rte_vdev_device *dev;
+
+ if (!name)
+ return NULL;
+
+ TAILQ_FOREACH(dev, &vdev_device_list, next) {
+ const char *devname = rte_vdev_device_name(dev);
+ if (!strncmp(devname, name, strlen(name)))
+ return dev;
}
- RTE_LOG(ERR, EAL, "no driver found for %s\n", name);
- return -EINVAL;
+ return NULL;
+}
+
+static struct rte_devargs *
+alloc_devargs(const char *name, const char *args)
+{
+ struct rte_devargs *devargs;
+ int ret;
+
+ devargs = calloc(1, sizeof(*devargs));
+ if (!devargs)
+ return NULL;
+
+ devargs->type = RTE_DEVTYPE_VIRTUAL;
+ if (args)
+ devargs->args = strdup(args);
+
+ ret = snprintf(devargs->virt.drv_name,
+ sizeof(devargs->virt.drv_name), "%s", name);
+ if (ret < 0 || ret >= (int)sizeof(devargs->virt.drv_name)) {
+ free(devargs->args);
+ free(devargs);
+ return NULL;
+ }
+
+ return devargs;
}
int
-rte_eal_vdev_uninit(const char *name)
+rte_vdev_init(const char *name, const char *args)
{
- struct rte_vdev_driver *driver;
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ int ret;
if (name == NULL)
return -EINVAL;
- TAILQ_FOREACH(driver, &vdev_driver_list, next) {
- /*
- * search a driver prefix in virtual device name.
- * For example, if the driver is pcap PMD, driver->name
- * will be "net_pcap", but "name" will be "net_pcapN".
- * So use strncmp to compare.
- */
- if (!strncmp(driver->driver.name, name,
- strlen(driver->driver.name)))
- return driver->remove(name);
+ dev = find_vdev(name);
+ if (dev)
+ return -EEXIST;
+
+ devargs = alloc_devargs(name, args);
+ if (!devargs)
+ return -ENOMEM;
+
+ dev = calloc(1, sizeof(*dev));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto fail;
}
- /* Give new names precedence over aliases. */
- TAILQ_FOREACH(driver, &vdev_driver_list, next) {
- if (driver->driver.alias &&
- !strncmp(driver->driver.alias, name,
- strlen(driver->driver.alias)))
- return driver->remove(name);
+ dev->device.devargs = devargs;
+ dev->device.numa_node = SOCKET_ID_ANY;
+ dev->device.name = devargs->virt.drv_name;
+
+ ret = vdev_probe_all_drivers(dev);
+ if (ret) {
+ if (ret > 0)
+ RTE_LOG(ERR, EAL, "no driver found for %s\n", name);
+ goto fail;
}
- RTE_LOG(ERR, EAL, "no driver found for %s\n", name);
- return -EINVAL;
+ TAILQ_INSERT_TAIL(&devargs_list, devargs, next);
+
+ TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
+ return 0;
+
+fail:
+ free(devargs->args);
+ free(devargs);
+ free(dev);
+ return ret;
+}
+
+static int
+vdev_remove_driver(struct rte_vdev_device *dev)
+{
+ const char *name = rte_vdev_device_name(dev);
+ const struct rte_vdev_driver *driver;
+
+ if (!dev->device.driver) {
+ RTE_LOG(DEBUG, EAL, "no driver attach to device %s\n", name);
+ return 1;
+ }
+
+ driver = container_of(dev->device.driver, const struct rte_vdev_driver,
+ driver);
+ return driver->remove(dev);
+}
+
+int
+rte_vdev_uninit(const char *name)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ dev = find_vdev(name);
+ if (!dev)
+ return -ENOENT;
+
+ devargs = dev->device.devargs;
+
+ ret = vdev_remove_driver(dev);
+ if (ret)
+ return ret;
+
+ TAILQ_REMOVE(&vdev_device_list, dev, next);
+
+ TAILQ_REMOVE(&devargs_list, devargs, next);
+
+ free(devargs->args);
+ free(devargs);
+ free(dev);
+ return 0;
+}
+
+static int
+vdev_scan(void)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+
+ /* for virtual devices we scan the devargs_list populated via cmdline */
+
+ TAILQ_FOREACH(devargs, &devargs_list, next) {
+
+ if (devargs->type != RTE_DEVTYPE_VIRTUAL)
+ continue;
+
+ dev = find_vdev(devargs->virt.drv_name);
+ if (dev)
+ continue;
+
+ dev = calloc(1, sizeof(*dev));
+ if (!dev)
+ return -1;
+
+ dev->device.devargs = devargs;
+ dev->device.numa_node = SOCKET_ID_ANY;
+ dev->device.name = devargs->virt.drv_name;
+
+ TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
+ }
+
+ return 0;
+}
+
+static int
+vdev_probe(void)
+{
+ struct rte_vdev_device *dev;
+
+ /* call the init function for each virtual device */
+ TAILQ_FOREACH(dev, &vdev_device_list, next) {
+
+ if (dev->device.driver)
+ continue;
+
+ if (vdev_probe_all_drivers(dev)) {
+ RTE_LOG(ERR, EAL, "failed to initialize %s device\n",
+ rte_vdev_device_name(dev));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static struct rte_bus rte_vdev_bus = {
+ .scan = vdev_scan,
+ .probe = vdev_probe,
+};
+
+RTE_INIT(rte_vdev_bus_register);
+
+static void rte_vdev_bus_register(void)
+{
+ static int registered;
+
+ if (registered)
+ return;
+
+ registered = 1;
+ rte_vdev_bus.name = RTE_STR(virtual);
+ rte_bus_register(&rte_vdev_bus);
}
diff --git a/lib/librte_eal/common/eal_internal_cfg.h b/lib/librte_eal/common/eal_internal_cfg.h
index 5f1367eb..7b7e8c88 100644
--- a/lib/librte_eal/common/eal_internal_cfg.h
+++ b/lib/librte_eal/common/eal_internal_cfg.h
@@ -78,7 +78,6 @@ struct internal_config {
volatile uint64_t socket_mem[RTE_MAX_NUMA_NODES]; /**< amount of memory per socket */
uintptr_t base_virtaddr; /**< base address to try and reserve memory from */
volatile int syslog_facility; /**< facility passed to openlog() */
- volatile uint32_t log_level; /**< default log level */
/** default interrupt mode for VFIO */
volatile enum rte_intr_mode vfio_intr_mode;
const char *hugefile_prefix; /**< the base filename of hugetlbfs files */
diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
index 9e7d8f6b..6cacce07 100644
--- a/lib/librte_eal/common/eal_private.h
+++ b/lib/librte_eal/common/eal_private.h
@@ -34,6 +34,7 @@
#ifndef _EAL_PRIVATE_H_
#define _EAL_PRIVATE_H_
+#include <stdbool.h>
#include <stdio.h>
#include <rte_pci.h>
@@ -108,18 +109,43 @@ int rte_eal_timer_init(void);
*/
int rte_eal_log_init(const char *id, int facility);
+struct rte_pci_driver;
+struct rte_pci_device;
+
/**
- * Init the PCI infrastructure
- *
- * This function is private to EAL.
+ * Add a PCI device to the PCI Bus (append to PCI Device list). This function
+ * also updates the bus references of the PCI Device (and the generic device
+ * object embedded within.
*
- * @return
- * 0 on success, negative on error
+ * @param pci_dev
+ * PCI device to add
+ * @return void
*/
-int rte_eal_pci_init(void);
+void rte_pci_add_device(struct rte_pci_device *pci_dev);
-struct rte_pci_driver;
-struct rte_pci_device;
+/**
+ * Insert a PCI device in the PCI Bus at a particular location in the device
+ * list. It also updates the PCI Bus reference of the new devices to be
+ * inserted.
+ *
+ * @param exist_pci_dev
+ * Existing PCI device in PCI Bus
+ * @param new_pci_dev
+ * PCI device to be added before exist_pci_dev
+ * @return void
+ */
+void rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
+ struct rte_pci_device *new_pci_dev);
+
+/**
+ * Remove a PCI device from the PCI Bus. This sets to NULL the bus references
+ * in the PCI device object as well as the generic device object.
+ *
+ * @param pci_device
+ * PCI device to be removed from PCI Bus
+ * @return void
+ */
+void rte_pci_remove_device(struct rte_pci_device *pci_device);
/**
* Update a pci device object by asking the kernel for the latest information.
@@ -301,4 +327,15 @@ int rte_eal_hugepage_init(void);
*/
int rte_eal_hugepage_attach(void);
+/**
+ * Returns true if the system is able to obtain
+ * physical addresses. Return false if using DMA
+ * addresses through an IOMMU.
+ *
+ * Drivers based on uio will not load unless physical
+ * addresses are obtainable. It is only possible to get
+ * physical addresses when running as a privileged user.
+ */
+bool rte_eal_using_phys_addrs(void);
+
#endif /* _EAL_PRIVATE_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_atomic.h b/lib/librte_eal/common/include/arch/arm/rte_atomic.h
index 454a12b0..f3f3b6e3 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_atomic.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_atomic.h
@@ -39,10 +39,4 @@
#include <rte_atomic_32.h>
#endif
-#define rte_smp_mb() rte_mb()
-
-#define rte_smp_wmb() rte_wmb()
-
-#define rte_smp_rmb() rte_rmb()
-
#endif /* _RTE_ATOMIC_ARM_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_atomic_32.h b/lib/librte_eal/common/include/arch/arm/rte_atomic_32.h
index 9ae1e78b..14c04864 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_atomic_32.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_atomic_32.h
@@ -67,6 +67,18 @@ extern "C" {
*/
#define rte_rmb() __sync_synchronize()
+#define rte_smp_mb() rte_mb()
+
+#define rte_smp_wmb() rte_wmb()
+
+#define rte_smp_rmb() rte_rmb()
+
+#define rte_io_mb() rte_mb()
+
+#define rte_io_wmb() rte_wmb()
+
+#define rte_io_rmb() rte_rmb()
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h b/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
index 671caa76..dc3a0f3b 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_atomic_64.h
@@ -43,43 +43,26 @@ extern "C" {
#include "generic/rte_atomic.h"
-#define dmb(opt) do { asm volatile("dmb " #opt : : : "memory"); } while (0)
+#define dsb(opt) { asm volatile("dsb " #opt : : : "memory"); }
+#define dmb(opt) { asm volatile("dmb " #opt : : : "memory"); }
-/**
- * General memory barrier.
- *
- * Guarantees that the LOAD and STORE operations generated before the
- * barrier occur before the LOAD and STORE operations generated after.
- * This function is architecture dependent.
- */
-static inline void rte_mb(void)
-{
- dmb(ish);
-}
+#define rte_mb() dsb(sy)
-/**
- * Write memory barrier.
- *
- * Guarantees that the STORE operations generated before the barrier
- * occur before the STORE operations generated after.
- * This function is architecture dependent.
- */
-static inline void rte_wmb(void)
-{
- dmb(ishst);
-}
+#define rte_wmb() dsb(st)
-/**
- * Read memory barrier.
- *
- * Guarantees that the LOAD operations generated before the barrier
- * occur before the LOAD operations generated after.
- * This function is architecture dependent.
- */
-static inline void rte_rmb(void)
-{
- dmb(ishld);
-}
+#define rte_rmb() dsb(ld)
+
+#define rte_smp_mb() dmb(ish)
+
+#define rte_smp_wmb() dmb(ishst)
+
+#define rte_smp_rmb() dmb(ishld)
+
+#define rte_io_mb() rte_mb()
+
+#define rte_io_wmb() rte_wmb()
+
+#define rte_io_rmb() rte_rmb()
#ifdef __cplusplus
}
diff --git a/lib/librte_eal/common/include/arch/tile/rte_cpuflags.h b/lib/librte_eal/common/include/arch/arm/rte_io.h
index 1849b520..9593b424 100644
--- a/lib/librte_eal/common/include/arch/tile/rte_cpuflags.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_io.h
@@ -1,7 +1,8 @@
/*
* BSD LICENSE
*
- * Copyright (C) EZchip Semiconductor Ltd. 2015.
+ * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of EZchip Semiconductor nor the names of its
+ * * Neither the name of Cavium networks nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -28,26 +29,23 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+ */
-#ifndef _RTE_CPUFLAGS_TILE_H_
-#define _RTE_CPUFLAGS_TILE_H_
+#ifndef _RTE_IO_ARM_H_
+#define _RTE_IO_ARM_H_
#ifdef __cplusplus
extern "C" {
#endif
-/**
- * Enumeration of all CPU features supported
- */
-enum rte_cpu_flag_t {
- RTE_CPUFLAG_NUMFLAGS /**< This should always be the last! */
-};
-
-#include "generic/rte_cpuflags.h"
+#ifdef RTE_ARCH_64
+#include "rte_io_64.h"
+#else
+#include "generic/rte_io.h"
+#endif
#ifdef __cplusplus
}
#endif
-#endif /* _RTE_CPUFLAGS_TILE_H_ */
+#endif /* _RTE_IO_ARM_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_io_64.h b/lib/librte_eal/common/include/arch/arm/rte_io_64.h
new file mode 100644
index 00000000..0402125b
--- /dev/null
+++ b/lib/librte_eal/common/include/arch/arm/rte_io_64.h
@@ -0,0 +1,199 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_IO_ARM64_H_
+#define _RTE_IO_ARM64_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#define RTE_OVERRIDE_IO_H
+
+#include "generic/rte_io.h"
+#include "rte_atomic_64.h"
+
+static inline uint8_t __attribute__((always_inline))
+rte_read8_relaxed(const volatile void *addr)
+{
+ uint8_t val;
+
+ asm volatile(
+ "ldrb %w[val], [%x[addr]]"
+ : [val] "=r" (val)
+ : [addr] "r" (addr));
+ return val;
+}
+
+static inline uint16_t __attribute__((always_inline))
+rte_read16_relaxed(const volatile void *addr)
+{
+ uint16_t val;
+
+ asm volatile(
+ "ldrh %w[val], [%x[addr]]"
+ : [val] "=r" (val)
+ : [addr] "r" (addr));
+ return val;
+}
+
+static inline uint32_t __attribute__((always_inline))
+rte_read32_relaxed(const volatile void *addr)
+{
+ uint32_t val;
+
+ asm volatile(
+ "ldr %w[val], [%x[addr]]"
+ : [val] "=r" (val)
+ : [addr] "r" (addr));
+ return val;
+}
+
+static inline uint64_t __attribute__((always_inline))
+rte_read64_relaxed(const volatile void *addr)
+{
+ uint64_t val;
+
+ asm volatile(
+ "ldr %x[val], [%x[addr]]"
+ : [val] "=r" (val)
+ : [addr] "r" (addr));
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+rte_write8_relaxed(uint8_t val, volatile void *addr)
+{
+ asm volatile(
+ "strb %w[val], [%x[addr]]"
+ :
+ : [val] "r" (val), [addr] "r" (addr));
+}
+
+static inline void __attribute__((always_inline))
+rte_write16_relaxed(uint16_t val, volatile void *addr)
+{
+ asm volatile(
+ "strh %w[val], [%x[addr]]"
+ :
+ : [val] "r" (val), [addr] "r" (addr));
+}
+
+static inline void __attribute__((always_inline))
+rte_write32_relaxed(uint32_t val, volatile void *addr)
+{
+ asm volatile(
+ "str %w[val], [%x[addr]]"
+ :
+ : [val] "r" (val), [addr] "r" (addr));
+}
+
+static inline void __attribute__((always_inline))
+rte_write64_relaxed(uint64_t val, volatile void *addr)
+{
+ asm volatile(
+ "str %x[val], [%x[addr]]"
+ :
+ : [val] "r" (val), [addr] "r" (addr));
+}
+
+static inline uint8_t __attribute__((always_inline))
+rte_read8(const volatile void *addr)
+{
+ uint8_t val;
+ val = rte_read8_relaxed(addr);
+ rte_io_rmb();
+ return val;
+}
+
+static inline uint16_t __attribute__((always_inline))
+rte_read16(const volatile void *addr)
+{
+ uint16_t val;
+ val = rte_read16_relaxed(addr);
+ rte_io_rmb();
+ return val;
+}
+
+static inline uint32_t __attribute__((always_inline))
+rte_read32(const volatile void *addr)
+{
+ uint32_t val;
+ val = rte_read32_relaxed(addr);
+ rte_io_rmb();
+ return val;
+}
+
+static inline uint64_t __attribute__((always_inline))
+rte_read64(const volatile void *addr)
+{
+ uint64_t val;
+ val = rte_read64_relaxed(addr);
+ rte_io_rmb();
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+rte_write8(uint8_t value, volatile void *addr)
+{
+ rte_io_wmb();
+ rte_write8_relaxed(value, addr);
+}
+
+static inline void __attribute__((always_inline))
+rte_write16(uint16_t value, volatile void *addr)
+{
+ rte_io_wmb();
+ rte_write16_relaxed(value, addr);
+}
+
+static inline void __attribute__((always_inline))
+rte_write32(uint32_t value, volatile void *addr)
+{
+ rte_io_wmb();
+ rte_write32_relaxed(value, addr);
+}
+
+static inline void __attribute__((always_inline))
+rte_write64(uint64_t value, volatile void *addr)
+{
+ rte_io_wmb();
+ rte_write64_relaxed(value, addr);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IO_ARM64_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_vect.h b/lib/librte_eal/common/include/arch/arm/rte_vect.h
index b86c2cf5..4107c998 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_vect.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_vect.h
@@ -34,6 +34,7 @@
#define _RTE_VECT_ARM_H_
#include <stdint.h>
+#include "generic/rte_vect.h"
#include "arm_neon.h"
#ifdef __cplusplus
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
index fb4fccb4..150810cd 100644
--- a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
@@ -87,6 +87,12 @@ extern "C" {
#define rte_smp_rmb() rte_rmb()
+#define rte_io_mb() rte_mb()
+
+#define rte_io_wmb() rte_wmb()
+
+#define rte_io_rmb() rte_rmb()
+
/*------------------------- 16 bit atomic operations -------------------------*/
/* To be compatible with Power7, use GCC built-in functions for 16 bit
* operations */
diff --git a/lib/librte_eal/common/arch/tile/rte_cpuflags.c b/lib/librte_eal/common/include/arch/ppc_64/rte_io.h
index a2b6c51a..be192da7 100644
--- a/lib/librte_eal/common/arch/tile/rte_cpuflags.c
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_io.h
@@ -1,7 +1,8 @@
/*
* BSD LICENSE
*
- * Copyright (C) EZchip Semiconductor Ltd. 2015.
+ * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of EZchip Semiconductor nor the names of its
+ * * Neither the name of Cavium networks nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -28,20 +29,19 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+ */
-#include "rte_cpuflags.h"
+#ifndef _RTE_IO_PPC_64_H_
+#define _RTE_IO_PPC_64_H_
-#include <errno.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
-const struct feature_entry rte_cpu_feature_table[] = {
-};
+#include "generic/rte_io.h"
-/*
- * Checks if a particular flag is available on current machine.
- */
-int
-rte_cpu_get_flag_enabled(__attribute__((unused)) enum rte_cpu_flag_t feature)
-{
- return -ENOENT;
+#ifdef __cplusplus
}
+#endif
+
+#endif /* _RTE_IO_PPC_64_H_ */
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_vect.h b/lib/librte_eal/common/include/arch/ppc_64/rte_vect.h
index 05209e52..99586e58 100644
--- a/lib/librte_eal/common/include/arch/ppc_64/rte_vect.h
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_vect.h
@@ -34,6 +34,7 @@
#define _RTE_VECT_PPC_64_H_
#include <altivec.h>
+#include "generic/rte_vect.h"
#ifdef __cplusplus
extern "C" {
diff --git a/lib/librte_eal/common/include/arch/tile/rte_byteorder.h b/lib/librte_eal/common/include/arch/tile/rte_byteorder.h
deleted file mode 100644
index 7239e437..00000000
--- a/lib/librte_eal/common/include/arch/tile/rte_byteorder.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) EZchip Semiconductor Ltd. 2015.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of EZchip Semiconductor nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef _RTE_BYTEORDER_TILE_H_
-#define _RTE_BYTEORDER_TILE_H_
-
-#ifndef RTE_FORCE_INTRINSICS
-# error Platform must be built with CONFIG_RTE_FORCE_INTRINSICS
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_byteorder.h"
-
-#if !(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))
-#define rte_bswap16(x) rte_constant_bswap16(x)
-#endif
-
-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
-
-#define rte_cpu_to_le_16(x) (x)
-#define rte_cpu_to_le_32(x) (x)
-#define rte_cpu_to_le_64(x) (x)
-
-#define rte_cpu_to_be_16(x) rte_bswap16(x)
-#define rte_cpu_to_be_32(x) rte_bswap32(x)
-#define rte_cpu_to_be_64(x) rte_bswap64(x)
-
-#define rte_le_to_cpu_16(x) (x)
-#define rte_le_to_cpu_32(x) (x)
-#define rte_le_to_cpu_64(x) (x)
-
-#define rte_be_to_cpu_16(x) rte_bswap16(x)
-#define rte_be_to_cpu_32(x) rte_bswap32(x)
-#define rte_be_to_cpu_64(x) rte_bswap64(x)
-
-#else /* RTE_BIG_ENDIAN */
-
-#define rte_cpu_to_le_16(x) rte_bswap16(x)
-#define rte_cpu_to_le_32(x) rte_bswap32(x)
-#define rte_cpu_to_le_64(x) rte_bswap64(x)
-
-#define rte_cpu_to_be_16(x) (x)
-#define rte_cpu_to_be_32(x) (x)
-#define rte_cpu_to_be_64(x) (x)
-
-#define rte_le_to_cpu_16(x) rte_bswap16(x)
-#define rte_le_to_cpu_32(x) rte_bswap32(x)
-#define rte_le_to_cpu_64(x) rte_bswap64(x)
-
-#define rte_be_to_cpu_16(x) (x)
-#define rte_be_to_cpu_32(x) (x)
-#define rte_be_to_cpu_64(x) (x)
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_BYTEORDER_TILE_H_ */
diff --git a/lib/librte_eal/common/include/arch/tile/rte_prefetch.h b/lib/librte_eal/common/include/arch/tile/rte_prefetch.h
deleted file mode 100644
index 7a1bb93e..00000000
--- a/lib/librte_eal/common/include/arch/tile/rte_prefetch.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) EZchip Semiconductor Ltd. 2015.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of EZchip Semiconductor nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef _RTE_PREFETCH_TILE_H_
-#define _RTE_PREFETCH_TILE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_prefetch.h"
-
-static inline void rte_prefetch0(const volatile void *p)
-{
- __builtin_prefetch((const void *)(uintptr_t)p, 0, 3);
-}
-
-static inline void rte_prefetch1(const volatile void *p)
-{
- __builtin_prefetch((const void *)(uintptr_t)p, 0, 2);
-}
-
-static inline void rte_prefetch2(const volatile void *p)
-{
- __builtin_prefetch((const void *)(uintptr_t)p, 0, 1);
-}
-
-static inline void rte_prefetch_non_temporal(const volatile void *p)
-{
- /* non-temporal version not available, fallback to rte_prefetch0 */
- rte_prefetch0(p);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_PREFETCH_TILE_H_ */
diff --git a/lib/librte_eal/common/include/arch/tile/rte_rwlock.h b/lib/librte_eal/common/include/arch/tile/rte_rwlock.h
deleted file mode 100644
index 8f67a190..00000000
--- a/lib/librte_eal/common/include/arch/tile/rte_rwlock.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) EZchip Semiconductor Ltd. 2015.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of EZchip Semiconductor nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#ifndef _RTE_RWLOCK_TILE_H_
-#define _RTE_RWLOCK_TILE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include "generic/rte_rwlock.h"
-
-static inline void
-rte_rwlock_read_lock_tm(rte_rwlock_t *rwl)
-{
- rte_rwlock_read_lock(rwl);
-}
-
-static inline void
-rte_rwlock_read_unlock_tm(rte_rwlock_t *rwl)
-{
- rte_rwlock_read_unlock(rwl);
-}
-
-static inline void
-rte_rwlock_write_lock_tm(rte_rwlock_t *rwl)
-{
- rte_rwlock_write_lock(rwl);
-}
-
-static inline void
-rte_rwlock_write_unlock_tm(rte_rwlock_t *rwl)
-{
- rte_rwlock_write_unlock(rwl);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_RWLOCK_TILE_H_ */
diff --git a/lib/librte_eal/common/include/arch/x86/rte_atomic.h b/lib/librte_eal/common/include/arch/x86/rte_atomic.h
index 00b1cdf5..4eac6663 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_atomic.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_atomic.h
@@ -61,6 +61,12 @@ extern "C" {
#define rte_smp_rmb() rte_compiler_barrier()
+#define rte_io_mb() rte_mb()
+
+#define rte_io_wmb() rte_compiler_barrier()
+
+#define rte_io_rmb() rte_compiler_barrier()
+
/*------------------------- 16 bit atomic operations -------------------------*/
#ifndef RTE_FORCE_INTRINSICS
diff --git a/lib/librte_eal/common/include/arch/tile/rte_cycles.h b/lib/librte_eal/common/include/arch/x86/rte_io.h
index 0b2200a3..c8d14043 100644
--- a/lib/librte_eal/common/include/arch/tile/rte_cycles.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_io.h
@@ -1,7 +1,8 @@
/*
* BSD LICENSE
*
- * Copyright (C) EZchip Semiconductor Ltd. 2015.
+ * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -13,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of EZchip Semiconductor nor the names of its
+ * * Neither the name of Cavium networks nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -28,43 +29,19 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+ */
-#ifndef _RTE_CYCLES_TILE_H_
-#define _RTE_CYCLES_TILE_H_
+#ifndef _RTE_IO_X86_H_
+#define _RTE_IO_X86_H_
#ifdef __cplusplus
extern "C" {
#endif
-#include <arch/cycle.h>
-
-#include "generic/rte_cycles.h"
-
-/**
- * Read the time base register.
- *
- * @return
- * The time base for this lcore.
- */
-static inline uint64_t
-rte_rdtsc(void)
-{
- return get_cycle_count();
-}
-
-static inline uint64_t
-rte_rdtsc_precise(void)
-{
- rte_mb();
- return rte_rdtsc();
-}
-
-static inline uint64_t
-rte_get_tsc_cycles(void) { return rte_rdtsc(); }
+#include "generic/rte_io.h"
#ifdef __cplusplus
}
#endif
-#endif /* _RTE_CYCLES_TILE_H_ */
+#endif /* _RTE_IO_X86_H_ */
diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
index b3bfc235..b9785e85 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
@@ -69,6 +69,8 @@ rte_memcpy(void *dst, const void *src, size_t n) __attribute__((always_inline));
#ifdef RTE_MACHINE_CPUFLAG_AVX512F
+#define ALIGNMENT_MASK 0x3F
+
/**
* AVX512 implementation below
*/
@@ -189,7 +191,7 @@ rte_mov512blocks(uint8_t *dst, const uint8_t *src, size_t n)
}
static inline void *
-rte_memcpy(void *dst, const void *src, size_t n)
+rte_memcpy_generic(void *dst, const void *src, size_t n)
{
uintptr_t dstu = (uintptr_t)dst;
uintptr_t srcu = (uintptr_t)src;
@@ -308,6 +310,8 @@ COPY_BLOCK_128_BACK63:
#elif defined RTE_MACHINE_CPUFLAG_AVX2
+#define ALIGNMENT_MASK 0x1F
+
/**
* AVX2 implementation below
*/
@@ -387,7 +391,7 @@ rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
}
static inline void *
-rte_memcpy(void *dst, const void *src, size_t n)
+rte_memcpy_generic(void *dst, const void *src, size_t n)
{
uintptr_t dstu = (uintptr_t)dst;
uintptr_t srcu = (uintptr_t)src;
@@ -499,6 +503,8 @@ COPY_BLOCK_128_BACK31:
#else /* RTE_MACHINE_CPUFLAG */
+#define ALIGNMENT_MASK 0x0F
+
/**
* SSE & AVX implementation below
*/
@@ -677,7 +683,7 @@ __extension__ ({ \
})
static inline void *
-rte_memcpy(void *dst, const void *src, size_t n)
+rte_memcpy_generic(void *dst, const void *src, size_t n)
{
__m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
uintptr_t dstu = (uintptr_t)dst;
@@ -821,6 +827,75 @@ COPY_BLOCK_64_BACK15:
#endif /* RTE_MACHINE_CPUFLAG */
+static inline void *
+rte_memcpy_aligned(void *dst, const void *src, size_t n)
+{
+ void *ret = dst;
+
+ /* Copy size <= 16 bytes */
+ if (n < 16) {
+ if (n & 0x01) {
+ *(uint8_t *)dst = *(const uint8_t *)src;
+ src = (const uint8_t *)src + 1;
+ dst = (uint8_t *)dst + 1;
+ }
+ if (n & 0x02) {
+ *(uint16_t *)dst = *(const uint16_t *)src;
+ src = (const uint16_t *)src + 1;
+ dst = (uint16_t *)dst + 1;
+ }
+ if (n & 0x04) {
+ *(uint32_t *)dst = *(const uint32_t *)src;
+ src = (const uint32_t *)src + 1;
+ dst = (uint32_t *)dst + 1;
+ }
+ if (n & 0x08)
+ *(uint64_t *)dst = *(const uint64_t *)src;
+
+ return ret;
+ }
+
+ /* Copy 16 <= size <= 32 bytes */
+ if (n <= 32) {
+ rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov16((uint8_t *)dst - 16 + n,
+ (const uint8_t *)src - 16 + n);
+
+ return ret;
+ }
+
+ /* Copy 32 < size <= 64 bytes */
+ if (n <= 64) {
+ rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+ rte_mov32((uint8_t *)dst - 32 + n,
+ (const uint8_t *)src - 32 + n);
+
+ return ret;
+ }
+
+ /* Copy 64 bytes blocks */
+ for (; n >= 64; n -= 64) {
+ rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+ dst = (uint8_t *)dst + 64;
+ src = (const uint8_t *)src + 64;
+ }
+
+ /* Copy whatever left */
+ rte_mov64((uint8_t *)dst - 64 + n,
+ (const uint8_t *)src - 64 + n);
+
+ return ret;
+}
+
+static inline void *
+rte_memcpy(void *dst, const void *src, size_t n)
+{
+ if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
+ return rte_memcpy_aligned(dst, src, n);
+ else
+ return rte_memcpy_generic(dst, src, n);
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eal/common/include/arch/x86/rte_vect.h b/lib/librte_eal/common/include/arch/x86/rte_vect.h
index 77f2e253..1b4b85dd 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_vect.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_vect.h
@@ -31,8 +31,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _RTE_VECT_H_
-#define _RTE_VECT_H_
+#ifndef _RTE_VECT_X86_H_
+#define _RTE_VECT_X86_H_
/**
* @file
@@ -41,6 +41,7 @@
*/
#include <stdint.h>
+#include "generic/rte_vect.h"
#if (defined(__ICC) || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
@@ -133,4 +134,4 @@ __extension__ ({ \
}
#endif
-#endif /* _RTE_VECT_H_ */
+#endif /* _RTE_VECT_X86_H_ */
diff --git a/lib/librte_eal/common/include/generic/rte_atomic.h b/lib/librte_eal/common/include/generic/rte_atomic.h
index 43a704ec..7b81705b 100644
--- a/lib/librte_eal/common/include/generic/rte_atomic.h
+++ b/lib/librte_eal/common/include/generic/rte_atomic.h
@@ -100,6 +100,33 @@ static inline void rte_smp_wmb(void);
*/
static inline void rte_smp_rmb(void);
+/**
+ * General memory barrier for I/O device
+ *
+ * Guarantees that the LOAD and STORE operations that precede the
+ * rte_io_mb() call are visible to I/O device or CPU before the
+ * LOAD and STORE operations that follow it.
+ */
+static inline void rte_io_mb(void);
+
+/**
+ * Write memory barrier for I/O device
+ *
+ * Guarantees that the STORE operations that precede the
+ * rte_io_wmb() call are visible to I/O device before the STORE
+ * operations that follow it.
+ */
+static inline void rte_io_wmb(void);
+
+/**
+ * Read memory barrier for IO device
+ *
+ * Guarantees that the LOAD operations on I/O device that precede the
+ * rte_io_rmb() call are visible to CPU before the LOAD
+ * operations that follow it.
+ */
+static inline void rte_io_rmb(void);
+
#endif /* __DOXYGEN__ */
/**
diff --git a/lib/librte_eal/common/include/generic/rte_cpuflags.h b/lib/librte_eal/common/include/generic/rte_cpuflags.h
index 71321f32..c1c5551f 100644
--- a/lib/librte_eal/common/include/generic/rte_cpuflags.h
+++ b/lib/librte_eal/common/include/generic/rte_cpuflags.h
@@ -39,6 +39,7 @@
* Architecture specific API to determine available CPU features at runtime.
*/
+#include "rte_common.h"
#include <errno.h>
/**
@@ -79,7 +80,17 @@ rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature);
* that were specified at compile time. It is called automatically within the
* EAL, so does not need to be used by applications.
*/
+__rte_deprecated
void
rte_cpu_check_supported(void);
+/**
+ * This function checks that the currently used CPU supports the CPU features
+ * that were specified at compile time. It is called automatically within the
+ * EAL, so does not need to be used by applications. This version returns a
+ * result so that decisions may be made (for instance, graceful shutdowns).
+ */
+int
+rte_cpu_is_supported(void);
+
#endif /* _RTE_CPUFLAGS_H_ */
diff --git a/lib/librte_eal/common/include/generic/rte_cycles.h b/lib/librte_eal/common/include/generic/rte_cycles.h
index 00103ca9..0e645c2c 100644
--- a/lib/librte_eal/common/include/generic/rte_cycles.h
+++ b/lib/librte_eal/common/include/generic/rte_cycles.h
@@ -150,15 +150,17 @@ int rte_eal_hpet_init(int make_default);
static inline uint64_t
rte_get_timer_cycles(void)
{
+#ifdef RTE_LIBEAL_USE_HPET
switch(eal_timer_source) {
case EAL_TIMER_TSC:
+#endif
return rte_get_tsc_cycles();
- case EAL_TIMER_HPET:
#ifdef RTE_LIBEAL_USE_HPET
+ case EAL_TIMER_HPET:
return rte_get_hpet_cycles();
-#endif
default: rte_panic("Invalid timer source specified\n");
}
+#endif
}
/**
@@ -170,15 +172,17 @@ rte_get_timer_cycles(void)
static inline uint64_t
rte_get_timer_hz(void)
{
+#ifdef RTE_LIBEAL_USE_HPET
switch(eal_timer_source) {
case EAL_TIMER_TSC:
+#endif
return rte_get_tsc_hz();
- case EAL_TIMER_HPET:
#ifdef RTE_LIBEAL_USE_HPET
+ case EAL_TIMER_HPET:
return rte_get_hpet_hz();
-#endif
default: rte_panic("Invalid timer source specified\n");
}
+#endif
}
/**
* Wait at least us microseconds.
diff --git a/lib/librte_eal/common/include/generic/rte_io.h b/lib/librte_eal/common/include/generic/rte_io.h
new file mode 100644
index 00000000..d82ee695
--- /dev/null
+++ b/lib/librte_eal/common/include/generic/rte_io.h
@@ -0,0 +1,381 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Cavium networks. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_IO_H_
+#define _RTE_IO_H_
+
+#include <rte_atomic.h>
+
+/**
+ * @file
+ * I/O device memory operations
+ *
+ * This file defines the generic API for I/O device memory read/write operations
+ */
+
+#include <stdint.h>
+#include <rte_common.h>
+#include <rte_atomic.h>
+
+#ifdef __DOXYGEN__
+
+/**
+ * Read a 8-bit value from I/O device memory address *addr*.
+ *
+ * The relaxed version does not have additional I/O memory barrier, useful in
+ * accessing the device registers of integrated controllers which implicitly
+ * strongly ordered with respect to memory access.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint8_t
+rte_read8_relaxed(const volatile void *addr);
+
+/**
+ * Read a 16-bit value from I/O device memory address *addr*.
+ *
+ * The relaxed version does not have additional I/O memory barrier, useful in
+ * accessing the device registers of integrated controllers which implicitly
+ * strongly ordered with respect to memory access.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint16_t
+rte_read16_relaxed(const volatile void *addr);
+
+/**
+ * Read a 32-bit value from I/O device memory address *addr*.
+ *
+ * The relaxed version does not have additional I/O memory barrier, useful in
+ * accessing the device registers of integrated controllers which implicitly
+ * strongly ordered with respect to memory access.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint32_t
+rte_read32_relaxed(const volatile void *addr);
+
+/**
+ * Read a 64-bit value from I/O device memory address *addr*.
+ *
+ * The relaxed version does not have additional I/O memory barrier, useful in
+ * accessing the device registers of integrated controllers which implicitly
+ * strongly ordered with respect to memory access.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint64_t
+rte_read64_relaxed(const volatile void *addr);
+
+/**
+ * Write a 8-bit value to I/O device memory address *addr*.
+ *
+ * The relaxed version does not have additional I/O memory barrier, useful in
+ * accessing the device registers of integrated controllers which implicitly
+ * strongly ordered with respect to memory access.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+
+static inline void
+rte_write8_relaxed(uint8_t value, volatile void *addr);
+
+/**
+ * Write a 16-bit value to I/O device memory address *addr*.
+ *
+ * The relaxed version does not have additional I/O memory barrier, useful in
+ * accessing the device registers of integrated controllers which implicitly
+ * strongly ordered with respect to memory access.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+static inline void
+rte_write16_relaxed(uint16_t value, volatile void *addr);
+
+/**
+ * Write a 32-bit value to I/O device memory address *addr*.
+ *
+ * The relaxed version does not have additional I/O memory barrier, useful in
+ * accessing the device registers of integrated controllers which implicitly
+ * strongly ordered with respect to memory access.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+static inline void
+rte_write32_relaxed(uint32_t value, volatile void *addr);
+
+/**
+ * Write a 64-bit value to I/O device memory address *addr*.
+ *
+ * The relaxed version does not have additional I/O memory barrier, useful in
+ * accessing the device registers of integrated controllers which implicitly
+ * strongly ordered with respect to memory access.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+static inline void
+rte_write64_relaxed(uint64_t value, volatile void *addr);
+
+/**
+ * Read a 8-bit value from I/O device memory address *addr*.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint8_t
+rte_read8(const volatile void *addr);
+
+/**
+ * Read a 16-bit value from I/O device memory address *addr*.
+ *
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint16_t
+rte_read16(const volatile void *addr);
+
+/**
+ * Read a 32-bit value from I/O device memory address *addr*.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint32_t
+rte_read32(const volatile void *addr);
+
+/**
+ * Read a 64-bit value from I/O device memory address *addr*.
+ *
+ * @param addr
+ * I/O memory address to read the value from
+ * @return
+ * read value
+ */
+static inline uint64_t
+rte_read64(const volatile void *addr);
+
+/**
+ * Write a 8-bit value to I/O device memory address *addr*.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+
+static inline void
+rte_write8(uint8_t value, volatile void *addr);
+
+/**
+ * Write a 16-bit value to I/O device memory address *addr*.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+static inline void
+rte_write16(uint16_t value, volatile void *addr);
+
+/**
+ * Write a 32-bit value to I/O device memory address *addr*.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+static inline void
+rte_write32(uint32_t value, volatile void *addr);
+
+/**
+ * Write a 64-bit value to I/O device memory address *addr*.
+ *
+ * @param value
+ * Value to write
+ * @param addr
+ * I/O memory address to write the value to
+ */
+static inline void
+rte_write64(uint64_t value, volatile void *addr);
+
+#endif /* __DOXYGEN__ */
+
+#ifndef RTE_OVERRIDE_IO_H
+
+static inline uint8_t __attribute__((always_inline))
+rte_read8_relaxed(const volatile void *addr)
+{
+ return *(const volatile uint8_t *)addr;
+}
+
+static inline uint16_t __attribute__((always_inline))
+rte_read16_relaxed(const volatile void *addr)
+{
+ return *(const volatile uint16_t *)addr;
+}
+
+static inline uint32_t __attribute__((always_inline))
+rte_read32_relaxed(const volatile void *addr)
+{
+ return *(const volatile uint32_t *)addr;
+}
+
+static inline uint64_t __attribute__((always_inline))
+rte_read64_relaxed(const volatile void *addr)
+{
+ return *(const volatile uint64_t *)addr;
+}
+
+static inline void __attribute__((always_inline))
+rte_write8_relaxed(uint8_t value, volatile void *addr)
+{
+ *(volatile uint8_t *)addr = value;
+}
+
+static inline void __attribute__((always_inline))
+rte_write16_relaxed(uint16_t value, volatile void *addr)
+{
+ *(volatile uint16_t *)addr = value;
+}
+
+static inline void __attribute__((always_inline))
+rte_write32_relaxed(uint32_t value, volatile void *addr)
+{
+ *(volatile uint32_t *)addr = value;
+}
+
+static inline void __attribute__((always_inline))
+rte_write64_relaxed(uint64_t value, volatile void *addr)
+{
+ *(volatile uint64_t *)addr = value;
+}
+
+static inline uint8_t __attribute__((always_inline))
+rte_read8(const volatile void *addr)
+{
+ uint8_t val;
+ val = rte_read8_relaxed(addr);
+ rte_io_rmb();
+ return val;
+}
+
+static inline uint16_t __attribute__((always_inline))
+rte_read16(const volatile void *addr)
+{
+ uint16_t val;
+ val = rte_read16_relaxed(addr);
+ rte_io_rmb();
+ return val;
+}
+
+static inline uint32_t __attribute__((always_inline))
+rte_read32(const volatile void *addr)
+{
+ uint32_t val;
+ val = rte_read32_relaxed(addr);
+ rte_io_rmb();
+ return val;
+}
+
+static inline uint64_t __attribute__((always_inline))
+rte_read64(const volatile void *addr)
+{
+ uint64_t val;
+ val = rte_read64_relaxed(addr);
+ rte_io_rmb();
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+rte_write8(uint8_t value, volatile void *addr)
+{
+ rte_io_wmb();
+ rte_write8_relaxed(value, addr);
+}
+
+static inline void __attribute__((always_inline))
+rte_write16(uint16_t value, volatile void *addr)
+{
+ rte_io_wmb();
+ rte_write16_relaxed(value, addr);
+}
+
+static inline void __attribute__((always_inline))
+rte_write32(uint32_t value, volatile void *addr)
+{
+ rte_io_wmb();
+ rte_write32_relaxed(value, addr);
+}
+
+static inline void __attribute__((always_inline))
+rte_write64(uint64_t value, volatile void *addr)
+{
+ rte_io_wmb();
+ rte_write64_relaxed(value, addr);
+}
+
+#endif /* RTE_OVERRIDE_IO_H */
+
+#endif /* _RTE_IO_H_ */
diff --git a/lib/librte_eal/common/include/generic/rte_vect.h b/lib/librte_eal/common/include/generic/rte_vect.h
new file mode 100644
index 00000000..600ee9f3
--- /dev/null
+++ b/lib/librte_eal/common/include/generic/rte_vect.h
@@ -0,0 +1,214 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_VECT_H_
+#define _RTE_VECT_H_
+
+/**
+ * @file
+ * SIMD vector types
+ *
+ * This file defines types to use vector instructions with generic C code.
+ */
+
+#include <stdint.h>
+
+/* Unsigned vector types */
+
+/**
+ * 64 bits vector size to use with unsigned 8 bits elements.
+ *
+ * a = (rte_v64u8_t){ a0, a1, a2, a3, a4, a5, a6, a7 }
+ */
+typedef uint8_t rte_v64u8_t __attribute__((vector_size(8), aligned(8)));
+
+/**
+ * 64 bits vector size to use with unsigned 16 bits elements.
+ *
+ * a = (rte_v64u16_t){ a0, a1, a2, a3 }
+ */
+typedef uint16_t rte_v64u16_t __attribute__((vector_size(8), aligned(8)));
+
+/**
+ * 64 bits vector size to use with unsigned 32 bits elements.
+ *
+ * a = (rte_v64u32_t){ a0, a1 }
+ */
+typedef uint32_t rte_v64u32_t __attribute__((vector_size(8), aligned(8)));
+
+/**
+ * 128 bits vector size to use with unsigned 8 bits elements.
+ *
+ * a = (rte_v128u8_t){ a00, a01, a02, a03, a04, a05, a06, a07,
+ * a08, a09, a10, a11, a12, a13, a14, a15 }
+ */
+typedef uint8_t rte_v128u8_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 128 bits vector size to use with unsigned 16 bits elements.
+ *
+ * a = (rte_v128u16_t){ a0, a1, a2, a3, a4, a5, a6, a7 }
+ */
+typedef uint16_t rte_v128u16_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 128 bits vector size to use with unsigned 32 bits elements.
+ *
+ * a = (rte_v128u32_t){ a0, a1, a2, a3, a4 }
+ */
+typedef uint32_t rte_v128u32_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 128 bits vector size to use with unsigned 64 bits elements.
+ *
+ * a = (rte_v128u64_t){ a0, a1 }
+ */
+typedef uint64_t rte_v128u64_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 256 bits vector size to use with unsigned 8 bits elements.
+ *
+ * a = (rte_v256u8_t){ a00, a01, a02, a03, a04, a05, a06, a07,
+ * a08, a09, a10, a11, a12, a13, a14, a15,
+ * a16, a17, a18, a19, a20, a21, a22, a23,
+ * a24, a25, a26, a27, a28, a29, a30, a31 }
+ */
+typedef uint8_t rte_v256u8_t __attribute__((vector_size(32), aligned(32)));
+
+/**
+ * 256 bits vector size to use with unsigned 16 bits elements.
+ *
+ * a = (rte_v256u16_t){ a00, a01, a02, a03, a04, a05, a06, a07,
+ * a08, a09, a10, a11, a12, a13, a14, a15 }
+ */
+typedef uint16_t rte_v256u16_t __attribute__((vector_size(32), aligned(32)));
+
+/**
+ * 256 bits vector size to use with unsigned 32 bits elements.
+ *
+ * a = (rte_v256u32_t){ a0, a1, a2, a3, a4, a5, a6, a7 }
+ */
+typedef uint32_t rte_v256u32_t __attribute__((vector_size(32), aligned(32)));
+
+/**
+ * 256 bits vector size to use with unsigned 64 bits elements.
+ *
+ * a = (rte_v256u64_t){ a0, a1, a2, a3 }
+ */
+typedef uint64_t rte_v256u64_t __attribute__((vector_size(32), aligned(32)));
+
+
+/* Signed vector types */
+
+/**
+ * 64 bits vector size to use with 8 bits elements.
+ *
+ * a = (rte_v64s8_t){ a0, a1, a2, a3, a4, a5, a6, a7 }
+ */
+typedef int8_t rte_v64s8_t __attribute__((vector_size(8), aligned(8)));
+
+/**
+ * 64 bits vector size to use with 16 bits elements.
+ *
+ * a = (rte_v64s16_t){ a0, a1, a2, a3 }
+ */
+typedef int16_t rte_v64s16_t __attribute__((vector_size(8), aligned(8)));
+
+/**
+ * 64 bits vector size to use with 32 bits elements.
+ *
+ * a = (rte_v64s32_t){ a0, a1 }
+ */
+typedef int32_t rte_v64s32_t __attribute__((vector_size(8), aligned(8)));
+
+/**
+ * 128 bits vector size to use with 8 bits elements.
+ *
+ * a = (rte_v128s8_t){ a00, a01, a02, a03, a04, a05, a06, a07,
+ * a08, a09, a10, a11, a12, a13, a14, a15 }
+ */
+typedef int8_t rte_v128s8_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 128 bits vector size to use with 16 bits elements.
+ *
+ * a = (rte_v128s16_t){ a0, a1, a2, a3, a4, a5, a6, a7 }
+ */
+typedef int16_t rte_v128s16_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 128 bits vector size to use with 32 bits elements.
+ *
+ * a = (rte_v128s32_t){ a0, a1, a2, a3 }
+ */
+typedef int32_t rte_v128s32_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 128 bits vector size to use with 64 bits elements.
+ *
+ * a = (rte_v128s64_t){ a1, a2 }
+ */
+typedef int64_t rte_v128s64_t __attribute__((vector_size(16), aligned(16)));
+
+/**
+ * 256 bits vector size to use with 8 bits elements.
+ *
+ * a = (rte_v256s8_t){ a00, a01, a02, a03, a04, a05, a06, a07,
+ * a08, a09, a10, a11, a12, a13, a14, a15,
+ * a16, a17, a18, a19, a20, a21, a22, a23,
+ * a24, a25, a26, a27, a28, a29, a30, a31 }
+ */
+typedef int8_t rte_v256s8_t __attribute__((vector_size(32), aligned(32)));
+
+/**
+ * 256 bits vector size to use with 16 bits elements.
+ *
+ * a = (rte_v256s16_t){ a00, a01, a02, a03, a04, a05, a06, a07,
+ * a08, a09, a10, a11, a12, a13, a14, a15 }
+ */
+typedef int16_t rte_v256s16_t __attribute__((vector_size(32), aligned(32)));
+
+/**
+ * 256 bits vector size to use with 32 bits elements.
+ *
+ * a = (rte_v256s32_t){ a0, a1, a2, a3, a4, a5, a6, a7 }
+ */
+typedef int32_t rte_v256s32_t __attribute__((vector_size(32), aligned(32)));
+
+/**
+ * 256 bits vector size to use with 64 bits elements.
+ *
+ * a = (rte_v256s64_t){ a0, a1, a2, a3 }
+ */
+typedef int64_t rte_v256s64_t __attribute__((vector_size(32), aligned(32)));
+
+#endif /* _RTE_VECT_H_ */
diff --git a/lib/librte_eal/common/include/rte_bus.h b/lib/librte_eal/common/include/rte_bus.h
new file mode 100644
index 00000000..7c369692
--- /dev/null
+++ b/lib/librte_eal/common/include/rte_bus.h
@@ -0,0 +1,158 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 NXP
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_BUS_H_
+#define _RTE_BUS_H_
+
+/**
+ * @file
+ *
+ * DPDK device bus interface
+ *
+ * This file exposes API and interfaces for bus abstraction
+ * over the devices and drivers in EAL.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_dev.h>
+
+/** Double linked list of buses */
+TAILQ_HEAD(rte_bus_list, rte_bus);
+
+/**
+ * Bus specific scan for devices attached on the bus.
+ * For each bus object, the scan would be reponsible for finding devices and
+ * adding them to its private device list.
+ *
+ * A bus should mandatorily implement this method.
+ *
+ * @return
+ * 0 for successful scan
+ * <0 for unsuccessful scan with error value
+ */
+typedef int (*rte_bus_scan_t)(void);
+
+/**
+ * Implementation specific probe function which is responsible for linking
+ * devices on that bus with applicable drivers.
+ *
+ * This is called while iterating over each registered bus.
+ *
+ * @return
+ * 0 for successful probe
+ * !0 for any error while probing
+ */
+typedef int (*rte_bus_probe_t)(void);
+
+/**
+ * A structure describing a generic bus.
+ */
+struct rte_bus {
+ TAILQ_ENTRY(rte_bus) next; /**< Next bus object in linked list */
+ const char *name; /**< Name of the bus */
+ rte_bus_scan_t scan; /**< Scan for devices attached to bus */
+ rte_bus_probe_t probe; /**< Probe devices on bus */
+};
+
+/**
+ * Register a Bus handler.
+ *
+ * @param bus
+ * A pointer to a rte_bus structure describing the bus
+ * to be registered.
+ */
+void rte_bus_register(struct rte_bus *bus);
+
+/**
+ * Unregister a Bus handler.
+ *
+ * @param bus
+ * A pointer to a rte_bus structure describing the bus
+ * to be unregistered.
+ */
+void rte_bus_unregister(struct rte_bus *bus);
+
+/**
+ * Scan all the buses.
+ *
+ * @return
+ * 0 in case of success in scanning all buses
+ * !0 in case of failure to scan
+ */
+int rte_bus_scan(void);
+
+/**
+ * For each device on the buses, perform a driver 'match' and call the
+ * driver-specific probe for device initialization.
+ *
+ * @return
+ * 0 for successful match/probe
+ * !0 otherwise
+ */
+int rte_bus_probe(void);
+
+/**
+ * Dump information of all the buses registered with EAL.
+ *
+ * @param f
+ * A valid and open output stream handle
+ *
+ * @return
+ * 0 in case of success
+ * !0 in case there is error in opening the output stream
+ */
+void rte_bus_dump(FILE *f);
+
+/**
+ * Helper for Bus registration.
+ * The constructor has higher priority than PMD constructors.
+ */
+#define RTE_REGISTER_BUS(nm, bus) \
+static void __attribute__((constructor(101), used)) businitfn_ ##nm(void) \
+{\
+ (bus).name = RTE_STR(nm);\
+ rte_bus_register(&bus); \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BUS_H */
diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h
index db5ac91c..e057f6e2 100644
--- a/lib/librte_eal/common/include/rte_common.h
+++ b/lib/librte_eal/common/include/rte_common.h
@@ -331,6 +331,29 @@ rte_bsf32(uint32_t v)
#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER)
#endif
+/**
+ * Return pointer to the wrapping struct instance.
+ *
+ * Example:
+ *
+ * struct wrapper {
+ * ...
+ * struct child c;
+ * ...
+ * };
+ *
+ * struct child *x = obtain(...);
+ * struct wrapper *w = container_of(x, struct wrapper, c);
+ */
+#ifndef container_of
+#define container_of(ptr, type, member) __extension__ ({ \
+ const typeof(((type *)0)->member) *_ptr = (ptr); \
+ __attribute__((unused)) type *_target_ptr = \
+ (type *)(ptr); \
+ (type *)(((uintptr_t)_ptr) - offsetof(type, member)); \
+ })
+#endif
+
#define _RTE_STR(x) #x
/** Take a macro value and get a string version of it */
#define RTE_STR(x) _RTE_STR(x)
diff --git a/lib/librte_eal/common/include/rte_dev.h b/lib/librte_eal/common/include/rte_dev.h
index 8840380d..de20c063 100644
--- a/lib/librte_eal/common/include/rte_dev.h
+++ b/lib/librte_eal/common/include/rte_dev.h
@@ -49,6 +49,7 @@ extern "C" {
#include <stdio.h>
#include <sys/queue.h>
+#include <rte_config.h>
#include <rte_log.h>
__attribute__((format(printf, 2, 0)))
@@ -70,6 +71,19 @@ rte_pmd_debug_trace(const char *func_name, const char *fmt, ...)
rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, "%s: %s", func_name, buffer);
}
+/*
+ * Enable RTE_PMD_DEBUG_TRACE() when at least one component relying on the
+ * RTE_*_RET() macros defined below is compiled in debug mode.
+ */
+#if defined(RTE_LIBRTE_ETHDEV_DEBUG) || \
+ defined(RTE_LIBRTE_CRYPTODEV_DEBUG) || \
+ defined(RTE_LIBRTE_EVENTDEV_DEBUG)
+#define RTE_PMD_DEBUG_TRACE(...) \
+ rte_pmd_debug_trace(__func__, __VA_ARGS__)
+#else
+#define RTE_PMD_DEBUG_TRACE(...) (void)0
+#endif
+
/* Macros for checking for restricting functions to primary instance only */
#define RTE_PROC_PRIMARY_OR_ERR_RET(retval) do { \
if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
@@ -109,40 +123,6 @@ struct rte_mem_resource {
void *addr; /**< Virtual address, NULL when not mapped. */
};
-/** Double linked list of device drivers. */
-TAILQ_HEAD(rte_driver_list, rte_driver);
-/** Double linked list of devices. */
-TAILQ_HEAD(rte_device_list, rte_device);
-
-/* Forward declaration */
-struct rte_driver;
-
-/**
- * A structure describing a generic device.
- */
-struct rte_device {
- TAILQ_ENTRY(rte_device) next; /**< Next device */
- struct rte_driver *driver; /**< Associated driver */
- int numa_node; /**< NUMA node connection */
- struct rte_devargs *devargs; /**< Device user arguments */
-};
-
-/**
- * Insert a device detected by a bus scanning.
- *
- * @param dev
- * A pointer to a rte_device structure describing the detected device.
- */
-void rte_eal_device_insert(struct rte_device *dev);
-
-/**
- * Remove a device (e.g. when being unplugged).
- *
- * @param dev
- * A pointer to a rte_device structure describing the device to be removed.
- */
-void rte_eal_device_remove(struct rte_device *dev);
-
/**
* A structure describing a device driver.
*/
@@ -153,27 +133,15 @@ struct rte_driver {
};
/**
- * Register a device driver.
- *
- * @param driver
- * A pointer to a rte_dev structure describing the driver
- * to be registered.
- */
-void rte_eal_driver_register(struct rte_driver *driver);
-
-/**
- * Unregister a device driver.
- *
- * @param driver
- * A pointer to a rte_dev structure describing the driver
- * to be unregistered.
- */
-void rte_eal_driver_unregister(struct rte_driver *driver);
-
-/**
- * Initalize all the registered drivers in this process
+ * A structure describing a generic device.
*/
-int rte_eal_dev_init(void);
+struct rte_device {
+ TAILQ_ENTRY(rte_device) next; /**< Next device */
+ const char *name; /**< Device name */
+ const struct rte_driver *driver;/**< Associated driver */
+ int numa_node; /**< NUMA node connection */
+ struct rte_devargs *devargs; /**< Device user arguments */
+};
/**
* Initialize a driver specified by name.
@@ -185,7 +153,7 @@ int rte_eal_dev_init(void);
* @return
* 0 on success, negative on error
*/
-int rte_eal_vdev_init(const char *name, const char *args);
+int rte_vdev_init(const char *name, const char *args);
/**
* Uninitalize a driver specified by name.
@@ -195,7 +163,7 @@ int rte_eal_vdev_init(const char *name, const char *args);
* @return
* 0 on success, negative on error
*/
-int rte_eal_vdev_uninit(const char *name);
+int rte_vdev_uninit(const char *name);
/**
* Attach a device to a registered driver.
@@ -239,6 +207,31 @@ RTE_STR(table)
static const char DRV_EXP_TAG(name, param_string_export)[] \
__attribute__((used)) = str
+/**
+ * Advertise the list of kernel modules required to run this driver
+ *
+ * This string lists the kernel modules required for the devices
+ * associated to a PMD. The format of each line of the string is:
+ * "<device-pattern> <kmod-expression>".
+ *
+ * The possible formats for the device pattern are:
+ * "*" all devices supported by this driver
+ * "pci:*" all PCI devices supported by this driver
+ * "pci:v8086:d*:sv*:sd*" all PCI devices supported by this driver
+ * whose vendor id is 0x8086.
+ *
+ * The format of the kernel modules list is a parenthesed expression
+ * containing logical-and (&) and logical-or (|).
+ *
+ * The device pattern and the kmod expression are separated by a space.
+ *
+ * Example:
+ * - "* igb_uio | uio_pci_generic | vfio"
+ */
+#define RTE_PMD_REGISTER_KMOD_DEP(name, str) \
+static const char DRV_EXP_TAG(name, kmod_dep_export)[] \
+__attribute__((used)) = str
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h
index d150b9dd..abf020bf 100644
--- a/lib/librte_eal/common/include/rte_eal.h
+++ b/lib/librte_eal/common/include/rte_eal.h
@@ -146,15 +146,45 @@ int rte_eal_iopl_init(void);
* This behavior may change in the future.
*
* @param argc
- * The argc argument that was given to the main() function.
+ * A non-negative value. If it is greater than 0, the array members
+ * for argv[0] through argv[argc] (non-inclusive) shall contain pointers
+ * to strings.
* @param argv
- * The argv argument that was given to the main() function.
+ * An array of strings. The contents of the array, as well as the strings
+ * which are pointed to by the array, may be modified by this function.
* @return
* - On success, the number of parsed arguments, which is greater or
* equal to zero. After the call to rte_eal_init(),
- * all arguments argv[x] with x < ret may be modified and should
- * not be accessed by the application.
- * - On failure, a negative error value.
+ * all arguments argv[x] with x < ret may have been modified by this
+ * function call and should not be further interpreted by the
+ * application. The EAL does not take any ownership of the memory used
+ * for either the argv array, or its members.
+ * - On failure, -1 and rte_errno is set to a value indicating the cause
+ * for failure. In some instances, the application will need to be
+ * restarted as part of clearing the issue.
+ *
+ * Error codes returned via rte_errno:
+ * EACCES indicates a permissions issue.
+ *
+ * EAGAIN indicates either a bus or system resource was not available,
+ * setup may be attempted again.
+ *
+ * EALREADY indicates that the rte_eal_init function has already been
+ * called, and cannot be called again.
+ *
+ * EFAULT indicates the tailq configuration name was not found in
+ * memory configuration.
+ *
+ * EINVAL indicates invalid parameters were passed as argv/argc.
+ *
+ * ENOMEM indicates failure likely caused by an out-of-memory condition.
+ *
+ * ENODEV indicates memory setup issues.
+ *
+ * ENOTSUP indicates that the EAL cannot initialize on this system.
+ *
+ * EPROTO indicates that the PCI bus is either not present, or is not
+ * readable by the eal.
*/
int rte_eal_init(int argc, char **argv);
diff --git a/lib/librte_eal/common/include/rte_interrupts.h b/lib/librte_eal/common/include/rte_interrupts.h
index fd3c6eff..5d06ed79 100644
--- a/lib/librte_eal/common/include/rte_interrupts.h
+++ b/lib/librte_eal/common/include/rte_interrupts.h
@@ -51,8 +51,7 @@ extern "C" {
struct rte_intr_handle;
/** Function to be registered for the specific interrupt */
-typedef void (*rte_intr_callback_fn)(struct rte_intr_handle *intr_handle,
- void *cb_arg);
+typedef void (*rte_intr_callback_fn)(void *cb_arg);
#include <exec-env/rte_interrupts.h>
@@ -70,7 +69,7 @@ typedef void (*rte_intr_callback_fn)(struct rte_intr_handle *intr_handle,
* - On success, zero.
* - On failure, a negative value.
*/
-int rte_intr_callback_register(struct rte_intr_handle *intr_handle,
+int rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb, void *cb_arg);
/**
@@ -88,7 +87,7 @@ int rte_intr_callback_register(struct rte_intr_handle *intr_handle,
* - On success, return the number of callback entities removed.
* - On failure, a negative value.
*/
-int rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
+int rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb, void *cb_arg);
/**
@@ -101,7 +100,7 @@ int rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
* - On success, zero.
* - On failure, a negative value.
*/
-int rte_intr_enable(struct rte_intr_handle *intr_handle);
+int rte_intr_enable(const struct rte_intr_handle *intr_handle);
/**
* It disables the interrupt for the specified handle.
@@ -113,7 +112,7 @@ int rte_intr_enable(struct rte_intr_handle *intr_handle);
* - On success, zero.
* - On failure, a negative value.
*/
-int rte_intr_disable(struct rte_intr_handle *intr_handle);
+int rte_intr_disable(const struct rte_intr_handle *intr_handle);
#ifdef __cplusplus
}
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index 29f7d192..34191385 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,45 +50,56 @@ extern "C" {
#include <stdio.h>
#include <stdarg.h>
+#include <rte_common.h>
+
+struct rte_log_dynamic_type;
+
/** The rte_log structure. */
struct rte_logs {
uint32_t type; /**< Bitfield with enabled logs. */
uint32_t level; /**< Log level. */
FILE *file; /**< Output file set by rte_openlog_stream, or NULL. */
+ size_t dynamic_types_len;
+ struct rte_log_dynamic_type *dynamic_types;
};
/** Global log informations */
extern struct rte_logs rte_logs;
/* SDK log type */
-#define RTE_LOGTYPE_EAL 0x00000001 /**< Log related to eal. */
-#define RTE_LOGTYPE_MALLOC 0x00000002 /**< Log related to malloc. */
-#define RTE_LOGTYPE_RING 0x00000004 /**< Log related to ring. */
-#define RTE_LOGTYPE_MEMPOOL 0x00000008 /**< Log related to mempool. */
-#define RTE_LOGTYPE_TIMER 0x00000010 /**< Log related to timers. */
-#define RTE_LOGTYPE_PMD 0x00000020 /**< Log related to poll mode driver. */
-#define RTE_LOGTYPE_HASH 0x00000040 /**< Log related to hash table. */
-#define RTE_LOGTYPE_LPM 0x00000080 /**< Log related to LPM. */
-#define RTE_LOGTYPE_KNI 0x00000100 /**< Log related to KNI. */
-#define RTE_LOGTYPE_ACL 0x00000200 /**< Log related to ACL. */
-#define RTE_LOGTYPE_POWER 0x00000400 /**< Log related to power. */
-#define RTE_LOGTYPE_METER 0x00000800 /**< Log related to QoS meter. */
-#define RTE_LOGTYPE_SCHED 0x00001000 /**< Log related to QoS port scheduler. */
-#define RTE_LOGTYPE_PORT 0x00002000 /**< Log related to port. */
-#define RTE_LOGTYPE_TABLE 0x00004000 /**< Log related to table. */
-#define RTE_LOGTYPE_PIPELINE 0x00008000 /**< Log related to pipeline. */
-#define RTE_LOGTYPE_MBUF 0x00010000 /**< Log related to mbuf. */
-#define RTE_LOGTYPE_CRYPTODEV 0x00020000 /**< Log related to cryptodev. */
+#define RTE_LOGTYPE_EAL 0 /**< Log related to eal. */
+#define RTE_LOGTYPE_MALLOC 1 /**< Log related to malloc. */
+#define RTE_LOGTYPE_RING 2 /**< Log related to ring. */
+#define RTE_LOGTYPE_MEMPOOL 3 /**< Log related to mempool. */
+#define RTE_LOGTYPE_TIMER 4 /**< Log related to timers. */
+#define RTE_LOGTYPE_PMD 5 /**< Log related to poll mode driver. */
+#define RTE_LOGTYPE_HASH 6 /**< Log related to hash table. */
+#define RTE_LOGTYPE_LPM 7 /**< Log related to LPM. */
+#define RTE_LOGTYPE_KNI 8 /**< Log related to KNI. */
+#define RTE_LOGTYPE_ACL 9 /**< Log related to ACL. */
+#define RTE_LOGTYPE_POWER 10 /**< Log related to power. */
+#define RTE_LOGTYPE_METER 11 /**< Log related to QoS meter. */
+#define RTE_LOGTYPE_SCHED 12 /**< Log related to QoS port scheduler. */
+#define RTE_LOGTYPE_PORT 13 /**< Log related to port. */
+#define RTE_LOGTYPE_TABLE 14 /**< Log related to table. */
+#define RTE_LOGTYPE_PIPELINE 15 /**< Log related to pipeline. */
+#define RTE_LOGTYPE_MBUF 16 /**< Log related to mbuf. */
+#define RTE_LOGTYPE_CRYPTODEV 17 /**< Log related to cryptodev. */
+#define RTE_LOGTYPE_EFD 18 /**< Log related to EFD. */
+#define RTE_LOGTYPE_EVENTDEV 19 /**< Log related to eventdev. */
/* these log types can be used in an application */
-#define RTE_LOGTYPE_USER1 0x01000000 /**< User-defined log type 1. */
-#define RTE_LOGTYPE_USER2 0x02000000 /**< User-defined log type 2. */
-#define RTE_LOGTYPE_USER3 0x04000000 /**< User-defined log type 3. */
-#define RTE_LOGTYPE_USER4 0x08000000 /**< User-defined log type 4. */
-#define RTE_LOGTYPE_USER5 0x10000000 /**< User-defined log type 5. */
-#define RTE_LOGTYPE_USER6 0x20000000 /**< User-defined log type 6. */
-#define RTE_LOGTYPE_USER7 0x40000000 /**< User-defined log type 7. */
-#define RTE_LOGTYPE_USER8 0x80000000 /**< User-defined log type 8. */
+#define RTE_LOGTYPE_USER1 24 /**< User-defined log type 1. */
+#define RTE_LOGTYPE_USER2 25 /**< User-defined log type 2. */
+#define RTE_LOGTYPE_USER3 26 /**< User-defined log type 3. */
+#define RTE_LOGTYPE_USER4 27 /**< User-defined log type 4. */
+#define RTE_LOGTYPE_USER5 28 /**< User-defined log type 5. */
+#define RTE_LOGTYPE_USER6 29 /**< User-defined log type 6. */
+#define RTE_LOGTYPE_USER7 30 /**< User-defined log type 7. */
+#define RTE_LOGTYPE_USER8 31 /**< User-defined log type 8. */
+
+/** First identifier for extended logs */
+#define RTE_LOGTYPE_FIRST_EXT_ID 32
/* Can't use 0, as it gives compiler warnings */
#define RTE_LOG_EMERG 1U /**< System is unusable. */
@@ -118,18 +129,32 @@ int rte_openlog_stream(FILE *f);
/**
* Set the global log level.
*
- * After this call, all logs that are lower or equal than level and
- * lower or equal than the RTE_LOG_LEVEL configuration option will be
- * displayed.
+ * After this call, logs with a level lower or equal than the level
+ * passed as argument will be displayed.
*
* @param level
* Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
*/
+void rte_log_set_global_level(uint32_t level);
+
+/**
+ * Deprecated, replaced by rte_log_set_global_level().
+ */
+__rte_deprecated
void rte_set_log_level(uint32_t level);
/**
* Get the global log level.
+ *
+ * @return
+ * The current global log level.
+ */
+uint32_t rte_log_get_global_level(void);
+
+/**
+ * Deprecated, replaced by rte_log_get_global_level().
*/
+__rte_deprecated
uint32_t rte_get_log_level(void);
/**
@@ -140,14 +165,40 @@ uint32_t rte_get_log_level(void);
* @param enable
* True for enable; false for disable.
*/
+__rte_deprecated
void rte_set_log_type(uint32_t type, int enable);
/**
* Get the global log type.
*/
+__rte_deprecated
uint32_t rte_get_log_type(void);
/**
+ * Set the log level for a given type.
+ *
+ * @param pattern
+ * The regexp identifying the log type.
+ * @param level
+ * The level to be set.
+ * @return
+ * 0 on success, a negative value if level is invalid.
+ */
+int rte_log_set_level_regexp(const char *pattern, uint32_t level);
+
+/**
+ * Set the log level for a given type.
+ *
+ * @param logtype
+ * The log type identifier.
+ * @param level
+ * The level to be set.
+ * @return
+ * 0 on success, a negative value if logtype or level is invalid.
+ */
+int rte_log_set_level(uint32_t logtype, uint32_t level);
+
+/**
* Get the current loglevel for the message being processed.
*
* Before calling the user-defined stream for logging, the log
@@ -176,6 +227,30 @@ int rte_log_cur_msg_loglevel(void);
int rte_log_cur_msg_logtype(void);
/**
+ * Register a dynamic log type
+ *
+ * If a log is already registered with the same type, the returned value
+ * is the same than the previous one.
+ *
+ * @param name
+ * The string identifying the log type.
+ * @return
+ * - >0: success, the returned value is the log type identifier.
+ * - (-ENONEM): cannot allocate memory.
+ */
+int rte_log_register(const char *name);
+
+/**
+ * Dump log information.
+ *
+ * Dump the global level and the registered log types.
+ *
+ * @param f
+ * The output stream where the dump should be sent.
+ */
+void rte_log_dump(FILE *f);
+
+/**
* Generates a log message.
*
* The message will be sent in the stream defined by the previous call
@@ -184,9 +259,8 @@ int rte_log_cur_msg_logtype(void);
* The level argument determines if the log should be displayed or
* not, depending on the global rte_logs variable.
*
- * The preferred alternative is the RTE_LOG() function because debug logs may
- * be removed at compilation time if optimization is enabled. Moreover,
- * logs are automatically prefixed by type when using the macro.
+ * The preferred alternative is the RTE_LOG() because it adds the
+ * level and type in the logged string.
*
* @param level
* Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
@@ -217,8 +291,8 @@ int rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
* not, depending on the global rte_logs variable. A trailing
* newline may be added if needed.
*
- * The preferred alternative is the RTE_LOG() because debug logs may be
- * removed at compilation time.
+ * The preferred alternative is the RTE_LOG() because it adds the
+ * level and type in the logged string.
*
* @param level
* Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
@@ -239,15 +313,8 @@ int rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap)
/**
* Generates a log message.
*
- * The RTE_LOG() is equivalent to rte_log() with two differences:
-
- * - RTE_LOG() can be used to remove debug logs at compilation time,
- * depending on RTE_LOG_LEVEL configuration option, and compilation
- * optimization level. If optimization is enabled, the tests
- * involving constants only are pre-computed. If compilation is done
- * with -O0, these tests will be done at run time.
- * - The log level and log type names are smaller, for example:
- * RTE_LOG(INFO, EAL, "this is a %s", "log");
+ * The RTE_LOG() is a helper that prefixes the string with the log level
+ * and type, and call rte_log().
*
* @param l
* Log level. A value between EMERG (1) and DEBUG (8). The short name is
@@ -263,7 +330,31 @@ int rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap)
* - Negative on error.
*/
#define RTE_LOG(l, t, ...) \
- (void)((RTE_LOG_ ## l <= RTE_LOG_LEVEL) ? \
+ rte_log(RTE_LOG_ ## l, \
+ RTE_LOGTYPE_ ## t, # t ": " __VA_ARGS__)
+
+/**
+ * Generates a log message for data path.
+ *
+ * Similar to RTE_LOG(), except that it is removed at compilation time
+ * if the RTE_LOG_DP_LEVEL configuration option is lower than the log
+ * level argument.
+ *
+ * @param l
+ * Log level. A value between EMERG (1) and DEBUG (8). The short name is
+ * expanded by the macro, so it cannot be an integer value.
+ * @param t
+ * The log type, for example, EAL. The short name is expanded by the
+ * macro, so it cannot be an integer value.
+ * @param ...
+ * The fmt string, as in printf(3), followed by the variable arguments
+ * required by the format.
+ * @return
+ * - 0: Success.
+ * - Negative on error.
+ */
+#define RTE_LOG_DP(l, t, ...) \
+ (void)((RTE_LOG_ ## l <= RTE_LOG_DP_LEVEL) ? \
rte_log(RTE_LOG_ ## l, \
RTE_LOGTYPE_ ## t, # t ": " __VA_ARGS__) : \
0)
diff --git a/lib/librte_eal/common/include/rte_pci.h b/lib/librte_eal/common/include/rte_pci.h
index 9ce88472..ab64c63c 100644
--- a/lib/librte_eal/common/include/rte_pci.h
+++ b/lib/librte_eal/common/include/rte_pci.h
@@ -85,12 +85,7 @@ extern "C" {
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_dev.h>
-
-TAILQ_HEAD(pci_device_list, rte_pci_device); /**< PCI devices in D-linked Q. */
-TAILQ_HEAD(pci_driver_list, rte_pci_driver); /**< PCI drivers in D-linked Q. */
-
-extern struct pci_driver_list pci_driver_list; /**< Global list of PCI drivers. */
-extern struct pci_device_list pci_device_list; /**< Global list of PCI devices. */
+#include <rte_bus.h>
/** Pathname of PCI devices directory. */
const char *pci_get_sysfs_path(void);
@@ -111,6 +106,25 @@ const char *pci_get_sysfs_path(void);
/** Maximum number of PCI resources. */
#define PCI_MAX_RESOURCE 6
+/** Name of PCI Bus */
+#define PCI_BUS_NAME "PCI"
+
+/* Forward declarations */
+struct rte_pci_device;
+struct rte_pci_driver;
+
+/** List of PCI devices */
+TAILQ_HEAD(rte_pci_device_list, rte_pci_device);
+/** List of PCI drivers */
+TAILQ_HEAD(rte_pci_driver_list, rte_pci_driver);
+
+/* PCI Bus iterators */
+#define FOREACH_DEVICE_ON_PCIBUS(p) \
+ TAILQ_FOREACH(p, &(rte_pci_bus.device_list), next)
+
+#define FOREACH_DRIVER_ON_PCIBUS(p) \
+ TAILQ_FOREACH(p, &(rte_pci_bus.driver_list), next)
+
/**
* A structure describing an ID for a PCI driver. Each driver provides a
* table of these IDs for each device that it supports.
@@ -158,8 +172,15 @@ struct rte_pci_device {
struct rte_pci_driver *driver; /**< Associated driver */
uint16_t max_vfs; /**< sriov enable if not zero */
enum rte_kernel_driver kdrv; /**< Kernel driver passthrough */
+ char name[PCI_PRI_STR_SIZE+1]; /**< PCI location (ASCII) */
};
+/**
+ * @internal
+ * Helper macro for drivers that need to convert to struct rte_pci_device.
+ */
+#define RTE_DEV_TO_PCI(ptr) container_of(ptr, struct rte_pci_device, device)
+
/** Any PCI device identifier (vendor, device, ...) */
#define PCI_ANY_ID (0xffff)
#define RTE_CLASS_ANY_ID (0xffffff)
@@ -182,8 +203,6 @@ struct rte_pci_device {
.subsystem_device_id = PCI_ANY_ID
#endif
-struct rte_pci_driver;
-
/**
* Initialisation function for the driver called during PCI probing.
*/
@@ -200,20 +219,28 @@ typedef int (pci_remove_t)(struct rte_pci_device *);
struct rte_pci_driver {
TAILQ_ENTRY(rte_pci_driver) next; /**< Next in list. */
struct rte_driver driver; /**< Inherit core driver. */
+ struct rte_pci_bus *bus; /**< PCI bus reference. */
pci_probe_t *probe; /**< Device Probe function. */
pci_remove_t *remove; /**< Device Remove function. */
const struct rte_pci_id *id_table; /**< ID table, NULL terminated. */
uint32_t drv_flags; /**< Flags contolling handling of device. */
};
+/**
+ * Structure describing the PCI bus
+ */
+struct rte_pci_bus {
+ struct rte_bus bus; /**< Inherit the generic class */
+ struct rte_pci_device_list device_list; /**< List of PCI devices */
+ struct rte_pci_driver_list driver_list; /**< List of PCI drivers */
+};
+
/** Device needs PCI BAR mapping (done with either IGB_UIO or VFIO) */
#define RTE_PCI_DRV_NEED_MAPPING 0x0001
-/** Device needs to be unbound even if no module is provided */
-#define RTE_PCI_DRV_FORCE_UNBIND 0x0004
/** Device driver supports link state interrupt */
#define RTE_PCI_DRV_INTR_LSC 0x0008
-/** Device driver supports detaching capability */
-#define RTE_PCI_DRV_DETACHABLE 0x0010
+/** Device driver supports device removal interrupt */
+#define RTE_PCI_DRV_INTR_RMV 0x0010
/**
* A structure describing a PCI mapping.
@@ -315,8 +342,8 @@ eal_parse_pci_DomBDF(const char *input, struct rte_pci_addr *dev_addr)
* The output buffer size
*/
static inline void
-rte_eal_pci_device_name(const struct rte_pci_addr *addr,
- char *output, size_t size)
+rte_pci_device_name(const struct rte_pci_addr *addr,
+ char *output, size_t size)
{
RTE_VERIFY(size >= PCI_PRI_STR_SIZE);
RTE_VERIFY(snprintf(output, size, PCI_PRI_FMT,
@@ -366,20 +393,17 @@ rte_eal_compare_pci_addr(const struct rte_pci_addr *addr,
* @return
* 0 on success, negative on error
*/
-int rte_eal_pci_scan(void);
+int rte_pci_scan(void);
/**
- * Probe the PCI bus for registered drivers.
- *
- * Scan the content of the PCI bus, and call the probe() function for
- * all registered drivers that have a matching entry in its id_table
- * for discovered devices.
+ * Probe the PCI bus
*
* @return
* - 0 on success.
- * - Negative on error.
+ * - !0 on error.
*/
-int rte_eal_pci_probe(void);
+int
+rte_pci_probe(void);
/**
* Map the PCI device resources in user space virtual memory address
@@ -396,7 +420,7 @@ int rte_eal_pci_probe(void);
* 0 on success, negative on error and positive if no driver
* is found for the device.
*/
-int rte_eal_pci_map_device(struct rte_pci_device *dev);
+int rte_pci_map_device(struct rte_pci_device *dev);
/**
* Unmap this device
@@ -405,7 +429,7 @@ int rte_eal_pci_map_device(struct rte_pci_device *dev);
* A pointer to a rte_pci_device structure describing the device
* to use
*/
-void rte_eal_pci_unmap_device(struct rte_pci_device *dev);
+void rte_pci_unmap_device(struct rte_pci_device *dev);
/**
* @internal
@@ -452,7 +476,7 @@ void pci_unmap_resource(void *requested_addr, size_t size);
* - 0 on success.
* - Negative on error.
*/
-int rte_eal_pci_probe_one(const struct rte_pci_addr *addr);
+int rte_pci_probe_one(const struct rte_pci_addr *addr);
/**
* Close the single PCI device.
@@ -467,7 +491,7 @@ int rte_eal_pci_probe_one(const struct rte_pci_addr *addr);
* - 0 on success.
* - Negative on error.
*/
-int rte_eal_pci_detach(const struct rte_pci_addr *addr);
+int rte_pci_detach(const struct rte_pci_addr *addr);
/**
* Dump the content of the PCI bus.
@@ -475,7 +499,7 @@ int rte_eal_pci_detach(const struct rte_pci_addr *addr);
* @param f
* A pointer to a file for output
*/
-void rte_eal_pci_dump(FILE *f);
+void rte_pci_dump(FILE *f);
/**
* Register a PCI driver.
@@ -484,7 +508,7 @@ void rte_eal_pci_dump(FILE *f);
* A pointer to a rte_pci_driver structure describing the driver
* to be registered.
*/
-void rte_eal_pci_register(struct rte_pci_driver *driver);
+void rte_pci_register(struct rte_pci_driver *driver);
/** Helper for PCI device registration from driver (eth, crypto) instance */
#define RTE_PMD_REGISTER_PCI(nm, pci_drv) \
@@ -492,7 +516,7 @@ RTE_INIT(pciinitfn_ ##nm); \
static void pciinitfn_ ##nm(void) \
{\
(pci_drv).driver.name = RTE_STR(nm);\
- rte_eal_pci_register(&pci_drv); \
+ rte_pci_register(&pci_drv); \
} \
RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
@@ -503,7 +527,7 @@ RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
* A pointer to a rte_pci_driver structure describing the driver
* to be unregistered.
*/
-void rte_eal_pci_unregister(struct rte_pci_driver *driver);
+void rte_pci_unregister(struct rte_pci_driver *driver);
/**
* Read PCI config space.
@@ -518,8 +542,8 @@ void rte_eal_pci_unregister(struct rte_pci_driver *driver);
* @param offset
* The offset into PCI config space
*/
-int rte_eal_pci_read_config(const struct rte_pci_device *device,
- void *buf, size_t len, off_t offset);
+int rte_pci_read_config(const struct rte_pci_device *device,
+ void *buf, size_t len, off_t offset);
/**
* Write PCI config space.
@@ -534,8 +558,8 @@ int rte_eal_pci_read_config(const struct rte_pci_device *device,
* @param offset
* The offset into PCI config space
*/
-int rte_eal_pci_write_config(const struct rte_pci_device *device,
- const void *buf, size_t len, off_t offset);
+int rte_pci_write_config(const struct rte_pci_device *device,
+ const void *buf, size_t len, off_t offset);
/**
* A structure used to access io resources for a pci device.
@@ -563,8 +587,8 @@ struct rte_pci_ioport {
* @return
* 0 on success, negative on error.
*/
-int rte_eal_pci_ioport_map(struct rte_pci_device *dev, int bar,
- struct rte_pci_ioport *p);
+int rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p);
/**
* Release any resources used in a rte_pci_ioport object.
@@ -574,7 +598,7 @@ int rte_eal_pci_ioport_map(struct rte_pci_device *dev, int bar,
* @return
* 0 on success, negative on error.
*/
-int rte_eal_pci_ioport_unmap(struct rte_pci_ioport *p);
+int rte_pci_ioport_unmap(struct rte_pci_ioport *p);
/**
* Read from a io pci resource.
@@ -588,8 +612,8 @@ int rte_eal_pci_ioport_unmap(struct rte_pci_ioport *p);
* @param offset
* The offset into the pci io resource.
*/
-void rte_eal_pci_ioport_read(struct rte_pci_ioport *p,
- void *data, size_t len, off_t offset);
+void rte_pci_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset);
/**
* Write to a io pci resource.
@@ -603,8 +627,8 @@ void rte_eal_pci_ioport_read(struct rte_pci_ioport *p,
* @param offset
* The offset into the pci io resource.
*/
-void rte_eal_pci_ioport_write(struct rte_pci_ioport *p,
- const void *data, size_t len, off_t offset);
+void rte_pci_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset);
#ifdef __cplusplus
}
diff --git a/lib/librte_eal/common/include/rte_vdev.h b/lib/librte_eal/common/include/rte_vdev.h
index 784e837d..e6b678ea 100644
--- a/lib/librte_eal/common/include/rte_vdev.h
+++ b/lib/librte_eal/common/include/rte_vdev.h
@@ -39,6 +39,28 @@ extern "C" {
#include <sys/queue.h>
#include <rte_dev.h>
+#include <rte_devargs.h>
+
+struct rte_vdev_device {
+ TAILQ_ENTRY(rte_vdev_device) next; /**< Next attached vdev */
+ struct rte_device device; /**< Inherit core device */
+};
+
+static inline const char *
+rte_vdev_device_name(const struct rte_vdev_device *dev)
+{
+ if (dev && dev->device.devargs)
+ return dev->device.devargs->virt.drv_name;
+ return NULL;
+}
+
+static inline const char *
+rte_vdev_device_args(const struct rte_vdev_device *dev)
+{
+ if (dev && dev->device.devargs)
+ return dev->device.devargs->args;
+ return "";
+}
/** Double linked list of virtual device drivers. */
TAILQ_HEAD(vdev_driver_list, rte_vdev_driver);
@@ -46,12 +68,12 @@ TAILQ_HEAD(vdev_driver_list, rte_vdev_driver);
/**
* Probe function called for each virtual device driver once.
*/
-typedef int (rte_vdev_probe_t)(const char *name, const char *args);
+typedef int (rte_vdev_probe_t)(struct rte_vdev_device *dev);
/**
* Remove function called for each virtual device driver once.
*/
-typedef int (rte_vdev_remove_t)(const char *name);
+typedef int (rte_vdev_remove_t)(struct rte_vdev_device *dev);
/**
* A virtual device driver abstraction.
@@ -70,7 +92,7 @@ struct rte_vdev_driver {
* A pointer to a rte_vdev_driver structure describing the driver
* to be registered.
*/
-void rte_eal_vdrv_register(struct rte_vdev_driver *driver);
+void rte_vdev_register(struct rte_vdev_driver *driver);
/**
* Unregister a virtual device driver.
@@ -79,7 +101,7 @@ void rte_eal_vdrv_register(struct rte_vdev_driver *driver);
* A pointer to a rte_vdev_driver structure describing the driver
* to be unregistered.
*/
-void rte_eal_vdrv_unregister(struct rte_vdev_driver *driver);
+void rte_vdev_unregister(struct rte_vdev_driver *driver);
#define RTE_PMD_REGISTER_VDEV(nm, vdrv)\
RTE_INIT(vdrvinitfn_ ##vdrv);\
@@ -88,7 +110,7 @@ static void vdrvinitfn_ ##vdrv(void)\
{\
(vdrv).driver.name = RTE_STR(nm);\
(vdrv).driver.alias = vdrvinit_ ## nm ## _alias;\
- rte_eal_vdrv_register(&vdrv);\
+ rte_vdev_register(&vdrv);\
} \
RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index 0de35fb7..07a085eb 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -56,17 +56,17 @@ extern "C" {
/**
* Major version/year number i.e. the yy in yy.mm.z
*/
-#define RTE_VER_YEAR 16
+#define RTE_VER_YEAR 17
/**
* Minor version/month number i.e. the mm in yy.mm.z
*/
-#define RTE_VER_MONTH 11
+#define RTE_VER_MONTH 5
/**
* Patch level number i.e. the z in yy.mm.z
*/
-#define RTE_VER_MINOR 1
+#define RTE_VER_MINOR 0
/**
* Extra string to be appended to version number
diff --git a/lib/librte_eal/linuxapp/Makefile b/lib/librte_eal/linuxapp/Makefile
index 20d2a916..4794696b 100644
--- a/lib/librte_eal/linuxapp/Makefile
+++ b/lib/librte_eal/linuxapp/Makefile
@@ -34,6 +34,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal
DIRS-$(CONFIG_RTE_EAL_IGB_UIO) += igb_uio
DIRS-$(CONFIG_RTE_KNI_KMOD) += kni
+DEPDIRS-kni := eal
DIRS-$(CONFIG_RTE_LIBRTE_XEN_DOM0) += xen_dom0
+DEPDIRS-xen_dom0 := eal
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 4e206f09..640afd08 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -37,7 +37,7 @@ ARCH_DIR ?= $(RTE_ARCH)
EXPORT_MAP := rte_eal_version.map
VPATH += $(RTE_SDK)/lib/librte_eal/common/arch/$(ARCH_DIR)
-LIBABIVER := 3
+LIBABIVER := 4
VPATH += $(RTE_SDK)/lib/librte_eal/common
@@ -87,6 +87,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_cpuflags.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_string_fns.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_hexdump.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_devargs.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_bus.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_dev.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_options.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_thread.c
@@ -130,7 +131,4 @@ INC := rte_interrupts.h rte_kni_common.h rte_dom0_common.h
SYMLINK-$(CONFIG_RTE_EXEC_ENV_LINUXAPP)-include/exec-env := \
$(addprefix include/exec-env/,$(INC))
-DEPDIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += lib/librte_eal/common
-DEPDIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += lib/librte_eal/common/arch/$(ARCH_DIR)
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index 2075282e..7c78f2dc 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -61,6 +61,7 @@
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
+#include <rte_errno.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_log.h>
@@ -69,6 +70,7 @@
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_interrupts.h>
+#include <rte_bus.h>
#include <rte_pci.h>
#include <rte_dev.h>
#include <rte_devargs.h>
@@ -210,7 +212,7 @@ rte_eal_config_create(void)
rte_panic("Cannot mmap memory for rte_config\n");
}
memcpy(rte_mem_cfg_addr, &early_mem_config, sizeof(early_mem_config));
- rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
+ rte_config.mem_config = rte_mem_cfg_addr;
/* store address of the config in the config itself so that secondary
* processes could later map the config into this exact location */
@@ -490,8 +492,6 @@ eal_log_level_parse(int argc, char **argv)
argvopt = argv;
optind = 1;
- eal_reset_internal_config(&internal_config);
-
while ((opt = getopt_long(argc, argvopt, eal_short_options,
eal_long_options, &option_index)) != EOF) {
@@ -739,6 +739,12 @@ static int rte_eal_vfio_setup(void)
}
#endif
+static void rte_eal_init_alert(const char *msg)
+{
+ fprintf(stderr, "EAL: FATAL: %s\n", msg);
+ RTE_LOG(ERR, EAL, "%s\n", msg);
+}
+
/* Launch threads, called at application init(). */
int
rte_eal_init(int argc, char **argv)
@@ -751,33 +757,51 @@ rte_eal_init(int argc, char **argv)
char thread_name[RTE_MAX_THREAD_NAME_LEN];
/* checks if the machine is adequate */
- rte_cpu_check_supported();
+ if (!rte_cpu_is_supported()) {
+ rte_eal_init_alert("unsupported cpu type.");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
- if (!rte_atomic32_test_and_set(&run_once))
+ if (!rte_atomic32_test_and_set(&run_once)) {
+ rte_eal_init_alert("already called initialization.");
+ rte_errno = EALREADY;
return -1;
+ }
logid = strrchr(argv[0], '/');
logid = strdup(logid ? logid + 1: argv[0]);
thread_id = pthread_self();
- eal_log_level_parse(argc, argv);
+ eal_reset_internal_config(&internal_config);
/* set log level as early as possible */
- rte_set_log_level(internal_config.log_level);
+ eal_log_level_parse(argc, argv);
- if (rte_eal_cpu_init() < 0)
- rte_panic("Cannot detect lcores\n");
+ if (rte_eal_cpu_init() < 0) {
+ rte_eal_init_alert("Cannot detect lcores.");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
fctret = eal_parse_args(argc, argv);
- if (fctret < 0)
- exit(1);
+ if (fctret < 0) {
+ rte_eal_init_alert("Invalid 'command line' arguments.");
+ rte_errno = EINVAL;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
if (internal_config.no_hugetlbfs == 0 &&
internal_config.process_type != RTE_PROC_SECONDARY &&
internal_config.xen_dom0_support == 0 &&
- eal_hugepage_info_init() < 0)
- rte_panic("Cannot get hugepage information\n");
+ eal_hugepage_info_init() < 0) {
+ rte_eal_init_alert("Cannot get hugepage information.");
+ rte_errno = EACCES;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
if (internal_config.no_hugetlbfs)
@@ -799,39 +823,59 @@ rte_eal_init(int argc, char **argv)
rte_config_init();
- if (rte_eal_log_init(logid, internal_config.syslog_facility) < 0)
- rte_panic("Cannot init logs\n");
-
- if (rte_eal_pci_init() < 0)
- rte_panic("Cannot init PCI\n");
+ if (rte_eal_log_init(logid, internal_config.syslog_facility) < 0) {
+ rte_eal_init_alert("Cannot init logging.");
+ rte_errno = ENOMEM;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
#ifdef VFIO_PRESENT
- if (rte_eal_vfio_setup() < 0)
- rte_panic("Cannot init VFIO\n");
+ if (rte_eal_vfio_setup() < 0) {
+ rte_eal_init_alert("Cannot init VFIO\n");
+ rte_errno = EAGAIN;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
#endif
- if (rte_eal_memory_init() < 0)
- rte_panic("Cannot init memory\n");
+ if (rte_eal_memory_init() < 0) {
+ rte_eal_init_alert("Cannot init memory\n");
+ rte_errno = ENOMEM;
+ return -1;
+ }
/* the directories are locked during eal_hugepage_info_init */
eal_hugedirs_unlock();
- if (rte_eal_memzone_init() < 0)
- rte_panic("Cannot init memzone\n");
+ if (rte_eal_memzone_init() < 0) {
+ rte_eal_init_alert("Cannot init memzone\n");
+ rte_errno = ENODEV;
+ return -1;
+ }
- if (rte_eal_tailqs_init() < 0)
- rte_panic("Cannot init tail queues for objects\n");
+ if (rte_eal_tailqs_init() < 0) {
+ rte_eal_init_alert("Cannot init tail queues for objects\n");
+ rte_errno = EFAULT;
+ return -1;
+ }
- if (rte_eal_alarm_init() < 0)
- rte_panic("Cannot init interrupt-handling thread\n");
+ if (rte_eal_alarm_init() < 0) {
+ rte_eal_init_alert("Cannot init interrupt-handling thread\n");
+ /* rte_eal_alarm_init sets rte_errno on failure. */
+ return -1;
+ }
- if (rte_eal_timer_init() < 0)
- rte_panic("Cannot init HPET or TSC timers\n");
+ if (rte_eal_timer_init() < 0) {
+ rte_eal_init_alert("Cannot init HPET or TSC timers\n");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
eal_check_mem_on_local_socket();
if (eal_plugins_init() < 0)
- rte_panic("Cannot init plugins\n");
+ rte_eal_init_alert("Cannot init plugins\n");
eal_thread_init_master(rte_config.master_lcore);
@@ -841,11 +885,16 @@ rte_eal_init(int argc, char **argv)
rte_config.master_lcore, (int)thread_id, cpuset,
ret == 0 ? "" : "...");
- if (rte_eal_dev_init() < 0)
- rte_panic("Cannot init pmd devices\n");
+ if (rte_eal_intr_init() < 0) {
+ rte_eal_init_alert("Cannot init interrupt-handling thread\n");
+ return -1;
+ }
- if (rte_eal_intr_init() < 0)
- rte_panic("Cannot init interrupt-handling thread\n");
+ if (rte_bus_scan()) {
+ rte_eal_init_alert("Cannot scan the buses for devices\n");
+ rte_errno = ENODEV;
+ return -1;
+ }
RTE_LCORE_FOREACH_SLAVE(i) {
@@ -883,9 +932,12 @@ rte_eal_init(int argc, char **argv)
rte_eal_mp_remote_launch(sync_func, NULL, SKIP_MASTER);
rte_eal_mp_wait_lcore();
- /* Probe & Initialize PCI devices */
- if (rte_eal_pci_probe())
- rte_panic("Cannot probe PCI\n");
+ /* Probe all the buses and devices/drivers on them */
+ if (rte_bus_probe()) {
+ rte_eal_init_alert("Cannot probe devices\n");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
rte_eal_mcfg_complete();
diff --git a/lib/librte_eal/linuxapp/eal/eal_alarm.c b/lib/librte_eal/linuxapp/eal/eal_alarm.c
index 8b042abc..fbae4613 100644
--- a/lib/librte_eal/linuxapp/eal/eal_alarm.c
+++ b/lib/librte_eal/linuxapp/eal/eal_alarm.c
@@ -83,7 +83,7 @@ static rte_spinlock_t alarm_list_lk = RTE_SPINLOCK_INITIALIZER;
static struct rte_intr_handle intr_handle = {.fd = -1 };
static int handler_registered = 0;
-static void eal_alarm_callback(struct rte_intr_handle *hdl, void *arg);
+static void eal_alarm_callback(void *arg);
int
rte_eal_alarm_init(void)
@@ -102,8 +102,7 @@ error:
}
static void
-eal_alarm_callback(struct rte_intr_handle *hdl __rte_unused,
- void *arg __rte_unused)
+eal_alarm_callback(void *arg __rte_unused)
{
struct timespec now;
struct alarm_entry *ap;
diff --git a/lib/librte_eal/linuxapp/eal/eal_debug.c b/lib/librte_eal/linuxapp/eal/eal_debug.c
index 5fbc17c5..e1c75548 100644
--- a/lib/librte_eal/linuxapp/eal/eal_debug.c
+++ b/lib/librte_eal/linuxapp/eal/eal_debug.c
@@ -31,7 +31,9 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#ifdef RTE_BACKTRACE
#include <execinfo.h>
+#endif
#include <stdarg.h>
#include <signal.h>
#include <stdlib.h>
@@ -47,6 +49,7 @@
/* dump the stack of the calling core */
void rte_dump_stack(void)
{
+#ifdef RTE_BACKTRACE
void *func[BACKTRACE_SIZE];
char **symb = NULL;
int size;
@@ -64,6 +67,7 @@ void rte_dump_stack(void)
}
free(symb);
+#endif /* RTE_BACKTRACE */
}
/* not implemented in this environment */
diff --git a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
index 18858e2d..7a21e8f6 100644
--- a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
+++ b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
@@ -283,9 +283,12 @@ eal_hugepage_info_init(void)
struct dirent *dirent;
dir = opendir(sys_dir_path);
- if (dir == NULL)
- rte_panic("Cannot open directory %s to read system hugepage "
- "info\n", sys_dir_path);
+ if (dir == NULL) {
+ RTE_LOG(ERR, EAL,
+ "Cannot open directory %s to read system hugepage info\n",
+ sys_dir_path);
+ return -1;
+ }
for (dirent = readdir(dir); dirent != NULL; dirent = readdir(dir)) {
struct hugepage_info *hpi;
diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
index 47a3b20a..2e3bd12a 100644
--- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
@@ -46,6 +46,7 @@
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include <assert.h>
+#include <stdbool.h>
#include <rte_common.h>
#include <rte_interrupts.h>
@@ -136,7 +137,7 @@ static pthread_t intr_thread;
/* enable legacy (INTx) interrupts */
static int
-vfio_enable_intx(struct rte_intr_handle *intr_handle) {
+vfio_enable_intx(const struct rte_intr_handle *intr_handle) {
struct vfio_irq_set *irq_set;
char irq_set_buf[IRQ_SET_BUF_LEN];
int len, ret;
@@ -183,7 +184,7 @@ vfio_enable_intx(struct rte_intr_handle *intr_handle) {
/* disable legacy (INTx) interrupts */
static int
-vfio_disable_intx(struct rte_intr_handle *intr_handle) {
+vfio_disable_intx(const struct rte_intr_handle *intr_handle) {
struct vfio_irq_set *irq_set;
char irq_set_buf[IRQ_SET_BUF_LEN];
int len, ret;
@@ -194,14 +195,14 @@ vfio_disable_intx(struct rte_intr_handle *intr_handle) {
irq_set = (struct vfio_irq_set *) irq_set_buf;
irq_set->argsz = len;
irq_set->count = 1;
- irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK;
irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
irq_set->start = 0;
ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret) {
- RTE_LOG(ERR, EAL, "Error unmasking INTx interrupts for fd %d\n",
+ RTE_LOG(ERR, EAL, "Error masking INTx interrupts for fd %d\n",
intr_handle->fd);
return -1;
}
@@ -226,7 +227,7 @@ vfio_disable_intx(struct rte_intr_handle *intr_handle) {
/* enable MSI interrupts */
static int
-vfio_enable_msi(struct rte_intr_handle *intr_handle) {
+vfio_enable_msi(const struct rte_intr_handle *intr_handle) {
int len, ret;
char irq_set_buf[IRQ_SET_BUF_LEN];
struct vfio_irq_set *irq_set;
@@ -255,7 +256,7 @@ vfio_enable_msi(struct rte_intr_handle *intr_handle) {
/* disable MSI interrupts */
static int
-vfio_disable_msi(struct rte_intr_handle *intr_handle) {
+vfio_disable_msi(const struct rte_intr_handle *intr_handle) {
struct vfio_irq_set *irq_set;
char irq_set_buf[IRQ_SET_BUF_LEN];
int len, ret;
@@ -280,7 +281,7 @@ vfio_disable_msi(struct rte_intr_handle *intr_handle) {
/* enable MSI-X interrupts */
static int
-vfio_enable_msix(struct rte_intr_handle *intr_handle) {
+vfio_enable_msix(const struct rte_intr_handle *intr_handle) {
int len, ret;
char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
struct vfio_irq_set *irq_set;
@@ -290,12 +291,10 @@ vfio_enable_msix(struct rte_intr_handle *intr_handle) {
irq_set = (struct vfio_irq_set *) irq_set_buf;
irq_set->argsz = len;
- if (!intr_handle->max_intr)
- intr_handle->max_intr = 1;
- else if (intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID)
- intr_handle->max_intr = RTE_MAX_RXTX_INTR_VEC_ID + 1;
-
- irq_set->count = intr_handle->max_intr;
+ /* 0 < irq_set->count < RTE_MAX_RXTX_INTR_VEC_ID + 1 */
+ irq_set->count = intr_handle->max_intr ?
+ (intr_handle->max_intr > RTE_MAX_RXTX_INTR_VEC_ID + 1 ?
+ RTE_MAX_RXTX_INTR_VEC_ID + 1 : intr_handle->max_intr) : 1;
irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
irq_set->start = 0;
@@ -318,7 +317,7 @@ vfio_enable_msix(struct rte_intr_handle *intr_handle) {
/* disable MSI-X interrupts */
static int
-vfio_disable_msix(struct rte_intr_handle *intr_handle) {
+vfio_disable_msix(const struct rte_intr_handle *intr_handle) {
struct vfio_irq_set *irq_set;
char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
int len, ret;
@@ -343,7 +342,7 @@ vfio_disable_msix(struct rte_intr_handle *intr_handle) {
#endif
static int
-uio_intx_intr_disable(struct rte_intr_handle *intr_handle)
+uio_intx_intr_disable(const struct rte_intr_handle *intr_handle)
{
unsigned char command_high;
@@ -367,7 +366,7 @@ uio_intx_intr_disable(struct rte_intr_handle *intr_handle)
}
static int
-uio_intx_intr_enable(struct rte_intr_handle *intr_handle)
+uio_intx_intr_enable(const struct rte_intr_handle *intr_handle)
{
unsigned char command_high;
@@ -391,7 +390,7 @@ uio_intx_intr_enable(struct rte_intr_handle *intr_handle)
}
static int
-uio_intr_disable(struct rte_intr_handle *intr_handle)
+uio_intr_disable(const struct rte_intr_handle *intr_handle)
{
const int value = 0;
@@ -405,7 +404,7 @@ uio_intr_disable(struct rte_intr_handle *intr_handle)
}
static int
-uio_intr_enable(struct rte_intr_handle *intr_handle)
+uio_intr_enable(const struct rte_intr_handle *intr_handle)
{
const int value = 1;
@@ -419,7 +418,7 @@ uio_intr_enable(struct rte_intr_handle *intr_handle)
}
int
-rte_intr_callback_register(struct rte_intr_handle *intr_handle,
+rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb, void *cb_arg)
{
int ret, wake_thread;
@@ -491,7 +490,7 @@ rte_intr_callback_register(struct rte_intr_handle *intr_handle,
}
int
-rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
+rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb_fn, void *cb_arg)
{
int ret;
@@ -555,8 +554,11 @@ rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
}
int
-rte_intr_enable(struct rte_intr_handle *intr_handle)
+rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
+ if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
+ return 0;
+
if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
return -1;
@@ -599,8 +601,11 @@ rte_intr_enable(struct rte_intr_handle *intr_handle)
}
int
-rte_intr_disable(struct rte_intr_handle *intr_handle)
+rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
+ if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
+ return 0;
+
if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
return -1;
@@ -645,6 +650,7 @@ rte_intr_disable(struct rte_intr_handle *intr_handle)
static int
eal_intr_process_interrupts(struct epoll_event *events, int nfds)
{
+ bool call = false;
int n, bytes_read;
struct rte_intr_source *src;
struct rte_intr_callback *cb;
@@ -693,13 +699,18 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
bytes_read = sizeof(buf.vfio_intr_count);
break;
#endif
+ case RTE_INTR_HANDLE_VDEV:
case RTE_INTR_HANDLE_EXT:
+ bytes_read = 0;
+ call = true;
+ break;
+
default:
bytes_read = 1;
break;
}
- if (src->intr_handle.type != RTE_INTR_HANDLE_EXT) {
+ if (bytes_read > 0) {
/**
* read out to clear the ready-to-be-read flag
* for epoll_wait.
@@ -716,12 +727,14 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
} else if (bytes_read == 0)
RTE_LOG(ERR, EAL, "Read nothing from file "
"descriptor %d\n", events[n].data.fd);
+ else
+ call = true;
}
/* grab a lock, again to call callbacks and update status. */
rte_spinlock_lock(&intr_lock);
- if (bytes_read > 0) {
+ if (call) {
/* Finally, call all callbacks. */
TAILQ_FOREACH(cb, &src->callbacks, next) {
@@ -731,8 +744,7 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
rte_spinlock_unlock(&intr_lock);
/* call the actual callback */
- active_cb.cb_fn(&src->intr_handle,
- active_cb.cb_arg);
+ active_cb.cb_fn(active_cb.cb_arg);
/*get the lock back. */
rte_spinlock_lock(&intr_lock);
@@ -832,7 +844,7 @@ eal_intr_thread_main(__rte_unused void *arg)
TAILQ_FOREACH(src, &intr_sources, next) {
if (src->callbacks.tqh_first == NULL)
continue; /* skip those with no callbacks */
- ev.events = EPOLLIN | EPOLLPRI;
+ ev.events = EPOLLIN | EPOLLPRI | EPOLLRDHUP | EPOLLHUP;
ev.data.fd = src->intr_handle.fd;
/**
@@ -872,13 +884,16 @@ rte_eal_intr_init(void)
* create a pipe which will be waited by epoll and notified to
* rebuild the wait list of epoll.
*/
- if (pipe(intr_pipe.pipefd) < 0)
+ if (pipe(intr_pipe.pipefd) < 0) {
+ rte_errno = errno;
return -1;
+ }
/* create the host thread to wait/handle the interrupt */
ret = pthread_create(&intr_thread, NULL,
eal_intr_thread_main, NULL);
if (ret != 0) {
+ rte_errno = ret;
RTE_LOG(ERR, EAL,
"Failed to create thread for interrupt handling\n");
} else {
@@ -913,6 +928,14 @@ eal_intr_proc_rxtx_intr(int fd, const struct rte_intr_handle *intr_handle)
bytes_read = sizeof(buf.vfio_intr_count);
break;
#endif
+ case RTE_INTR_HANDLE_VDEV:
+ /* for vdev, fd points to:
+ * a. eventfd which does not need to read out;
+ * b. datapath fd which needs PMD to read out.
+ */
+ return;
+ case RTE_INTR_HANDLE_EXT:
+ return;
default:
bytes_read = 1;
RTE_LOG(INFO, EAL, "unexpected intr type\n");
@@ -1141,6 +1164,24 @@ rte_intr_rx_ctl(struct rte_intr_handle *intr_handle, int epfd,
return rc;
}
+void
+rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle)
+{
+ uint32_t i;
+ struct rte_epoll_event *rev;
+
+ for (i = 0; i < intr_handle->nb_efd; i++) {
+ rev = &intr_handle->elist[i];
+ if (rev->status == RTE_EPOLL_INVALID)
+ continue;
+ if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
+ /* force free if the entry valid */
+ eal_epoll_data_safe_free(rev);
+ rev->status = RTE_EPOLL_INVALID;
+ }
+ }
+}
+
int
rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
{
@@ -1157,12 +1198,14 @@ rte_intr_efd_enable(struct rte_intr_handle *intr_handle, uint32_t nb_efd)
RTE_LOG(ERR, EAL,
"can't setup eventfd, error %i (%s)\n",
errno, strerror(errno));
- return -1;
+ return -errno;
}
intr_handle->efds[i] = fd;
}
intr_handle->nb_efd = n;
intr_handle->max_intr = NB_OTHER_INTR + n;
+ } else if (intr_handle->type == RTE_INTR_HANDLE_VDEV) {
+ /* do nothing, and let vdev driver to initialize this struct */
} else {
intr_handle->efds[0] = intr_handle->fd;
intr_handle->nb_efd = RTE_MIN(nb_efd, 1U);
@@ -1176,19 +1219,8 @@ void
rte_intr_efd_disable(struct rte_intr_handle *intr_handle)
{
uint32_t i;
- struct rte_epoll_event *rev;
-
- for (i = 0; i < intr_handle->nb_efd; i++) {
- rev = &intr_handle->elist[i];
- if (rev->status == RTE_EPOLL_INVALID)
- continue;
- if (rte_epoll_ctl(rev->epfd, EPOLL_CTL_DEL, rev->fd, rev)) {
- /* force free if the entry valid */
- eal_epoll_data_safe_free(rev);
- rev->status = RTE_EPOLL_INVALID;
- }
- }
+ rte_intr_free_epoll_fd(intr_handle);
if (intr_handle->max_intr > intr_handle->nb_efd) {
for (i = 0; i < intr_handle->nb_efd; i++)
close(intr_handle->efds[i]);
@@ -1218,5 +1250,8 @@ rte_intr_cap_multiple(struct rte_intr_handle *intr_handle)
if (intr_handle->type == RTE_INTR_HANDLE_VFIO_MSIX)
return 1;
+ if (intr_handle->type == RTE_INTR_HANDLE_VDEV)
+ return 1;
+
return 0;
}
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index a956bb22..ebe06833 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -64,6 +64,7 @@
#define _FILE_OFFSET_BITS 64
#include <errno.h>
#include <stdarg.h>
+#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
@@ -122,26 +123,28 @@ int rte_xen_dom0_supported(void)
static uint64_t baseaddr_offset;
-static unsigned proc_pagemap_readable;
+static bool phys_addrs_available = true;
#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
static void
-test_proc_pagemap_readable(void)
+test_phys_addrs_available(void)
{
- int fd = open("/proc/self/pagemap", O_RDONLY);
+ uint64_t tmp;
+ phys_addr_t physaddr;
- if (fd < 0) {
+ /* For dom0, phys addresses can always be available */
+ if (rte_xen_dom0_supported())
+ return;
+
+ physaddr = rte_mem_virt2phy(&tmp);
+ if (physaddr == RTE_BAD_PHYS_ADDR) {
RTE_LOG(ERR, EAL,
- "Cannot open /proc/self/pagemap: %s. "
- "virt2phys address translation will not work\n",
+ "Cannot obtain physical addresses: %s. "
+ "Only vfio will function.\n",
strerror(errno));
- return;
+ phys_addrs_available = false;
}
-
- /* Is readable */
- close(fd);
- proc_pagemap_readable = 1;
}
/* Lock page in physical memory and prevent from swapping. */
@@ -190,7 +193,7 @@ rte_mem_virt2phy(const void *virtaddr)
}
/* Cannot parse /proc/self/pagemap, no need to log errors everywhere */
- if (!proc_pagemap_readable)
+ if (!phys_addrs_available)
return RTE_BAD_PHYS_ADDR;
/* standard page size */
@@ -229,6 +232,9 @@ rte_mem_virt2phy(const void *virtaddr)
* the pfn (page frame number) are bits 0-54 (see
* pagemap.txt in linux Documentation)
*/
+ if ((page & 0x7fffffffffffffULL) == 0)
+ return RTE_BAD_PHYS_ADDR;
+
physaddr = ((page & 0x7fffffffffffffULL) * page_size)
+ ((unsigned long)virtaddr % page_size);
@@ -242,7 +248,7 @@ rte_mem_virt2phy(const void *virtaddr)
static int
find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
{
- unsigned i;
+ unsigned int i;
phys_addr_t addr;
for (i = 0; i < hpi->num_pages[0]; i++) {
@@ -255,6 +261,22 @@ find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
}
/*
+ * For each hugepage in hugepg_tbl, fill the physaddr value sequentially.
+ */
+static int
+set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
+{
+ unsigned int i;
+ static phys_addr_t addr;
+
+ for (i = 0; i < hpi->num_pages[0]; i++) {
+ hugepg_tbl[i].physaddr = addr;
+ addr += hugepg_tbl[i].size;
+ }
+ return 0;
+}
+
+/*
* Check whether address-space layout randomization is enabled in
* the kernel. This is important for multi-process as it can prevent
* two processes mapping data to the same virtual address
@@ -313,7 +335,13 @@ get_virtual_area(size_t *size, size_t hugepage_sz)
}
do {
addr = mmap(addr,
- (*size) + hugepage_sz, PROT_READ, MAP_PRIVATE, fd, 0);
+ (*size) + hugepage_sz, PROT_READ,
+#ifdef RTE_ARCH_PPC_64
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
+#else
+ MAP_PRIVATE,
+#endif
+ fd, 0);
if (addr == MAP_FAILED)
*size -= hugepage_sz;
} while (addr == MAP_FAILED && *size > 0);
@@ -592,12 +620,12 @@ static int
cmp_physaddr(const void *a, const void *b)
{
#ifndef RTE_ARCH_PPC_64
- const struct hugepage_file *p1 = (const struct hugepage_file *)a;
- const struct hugepage_file *p2 = (const struct hugepage_file *)b;
+ const struct hugepage_file *p1 = a;
+ const struct hugepage_file *p2 = b;
#else
/* PowerPC needs memory sorted in reverse order from x86 */
- const struct hugepage_file *p1 = (const struct hugepage_file *)b;
- const struct hugepage_file *p2 = (const struct hugepage_file *)a;
+ const struct hugepage_file *p1 = b;
+ const struct hugepage_file *p2 = a;
#endif
if (p1->physaddr < p2->physaddr)
return -1;
@@ -951,7 +979,7 @@ rte_eal_hugepage_init(void)
int nr_hugefiles, nr_hugepages = 0;
void *addr;
- test_proc_pagemap_readable();
+ test_phys_addrs_available();
memset(used_hp, 0, sizeof(used_hp));
@@ -1043,11 +1071,22 @@ rte_eal_hugepage_init(void)
continue;
}
- /* find physical addresses and sockets for each hugepage */
- if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0){
- RTE_LOG(DEBUG, EAL, "Failed to find phys addr for %u MB pages\n",
- (unsigned)(hpi->hugepage_sz / 0x100000));
- goto fail;
+ if (phys_addrs_available) {
+ /* find physical addresses for each hugepage */
+ if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
+ RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
+ "for %u MB pages\n",
+ (unsigned int)(hpi->hugepage_sz / 0x100000));
+ goto fail;
+ }
+ } else {
+ /* set physical addresses for each hugepage */
+ if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
+ RTE_LOG(DEBUG, EAL, "Failed to set phys addr "
+ "for %u MB pages\n",
+ (unsigned int)(hpi->hugepage_sz / 0x100000));
+ goto fail;
+ }
}
if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
@@ -1289,7 +1328,7 @@ rte_eal_hugepage_attach(void)
"into secondary processes\n");
}
- test_proc_pagemap_readable();
+ test_phys_addrs_available();
if (internal_config.xen_dom0_support) {
#ifdef RTE_LIBRTE_XEN_DOM0
@@ -1330,7 +1369,13 @@ rte_eal_hugepage_attach(void)
* use mmap to get identical addresses as the primary process.
*/
base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
- PROT_READ, MAP_PRIVATE, fd_zero, 0);
+ PROT_READ,
+#ifdef RTE_ARCH_PPC_64
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
+#else
+ MAP_PRIVATE,
+#endif
+ fd_zero, 0);
if (base_addr == MAP_FAILED ||
base_addr != mcfg->memseg[s].addr) {
max_seg = s;
@@ -1426,3 +1471,9 @@ error:
close(fd_hugepage);
return -1;
}
+
+bool
+rte_eal_using_phys_addrs(void)
+{
+ return phys_addrs_available;
+}
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci.c b/lib/librte_eal/linuxapp/eal/eal_pci.c
index 876ba381..595622b2 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci.c
@@ -35,6 +35,7 @@
#include <dirent.h>
#include <rte_log.h>
+#include <rte_bus.h>
#include <rte_pci.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
@@ -54,44 +55,7 @@
* IGB_UIO driver (or doesn't initialize, if the device wasn't bound to it).
*/
-/* unbind kernel driver for this device */
-int
-pci_unbind_kernel_driver(struct rte_pci_device *dev)
-{
- int n;
- FILE *f;
- char filename[PATH_MAX];
- char buf[BUFSIZ];
- struct rte_pci_addr *loc = &dev->addr;
-
- /* open /sys/bus/pci/devices/AAAA:BB:CC.D/driver */
- snprintf(filename, sizeof(filename),
- "%s/" PCI_PRI_FMT "/driver/unbind", pci_get_sysfs_path(),
- loc->domain, loc->bus, loc->devid, loc->function);
-
- f = fopen(filename, "w");
- if (f == NULL) /* device was not bound */
- return 0;
-
- n = snprintf(buf, sizeof(buf), PCI_PRI_FMT "\n",
- loc->domain, loc->bus, loc->devid, loc->function);
- if ((n < 0) || (n >= (int)sizeof(buf))) {
- RTE_LOG(ERR, EAL, "%s(): snprintf failed\n", __func__);
- goto error;
- }
- if (fwrite(buf, n, 1, f) == 0) {
- RTE_LOG(ERR, EAL, "%s(): could not write to %s\n", __func__,
- filename);
- goto error;
- }
-
- fclose(f);
- return 0;
-
-error:
- fclose(f);
- return -1;
-}
+extern struct rte_pci_bus rte_pci_bus;
static int
pci_get_kernel_driver_by_path(const char *filename, char *dri_name)
@@ -124,7 +88,7 @@ pci_get_kernel_driver_by_path(const char *filename, char *dri_name)
/* Map pci device */
int
-rte_eal_pci_map_device(struct rte_pci_device *dev)
+rte_pci_map_device(struct rte_pci_device *dev)
{
int ret = -1;
@@ -138,8 +102,10 @@ rte_eal_pci_map_device(struct rte_pci_device *dev)
break;
case RTE_KDRV_IGB_UIO:
case RTE_KDRV_UIO_GENERIC:
- /* map resources for devices that use uio */
- ret = pci_uio_map_resource(dev);
+ if (rte_eal_using_phys_addrs()) {
+ /* map resources for devices that use uio */
+ ret = pci_uio_map_resource(dev);
+ }
break;
default:
RTE_LOG(DEBUG, EAL,
@@ -153,12 +119,15 @@ rte_eal_pci_map_device(struct rte_pci_device *dev)
/* Unmap pci device */
void
-rte_eal_pci_unmap_device(struct rte_pci_device *dev)
+rte_pci_unmap_device(struct rte_pci_device *dev)
{
/* try unmapping the NIC resources using VFIO if it exists */
switch (dev->kdrv) {
case RTE_KDRV_VFIO:
- RTE_LOG(ERR, EAL, "Hotplug doesn't support vfio yet\n");
+#ifdef VFIO_PRESENT
+ if (pci_vfio_is_enabled())
+ pci_vfio_unmap_resource(dev);
+#endif
break;
case RTE_KDRV_IGB_UIO:
case RTE_KDRV_UIO_GENERIC:
@@ -267,8 +236,7 @@ error:
/* Scan one pci sysfs entry, and fill the devices list from it. */
static int
-pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus,
- uint8_t devid, uint8_t function)
+pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
{
char filename[PATH_MAX];
unsigned long tmp;
@@ -281,10 +249,7 @@ pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus,
return -1;
memset(dev, 0, sizeof(*dev));
- dev->addr.domain = domain;
- dev->addr.bus = bus;
- dev->addr.devid = devid;
- dev->addr.function = function;
+ dev->addr = *addr;
/* get vendor id */
snprintf(filename, sizeof(filename), "%s/vendor", dirname);
@@ -359,6 +324,9 @@ pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus,
dev->device.numa_node = tmp;
}
+ rte_pci_device_name(addr, dev->name, sizeof(dev->name));
+ dev->device.name = dev->name;
+
/* parse resources */
snprintf(filename, sizeof(filename), "%s/resource", dirname);
if (pci_parse_sysfs_resource(filename, dev) < 0) {
@@ -389,21 +357,19 @@ pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus,
dev->kdrv = RTE_KDRV_NONE;
/* device is valid, add in list (sorted) */
- if (TAILQ_EMPTY(&pci_device_list)) {
- rte_eal_device_insert(&dev->device);
- TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
+ if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
+ rte_pci_add_device(dev);
} else {
struct rte_pci_device *dev2;
int ret;
- TAILQ_FOREACH(dev2, &pci_device_list, next) {
+ TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
ret = rte_eal_compare_pci_addr(&dev->addr, &dev2->addr);
if (ret > 0)
continue;
if (ret < 0) {
- TAILQ_INSERT_BEFORE(dev2, dev, next);
- rte_eal_device_insert(&dev->device);
+ rte_pci_insert_device(dev2, dev);
} else { /* already registered */
dev2->kdrv = dev->kdrv;
dev2->max_vfs = dev->max_vfs;
@@ -413,8 +379,8 @@ pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus,
}
return 0;
}
- rte_eal_device_insert(&dev->device);
- TAILQ_INSERT_TAIL(&pci_device_list, dev, next);
+
+ rte_pci_add_device(dev);
}
return 0;
@@ -429,16 +395,14 @@ pci_update_device(const struct rte_pci_addr *addr)
pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
addr->function);
- return pci_scan_one(filename, addr->domain, addr->bus, addr->devid,
- addr->function);
+ return pci_scan_one(filename, addr);
}
/*
* split up a pci address into its constituent parts.
*/
static int
-parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
- uint8_t *bus, uint8_t *devid, uint8_t *function)
+parse_pci_addr_format(const char *buf, int bufsize, struct rte_pci_addr *addr)
{
/* first split on ':' */
union splitaddr {
@@ -466,10 +430,10 @@ parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
/* now convert to int values */
errno = 0;
- *domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
- *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
- *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
- *function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+ addr->domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+ addr->bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+ addr->devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+ addr->function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
if (errno != 0)
goto error;
@@ -485,13 +449,16 @@ error:
* list
*/
int
-rte_eal_pci_scan(void)
+rte_pci_scan(void)
{
struct dirent *e;
DIR *dir;
char dirname[PATH_MAX];
- uint16_t domain;
- uint8_t bus, devid, function;
+ struct rte_pci_addr addr;
+
+ /* for debug purposes, PCI can be disabled */
+ if (internal_config.no_pci)
+ return 0;
dir = opendir(pci_get_sysfs_path());
if (dir == NULL) {
@@ -504,13 +471,13 @@ rte_eal_pci_scan(void)
if (e->d_name[0] == '.')
continue;
- if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &domain,
- &bus, &devid, &function) != 0)
+ if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &addr) != 0)
continue;
snprintf(dirname, sizeof(dirname), "%s/%s",
pci_get_sysfs_path(), e->d_name);
- if (pci_scan_one(dirname, domain, bus, devid, function) < 0)
+
+ if (pci_scan_one(dirname, &addr) < 0)
goto error;
}
closedir(dir);
@@ -522,8 +489,8 @@ error:
}
/* Read PCI config space. */
-int rte_eal_pci_read_config(const struct rte_pci_device *device,
- void *buf, size_t len, off_t offset)
+int rte_pci_read_config(const struct rte_pci_device *device,
+ void *buf, size_t len, off_t offset)
{
const struct rte_intr_handle *intr_handle = &device->intr_handle;
@@ -547,8 +514,8 @@ int rte_eal_pci_read_config(const struct rte_pci_device *device,
}
/* Write PCI config space. */
-int rte_eal_pci_write_config(const struct rte_pci_device *device,
- const void *buf, size_t len, off_t offset)
+int rte_pci_write_config(const struct rte_pci_device *device,
+ const void *buf, size_t len, off_t offset)
{
const struct rte_intr_handle *intr_handle = &device->intr_handle;
@@ -574,7 +541,7 @@ int rte_eal_pci_write_config(const struct rte_pci_device *device,
#if defined(RTE_ARCH_X86)
static int
pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused,
- struct rte_pci_ioport *p)
+ struct rte_pci_ioport *p)
{
uint16_t start, end;
FILE *fp;
@@ -632,8 +599,8 @@ pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused,
#endif
int
-rte_eal_pci_ioport_map(struct rte_pci_device *dev, int bar,
- struct rte_pci_ioport *p)
+rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
+ struct rte_pci_ioport *p)
{
int ret = -1;
@@ -670,8 +637,8 @@ rte_eal_pci_ioport_map(struct rte_pci_device *dev, int bar,
}
void
-rte_eal_pci_ioport_read(struct rte_pci_ioport *p,
- void *data, size_t len, off_t offset)
+rte_pci_ioport_read(struct rte_pci_ioport *p,
+ void *data, size_t len, off_t offset)
{
switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
@@ -696,8 +663,8 @@ rte_eal_pci_ioport_read(struct rte_pci_ioport *p,
}
void
-rte_eal_pci_ioport_write(struct rte_pci_ioport *p,
- const void *data, size_t len, off_t offset)
+rte_pci_ioport_write(struct rte_pci_ioport *p,
+ const void *data, size_t len, off_t offset)
{
switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
@@ -722,7 +689,7 @@ rte_eal_pci_ioport_write(struct rte_pci_ioport *p,
}
int
-rte_eal_pci_ioport_unmap(struct rte_pci_ioport *p)
+rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
int ret = -1;
@@ -754,19 +721,3 @@ rte_eal_pci_ioport_unmap(struct rte_pci_ioport *p)
return ret;
}
-
-/* Init the PCI EAL subsystem */
-int
-rte_eal_pci_init(void)
-{
- /* for debug purposes, PCI can be disabled */
- if (internal_config.no_pci)
- return 0;
-
- if (rte_eal_pci_scan() < 0) {
- RTE_LOG(ERR, EAL, "%s(): Cannot scan PCI bus\n", __func__);
- return -1;
- }
-
- return 0;
-}
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_init.h b/lib/librte_eal/linuxapp/eal/eal_pci_init.h
index 6a960d1b..ae2980d6 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci_init.h
+++ b/lib/librte_eal/linuxapp/eal/eal_pci_init.h
@@ -88,8 +88,9 @@ void pci_vfio_ioport_write(struct rte_pci_ioport *p,
const void *data, size_t len, off_t offset);
int pci_vfio_ioport_unmap(struct rte_pci_ioport *p);
-/* map VFIO resource prototype */
+/* map/unmap VFIO resource prototype */
int pci_vfio_map_resource(struct rte_pci_device *dev);
+int pci_vfio_unmap_resource(struct rte_pci_device *dev);
#endif
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_uio.c b/lib/librte_eal/linuxapp/eal/eal_pci_uio.c
index 3e4ffb57..fa10329f 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci_uio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci_uio.c
@@ -38,6 +38,7 @@
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/mman.h>
+#include <sys/sysmacros.h>
#include <linux/pci_regs.h>
#if defined(RTE_ARCH_X86)
@@ -230,7 +231,7 @@ pci_uio_free_resource(struct rte_pci_device *dev,
close(dev->intr_handle.uio_cfg_fd);
dev->intr_handle.uio_cfg_fd = -1;
}
- if (dev->intr_handle.fd) {
+ if (dev->intr_handle.fd >= 0) {
close(dev->intr_handle.fd);
dev->intr_handle.fd = -1;
dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c b/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
index 5f478c59..2be13195 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c
@@ -38,6 +38,7 @@
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
+#include <stdbool.h>
#include <rte_log.h>
#include <rte_pci.h>
@@ -172,7 +173,7 @@ pci_vfio_get_msix_bar(int fd, int *msix_bar, uint32_t *msix_table_offset,
/* set PCI bus mastering */
static int
-pci_vfio_set_bus_master(int dev_fd)
+pci_vfio_set_bus_master(int dev_fd, bool op)
{
uint16_t reg;
int ret;
@@ -185,8 +186,11 @@ pci_vfio_set_bus_master(int dev_fd)
return -1;
}
- /* set the master bit */
- reg |= PCI_COMMAND_MASTER;
+ if (op)
+ /* set the master bit */
+ reg |= PCI_COMMAND_MASTER;
+ else
+ reg &= ~(PCI_COMMAND_MASTER);
ret = pwrite64(dev_fd, &reg, sizeof(reg),
VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
@@ -355,7 +359,8 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
} else {
/* if we're in a secondary process, just find our tailq entry */
TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
- if (memcmp(&vfio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
+ if (rte_eal_compare_pci_addr(&vfio_res->pci_addr,
+ &dev->addr))
continue;
break;
}
@@ -517,7 +522,7 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
}
/* set bus mastering for the device */
- if (pci_vfio_set_bus_master(vfio_dev_fd)) {
+ if (pci_vfio_set_bus_master(vfio_dev_fd, true)) {
RTE_LOG(ERR, EAL, " %s cannot set up bus mastering!\n", pci_addr);
close(vfio_dev_fd);
rte_free(vfio_res);
@@ -535,6 +540,79 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
}
int
+pci_vfio_unmap_resource(struct rte_pci_device *dev)
+{
+ char pci_addr[PATH_MAX] = {0};
+ struct rte_pci_addr *loc = &dev->addr;
+ int i, ret;
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct mapped_pci_res_list *vfio_res_list;
+
+ struct pci_map *maps;
+
+ /* store PCI address string */
+ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+
+ if (close(dev->intr_handle.fd) < 0) {
+ RTE_LOG(INFO, EAL, "Error when closing eventfd file descriptor for %s\n",
+ pci_addr);
+ return -1;
+ }
+
+ if (pci_vfio_set_bus_master(dev->intr_handle.vfio_dev_fd, false)) {
+ RTE_LOG(ERR, EAL, " %s cannot unset bus mastering for PCI device!\n",
+ pci_addr);
+ return -1;
+ }
+
+ ret = vfio_release_device(pci_get_sysfs_path(), pci_addr,
+ dev->intr_handle.vfio_dev_fd);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot release device\n", __func__);
+ return ret;
+ }
+
+ vfio_res_list = RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+ /* Get vfio_res */
+ TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
+ if (memcmp(&vfio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
+ continue;
+ break;
+ }
+ /* if we haven't found our tailq entry, something's wrong */
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
+ pci_addr);
+ return -1;
+ }
+
+ /* unmap BARs */
+ maps = vfio_res->maps;
+
+ RTE_LOG(INFO, EAL, "Releasing pci mapped resource for %s\n",
+ pci_addr);
+ for (i = 0; i < (int) vfio_res->nb_maps; i++) {
+
+ /*
+ * We do not need to be aware of MSI-X table BAR mappings as
+ * when mapping. Just using current maps array is enough
+ */
+ if (maps[i].addr) {
+ RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n",
+ pci_addr, maps[i].addr);
+ pci_unmap_resource(maps[i].addr, maps[i].size);
+ }
+ }
+
+ TAILQ_REMOVE(vfio_res_list, vfio_res, next);
+
+ return 0;
+}
+
+int
pci_vfio_ioport_map(struct rte_pci_device *dev, int bar,
struct rte_pci_ioport *p)
{
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c
index 702f7a2e..53ac725d 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c
@@ -50,12 +50,15 @@
static struct vfio_config vfio_cfg;
static int vfio_type1_dma_map(int);
+static int vfio_spapr_dma_map(int);
static int vfio_noiommu_dma_map(int);
/* IOMMU types we support */
static const struct vfio_iommu_type iommu_types[] = {
/* x86 IOMMU, otherwise known as type 1 */
{ RTE_VFIO_TYPE1, "Type 1", &vfio_type1_dma_map},
+ /* ppc64 IOMMU, otherwise known as spapr */
+ { RTE_VFIO_SPAPR, "sPAPR", &vfio_spapr_dma_map},
/* IOMMU-less mode */
{ RTE_VFIO_NOIOMMU, "No-IOMMU", &vfio_noiommu_dma_map},
};
@@ -65,13 +68,32 @@ vfio_get_group_fd(int iommu_group_no)
{
int i;
int vfio_group_fd;
+ int group_idx = -1;
char filename[PATH_MAX];
/* check if we already have the group descriptor open */
- for (i = 0; i < vfio_cfg.vfio_group_idx; i++)
+ for (i = 0; i < VFIO_MAX_GROUPS; i++)
if (vfio_cfg.vfio_groups[i].group_no == iommu_group_no)
return vfio_cfg.vfio_groups[i].fd;
+ /* Lets see first if there is room for a new group */
+ if (vfio_cfg.vfio_active_groups == VFIO_MAX_GROUPS) {
+ RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
+ return -1;
+ }
+
+ /* Now lets get an index for the new group */
+ for (i = 0; i < VFIO_MAX_GROUPS; i++)
+ if (vfio_cfg.vfio_groups[i].group_no == -1) {
+ group_idx = i;
+ break;
+ }
+
+ /* This should not happen */
+ if (group_idx == -1) {
+ RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
+ return -1;
+ }
/* if primary, try to open the group */
if (internal_config.process_type == RTE_PROC_PRIMARY) {
/* try regular group format */
@@ -101,14 +123,9 @@ vfio_get_group_fd(int iommu_group_no)
/* noiommu group found */
}
- /* if the fd is valid, create a new group for it */
- if (vfio_cfg.vfio_group_idx == VFIO_MAX_GROUPS) {
- RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
- close(vfio_group_fd);
- return -1;
- }
- vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = iommu_group_no;
- vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = vfio_group_fd;
+ vfio_cfg.vfio_groups[group_idx].group_no = iommu_group_no;
+ vfio_cfg.vfio_groups[group_idx].fd = vfio_group_fd;
+ vfio_cfg.vfio_active_groups++;
return vfio_group_fd;
}
/* if we're in a secondary process, request group fd from the primary
@@ -155,14 +172,115 @@ vfio_get_group_fd(int iommu_group_no)
return -1;
}
+
+static int
+get_vfio_group_idx(int vfio_group_fd)
+{
+ int i;
+ for (i = 0; i < VFIO_MAX_GROUPS; i++)
+ if (vfio_cfg.vfio_groups[i].fd == vfio_group_fd)
+ return i;
+ return -1;
+}
+
+static void
+vfio_group_device_get(int vfio_group_fd)
+{
+ int i;
+
+ i = get_vfio_group_idx(vfio_group_fd);
+ if (i < 0 || i > VFIO_MAX_GROUPS)
+ RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
+ else
+ vfio_cfg.vfio_groups[i].devices++;
+}
+
static void
-clear_current_group(void)
+vfio_group_device_put(int vfio_group_fd)
{
- vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = 0;
- vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = -1;
+ int i;
+
+ i = get_vfio_group_idx(vfio_group_fd);
+ if (i < 0 || i > VFIO_MAX_GROUPS)
+ RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
+ else
+ vfio_cfg.vfio_groups[i].devices--;
+}
+
+static int
+vfio_group_device_count(int vfio_group_fd)
+{
+ int i;
+
+ i = get_vfio_group_idx(vfio_group_fd);
+ if (i < 0 || i > VFIO_MAX_GROUPS) {
+ RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
+ return -1;
+ }
+
+ return vfio_cfg.vfio_groups[i].devices;
+}
+
+int
+clear_group(int vfio_group_fd)
+{
+ int i;
+ int socket_fd, ret;
+
+ if (internal_config.process_type == RTE_PROC_PRIMARY) {
+
+ i = get_vfio_group_idx(vfio_group_fd);
+ if (i < 0)
+ return -1;
+ vfio_cfg.vfio_groups[i].group_no = -1;
+ vfio_cfg.vfio_groups[i].fd = -1;
+ vfio_cfg.vfio_groups[i].devices = 0;
+ vfio_cfg.vfio_active_groups--;
+ return 0;
+ }
+
+ /* This is just for SECONDARY processes */
+ socket_fd = vfio_mp_sync_connect_to_primary();
+
+ if (socket_fd < 0) {
+ RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");
+ return -1;
+ }
+
+ if (vfio_mp_sync_send_request(socket_fd, SOCKET_CLR_GROUP) < 0) {
+ RTE_LOG(ERR, EAL, " cannot request container fd!\n");
+ close(socket_fd);
+ return -1;
+ }
+
+ if (vfio_mp_sync_send_request(socket_fd, vfio_group_fd) < 0) {
+ RTE_LOG(ERR, EAL, " cannot send group fd!\n");
+ close(socket_fd);
+ return -1;
+ }
+
+ ret = vfio_mp_sync_receive_request(socket_fd);
+ switch (ret) {
+ case SOCKET_NO_FD:
+ RTE_LOG(ERR, EAL, " BAD VFIO group fd!\n");
+ close(socket_fd);
+ break;
+ case SOCKET_OK:
+ close(socket_fd);
+ return 0;
+ case SOCKET_ERR:
+ RTE_LOG(ERR, EAL, " Socket error\n");
+ close(socket_fd);
+ break;
+ default:
+ RTE_LOG(ERR, EAL, " UNKNOWN reply, %d\n", ret);
+ close(socket_fd);
+ }
+ return -1;
}
-int vfio_setup_device(const char *sysfs_base, const char *dev_addr,
+int
+vfio_setup_device(const char *sysfs_base, const char *dev_addr,
int *vfio_dev_fd, struct vfio_device_info *device_info)
{
struct vfio_group_status group_status = {
@@ -189,18 +307,10 @@ int vfio_setup_device(const char *sysfs_base, const char *dev_addr,
if (vfio_group_fd < 0)
return -1;
- /* store group fd */
- vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = iommu_group_no;
- vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = vfio_group_fd;
-
/* if group_fd == 0, that means the device isn't managed by VFIO */
if (vfio_group_fd == 0) {
- RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
+ RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
dev_addr);
- /* we store 0 as group fd to distinguish between existing but
- * unbound VFIO groups, and groups that don't exist at all.
- */
- vfio_cfg.vfio_group_idx++;
return 1;
}
@@ -215,12 +325,12 @@ int vfio_setup_device(const char *sysfs_base, const char *dev_addr,
RTE_LOG(ERR, EAL, " %s cannot get group status, "
"error %i (%s)\n", dev_addr, errno, strerror(errno));
close(vfio_group_fd);
- clear_current_group();
+ clear_group(vfio_group_fd);
return -1;
} else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
RTE_LOG(ERR, EAL, " %s VFIO group is not viable!\n", dev_addr);
close(vfio_group_fd);
- clear_current_group();
+ clear_group(vfio_group_fd);
return -1;
}
@@ -234,60 +344,131 @@ int vfio_setup_device(const char *sysfs_base, const char *dev_addr,
RTE_LOG(ERR, EAL, " %s cannot add VFIO group to container, "
"error %i (%s)\n", dev_addr, errno, strerror(errno));
close(vfio_group_fd);
- clear_current_group();
+ clear_group(vfio_group_fd);
return -1;
}
+
/*
- * at this point we know that this group has been successfully
- * initialized, so we increment vfio_group_idx to indicate that we can
- * add new groups.
+ * pick an IOMMU type and set up DMA mappings for container
+ *
+ * needs to be done only once, only when first group is
+ * assigned to a container and only in primary process.
+ * Note this can happen several times with the hotplug
+ * functionality.
*/
- vfio_cfg.vfio_group_idx++;
- }
-
- /*
- * pick an IOMMU type and set up DMA mappings for container
- *
- * needs to be done only once, only when at least one group is assigned to
- * a container and only in primary process
- */
- if (internal_config.process_type == RTE_PROC_PRIMARY &&
- vfio_cfg.vfio_container_has_dma == 0) {
- /* select an IOMMU type which we will be using */
- const struct vfio_iommu_type *t =
+ if (internal_config.process_type == RTE_PROC_PRIMARY &&
+ vfio_cfg.vfio_active_groups == 1) {
+ /* select an IOMMU type which we will be using */
+ const struct vfio_iommu_type *t =
vfio_set_iommu_type(vfio_cfg.vfio_container_fd);
- if (!t) {
- RTE_LOG(ERR, EAL, " %s failed to select IOMMU type\n", dev_addr);
- return -1;
- }
- ret = t->dma_map_func(vfio_cfg.vfio_container_fd);
- if (ret) {
- RTE_LOG(ERR, EAL, " %s DMA remapping failed, "
- "error %i (%s)\n", dev_addr, errno, strerror(errno));
- return -1;
+ if (!t) {
+ RTE_LOG(ERR, EAL,
+ " %s failed to select IOMMU type\n",
+ dev_addr);
+ close(vfio_group_fd);
+ clear_group(vfio_group_fd);
+ return -1;
+ }
+ ret = t->dma_map_func(vfio_cfg.vfio_container_fd);
+ if (ret) {
+ RTE_LOG(ERR, EAL,
+ " %s DMA remapping failed, error %i (%s)\n",
+ dev_addr, errno, strerror(errno));
+ close(vfio_group_fd);
+ clear_group(vfio_group_fd);
+ return -1;
+ }
}
- vfio_cfg.vfio_container_has_dma = 1;
}
/* get a file descriptor for the device */
*vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
if (*vfio_dev_fd < 0) {
- /* if we cannot get a device fd, this simply means that this
- * particular port is not bound to VFIO
- */
- RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
+ /* if we cannot get a device fd, this implies a problem with
+ * the VFIO group or the container not having IOMMU configured.
+ */
+
+ RTE_LOG(WARNING, EAL, "Getting a vfio_dev_fd for %s failed\n",
dev_addr);
- return 1;
+ close(vfio_group_fd);
+ clear_group(vfio_group_fd);
+ return -1;
}
/* test and setup the device */
ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
if (ret) {
RTE_LOG(ERR, EAL, " %s cannot get device info, "
- "error %i (%s)\n", dev_addr, errno, strerror(errno));
+ "error %i (%s)\n", dev_addr, errno,
+ strerror(errno));
close(*vfio_dev_fd);
+ close(vfio_group_fd);
+ clear_group(vfio_group_fd);
return -1;
}
+ vfio_group_device_get(vfio_group_fd);
+
+ return 0;
+}
+
+int
+vfio_release_device(const char *sysfs_base, const char *dev_addr,
+ int vfio_dev_fd)
+{
+ struct vfio_group_status group_status = {
+ .argsz = sizeof(group_status)
+ };
+ int vfio_group_fd;
+ int iommu_group_no;
+ int ret;
+
+ /* get group number */
+ ret = vfio_get_group_no(sysfs_base, dev_addr, &iommu_group_no);
+ if (ret <= 0) {
+ RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver\n",
+ dev_addr);
+ /* This is an error at this point. */
+ return -1;
+ }
+
+ /* get the actual group fd */
+ vfio_group_fd = vfio_get_group_fd(iommu_group_no);
+ if (vfio_group_fd <= 0) {
+ RTE_LOG(INFO, EAL, "vfio_get_group_fd failed for %s\n",
+ dev_addr);
+ return -1;
+ }
+
+ /* At this point we got an active group. Closing it will make the
+ * container detachment. If this is the last active group, VFIO kernel
+ * code will unset the container and the IOMMU mappings.
+ */
+
+ /* Closing a device */
+ if (close(vfio_dev_fd) < 0) {
+ RTE_LOG(INFO, EAL, "Error when closing vfio_dev_fd for %s\n",
+ dev_addr);
+ return -1;
+ }
+
+ /* An VFIO group can have several devices attached. Just when there is
+ * no devices remaining should the group be closed.
+ */
+ vfio_group_device_put(vfio_group_fd);
+ if (!vfio_group_device_count(vfio_group_fd)) {
+
+ if (close(vfio_group_fd) < 0) {
+ RTE_LOG(INFO, EAL, "Error when closing vfio_group_fd for %s\n",
+ dev_addr);
+ return -1;
+ }
+
+ if (clear_group(vfio_group_fd) < 0) {
+ RTE_LOG(INFO, EAL, "Error when clearing group for %s\n",
+ dev_addr);
+ return -1;
+ }
+ }
return 0;
}
@@ -302,6 +483,7 @@ vfio_enable(const char *modname)
for (i = 0; i < VFIO_MAX_GROUPS; i++) {
vfio_cfg.vfio_groups[i].fd = -1;
vfio_cfg.vfio_groups[i].group_no = -1;
+ vfio_cfg.vfio_groups[i].devices = 0;
}
/* inform the user that we are probing for VFIO */
@@ -531,7 +713,8 @@ vfio_type1_dma_map(int vfio_container_fd)
if (ret) {
RTE_LOG(ERR, EAL, " cannot set up DMA remapping, "
- "error %i (%s)\n", errno, strerror(errno));
+ "error %i (%s)\n", errno,
+ strerror(errno));
return -1;
}
}
@@ -540,6 +723,93 @@ vfio_type1_dma_map(int vfio_container_fd)
}
static int
+vfio_spapr_dma_map(int vfio_container_fd)
+{
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ int i, ret;
+
+ struct vfio_iommu_spapr_register_memory reg = {
+ .argsz = sizeof(reg),
+ .flags = 0
+ };
+ struct vfio_iommu_spapr_tce_info info = {
+ .argsz = sizeof(info),
+ };
+ struct vfio_iommu_spapr_tce_create create = {
+ .argsz = sizeof(create),
+ };
+ struct vfio_iommu_spapr_tce_remove remove = {
+ .argsz = sizeof(remove),
+ };
+
+ /* query spapr iommu info */
+ ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " cannot get iommu info, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+
+ /* remove default DMA of 32 bit window */
+ remove.start_addr = info.dma32_window_start;
+ ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " cannot remove default DMA window, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+
+ /* calculate window size based on number of hugepages configured */
+ create.window_size = rte_eal_get_physmem_size();
+ create.page_shift = __builtin_ctzll(ms->hugepage_sz);
+ create.levels = 2;
+
+ ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " cannot create new DMA window, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+
+ /* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
+ for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+ struct vfio_iommu_type1_dma_map dma_map;
+
+ if (ms[i].addr == NULL)
+ break;
+
+ reg.vaddr = (uintptr_t) ms[i].addr;
+ reg.size = ms[i].len;
+ ret = ioctl(vfio_container_fd,
+ VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " cannot register vaddr for IOMMU, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+
+ memset(&dma_map, 0, sizeof(dma_map));
+ dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
+ dma_map.vaddr = ms[i].addr_64;
+ dma_map.size = ms[i].len;
+ dma_map.iova = ms[i].phys_addr;
+ dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
+ VFIO_DMA_MAP_FLAG_WRITE;
+
+ ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+
+ if (ret) {
+ RTE_LOG(ERR, EAL, " cannot set up DMA remapping, "
+ "error %i (%s)\n", errno, strerror(errno));
+ return -1;
+ }
+
+ }
+
+ return 0;
+}
+
+static int
vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
{
/* No-IOMMU mode does not need DMA mapping */
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.h b/lib/librte_eal/linuxapp/eal/eal_vfio.h
index 29f7f3ec..5ff63e5d 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.h
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.h
@@ -54,6 +54,62 @@
#define RTE_VFIO_TYPE1 VFIO_TYPE1_IOMMU
+#ifndef VFIO_SPAPR_TCE_v2_IOMMU
+#define RTE_VFIO_SPAPR 7
+#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17)
+#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19)
+#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20)
+
+struct vfio_iommu_spapr_register_memory {
+ uint32_t argsz;
+ uint32_t flags;
+ uint64_t vaddr;
+ uint64_t size;
+};
+
+struct vfio_iommu_spapr_tce_create {
+ uint32_t argsz;
+ uint32_t flags;
+ /* in */
+ uint32_t page_shift;
+ uint32_t __resv1;
+ uint64_t window_size;
+ uint32_t levels;
+ uint32_t __resv2;
+ /* out */
+ uint64_t start_addr;
+};
+
+struct vfio_iommu_spapr_tce_remove {
+ uint32_t argsz;
+ uint32_t flags;
+ /* in */
+ uint64_t start_addr;
+};
+
+struct vfio_iommu_spapr_tce_ddw_info {
+ uint64_t pgsizes;
+ uint32_t max_dynamic_windows_supported;
+ uint32_t levels;
+};
+
+/* SPAPR_v2 is not present, but SPAPR might be */
+#ifndef VFIO_SPAPR_TCE_IOMMU
+#define VFIO_IOMMU_SPAPR_TCE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
+
+struct vfio_iommu_spapr_tce_info {
+ uint32_t argsz;
+ uint32_t flags;
+ uint32_t dma32_window_start;
+ uint32_t dma32_window_size;
+ struct vfio_iommu_spapr_tce_ddw_info ddw;
+};
+#endif /* VFIO_SPAPR_TCE_IOMMU */
+
+#else /* VFIO_SPAPR_TCE_v2_IOMMU */
+#define RTE_VFIO_SPAPR VFIO_SPAPR_TCE_v2_IOMMU
+#endif
+
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
#define RTE_VFIO_NOIOMMU 8
#else
@@ -78,13 +134,13 @@ int vfio_mp_sync_connect_to_primary(void);
struct vfio_group {
int group_no;
int fd;
+ int devices;
};
struct vfio_config {
int vfio_enabled;
int vfio_container_fd;
- int vfio_container_has_dma;
- int vfio_group_idx;
+ int vfio_active_groups;
struct vfio_group vfio_groups[VFIO_MAX_GROUPS];
};
@@ -130,6 +186,10 @@ vfio_get_group_no(const char *sysfs_base,
int
vfio_get_group_fd(int iommu_group_no);
+/* remove group fd from internal VFIO group fd array */
+int
+clear_group(int vfio_group_fd);
+
/**
* Setup vfio_cfg for the device identified by its address. It discovers
* the configured I/O MMU groups or sets a new one for the device. If a new
@@ -140,6 +200,8 @@ vfio_get_group_fd(int iommu_group_no);
int vfio_setup_device(const char *sysfs_base, const char *dev_addr,
int *vfio_dev_fd, struct vfio_device_info *device_info);
+int vfio_release_device(const char *sysfs_base, const char *dev_addr, int fd);
+
int vfio_enable(const char *modname);
int vfio_is_enabled(const char *modname);
@@ -150,6 +212,7 @@ int vfio_mp_sync_setup(void);
#define SOCKET_REQ_CONTAINER 0x100
#define SOCKET_REQ_GROUP 0x200
+#define SOCKET_CLR_GROUP 0x300
#define SOCKET_OK 0x0
#define SOCKET_NO_FD 0x1
#define SOCKET_ERR 0xFF
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c b/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
index fb4a2f84..7e8095cb 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
@@ -267,7 +267,7 @@ vfio_mp_sync_connect_to_primary(void)
static __attribute__((noreturn)) void *
vfio_mp_sync_thread(void __rte_unused * arg)
{
- int ret, fd, vfio_group_no;
+ int ret, fd, vfio_data;
/* wait for requests on the socket */
for (;;) {
@@ -305,13 +305,13 @@ vfio_mp_sync_thread(void __rte_unused * arg)
break;
case SOCKET_REQ_GROUP:
/* wait for group number */
- vfio_group_no = vfio_mp_sync_receive_request(conn_sock);
- if (vfio_group_no < 0) {
+ vfio_data = vfio_mp_sync_receive_request(conn_sock);
+ if (vfio_data < 0) {
close(conn_sock);
continue;
}
- fd = vfio_get_group_fd(vfio_group_no);
+ fd = vfio_get_group_fd(vfio_data);
if (fd < 0)
vfio_mp_sync_send_request(conn_sock, SOCKET_ERR);
@@ -324,6 +324,21 @@ vfio_mp_sync_thread(void __rte_unused * arg)
vfio_mp_sync_send_fd(conn_sock, fd);
}
break;
+ case SOCKET_CLR_GROUP:
+ /* wait for group fd */
+ vfio_data = vfio_mp_sync_receive_request(conn_sock);
+ if (vfio_data < 0) {
+ close(conn_sock);
+ continue;
+ }
+
+ ret = clear_group(vfio_data);
+
+ if (ret < 0)
+ vfio_mp_sync_send_request(conn_sock, SOCKET_NO_FD);
+ else
+ vfio_mp_sync_send_request(conn_sock, SOCKET_OK);
+ break;
default:
vfio_mp_sync_send_request(conn_sock, SOCKET_ERR);
break;
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
index d459bf48..6daffebf 100644
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
@@ -49,8 +49,9 @@ enum rte_intr_handle_type {
RTE_INTR_HANDLE_VFIO_LEGACY, /**< vfio device handle (legacy) */
RTE_INTR_HANDLE_VFIO_MSI, /**< vfio device handle (MSI) */
RTE_INTR_HANDLE_VFIO_MSIX, /**< vfio device handle (MSIX) */
- RTE_INTR_HANDLE_ALARM, /**< alarm handle */
- RTE_INTR_HANDLE_EXT, /**< external handler */
+ RTE_INTR_HANDLE_ALARM, /**< alarm handle */
+ RTE_INTR_HANDLE_EXT, /**< external handler */
+ RTE_INTR_HANDLE_VDEV, /**< virtual device */
RTE_INTR_HANDLE_MAX
};
@@ -171,6 +172,15 @@ rte_intr_rx_ctl(struct rte_intr_handle *intr_handle,
int epfd, int op, unsigned int vec, void *data);
/**
+ * It deletes registered eventfds.
+ *
+ * @param intr_handle
+ * Pointer to the interrupt handle.
+ */
+void
+rte_intr_free_epoll_fd(struct rte_intr_handle *intr_handle);
+
+/**
* It enables the packet I/O interrupt event if it's necessary.
* It creates event fd for each interrupt vector when MSIX is used,
* otherwise it multiplexes a single event fd.
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
index 09713b0c..2ac879fd 100644
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
@@ -116,11 +116,10 @@ struct rte_kni_fifo {
struct rte_kni_mbuf {
void *buf_addr __attribute__((__aligned__(RTE_CACHE_LINE_SIZE)));
uint64_t buf_physaddr;
- char pad0[2];
uint16_t data_off; /**< Start address of data in segment buffer. */
char pad1[2];
- uint8_t nb_segs; /**< Number of segments. */
- char pad4[1];
+ uint16_t nb_segs; /**< Number of segments. */
+ char pad4[2];
uint64_t ol_flags; /**< Offload features. */
char pad2[4];
uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
diff --git a/lib/librte_eal/linuxapp/eal/rte_eal_version.map b/lib/librte_eal/linuxapp/eal/rte_eal_version.map
index 83721ba5..670bab3a 100644
--- a/lib/librte_eal/linuxapp/eal/rte_eal_version.map
+++ b/lib/librte_eal/linuxapp/eal/rte_eal_version.map
@@ -6,8 +6,6 @@ DPDK_2.0 {
eal_parse_sysfs_value;
eal_timer_source;
lcore_config;
- pci_device_list;
- pci_driver_list;
per_lcore__lcore_id;
per_lcore__rte_errno;
rte_calloc;
@@ -22,12 +20,9 @@ DPDK_2.0 {
rte_dump_tailq;
rte_eal_alarm_cancel;
rte_eal_alarm_set;
- rte_eal_dev_init;
rte_eal_devargs_add;
rte_eal_devargs_dump;
rte_eal_devargs_type_count;
- rte_eal_driver_register;
- rte_eal_driver_unregister;
rte_eal_get_configuration;
rte_eal_get_lcore_state;
rte_eal_get_physmem_layout;
@@ -40,18 +35,10 @@ DPDK_2.0 {
rte_eal_mp_remote_launch;
rte_eal_mp_wait_lcore;
rte_eal_parse_devargs_str;
- rte_eal_pci_dump;
- rte_eal_pci_probe;
- rte_eal_pci_probe_one;
- rte_eal_pci_register;
- rte_eal_pci_scan;
- rte_eal_pci_unregister;
rte_eal_process_type;
rte_eal_remote_launch;
rte_eal_tailq_lookup;
rte_eal_tailq_register;
- rte_eal_vdev_init;
- rte_eal_vdev_uninit;
rte_eal_wait_lcore;
rte_exit;
rte_free;
@@ -66,11 +53,8 @@ DPDK_2.0 {
rte_intr_disable;
rte_intr_enable;
rte_log;
- rte_log_add_in_history;
rte_log_cur_msg_loglevel;
rte_log_cur_msg_logtype;
- rte_log_dump_history;
- rte_log_set_history;
rte_logs;
rte_malloc;
rte_malloc_dump_stats;
@@ -114,9 +98,6 @@ DPDK_2.0 {
DPDK_2.1 {
global:
- rte_eal_pci_detach;
- rte_eal_pci_read_config;
- rte_eal_pci_write_config;
rte_epoll_ctl;
rte_epoll_wait;
rte_intr_allow_others;
@@ -146,12 +127,6 @@ DPDK_16.04 {
global:
rte_cpu_get_flag_name;
- rte_eal_pci_ioport_map;
- rte_eal_pci_ioport_read;
- rte_eal_pci_ioport_unmap;
- rte_eal_pci_ioport_write;
- rte_eal_pci_map_device;
- rte_eal_pci_unmap_device;
rte_eal_primary_proc_alive;
} DPDK_2.2;
@@ -174,7 +149,52 @@ DPDK_16.11 {
rte_delay_us_callback_register;
rte_eal_dev_attach;
rte_eal_dev_detach;
- rte_eal_vdrv_register;
- rte_eal_vdrv_unregister;
} DPDK_16.07;
+
+DPDK_17.02 {
+ global:
+
+ rte_bus_dump;
+ rte_bus_probe;
+ rte_bus_register;
+ rte_bus_scan;
+ rte_bus_unregister;
+
+} DPDK_16.11;
+
+DPDK_17.05 {
+ global:
+
+ rte_cpu_is_supported;
+ rte_intr_free_epoll_fd;
+ rte_log_dump;
+ rte_log_get_global_level;
+ rte_log_register;
+ rte_log_set_global_level;
+ rte_log_set_level;
+ rte_log_set_level_regexp;
+ rte_pci_detach;
+ rte_pci_dump;
+ rte_pci_ioport_map;
+ rte_pci_ioport_read;
+ rte_pci_ioport_unmap;
+ rte_pci_ioport_write;
+ rte_pci_map_device;
+ rte_pci_probe;
+ rte_pci_probe_one;
+ rte_pci_read_config;
+ rte_pci_register;
+ rte_pci_scan;
+ rte_pci_unmap_device;
+ rte_pci_unregister;
+ rte_pci_write_config;
+ rte_vdev_init;
+ rte_vdev_register;
+ rte_vdev_uninit;
+ rte_vdev_unregister;
+ vfio_get_container_fd;
+ vfio_get_group_fd;
+ vfio_get_group_no;
+
+} DPDK_17.02;
diff --git a/lib/librte_eal/linuxapp/igb_uio/compat.h b/lib/librte_eal/linuxapp/igb_uio/compat.h
index 0d781e48..b800a53c 100644
--- a/lib/librte_eal/linuxapp/igb_uio/compat.h
+++ b/lib/librte_eal/linuxapp/igb_uio/compat.h
@@ -123,3 +123,7 @@ static bool pci_check_and_mask_intx(struct pci_dev *pdev)
}
#endif /* < 3.3.0 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
+#define HAVE_PCI_ENABLE_MSIX
+#endif
diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
index df41e457..b9d427c5 100644
--- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
+++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
@@ -314,7 +314,7 @@ igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info)
}
}
- return (iom != 0) ? ret : -ENOENT;
+ return (iom != 0 || iop != 0) ? ret : -ENOENT;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
@@ -325,7 +325,11 @@ static int
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct rte_uio_pci_dev *udev;
+#ifdef HAVE_PCI_ENABLE_MSIX
struct msix_entry msix_entry;
+#endif
+ dma_addr_t map_dma_addr;
+ void *map_addr;
int err;
udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
@@ -379,18 +383,28 @@ igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
switch (igbuio_intr_mode_preferred) {
case RTE_INTR_MODE_MSIX:
/* Only 1 msi-x vector needed */
+#ifdef HAVE_PCI_ENABLE_MSIX
msix_entry.entry = 0;
if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
dev_dbg(&dev->dev, "using MSI-X");
+ udev->info.irq_flags = IRQF_NO_THREAD;
udev->info.irq = msix_entry.vector;
udev->mode = RTE_INTR_MODE_MSIX;
break;
}
+#else
+ if (pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSIX) == 1) {
+ dev_dbg(&dev->dev, "using MSI-X");
+ udev->info.irq = pci_irq_vector(dev, 0);
+ udev->mode = RTE_INTR_MODE_MSIX;
+ break;
+ }
+#endif
/* fall back to INTX */
case RTE_INTR_MODE_LEGACY:
if (pci_intx_mask_supported(dev)) {
dev_dbg(&dev->dev, "using INTX");
- udev->info.irq_flags = IRQF_SHARED;
+ udev->info.irq_flags = IRQF_SHARED | IRQF_NO_THREAD;
udev->info.irq = dev->irq;
udev->mode = RTE_INTR_MODE_LEGACY;
break;
@@ -423,6 +437,27 @@ igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev_info(&dev->dev, "uio device registered with irq %lx\n",
udev->info.irq);
+ /*
+ * Doing a harmless dma mapping for attaching the device to
+ * the iommu identity mapping if kernel boots with iommu=pt.
+ * Note this is not a problem if no IOMMU at all.
+ */
+ map_addr = dma_alloc_coherent(&dev->dev, 1024, &map_dma_addr,
+ GFP_KERNEL);
+ if (map_addr)
+ memset(map_addr, 0, 1024);
+
+ if (!map_addr)
+ dev_info(&dev->dev, "dma mapping failed\n");
+ else {
+ dev_info(&dev->dev, "mapping 1K dma=%#llx host=%p\n",
+ (unsigned long long)map_dma_addr, map_addr);
+
+ dma_free_coherent(&dev->dev, 1024, map_addr, map_dma_addr);
+ dev_info(&dev->dev, "unmapping 1K dma=%#llx host=%p\n",
+ (unsigned long long)map_dma_addr, map_addr);
+ }
+
return 0;
fail_remove_group:
diff --git a/lib/librte_eal/linuxapp/kni/Makefile b/lib/librte_eal/linuxapp/kni/Makefile
index 4e99e07e..154c528d 100644
--- a/lib/librte_eal/linuxapp/kni/Makefile
+++ b/lib/librte_eal/linuxapp/kni/Makefile
@@ -44,45 +44,43 @@ MODULE_CFLAGS += -I$(RTE_OUTPUT)/include -I$(SRCDIR)/ethtool/ixgbe -I$(SRCDIR)/e
MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
MODULE_CFLAGS += -Wall -Werror
-ifeq ($(shell lsb_release -si 2>/dev/null),Ubuntu)
-MODULE_CFLAGS += -DUBUNTU_RELEASE_CODE=$(shell lsb_release -sr | tr -d .)
+-include /etc/lsb-release
+
+ifeq ($(DISTRIB_ID),Ubuntu)
+MODULE_CFLAGS += -DUBUNTU_RELEASE_CODE=$(subst .,,$(DISTRIB_RELEASE))
UBUNTU_KERNEL_CODE := $(shell echo `grep UTS_RELEASE $(RTE_KERNELDIR)/include/generated/utsrelease.h \
| cut -d '"' -f2 | cut -d- -f1,2 | tr .- ,`,1)
MODULE_CFLAGS += -D"UBUNTU_KERNEL_CODE=UBUNTU_KERNEL_VERSION($(UBUNTU_KERNEL_CODE))"
endif
-# this lib needs main eal
-DEPDIRS-y += lib/librte_eal/linuxapp/eal
-
#
# all source are stored in SRCS-y
#
-SRCS-y := ethtool/ixgbe/ixgbe_main.c
-SRCS-y += ethtool/ixgbe/ixgbe_api.c
-SRCS-y += ethtool/ixgbe/ixgbe_common.c
-SRCS-y += ethtool/ixgbe/ixgbe_ethtool.c
-SRCS-y += ethtool/ixgbe/ixgbe_82599.c
-SRCS-y += ethtool/ixgbe/ixgbe_82598.c
-SRCS-y += ethtool/ixgbe/ixgbe_x540.c
-SRCS-y += ethtool/ixgbe/ixgbe_phy.c
-SRCS-y += ethtool/ixgbe/kcompat.c
+SRCS-y := kni_misc.c
+SRCS-y += kni_net.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += kni_ethtool.c
-SRCS-y += ethtool/igb/e1000_82575.c
-SRCS-y += ethtool/igb/e1000_i210.c
-SRCS-y += ethtool/igb/e1000_api.c
-SRCS-y += ethtool/igb/e1000_mac.c
-SRCS-y += ethtool/igb/e1000_manage.c
-SRCS-y += ethtool/igb/e1000_mbx.c
-SRCS-y += ethtool/igb/e1000_nvm.c
-SRCS-y += ethtool/igb/e1000_phy.c
-SRCS-y += ethtool/igb/igb_ethtool.c
-SRCS-y += ethtool/igb/igb_main.c
-SRCS-y += ethtool/igb/igb_param.c
-SRCS-y += ethtool/igb/igb_vmdq.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_main.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_api.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_common.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_ethtool.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_82599.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_82598.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_x540.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_phy.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/kcompat.c
-SRCS-y += kni_misc.c
-SRCS-y += kni_net.c
-SRCS-y += kni_ethtool.c
-SRCS-$(CONFIG_RTE_KNI_VHOST) += kni_vhost.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_82575.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_i210.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_api.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_mac.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_manage.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_mbx.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_nvm.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_phy.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/igb_ethtool.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/igb_main.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/igb_param.c
+SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/igb_vmdq.c
include $(RTE_SDK)/mk/rte.module.mk
diff --git a/lib/librte_eal/linuxapp/kni/compat.h b/lib/librte_eal/linuxapp/kni/compat.h
index 78da08e5..d96275af 100644
--- a/lib/librte_eal/linuxapp/kni/compat.h
+++ b/lib/librte_eal/linuxapp/kni/compat.h
@@ -2,6 +2,8 @@
* Minimal wrappers to allow compiling kni on older kernels.
*/
+#include <linux/version.h>
+
#ifndef RHEL_RELEASE_VERSION
#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
#endif
@@ -67,3 +69,7 @@
(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)))
#undef NET_NAME_UNKNOWN
#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#define HAVE_SIGNAL_FUNCTIONS_OWN_HEADER
+#endif
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c
index d7a987d5..95e262b7 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c
@@ -1126,7 +1126,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
static irqreturn_t igb_test_intr(int irq, void *data)
{
- struct igb_adapter *adapter = (struct igb_adapter *) data;
+ struct igb_adapter *adapter = data;
struct e1000_hw *hw = &adapter->hw;
adapter->test_icr |= E1000_READ_REG(hw, E1000_ICR);
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
index f4dca5a3..5f1f3a6b 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
@@ -1031,8 +1031,15 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
for (i = 0; i < numvecs; i++)
adapter->msix_entries[i].entry = i;
+#ifdef HAVE_PCI_ENABLE_MSIX
err = pci_enable_msix(pdev,
adapter->msix_entries, numvecs);
+#else
+ err = pci_enable_msix_range(pdev,
+ adapter->msix_entries,
+ numvecs,
+ numvecs);
+#endif
if (err == 0)
break;
}
@@ -1629,7 +1636,7 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
*/
static int igb_get_i2c_data(void *data)
{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct igb_adapter *adapter = data;
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
@@ -1644,7 +1651,7 @@ static int igb_get_i2c_data(void *data)
*/
static void igb_set_i2c_data(void *data, int state)
{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct igb_adapter *adapter = data;
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
@@ -1669,7 +1676,7 @@ static void igb_set_i2c_data(void *data, int state)
*/
static void igb_set_i2c_clk(void *data, int state)
{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct igb_adapter *adapter = data;
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
@@ -1691,7 +1698,7 @@ static void igb_set_i2c_clk(void *data, int state)
*/
static int igb_get_i2c_clk(void *data)
{
- struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct igb_adapter *adapter = data;
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
index 84826b26..4c52da3c 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
@@ -710,6 +710,9 @@ struct _kc_ethtool_pauseparam {
#elif ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,28) )
/* SLES12 is at least 3.12.28+ based */
#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 57))
+/* SLES12SP3 is at least 4.4.57+ based */
+#define SLE_VERSION_CODE SLE_VERSION(12, 3, 0)
#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
#endif /* CONFIG_SUSE_KERNEL */
#ifndef SLE_VERSION_CODE
@@ -3929,8 +3932,13 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type)
#define vlan_tx_tag_present skb_vlan_tag_present
#endif
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0) )
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) || \
+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0)))
#define HAVE_VF_VLAN_PROTO
-#endif /* >= 4.9.0 */
+#endif /* >= 4.9.0, >= SLES12SP3 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
+#define HAVE_PCI_ENABLE_MSIX
+#endif
#endif /* _KCOMPAT_H_ */
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c
index bc3cb2f4..cdfcb959 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c
@@ -1462,7 +1462,7 @@ static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
- struct net_device *netdev = (struct net_device *) data;
+ struct net_device *netdev = data;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
@@ -2447,7 +2447,7 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
break;
case ETHTOOL_GRXCLSRLALL:
ret = ixgbe_get_ethtool_fdir_all(adapter, cmd,
- (u32 *)rule_locs);
+ rule_locs);
break;
case ETHTOOL_GRXFH:
ret = ixgbe_get_rss_hash_opts(adapter, cmd);
diff --git a/lib/librte_eal/linuxapp/kni/kni_dev.h b/lib/librte_eal/linuxapp/kni/kni_dev.h
index 58cbadd3..72385ab4 100644
--- a/lib/librte_eal/linuxapp/kni/kni_dev.h
+++ b/lib/librte_eal/linuxapp/kni/kni_dev.h
@@ -30,17 +30,19 @@
#endif
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include "compat.h"
+
#include <linux/if.h>
#include <linux/wait.h>
+#ifdef HAVE_SIGNAL_FUNCTIONS_OWN_HEADER
+#include <linux/sched/signal.h>
+#else
#include <linux/sched.h>
+#endif
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#ifdef RTE_KNI_VHOST
-#include <net/sock.h>
-#endif
-
#include <exec-env/rte_kni_common.h>
#define KNI_KTHREAD_RESCHEDULE_INTERVAL 5 /* us */
@@ -102,15 +104,6 @@ struct kni_dev {
/* synchro for request processing */
unsigned long synchro;
-#ifdef RTE_KNI_VHOST
- struct kni_vhost_queue *vhost_queue;
-
- volatile enum {
- BE_STOP = 0x1,
- BE_START = 0x2,
- BE_FINISH = 0x4,
- } vq_status;
-#endif
/* buffers */
void *pa[MBUF_BURST_SZ];
void *va[MBUF_BURST_SZ];
@@ -118,26 +111,6 @@ struct kni_dev {
void *alloc_va[MBUF_BURST_SZ];
};
-#ifdef RTE_KNI_VHOST
-uint32_t
-kni_poll(struct file *file, struct socket *sock, poll_table * wait);
-int kni_chk_vhost_rx(struct kni_dev *kni);
-int kni_vhost_init(struct kni_dev *kni);
-int kni_vhost_backend_release(struct kni_dev *kni);
-
-struct kni_vhost_queue {
- struct sock sk;
- struct socket *sock;
- int vnet_hdr_sz;
- struct kni_dev *kni;
- int sockfd;
- uint32_t flags;
- struct sk_buff *cache;
- struct rte_kni_fifo *fifo;
-};
-
-#endif
-
void kni_net_rx(struct kni_dev *kni);
void kni_net_init(struct net_device *dev);
void kni_net_config_lo_mode(char *lo_str);
diff --git a/lib/librte_eal/linuxapp/kni/kni_fifo.h b/lib/librte_eal/linuxapp/kni/kni_fifo.h
index 025ec1c9..14f4141f 100644
--- a/lib/librte_eal/linuxapp/kni/kni_fifo.h
+++ b/lib/librte_eal/linuxapp/kni/kni_fifo.h
@@ -91,18 +91,4 @@ kni_fifo_free_count(struct rte_kni_fifo *fifo)
return (fifo->read - fifo->write - 1) & (fifo->len - 1);
}
-#ifdef RTE_KNI_VHOST
-/**
- * Initializes the kni fifo structure
- */
-static inline void
-kni_fifo_init(struct rte_kni_fifo *fifo, uint32_t size)
-{
- fifo->write = 0;
- fifo->read = 0;
- fifo->len = size;
- fifo->elem_size = sizeof(void *);
-}
-#endif
-
#endif /* _KNI_FIFO_H_ */
diff --git a/lib/librte_eal/linuxapp/kni/kni_misc.c b/lib/librte_eal/linuxapp/kni/kni_misc.c
index 497db9bd..7590f1fd 100644
--- a/lib/librte_eal/linuxapp/kni/kni_misc.c
+++ b/lib/librte_eal/linuxapp/kni/kni_misc.c
@@ -140,11 +140,7 @@ kni_thread_single(void *data)
down_read(&knet->kni_list_lock);
for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
list_for_each_entry(dev, &knet->kni_list_head, list) {
-#ifdef RTE_KNI_VHOST
- kni_chk_vhost_rx(dev);
-#else
kni_net_rx(dev);
-#endif
kni_net_poll_resp(dev);
}
}
@@ -163,15 +159,11 @@ static int
kni_thread_multiple(void *param)
{
int j;
- struct kni_dev *dev = (struct kni_dev *)param;
+ struct kni_dev *dev = param;
while (!kthread_should_stop()) {
for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
-#ifdef RTE_KNI_VHOST
- kni_chk_vhost_rx(dev);
-#else
kni_net_rx(dev);
-#endif
kni_net_poll_resp(dev);
}
#ifdef RTE_KNI_PREEMPT_DEFAULT
@@ -205,12 +197,14 @@ kni_dev_remove(struct kni_dev *dev)
if (!dev)
return -ENODEV;
+#ifdef RTE_KNI_KMOD_ETHTOOL
if (dev->pci_dev) {
if (pci_match_id(ixgbe_pci_tbl, dev->pci_dev))
ixgbe_kni_remove(dev->pci_dev);
else if (pci_match_id(igb_pci_tbl, dev->pci_dev))
igb_kni_remove(dev->pci_dev);
}
+#endif
if (dev->net_dev) {
unregister_netdev(dev->net_dev);
@@ -246,9 +240,6 @@ kni_release(struct inode *inode, struct file *file)
dev->pthread = NULL;
}
-#ifdef RTE_KNI_VHOST
- kni_vhost_backend_release(dev);
-#endif
kni_dev_remove(dev);
list_del(&dev->list);
}
@@ -326,11 +317,13 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,
struct kni_net *knet = net_generic(net, kni_net_id);
int ret;
struct rte_kni_device_info dev_info;
- struct pci_dev *pci = NULL;
- struct pci_dev *found_pci = NULL;
struct net_device *net_dev = NULL;
- struct net_device *lad_dev = NULL;
struct kni_dev *kni, *dev, *n;
+#ifdef RTE_KNI_KMOD_ETHTOOL
+ struct pci_dev *found_pci = NULL;
+ struct net_device *lad_dev = NULL;
+ struct pci_dev *pci = NULL;
+#endif
pr_info("Creating kni...\n");
/* Check the buffer size, to avoid warning */
@@ -344,6 +337,12 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,
return -EIO;
}
+ /* Check if name is zero-ended */
+ if (strnlen(dev_info.name, sizeof(dev_info.name)) == sizeof(dev_info.name)) {
+ pr_err("kni.name not zero-terminated");
+ return -EINVAL;
+ }
+
/**
* Check if the cpu core id is valid for binding.
*/
@@ -363,8 +362,8 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,
up_read(&knet->kni_list_lock);
net_dev = alloc_netdev(sizeof(struct kni_dev), dev_info.name,
-#ifdef NET_NAME_UNKNOWN
- NET_NAME_UNKNOWN,
+#ifdef NET_NAME_USER
+ NET_NAME_USER,
#endif
kni_net_init);
if (net_dev == NULL) {
@@ -392,10 +391,6 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,
kni->sync_va = dev_info.sync_va;
kni->sync_kva = phys_to_virt(dev_info.sync_phys);
-#ifdef RTE_KNI_VHOST
- kni->vhost_queue = NULL;
- kni->vq_status = BE_STOP;
-#endif
kni->mbuf_size = dev_info.mbuf_size;
pr_debug("tx_phys: 0x%016llx, tx_q addr: 0x%p\n",
@@ -418,7 +413,7 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,
dev_info.function,
dev_info.vendor_id,
dev_info.device_id);
-
+#ifdef RTE_KNI_KMOD_ETHTOOL
pci = pci_get_device(dev_info.vendor_id, dev_info.device_id, NULL);
/* Support Ethtool */
@@ -459,6 +454,7 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,
}
if (pci)
pci_dev_put(pci);
+#endif
if (kni->lad_dev)
ether_addr_copy(net_dev->dev_addr, kni->lad_dev->dev_addr);
@@ -479,10 +475,6 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,
return -ENODEV;
}
-#ifdef RTE_KNI_VHOST
- kni_vhost_init(kni);
-#endif
-
ret = kni_run_thread(knet, kni, dev_info.force_bind);
if (ret != 0)
return ret;
@@ -526,9 +518,6 @@ kni_ioctl_release(struct net *net, uint32_t ioctl_num,
dev->pthread = NULL;
}
-#ifdef RTE_KNI_VHOST
- kni_vhost_backend_release(dev);
-#endif
kni_dev_remove(dev);
list_del(&dev->list);
ret = 0;
diff --git a/lib/librte_eal/linuxapp/kni/kni_net.c b/lib/librte_eal/linuxapp/kni/kni_net.c
index 4ac99cfe..db9f4898 100644
--- a/lib/librte_eal/linuxapp/kni/kni_net.c
+++ b/lib/librte_eal/linuxapp/kni/kni_net.c
@@ -198,18 +198,6 @@ kni_net_config(struct net_device *dev, struct ifmap *map)
/*
* Transmit a packet (called by the kernel)
*/
-#ifdef RTE_KNI_VHOST
-static int
-kni_net_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct kni_dev *kni = netdev_priv(dev);
-
- dev_kfree_skb(skb);
- kni->stats.tx_dropped++;
-
- return NETDEV_TX_OK;
-}
-#else
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
@@ -289,7 +277,6 @@ drop:
return NETDEV_TX_OK;
}
-#endif
/*
* RX: normal working mode
diff --git a/lib/librte_eal/linuxapp/kni/kni_vhost.c b/lib/librte_eal/linuxapp/kni/kni_vhost.c
deleted file mode 100644
index f54c34b1..00000000
--- a/lib/librte_eal/linuxapp/kni/kni_vhost.c
+++ /dev/null
@@ -1,842 +0,0 @@
-/*-
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- */
-
-#include <linux/module.h>
-#include <linux/net.h>
-#include <net/sock.h>
-#include <linux/virtio_net.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/nsproxy.h>
-#include <linux/sched.h>
-#include <linux/if_tun.h>
-#include <linux/version.h>
-#include <linux/file.h>
-
-#include "compat.h"
-#include "kni_dev.h"
-#include "kni_fifo.h"
-
-#define RX_BURST_SZ 4
-
-#ifdef HAVE_STATIC_SOCK_MAP_FD
-static int kni_sock_map_fd(struct socket *sock)
-{
- struct file *file;
- int fd = get_unused_fd_flags(0);
-
- if (fd < 0)
- return fd;
-
- file = sock_alloc_file(sock, 0, NULL);
- if (IS_ERR(file)) {
- put_unused_fd(fd);
- return PTR_ERR(file);
- }
- fd_install(fd, file);
- return fd;
-}
-#endif
-
-static struct proto kni_raw_proto = {
- .name = "kni_vhost",
- .owner = THIS_MODULE,
- .obj_size = sizeof(struct kni_vhost_queue),
-};
-
-static inline int
-kni_vhost_net_tx(struct kni_dev *kni, struct msghdr *m,
- uint32_t offset, uint32_t len)
-{
- struct rte_kni_mbuf *pkt_kva = NULL;
- struct rte_kni_mbuf *pkt_va = NULL;
- int ret;
-
- pr_debug("tx offset=%d, len=%d, iovlen=%d\n",
-#ifdef HAVE_IOV_ITER_MSGHDR
- offset, len, (int)m->msg_iter.iov->iov_len);
-#else
- offset, len, (int)m->msg_iov->iov_len);
-#endif
-
- /**
- * Check if it has at least one free entry in tx_q and
- * one entry in alloc_q.
- */
- if (kni_fifo_free_count(kni->tx_q) == 0 ||
- kni_fifo_count(kni->alloc_q) == 0) {
- /**
- * If no free entry in tx_q or no entry in alloc_q,
- * drops skb and goes out.
- */
- goto drop;
- }
-
- /* dequeue a mbuf from alloc_q */
- ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
- if (likely(ret == 1)) {
- void *data_kva;
-
- pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
- data_kva = pkt_kva->buf_addr + pkt_kva->data_off
- - kni->mbuf_va + kni->mbuf_kva;
-
-#ifdef HAVE_IOV_ITER_MSGHDR
- copy_from_iter(data_kva, len, &m->msg_iter);
-#else
- memcpy_fromiovecend(data_kva, m->msg_iov, offset, len);
-#endif
-
- if (unlikely(len < ETH_ZLEN)) {
- memset(data_kva + len, 0, ETH_ZLEN - len);
- len = ETH_ZLEN;
- }
- pkt_kva->pkt_len = len;
- pkt_kva->data_len = len;
-
- /* enqueue mbuf into tx_q */
- ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
- if (unlikely(ret != 1)) {
- /* Failing should not happen */
- pr_err("Fail to enqueue mbuf into tx_q\n");
- goto drop;
- }
- } else {
- /* Failing should not happen */
- pr_err("Fail to dequeue mbuf from alloc_q\n");
- goto drop;
- }
-
- /* update statistics */
- kni->stats.tx_bytes += len;
- kni->stats.tx_packets++;
-
- return 0;
-
-drop:
- /* update statistics */
- kni->stats.tx_dropped++;
-
- return 0;
-}
-
-static inline int
-kni_vhost_net_rx(struct kni_dev *kni, struct msghdr *m,
- uint32_t offset, uint32_t len)
-{
- uint32_t pkt_len;
- struct rte_kni_mbuf *kva;
- struct rte_kni_mbuf *va;
- void *data_kva;
- struct sk_buff *skb;
- struct kni_vhost_queue *q = kni->vhost_queue;
-
- if (unlikely(q == NULL))
- return 0;
-
- /* ensure at least one entry in free_q */
- if (unlikely(kni_fifo_free_count(kni->free_q) == 0))
- return 0;
-
- skb = skb_dequeue(&q->sk.sk_receive_queue);
- if (unlikely(skb == NULL))
- return 0;
-
- kva = (struct rte_kni_mbuf *)skb->data;
-
- /* free skb to cache */
- skb->data = NULL;
- if (unlikely(kni_fifo_put(q->fifo, (void **)&skb, 1) != 1))
- /* Failing should not happen */
- pr_err("Fail to enqueue entries into rx cache fifo\n");
-
- pkt_len = kva->data_len;
- if (unlikely(pkt_len > len))
- goto drop;
-
- pr_debug("rx offset=%d, len=%d, pkt_len=%d, iovlen=%d\n",
-#ifdef HAVE_IOV_ITER_MSGHDR
- offset, len, pkt_len, (int)m->msg_iter.iov->iov_len);
-#else
- offset, len, pkt_len, (int)m->msg_iov->iov_len);
-#endif
-
- data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va + kni->mbuf_kva;
-#ifdef HAVE_IOV_ITER_MSGHDR
- if (unlikely(copy_to_iter(data_kva, pkt_len, &m->msg_iter)))
-#else
- if (unlikely(memcpy_toiovecend(m->msg_iov, data_kva, offset, pkt_len)))
-#endif
- goto drop;
-
- /* Update statistics */
- kni->stats.rx_bytes += pkt_len;
- kni->stats.rx_packets++;
-
- /* enqueue mbufs into free_q */
- va = (void *)kva - kni->mbuf_kva + kni->mbuf_va;
- if (unlikely(kni_fifo_put(kni->free_q, (void **)&va, 1) != 1))
- /* Failing should not happen */
- pr_err("Fail to enqueue entries into free_q\n");
-
- pr_debug("receive done %d\n", pkt_len);
-
- return pkt_len;
-
-drop:
- /* Update drop statistics */
- kni->stats.rx_dropped++;
-
- return 0;
-}
-
-static uint32_t
-kni_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
-{
- struct kni_vhost_queue *q =
- container_of(sock->sk, struct kni_vhost_queue, sk);
- struct kni_dev *kni;
- uint32_t mask = 0;
-
- if (unlikely(q == NULL || q->kni == NULL))
- return POLLERR;
-
- kni = q->kni;
-#ifdef HAVE_SOCKET_WQ
- pr_debug("start kni_poll on group %d, wq 0x%16llx\n",
- kni->group_id, (uint64_t)sock->wq);
- poll_wait(file, &sock->wq->wait, wait);
-#else
- pr_debug("start kni_poll on group %d, wait at 0x%16llx\n",
- kni->group_id, (uint64_t)&sock->wait);
- poll_wait(file, &sock->wait, wait);
-#endif
-
- if (kni_fifo_count(kni->rx_q) > 0)
- mask |= POLLIN | POLLRDNORM;
-
- if (sock_writeable(&q->sk) ||
-#ifdef SOCKWQ_ASYNC_NOSPACE
- (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock->flags) &&
- sock_writeable(&q->sk)))
-#else
- (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock->flags) &&
- sock_writeable(&q->sk)))
-#endif
- mask |= POLLOUT | POLLWRNORM;
-
- return mask;
-}
-
-static inline void
-kni_vhost_enqueue(struct kni_dev *kni, struct kni_vhost_queue *q,
- struct sk_buff *skb, struct rte_kni_mbuf *va)
-{
- struct rte_kni_mbuf *kva;
-
- kva = (void *)(va) - kni->mbuf_va + kni->mbuf_kva;
- (skb)->data = (unsigned char *)kva;
- (skb)->len = kva->data_len;
- skb_queue_tail(&q->sk.sk_receive_queue, skb);
-}
-
-static inline void
-kni_vhost_enqueue_burst(struct kni_dev *kni, struct kni_vhost_queue *q,
- struct sk_buff **skb, struct rte_kni_mbuf **va)
-{
- int i;
-
- for (i = 0; i < RX_BURST_SZ; skb++, va++, i++)
- kni_vhost_enqueue(kni, q, *skb, *va);
-}
-
-int
-kni_chk_vhost_rx(struct kni_dev *kni)
-{
- struct kni_vhost_queue *q = kni->vhost_queue;
- uint32_t nb_in, nb_mbuf, nb_skb;
- const uint32_t BURST_MASK = RX_BURST_SZ - 1;
- uint32_t nb_burst, nb_backlog, i;
- struct sk_buff *skb[RX_BURST_SZ];
- struct rte_kni_mbuf *va[RX_BURST_SZ];
-
- if (unlikely(BE_STOP & kni->vq_status)) {
- kni->vq_status |= BE_FINISH;
- return 0;
- }
-
- if (unlikely(q == NULL))
- return 0;
-
- nb_skb = kni_fifo_count(q->fifo);
- nb_mbuf = kni_fifo_count(kni->rx_q);
-
- nb_in = min(nb_mbuf, nb_skb);
- nb_in = min_t(uint32_t, nb_in, RX_BURST_SZ);
- nb_burst = (nb_in & ~BURST_MASK);
- nb_backlog = (nb_in & BURST_MASK);
-
- /* enqueue skb_queue per BURST_SIZE bulk */
- if (nb_burst != 0) {
- if (unlikely(kni_fifo_get(kni->rx_q, (void **)&va, RX_BURST_SZ)
- != RX_BURST_SZ))
- goto except;
-
- if (unlikely(kni_fifo_get(q->fifo, (void **)&skb, RX_BURST_SZ)
- != RX_BURST_SZ))
- goto except;
-
- kni_vhost_enqueue_burst(kni, q, skb, va);
- }
-
- /* all leftover, do one by one */
- for (i = 0; i < nb_backlog; ++i) {
- if (unlikely(kni_fifo_get(kni->rx_q, (void **)&va, 1) != 1))
- goto except;
-
- if (unlikely(kni_fifo_get(q->fifo, (void **)&skb, 1) != 1))
- goto except;
-
- kni_vhost_enqueue(kni, q, *skb, *va);
- }
-
- /* Ondemand wake up */
- if ((nb_in == RX_BURST_SZ) || (nb_skb == 0) ||
- ((nb_mbuf < RX_BURST_SZ) && (nb_mbuf != 0))) {
- wake_up_interruptible_poll(sk_sleep(&q->sk),
- POLLIN | POLLRDNORM | POLLRDBAND);
- pr_debug("RX CHK KICK nb_mbuf %d, nb_skb %d, nb_in %d\n",
- nb_mbuf, nb_skb, nb_in);
- }
-
- return 0;
-
-except:
- /* Failing should not happen */
- pr_err("Fail to enqueue fifo, it shouldn't happen\n");
- BUG_ON(1);
-
- return 0;
-}
-
-static int
-#ifdef HAVE_KIOCB_MSG_PARAM
-kni_sock_sndmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
-#else
-kni_sock_sndmsg(struct socket *sock,
- struct msghdr *m, size_t total_len)
-#endif /* HAVE_KIOCB_MSG_PARAM */
-{
- struct kni_vhost_queue *q =
- container_of(sock->sk, struct kni_vhost_queue, sk);
- int vnet_hdr_len = 0;
- unsigned long len = total_len;
-
- if (unlikely(q == NULL || q->kni == NULL))
- return 0;
-
- pr_debug("kni_sndmsg len %ld, flags 0x%08x, nb_iov %d\n",
-#ifdef HAVE_IOV_ITER_MSGHDR
- len, q->flags, (int)m->msg_iter.iov->iov_len);
-#else
- len, q->flags, (int)m->msg_iovlen);
-#endif
-
-#ifdef RTE_KNI_VHOST_VNET_HDR_EN
- if (likely(q->flags & IFF_VNET_HDR)) {
- vnet_hdr_len = q->vnet_hdr_sz;
- if (unlikely(len < vnet_hdr_len))
- return -EINVAL;
- len -= vnet_hdr_len;
- }
-#endif
-
- if (unlikely(len < ETH_HLEN + q->vnet_hdr_sz))
- return -EINVAL;
-
- return kni_vhost_net_tx(q->kni, m, vnet_hdr_len, len);
-}
-
-static int
-#ifdef HAVE_KIOCB_MSG_PARAM
-kni_sock_rcvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t len, int flags)
-#else
-kni_sock_rcvmsg(struct socket *sock,
- struct msghdr *m, size_t len, int flags)
-#endif /* HAVE_KIOCB_MSG_PARAM */
-{
- int vnet_hdr_len = 0;
- int pkt_len = 0;
- struct kni_vhost_queue *q =
- container_of(sock->sk, struct kni_vhost_queue, sk);
- static struct virtio_net_hdr
- __attribute__ ((unused)) vnet_hdr = {
- .flags = 0,
- .gso_type = VIRTIO_NET_HDR_GSO_NONE
- };
-
- if (unlikely(q == NULL || q->kni == NULL))
- return 0;
-
-#ifdef RTE_KNI_VHOST_VNET_HDR_EN
- if (likely(q->flags & IFF_VNET_HDR)) {
- vnet_hdr_len = q->vnet_hdr_sz;
- len -= vnet_hdr_len;
- if (len < 0)
- return -EINVAL;
- }
-#endif
-
- pkt_len = kni_vhost_net_rx(q->kni, m, vnet_hdr_len, len);
- if (unlikely(pkt_len == 0))
- return 0;
-
-#ifdef RTE_KNI_VHOST_VNET_HDR_EN
- /* no need to copy hdr when no pkt received */
-#ifdef HAVE_IOV_ITER_MSGHDR
- if (unlikely(copy_to_iter((void *)&vnet_hdr, vnet_hdr_len,
- &m->msg_iter)))
-#else
- if (unlikely(memcpy_toiovecend(m->msg_iov,
- (void *)&vnet_hdr, 0, vnet_hdr_len)))
-#endif /* HAVE_IOV_ITER_MSGHDR */
- return -EFAULT;
-#endif /* RTE_KNI_VHOST_VNET_HDR_EN */
- pr_debug("kni_rcvmsg expect_len %ld, flags 0x%08x, pkt_len %d\n",
- (unsigned long)len, q->flags, pkt_len);
-
- return pkt_len + vnet_hdr_len;
-}
-
-/* dummy tap like ioctl */
-static int
-kni_sock_ioctl(struct socket *sock, uint32_t cmd, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- struct ifreq __user *ifr = argp;
- uint32_t __user *up = argp;
- struct kni_vhost_queue *q =
- container_of(sock->sk, struct kni_vhost_queue, sk);
- struct kni_dev *kni;
- uint32_t u;
- int __user *sp = argp;
- int s;
- int ret;
-
- pr_debug("tap ioctl cmd 0x%08x\n", cmd);
-
- switch (cmd) {
- case TUNSETIFF:
- pr_debug("TUNSETIFF\n");
- /* ignore the name, just look at flags */
- if (get_user(u, &ifr->ifr_flags))
- return -EFAULT;
-
- ret = 0;
- if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
- ret = -EINVAL;
- else
- q->flags = u;
-
- return ret;
-
- case TUNGETIFF:
- pr_debug("TUNGETIFF\n");
- rcu_read_lock_bh();
- kni = rcu_dereference_bh(q->kni);
- if (kni)
- dev_hold(kni->net_dev);
- rcu_read_unlock_bh();
-
- if (!kni)
- return -ENOLINK;
-
- ret = 0;
- if (copy_to_user(&ifr->ifr_name, kni->net_dev->name, IFNAMSIZ)
- || put_user(q->flags, &ifr->ifr_flags))
- ret = -EFAULT;
- dev_put(kni->net_dev);
- return ret;
-
- case TUNGETFEATURES:
- pr_debug("TUNGETFEATURES\n");
- u = IFF_TAP | IFF_NO_PI;
-#ifdef RTE_KNI_VHOST_VNET_HDR_EN
- u |= IFF_VNET_HDR;
-#endif
- if (put_user(u, up))
- return -EFAULT;
- return 0;
-
- case TUNSETSNDBUF:
- pr_debug("TUNSETSNDBUF\n");
- if (get_user(u, up))
- return -EFAULT;
-
- q->sk.sk_sndbuf = u;
- return 0;
-
- case TUNGETVNETHDRSZ:
- s = q->vnet_hdr_sz;
- if (put_user(s, sp))
- return -EFAULT;
- pr_debug("TUNGETVNETHDRSZ %d\n", s);
- return 0;
-
- case TUNSETVNETHDRSZ:
- if (get_user(s, sp))
- return -EFAULT;
- if (s < (int)sizeof(struct virtio_net_hdr))
- return -EINVAL;
-
- pr_debug("TUNSETVNETHDRSZ %d\n", s);
- q->vnet_hdr_sz = s;
- return 0;
-
- case TUNSETOFFLOAD:
- pr_debug("TUNSETOFFLOAD %lx\n", arg);
-#ifdef RTE_KNI_VHOST_VNET_HDR_EN
- /* not support any offload yet */
- if (!(q->flags & IFF_VNET_HDR))
- return -EINVAL;
-
- return 0;
-#else
- return -EINVAL;
-#endif
-
- default:
- pr_debug("NOT SUPPORT\n");
- return -EINVAL;
- }
-}
-
-static int
-kni_sock_compat_ioctl(struct socket *sock, uint32_t cmd,
- unsigned long arg)
-{
- /* 32 bits app on 64 bits OS to be supported later */
- pr_debug("Not implemented.\n");
-
- return -EINVAL;
-}
-
-#define KNI_VHOST_WAIT_WQ_SAFE() \
-do { \
- while ((BE_FINISH | BE_STOP) == kni->vq_status) \
- msleep(1); \
-} while (0) \
-
-
-static int
-kni_sock_release(struct socket *sock)
-{
- struct kni_vhost_queue *q =
- container_of(sock->sk, struct kni_vhost_queue, sk);
- struct kni_dev *kni;
-
- if (q == NULL)
- return 0;
-
- kni = q->kni;
- if (kni != NULL) {
- kni->vq_status = BE_STOP;
- KNI_VHOST_WAIT_WQ_SAFE();
- kni->vhost_queue = NULL;
- q->kni = NULL;
- }
-
- if (q->sockfd != -1)
- q->sockfd = -1;
-
- sk_set_socket(&q->sk, NULL);
- sock->sk = NULL;
-
- sock_put(&q->sk);
-
- pr_debug("dummy sock release done\n");
-
- return 0;
-}
-
-int
-kni_sock_getname(struct socket *sock, struct sockaddr *addr,
- int *sockaddr_len, int peer)
-{
- pr_debug("dummy sock getname\n");
- ((struct sockaddr_ll *)addr)->sll_family = AF_PACKET;
- return 0;
-}
-
-static const struct proto_ops kni_socket_ops = {
- .getname = kni_sock_getname,
- .sendmsg = kni_sock_sndmsg,
- .recvmsg = kni_sock_rcvmsg,
- .release = kni_sock_release,
- .poll = kni_sock_poll,
- .ioctl = kni_sock_ioctl,
- .compat_ioctl = kni_sock_compat_ioctl,
-};
-
-static void
-kni_sk_write_space(struct sock *sk)
-{
- wait_queue_head_t *wqueue;
-
- if (!sock_writeable(sk) ||
-#ifdef SOCKWQ_ASYNC_NOSPACE
- !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
-#else
- !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
-#endif
- return;
- wqueue = sk_sleep(sk);
- if (wqueue && waitqueue_active(wqueue))
- wake_up_interruptible_poll(
- wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
-}
-
-static void
-kni_sk_destruct(struct sock *sk)
-{
- struct kni_vhost_queue *q =
- container_of(sk, struct kni_vhost_queue, sk);
-
- if (!q)
- return;
-
- /* make sure there's no packet in buffer */
- while (skb_dequeue(&sk->sk_receive_queue) != NULL)
- ;
-
- mb();
-
- if (q->fifo != NULL) {
- kfree(q->fifo);
- q->fifo = NULL;
- }
-
- if (q->cache != NULL) {
- kfree(q->cache);
- q->cache = NULL;
- }
-}
-
-static int
-kni_vhost_backend_init(struct kni_dev *kni)
-{
- struct kni_vhost_queue *q;
- struct net *net = current->nsproxy->net_ns;
- int err, i, sockfd;
- struct rte_kni_fifo *fifo;
- struct sk_buff *elem;
-
- if (kni->vhost_queue != NULL)
- return -1;
-
-#ifdef HAVE_SK_ALLOC_KERN_PARAM
- q = (struct kni_vhost_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
- &kni_raw_proto, 0);
-#else
- q = (struct kni_vhost_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
- &kni_raw_proto);
-#endif
- if (!q)
- return -ENOMEM;
-
- err = sock_create_lite(AF_UNSPEC, SOCK_RAW, IPPROTO_RAW, &q->sock);
- if (err)
- goto free_sk;
-
- sockfd = kni_sock_map_fd(q->sock);
- if (sockfd < 0) {
- err = sockfd;
- goto free_sock;
- }
-
- /* cache init */
- q->cache = kzalloc(
- RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(struct sk_buff),
- GFP_KERNEL);
- if (!q->cache)
- goto free_fd;
-
- fifo = kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(void *)
- + sizeof(struct rte_kni_fifo), GFP_KERNEL);
- if (!fifo)
- goto free_cache;
-
- kni_fifo_init(fifo, RTE_KNI_VHOST_MAX_CACHE_SIZE);
-
- for (i = 0; i < RTE_KNI_VHOST_MAX_CACHE_SIZE; i++) {
- elem = &q->cache[i];
- kni_fifo_put(fifo, (void **)&elem, 1);
- }
- q->fifo = fifo;
-
- /* store sockfd in vhost_queue */
- q->sockfd = sockfd;
-
- /* init socket */
- q->sock->type = SOCK_RAW;
- q->sock->state = SS_CONNECTED;
- q->sock->ops = &kni_socket_ops;
- sock_init_data(q->sock, &q->sk);
-
- /* init sock data */
- q->sk.sk_write_space = kni_sk_write_space;
- q->sk.sk_destruct = kni_sk_destruct;
- q->flags = IFF_NO_PI | IFF_TAP;
- q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
-#ifdef RTE_KNI_VHOST_VNET_HDR_EN
- q->flags |= IFF_VNET_HDR;
-#endif
-
- /* bind kni_dev with vhost_queue */
- q->kni = kni;
- kni->vhost_queue = q;
-
- wmb();
-
- kni->vq_status = BE_START;
-
-#ifdef HAVE_SOCKET_WQ
- pr_debug("backend init sockfd=%d, sock->wq=0x%16llx,sk->sk_wq=0x%16llx",
- q->sockfd, (uint64_t)q->sock->wq,
- (uint64_t)q->sk.sk_wq);
-#else
- pr_debug("backend init sockfd=%d, sock->wait at 0x%16llx,sk->sk_sleep=0x%16llx",
- q->sockfd, (uint64_t)&q->sock->wait,
- (uint64_t)q->sk.sk_sleep);
-#endif
-
- return 0;
-
-free_cache:
- kfree(q->cache);
- q->cache = NULL;
-
-free_fd:
- put_unused_fd(sockfd);
-
-free_sock:
- q->kni = NULL;
- kni->vhost_queue = NULL;
- kni->vq_status |= BE_FINISH;
- sock_release(q->sock);
- q->sock->ops = NULL;
- q->sock = NULL;
-
-free_sk:
- sk_free((struct sock *)q);
-
- return err;
-}
-
-/* kni vhost sock sysfs */
-static ssize_t
-show_sock_fd(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct net_device *net_dev = container_of(dev, struct net_device, dev);
- struct kni_dev *kni = netdev_priv(net_dev);
- int sockfd = -1;
-
- if (kni->vhost_queue != NULL)
- sockfd = kni->vhost_queue->sockfd;
- return snprintf(buf, 10, "%d\n", sockfd);
-}
-
-static ssize_t
-show_sock_en(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct net_device *net_dev = container_of(dev, struct net_device, dev);
- struct kni_dev *kni = netdev_priv(net_dev);
-
- return snprintf(buf, 10, "%u\n", (kni->vhost_queue == NULL ? 0 : 1));
-}
-
-static ssize_t
-set_sock_en(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct net_device *net_dev = container_of(dev, struct net_device, dev);
- struct kni_dev *kni = netdev_priv(net_dev);
- unsigned long en;
- int err = 0;
-
- if (kstrtoul(buf, 0, &en) != 0)
- return -EINVAL;
-
- if (en)
- err = kni_vhost_backend_init(kni);
-
- return err ? err : count;
-}
-
-static DEVICE_ATTR(sock_fd, S_IRUGO | S_IRUSR, show_sock_fd, NULL);
-static DEVICE_ATTR(sock_en, S_IRUGO | S_IWUSR, show_sock_en, set_sock_en);
-static struct attribute *dev_attrs[] = {
- &dev_attr_sock_fd.attr,
- &dev_attr_sock_en.attr,
- NULL,
-};
-
-static const struct attribute_group dev_attr_grp = {
- .attrs = dev_attrs,
-};
-
-int
-kni_vhost_backend_release(struct kni_dev *kni)
-{
- struct kni_vhost_queue *q = kni->vhost_queue;
-
- if (q == NULL)
- return 0;
-
- /* dettach from kni */
- q->kni = NULL;
-
- pr_debug("release backend done\n");
-
- return 0;
-}
-
-int
-kni_vhost_init(struct kni_dev *kni)
-{
- struct net_device *dev = kni->net_dev;
-
- if (sysfs_create_group(&dev->dev.kobj, &dev_attr_grp))
- sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
-
- kni->vq_status = BE_STOP;
-
- pr_debug("kni_vhost_init done\n");
-
- return 0;
-}
diff --git a/lib/librte_eal/linuxapp/xen_dom0/Makefile b/lib/librte_eal/linuxapp/xen_dom0/Makefile
index 9d22fb97..be51a82a 100644
--- a/lib/librte_eal/linuxapp/xen_dom0/Makefile
+++ b/lib/librte_eal/linuxapp/xen_dom0/Makefile
@@ -44,9 +44,6 @@ MODULE_CFLAGS += -I$(RTE_OUTPUT)/include
MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
MODULE_CFLAGS += -Wall -Werror
-# this lib needs main eal
-DEPDIRS-y += lib/librte_eal/linuxapp/eal
-
#
# all source are stored in SRCS-y
#
diff --git a/lib/librte_efd/Makefile b/lib/librte_efd/Makefile
new file mode 100644
index 00000000..b9277bc5
--- /dev/null
+++ b/lib/librte_efd/Makefile
@@ -0,0 +1,50 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_efd.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+EXPORT_MAP := rte_efd_version.map
+
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_EFD) := rte_efd.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_EFD)-include := rte_efd.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_efd/rte_efd.c b/lib/librte_efd/rte_efd.c
new file mode 100644
index 00000000..f601d62e
--- /dev/null
+++ b/lib/librte_efd/rte_efd.c
@@ -0,0 +1,1343 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_eal_memconfig.h>
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_prefetch.h>
+#include <rte_branch_prediction.h>
+#include <rte_memcpy.h>
+#include <rte_ring.h>
+#include <rte_jhash.h>
+#include <rte_hash_crc.h>
+
+#include "rte_efd.h"
+#if defined(RTE_ARCH_X86)
+#include "rte_efd_x86.h"
+#endif
+
+#define EFD_KEY(key_idx, table) (table->keys + ((key_idx) * table->key_len))
+/** Hash function used to determine chunk_id and bin_id for a group */
+#define EFD_HASH(key, table) \
+ (uint32_t)(rte_jhash(key, table->key_len, 0xbc9f1d34))
+/** Hash function used as constant component of perfect hash search */
+#define EFD_HASHFUNCA(key, table) \
+ (uint32_t)(rte_hash_crc(key, table->key_len, 0xbc9f1d35))
+/** Hash function used as multiplicative component of perfect hash search */
+#define EFD_HASHFUNCB(key, table) \
+ (uint32_t)(rte_hash_crc(key, table->key_len, 0xbc9f1d36))
+
+/*************************************************************************
+ * Fixed constants
+ *************************************************************************/
+
+/* These parameters are fixed by the efd_bin_to_group balancing table */
+#define EFD_CHUNK_NUM_GROUPS (64)
+#define EFD_CHUNK_NUM_BINS (256)
+#define EFD_CHUNK_NUM_BIN_TO_GROUP_SETS \
+ (EFD_CHUNK_NUM_BINS / EFD_CHUNK_NUM_GROUPS)
+
+/*
+ * Target number of rules that each chunk is created to handle.
+ * Used when initially allocating the table
+ */
+#define EFD_TARGET_CHUNK_NUM_RULES \
+ (EFD_CHUNK_NUM_GROUPS * EFD_TARGET_GROUP_NUM_RULES)
+/*
+ * Max number of rules that each chunk is created to handle.
+ * Used when initially allocating the table
+ */
+#define EFD_TARGET_CHUNK_MAX_NUM_RULES \
+ (EFD_CHUNK_NUM_GROUPS * EFD_MAX_GROUP_NUM_RULES)
+
+/** This is fixed based on the bin_to_group permutation array */
+#define EFD_MAX_GROUP_NUM_BINS (16)
+
+/**
+ * The end of the chunks array needs some extra padding to ensure
+ * that vectorization over-reads on the last online chunk stay within
+allocated memory
+ */
+#define EFD_NUM_CHUNK_PADDING_BYTES (256)
+
+/* All different internal lookup functions */
+enum efd_lookup_internal_function {
+ EFD_LOOKUP_SCALAR = 0,
+ EFD_LOOKUP_AVX2,
+ EFD_LOOKUP_NUM
+};
+
+TAILQ_HEAD(rte_efd_list, rte_tailq_entry);
+
+static struct rte_tailq_elem rte_efd_tailq = {
+ .name = "RTE_EFD",
+};
+EAL_REGISTER_TAILQ(rte_efd_tailq);
+
+/** Internal permutation array used to shuffle bins into pseudorandom groups */
+const uint32_t efd_bin_to_group[EFD_CHUNK_NUM_BIN_TO_GROUP_SETS][EFD_CHUNK_NUM_BINS] = {
+ {
+ 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3,
+ 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11,
+ 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15,
+ 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
+ 24, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 27,
+ 28, 28, 28, 28, 29, 29, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31,
+ 32, 32, 32, 32, 33, 33, 33, 33, 34, 34, 34, 34, 35, 35, 35, 35,
+ 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, 39, 39, 39, 39,
+ 40, 40, 40, 40, 41, 41, 41, 41, 42, 42, 42, 42, 43, 43, 43, 43,
+ 44, 44, 44, 44, 45, 45, 45, 45, 46, 46, 46, 46, 47, 47, 47, 47,
+ 48, 48, 48, 48, 49, 49, 49, 49, 50, 50, 50, 50, 51, 51, 51, 51,
+ 52, 52, 52, 52, 53, 53, 53, 53, 54, 54, 54, 54, 55, 55, 55, 55,
+ 56, 56, 56, 56, 57, 57, 57, 57, 58, 58, 58, 58, 59, 59, 59, 59,
+ 60, 60, 60, 60, 61, 61, 61, 61, 62, 62, 62, 62, 63, 63, 63, 63
+ },
+ {
+ 34, 33, 48, 59, 0, 21, 36, 18, 9, 49, 54, 38, 51, 23, 31, 5,
+ 44, 23, 37, 52, 11, 4, 58, 20, 38, 40, 38, 22, 26, 28, 42, 6,
+ 46, 16, 31, 28, 46, 14, 60, 0, 35, 53, 16, 58, 16, 29, 39, 7,
+ 1, 54, 15, 11, 48, 3, 62, 9, 58, 5, 30, 43, 17, 7, 36, 34,
+ 6, 36, 2, 14, 10, 1, 47, 47, 20, 45, 62, 56, 34, 25, 39, 18,
+ 51, 41, 61, 25, 56, 40, 41, 37, 52, 35, 30, 57, 11, 42, 37, 27,
+ 54, 19, 26, 13, 48, 31, 46, 15, 12, 10, 16, 20, 43, 17, 12, 55,
+ 45, 18, 8, 41, 7, 31, 42, 63, 12, 14, 21, 57, 24, 40, 5, 41,
+ 13, 44, 23, 59, 25, 57, 52, 50, 62, 1, 2, 49, 32, 57, 26, 43,
+ 56, 60, 55, 5, 49, 6, 3, 50, 46, 39, 27, 33, 17, 4, 53, 13,
+ 2, 19, 36, 51, 63, 0, 22, 33, 59, 28, 29, 23, 45, 33, 53, 27,
+ 22, 21, 40, 56, 4, 18, 44, 47, 28, 17, 4, 50, 21, 62, 8, 39,
+ 0, 8, 15, 24, 29, 24, 9, 11, 48, 61, 35, 55, 43, 1, 54, 42,
+ 53, 60, 22, 3, 32, 52, 25, 8, 15, 60, 7, 55, 27, 63, 19, 10,
+ 63, 24, 61, 19, 12, 38, 6, 29, 13, 37, 10, 3, 45, 32, 32, 30,
+ 49, 61, 44, 14, 20, 58, 35, 30, 2, 26, 34, 51, 9, 59, 47, 50
+ },
+ {
+ 32, 35, 32, 34, 55, 5, 6, 23, 49, 11, 6, 23, 52, 37, 29, 54,
+ 55, 40, 63, 50, 29, 52, 61, 25, 12, 56, 39, 38, 29, 11, 46, 1,
+ 40, 11, 19, 56, 7, 28, 51, 16, 15, 48, 21, 51, 60, 31, 14, 22,
+ 41, 47, 59, 56, 53, 28, 58, 26, 43, 27, 41, 33, 24, 52, 44, 38,
+ 13, 59, 48, 51, 60, 15, 3, 30, 15, 0, 10, 62, 44, 14, 28, 51,
+ 38, 2, 41, 26, 25, 49, 10, 12, 55, 57, 27, 35, 19, 33, 0, 30,
+ 5, 36, 47, 53, 5, 53, 20, 43, 34, 37, 52, 41, 21, 63, 59, 9,
+ 24, 1, 45, 24, 39, 44, 45, 16, 9, 17, 7, 50, 57, 22, 18, 28,
+ 25, 45, 2, 40, 58, 15, 17, 3, 1, 27, 61, 39, 19, 0, 19, 21,
+ 57, 62, 54, 60, 54, 40, 48, 33, 36, 37, 4, 42, 1, 43, 58, 8,
+ 13, 42, 10, 56, 35, 22, 48, 61, 63, 10, 49, 9, 24, 9, 25, 57,
+ 33, 18, 13, 31, 42, 36, 36, 55, 30, 37, 53, 34, 59, 4, 4, 23,
+ 8, 16, 58, 14, 30, 11, 12, 63, 49, 62, 2, 39, 47, 22, 2, 60,
+ 18, 8, 46, 31, 6, 20, 32, 29, 46, 42, 20, 31, 32, 61, 34, 4,
+ 47, 26, 20, 43, 26, 21, 7, 3, 16, 35, 18, 44, 27, 62, 13, 23,
+ 6, 50, 12, 8, 45, 17, 3, 46, 50, 7, 14, 5, 17, 54, 38, 0
+ },
+ {
+ 29, 56, 5, 7, 54, 48, 23, 37, 35, 44, 52, 40, 33, 49, 60, 0,
+ 59, 51, 28, 12, 41, 26, 2, 23, 34, 5, 59, 40, 3, 19, 6, 26,
+ 35, 53, 45, 49, 29, 57, 28, 62, 58, 59, 19, 53, 59, 62, 6, 54,
+ 13, 15, 48, 50, 45, 21, 41, 12, 34, 40, 24, 56, 19, 21, 35, 18,
+ 55, 45, 9, 61, 47, 61, 19, 15, 16, 39, 17, 31, 3, 51, 21, 50,
+ 17, 25, 25, 11, 44, 16, 18, 28, 14, 2, 37, 61, 58, 27, 62, 4,
+ 14, 17, 1, 9, 46, 28, 37, 0, 53, 43, 57, 7, 57, 46, 21, 41,
+ 39, 14, 52, 60, 44, 53, 49, 60, 49, 63, 13, 11, 29, 1, 55, 47,
+ 55, 12, 60, 43, 54, 37, 13, 6, 42, 10, 36, 13, 9, 8, 34, 51,
+ 31, 32, 12, 7, 57, 2, 26, 14, 3, 30, 63, 3, 32, 1, 5, 11,
+ 27, 24, 26, 44, 31, 23, 56, 38, 62, 0, 40, 30, 6, 23, 38, 2,
+ 47, 5, 15, 27, 16, 10, 31, 25, 22, 63, 30, 25, 20, 33, 32, 50,
+ 29, 43, 55, 10, 50, 45, 56, 20, 4, 7, 27, 46, 11, 16, 22, 52,
+ 35, 20, 41, 54, 46, 33, 42, 18, 63, 8, 22, 58, 36, 4, 51, 42,
+ 38, 32, 38, 22, 17, 0, 47, 8, 48, 8, 48, 1, 61, 36, 33, 20,
+ 24, 39, 39, 18, 30, 36, 9, 43, 42, 24, 10, 58, 4, 15, 34, 52
+ },
+};
+
+/*************************************************************************
+ * Offline region structures
+ *************************************************************************/
+
+/** Online group containing number of rules, values, keys and their bins
+ * for EFD_MAX_GROUP_NUM_RULES rules.
+ */
+struct efd_offline_group_rules {
+ uint32_t num_rules;
+ /**< Sum of the number of rules in all bins assigned to this group. */
+
+ uint32_t key_idx[EFD_MAX_GROUP_NUM_RULES];
+ /**< Array with all keys of the group. */
+ efd_value_t value[EFD_MAX_GROUP_NUM_RULES];
+ /**< Array with all values of the keys of the group. */
+
+ uint8_t bin_id[EFD_MAX_GROUP_NUM_RULES];
+ /**< Stores the bin for each correspending key to
+ * avoid having to recompute it
+ */
+};
+
+/** Offline chunk record, containing EFD_TARGET_CHUNK_NUM_RULES rules.
+ * Those rules are split into EFD_CHUNK_NUM_GROUPS groups per chunk.
+ */
+struct efd_offline_chunk_rules {
+ uint16_t num_rules;
+ /**< Number of rules in the entire chunk;
+ * used to detect unbalanced groups
+ */
+
+ struct efd_offline_group_rules group_rules[EFD_CHUNK_NUM_GROUPS];
+ /**< Array of all groups in the chunk. */
+};
+
+/*************************************************************************
+ * Online region structures
+ *************************************************************************/
+
+/** Online group containing values for EFD_MAX_GROUP_NUM_RULES rules. */
+struct efd_online_group_entry {
+ efd_hashfunc_t hash_idx[RTE_EFD_VALUE_NUM_BITS];
+ efd_lookuptbl_t lookup_table[RTE_EFD_VALUE_NUM_BITS];
+} __attribute__((__packed__));
+
+/**
+ * A single chunk record, containing EFD_TARGET_CHUNK_NUM_RULES rules.
+ * Those rules are split into EFD_CHUNK_NUM_GROUPS groups per chunk.
+ */
+struct efd_online_chunk {
+ uint8_t bin_choice_list[(EFD_CHUNK_NUM_BINS * 2 + 7) / 8];
+ /**< This is a packed indirection index into the 'groups' array.
+ * Each byte contains four two-bit values which index into
+ * the efd_bin_to_group array.
+ * The efd_bin_to_group array returns the index into the groups array
+ */
+
+ struct efd_online_group_entry groups[EFD_CHUNK_NUM_GROUPS];
+ /**< Array of all the groups in the chunk. */
+} __attribute__((__packed__));
+
+/**
+ * EFD table structure
+ */
+struct rte_efd_table {
+ char name[RTE_EFD_NAMESIZE]; /**< Name of the efd table. */
+
+ uint32_t key_len; /**< Length of the key stored offline */
+
+ uint32_t max_num_rules;
+ /**< Static maximum number of entries the table was constructed to hold. */
+
+ uint32_t num_rules;
+ /**< Number of entries currently in the table . */
+
+ uint32_t num_chunks;
+ /**< Number of chunks in the table needed to support num_rules. */
+
+ uint32_t num_chunks_shift;
+ /**< Bits to shift to get chunk id, instead of dividing by num_chunk. */
+
+ enum efd_lookup_internal_function lookup_fn;
+ /**< Indicates which lookup function to use. */
+
+ struct efd_online_chunk *chunks[RTE_MAX_NUMA_NODES];
+ /**< Dynamic array of size num_chunks of chunk records. */
+
+ struct efd_offline_chunk_rules *offline_chunks;
+ /**< Dynamic array of size num_chunks of key-value pairs. */
+
+ struct rte_ring *free_slots;
+ /**< Ring that stores all indexes of the free slots in the key table */
+
+ uint8_t *keys; /**< Dynamic array of size max_num_rules of keys */
+};
+
+/**
+ * Computes the chunk ID for a given key hash
+ *
+ * @param table
+ * EFD table to reference
+ * @param hashed_key
+ * 32-bit key hash returned by EFD_HASH
+ *
+ * @return
+ * chunk ID containing this key hash
+ */
+static inline uint32_t
+efd_get_chunk_id(const struct rte_efd_table * const table,
+ const uint32_t hashed_key)
+{
+ return hashed_key & (table->num_chunks - 1);
+}
+
+/**
+ * Computes the bin ID for a given key hash
+ *
+ * @param table
+ * EFD table to reference
+ * @param hashed_key
+ * 32-bit key hash returned by EFD_HASH
+ *
+ * @return bin ID containing this key hash
+ */
+static inline uint32_t
+efd_get_bin_id(const struct rte_efd_table * const table,
+ const uint32_t hashed_key)
+{
+ return (hashed_key >> table->num_chunks_shift) & (EFD_CHUNK_NUM_BINS - 1);
+}
+
+/**
+ * Looks up the current permutation choice for a particular bin in the online table
+ *
+ * @param table
+ * EFD table to reference
+ * @param socket_id
+ * Socket ID to use to look up existing values (ideally caller's socket id)
+ * @param chunk_id
+ * Chunk ID of bin to look up
+ * @param bin_id
+ * Bin ID to look up
+ *
+ * @return
+ * Currently active permutation choice in the online table
+ */
+static inline uint8_t
+efd_get_choice(const struct rte_efd_table * const table,
+ const unsigned int socket_id, const uint32_t chunk_id,
+ const uint32_t bin_id)
+{
+ struct efd_online_chunk *chunk = &table->chunks[socket_id][chunk_id];
+
+ /*
+ * Grab the chunk (byte) that contains the choices
+ * for four neighboring bins.
+ */
+ uint8_t choice_chunk =
+ chunk->bin_choice_list[bin_id / EFD_CHUNK_NUM_BIN_TO_GROUP_SETS];
+
+ /*
+ * Compute the offset into the chunk that contains
+ * the group_id lookup position
+ */
+ int offset = (bin_id & 0x3) * 2;
+
+ /* Extract from the byte just the desired lookup position */
+ return (uint8_t) ((choice_chunk >> offset) & 0x3);
+}
+
+/**
+ * Compute the chunk_id and bin_id for a given key
+ *
+ * @param table
+ * EFD table to reference
+ * @param key
+ * Key to hash and find location of
+ * @param chunk_id
+ * Computed chunk ID
+ * @param bin_id
+ * Computed bin ID
+ *
+ */
+static inline void
+efd_compute_ids(const struct rte_efd_table * const table,
+ const void *key, uint32_t * const chunk_id, uint32_t * const bin_id)
+{
+ /* Compute the position of the entry in the hash table */
+ uint32_t h = EFD_HASH(key, table);
+
+ /* Compute the chunk_id where that entry can be found */
+ *chunk_id = efd_get_chunk_id(table, h);
+
+ /*
+ * Compute the bin within that chunk where the entry
+ * can be found (0 - 255)
+ */
+ *bin_id = efd_get_bin_id(table, h);
+}
+
+/**
+ * Search for a hash function for a group that satisfies all group results
+ */
+static inline int
+efd_search_hash(struct rte_efd_table * const table,
+ const struct efd_offline_group_rules * const off_group,
+ struct efd_online_group_entry * const on_group)
+{
+ efd_hashfunc_t hash_idx;
+ efd_hashfunc_t start_hash_idx[RTE_EFD_VALUE_NUM_BITS];
+ efd_lookuptbl_t start_lookup_table[RTE_EFD_VALUE_NUM_BITS];
+
+ uint32_t i, j, rule_id;
+ uint32_t hash_val_a[EFD_MAX_GROUP_NUM_RULES];
+ uint32_t hash_val_b[EFD_MAX_GROUP_NUM_RULES];
+ uint32_t hash_val[EFD_MAX_GROUP_NUM_RULES];
+
+
+ rte_prefetch0(off_group->value);
+
+ /*
+ * Prepopulate the hash_val tables by running the two hash functions
+ * for each provided rule
+ */
+ for (i = 0; i < off_group->num_rules; i++) {
+ void *key_stored = EFD_KEY(off_group->key_idx[i], table);
+ hash_val_b[i] = EFD_HASHFUNCB(key_stored, table);
+ hash_val_a[i] = EFD_HASHFUNCA(key_stored, table);
+ }
+
+ for (i = 0; i < RTE_EFD_VALUE_NUM_BITS; i++) {
+ hash_idx = on_group->hash_idx[i];
+ start_hash_idx[i] = hash_idx;
+ start_lookup_table[i] = on_group->lookup_table[i];
+
+ do {
+ efd_lookuptbl_t lookup_table = 0;
+ efd_lookuptbl_t lookup_table_complement = 0;
+
+ for (rule_id = 0; rule_id < off_group->num_rules; rule_id++)
+ hash_val[rule_id] = hash_val_a[rule_id] + (hash_idx *
+ hash_val_b[rule_id]);
+
+ /*
+ * The goal here is to find a hash function for this
+ * particular bit entry that meets the following criteria:
+ * The most significant bits of the hash result define a
+ * shift into the lookup table where the bit will be stored
+ */
+
+ /* Iterate over each provided rule */
+ for (rule_id = 0; rule_id < off_group->num_rules;
+ rule_id++) {
+ /*
+ * Use the few most significant bits (number based on
+ * EFD_LOOKUPTBL_SIZE) to see what position the
+ * expected bit should be set in the lookup_table
+ */
+ uint32_t bucket_idx = hash_val[rule_id] >>
+ EFD_LOOKUPTBL_SHIFT;
+
+ /*
+ * Get the current bit of interest.
+ * This only find an appropriate hash function
+ * for one bit at a time of the rule
+ */
+ efd_lookuptbl_t expected =
+ (off_group->value[rule_id] >> i) & 0x1;
+
+ /*
+ * Add the expected bit (if set) to a map
+ * (lookup_table). Also set its complement
+ * in lookup_table_complement
+ */
+ lookup_table |= expected << bucket_idx;
+ lookup_table_complement |= (1 - expected)
+ << bucket_idx;
+
+ /*
+ * If ever the hash function of two different
+ * elements result in different values at the
+ * same location in the lookup_table,
+ * the current hash_idx is not valid.
+ */
+ if (lookup_table & lookup_table_complement)
+ break;
+ }
+
+ /*
+ * Check if the previous loop completed without
+ * breaking early
+ */
+ if (rule_id == off_group->num_rules) {
+ /*
+ * Current hash function worked, store it
+ * for the current group
+ */
+ on_group->hash_idx[i] = hash_idx;
+ on_group->lookup_table[i] = lookup_table;
+
+ /*
+ * Make sure that the hash function has changed
+ * from the starting value
+ */
+ hash_idx = start_hash_idx[i] + 1;
+ break;
+ }
+ hash_idx++;
+
+ } while (hash_idx != start_hash_idx[i]);
+
+ /* Failed to find perfect hash for this group */
+ if (hash_idx == start_hash_idx[i]) {
+ /*
+ * Restore previous hash_idx and lookup_table
+ * for all value bits
+ */
+ for (j = 0; j < i; j++) {
+ on_group->hash_idx[j] = start_hash_idx[j];
+ on_group->lookup_table[j] = start_lookup_table[j];
+ }
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+struct rte_efd_table *
+rte_efd_create(const char *name, uint32_t max_num_rules, uint32_t key_len,
+ uint8_t online_cpu_socket_bitmask, uint8_t offline_cpu_socket)
+{
+ struct rte_efd_table *table = NULL;
+ uint8_t *key_array = NULL;
+ uint32_t num_chunks, num_chunks_shift;
+ uint8_t socket_id;
+ struct rte_efd_list *efd_list = NULL;
+ struct rte_tailq_entry *te;
+ uint64_t offline_table_size;
+ char ring_name[RTE_RING_NAMESIZE];
+ struct rte_ring *r = NULL;
+ unsigned int i;
+
+ efd_list = RTE_TAILQ_CAST(rte_efd_tailq.head, rte_efd_list);
+
+ if (online_cpu_socket_bitmask == 0) {
+ RTE_LOG(ERR, EFD, "At least one CPU socket must be enabled "
+ "in the bitmask\n");
+ return NULL;
+ }
+
+ if (max_num_rules == 0) {
+ RTE_LOG(ERR, EFD, "Max num rules must be higher than 0\n");
+ return NULL;
+ }
+
+ /*
+ * Compute the minimum number of chunks (smallest power of 2)
+ * that can hold all of the rules
+ */
+ if (max_num_rules % EFD_TARGET_CHUNK_NUM_RULES == 0)
+ num_chunks = rte_align32pow2(max_num_rules /
+ EFD_TARGET_CHUNK_NUM_RULES);
+ else
+ num_chunks = rte_align32pow2((max_num_rules /
+ EFD_TARGET_CHUNK_NUM_RULES) + 1);
+
+ num_chunks_shift = rte_bsf32(num_chunks);
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /*
+ * Guarantee there's no existing: this is normally already checked
+ * by ring creation above
+ */
+ TAILQ_FOREACH(te, efd_list, next)
+ {
+ table = (struct rte_efd_table *) te->data;
+ if (strncmp(name, table->name, RTE_EFD_NAMESIZE) == 0)
+ break;
+ }
+
+ table = NULL;
+ if (te != NULL) {
+ rte_errno = EEXIST;
+ te = NULL;
+ goto error_unlock_exit;
+ }
+
+ te = rte_zmalloc("EFD_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, EFD, "tailq entry allocation failed\n");
+ goto error_unlock_exit;
+ }
+
+ /* Create a new EFD table management structure */
+ table = (struct rte_efd_table *) rte_zmalloc_socket(NULL,
+ sizeof(struct rte_efd_table),
+ RTE_CACHE_LINE_SIZE,
+ offline_cpu_socket);
+ if (table == NULL) {
+ RTE_LOG(ERR, EFD, "Allocating EFD table management structure"
+ " on socket %u failed\n",
+ offline_cpu_socket);
+ goto error_unlock_exit;
+ }
+
+
+ RTE_LOG(DEBUG, EFD, "Allocated EFD table management structure "
+ "on socket %u\n", offline_cpu_socket);
+
+ table->max_num_rules = num_chunks * EFD_TARGET_CHUNK_MAX_NUM_RULES;
+ table->num_rules = 0;
+ table->num_chunks = num_chunks;
+ table->num_chunks_shift = num_chunks_shift;
+ table->key_len = key_len;
+
+ /* key_array */
+ key_array = (uint8_t *) rte_zmalloc_socket(NULL,
+ table->max_num_rules * table->key_len,
+ RTE_CACHE_LINE_SIZE,
+ offline_cpu_socket);
+ if (key_array == NULL) {
+ RTE_LOG(ERR, EFD, "Allocating key array"
+ " on socket %u failed\n",
+ offline_cpu_socket);
+ goto error_unlock_exit;
+ }
+ table->keys = key_array;
+ snprintf(table->name, sizeof(table->name), "%s", name);
+
+ RTE_LOG(DEBUG, EFD, "Creating an EFD table with %u chunks,"
+ " which potentially supports %u entries\n",
+ num_chunks, table->max_num_rules);
+
+ /* Make sure all the allocatable table pointers are NULL initially */
+ for (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES; socket_id++)
+ table->chunks[socket_id] = NULL;
+ table->offline_chunks = NULL;
+
+ /*
+ * Allocate one online table per socket specified
+ * in the user-supplied bitmask
+ */
+ uint64_t online_table_size = num_chunks * sizeof(struct efd_online_chunk) +
+ EFD_NUM_CHUNK_PADDING_BYTES;
+
+ for (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES; socket_id++) {
+ if ((online_cpu_socket_bitmask >> socket_id) & 0x01) {
+ /*
+ * Allocate all of the EFD table chunks (the online portion)
+ * as a continuous block
+ */
+ table->chunks[socket_id] =
+ (struct efd_online_chunk *) rte_zmalloc_socket(
+ NULL,
+ online_table_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (table->chunks[socket_id] == NULL) {
+ RTE_LOG(ERR, EFD,
+ "Allocating EFD online table on "
+ "socket %u failed\n",
+ socket_id);
+ goto error_unlock_exit;
+ }
+ RTE_LOG(DEBUG, EFD,
+ "Allocated EFD online table of size "
+ "%"PRIu64" bytes (%.2f MB) on socket %u\n",
+ online_table_size,
+ (float) online_table_size /
+ (1024.0F * 1024.0F),
+ socket_id);
+ }
+ }
+
+#if defined(RTE_ARCH_X86)
+ /*
+ * For less than 4 bits, scalar function performs better
+ * than vectorised version
+ */
+ if (RTE_EFD_VALUE_NUM_BITS > 3 && rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ table->lookup_fn = EFD_LOOKUP_AVX2;
+ else
+#endif
+ table->lookup_fn = EFD_LOOKUP_SCALAR;
+
+ /*
+ * Allocate the EFD table offline portion (with the actual rules
+ * mapping keys to values) as a continuous block.
+ * This could be several gigabytes of memory.
+ */
+ offline_table_size = num_chunks * sizeof(struct efd_offline_chunk_rules);
+ table->offline_chunks =
+ (struct efd_offline_chunk_rules *) rte_zmalloc_socket(NULL,
+ offline_table_size,
+ RTE_CACHE_LINE_SIZE,
+ offline_cpu_socket);
+ if (table->offline_chunks == NULL) {
+ RTE_LOG(ERR, EFD, "Allocating EFD offline table on socket %u "
+ "failed\n", offline_cpu_socket);
+ goto error_unlock_exit;
+ }
+
+ RTE_LOG(DEBUG, EFD,
+ "Allocated EFD offline table of size %"PRIu64" bytes "
+ " (%.2f MB) on socket %u\n", offline_table_size,
+ (float) offline_table_size / (1024.0F * 1024.0F),
+ offline_cpu_socket);
+
+ te->data = (void *) table;
+ TAILQ_INSERT_TAIL(efd_list, te, next);
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ snprintf(ring_name, sizeof(ring_name), "HT_%s", table->name);
+ /* Create ring (Dummy slot index is not enqueued) */
+ r = rte_ring_create(ring_name, rte_align32pow2(table->max_num_rules),
+ offline_cpu_socket, 0);
+ if (r == NULL) {
+ RTE_LOG(ERR, EFD, "memory allocation failed\n");
+ goto error_unlock_exit;
+ }
+
+ /* Populate free slots ring. Entry zero is reserved for key misses. */
+ for (i = 0; i < table->max_num_rules; i++)
+ rte_ring_sp_enqueue(r, (void *) ((uintptr_t) i));
+
+ table->free_slots = r;
+ return table;
+
+error_unlock_exit:
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_efd_free(table);
+
+ return NULL;
+}
+
+struct rte_efd_table *
+rte_efd_find_existing(const char *name)
+{
+ struct rte_efd_table *table = NULL;
+ struct rte_tailq_entry *te;
+ struct rte_efd_list *efd_list;
+
+ efd_list = RTE_TAILQ_CAST(rte_efd_tailq.head, rte_efd_list);
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ TAILQ_FOREACH(te, efd_list, next)
+ {
+ table = (struct rte_efd_table *) te->data;
+ if (strncmp(name, table->name, RTE_EFD_NAMESIZE) == 0)
+ break;
+ }
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+ return table;
+}
+
+void
+rte_efd_free(struct rte_efd_table *table)
+{
+ uint8_t socket_id;
+
+ if (table == NULL)
+ return;
+
+ for (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES; socket_id++)
+ rte_free(table->chunks[socket_id]);
+
+ rte_ring_free(table->free_slots);
+ rte_free(table->offline_chunks);
+ rte_free(table->keys);
+ rte_free(table);
+}
+
+/**
+ * Applies a previously computed table entry to the specified table for all
+ * socket-local copies of the online table.
+ * Intended to apply an update for only a single change
+ * to a key/value pair at a time
+ *
+ * @param table
+ * EFD table to reference
+ * @param socket_id
+ * Socket ID to use to lookup existing values (ideally caller's socket id)
+ * @param chunk_id
+ * Chunk index to update
+ * @param group_id
+ * Group index to update
+ * @param bin_id
+ * Bin within the group that this update affects
+ * @param new_bin_choice
+ * Newly chosen permutation which this bin should use - only lower 2 bits
+ * @param new_group_entry
+ * Previously computed updated chunk/group entry
+ */
+static inline void
+efd_apply_update(struct rte_efd_table * const table, const unsigned int socket_id,
+ const uint32_t chunk_id, const uint32_t group_id,
+ const uint32_t bin_id, const uint8_t new_bin_choice,
+ const struct efd_online_group_entry * const new_group_entry)
+{
+ int i;
+ struct efd_online_chunk *chunk = &table->chunks[socket_id][chunk_id];
+ uint8_t bin_index = bin_id / EFD_CHUNK_NUM_BIN_TO_GROUP_SETS;
+
+ /*
+ * Grab the current byte that contains the choices
+ * for four neighboring bins
+ */
+ uint8_t choice_chunk =
+ chunk->bin_choice_list[bin_index];
+
+
+ /* Compute the offset into the chunk that needs to be updated */
+ int offset = (bin_id & 0x3) * 2;
+
+ /* Zero the two bits of interest and set them to new_bin_choice */
+ choice_chunk = (choice_chunk & (~(0x03 << offset)))
+ | ((new_bin_choice & 0x03) << offset);
+
+ /* Update the online table with the new data across all sockets */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
+ if (table->chunks[i] != NULL) {
+ memcpy(&(table->chunks[i][chunk_id].groups[group_id]),
+ new_group_entry,
+ sizeof(struct efd_online_group_entry));
+ table->chunks[i][chunk_id].bin_choice_list[bin_index] =
+ choice_chunk;
+ }
+ }
+}
+
+/*
+ * Move the bin from prev group to the new group
+ */
+static inline void
+move_groups(uint32_t bin_id, uint8_t bin_size,
+ struct efd_offline_group_rules *new_group,
+ struct efd_offline_group_rules * const current_group)
+{
+
+ uint8_t empty_idx = 0;
+ unsigned int i;
+
+ if (new_group == current_group)
+ return;
+
+ for (i = 0; i < current_group->num_rules; i++) {
+ /*
+ * Move keys that belong to the same bin
+ * to the new group
+ */
+ if (current_group->bin_id[i] == bin_id) {
+ new_group->key_idx[new_group->num_rules] =
+ current_group->key_idx[i];
+ new_group->value[new_group->num_rules] =
+ current_group->value[i];
+ new_group->bin_id[new_group->num_rules] =
+ current_group->bin_id[i];
+ new_group->num_rules++;
+ } else {
+ if (i != empty_idx) {
+ /*
+ * Need to move this key towards
+ * the top of the array
+ */
+ current_group->key_idx[empty_idx] =
+ current_group->key_idx[i];
+ current_group->value[empty_idx] =
+ current_group->value[i];
+ current_group->bin_id[empty_idx] =
+ current_group->bin_id[i];
+ }
+ empty_idx++;
+ }
+
+ }
+ current_group->num_rules -= bin_size;
+}
+
+/*
+ * Revert group/s to their previous state before
+ * trying to insert/add a new key
+ */
+static inline void
+revert_groups(struct efd_offline_group_rules *previous_group,
+ struct efd_offline_group_rules *current_group, uint8_t bin_size)
+{
+ unsigned int i;
+
+ if (current_group == previous_group)
+ return;
+
+ /* Move keys back to previous group */
+ for (i = current_group->num_rules - bin_size;
+ i < current_group->num_rules; i++) {
+ previous_group->key_idx[previous_group->num_rules] =
+ current_group->key_idx[i];
+ previous_group->value[previous_group->num_rules] =
+ current_group->value[i];
+ previous_group->bin_id[previous_group->num_rules] =
+ current_group->bin_id[i];
+ previous_group->num_rules++;
+ }
+
+ /*
+ * Decrease number of rules after the move
+ * in the new group
+ */
+ current_group->num_rules -= bin_size;
+}
+
+/**
+ * Computes an updated table entry where the supplied key points to a new host.
+ * If no entry exists, one is inserted.
+ *
+ * This function does NOT modify the online table(s)
+ * This function DOES modify the offline table
+ *
+ * @param table
+ * EFD table to reference
+ * @param socket_id
+ * Socket ID to use to lookup existing values (ideally caller's socket id)
+ * @param key
+ * Key to insert
+ * @param value
+ * Value to associate with key
+ * @param chunk_id
+ * Chunk ID of the chunk that was modified
+ * @param group_id
+ * Group ID of the group that was modified
+ * @param bin_id
+ * Bin ID that was modified
+ * @param new_bin_choice
+ * Newly chosen permutation which this bin will use
+ * @param entry
+ * Newly computed online entry to apply later with efd_apply_update
+ *
+ * @return
+ * RTE_EFD_UPDATE_WARN_GROUP_FULL
+ * Operation is insert, and the last available space in the
+ * key's group was just used. Future inserts may fail as groups fill up.
+ * This operation was still successful, and entry contains a valid update
+ * RTE_EFD_UPDATE_FAILED
+ * Either the EFD failed to find a suitable perfect hash or the group was full
+ * This is a fatal error, and the table is now in an indeterminite state
+ * RTE_EFD_UPDATE_NO_CHANGE
+ * Operation resulted in no change to the table (same value already exists)
+ * 0
+ * Insert or update was successful, and the new efd_online_group_entry
+ * is stored in *entry
+ *
+ * @warning
+ * Note that entry will be UNCHANGED if the update has no effect, and thus any
+ * subsequent use of the entry content will likely be invalid
+ */
+static inline int
+efd_compute_update(struct rte_efd_table * const table,
+ const unsigned int socket_id, const void *key,
+ const efd_value_t value, uint32_t * const chunk_id,
+ uint32_t * const group_id, uint32_t * const bin_id,
+ uint8_t * const new_bin_choice,
+ struct efd_online_group_entry * const entry)
+{
+ unsigned int i;
+ int ret;
+ uint32_t new_idx;
+ void *new_k, *slot_id = NULL;
+ int status = EXIT_SUCCESS;
+ unsigned int found = 0;
+
+ efd_compute_ids(table, key, chunk_id, bin_id);
+
+ struct efd_offline_chunk_rules * const chunk =
+ &table->offline_chunks[*chunk_id];
+ struct efd_offline_group_rules *new_group;
+
+ uint8_t current_choice = efd_get_choice(table, socket_id,
+ *chunk_id, *bin_id);
+ uint32_t current_group_id = efd_bin_to_group[current_choice][*bin_id];
+ struct efd_offline_group_rules * const current_group =
+ &chunk->group_rules[current_group_id];
+ uint8_t bin_size = 0;
+ uint8_t key_changed_index = 0;
+ efd_value_t key_changed_previous_value = 0;
+ uint32_t key_idx_previous = 0;
+
+ /* Scan the current group and see if the key is already present */
+ for (i = 0; i < current_group->num_rules; i++) {
+ if (current_group->bin_id[i] == *bin_id)
+ bin_size++;
+ else
+ continue;
+
+ void *key_stored = EFD_KEY(current_group->key_idx[i], table);
+ if (found == 0 && unlikely(memcmp(key_stored, key,
+ table->key_len) == 0)) {
+ /* Key is already present */
+
+ /*
+ * If previous value is same as new value,
+ * no additional work is required
+ */
+ if (current_group->value[i] == value)
+ return RTE_EFD_UPDATE_NO_CHANGE;
+
+ key_idx_previous = current_group->key_idx[i];
+ key_changed_previous_value = current_group->value[i];
+ key_changed_index = i;
+ current_group->value[i] = value;
+ found = 1;
+ }
+ }
+
+ if (found == 0) {
+ /* Key does not exist. Insert the rule into the bin/group */
+ if (unlikely(current_group->num_rules >= EFD_MAX_GROUP_NUM_RULES)) {
+ RTE_LOG(ERR, EFD,
+ "Fatal: No room remaining for insert into "
+ "chunk %u group %u bin %u\n",
+ *chunk_id,
+ current_group_id, *bin_id);
+ return RTE_EFD_UPDATE_FAILED;
+ }
+
+ if (unlikely(current_group->num_rules ==
+ (EFD_MAX_GROUP_NUM_RULES - 1))) {
+ RTE_LOG(INFO, EFD, "Warn: Insert into last "
+ "available slot in chunk %u "
+ "group %u bin %u\n", *chunk_id,
+ current_group_id, *bin_id);
+ status = RTE_EFD_UPDATE_WARN_GROUP_FULL;
+ }
+
+ if (rte_ring_sc_dequeue(table->free_slots, &slot_id) != 0)
+ return RTE_EFD_UPDATE_FAILED;
+
+ new_k = RTE_PTR_ADD(table->keys, (uintptr_t) slot_id *
+ table->key_len);
+ rte_prefetch0(new_k);
+ new_idx = (uint32_t) ((uintptr_t) slot_id);
+
+ rte_memcpy(EFD_KEY(new_idx, table), key, table->key_len);
+ current_group->key_idx[current_group->num_rules] = new_idx;
+ current_group->value[current_group->num_rules] = value;
+ current_group->bin_id[current_group->num_rules] = *bin_id;
+ current_group->num_rules++;
+ table->num_rules++;
+ bin_size++;
+ } else {
+ uint32_t last = current_group->num_rules - 1;
+ /* Swap the key with the last key inserted*/
+ current_group->key_idx[key_changed_index] =
+ current_group->key_idx[last];
+ current_group->value[key_changed_index] =
+ current_group->value[last];
+ current_group->bin_id[key_changed_index] =
+ current_group->bin_id[last];
+
+ /*
+ * Key to be updated will always be available
+ * at the end of the group
+ */
+ current_group->key_idx[last] = key_idx_previous;
+ current_group->value[last] = value;
+ current_group->bin_id[last] = *bin_id;
+ }
+
+ *new_bin_choice = current_choice;
+ *group_id = current_group_id;
+ new_group = current_group;
+
+ /* Group need to be rebalanced when it starts to get loaded */
+ if (current_group->num_rules > EFD_MIN_BALANCED_NUM_RULES) {
+
+ /*
+ * Subtract the number of entries in the bin from
+ * the original group
+ */
+ current_group->num_rules -= bin_size;
+
+ /*
+ * Figure out which of the available groups that this bin
+ * can map to is the smallest (using the current group
+ * as baseline)
+ */
+ uint8_t smallest_choice = current_choice;
+ uint8_t smallest_size = current_group->num_rules;
+ uint32_t smallest_group_id = current_group_id;
+ unsigned char choice;
+
+ for (choice = 0; choice < EFD_CHUNK_NUM_BIN_TO_GROUP_SETS;
+ choice++) {
+ uint32_t test_group_id =
+ efd_bin_to_group[choice][*bin_id];
+ uint32_t num_rules =
+ chunk->group_rules[test_group_id].num_rules;
+ if (num_rules < smallest_size) {
+ smallest_choice = choice;
+ smallest_size = num_rules;
+ smallest_group_id = test_group_id;
+ }
+ }
+
+ *new_bin_choice = smallest_choice;
+ *group_id = smallest_group_id;
+ new_group = &chunk->group_rules[smallest_group_id];
+ current_group->num_rules += bin_size;
+
+ }
+
+ uint8_t choice = 0;
+ for (;;) {
+ if (current_group != new_group &&
+ new_group->num_rules + bin_size >
+ EFD_MAX_GROUP_NUM_RULES) {
+ RTE_LOG(DEBUG, EFD,
+ "Unable to move_groups to dest group "
+ "containing %u entries."
+ "bin_size:%u choice:%02x\n",
+ new_group->num_rules, bin_size,
+ choice - 1);
+ goto next_choice;
+ }
+ move_groups(*bin_id, bin_size, new_group, current_group);
+ /*
+ * Recompute the hash function for the modified group,
+ * and return it to the caller
+ */
+ ret = efd_search_hash(table, new_group, entry);
+
+ if (!ret)
+ return status;
+
+ RTE_LOG(DEBUG, EFD,
+ "Failed to find perfect hash for group "
+ "containing %u entries. bin_size:%u choice:%02x\n",
+ new_group->num_rules, bin_size, choice - 1);
+ /* Restore groups modified to their previous state */
+ revert_groups(current_group, new_group, bin_size);
+
+next_choice:
+ if (choice == EFD_CHUNK_NUM_BIN_TO_GROUP_SETS)
+ break;
+ *new_bin_choice = choice;
+ *group_id = efd_bin_to_group[choice][*bin_id];
+ new_group = &chunk->group_rules[*group_id];
+ choice++;
+ }
+
+ if (!found) {
+ current_group->num_rules--;
+ table->num_rules--;
+ } else
+ current_group->value[current_group->num_rules - 1] =
+ key_changed_previous_value;
+ return RTE_EFD_UPDATE_FAILED;
+}
+
+int
+rte_efd_update(struct rte_efd_table * const table, const unsigned int socket_id,
+ const void *key, const efd_value_t value)
+{
+ uint32_t chunk_id = 0, group_id = 0, bin_id = 0;
+ uint8_t new_bin_choice = 0;
+ struct efd_online_group_entry entry;
+
+ int status = efd_compute_update(table, socket_id, key, value,
+ &chunk_id, &group_id, &bin_id,
+ &new_bin_choice, &entry);
+
+ if (status == RTE_EFD_UPDATE_NO_CHANGE)
+ return EXIT_SUCCESS;
+
+ if (status == RTE_EFD_UPDATE_FAILED)
+ return status;
+
+ efd_apply_update(table, socket_id, chunk_id, group_id, bin_id,
+ new_bin_choice, &entry);
+ return status;
+}
+
+int
+rte_efd_delete(struct rte_efd_table * const table, const unsigned int socket_id,
+ const void *key, efd_value_t * const prev_value)
+{
+ unsigned int i;
+ uint32_t chunk_id, bin_id;
+ uint8_t not_found = 1;
+
+ efd_compute_ids(table, key, &chunk_id, &bin_id);
+
+ struct efd_offline_chunk_rules * const chunk =
+ &table->offline_chunks[chunk_id];
+
+ uint8_t current_choice = efd_get_choice(table, socket_id,
+ chunk_id, bin_id);
+ uint32_t current_group_id = efd_bin_to_group[current_choice][bin_id];
+ struct efd_offline_group_rules * const current_group =
+ &chunk->group_rules[current_group_id];
+
+ /*
+ * Search the current group for the specified key.
+ * If it exists, remove it and re-pack the other values
+ */
+ for (i = 0; i < current_group->num_rules; i++) {
+ if (not_found) {
+ /* Found key that needs to be removed */
+ if (memcmp(EFD_KEY(current_group->key_idx[i], table),
+ key, table->key_len) == 0) {
+ /* Store previous value if requested by caller */
+ if (prev_value != NULL)
+ *prev_value = current_group->value[i];
+
+ not_found = 0;
+ rte_ring_sp_enqueue(table->free_slots,
+ (void *)((uintptr_t)current_group->key_idx[i]));
+ }
+ } else {
+ /*
+ * If the desired key has been found,
+ * need to shift other values up one
+ */
+
+ /* Need to shift this entry back up one index */
+ current_group->key_idx[i - 1] = current_group->key_idx[i];
+ current_group->value[i - 1] = current_group->value[i];
+ current_group->bin_id[i - 1] = current_group->bin_id[i];
+ }
+ }
+
+ if (not_found == 0) {
+ table->num_rules--;
+ current_group->num_rules--;
+ }
+
+ return not_found;
+}
+
+static inline efd_value_t
+efd_lookup_internal_scalar(const efd_hashfunc_t *group_hash_idx,
+ const efd_lookuptbl_t *group_lookup_table,
+ const uint32_t hash_val_a, const uint32_t hash_val_b)
+{
+ efd_value_t value = 0;
+ uint32_t i;
+
+ for (i = 0; i < RTE_EFD_VALUE_NUM_BITS; i++) {
+ value <<= 1;
+ uint32_t h = hash_val_a + (hash_val_b *
+ group_hash_idx[RTE_EFD_VALUE_NUM_BITS - i - 1]);
+ uint16_t bucket_idx = h >> EFD_LOOKUPTBL_SHIFT;
+ value |= (group_lookup_table[
+ RTE_EFD_VALUE_NUM_BITS - i - 1] >>
+ bucket_idx) & 0x1;
+ }
+
+ return value;
+}
+
+
+static inline efd_value_t
+efd_lookup_internal(const struct efd_online_group_entry * const group,
+ const uint32_t hash_val_a, const uint32_t hash_val_b,
+ enum efd_lookup_internal_function lookup_fn)
+{
+ efd_value_t value = 0;
+
+ switch (lookup_fn) {
+
+#if defined(RTE_ARCH_X86)
+ case EFD_LOOKUP_AVX2:
+ return efd_lookup_internal_avx2(group->hash_idx,
+ group->lookup_table,
+ hash_val_a,
+ hash_val_b);
+#endif
+ case EFD_LOOKUP_SCALAR:
+ /* Fall-through */
+ default:
+ return efd_lookup_internal_scalar(group->hash_idx,
+ group->lookup_table,
+ hash_val_a,
+ hash_val_b);
+ }
+
+ return value;
+}
+
+efd_value_t
+rte_efd_lookup(const struct rte_efd_table * const table,
+ const unsigned int socket_id, const void *key)
+{
+ uint32_t chunk_id, group_id, bin_id;
+ uint8_t bin_choice;
+ const struct efd_online_group_entry *group;
+ const struct efd_online_chunk * const chunks = table->chunks[socket_id];
+
+ /* Determine the chunk and group location for the given key */
+ efd_compute_ids(table, key, &chunk_id, &bin_id);
+ bin_choice = efd_get_choice(table, socket_id, chunk_id, bin_id);
+ group_id = efd_bin_to_group[bin_choice][bin_id];
+ group = &chunks[chunk_id].groups[group_id];
+
+ return efd_lookup_internal(group,
+ EFD_HASHFUNCA(key, table),
+ EFD_HASHFUNCB(key, table),
+ table->lookup_fn);
+}
+
+void rte_efd_lookup_bulk(const struct rte_efd_table * const table,
+ const unsigned int socket_id, const int num_keys,
+ const void **key_list, efd_value_t * const value_list)
+{
+ int i;
+ uint32_t chunk_id_list[RTE_EFD_BURST_MAX];
+ uint32_t bin_id_list[RTE_EFD_BURST_MAX];
+ uint8_t bin_choice_list[RTE_EFD_BURST_MAX];
+ uint32_t group_id_list[RTE_EFD_BURST_MAX];
+ struct efd_online_group_entry *group;
+
+ struct efd_online_chunk *chunks = table->chunks[socket_id];
+
+ for (i = 0; i < num_keys; i++) {
+ efd_compute_ids(table, key_list[i], &chunk_id_list[i],
+ &bin_id_list[i]);
+ rte_prefetch0(&chunks[chunk_id_list[i]].bin_choice_list);
+ }
+
+ for (i = 0; i < num_keys; i++) {
+ bin_choice_list[i] = efd_get_choice(table, socket_id,
+ chunk_id_list[i], bin_id_list[i]);
+ group_id_list[i] =
+ efd_bin_to_group[bin_choice_list[i]][bin_id_list[i]];
+ group = &chunks[chunk_id_list[i]].groups[group_id_list[i]];
+ rte_prefetch0(group);
+ }
+
+ for (i = 0; i < num_keys; i++) {
+ group = &chunks[chunk_id_list[i]].groups[group_id_list[i]];
+ value_list[i] = efd_lookup_internal(group,
+ EFD_HASHFUNCA(key_list[i], table),
+ EFD_HASHFUNCB(key_list[i], table),
+ table->lookup_fn);
+ }
+}
diff --git a/lib/librte_efd/rte_efd.h b/lib/librte_efd/rte_efd.h
new file mode 100644
index 00000000..15968635
--- /dev/null
+++ b/lib/librte_efd/rte_efd.h
@@ -0,0 +1,308 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_EFD_H_
+#define _RTE_EFD_H_
+
+/**
+ * @file
+ *
+ * RTE EFD Table
+ */
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*************************************************************************
+ * User selectable constants
+ *************************************************************************/
+
+/*
+ * If possible, best lookup performance will be achieved by ensuring that
+ * the entire table fits in the L3 cache.
+ *
+ * Some formulas for calculating various sizes are listed below:
+ *
+ * # of chunks =
+ * 2 ^ (ceiling(log2((requested # of rules) /
+ * (EFD_CHUNK_NUM_GROUPS * EFD_TARGET_GROUP_NUM_RULES))))
+ *
+ * Target # of rules = (# of chunks) * EFD_CHUNK_NUM_GROUPS *
+ * EFD_TARGET_GROUP_NUM_RULES
+ *
+ * Group Size (in bytes) = 4 (per value bit)
+ *
+ * Table size (in bytes) = RTE_EFD_VALUE_NUM_BITS * (# of chunks) *
+ * EFD_CHUNK_NUM_GROUPS * (group size)
+ */
+
+/**
+ * !!! This parameter should be adjusted for your application !!!
+ *
+ * This parameter adjusts the number of bits of value that can be
+ * stored in the table.
+ * For example, setting the number of bits to 3 will allow storing 8 values
+ * in the table (between 0 and 7).
+ *
+ * This number directly affects the performance of both lookups and insertion.
+ * In general, performance decreases as more bits are stored in the table.
+ *
+ * This number is directly proportional to the size of the online region
+ * used for lookups.
+ *
+ * Note that due to the way the CPU operates on memory, best lookup performance
+ * will be achieved when RTE_EFD_VALUE_NUM_BITS is a multiple of 8.
+ * These values align the hash indexes on 16-byte boundaries.
+ * The greatest performance drop is moving from 8->9 bits, 16->17 bits, etc.
+ *
+ * This value must be between 1 and 32
+ */
+#ifndef RTE_EFD_VALUE_NUM_BITS
+#define RTE_EFD_VALUE_NUM_BITS (8)
+#endif
+
+/*
+ * EFD_TARGET_GROUP_NUM_RULES:
+ * Adjusts how many groups/chunks are allocated at table creation time
+ * to support the requested number of rules. Higher values pack entries
+ * more tightly in memory, resulting in a smaller memory footprint
+ * for the online table.
+ * This comes at the cost of lower insert/update performance.
+ *
+ * EFD_MAX_GROUP_NUM_RULES:
+ * This adjusts the amount of offline memory allocated to store key/value
+ * pairs for the table. The recommended numbers are upper-bounds for
+ * this parameter
+ * - any higher and it becomes very unlikely that a perfect hash function
+ * can be found for that group size. This value should be at
+ * least 40% larger than EFD_TARGET_GROUP_NUM_RULES
+ *
+ * Recommended values for various lookuptable and hashfunc sizes are:
+ *
+ * HASH_FUNC_SIZE = 16, LOOKUPTBL_SIZE = 16:
+ * EFD_TARGET_GROUP_NUM_RULES = 22
+ * EFD_MAX_GROUP_NUM_RULES = 28
+ */
+#define EFD_TARGET_GROUP_NUM_RULES (22)
+#define EFD_MAX_GROUP_NUM_RULES (28LU)
+
+#define EFD_MIN_BALANCED_NUM_RULES 5
+
+/**
+ * Maximum number of keys that can be looked up in one call to efd_lookup_bulk
+ */
+#ifndef RTE_EFD_BURST_MAX
+#define RTE_EFD_BURST_MAX (32)
+#endif
+
+/** Maximum number of characters in efd name.*/
+#define RTE_EFD_NAMESIZE 32
+
+#if (RTE_EFD_VALUE_NUM_BITS > 0 && RTE_EFD_VALUE_NUM_BITS <= 8)
+typedef uint8_t efd_value_t;
+#elif (RTE_EFD_VALUE_NUM_BITS > 8 && RTE_EFD_VALUE_NUM_BITS <= 16)
+typedef uint16_t efd_value_t;
+#elif (RTE_EFD_VALUE_NUM_BITS > 16 && RTE_EFD_VALUE_NUM_BITS <= 32)
+typedef uint32_t efd_value_t;
+#else
+#error("RTE_EFD_VALUE_NUM_BITS must be in the range [1:32]")
+#endif
+
+#define EFD_LOOKUPTBL_SHIFT (32 - 4)
+typedef uint16_t efd_lookuptbl_t;
+typedef uint16_t efd_hashfunc_t;
+
+/**
+ * Creates an EFD table with a single offline region and multiple per-socket
+ * internally-managed copies of the online table used for lookups
+ *
+ * @param name
+ * EFD table name
+ * @param max_num_rules
+ * Minimum number of rules the table should be sized to hold.
+ * Will be rounded up to the next smallest valid table size
+ * @param key_len
+ * Length of the key
+ * @param online_cpu_socket_bitmask
+ * Bitmask specifying which sockets should get a copy of the online table.
+ * LSB = socket 0, etc.
+ * @param offline_cpu_socket
+ * Identifies the socket where the offline table will be allocated
+ * (and most efficiently accessed in the case of updates/insertions)
+ *
+ * @return
+ * EFD table, or NULL if table allocation failed or the bitmask is invalid
+ */
+struct rte_efd_table *
+rte_efd_create(const char *name, uint32_t max_num_rules, uint32_t key_len,
+ uint8_t online_cpu_socket_bitmask, uint8_t offline_cpu_socket);
+
+/**
+ * Releases the resources from an EFD table
+ *
+ * @param table
+ * Table to free
+ */
+void
+rte_efd_free(struct rte_efd_table *table);
+
+/**
+ * Find an existing EFD table object and return a pointer to it.
+ *
+ * @param name
+ * Name of the EFD table as passed to rte_efd_create()
+ * @return
+ * Pointer to EFD table or NULL if object not found
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - ENOENT - value not available for return
+ */
+struct rte_efd_table*
+rte_efd_find_existing(const char *name);
+
+#define RTE_EFD_UPDATE_WARN_GROUP_FULL (1)
+#define RTE_EFD_UPDATE_NO_CHANGE (2)
+#define RTE_EFD_UPDATE_FAILED (3)
+
+/**
+ * Computes an updated table entry for the supplied key/value pair.
+ * The update is then immediately applied to the provided table and
+ * all socket-local copies of the chunks are updated.
+ * This operation is not multi-thread safe
+ * and should only be called one from thread.
+ *
+ * @param table
+ * EFD table to reference
+ * @param socket_id
+ * Socket ID to use to lookup existing value (ideally caller's socket id)
+ * @param key
+ * EFD table key to modify
+ * @param value
+ * Value to associate with the key
+ *
+ * @return
+ * RTE_EFD_UPDATE_WARN_GROUP_FULL
+ * Operation is insert, and the last available space in the
+ * key's group was just used
+ * Future inserts may fail as groups fill up
+ * This operation was still successful, and entry contains a valid update
+ * RTE_EFD_UPDATE_FAILED
+ * Either the EFD failed to find a suitable perfect hash or the group was full
+ * This is a fatal error, and the table is now in an indeterminite state
+ * RTE_EFD_UPDATE_NO_CHANGE
+ * Operation resulted in no change to the table (same value already exists)
+ * 0 - success
+ */
+int
+rte_efd_update(struct rte_efd_table *table, unsigned int socket_id,
+ const void *key, efd_value_t value);
+
+/**
+ * Removes any value currently associated with the specified key from the table
+ * This operation is not multi-thread safe
+ * and should only be called from one thread.
+ *
+ * @param table
+ * EFD table to reference
+ * @param socket_id
+ * Socket ID to use to lookup existing value (ideally caller's socket id)
+ * @param key
+ * EFD table key to delete
+ * @param prev_value
+ * If not NULL, will store the previous value here before deleting it
+ *
+ * @return
+ * 0 - successfully found and deleted the key
+ * nonzero otherwise
+ */
+int
+rte_efd_delete(struct rte_efd_table *table, unsigned int socket_id,
+ const void *key, efd_value_t *prev_value);
+
+/**
+ * Looks up the value associated with a key
+ * This operation is multi-thread safe.
+ *
+ * NOTE: Lookups will *always* succeed - this is a property of
+ * using a perfect hash table.
+ * If the specified key was never inserted, a pseudorandom answer will be returned.
+ * There is no way to know based on the lookup if the key was ever inserted
+ * originally, so this must be tracked elsewhere.
+ *
+ * @param table
+ * EFD table to reference
+ * @param socket_id
+ * Socket ID to use to lookup existing value (ideally caller's socket id)
+ * @param key
+ * EFD table key to look up
+ *
+ * @return
+ * Value associated with the key, or random junk if they key was never inserted
+ */
+efd_value_t
+rte_efd_lookup(const struct rte_efd_table *table, unsigned int socket_id,
+ const void *key);
+
+/**
+ * Looks up the value associated with several keys.
+ * This operation is multi-thread safe.
+ *
+ * NOTE: Lookups will *always* succeed - this is a property of
+ * using a perfect hash table.
+ * If the specified key was never inserted, a pseudorandom answer will be returned.
+ * There is no way to know based on the lookup if the key was ever inserted
+ * originally, so this must be tracked elsewhere.
+ *
+ * @param table
+ * EFD table to reference
+ * @param socket_id
+ * Socket ID to use to lookup existing value (ideally caller's socket id)
+ * @param num_keys
+ * Number of keys in the key_list array, must be less than RTE_EFD_BURST_MAX
+ * @param key_list
+ * Array of num_keys pointers which point to keys to look up
+ * @param value_list
+ * Array of size num_keys where lookup values will be stored
+ */
+void
+rte_efd_lookup_bulk(const struct rte_efd_table *table, unsigned int socket_id,
+ int num_keys, const void **key_list,
+ efd_value_t *value_list);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EFD_H_ */
diff --git a/lib/librte_efd/rte_efd_version.map b/lib/librte_efd/rte_efd_version.map
new file mode 100644
index 00000000..ae60a641
--- /dev/null
+++ b/lib/librte_efd/rte_efd_version.map
@@ -0,0 +1,13 @@
+DPDK_17.02 {
+ global:
+
+ rte_efd_create;
+ rte_efd_delete;
+ rte_efd_find_existing;
+ rte_efd_free;
+ rte_efd_lookup;
+ rte_efd_lookup_bulk;
+ rte_efd_update;
+
+ local: *;
+};
diff --git a/lib/librte_efd/rte_efd_x86.h b/lib/librte_efd/rte_efd_x86.h
new file mode 100644
index 00000000..34f37d73
--- /dev/null
+++ b/lib/librte_efd/rte_efd_x86.h
@@ -0,0 +1,86 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* rte_efd_x86.h
+ * This file holds all x86 specific EFD functions
+ */
+#include <immintrin.h>
+
+#if (RTE_EFD_VALUE_NUM_BITS == 8 || RTE_EFD_VALUE_NUM_BITS == 16 || \
+ RTE_EFD_VALUE_NUM_BITS == 24 || RTE_EFD_VALUE_NUM_BITS == 32)
+#define EFD_LOAD_SI128(val) _mm_load_si128(val)
+#else
+#define EFD_LOAD_SI128(val) _mm_lddqu_si128(val)
+#endif
+
+static inline efd_value_t
+efd_lookup_internal_avx2(const efd_hashfunc_t *group_hash_idx,
+ const efd_lookuptbl_t *group_lookup_table,
+ const uint32_t hash_val_a, const uint32_t hash_val_b)
+{
+#ifdef RTE_MACHINE_CPUFLAG_AVX2
+ efd_value_t value = 0;
+ uint32_t i = 0;
+ __m256i vhash_val_a = _mm256_set1_epi32(hash_val_a);
+ __m256i vhash_val_b = _mm256_set1_epi32(hash_val_b);
+
+ for (; i < RTE_EFD_VALUE_NUM_BITS; i += 8) {
+ __m256i vhash_idx =
+ _mm256_cvtepu16_epi32(EFD_LOAD_SI128(
+ (__m128i const *) &group_hash_idx[i]));
+ __m256i vlookup_table = _mm256_cvtepu16_epi32(
+ EFD_LOAD_SI128((__m128i const *)
+ &group_lookup_table[i]));
+ __m256i vhash = _mm256_add_epi32(vhash_val_a,
+ _mm256_mullo_epi32(vhash_idx, vhash_val_b));
+ __m256i vbucket_idx = _mm256_srli_epi32(vhash,
+ EFD_LOOKUPTBL_SHIFT);
+ __m256i vresult = _mm256_srlv_epi32(vlookup_table,
+ vbucket_idx);
+
+ value |= (_mm256_movemask_ps(
+ (__m256) _mm256_slli_epi32(vresult, 31))
+ & ((1 << (RTE_EFD_VALUE_NUM_BITS - i)) - 1)) << i;
+ }
+
+ return value;
+#else
+ RTE_SET_USED(group_hash_idx);
+ RTE_SET_USED(group_lookup_table);
+ RTE_SET_USED(hash_val_a);
+ RTE_SET_USED(hash_val_b);
+ /* Return dummy value, only to avoid compilation breakage */
+ return 0;
+#endif
+
+}
diff --git a/lib/librte_ether/Makefile b/lib/librte_ether/Makefile
index efe1e5fe..93fdde10 100644
--- a/lib/librte_ether/Makefile
+++ b/lib/librte_ether/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -41,18 +41,20 @@ CFLAGS += $(WERROR_FLAGS)
EXPORT_MAP := rte_ether_version.map
-LIBABIVER := 5
+LIBABIVER := 6
SRCS-y += rte_ethdev.c
+SRCS-y += rte_flow.c
#
# Export include files
#
SYMLINK-y-include += rte_ethdev.h
+SYMLINK-y-include += rte_ethdev_pci.h
+SYMLINK-y-include += rte_ethdev_vdev.h
SYMLINK-y-include += rte_eth_ctrl.h
SYMLINK-y-include += rte_dev_info.h
-
-# this lib depends upon:
-DEPDIRS-y += lib/librte_net lib/librte_eal lib/librte_mempool lib/librte_ring lib/librte_mbuf
+SYMLINK-y-include += rte_flow.h
+SYMLINK-y-include += rte_flow_driver.h
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_ether/rte_eth_ctrl.h b/lib/librte_ether/rte_eth_ctrl.h
index fe80eb01..83869042 100644
--- a/lib/librte_ether/rte_eth_ctrl.h
+++ b/lib/librte_ether/rte_eth_ctrl.h
@@ -99,6 +99,7 @@ enum rte_filter_type {
RTE_ETH_FILTER_FDIR,
RTE_ETH_FILTER_HASH,
RTE_ETH_FILTER_L2_TUNNEL,
+ RTE_ETH_FILTER_GENERIC,
RTE_ETH_FILTER_MAX
};
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 5a317594..83898a8f 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -138,10 +138,18 @@ enum {
STAT_QMAP_RX
};
-enum {
- DEV_DETACHED = 0,
- DEV_ATTACHED
-};
+uint8_t
+rte_eth_find_next(uint8_t port_id)
+{
+ while (port_id < RTE_MAX_ETHPORTS &&
+ rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
+ port_id++;
+
+ if (port_id >= RTE_MAX_ETHPORTS)
+ return RTE_MAX_ETHPORTS;
+
+ return port_id;
+}
static void
rte_eth_dev_data_alloc(void)
@@ -170,7 +178,7 @@ rte_eth_dev_allocated(const char *name)
unsigned i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
+ if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
strcmp(rte_eth_devices[i].data->name, name) == 0)
return &rte_eth_devices[i];
}
@@ -183,7 +191,7 @@ rte_eth_dev_find_free_port(void)
unsigned i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if (rte_eth_devices[i].attached == DEV_DETACHED)
+ if (rte_eth_devices[i].state == RTE_ETH_DEV_UNUSED)
return i;
}
return RTE_MAX_ETHPORTS;
@@ -195,7 +203,8 @@ eth_dev_get(uint8_t port_id)
struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
eth_dev->data = &rte_eth_dev_data[port_id];
- eth_dev->attached = DEV_ATTACHED;
+ eth_dev->state = RTE_ETH_DEV_ATTACHED;
+ TAILQ_INIT(&(eth_dev->link_intr_cbs));
eth_dev_last_created_port = port_id;
nb_ports++;
@@ -224,9 +233,11 @@ rte_eth_dev_allocate(const char *name)
return NULL;
}
+ memset(&rte_eth_dev_data[port_id], 0, sizeof(struct rte_eth_dev_data));
eth_dev = eth_dev_get(port_id);
snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
eth_dev->data->port_id = port_id;
+ eth_dev->data->mtu = ETHER_MTU;
return eth_dev;
}
@@ -236,8 +247,8 @@ rte_eth_dev_allocate(const char *name)
* makes sure that the same device would have the same port id both
* in the primary and secondary process.
*/
-static struct rte_eth_dev *
-eth_dev_attach_secondary(const char *name)
+struct rte_eth_dev *
+rte_eth_dev_attach_secondary(const char *name)
{
uint8_t i;
struct rte_eth_dev *eth_dev;
@@ -268,121 +279,16 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
if (eth_dev == NULL)
return -EINVAL;
- eth_dev->attached = DEV_DETACHED;
+ eth_dev->state = RTE_ETH_DEV_UNUSED;
nb_ports--;
return 0;
}
int
-rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
- struct rte_pci_device *pci_dev)
-{
- struct eth_driver *eth_drv;
- struct rte_eth_dev *eth_dev;
- char ethdev_name[RTE_ETH_NAME_MAX_LEN];
-
- int diag;
-
- eth_drv = (struct eth_driver *)pci_drv;
-
- rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
- sizeof(ethdev_name));
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- eth_dev = rte_eth_dev_allocate(ethdev_name);
- if (eth_dev == NULL)
- return -ENOMEM;
-
- eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
- eth_drv->dev_private_size,
- RTE_CACHE_LINE_SIZE);
- if (eth_dev->data->dev_private == NULL)
- rte_panic("Cannot allocate memzone for private port data\n");
- } else {
- eth_dev = eth_dev_attach_secondary(ethdev_name);
- if (eth_dev == NULL) {
- /*
- * if we failed to attach a device, it means the
- * device is skipped in primary process, due to
- * some errors. If so, we return a positive value,
- * to let EAL skip it for the secondary process
- * as well.
- */
- return 1;
- }
- }
- eth_dev->pci_dev = pci_dev;
- eth_dev->driver = eth_drv;
- eth_dev->data->rx_mbuf_alloc_failed = 0;
-
- /* init user callbacks */
- TAILQ_INIT(&(eth_dev->link_intr_cbs));
-
- /*
- * Set the default MTU.
- */
- eth_dev->data->mtu = ETHER_MTU;
-
- /* Invoke PMD device initialization function */
- diag = (*eth_drv->eth_dev_init)(eth_dev);
- if (diag == 0)
- return 0;
-
- RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
- pci_drv->driver.name,
- (unsigned) pci_dev->id.vendor_id,
- (unsigned) pci_dev->id.device_id);
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
- rte_eth_dev_release_port(eth_dev);
- return diag;
-}
-
-int
-rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
-{
- const struct eth_driver *eth_drv;
- struct rte_eth_dev *eth_dev;
- char ethdev_name[RTE_ETH_NAME_MAX_LEN];
- int ret;
-
- if (pci_dev == NULL)
- return -EINVAL;
-
- rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
- sizeof(ethdev_name));
-
- eth_dev = rte_eth_dev_allocated(ethdev_name);
- if (eth_dev == NULL)
- return -ENODEV;
-
- eth_drv = (const struct eth_driver *)pci_dev->driver;
-
- /* Invoke PMD device uninit function */
- if (*eth_drv->eth_dev_uninit) {
- ret = (*eth_drv->eth_dev_uninit)(eth_dev);
- if (ret)
- return ret;
- }
-
- /* free ether device */
- rte_eth_dev_release_port(eth_dev);
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(eth_dev->data->dev_private);
-
- eth_dev->pci_dev = NULL;
- eth_dev->driver = NULL;
- eth_dev->data = NULL;
-
- return 0;
-}
-
-int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
if (port_id >= RTE_MAX_ETHPORTS ||
- rte_eth_devices[port_id].attached != DEV_ATTACHED)
+ rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
return 0;
else
return 1;
@@ -434,9 +340,7 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
return -ENODEV;
*port_id = RTE_MAX_ETHPORTS;
-
- for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
-
+ RTE_ETH_FOREACH_DEV(i) {
if (!strncmp(name,
rte_eth_dev_data[i].name, strlen(name))) {
@@ -460,8 +364,8 @@ rte_eth_dev_is_detachable(uint8_t port_id)
case RTE_KDRV_UIO_GENERIC:
case RTE_KDRV_NIC_UIO:
case RTE_KDRV_NONE:
- break;
case RTE_KDRV_VFIO:
+ break;
default:
return -ENOTSUP;
}
@@ -588,6 +492,9 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
for (i = nb_queues; i < old_nb_queues; i++)
(*dev->dev_ops->rx_queue_release)(rxq[i]);
+
+ rte_free(dev->data->rx_queues);
+ dev->data->rx_queues = NULL;
}
dev->data->nb_rx_queues = nb_queues;
return 0;
@@ -739,6 +646,9 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
for (i = nb_queues; i < old_nb_queues; i++)
(*dev->dev_ops->tx_queue_release)(txq[i]);
+
+ rte_free(dev->data->tx_queues);
+ dev->data->tx_queues = NULL;
}
dev->data->nb_tx_queues = nb_queues;
return 0;
@@ -839,16 +749,19 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
return -EINVAL;
}
- /*
- * If link state interrupt is enabled, check that the
- * device supports it.
- */
+ /* Check that the device supports requested interrupts */
if ((dev_conf->intr_conf.lsc == 1) &&
(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
dev->data->drv_name);
return -EINVAL;
}
+ if ((dev_conf->intr_conf.rmv == 1) &&
+ (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
+ RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
+ dev->data->drv_name);
+ return -EINVAL;
+ }
/*
* If jumbo frames are enabled, check that the maximum RX packet
@@ -909,39 +822,61 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
return 0;
}
+void
+_rte_eth_dev_reset(struct rte_eth_dev *dev)
+{
+ if (dev->data->dev_started) {
+ RTE_PMD_DEBUG_TRACE(
+ "port %d must be stopped to allow reset\n",
+ dev->data->port_id);
+ return;
+ }
+
+ rte_eth_dev_rx_queue_config(dev, 0);
+ rte_eth_dev_tx_queue_config(dev, 0);
+
+ memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
+}
+
static void
rte_eth_dev_config_restore(uint8_t port_id)
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
- struct ether_addr addr;
+ struct ether_addr *addr;
uint16_t i;
uint32_t pool = 0;
+ uint64_t pool_mask;
dev = &rte_eth_devices[port_id];
rte_eth_dev_info_get(port_id, &dev_info);
- if (RTE_ETH_DEV_SRIOV(dev).active)
- pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
-
- /* replay MAC address configuration */
- for (i = 0; i < dev_info.max_mac_addrs; i++) {
- addr = dev->data->mac_addrs[i];
-
- /* skip zero address */
- if (is_zero_ether_addr(&addr))
- continue;
-
- /* add address to the hardware */
- if (*dev->dev_ops->mac_addr_add &&
- (dev->data->mac_pool_sel[i] & (1ULL << pool)))
- (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
- else {
- RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
- port_id);
- /* exit the loop but not return an error */
- break;
+ /* replay MAC address configuration including default MAC */
+ addr = &dev->data->mac_addrs[0];
+ if (*dev->dev_ops->mac_addr_set != NULL)
+ (*dev->dev_ops->mac_addr_set)(dev, addr);
+ else if (*dev->dev_ops->mac_addr_add != NULL)
+ (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
+
+ if (*dev->dev_ops->mac_addr_add != NULL) {
+ for (i = 1; i < dev_info.max_mac_addrs; i++) {
+ addr = &dev->data->mac_addrs[i];
+
+ /* skip zero address */
+ if (is_zero_ether_addr(addr))
+ continue;
+
+ pool = 0;
+ pool_mask = dev->data->mac_pool_sel[i];
+
+ do {
+ if (pool_mask & 1ULL)
+ (*dev->dev_ops->mac_addr_add)(dev,
+ addr, i, pool);
+ pool_mask >>= 1;
+ pool++;
+ } while (pool_mask);
}
}
@@ -1051,8 +986,10 @@ rte_eth_dev_close(uint8_t port_id)
dev->data->dev_started = 0;
(*dev->dev_ops->dev_close)(dev);
+ dev->data->nb_rx_queues = 0;
rte_free(dev->data->rx_queues);
dev->data->rx_queues = NULL;
+ dev->data->nb_tx_queues = 0;
rte_free(dev->data->tx_queues);
dev->data->tx_queues = NULL;
}
@@ -1067,6 +1004,7 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
uint32_t mbp_buf_size;
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
+ void **rxq;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
@@ -1125,6 +1063,14 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
return -EINVAL;
}
+ rxq = dev->data->rx_queues;
+ if (rxq[rx_queue_id]) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
+ -ENOTSUP);
+ (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
+ rxq[rx_queue_id] = NULL;
+ }
+
if (rx_conf == NULL)
rx_conf = &dev_info.default_rxconf;
@@ -1146,6 +1092,7 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
+ void **txq;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
@@ -1178,6 +1125,14 @@ rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
return -EINVAL;
}
+ txq = dev->data->tx_queues;
+ if (txq[tx_queue_id]) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
+ -ENOTSUP);
+ (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
+ txq[tx_queue_id] = NULL;
+ }
+
if (tx_conf == NULL)
tx_conf = &dev_info.default_txconf;
@@ -1234,6 +1189,20 @@ rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
return ret;
}
+int
+rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+ /* Validate Input Data. Bail if not valid or not supported. */
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
+
+ /* Call driver to free pending mbufs. */
+ return (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
+ free_cnt);
+}
+
void
rte_eth_promiscuous_enable(uint8_t port_id)
{
@@ -1393,12 +1362,19 @@ get_xstats_count(uint8_t port_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
+ if (dev->dev_ops->xstats_get_names_by_id != NULL) {
+ count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
+ NULL, 0);
+ if (count < 0)
+ return count;
+ }
if (dev->dev_ops->xstats_get_names != NULL) {
count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
if (count < 0)
return count;
} else
count = 0;
+
count += RTE_NB_STATS;
count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
RTE_NB_RXQ_STATS;
@@ -1408,9 +1384,170 @@ get_xstats_count(uint8_t port_id)
}
int
+rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
+ uint64_t *id)
+{
+ int cnt_xstats, idx_xstat;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ if (!id) {
+ RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
+ return -ENOMEM;
+ }
+
+ if (!xstat_name) {
+ RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
+ return -ENOMEM;
+ }
+
+ /* Get count */
+ cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
+ if (cnt_xstats < 0) {
+ RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
+ return -ENODEV;
+ }
+
+ /* Get id-name lookup table */
+ struct rte_eth_xstat_name xstats_names[cnt_xstats];
+
+ if (cnt_xstats != rte_eth_xstats_get_names_by_id(
+ port_id, xstats_names, cnt_xstats, NULL)) {
+ RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
+ return -1;
+ }
+
+ for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
+ if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
+ *id = idx_xstat;
+ return 0;
+ };
+ }
+
+ return -EINVAL;
+}
+
+int
+rte_eth_xstats_get_names_by_id(uint8_t port_id,
+ struct rte_eth_xstat_name *xstats_names, unsigned int size,
+ uint64_t *ids)
+{
+ /* Get all xstats */
+ if (!ids) {
+ struct rte_eth_dev *dev;
+ int cnt_used_entries;
+ int cnt_expected_entries;
+ int cnt_driver_entries;
+ uint32_t idx, id_queue;
+ uint16_t num_q;
+
+ cnt_expected_entries = get_xstats_count(port_id);
+ if (xstats_names == NULL || cnt_expected_entries < 0 ||
+ (int)size < cnt_expected_entries)
+ return cnt_expected_entries;
+
+ /* port_id checked in get_xstats_count() */
+ dev = &rte_eth_devices[port_id];
+ cnt_used_entries = 0;
+
+ for (idx = 0; idx < RTE_NB_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "%s", rte_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+ num_q = RTE_MIN(dev->data->nb_rx_queues,
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (id_queue = 0; id_queue < num_q; id_queue++) {
+ for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "rx_q%u%s",
+ id_queue,
+ rte_rxq_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+
+ }
+ num_q = RTE_MIN(dev->data->nb_tx_queues,
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (id_queue = 0; id_queue < num_q; id_queue++) {
+ for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "tx_q%u%s",
+ id_queue,
+ rte_txq_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+ }
+
+ if (dev->dev_ops->xstats_get_names_by_id != NULL) {
+ /* If there are any driver-specific xstats, append them
+ * to end of list.
+ */
+ cnt_driver_entries =
+ (*dev->dev_ops->xstats_get_names_by_id)(
+ dev,
+ xstats_names + cnt_used_entries,
+ NULL,
+ size - cnt_used_entries);
+ if (cnt_driver_entries < 0)
+ return cnt_driver_entries;
+ cnt_used_entries += cnt_driver_entries;
+
+ } else if (dev->dev_ops->xstats_get_names != NULL) {
+ /* If there are any driver-specific xstats, append them
+ * to end of list.
+ */
+ cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
+ dev,
+ xstats_names + cnt_used_entries,
+ size - cnt_used_entries);
+ if (cnt_driver_entries < 0)
+ return cnt_driver_entries;
+ cnt_used_entries += cnt_driver_entries;
+ }
+
+ return cnt_used_entries;
+ }
+ /* Get only xstats given by IDS */
+ else {
+ uint16_t len, i;
+ struct rte_eth_xstat_name *xstats_names_copy;
+
+ len = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
+
+ xstats_names_copy =
+ malloc(sizeof(struct rte_eth_xstat_name) * len);
+ if (!xstats_names_copy) {
+ RTE_PMD_DEBUG_TRACE(
+ "ERROR: can't allocate memory for values_copy\n");
+ free(xstats_names_copy);
+ return -1;
+ }
+
+ rte_eth_xstats_get_names_by_id(port_id, xstats_names_copy,
+ len, NULL);
+
+ for (i = 0; i < size; i++) {
+ if (ids[i] >= len) {
+ RTE_PMD_DEBUG_TRACE(
+ "ERROR: id value isn't valid\n");
+ return -1;
+ }
+ strcpy(xstats_names[i].name,
+ xstats_names_copy[ids[i]].name);
+ }
+ free(xstats_names_copy);
+ return size;
+ }
+}
+
+int
rte_eth_xstats_get_names(uint8_t port_id,
struct rte_eth_xstat_name *xstats_names,
- unsigned size)
+ unsigned int size)
{
struct rte_eth_dev *dev;
int cnt_used_entries;
@@ -1474,13 +1611,139 @@ rte_eth_xstats_get_names(uint8_t port_id,
/* retrieve ethdev extended statistics */
int
+rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids, uint64_t *values,
+ unsigned int n)
+{
+ /* If need all xstats */
+ if (!ids) {
+ struct rte_eth_stats eth_stats;
+ struct rte_eth_dev *dev;
+ unsigned int count = 0, i, q;
+ signed int xcount = 0;
+ uint64_t val, *stats_ptr;
+ uint16_t nb_rxqs, nb_txqs;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ nb_rxqs = RTE_MIN(dev->data->nb_rx_queues,
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ nb_txqs = RTE_MIN(dev->data->nb_tx_queues,
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+ /* Return generic statistics */
+ count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
+ (nb_txqs * RTE_NB_TXQ_STATS);
+
+
+ /* implemented by the driver */
+ if (dev->dev_ops->xstats_get_by_id != NULL) {
+ /* Retrieve the xstats from the driver at the end of the
+ * xstats struct. Retrieve all xstats.
+ */
+ xcount = (*dev->dev_ops->xstats_get_by_id)(dev,
+ NULL,
+ values ? values + count : NULL,
+ (n > count) ? n - count : 0);
+
+ if (xcount < 0)
+ return xcount;
+ /* implemented by the driver */
+ } else if (dev->dev_ops->xstats_get != NULL) {
+ /* Retrieve the xstats from the driver at the end of the
+ * xstats struct. Retrieve all xstats.
+ * Compatibility for PMD without xstats_get_by_ids
+ */
+ unsigned int size = (n > count) ? n - count : 1;
+ struct rte_eth_xstat xstats[size];
+
+ xcount = (*dev->dev_ops->xstats_get)(dev,
+ values ? xstats : NULL, size);
+
+ if (xcount < 0)
+ return xcount;
+
+ if (values != NULL)
+ for (i = 0 ; i < (unsigned int)xcount; i++)
+ values[i + count] = xstats[i].value;
+ }
+
+ if (n < count + xcount || values == NULL)
+ return count + xcount;
+
+ /* now fill the xstats structure */
+ count = 0;
+ rte_eth_stats_get(port_id, &eth_stats);
+
+ /* global stats */
+ for (i = 0; i < RTE_NB_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(&eth_stats,
+ rte_stats_strings[i].offset);
+ val = *stats_ptr;
+ values[count++] = val;
+ }
+
+ /* per-rxq stats */
+ for (q = 0; q < nb_rxqs; q++) {
+ for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(&eth_stats,
+ rte_rxq_stats_strings[i].offset +
+ q * sizeof(uint64_t));
+ val = *stats_ptr;
+ values[count++] = val;
+ }
+ }
+
+ /* per-txq stats */
+ for (q = 0; q < nb_txqs; q++) {
+ for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(&eth_stats,
+ rte_txq_stats_strings[i].offset +
+ q * sizeof(uint64_t));
+ val = *stats_ptr;
+ values[count++] = val;
+ }
+ }
+
+ return count + xcount;
+ }
+ /* Need only xstats given by IDS array */
+ else {
+ uint16_t i, size;
+ uint64_t *values_copy;
+
+ size = rte_eth_xstats_get_by_id(port_id, NULL, NULL, 0);
+
+ values_copy = malloc(sizeof(*values_copy) * size);
+ if (!values_copy) {
+ RTE_PMD_DEBUG_TRACE(
+ "ERROR: can't allocate memory for values_copy\n");
+ return -1;
+ }
+
+ rte_eth_xstats_get_by_id(port_id, NULL, values_copy, size);
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= size) {
+ RTE_PMD_DEBUG_TRACE(
+ "ERROR: id value isn't valid\n");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ free(values_copy);
+ return n;
+ }
+}
+
+int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
- unsigned n)
+ unsigned int n)
{
struct rte_eth_stats eth_stats;
struct rte_eth_dev *dev;
- unsigned count = 0, i, q;
- signed xcount = 0;
+ unsigned int count = 0, i, q;
+ signed int xcount = 0;
uint64_t val, *stats_ptr;
uint16_t nb_rxqs, nb_txqs;
@@ -1606,6 +1869,18 @@ rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
STAT_QMAP_RX);
}
+int
+rte_eth_dev_fw_version_get(uint8_t port_id, char *fw_version, size_t fw_size)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
+ return (*dev->dev_ops->fw_version_get)(dev, fw_version, fw_size);
+}
+
void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
@@ -1625,7 +1900,6 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
- dev_info->pci_dev = dev->pci_dev;
dev_info->driver_name = dev->data->drv_name;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
@@ -1883,13 +2157,7 @@ rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
if (!reta_conf)
return -EINVAL;
- if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
- RTE_PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
- RTE_RETA_GROUP_SIZE);
- return -EINVAL;
- }
-
- num = reta_size / RTE_RETA_GROUP_SIZE;
+ num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
for (i = 0; i < num; i++) {
if (reta_conf[i].mask)
return 0;
@@ -2101,6 +2369,7 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
struct rte_eth_dev *dev;
int index;
uint64_t pool_mask;
+ int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
@@ -2133,15 +2402,17 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
}
/* Update NIC */
- (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
+ ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
- /* Update address in NIC data structure */
- ether_addr_copy(addr, &dev->data->mac_addrs[index]);
+ if (ret == 0) {
+ /* Update address in NIC data structure */
+ ether_addr_copy(addr, &dev->data->mac_addrs[index]);
- /* Update pool bitmap in NIC data structure */
- dev->data->mac_pool_sel[index] |= (1ULL << pool);
+ /* Update pool bitmap in NIC data structure */
+ dev->data->mac_pool_sel[index] |= (1ULL << pool);
+ }
- return 0;
+ return ret;
}
int
@@ -2194,32 +2465,6 @@ rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
return 0;
}
-int
-rte_eth_dev_set_vf_rxmode(uint8_t port_id, uint16_t vf,
- uint16_t rx_mode, uint8_t on)
-{
- uint16_t num_vfs;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- rte_eth_dev_info_get(port_id, &dev_info);
-
- num_vfs = dev_info.max_vfs;
- if (vf > num_vfs) {
- RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
- return -EINVAL;
- }
-
- if (rx_mode == 0) {
- RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask ca not be zero\n");
- return -EINVAL;
- }
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
- return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
-}
/*
* Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
@@ -2309,76 +2554,6 @@ rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
}
-int
-rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
-{
- uint16_t num_vfs;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- rte_eth_dev_info_get(port_id, &dev_info);
-
- num_vfs = dev_info.max_vfs;
- if (vf > num_vfs) {
- RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
- return -EINVAL;
- }
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
- return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
-}
-
-int
-rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
-{
- uint16_t num_vfs;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- rte_eth_dev_info_get(port_id, &dev_info);
-
- num_vfs = dev_info.max_vfs;
- if (vf > num_vfs) {
- RTE_PMD_DEBUG_TRACE("set pool tx:invalid pool id=%d\n", vf);
- return -EINVAL;
- }
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
- return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
-}
-
-int
-rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
- uint64_t vf_mask, uint8_t vlan_on)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- if (vlan_id > ETHER_MAX_VLAN_ID) {
- RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
- vlan_id);
- return -EINVAL;
- }
-
- if (vf_mask == 0) {
- RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
- return -EINVAL;
- }
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
- return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
- vf_mask, vlan_on);
-}
-
int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
uint16_t tx_rate)
{
@@ -2409,45 +2584,12 @@ int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
}
-int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
- uint64_t q_msk)
-{
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
- struct rte_eth_link link;
-
- if (q_msk == 0)
- return 0;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- rte_eth_dev_info_get(port_id, &dev_info);
- link = dev->data->dev_link;
-
- if (vf > dev_info.max_vfs) {
- RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
- "invalid vf id=%d\n", port_id, vf);
- return -EINVAL;
- }
-
- if (tx_rate > link.link_speed) {
- RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
- "bigger than link speed= %d\n",
- tx_rate, link.link_speed);
- return -EINVAL;
- }
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
- return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
-}
-
int
rte_eth_mirror_rule_set(uint8_t port_id,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on)
{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (mirror_conf->rule_type == 0) {
@@ -2483,7 +2625,7 @@ rte_eth_mirror_rule_set(uint8_t port_id,
int
rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
@@ -2590,7 +2732,7 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
dev_cb = *cb_lst;
cb_lst->active = 1;
if (cb_arg != NULL)
- dev_cb.cb_arg = (void *) cb_arg;
+ dev_cb.cb_arg = cb_arg;
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
@@ -2613,7 +2755,13 @@ rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- intr_handle = &dev->pci_dev->intr_handle;
+
+ if (!dev->intr_handle) {
+ RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
+ return -ENOTSUP;
+ }
+
+ intr_handle = dev->intr_handle;
if (!intr_handle->intr_vec) {
RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
return -EPERM;
@@ -2641,7 +2789,7 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.driver.name, ring_name,
+ dev->data->drv_name, ring_name,
dev->data->port_id, queue_id);
mz = rte_memzone_lookup(z_name);
@@ -2673,7 +2821,12 @@ rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
return -EINVAL;
}
- intr_handle = &dev->pci_dev->intr_handle;
+ if (!dev->intr_handle) {
+ RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
+ return -ENOTSUP;
+ }
+
+ intr_handle = dev->intr_handle;
if (!intr_handle->intr_vec) {
RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
return -EPERM;
@@ -3266,26 +3419,6 @@ rte_eth_dev_get_dcb_info(uint8_t port_id,
return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}
-void
-rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
-{
- if ((eth_dev == NULL) || (pci_dev == NULL)) {
- RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
- eth_dev, pci_dev);
- return;
- }
-
- eth_dev->data->dev_flags = 0;
- if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
- eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
- if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
- eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
-
- eth_dev->data->kdrv = pci_dev->kdrv;
- eth_dev->data->numa_node = pci_dev->device.numa_node;
- eth_dev->data->drv_name = pci_dev->driver->driver.name;
-}
-
int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
struct rte_eth_l2_tunnel_conf *l2_tunnel)
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 96781792..0f38b45f 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -179,9 +179,9 @@ extern "C" {
#include <rte_log.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_dev.h>
#include <rte_devargs.h>
+#include <rte_errno.h>
#include "rte_ether.h"
#include "rte_eth_ctrl.h"
#include "rte_dev_info.h"
@@ -564,7 +564,7 @@ struct rte_eth_rss_reta_entry64 {
/**
* This enum indicates the possible number of traffic classes
- * in DCB configratioins
+ * in DCB configurations
*/
enum rte_eth_nb_tcs {
ETH_4_TCS = 4, /**< 4 TCs with DCB. */
@@ -702,6 +702,29 @@ struct rte_eth_desc_lim {
uint16_t nb_max; /**< Max allowed number of descriptors. */
uint16_t nb_min; /**< Min allowed number of descriptors. */
uint16_t nb_align; /**< Number of descriptors should be aligned to. */
+
+ /**
+ * Max allowed number of segments per whole packet.
+ *
+ * - For TSO packet this is the total number of data descriptors allowed
+ * by device.
+ *
+ * @see nb_mtu_seg_max
+ */
+ uint16_t nb_seg_max;
+
+ /**
+ * Max number of segments per one MTU.
+ *
+ * - For non-TSO packet, this is the maximum allowed number of segments
+ * in a single transmit packet.
+ *
+ * - For TSO packet each segment within the TSO may span up to this
+ * value.
+ *
+ * @see nb_seg_max
+ */
+ uint16_t nb_mtu_seg_max;
};
/**
@@ -792,9 +815,11 @@ struct rte_eth_udp_tunnel {
*/
struct rte_intr_conf {
/** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */
- uint16_t lsc;
+ uint32_t lsc:1;
/** enable/disable rxq interrupt. 0 (default) - disable, 1 enable */
- uint16_t rxq;
+ uint32_t rxq:1;
+ /** enable/disable rmv interrupt. 0 (default) - disable, 1 enable */
+ uint32_t rmv:1;
};
/**
@@ -857,6 +882,7 @@ struct rte_eth_conf {
#define DEV_RX_OFFLOAD_TCP_LRO 0x00000010
#define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020
#define DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM 0x00000040
+#define DEV_RX_OFFLOAD_MACSEC_STRIP 0x00000080
/**
* TX offload capabilities of a device.
@@ -874,6 +900,9 @@ struct rte_eth_conf {
#define DEV_TX_OFFLOAD_GRE_TNL_TSO 0x00000400 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_IPIP_TNL_TSO 0x00000800 /**< Used for tunneling packet. */
#define DEV_TX_OFFLOAD_GENEVE_TNL_TSO 0x00001000 /**< Used for tunneling packet. */
+#define DEV_TX_OFFLOAD_MACSEC_INSERT 0x00002000
+
+struct rte_pci_device;
/**
* Ethernet device information
@@ -938,23 +967,26 @@ struct rte_eth_txq_info {
/**
* An Ethernet device extended statistic structure
*
- * This structure is used by ethdev->eth_xstats_get() to provide
- * statistics that are not provided in the generic rte_eth_stats
+ * This structure is used by rte_eth_xstats_get() to provide
+ * statistics that are not provided in the generic *rte_eth_stats*
* structure.
+ * It maps a name id, corresponding to an index in the array returned
+ * by rte_eth_xstats_get_names(), to a statistic value.
*/
struct rte_eth_xstat {
- uint64_t id;
- uint64_t value;
+ uint64_t id; /**< The index in xstats name array. */
+ uint64_t value; /**< The statistic counter value. */
};
/**
- * A name-key lookup element for extended statistics.
+ * A name element for extended statistics.
*
- * This structure is used to map between names and ID numbers
- * for extended ethernet statistics.
+ * An array of this structure is returned by rte_eth_xstats_get_names().
+ * It lists the names of extended statistics for a PMD. The *rte_eth_xstat*
+ * structure references these names by their array index.
*/
struct rte_eth_xstat_name {
- char name[RTE_ETH_XSTATS_NAME_SIZE];
+ char name[RTE_ETH_XSTATS_NAME_SIZE]; /**< The statistic name. */
};
#define ETH_DCB_NUM_TCS 8
@@ -1001,15 +1033,6 @@ struct rte_eth_dev_callback;
/** @internal Structure to keep track of registered callbacks */
TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
-
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-#define RTE_PMD_DEBUG_TRACE(...) \
- rte_pmd_debug_trace(__func__, __VA_ARGS__)
-#else
-#define RTE_PMD_DEBUG_TRACE(...)
-#endif
-
-
/* Macros to check for valid port */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
if (!rte_eth_dev_is_valid_port(port_id)) { \
@@ -1089,6 +1112,12 @@ typedef int (*eth_xstats_get_t)(struct rte_eth_dev *dev,
struct rte_eth_xstat *stats, unsigned n);
/**< @internal Get extended stats of an Ethernet device. */
+typedef int (*eth_xstats_get_by_id_t)(struct rte_eth_dev *dev,
+ const uint64_t *ids,
+ uint64_t *values,
+ unsigned int n);
+/**< @internal Get extended stats of an Ethernet device. */
+
typedef void (*eth_xstats_reset_t)(struct rte_eth_dev *dev);
/**< @internal Reset extended stats of an Ethernet device. */
@@ -1096,6 +1125,11 @@ typedef int (*eth_xstats_get_names_t)(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names, unsigned size);
/**< @internal Get names of extended stats of an Ethernet device. */
+typedef int (*eth_xstats_get_names_by_id_t)(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+ unsigned int size);
+/**< @internal Get names of extended stats of an Ethernet device. */
+
typedef int (*eth_queue_stats_mapping_set_t)(struct rte_eth_dev *dev,
uint16_t queue_id,
uint8_t stat_idx,
@@ -1145,11 +1179,24 @@ typedef void (*eth_queue_release_t)(void *queue);
typedef uint32_t (*eth_rx_queue_count_t)(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
-/**< @internal Get number of available descriptors on a receive queue of an Ethernet device. */
+/**< @internal Get number of used descriptors on a receive queue. */
typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
/**< @internal Check DD bit of specific RX descriptor */
+typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset);
+/**< @internal Check the status of a Rx descriptor */
+
+typedef int (*eth_tx_descriptor_status_t)(void *txq, uint16_t offset);
+/**< @internal Check the status of a Tx descriptor */
+
+typedef int (*eth_fw_version_get_t)(struct rte_eth_dev *dev,
+ char *fw_version, size_t fw_size);
+/**< @internal Get firmware information of an Ethernet device. */
+
+typedef int (*eth_tx_done_cleanup_t)(void *txq, uint32_t free_cnt);
+/**< @internal Force mbufs to be from TX ring. */
+
typedef void (*eth_rxq_info_get_t)(struct rte_eth_dev *dev,
uint16_t rx_queue_id, struct rte_eth_rxq_info *qinfo);
@@ -1191,6 +1238,11 @@ typedef uint16_t (*eth_tx_burst_t)(void *txq,
uint16_t nb_pkts);
/**< @internal Send output packets on a transmit queue of an Ethernet device. */
+typedef uint16_t (*eth_tx_prep_t)(void *txq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+/**< @internal Prepare output packets on a transmit queue of an Ethernet device. */
+
typedef int (*flow_ctrl_get_t)(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
/**< @internal Get current flow control parameter on an Ethernet device */
@@ -1230,7 +1282,7 @@ typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev);
typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index);
/**< @internal Remove MAC address from receive address register */
-typedef void (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
+typedef int (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index,
uint32_t vmdq);
@@ -1249,39 +1301,11 @@ typedef int (*eth_uc_all_hash_table_set_t)(struct rte_eth_dev *dev,
uint8_t on);
/**< @internal Set all Unicast Hash bitmap */
-typedef int (*eth_set_vf_rx_mode_t)(struct rte_eth_dev *dev,
- uint16_t vf,
- uint16_t rx_mode,
- uint8_t on);
-/**< @internal Set a VF receive mode */
-
-typedef int (*eth_set_vf_rx_t)(struct rte_eth_dev *dev,
- uint16_t vf,
- uint8_t on);
-/**< @internal Set a VF receive mode */
-
-typedef int (*eth_set_vf_tx_t)(struct rte_eth_dev *dev,
- uint16_t vf,
- uint8_t on);
-/**< @internal Enable or disable a VF transmit */
-
-typedef int (*eth_set_vf_vlan_filter_t)(struct rte_eth_dev *dev,
- uint16_t vlan,
- uint64_t vf_mask,
- uint8_t vlan_on);
-/**< @internal Set VF VLAN pool filter */
-
typedef int (*eth_set_queue_rate_limit_t)(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t tx_rate);
/**< @internal Set queue TX rate */
-typedef int (*eth_set_vf_rate_limit_t)(struct rte_eth_dev *dev,
- uint16_t vf,
- uint16_t tx_rate,
- uint64_t q_msk);
-/**< @internal Set VF TX rate */
-
typedef int (*eth_mirror_rule_set_t)(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id,
@@ -1431,11 +1455,18 @@ struct eth_dev_ops {
eth_dev_set_link_up_t dev_set_link_up; /**< Device link up. */
eth_dev_set_link_down_t dev_set_link_down; /**< Device link down. */
eth_dev_close_t dev_close; /**< Close device. */
+ eth_link_update_t link_update; /**< Get device link state. */
+
eth_promiscuous_enable_t promiscuous_enable; /**< Promiscuous ON. */
eth_promiscuous_disable_t promiscuous_disable;/**< Promiscuous OFF. */
eth_allmulticast_enable_t allmulticast_enable;/**< RX multicast ON. */
eth_allmulticast_disable_t allmulticast_disable;/**< RX multicast OF. */
- eth_link_update_t link_update; /**< Get device link state. */
+ eth_mac_addr_remove_t mac_addr_remove; /**< Remove MAC address. */
+ eth_mac_addr_add_t mac_addr_add; /**< Add a MAC address. */
+ eth_mac_addr_set_t mac_addr_set; /**< Set a MAC address. */
+ eth_set_mc_addr_list_t set_mc_addr_list; /**< set list of mcast addrs. */
+ mtu_set_t mtu_set; /**< Set MTU. */
+
eth_stats_get_t stats_get; /**< Get generic device statistics. */
eth_stats_reset_t stats_reset; /**< Reset generic device statistics. */
eth_xstats_get_t xstats_get; /**< Get extended device statistics. */
@@ -1444,109 +1475,104 @@ struct eth_dev_ops {
/**< Get names of extended statistics. */
eth_queue_stats_mapping_set_t queue_stats_mapping_set;
/**< Configure per queue stat counter mapping. */
+
eth_dev_infos_get_t dev_infos_get; /**< Get device info. */
+ eth_rxq_info_get_t rxq_info_get; /**< retrieve RX queue information. */
+ eth_txq_info_get_t txq_info_get; /**< retrieve TX queue information. */
+ eth_fw_version_get_t fw_version_get; /**< Get firmware version. */
eth_dev_supported_ptypes_get_t dev_supported_ptypes_get;
- /**< Get packet types supported and identified by device*/
- mtu_set_t mtu_set; /**< Set MTU. */
- vlan_filter_set_t vlan_filter_set; /**< Filter VLAN Setup. */
- vlan_tpid_set_t vlan_tpid_set; /**< Outer/Inner VLAN TPID Setup. */
+ /**< Get packet types supported and identified by device. */
+
+ vlan_filter_set_t vlan_filter_set; /**< Filter VLAN Setup. */
+ vlan_tpid_set_t vlan_tpid_set; /**< Outer/Inner VLAN TPID Setup. */
vlan_strip_queue_set_t vlan_strip_queue_set; /**< VLAN Stripping on queue. */
vlan_offload_set_t vlan_offload_set; /**< Set VLAN Offload. */
- vlan_pvid_set_t vlan_pvid_set; /**< Set port based TX VLAN insertion */
- eth_queue_start_t rx_queue_start;/**< Start RX for a queue.*/
- eth_queue_stop_t rx_queue_stop;/**< Stop RX for a queue.*/
- eth_queue_start_t tx_queue_start;/**< Start TX for a queue.*/
- eth_queue_stop_t tx_queue_stop;/**< Stop TX for a queue.*/
- eth_rx_queue_setup_t rx_queue_setup;/**< Set up device RX queue.*/
- eth_queue_release_t rx_queue_release;/**< Release RX queue.*/
- eth_rx_queue_count_t rx_queue_count; /**< Get Rx queue count. */
- eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit */
- /**< Enable Rx queue interrupt. */
- eth_rx_enable_intr_t rx_queue_intr_enable;
- /**< Disable Rx queue interrupt.*/
- eth_rx_disable_intr_t rx_queue_intr_disable;
- eth_tx_queue_setup_t tx_queue_setup;/**< Set up device TX queue.*/
- eth_queue_release_t tx_queue_release;/**< Release TX queue.*/
+ vlan_pvid_set_t vlan_pvid_set; /**< Set port based TX VLAN insertion. */
+
+ eth_queue_start_t rx_queue_start;/**< Start RX for a queue. */
+ eth_queue_stop_t rx_queue_stop; /**< Stop RX for a queue. */
+ eth_queue_start_t tx_queue_start;/**< Start TX for a queue. */
+ eth_queue_stop_t tx_queue_stop; /**< Stop TX for a queue. */
+ eth_rx_queue_setup_t rx_queue_setup;/**< Set up device RX queue. */
+ eth_queue_release_t rx_queue_release; /**< Release RX queue. */
+ eth_rx_queue_count_t rx_queue_count;
+ /**< Get the number of used RX descriptors. */
+ eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */
+ eth_rx_descriptor_status_t rx_descriptor_status;
+ /**< Check the status of a Rx descriptor. */
+ eth_tx_descriptor_status_t tx_descriptor_status;
+ /**< Check the status of a Tx descriptor. */
+ eth_rx_enable_intr_t rx_queue_intr_enable; /**< Enable Rx queue interrupt. */
+ eth_rx_disable_intr_t rx_queue_intr_disable; /**< Disable Rx queue interrupt. */
+ eth_tx_queue_setup_t tx_queue_setup;/**< Set up device TX queue. */
+ eth_queue_release_t tx_queue_release; /**< Release TX queue. */
+ eth_tx_done_cleanup_t tx_done_cleanup;/**< Free tx ring mbufs */
+
eth_dev_led_on_t dev_led_on; /**< Turn on LED. */
eth_dev_led_off_t dev_led_off; /**< Turn off LED. */
+
flow_ctrl_get_t flow_ctrl_get; /**< Get flow control. */
flow_ctrl_set_t flow_ctrl_set; /**< Setup flow control. */
- priority_flow_ctrl_set_t priority_flow_ctrl_set; /**< Setup priority flow control.*/
- eth_mac_addr_remove_t mac_addr_remove; /**< Remove MAC address */
- eth_mac_addr_add_t mac_addr_add; /**< Add a MAC address */
- eth_mac_addr_set_t mac_addr_set; /**< Set a MAC address */
- eth_uc_hash_table_set_t uc_hash_table_set; /**< Set Unicast Table Array */
- eth_uc_all_hash_table_set_t uc_all_hash_table_set; /**< Set Unicast hash bitmap */
- eth_mirror_rule_set_t mirror_rule_set; /**< Add a traffic mirror rule.*/
- eth_mirror_rule_reset_t mirror_rule_reset; /**< reset a traffic mirror rule.*/
- eth_set_vf_rx_mode_t set_vf_rx_mode; /**< Set VF RX mode */
- eth_set_vf_rx_t set_vf_rx; /**< enable/disable a VF receive */
- eth_set_vf_tx_t set_vf_tx; /**< enable/disable a VF transmit */
- eth_set_vf_vlan_filter_t set_vf_vlan_filter; /**< Set VF VLAN filter */
- /** Add UDP tunnel port. */
- eth_udp_tunnel_port_add_t udp_tunnel_port_add;
- /** Del UDP tunnel port. */
- eth_udp_tunnel_port_del_t udp_tunnel_port_del;
- eth_set_queue_rate_limit_t set_queue_rate_limit; /**< Set queue rate limit */
- eth_set_vf_rate_limit_t set_vf_rate_limit; /**< Set VF rate limit */
- /** Update redirection table. */
- reta_update_t reta_update;
- /** Query redirection table. */
- reta_query_t reta_query;
-
- eth_get_reg_t get_reg;
- /**< Get registers */
- eth_get_eeprom_length_t get_eeprom_length;
- /**< Get eeprom length */
- eth_get_eeprom_t get_eeprom;
- /**< Get eeprom data */
- eth_set_eeprom_t set_eeprom;
- /**< Set eeprom */
- /* bypass control */
+ priority_flow_ctrl_set_t priority_flow_ctrl_set; /**< Setup priority flow control. */
+
+ eth_uc_hash_table_set_t uc_hash_table_set; /**< Set Unicast Table Array. */
+ eth_uc_all_hash_table_set_t uc_all_hash_table_set; /**< Set Unicast hash bitmap. */
+
+ eth_mirror_rule_set_t mirror_rule_set; /**< Add a traffic mirror rule. */
+ eth_mirror_rule_reset_t mirror_rule_reset; /**< reset a traffic mirror rule. */
+
+ eth_udp_tunnel_port_add_t udp_tunnel_port_add; /** Add UDP tunnel port. */
+ eth_udp_tunnel_port_del_t udp_tunnel_port_del; /** Del UDP tunnel port. */
+ eth_l2_tunnel_eth_type_conf_t l2_tunnel_eth_type_conf;
+ /** Config ether type of l2 tunnel. */
+ eth_l2_tunnel_offload_set_t l2_tunnel_offload_set;
+ /** Enable/disable l2 tunnel offload functions. */
+
+ eth_set_queue_rate_limit_t set_queue_rate_limit; /**< Set queue rate limit. */
+
+ rss_hash_update_t rss_hash_update; /** Configure RSS hash protocols. */
+ rss_hash_conf_get_t rss_hash_conf_get; /** Get current RSS hash configuration. */
+ reta_update_t reta_update; /** Update redirection table. */
+ reta_query_t reta_query; /** Query redirection table. */
+
+ eth_get_reg_t get_reg; /**< Get registers. */
+ eth_get_eeprom_length_t get_eeprom_length; /**< Get eeprom length. */
+ eth_get_eeprom_t get_eeprom; /**< Get eeprom data. */
+ eth_set_eeprom_t set_eeprom; /**< Set eeprom. */
+
+ /* bypass control */
#ifdef RTE_NIC_BYPASS
- bypass_init_t bypass_init;
- bypass_state_set_t bypass_state_set;
- bypass_state_show_t bypass_state_show;
- bypass_event_set_t bypass_event_set;
- bypass_event_show_t bypass_event_show;
- bypass_wd_timeout_set_t bypass_wd_timeout_set;
- bypass_wd_timeout_show_t bypass_wd_timeout_show;
- bypass_ver_show_t bypass_ver_show;
- bypass_wd_reset_t bypass_wd_reset;
+ bypass_init_t bypass_init;
+ bypass_state_set_t bypass_state_set;
+ bypass_state_show_t bypass_state_show;
+ bypass_event_set_t bypass_event_set;
+ bypass_event_show_t bypass_event_show;
+ bypass_wd_timeout_set_t bypass_wd_timeout_set;
+ bypass_wd_timeout_show_t bypass_wd_timeout_show;
+ bypass_ver_show_t bypass_ver_show;
+ bypass_wd_reset_t bypass_wd_reset;
#endif
- /** Configure RSS hash protocols. */
- rss_hash_update_t rss_hash_update;
- /** Get current RSS hash configuration. */
- rss_hash_conf_get_t rss_hash_conf_get;
- eth_filter_ctrl_t filter_ctrl;
- /**< common filter control. */
- eth_set_mc_addr_list_t set_mc_addr_list; /**< set list of mcast addrs */
- eth_rxq_info_get_t rxq_info_get;
- /**< retrieve RX queue information. */
- eth_txq_info_get_t txq_info_get;
- /**< retrieve TX queue information. */
+ eth_filter_ctrl_t filter_ctrl; /**< common filter control. */
+
+ eth_get_dcb_info get_dcb_info; /** Get DCB information. */
+
+ eth_timesync_enable_t timesync_enable;
/** Turn IEEE1588/802.1AS timestamping on. */
- eth_timesync_enable_t timesync_enable;
+ eth_timesync_disable_t timesync_disable;
/** Turn IEEE1588/802.1AS timestamping off. */
- eth_timesync_disable_t timesync_disable;
- /** Read the IEEE1588/802.1AS RX timestamp. */
eth_timesync_read_rx_timestamp_t timesync_read_rx_timestamp;
- /** Read the IEEE1588/802.1AS TX timestamp. */
+ /** Read the IEEE1588/802.1AS RX timestamp. */
eth_timesync_read_tx_timestamp_t timesync_read_tx_timestamp;
-
- /** Get DCB information */
- eth_get_dcb_info get_dcb_info;
- /** Adjust the device clock.*/
- eth_timesync_adjust_time timesync_adjust_time;
- /** Get the device clock time. */
- eth_timesync_read_time timesync_read_time;
- /** Set the device clock time. */
- eth_timesync_write_time timesync_write_time;
- /** Config ether type of l2 tunnel */
- eth_l2_tunnel_eth_type_conf_t l2_tunnel_eth_type_conf;
- /** Enable/disable l2 tunnel offload functions */
- eth_l2_tunnel_offload_set_t l2_tunnel_offload_set;
+ /** Read the IEEE1588/802.1AS TX timestamp. */
+ eth_timesync_adjust_time timesync_adjust_time; /** Adjust the device clock. */
+ eth_timesync_read_time timesync_read_time; /** Get the device clock time. */
+ eth_timesync_write_time timesync_write_time; /** Set the device clock time. */
+
+ eth_xstats_get_by_id_t xstats_get_by_id;
+ /**< Get extended device statistic values by ID. */
+ eth_xstats_get_names_by_id_t xstats_get_names_by_id;
+ /**< Get name of extended device statistics by ID. */
};
/**
@@ -1613,6 +1639,14 @@ struct rte_eth_rxtx_callback {
};
/**
+ * A set of values to describe the possible states of an eth device.
+ */
+enum rte_eth_dev_state {
+ RTE_ETH_DEV_UNUSED = 0,
+ RTE_ETH_DEV_ATTACHED,
+};
+
+/**
* @internal
* The generic data structure associated with each ethernet device.
*
@@ -1625,10 +1659,11 @@ struct rte_eth_rxtx_callback {
struct rte_eth_dev {
eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */
eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
+ eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
struct rte_eth_dev_data *data; /**< Pointer to device data */
- const struct eth_driver *driver;/**< Driver for this device */
const struct eth_dev_ops *dev_ops; /**< Functions exported by PMD */
- struct rte_pci_device *pci_dev; /**< PCI info. supplied by probing */
+ struct rte_device *device; /**< Backing device */
+ struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
/** User application callbacks for NIC interrupts */
struct rte_eth_dev_cb_list link_intr_cbs;
/**
@@ -1641,7 +1676,7 @@ struct rte_eth_dev {
* received packets before passing them to the driver for transmission.
*/
struct rte_eth_rxtx_callback *pre_tx_burst_cbs[RTE_MAX_QUEUES_PER_PORT];
- uint8_t attached; /**< Flag indicating the port is attached */
+ enum rte_eth_dev_state state; /**< Flag indicating the port state */
} __rte_cache_aligned;
struct rte_eth_dev_sriov {
@@ -1711,6 +1746,8 @@ struct rte_eth_dev_data {
#define RTE_ETH_DEV_INTR_LSC 0x0002
/** Device is a bonded slave */
#define RTE_ETH_DEV_BONDED_SLAVE 0x0004
+/** Device supports device removal interrupt */
+#define RTE_ETH_DEV_INTR_RMV 0x0008
/**
* @internal
@@ -1720,6 +1757,25 @@ struct rte_eth_dev_data {
extern struct rte_eth_dev rte_eth_devices[];
/**
+ * Iterates over valid ethdev ports.
+ *
+ * @param port_id
+ * The id of the next possible valid port.
+ * @return
+ * Next valid port id, RTE_MAX_ETHPORTS if there is none.
+ */
+uint8_t rte_eth_find_next(uint8_t port_id);
+
+/**
+ * Macro to iterate over all enabled ethdev ports.
+ */
+#define RTE_ETH_FOREACH_DEV(p) \
+ for (p = rte_eth_find_next(0); \
+ (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
+ p = rte_eth_find_next(p + 1))
+
+
+/**
* Get the total number of Ethernet devices that have been successfully
* initialized by the [matching] Ethernet driver during the PCI probing phase.
* All devices whose port identifier is in the range
@@ -1727,7 +1783,7 @@ extern struct rte_eth_dev rte_eth_devices[];
* immediately after invoking rte_eal_init().
* If the application unplugs a port using hotplug function, The enabled port
* numbers may be noncontiguous. In the case, the applications need to manage
- * enabled port by themselves.
+ * enabled port by using the ``RTE_ETH_FOREACH_DEV()`` macro.
*
* @return
* - The total number of usable Ethernet devices.
@@ -1759,6 +1815,19 @@ struct rte_eth_dev *rte_eth_dev_allocate(const char *name);
/**
* @internal
+ * Attach to the ethdev already initialized by the primary
+ * process.
+ *
+ * @param name Ethernet device's name.
+ * @return
+ * - Success: Slot in the rte_dev_devices array for attached
+ * device.
+ * - Error: Null pointer.
+ */
+struct rte_eth_dev *rte_eth_dev_attach_secondary(const char *name);
+
+/**
+ * @internal
* Release the specified ethdev port.
*
* @param eth_dev
@@ -1769,7 +1838,7 @@ struct rte_eth_dev *rte_eth_dev_allocate(const char *name);
int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);
/**
- * Attach a new Ethernet device specified by aruguments.
+ * Attach a new Ethernet device specified by arguments.
*
* @param devargs
* A pointer to a strings array describing the new device
@@ -1796,78 +1865,6 @@ int rte_eth_dev_attach(const char *devargs, uint8_t *port_id);
*/
int rte_eth_dev_detach(uint8_t port_id, char *devname);
-struct eth_driver;
-/**
- * @internal
- * Initialization function of an Ethernet driver invoked for each matching
- * Ethernet PCI device detected during the PCI probing phase.
- *
- * @param eth_dev
- * The *eth_dev* pointer is the address of the *rte_eth_dev* structure
- * associated with the matching device and which have been [automatically]
- * allocated in the *rte_eth_devices* array.
- * The *eth_dev* structure is supplied to the driver initialization function
- * with the following fields already initialized:
- *
- * - *pci_dev*: Holds the pointers to the *rte_pci_device* structure which
- * contains the generic PCI information of the matching device.
- *
- * - *driver*: Holds the pointer to the *eth_driver* structure.
- *
- * - *dev_private*: Holds a pointer to the device private data structure.
- *
- * - *mtu*: Contains the default Ethernet maximum frame length (1500).
- *
- * - *port_id*: Contains the port index of the device (actually the index
- * of the *eth_dev* structure in the *rte_eth_devices* array).
- *
- * @return
- * - 0: Success, the device is properly initialized by the driver.
- * In particular, the driver MUST have set up the *dev_ops* pointer
- * of the *eth_dev* structure.
- * - <0: Error code of the device initialization failure.
- */
-typedef int (*eth_dev_init_t)(struct rte_eth_dev *eth_dev);
-
-/**
- * @internal
- * Finalization function of an Ethernet driver invoked for each matching
- * Ethernet PCI device detected during the PCI closing phase.
- *
- * @param eth_dev
- * The *eth_dev* pointer is the address of the *rte_eth_dev* structure
- * associated with the matching device and which have been [automatically]
- * allocated in the *rte_eth_devices* array.
- * @return
- * - 0: Success, the device is properly finalized by the driver.
- * In particular, the driver MUST free the *dev_ops* pointer
- * of the *eth_dev* structure.
- * - <0: Error code of the device initialization failure.
- */
-typedef int (*eth_dev_uninit_t)(struct rte_eth_dev *eth_dev);
-
-/**
- * @internal
- * The structure associated with a PMD Ethernet driver.
- *
- * Each Ethernet driver acts as a PCI driver and is represented by a generic
- * *eth_driver* structure that holds:
- *
- * - An *rte_pci_driver* structure (which must be the first field).
- *
- * - The *eth_dev_init* function invoked for each matching PCI device.
- *
- * - The *eth_dev_uninit* function invoked for each matching PCI device.
- *
- * - The size of the private data to allocate for each matching device.
- */
-struct eth_driver {
- struct rte_pci_driver pci_drv; /**< The PMD is also a PCI driver. */
- eth_dev_init_t eth_dev_init; /**< Device init function. */
- eth_dev_uninit_t eth_dev_uninit; /**< Device uninit function. */
- unsigned int dev_private_size; /**< Size of device private data. */
-};
-
/**
* Convert a numerical speed in Mbps to a bitmap flag that can be used in
* the bitmap link_speeds of the struct rte_eth_conf
@@ -1914,6 +1911,19 @@ int rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_queue,
uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
/**
+ * @internal
+ * Release device queues and clear its configuration to force the user
+ * application to reconfigure it. It is for internal use only.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * void
+ */
+void _rte_eth_dev_reset(struct rte_eth_dev *dev);
+
+/**
* Allocate and set up a receive queue for an Ethernet device.
*
* The function allocates a contiguous block of memory for *nb_rx_desc*
@@ -2272,22 +2282,23 @@ void rte_eth_stats_reset(uint8_t port_id);
* @param port_id
* The port identifier of the Ethernet device.
* @param xstats_names
- * Block of memory to insert names into. Must be at least size in capacity.
- * If set to NULL, function returns required capacity.
+ * An rte_eth_xstat_name array of at least *size* elements to
+ * be filled. If set to NULL, the function returns the required number
+ * of elements.
* @param size
- * Capacity of xstats_names (number of names).
+ * The size of the xstats_names array (number of elements).
* @return
- * - positive value lower or equal to size: success. The return value
+ * - A positive value lower or equal to size: success. The return value
* is the number of entries filled in the stats table.
- * - positive value higher than size: error, the given statistics table
+ * - A positive value higher than size: error, the given statistics table
* is too small. The return value corresponds to the size that should
* be given to succeed. The entries in the table are not valid and
* shall not be used by the caller.
- * - negative value on error (invalid port id)
+ * - A negative value on error (invalid port id).
*/
int rte_eth_xstats_get_names(uint8_t port_id,
struct rte_eth_xstat_name *xstats_names,
- unsigned size);
+ unsigned int size);
/**
* Retrieve extended statistics of an Ethernet device.
@@ -2296,22 +2307,96 @@ int rte_eth_xstats_get_names(uint8_t port_id,
* The port identifier of the Ethernet device.
* @param xstats
* A pointer to a table of structure of type *rte_eth_xstat*
- * to be filled with device statistics ids and values.
+ * to be filled with device statistics ids and values: id is the
+ * index of the name string in xstats_names (see rte_eth_xstats_get_names()),
+ * and value is the statistic counter.
* This parameter can be set to NULL if n is 0.
* @param n
- * The size of the stats table, which should be large enough to store
- * all the statistics of the device.
+ * The size of the xstats array (number of elements).
* @return
- * - positive value lower or equal to n: success. The return value
+ * - A positive value lower or equal to n: success. The return value
* is the number of entries filled in the stats table.
- * - positive value higher than n: error, the given statistics table
+ * - A positive value higher than n: error, the given statistics table
* is too small. The return value corresponds to the size that should
* be given to succeed. The entries in the table are not valid and
* shall not be used by the caller.
- * - negative value on error (invalid port id)
+ * - A negative value on error (invalid port id).
*/
int rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
- unsigned n);
+ unsigned int n);
+
+/**
+ * Retrieve names of extended statistics of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param xstats_names
+ * An rte_eth_xstat_name array of at least *size* elements to
+ * be filled. If set to NULL, the function returns the required number
+ * of elements.
+ * @param ids
+ * IDs array given by app to retrieve specific statistics
+ * @param size
+ * The size of the xstats_names array (number of elements).
+ * @return
+ * - A positive value lower or equal to size: success. The return value
+ * is the number of entries filled in the stats table.
+ * - A positive value higher than size: error, the given statistics table
+ * is too small. The return value corresponds to the size that should
+ * be given to succeed. The entries in the table are not valid and
+ * shall not be used by the caller.
+ * - A negative value on error (invalid port id).
+ */
+int
+rte_eth_xstats_get_names_by_id(uint8_t port_id,
+ struct rte_eth_xstat_name *xstats_names, unsigned int size,
+ uint64_t *ids);
+
+/**
+ * Retrieve extended statistics of an Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param ids
+ * A pointer to an ids array passed by application. This tells wich
+ * statistics values function should retrieve. This parameter
+ * can be set to NULL if n is 0. In this case function will retrieve
+ * all avalible statistics.
+ * @param values
+ * A pointer to a table to be filled with device statistics values.
+ * @param n
+ * The size of the ids array (number of elements).
+ * @return
+ * - A positive value lower or equal to n: success. The return value
+ * is the number of entries filled in the stats table.
+ * - A positive value higher than n: error, the given statistics table
+ * is too small. The return value corresponds to the size that should
+ * be given to succeed. The entries in the table are not valid and
+ * shall not be used by the caller.
+ * - A negative value on error (invalid port id).
+ */
+int rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids,
+ uint64_t *values, unsigned int n);
+
+/**
+ * Gets the ID of a statistic from its name.
+ *
+ * This function searches for the statistics using string compares, and
+ * as such should not be used on the fast-path. For fast-path retrieval of
+ * specific statistics, store the ID as provided in *id* from this function,
+ * and pass the ID to rte_eth_xstats_get()
+ *
+ * @param port_id The port to look up statistics from
+ * @param xstat_name The name of the statistic to return
+ * @param[out] id A pointer to an app-supplied uint64_t which should be
+ * set to the ID of the stat if the stat exists.
+ * @return
+ * 0 on success
+ * -ENODEV for invalid port_id,
+ * -EINVAL if the xstat_name doesn't exist in port_id
+ */
+int rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name,
+ uint64_t *id);
/**
* Reset extended statistics of an Ethernet device.
@@ -2385,6 +2470,27 @@ void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);
void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);
/**
+ * Retrieve the firmware version of a device.
+ *
+ * @param port_id
+ * The port identifier of the device.
+ * @param fw_version
+ * A pointer to a string array storing the firmware version of a device,
+ * the string includes terminating null. This pointer is allocated by caller.
+ * @param fw_size
+ * The size of the string array pointed by fw_version, which should be
+ * large enough to store firmware version of the device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if operation is not supported.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (>0) if *fw_size* is not enough to store firmware version, return
+ * the size of the non truncated string.
+ */
+int rte_eth_dev_fw_version_get(uint8_t port_id,
+ char *fw_version, size_t fw_size);
+
+/**
* Retrieve the supported packet types of an Ethernet device.
*
* When a packet type is announced as supported, it *must* be recognized by
@@ -2413,7 +2519,7 @@ void rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info);
* @param ptype_mask
* A hint of what kind of packet type which the caller is interested in.
* @param ptypes
- * An array pointer to store adequent packet types, allocated by caller.
+ * An array pointer to store adequate packet types, allocated by caller.
* @param num
* Size of the array pointed by param ptypes.
* @return
@@ -2553,12 +2659,12 @@ int rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask);
int rte_eth_dev_get_vlan_offload(uint8_t port_id);
/**
- * Set port based TX VLAN insersion on or off.
+ * Set port based TX VLAN insertion on or off.
*
* @param port_id
* The port identifier of the Ethernet device.
* @param pvid
- * Port based TX VLAN identifier togeth with user priority.
+ * Port based TX VLAN identifier together with user priority.
* @param on
* Turn on or off the port based TX VLAN insertion.
*
@@ -2615,7 +2721,7 @@ int rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on);
* method to retrieve bursts of received packets and to immediately
* queue them for further parallel processing by another logical core,
* for instance. However, instead of having received packets being
- * individually queued by the driver, this approach allows the invoker
+ * individually queued by the driver, this approach allows the caller
* of the rte_eth_rx_burst() function to queue a burst of retrieved
* packets at a time and therefore dramatically reduce the cost of
* enqueue/dequeue operations per packet.
@@ -2684,7 +2790,7 @@ rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
}
/**
- * Get the number of used descriptors in a specific queue
+ * Get the number of used descriptors of a rx queue
*
* @param port_id
* The port identifier of the Ethernet device.
@@ -2692,16 +2798,21 @@ rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
* The queue id on the specific port.
* @return
* The number of used descriptors in the specific queue, or:
- * (-EINVAL) if *port_id* is invalid
+ * (-EINVAL) if *port_id* or *queue_id* is invalid
* (-ENOTSUP) if the device does not support this function
*/
static inline int
rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct rte_eth_dev *dev;
+
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
- return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
+ if (queue_id >= dev->data->nb_rx_queues)
+ return -EINVAL;
+
+ return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
/**
@@ -2729,6 +2840,121 @@ rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
dev->data->rx_queues[queue_id], offset);
}
+#define RTE_ETH_RX_DESC_AVAIL 0 /**< Desc available for hw. */
+#define RTE_ETH_RX_DESC_DONE 1 /**< Desc done, filled by hw. */
+#define RTE_ETH_RX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */
+
+/**
+ * Check the status of a Rx descriptor in the queue
+ *
+ * It should be called in a similar context than the Rx function:
+ * - on a dataplane core
+ * - not concurrently on the same queue
+ *
+ * Since it's a dataplane function, no check is performed on port_id and
+ * queue_id. The caller must therefore ensure that the port is enabled
+ * and the queue is configured and running.
+ *
+ * Note: accessing to a random descriptor in the ring may trigger cache
+ * misses and have a performance impact.
+ *
+ * @param port_id
+ * A valid port identifier of the Ethernet device which.
+ * @param queue_id
+ * A valid Rx queue identifier on this port.
+ * @param offset
+ * The offset of the descriptor starting from tail (0 is the next
+ * packet to be received by the driver).
+ *
+ * @return
+ * - (RTE_ETH_RX_DESC_AVAIL): Descriptor is available for the hardware to
+ * receive a packet.
+ * - (RTE_ETH_RX_DESC_DONE): Descriptor is done, it is filled by hw, but
+ * not yet processed by the driver (i.e. in the receive queue).
+ * - (RTE_ETH_RX_DESC_UNAVAIL): Descriptor is unavailable, either hold by
+ * the driver and not yet returned to hw, or reserved by the hw.
+ * - (-EINVAL) bad descriptor offset.
+ * - (-ENOTSUP) if the device does not support this function.
+ * - (-ENODEV) bad port or queue (only if compiled with debug).
+ */
+static inline int
+rte_eth_rx_descriptor_status(uint8_t port_id, uint16_t queue_id,
+ uint16_t offset)
+{
+ struct rte_eth_dev *dev;
+ void *rxq;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+#endif
+ dev = &rte_eth_devices[port_id];
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ if (queue_id >= dev->data->nb_rx_queues)
+ return -ENODEV;
+#endif
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_status, -ENOTSUP);
+ rxq = dev->data->rx_queues[queue_id];
+
+ return (*dev->dev_ops->rx_descriptor_status)(rxq, offset);
+}
+
+#define RTE_ETH_TX_DESC_FULL 0 /**< Desc filled for hw, waiting xmit. */
+#define RTE_ETH_TX_DESC_DONE 1 /**< Desc done, packet is transmitted. */
+#define RTE_ETH_TX_DESC_UNAVAIL 2 /**< Desc used by driver or hw. */
+
+/**
+ * Check the status of a Tx descriptor in the queue.
+ *
+ * It should be called in a similar context than the Tx function:
+ * - on a dataplane core
+ * - not concurrently on the same queue
+ *
+ * Since it's a dataplane function, no check is performed on port_id and
+ * queue_id. The caller must therefore ensure that the port is enabled
+ * and the queue is configured and running.
+ *
+ * Note: accessing to a random descriptor in the ring may trigger cache
+ * misses and have a performance impact.
+ *
+ * @param port_id
+ * A valid port identifier of the Ethernet device which.
+ * @param queue_id
+ * A valid Tx queue identifier on this port.
+ * @param offset
+ * The offset of the descriptor starting from tail (0 is the place where
+ * the next packet will be send).
+ *
+ * @return
+ * - (RTE_ETH_TX_DESC_FULL) Descriptor is being processed by the hw, i.e.
+ * in the transmit queue.
+ * - (RTE_ETH_TX_DESC_DONE) Hardware is done with this descriptor, it can
+ * be reused by the driver.
+ * - (RTE_ETH_TX_DESC_UNAVAIL): Descriptor is unavailable, reserved by the
+ * driver or the hardware.
+ * - (-EINVAL) bad descriptor offset.
+ * - (-ENOTSUP) if the device does not support this function.
+ * - (-ENODEV) bad port or queue (only if compiled with debug).
+ */
+static inline int rte_eth_tx_descriptor_status(uint8_t port_id,
+ uint16_t queue_id, uint16_t offset)
+{
+ struct rte_eth_dev *dev;
+ void *txq;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+#endif
+ dev = &rte_eth_devices[port_id];
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ if (queue_id >= dev->data->nb_tx_queues)
+ return -ENODEV;
+#endif
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_descriptor_status, -ENOTSUP);
+ txq = dev->data->tx_queues[queue_id];
+
+ return (*dev->dev_ops->tx_descriptor_status)(txq, offset);
+}
+
/**
* Send a burst of output packets on a transmit queue of an Ethernet device.
*
@@ -2819,6 +3045,115 @@ rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts);
}
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Process a burst of output packets on a transmit queue of an Ethernet device.
+ *
+ * The rte_eth_tx_prepare() function is invoked to prepare output packets to be
+ * transmitted on the output queue *queue_id* of the Ethernet device designated
+ * by its *port_id*.
+ * The *nb_pkts* parameter is the number of packets to be prepared which are
+ * supplied in the *tx_pkts* array of *rte_mbuf* structures, each of them
+ * allocated from a pool created with rte_pktmbuf_pool_create().
+ * For each packet to send, the rte_eth_tx_prepare() function performs
+ * the following operations:
+ *
+ * - Check if packet meets devices requirements for tx offloads.
+ *
+ * - Check limitations about number of segments.
+ *
+ * - Check additional requirements when debug is enabled.
+ *
+ * - Update and/or reset required checksums when tx offload is set for packet.
+ *
+ * Since this function can modify packet data, provided mbufs must be safely
+ * writable (e.g. modified data cannot be in shared segment).
+ *
+ * The rte_eth_tx_prepare() function returns the number of packets ready to be
+ * sent. A return value equal to *nb_pkts* means that all packets are valid and
+ * ready to be sent, otherwise stops processing on the first invalid packet and
+ * leaves the rest packets untouched.
+ *
+ * When this functionality is not implemented in the driver, all packets are
+ * are returned untouched.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * The value must be a valid port id.
+ * @param queue_id
+ * The index of the transmit queue through which output packets must be
+ * sent.
+ * The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param tx_pkts
+ * The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
+ * which contain the output packets.
+ * @param nb_pkts
+ * The maximum number of packets to process.
+ * @return
+ * The number of packets correct and ready to be sent. The return value can be
+ * less than the value of the *tx_pkts* parameter when some packet doesn't
+ * meet devices requirements with rte_errno set appropriately:
+ * - -EINVAL: offload flags are not correctly set
+ * - -ENOTSUP: the offload feature is not supported by the hardware
+ *
+ */
+
+#ifndef RTE_ETHDEV_TX_PREPARE_NOOP
+
+static inline uint16_t
+rte_eth_tx_prepare(uint8_t port_id, uint16_t queue_id,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct rte_eth_dev *dev;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#endif
+
+ dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ if (queue_id >= dev->data->nb_tx_queues) {
+ RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#endif
+
+ if (!dev->tx_pkt_prepare)
+ return nb_pkts;
+
+ return (*dev->tx_pkt_prepare)(dev->data->tx_queues[queue_id],
+ tx_pkts, nb_pkts);
+}
+
+#else
+
+/*
+ * Native NOOP operation for compilation targets which doesn't require any
+ * preparations steps, and functional NOOP may introduce unnecessary performance
+ * drop.
+ *
+ * Generally this is not a good idea to turn it on globally and didn't should
+ * be used if behavior of tx_preparation can change.
+ */
+
+static inline uint16_t
+rte_eth_tx_prepare(__rte_unused uint8_t port_id, __rte_unused uint16_t queue_id,
+ __rte_unused struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ return nb_pkts;
+}
+
+#endif
+
typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
void *userdata);
@@ -3024,6 +3359,33 @@ rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
void *userdata);
/**
+ * Request the driver to free mbufs currently cached by the driver. The
+ * driver will only free the mbuf if it is no longer in use. It is the
+ * application's responsibity to ensure rte_eth_tx_buffer_flush(..) is
+ * called if needed.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The index of the transmit queue through which output packets must be
+ * sent.
+ * The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ * to rte_eth_dev_configure().
+ * @param free_cnt
+ * Maximum number of packets to free. Use 0 to indicate all possible packets
+ * should be freed. Note that a packet may be using multiple mbufs.
+ * @return
+ * Failure: < 0
+ * -ENODEV: Invalid interface
+ * -ENOTSUP: Driver does not support function
+ * Success: >= 0
+ * 0-n: Number of packets freed. More packets may still remain in ring that
+ * are in use.
+ */
+int
+rte_eth_tx_done_cleanup(uint8_t port_id, uint16_t queue_id, uint32_t free_cnt);
+
+/**
* The eth device event type for interrupt, and maybe others in the future.
*/
enum rte_eth_event_type {
@@ -3034,6 +3396,8 @@ enum rte_eth_event_type {
RTE_ETH_EVENT_INTR_RESET,
/**< reset interrupt event, sent to VF on PF reset */
RTE_ETH_EVENT_VF_MBOX, /**< message from the VF received by PF */
+ RTE_ETH_EVENT_MACSEC, /**< MACsec offload related event */
+ RTE_ETH_EVENT_INTR_RMV, /**< device removal event */
RTE_ETH_EVENT_MAX /**< max value of this enum */
};
@@ -3112,7 +3476,7 @@ void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
/**
* When there is no rx packet coming in Rx Queue for a long time, we can
* sleep lcore related to RX Queue for power saving, and enable rx interrupt
- * to be triggered when rx packect arrives.
+ * to be triggered when Rx packet arrives.
*
* The rte_eth_dev_rx_intr_enable() function enables rx queue
* interrupt on specific rx queue of a port.
@@ -3403,93 +3767,6 @@ int rte_eth_dev_uc_hash_table_set(uint8_t port,struct ether_addr *addr,
*/
int rte_eth_dev_uc_all_hash_table_set(uint8_t port,uint8_t on);
- /**
- * Set RX L2 Filtering mode of a VF of an Ethernet device.
- *
- * @param port
- * The port identifier of the Ethernet device.
- * @param vf
- * VF id.
- * @param rx_mode
- * The RX mode mask, which is one or more of accepting Untagged Packets,
- * packets that match the PFUTA table, Broadcast and Multicast Promiscuous.
- * ETH_VMDQ_ACCEPT_UNTAG,ETH_VMDQ_ACCEPT_HASH_UC,
- * ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST will be used
- * in rx_mode.
- * @param on
- * 1 - Enable a VF RX mode.
- * 0 - Disable a VF RX mode.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-ENOTSUP) if hardware doesn't support.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_dev_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mode,
- uint8_t on);
-
-/**
-* Enable or disable a VF traffic transmit of the Ethernet device.
-*
-* @param port
-* The port identifier of the Ethernet device.
-* @param vf
-* VF id.
-* @param on
-* 1 - Enable a VF traffic transmit.
-* 0 - Disable a VF traffic transmit.
-* @return
-* - (0) if successful.
-* - (-ENODEV) if *port_id* invalid.
-* - (-ENOTSUP) if hardware doesn't support.
-* - (-EINVAL) if bad parameter.
-*/
-int
-rte_eth_dev_set_vf_tx(uint8_t port,uint16_t vf, uint8_t on);
-
-/**
-* Enable or disable a VF traffic receive of an Ethernet device.
-*
-* @param port
-* The port identifier of the Ethernet device.
-* @param vf
-* VF id.
-* @param on
-* 1 - Enable a VF traffic receive.
-* 0 - Disable a VF traffic receive.
-* @return
-* - (0) if successful.
-* - (-ENOTSUP) if hardware doesn't support.
-* - (-ENODEV) if *port_id* invalid.
-* - (-EINVAL) if bad parameter.
-*/
-int
-rte_eth_dev_set_vf_rx(uint8_t port,uint16_t vf, uint8_t on);
-
-/**
-* Enable/Disable hardware VF VLAN filtering by an Ethernet device of
-* received VLAN packets tagged with a given VLAN Tag Identifier.
-*
-* @param port id
-* The port identifier of the Ethernet device.
-* @param vlan_id
-* The VLAN Tag Identifier whose filtering must be enabled or disabled.
-* @param vf_mask
-* Bitmap listing which VFs participate in the VLAN filtering.
-* @param vlan_on
-* 1 - Enable VFs VLAN filtering.
-* 0 - Disable VFs VLAN filtering.
-* @return
-* - (0) if successful.
-* - (-ENOTSUP) if hardware doesn't support.
-* - (-ENODEV) if *port_id* invalid.
-* - (-EINVAL) if bad parameter.
-*/
-int
-rte_eth_dev_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
- uint64_t vf_mask,
- uint8_t vlan_on);
-
/**
* Set a traffic mirroring rule on an Ethernet device
*
@@ -3551,26 +3828,6 @@ int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
uint16_t tx_rate);
/**
- * Set the rate limitation for a vf on an Ethernet device.
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param vf
- * VF id.
- * @param tx_rate
- * The tx rate allocated from the total link speed for this VF id.
- * @param q_msk
- * The queue mask which need to set the rate.
- * @return
- * - (0) if successful.
- * - (-ENOTSUP) if hardware doesn't support this feature.
- * - (-ENODEV) if *port_id* invalid.
- * - (-EINVAL) if bad parameter.
- */
-int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk);
-
-/**
* Initialize bypass logic. This function needs to be called before
* executing any other bypass API.
*
@@ -3773,7 +4030,7 @@ rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
* The packets with this UDP port will be identified as this type of tunnel.
* Before enabling any offloading function for a tunnel, users can call this API
* to change or add more UDP port for the tunnel. So the offloading function
- * can take effect on the packets with the sepcific UDP port.
+ * can take effect on the packets with the specific UDP port.
*
* @param port_id
* The port identifier of the Ethernet device.
@@ -3795,7 +4052,7 @@ rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
* any more.
* Before enabling any offloading function for a tunnel, users can call this API
* to delete a UDP port for the tunnel. So the offloading function will not take
- * effect on the packets with the sepcific UDP port.
+ * effect on the packets with the specific UDP port.
*
* @param port_id
* The port identifier of the Ethernet device.
@@ -3889,31 +4146,31 @@ int rte_eth_dev_get_dcb_info(uint8_t port_id,
void *rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param);
-/*
-* Add a callback that must be called first on packet RX on a given port
-* and queue.
-*
-* This API configures a first function to be called for each burst of
-* packets received on a given NIC port queue. The return value is a pointer
-* that can be used to later remove the callback using
-* rte_eth_remove_rx_callback().
-*
-* Multiple functions are called in the order that they are added.
-*
-* @param port_id
-* The port identifier of the Ethernet device.
-* @param queue_id
-* The queue on the Ethernet device on which the callback is to be added.
-* @param fn
-* The callback function
-* @param user_param
-* A generic pointer parameter which will be passed to each invocation of the
-* callback function on this port and queue.
-*
-* @return
-* NULL on error.
-* On success, a pointer value which can later be used to remove the callback.
-*/
+/**
+ * Add a callback that must be called first on packet RX on a given port
+ * and queue.
+ *
+ * This API configures a first function to be called for each burst of
+ * packets received on a given NIC port queue. The return value is a pointer
+ * that can be used to later remove the callback using
+ * rte_eth_remove_rx_callback().
+ *
+ * Multiple functions are called in the order that they are added.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param queue_id
+ * The queue on the Ethernet device on which the callback is to be added.
+ * @param fn
+ * The callback function
+ * @param user_param
+ * A generic pointer parameter which will be passed to each invocation of the
+ * callback function on this port and queue.
+ *
+ * @return
+ * NULL on error.
+ * On success, a pointer value which can later be used to remove the callback.
+ */
void *rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param);
@@ -4251,20 +4508,6 @@ int rte_eth_timesync_read_time(uint8_t port_id, struct timespec *time);
int rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *time);
/**
- * Copy pci device info to the Ethernet device data.
- *
- * @param eth_dev
- * The *eth_dev* pointer is the address of the *rte_eth_dev* structure.
- * @param pci_dev
- * The *pci_dev* pointer is the address of the *rte_pci_device* structure.
- *
- * @return
- * - 0 on success, negative on error
- */
-void rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
- struct rte_pci_device *pci_dev);
-
-/**
* Create memzone for HW rings.
* malloc can't be used as the physical address is needed.
* If the memzone is already created, then this function returns a ptr
@@ -4336,7 +4579,7 @@ rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
uint8_t en);
/**
-* Get the port id from pci adrress or device name
+* Get the port id from pci address or device name
* Ex: 0000:2:00.0 or vdev name net_pcap0
*
* @param name
@@ -4364,21 +4607,6 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id);
int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name);
-/**
- * @internal
- * Wrapper for use by pci drivers as a .probe function to attach to a ethdev
- * interface.
- */
-int rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
- struct rte_pci_device *pci_dev);
-
-/**
- * @internal
- * Wrapper for use by pci drivers as a .remove function to detach a ethdev
- * interface.
- */
-int rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev);
-
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_ether/rte_ethdev_pci.h b/lib/librte_ether/rte_ethdev_pci.h
new file mode 100644
index 00000000..d3bc03cf
--- /dev/null
+++ b/lib/librte_ether/rte_ethdev_pci.h
@@ -0,0 +1,193 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Brocade Communications Systems, Inc.
+ * Author: Jan Blunck <jblunck@infradead.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETHDEV_PCI_H_
+#define _RTE_ETHDEV_PCI_H_
+
+#include <rte_malloc.h>
+#include <rte_pci.h>
+#include <rte_ethdev.h>
+
+/**
+ * Copy pci device info to the Ethernet device data.
+ *
+ * @param eth_dev
+ * The *eth_dev* pointer is the address of the *rte_eth_dev* structure.
+ * @param pci_dev
+ * The *pci_dev* pointer is the address of the *rte_pci_device* structure.
+ *
+ * @return
+ * - 0 on success, negative on error
+ */
+static inline void
+rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
+ struct rte_pci_device *pci_dev)
+{
+ if ((eth_dev == NULL) || (pci_dev == NULL)) {
+ RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
+ eth_dev, pci_dev);
+ return;
+ }
+
+ eth_dev->intr_handle = &pci_dev->intr_handle;
+
+ eth_dev->data->dev_flags = 0;
+ if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_RMV)
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_RMV;
+
+ eth_dev->data->kdrv = pci_dev->kdrv;
+ eth_dev->data->numa_node = pci_dev->device.numa_node;
+ eth_dev->data->drv_name = pci_dev->driver->driver.name;
+}
+
+/**
+ * @internal
+ * Allocates a new ethdev slot for an ethernet device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param dev
+ * Pointer to the PCI device
+ *
+ * @param private_data_size
+ * Size of private data structure
+ *
+ * @return
+ * A pointer to a rte_eth_dev or NULL if allocation failed.
+ */
+static inline struct rte_eth_dev *
+rte_eth_dev_pci_allocate(struct rte_pci_device *dev, size_t private_data_size)
+{
+ struct rte_eth_dev *eth_dev;
+ const char *name;
+
+ if (!dev)
+ return NULL;
+
+ name = dev->device.name;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
+ return NULL;
+
+ if (private_data_size) {
+ eth_dev->data->dev_private = rte_zmalloc_socket(name,
+ private_data_size, RTE_CACHE_LINE_SIZE,
+ dev->device.numa_node);
+ if (!eth_dev->data->dev_private) {
+ rte_eth_dev_release_port(eth_dev);
+ return NULL;
+ }
+ }
+ } else {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev)
+ return NULL;
+ }
+
+ eth_dev->device = &dev->device;
+ eth_dev->intr_handle = &dev->intr_handle;
+ rte_eth_copy_pci_info(eth_dev, dev);
+ return eth_dev;
+}
+
+static inline void
+rte_eth_dev_pci_release(struct rte_eth_dev *eth_dev)
+{
+ /* free ether device */
+ rte_eth_dev_release_port(eth_dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+
+ eth_dev->data->dev_private = NULL;
+
+ eth_dev->device = NULL;
+ eth_dev->intr_handle = NULL;
+}
+
+typedef int (*eth_dev_pci_callback_t)(struct rte_eth_dev *eth_dev);
+
+/**
+ * @internal
+ * Wrapper for use by pci drivers in a .probe function to attach to a ethdev
+ * interface.
+ */
+static inline int
+rte_eth_dev_pci_generic_probe(struct rte_pci_device *pci_dev,
+ size_t private_data_size, eth_dev_pci_callback_t dev_init)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ eth_dev = rte_eth_dev_pci_allocate(pci_dev, private_data_size);
+ if (!eth_dev)
+ return -ENOMEM;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev_init, -EINVAL);
+ ret = dev_init(eth_dev);
+ if (ret)
+ rte_eth_dev_pci_release(eth_dev);
+
+ return ret;
+}
+
+/**
+ * @internal
+ * Wrapper for use by pci drivers in a .remove function to detach a ethdev
+ * interface.
+ */
+static inline int
+rte_eth_dev_pci_generic_remove(struct rte_pci_device *pci_dev,
+ eth_dev_pci_callback_t dev_uninit)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!eth_dev)
+ return -ENODEV;
+
+ if (dev_uninit) {
+ ret = dev_uninit(eth_dev);
+ if (ret)
+ return ret;
+ }
+
+ rte_eth_dev_pci_release(eth_dev);
+ return 0;
+}
+
+#endif /* _RTE_ETHDEV_PCI_H_ */
diff --git a/lib/librte_ether/rte_ethdev_vdev.h b/lib/librte_ether/rte_ethdev_vdev.h
new file mode 100644
index 00000000..fa2cb61e
--- /dev/null
+++ b/lib/librte_ether/rte_ethdev_vdev.h
@@ -0,0 +1,84 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Brocade Communications Systems, Inc.
+ * Author: Jan Blunck <jblunck@infradead.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETHDEV_VDEV_H_
+#define _RTE_ETHDEV_VDEV_H_
+
+#include <rte_malloc.h>
+#include <rte_vdev.h>
+#include <rte_ethdev.h>
+
+/**
+ * @internal
+ * Allocates a new ethdev slot for an ethernet device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param dev
+ * Pointer to virtual device
+ *
+ * @param private_data_size
+ * Size of private data structure
+ *
+ * @return
+ * A pointer to a rte_eth_dev or NULL if allocation failed.
+ */
+static inline struct rte_eth_dev *
+rte_eth_vdev_allocate(struct rte_vdev_device *dev, size_t private_data_size)
+{
+ struct rte_eth_dev *eth_dev;
+ const char *name = rte_vdev_device_name(dev);
+
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
+ return NULL;
+
+ if (private_data_size) {
+ eth_dev->data->dev_private = rte_zmalloc_socket(name,
+ private_data_size, RTE_CACHE_LINE_SIZE,
+ dev->device.numa_node);
+ if (!eth_dev->data->dev_private) {
+ rte_eth_dev_release_port(eth_dev);
+ return NULL;
+ }
+ }
+
+ eth_dev->device = &dev->device;
+ eth_dev->intr_handle = NULL;
+
+ eth_dev->data->kdrv = RTE_KDRV_NONE;
+ eth_dev->data->numa_node = dev->device.numa_node;
+ eth_dev->data->drv_name = dev->device.driver->name;
+ return eth_dev;
+}
+
+#endif /* _RTE_ETHDEV_VDEV_H_ */
diff --git a/lib/librte_ether/rte_ether_version.map b/lib/librte_ether/rte_ether_version.map
index fd622635..d6726bb1 100644
--- a/lib/librte_ether/rte_ether_version.map
+++ b/lib/librte_ether/rte_ether_version.map
@@ -7,7 +7,6 @@ DPDK_2.2 {
rte_eth_allmulticast_disable;
rte_eth_allmulticast_enable;
rte_eth_allmulticast_get;
- rte_eth_copy_pci_info;
rte_eth_dev_allocate;
rte_eth_dev_allocated;
rte_eth_dev_attach;
@@ -60,10 +59,6 @@ DPDK_2.2 {
rte_eth_dev_set_mtu;
rte_eth_dev_set_rx_queue_stats_mapping;
rte_eth_dev_set_tx_queue_stats_mapping;
- rte_eth_dev_set_vf_rx;
- rte_eth_dev_set_vf_rxmode;
- rte_eth_dev_set_vf_tx;
- rte_eth_dev_set_vf_vlan_filter;
rte_eth_dev_set_vlan_offload;
rte_eth_dev_set_vlan_pvid;
rte_eth_dev_set_vlan_strip_on_queue;
@@ -93,7 +88,6 @@ DPDK_2.2 {
rte_eth_rx_queue_info_get;
rte_eth_rx_queue_setup;
rte_eth_set_queue_rate_limit;
- rte_eth_set_vf_rate_limit;
rte_eth_stats;
rte_eth_stats_get;
rte_eth_stats_reset;
@@ -139,10 +133,26 @@ DPDK_16.07 {
} DPDK_16.04;
-DPDK_16.11 {
+DPDK_17.02 {
global:
- rte_eth_dev_pci_probe;
- rte_eth_dev_pci_remove;
+ _rte_eth_dev_reset;
+ rte_eth_dev_fw_version_get;
+ rte_flow_create;
+ rte_flow_destroy;
+ rte_flow_flush;
+ rte_flow_query;
+ rte_flow_validate;
} DPDK_16.07;
+
+DPDK_17.05 {
+ global:
+
+ rte_eth_dev_attach_secondary;
+ rte_eth_find_next;
+ rte_eth_xstats_get_by_id;
+ rte_eth_xstats_get_id_by_name;
+ rte_eth_xstats_get_names_by_id;
+
+} DPDK_17.02;
diff --git a/lib/librte_ether/rte_flow.c b/lib/librte_ether/rte_flow.c
new file mode 100644
index 00000000..aaa70d68
--- /dev/null
+++ b/lib/librte_ether/rte_flow.c
@@ -0,0 +1,159 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+
+#include <rte_errno.h>
+#include <rte_branch_prediction.h>
+#include "rte_ethdev.h"
+#include "rte_flow_driver.h"
+#include "rte_flow.h"
+
+/* Get generic flow operations structure from a port. */
+const struct rte_flow_ops *
+rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops;
+ int code;
+
+ if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
+ code = ENODEV;
+ else if (unlikely(!dev->dev_ops->filter_ctrl ||
+ dev->dev_ops->filter_ctrl(dev,
+ RTE_ETH_FILTER_GENERIC,
+ RTE_ETH_FILTER_GET,
+ &ops) ||
+ !ops))
+ code = ENOSYS;
+ else
+ return ops;
+ rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(code));
+ return NULL;
+}
+
+/* Check whether a flow rule can be created on a given port. */
+int
+rte_flow_validate(uint8_t port_id,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+ if (unlikely(!ops))
+ return -rte_errno;
+ if (likely(!!ops->validate))
+ return ops->validate(dev, attr, pattern, actions, error);
+ return -rte_flow_error_set(error, ENOSYS,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOSYS));
+}
+
+/* Create a flow rule on a given port. */
+struct rte_flow *
+rte_flow_create(uint8_t port_id,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ if (unlikely(!ops))
+ return NULL;
+ if (likely(!!ops->create))
+ return ops->create(dev, attr, pattern, actions, error);
+ rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOSYS));
+ return NULL;
+}
+
+/* Destroy a flow rule on a given port. */
+int
+rte_flow_destroy(uint8_t port_id,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ if (unlikely(!ops))
+ return -rte_errno;
+ if (likely(!!ops->destroy))
+ return ops->destroy(dev, flow, error);
+ return -rte_flow_error_set(error, ENOSYS,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOSYS));
+}
+
+/* Destroy all flow rules associated with a port. */
+int
+rte_flow_flush(uint8_t port_id,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ if (unlikely(!ops))
+ return -rte_errno;
+ if (likely(!!ops->flush))
+ return ops->flush(dev, error);
+ return -rte_flow_error_set(error, ENOSYS,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOSYS));
+}
+
+/* Query an existing flow rule. */
+int
+rte_flow_query(uint8_t port_id,
+ struct rte_flow *flow,
+ enum rte_flow_action_type action,
+ void *data,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ if (!ops)
+ return -rte_errno;
+ if (likely(!!ops->query))
+ return ops->query(dev, flow, action, data, error);
+ return -rte_flow_error_set(error, ENOSYS,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOSYS));
+}
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h
new file mode 100644
index 00000000..c47edbc9
--- /dev/null
+++ b/lib/librte_ether/rte_flow.h
@@ -0,0 +1,1198 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_FLOW_H_
+#define RTE_FLOW_H_
+
+/**
+ * @file
+ * RTE generic flow API
+ *
+ * This interface provides the ability to program packet matching and
+ * associated actions in hardware through flow rules.
+ */
+
+#include <rte_arp.h>
+#include <rte_ether.h>
+#include <rte_icmp.h>
+#include <rte_ip.h>
+#include <rte_sctp.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_byteorder.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Flow rule attributes.
+ *
+ * Priorities are set on two levels: per group and per rule within groups.
+ *
+ * Lower values denote higher priority, the highest priority for both levels
+ * is 0, so that a rule with priority 0 in group 8 is always matched after a
+ * rule with priority 8 in group 0.
+ *
+ * Although optional, applications are encouraged to group similar rules as
+ * much as possible to fully take advantage of hardware capabilities
+ * (e.g. optimized matching) and work around limitations (e.g. a single
+ * pattern type possibly allowed in a given group).
+ *
+ * Group and priority levels are arbitrary and up to the application, they
+ * do not need to be contiguous nor start from 0, however the maximum number
+ * varies between devices and may be affected by existing flow rules.
+ *
+ * If a packet is matched by several rules of a given group for a given
+ * priority level, the outcome is undefined. It can take any path, may be
+ * duplicated or even cause unrecoverable errors.
+ *
+ * Note that support for more than a single group and priority level is not
+ * guaranteed.
+ *
+ * Flow rules can apply to inbound and/or outbound traffic (ingress/egress).
+ *
+ * Several pattern items and actions are valid and can be used in both
+ * directions. Those valid for only one direction are described as such.
+ *
+ * At least one direction must be specified.
+ *
+ * Specifying both directions at once for a given rule is not recommended
+ * but may be valid in a few cases (e.g. shared counter).
+ */
+struct rte_flow_attr {
+ uint32_t group; /**< Priority group. */
+ uint32_t priority; /**< Priority level within group. */
+ uint32_t ingress:1; /**< Rule applies to ingress traffic. */
+ uint32_t egress:1; /**< Rule applies to egress traffic. */
+ uint32_t reserved:30; /**< Reserved, must be zero. */
+};
+
+/**
+ * Matching pattern item types.
+ *
+ * Pattern items fall in two categories:
+ *
+ * - Matching protocol headers and packet data (ANY, RAW, ETH, VLAN, IPV4,
+ * IPV6, ICMP, UDP, TCP, SCTP, VXLAN and so on), usually associated with a
+ * specification structure. These must be stacked in the same order as the
+ * protocol layers to match, starting from the lowest.
+ *
+ * - Matching meta-data or affecting pattern processing (END, VOID, INVERT,
+ * PF, VF, PORT and so on), often without a specification structure. Since
+ * they do not match packet contents, these can be specified anywhere
+ * within item lists without affecting others.
+ *
+ * See the description of individual types for more information. Those
+ * marked with [META] fall into the second category.
+ */
+enum rte_flow_item_type {
+ /**
+ * [META]
+ *
+ * End marker for item lists. Prevents further processing of items,
+ * thereby ending the pattern.
+ *
+ * No associated specification structure.
+ */
+ RTE_FLOW_ITEM_TYPE_END,
+
+ /**
+ * [META]
+ *
+ * Used as a placeholder for convenience. It is ignored and simply
+ * discarded by PMDs.
+ *
+ * No associated specification structure.
+ */
+ RTE_FLOW_ITEM_TYPE_VOID,
+
+ /**
+ * [META]
+ *
+ * Inverted matching, i.e. process packets that do not match the
+ * pattern.
+ *
+ * No associated specification structure.
+ */
+ RTE_FLOW_ITEM_TYPE_INVERT,
+
+ /**
+ * Matches any protocol in place of the current layer, a single ANY
+ * may also stand for several protocol layers.
+ *
+ * See struct rte_flow_item_any.
+ */
+ RTE_FLOW_ITEM_TYPE_ANY,
+
+ /**
+ * [META]
+ *
+ * Matches packets addressed to the physical function of the device.
+ *
+ * If the underlying device function differs from the one that would
+ * normally receive the matched traffic, specifying this item
+ * prevents it from reaching that device unless the flow rule
+ * contains a PF action. Packets are not duplicated between device
+ * instances by default.
+ *
+ * No associated specification structure.
+ */
+ RTE_FLOW_ITEM_TYPE_PF,
+
+ /**
+ * [META]
+ *
+ * Matches packets addressed to a virtual function ID of the device.
+ *
+ * If the underlying device function differs from the one that would
+ * normally receive the matched traffic, specifying this item
+ * prevents it from reaching that device unless the flow rule
+ * contains a VF action. Packets are not duplicated between device
+ * instances by default.
+ *
+ * See struct rte_flow_item_vf.
+ */
+ RTE_FLOW_ITEM_TYPE_VF,
+
+ /**
+ * [META]
+ *
+ * Matches packets coming from the specified physical port of the
+ * underlying device.
+ *
+ * The first PORT item overrides the physical port normally
+ * associated with the specified DPDK input port (port_id). This
+ * item can be provided several times to match additional physical
+ * ports.
+ *
+ * See struct rte_flow_item_port.
+ */
+ RTE_FLOW_ITEM_TYPE_PORT,
+
+ /**
+ * Matches a byte string of a given length at a given offset.
+ *
+ * See struct rte_flow_item_raw.
+ */
+ RTE_FLOW_ITEM_TYPE_RAW,
+
+ /**
+ * Matches an Ethernet header.
+ *
+ * See struct rte_flow_item_eth.
+ */
+ RTE_FLOW_ITEM_TYPE_ETH,
+
+ /**
+ * Matches an 802.1Q/ad VLAN tag.
+ *
+ * See struct rte_flow_item_vlan.
+ */
+ RTE_FLOW_ITEM_TYPE_VLAN,
+
+ /**
+ * Matches an IPv4 header.
+ *
+ * See struct rte_flow_item_ipv4.
+ */
+ RTE_FLOW_ITEM_TYPE_IPV4,
+
+ /**
+ * Matches an IPv6 header.
+ *
+ * See struct rte_flow_item_ipv6.
+ */
+ RTE_FLOW_ITEM_TYPE_IPV6,
+
+ /**
+ * Matches an ICMP header.
+ *
+ * See struct rte_flow_item_icmp.
+ */
+ RTE_FLOW_ITEM_TYPE_ICMP,
+
+ /**
+ * Matches a UDP header.
+ *
+ * See struct rte_flow_item_udp.
+ */
+ RTE_FLOW_ITEM_TYPE_UDP,
+
+ /**
+ * Matches a TCP header.
+ *
+ * See struct rte_flow_item_tcp.
+ */
+ RTE_FLOW_ITEM_TYPE_TCP,
+
+ /**
+ * Matches a SCTP header.
+ *
+ * See struct rte_flow_item_sctp.
+ */
+ RTE_FLOW_ITEM_TYPE_SCTP,
+
+ /**
+ * Matches a VXLAN header.
+ *
+ * See struct rte_flow_item_vxlan.
+ */
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+
+ /**
+ * Matches a E_TAG header.
+ *
+ * See struct rte_flow_item_e_tag.
+ */
+ RTE_FLOW_ITEM_TYPE_E_TAG,
+
+ /**
+ * Matches a NVGRE header.
+ *
+ * See struct rte_flow_item_nvgre.
+ */
+ RTE_FLOW_ITEM_TYPE_NVGRE,
+
+ /**
+ * Matches a MPLS header.
+ *
+ * See struct rte_flow_item_mpls.
+ */
+ RTE_FLOW_ITEM_TYPE_MPLS,
+
+ /**
+ * Matches a GRE header.
+ *
+ * See struct rte_flow_item_gre.
+ */
+ RTE_FLOW_ITEM_TYPE_GRE,
+};
+
+/**
+ * RTE_FLOW_ITEM_TYPE_ANY
+ *
+ * Matches any protocol in place of the current layer, a single ANY may also
+ * stand for several protocol layers.
+ *
+ * This is usually specified as the first pattern item when looking for a
+ * protocol anywhere in a packet.
+ *
+ * A zeroed mask stands for any number of layers.
+ */
+struct rte_flow_item_any {
+ uint32_t num; /**< Number of layers covered. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_ANY. */
+#ifndef __cplusplus
+static const struct rte_flow_item_any rte_flow_item_any_mask = {
+ .num = 0x00000000,
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_VF
+ *
+ * Matches packets addressed to a virtual function ID of the device.
+ *
+ * If the underlying device function differs from the one that would
+ * normally receive the matched traffic, specifying this item prevents it
+ * from reaching that device unless the flow rule contains a VF
+ * action. Packets are not duplicated between device instances by default.
+ *
+ * - Likely to return an error or never match any traffic if this causes a
+ * VF device to match traffic addressed to a different VF.
+ * - Can be specified multiple times to match traffic addressed to several
+ * VF IDs.
+ * - Can be combined with a PF item to match both PF and VF traffic.
+ *
+ * A zeroed mask can be used to match any VF ID.
+ */
+struct rte_flow_item_vf {
+ uint32_t id; /**< Destination VF ID. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_VF. */
+#ifndef __cplusplus
+static const struct rte_flow_item_vf rte_flow_item_vf_mask = {
+ .id = 0x00000000,
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_PORT
+ *
+ * Matches packets coming from the specified physical port of the underlying
+ * device.
+ *
+ * The first PORT item overrides the physical port normally associated with
+ * the specified DPDK input port (port_id). This item can be provided
+ * several times to match additional physical ports.
+ *
+ * Note that physical ports are not necessarily tied to DPDK input ports
+ * (port_id) when those are not under DPDK control. Possible values are
+ * specific to each device, they are not necessarily indexed from zero and
+ * may not be contiguous.
+ *
+ * As a device property, the list of allowed values as well as the value
+ * associated with a port_id should be retrieved by other means.
+ *
+ * A zeroed mask can be used to match any port index.
+ */
+struct rte_flow_item_port {
+ uint32_t index; /**< Physical port index. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_PORT. */
+#ifndef __cplusplus
+static const struct rte_flow_item_port rte_flow_item_port_mask = {
+ .index = 0x00000000,
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_RAW
+ *
+ * Matches a byte string of a given length at a given offset.
+ *
+ * Offset is either absolute (using the start of the packet) or relative to
+ * the end of the previous matched item in the stack, in which case negative
+ * values are allowed.
+ *
+ * If search is enabled, offset is used as the starting point. The search
+ * area can be delimited by setting limit to a nonzero value, which is the
+ * maximum number of bytes after offset where the pattern may start.
+ *
+ * Matching a zero-length pattern is allowed, doing so resets the relative
+ * offset for subsequent items.
+ *
+ * This type does not support ranges (struct rte_flow_item.last).
+ */
+struct rte_flow_item_raw {
+ uint32_t relative:1; /**< Look for pattern after the previous item. */
+ uint32_t search:1; /**< Search pattern from offset (see also limit). */
+ uint32_t reserved:30; /**< Reserved, must be set to zero. */
+ int32_t offset; /**< Absolute or relative offset for pattern. */
+ uint16_t limit; /**< Search area limit for start of pattern. */
+ uint16_t length; /**< Pattern length. */
+ uint8_t pattern[]; /**< Byte string to look for. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_RAW. */
+#ifndef __cplusplus
+static const struct rte_flow_item_raw rte_flow_item_raw_mask = {
+ .relative = 1,
+ .search = 1,
+ .reserved = 0x3fffffff,
+ .offset = 0xffffffff,
+ .limit = 0xffff,
+ .length = 0xffff,
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_ETH
+ *
+ * Matches an Ethernet header.
+ */
+struct rte_flow_item_eth {
+ struct ether_addr dst; /**< Destination MAC. */
+ struct ether_addr src; /**< Source MAC. */
+ uint16_t type; /**< EtherType. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_ETH. */
+#ifndef __cplusplus
+static const struct rte_flow_item_eth rte_flow_item_eth_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = 0x0000,
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_VLAN
+ *
+ * Matches an 802.1Q/ad VLAN tag.
+ *
+ * This type normally follows either RTE_FLOW_ITEM_TYPE_ETH or
+ * RTE_FLOW_ITEM_TYPE_VLAN.
+ */
+struct rte_flow_item_vlan {
+ uint16_t tpid; /**< Tag protocol identifier. */
+ uint16_t tci; /**< Tag control information. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_VLAN. */
+#ifndef __cplusplus
+static const struct rte_flow_item_vlan rte_flow_item_vlan_mask = {
+ .tpid = 0x0000,
+ .tci = 0xffff,
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_IPV4
+ *
+ * Matches an IPv4 header.
+ *
+ * Note: IPv4 options are handled by dedicated pattern items.
+ */
+struct rte_flow_item_ipv4 {
+ struct ipv4_hdr hdr; /**< IPv4 header definition. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_IPV4. */
+#ifndef __cplusplus
+static const struct rte_flow_item_ipv4 rte_flow_item_ipv4_mask = {
+ .hdr = {
+ .src_addr = 0xffffffff,
+ .dst_addr = 0xffffffff,
+ },
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_IPV6.
+ *
+ * Matches an IPv6 header.
+ *
+ * Note: IPv6 options are handled by dedicated pattern items.
+ */
+struct rte_flow_item_ipv6 {
+ struct ipv6_hdr hdr; /**< IPv6 header definition. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_IPV6. */
+#ifndef __cplusplus
+static const struct rte_flow_item_ipv6 rte_flow_item_ipv6_mask = {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_ICMP.
+ *
+ * Matches an ICMP header.
+ */
+struct rte_flow_item_icmp {
+ struct icmp_hdr hdr; /**< ICMP header definition. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_icmp rte_flow_item_icmp_mask = {
+ .hdr = {
+ .icmp_type = 0xff,
+ .icmp_code = 0xff,
+ },
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_UDP.
+ *
+ * Matches a UDP header.
+ */
+struct rte_flow_item_udp {
+ struct udp_hdr hdr; /**< UDP header definition. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_UDP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_udp rte_flow_item_udp_mask = {
+ .hdr = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ },
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_TCP.
+ *
+ * Matches a TCP header.
+ */
+struct rte_flow_item_tcp {
+ struct tcp_hdr hdr; /**< TCP header definition. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_TCP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_tcp rte_flow_item_tcp_mask = {
+ .hdr = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ },
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_SCTP.
+ *
+ * Matches a SCTP header.
+ */
+struct rte_flow_item_sctp {
+ struct sctp_hdr hdr; /**< SCTP header definition. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_SCTP. */
+#ifndef __cplusplus
+static const struct rte_flow_item_sctp rte_flow_item_sctp_mask = {
+ .hdr = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ },
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_VXLAN.
+ *
+ * Matches a VXLAN header (RFC 7348).
+ */
+struct rte_flow_item_vxlan {
+ uint8_t flags; /**< Normally 0x08 (I flag). */
+ uint8_t rsvd0[3]; /**< Reserved, normally 0x000000. */
+ uint8_t vni[3]; /**< VXLAN identifier. */
+ uint8_t rsvd1; /**< Reserved, normally 0x00. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_VXLAN. */
+#ifndef __cplusplus
+static const struct rte_flow_item_vxlan rte_flow_item_vxlan_mask = {
+ .vni = "\xff\xff\xff",
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_E_TAG.
+ *
+ * Matches a E-tag header.
+ */
+struct rte_flow_item_e_tag {
+ uint16_t tpid; /**< Tag protocol identifier (0x893F). */
+ /**
+ * E-Tag control information (E-TCI).
+ * E-PCP (3b), E-DEI (1b), ingress E-CID base (12b).
+ */
+ uint16_t epcp_edei_in_ecid_b;
+ /** Reserved (2b), GRP (2b), E-CID base (12b). */
+ uint16_t rsvd_grp_ecid_b;
+ uint8_t in_ecid_e; /**< Ingress E-CID ext. */
+ uint8_t ecid_e; /**< E-CID ext. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_E_TAG. */
+#ifndef __cplusplus
+static const struct rte_flow_item_e_tag rte_flow_item_e_tag_mask = {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ .rsvd_grp_ecid_b = 0x3fff,
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ .rsvd_grp_ecid_b = 0xff3f,
+#else
+#error Unsupported endianness.
+#endif
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_NVGRE.
+ *
+ * Matches a NVGRE header.
+ */
+struct rte_flow_item_nvgre {
+ /**
+ * Checksum (1b), undefined (1b), key bit (1b), sequence number (1b),
+ * reserved 0 (9b), version (3b).
+ *
+ * c_k_s_rsvd0_ver must have value 0x2000 according to RFC 7637.
+ */
+ uint16_t c_k_s_rsvd0_ver;
+ uint16_t protocol; /**< Protocol type (0x6558). */
+ uint8_t tni[3]; /**< Virtual subnet ID. */
+ uint8_t flow_id; /**< Flow ID. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_NVGRE. */
+#ifndef __cplusplus
+static const struct rte_flow_item_nvgre rte_flow_item_nvgre_mask = {
+ .tni = "\xff\xff\xff",
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_MPLS.
+ *
+ * Matches a MPLS header.
+ */
+struct rte_flow_item_mpls {
+ /**
+ * Label (20b), TC (3b), Bottom of Stack (1b).
+ */
+ uint8_t label_tc_s[3];
+ uint8_t ttl; /** Time-to-Live. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_MPLS. */
+#ifndef __cplusplus
+static const struct rte_flow_item_mpls rte_flow_item_mpls_mask = {
+ .label_tc_s = "\xff\xff\xf0",
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_GRE.
+ *
+ * Matches a GRE header.
+ */
+struct rte_flow_item_gre {
+ /**
+ * Checksum (1b), reserved 0 (12b), version (3b).
+ * Refer to RFC 2784.
+ */
+ uint16_t c_rsvd0_ver;
+ uint16_t protocol; /**< Protocol type. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_GRE. */
+#ifndef __cplusplus
+static const struct rte_flow_item_gre rte_flow_item_gre_mask = {
+ .protocol = 0xffff,
+};
+#endif
+
+/**
+ * Matching pattern item definition.
+ *
+ * A pattern is formed by stacking items starting from the lowest protocol
+ * layer to match. This stacking restriction does not apply to meta items
+ * which can be placed anywhere in the stack without affecting the meaning
+ * of the resulting pattern.
+ *
+ * Patterns are terminated by END items.
+ *
+ * The spec field should be a valid pointer to a structure of the related
+ * item type. It may remain unspecified (NULL) in many cases to request
+ * broad (nonspecific) matching. In such cases, last and mask must also be
+ * set to NULL.
+ *
+ * Optionally, last can point to a structure of the same type to define an
+ * inclusive range. This is mostly supported by integer and address fields,
+ * may cause errors otherwise. Fields that do not support ranges must be set
+ * to 0 or to the same value as the corresponding fields in spec.
+ *
+ * Only the fields defined to nonzero values in the default masks (see
+ * rte_flow_item_{name}_mask constants) are considered relevant by
+ * default. This can be overridden by providing a mask structure of the
+ * same type with applicable bits set to one. It can also be used to
+ * partially filter out specific fields (e.g. as an alternate mean to match
+ * ranges of IP addresses).
+ *
+ * Mask is a simple bit-mask applied before interpreting the contents of
+ * spec and last, which may yield unexpected results if not used
+ * carefully. For example, if for an IPv4 address field, spec provides
+ * 10.1.2.3, last provides 10.3.4.5 and mask provides 255.255.0.0, the
+ * effective range becomes 10.1.0.0 to 10.3.255.255.
+ */
+struct rte_flow_item {
+ enum rte_flow_item_type type; /**< Item type. */
+ const void *spec; /**< Pointer to item specification structure. */
+ const void *last; /**< Defines an inclusive range (spec to last). */
+ const void *mask; /**< Bit-mask applied to spec and last. */
+};
+
+/**
+ * Action types.
+ *
+ * Each possible action is represented by a type. Some have associated
+ * configuration structures. Several actions combined in a list can be
+ * affected to a flow rule. That list is not ordered.
+ *
+ * They fall in three categories:
+ *
+ * - Terminating actions (such as QUEUE, DROP, RSS, PF, VF) that prevent
+ * processing matched packets by subsequent flow rules, unless overridden
+ * with PASSTHRU.
+ *
+ * - Non terminating actions (PASSTHRU, DUP) that leave matched packets up
+ * for additional processing by subsequent flow rules.
+ *
+ * - Other non terminating meta actions that do not affect the fate of
+ * packets (END, VOID, MARK, FLAG, COUNT).
+ *
+ * When several actions are combined in a flow rule, they should all have
+ * different types (e.g. dropping a packet twice is not possible).
+ *
+ * Only the last action of a given type is taken into account. PMDs still
+ * perform error checking on the entire list.
+ *
+ * Note that PASSTHRU is the only action able to override a terminating
+ * rule.
+ */
+enum rte_flow_action_type {
+ /**
+ * [META]
+ *
+ * End marker for action lists. Prevents further processing of
+ * actions, thereby ending the list.
+ *
+ * No associated configuration structure.
+ */
+ RTE_FLOW_ACTION_TYPE_END,
+
+ /**
+ * [META]
+ *
+ * Used as a placeholder for convenience. It is ignored and simply
+ * discarded by PMDs.
+ *
+ * No associated configuration structure.
+ */
+ RTE_FLOW_ACTION_TYPE_VOID,
+
+ /**
+ * Leaves packets up for additional processing by subsequent flow
+ * rules. This is the default when a rule does not contain a
+ * terminating action, but can be specified to force a rule to
+ * become non-terminating.
+ *
+ * No associated configuration structure.
+ */
+ RTE_FLOW_ACTION_TYPE_PASSTHRU,
+
+ /**
+ * [META]
+ *
+ * Attaches an integer value to packets and sets PKT_RX_FDIR and
+ * PKT_RX_FDIR_ID mbuf flags.
+ *
+ * See struct rte_flow_action_mark.
+ */
+ RTE_FLOW_ACTION_TYPE_MARK,
+
+ /**
+ * [META]
+ *
+ * Flags packets. Similar to MARK without a specific value; only
+ * sets the PKT_RX_FDIR mbuf flag.
+ *
+ * No associated configuration structure.
+ */
+ RTE_FLOW_ACTION_TYPE_FLAG,
+
+ /**
+ * Assigns packets to a given queue index.
+ *
+ * See struct rte_flow_action_queue.
+ */
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+
+ /**
+ * Drops packets.
+ *
+ * PASSTHRU overrides this action if both are specified.
+ *
+ * No associated configuration structure.
+ */
+ RTE_FLOW_ACTION_TYPE_DROP,
+
+ /**
+ * [META]
+ *
+ * Enables counters for this rule.
+ *
+ * These counters can be retrieved and reset through rte_flow_query(),
+ * see struct rte_flow_query_count.
+ *
+ * No associated configuration structure.
+ */
+ RTE_FLOW_ACTION_TYPE_COUNT,
+
+ /**
+ * Duplicates packets to a given queue index.
+ *
+ * This is normally combined with QUEUE, however when used alone, it
+ * is actually similar to QUEUE + PASSTHRU.
+ *
+ * See struct rte_flow_action_dup.
+ */
+ RTE_FLOW_ACTION_TYPE_DUP,
+
+ /**
+ * Similar to QUEUE, except RSS is additionally performed on packets
+ * to spread them among several queues according to the provided
+ * parameters.
+ *
+ * See struct rte_flow_action_rss.
+ */
+ RTE_FLOW_ACTION_TYPE_RSS,
+
+ /**
+ * Redirects packets to the physical function (PF) of the current
+ * device.
+ *
+ * No associated configuration structure.
+ */
+ RTE_FLOW_ACTION_TYPE_PF,
+
+ /**
+ * Redirects packets to the virtual function (VF) of the current
+ * device with the specified ID.
+ *
+ * See struct rte_flow_action_vf.
+ */
+ RTE_FLOW_ACTION_TYPE_VF,
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_MARK
+ *
+ * Attaches an integer value to packets and sets PKT_RX_FDIR and
+ * PKT_RX_FDIR_ID mbuf flags.
+ *
+ * This value is arbitrary and application-defined. Maximum allowed value
+ * depends on the underlying implementation. It is returned in the
+ * hash.fdir.hi mbuf field.
+ */
+struct rte_flow_action_mark {
+ uint32_t id; /**< Integer value to return with packets. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_QUEUE
+ *
+ * Assign packets to a given queue index.
+ *
+ * Terminating by default.
+ */
+struct rte_flow_action_queue {
+ uint16_t index; /**< Queue index to use. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_COUNT (query)
+ *
+ * Query structure to retrieve and reset flow rule counters.
+ */
+struct rte_flow_query_count {
+ uint32_t reset:1; /**< Reset counters after query [in]. */
+ uint32_t hits_set:1; /**< hits field is set [out]. */
+ uint32_t bytes_set:1; /**< bytes field is set [out]. */
+ uint32_t reserved:29; /**< Reserved, must be zero [in, out]. */
+ uint64_t hits; /**< Number of hits for this rule [out]. */
+ uint64_t bytes; /**< Number of bytes through this rule [out]. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_DUP
+ *
+ * Duplicates packets to a given queue index.
+ *
+ * This is normally combined with QUEUE, however when used alone, it is
+ * actually similar to QUEUE + PASSTHRU.
+ *
+ * Non-terminating by default.
+ */
+struct rte_flow_action_dup {
+ uint16_t index; /**< Queue index to duplicate packets to. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_RSS
+ *
+ * Similar to QUEUE, except RSS is additionally performed on packets to
+ * spread them among several queues according to the provided parameters.
+ *
+ * Note: RSS hash result is stored in the hash.rss mbuf field which overlaps
+ * hash.fdir.lo. Since the MARK action sets the hash.fdir.hi field only,
+ * both can be requested simultaneously.
+ *
+ * Terminating by default.
+ */
+struct rte_flow_action_rss {
+ const struct rte_eth_rss_conf *rss_conf; /**< RSS parameters. */
+ uint16_t num; /**< Number of entries in queue[]. */
+ uint16_t queue[]; /**< Queues indices to use. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_VF
+ *
+ * Redirects packets to a virtual function (VF) of the current device.
+ *
+ * Packets matched by a VF pattern item can be redirected to their original
+ * VF ID instead of the specified one. This parameter may not be available
+ * and is not guaranteed to work properly if the VF part is matched by a
+ * prior flow rule or if packets are not addressed to a VF in the first
+ * place.
+ *
+ * Terminating by default.
+ */
+struct rte_flow_action_vf {
+ uint32_t original:1; /**< Use original VF ID if possible. */
+ uint32_t reserved:31; /**< Reserved, must be zero. */
+ uint32_t id; /**< VF ID to redirect packets to. */
+};
+
+/**
+ * Definition of a single action.
+ *
+ * A list of actions is terminated by a END action.
+ *
+ * For simple actions without a configuration structure, conf remains NULL.
+ */
+struct rte_flow_action {
+ enum rte_flow_action_type type; /**< Action type. */
+ const void *conf; /**< Pointer to action configuration structure. */
+};
+
+/**
+ * Opaque type returned after successfully creating a flow.
+ *
+ * This handle can be used to manage and query the related flow (e.g. to
+ * destroy it or retrieve counters).
+ */
+struct rte_flow;
+
+/**
+ * Verbose error types.
+ *
+ * Most of them provide the type of the object referenced by struct
+ * rte_flow_error.cause.
+ */
+enum rte_flow_error_type {
+ RTE_FLOW_ERROR_TYPE_NONE, /**< No error. */
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, /**< Cause unspecified. */
+ RTE_FLOW_ERROR_TYPE_HANDLE, /**< Flow rule (handle). */
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, /**< Group field. */
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, /**< Priority field. */
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, /**< Ingress field. */
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, /**< Egress field. */
+ RTE_FLOW_ERROR_TYPE_ATTR, /**< Attributes structure. */
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM, /**< Pattern length. */
+ RTE_FLOW_ERROR_TYPE_ITEM, /**< Specific pattern item. */
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, /**< Number of actions. */
+ RTE_FLOW_ERROR_TYPE_ACTION, /**< Specific action. */
+};
+
+/**
+ * Verbose error structure definition.
+ *
+ * This object is normally allocated by applications and set by PMDs, the
+ * message points to a constant string which does not need to be freed by
+ * the application, however its pointer can be considered valid only as long
+ * as its associated DPDK port remains configured. Closing the underlying
+ * device or unloading the PMD invalidates it.
+ *
+ * Both cause and message may be NULL regardless of the error type.
+ */
+struct rte_flow_error {
+ enum rte_flow_error_type type; /**< Cause field and error types. */
+ const void *cause; /**< Object responsible for the error. */
+ const char *message; /**< Human-readable error message. */
+};
+
+/**
+ * Check whether a flow rule can be created on a given port.
+ *
+ * The flow rule is validated for correctness and whether it could be accepted
+ * by the device given sufficient resources. The rule is checked against the
+ * current device mode and queue configuration. The flow rule may also
+ * optionally be validated against existing flow rules and device resources.
+ * This function has no effect on the target device.
+ *
+ * The returned value is guaranteed to remain valid only as long as no
+ * successful calls to rte_flow_create() or rte_flow_destroy() are made in
+ * the meantime and no device parameter affecting flow rules in any way are
+ * modified, due to possible collisions or resource limitations (although in
+ * such cases EINVAL should not be returned).
+ *
+ * @param port_id
+ * Port identifier of Ethernet device.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 if flow rule is valid and can be created. A negative errno value
+ * otherwise (rte_errno is also set), the following errors are defined:
+ *
+ * -ENOSYS: underlying device does not support this functionality.
+ *
+ * -EINVAL: unknown or invalid rule specification.
+ *
+ * -ENOTSUP: valid but unsupported rule specification (e.g. partial
+ * bit-masks are unsupported).
+ *
+ * -EEXIST: collision with an existing rule. Only returned if device
+ * supports flow rule collision checking and there was a flow rule
+ * collision. Not receiving this return code is no guarantee that creating
+ * the rule will not fail due to a collision.
+ *
+ * -ENOMEM: not enough memory to execute the function, or if the device
+ * supports resource validation, resource limitation on the device.
+ *
+ * -EBUSY: action cannot be performed due to busy device resources, may
+ * succeed if the affected queues or even the entire port are in a stopped
+ * state (see rte_eth_dev_rx_queue_stop() and rte_eth_dev_stop()).
+ */
+int
+rte_flow_validate(uint8_t port_id,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+/**
+ * Create a flow rule on a given port.
+ *
+ * @param port_id
+ * Port identifier of Ethernet device.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * A valid handle in case of success, NULL otherwise and rte_errno is set
+ * to the positive version of one of the error codes defined for
+ * rte_flow_validate().
+ */
+struct rte_flow *
+rte_flow_create(uint8_t port_id,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+/**
+ * Destroy a flow rule on a given port.
+ *
+ * Failure to destroy a flow rule handle may occur when other flow rules
+ * depend on it, and destroying it would result in an inconsistent state.
+ *
+ * This function is only guaranteed to succeed if handles are destroyed in
+ * reverse order of their creation.
+ *
+ * @param port_id
+ * Port identifier of Ethernet device.
+ * @param flow
+ * Flow rule handle to destroy.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+rte_flow_destroy(uint8_t port_id,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+
+/**
+ * Destroy all flow rules associated with a port.
+ *
+ * In the unlikely event of failure, handles are still considered destroyed
+ * and no longer valid but the port must be assumed to be in an inconsistent
+ * state.
+ *
+ * @param port_id
+ * Port identifier of Ethernet device.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+rte_flow_flush(uint8_t port_id,
+ struct rte_flow_error *error);
+
+/**
+ * Query an existing flow rule.
+ *
+ * This function allows retrieving flow-specific data such as counters.
+ * Data is gathered by special actions which must be present in the flow
+ * rule definition.
+ *
+ * \see RTE_FLOW_ACTION_TYPE_COUNT
+ *
+ * @param port_id
+ * Port identifier of Ethernet device.
+ * @param flow
+ * Flow rule handle to query.
+ * @param action
+ * Action type to query.
+ * @param[in, out] data
+ * Pointer to storage for the associated query data type.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+rte_flow_query(uint8_t port_id,
+ struct rte_flow *flow,
+ enum rte_flow_action_type action,
+ void *data,
+ struct rte_flow_error *error);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_FLOW_H_ */
diff --git a/lib/librte_ether/rte_flow_driver.h b/lib/librte_ether/rte_flow_driver.h
new file mode 100644
index 00000000..da5749d5
--- /dev/null
+++ b/lib/librte_ether/rte_flow_driver.h
@@ -0,0 +1,182 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_FLOW_DRIVER_H_
+#define RTE_FLOW_DRIVER_H_
+
+/**
+ * @file
+ * RTE generic flow API (driver side)
+ *
+ * This file provides implementation helpers for internal use by PMDs, they
+ * are not intended to be exposed to applications and are not subject to ABI
+ * versioning.
+ */
+
+#include <stdint.h>
+
+#include <rte_errno.h>
+#include "rte_ethdev.h"
+#include "rte_flow.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Generic flow operations structure implemented and returned by PMDs.
+ *
+ * To implement this API, PMDs must handle the RTE_ETH_FILTER_GENERIC filter
+ * type in their .filter_ctrl callback function (struct eth_dev_ops) as well
+ * as the RTE_ETH_FILTER_GET filter operation.
+ *
+ * If successful, this operation must result in a pointer to a PMD-specific
+ * struct rte_flow_ops written to the argument address as described below:
+ *
+ * \code
+ *
+ * // PMD filter_ctrl callback
+ *
+ * static const struct rte_flow_ops pmd_flow_ops = { ... };
+ *
+ * switch (filter_type) {
+ * case RTE_ETH_FILTER_GENERIC:
+ * if (filter_op != RTE_ETH_FILTER_GET)
+ * return -EINVAL;
+ * *(const void **)arg = &pmd_flow_ops;
+ * return 0;
+ * }
+ *
+ * \endcode
+ *
+ * See also rte_flow_ops_get().
+ *
+ * These callback functions are not supposed to be used by applications
+ * directly, which must rely on the API defined in rte_flow.h.
+ *
+ * Public-facing wrapper functions perform a few consistency checks so that
+ * unimplemented (i.e. NULL) callbacks simply return -ENOTSUP. These
+ * callbacks otherwise only differ by their first argument (with port ID
+ * already resolved to a pointer to struct rte_eth_dev).
+ */
+struct rte_flow_ops {
+ /** See rte_flow_validate(). */
+ int (*validate)
+ (struct rte_eth_dev *,
+ const struct rte_flow_attr *,
+ const struct rte_flow_item [],
+ const struct rte_flow_action [],
+ struct rte_flow_error *);
+ /** See rte_flow_create(). */
+ struct rte_flow *(*create)
+ (struct rte_eth_dev *,
+ const struct rte_flow_attr *,
+ const struct rte_flow_item [],
+ const struct rte_flow_action [],
+ struct rte_flow_error *);
+ /** See rte_flow_destroy(). */
+ int (*destroy)
+ (struct rte_eth_dev *,
+ struct rte_flow *,
+ struct rte_flow_error *);
+ /** See rte_flow_flush(). */
+ int (*flush)
+ (struct rte_eth_dev *,
+ struct rte_flow_error *);
+ /** See rte_flow_query(). */
+ int (*query)
+ (struct rte_eth_dev *,
+ struct rte_flow *,
+ enum rte_flow_action_type,
+ void *,
+ struct rte_flow_error *);
+};
+
+/**
+ * Initialize generic flow error structure.
+ *
+ * This function also sets rte_errno to a given value.
+ *
+ * @param[out] error
+ * Pointer to flow error structure (may be NULL).
+ * @param code
+ * Related error code (rte_errno).
+ * @param type
+ * Cause field and error types.
+ * @param cause
+ * Object responsible for the error.
+ * @param message
+ * Human-readable error message.
+ *
+ * @return
+ * Error code.
+ */
+static inline int
+rte_flow_error_set(struct rte_flow_error *error,
+ int code,
+ enum rte_flow_error_type type,
+ const void *cause,
+ const char *message)
+{
+ if (error) {
+ *error = (struct rte_flow_error){
+ .type = type,
+ .cause = cause,
+ .message = message,
+ };
+ }
+ rte_errno = code;
+ return code;
+}
+
+/**
+ * Get generic flow operations structure from a port.
+ *
+ * @param port_id
+ * Port identifier to query.
+ * @param[out] error
+ * Pointer to flow error structure.
+ *
+ * @return
+ * The flow operations structure associated with port_id, NULL in case of
+ * error, in which case rte_errno is set and the error structure contains
+ * additional details.
+ */
+const struct rte_flow_ops *
+rte_flow_ops_get(uint8_t port_id, struct rte_flow_error *error);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_FLOW_DRIVER_H_ */
diff --git a/mk/arch/tile/rte.vars.mk b/lib/librte_eventdev/Makefile
index 5ad37389..e06346a6 100644
--- a/mk/arch/tile/rte.vars.mk
+++ b/lib/librte_eventdev/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright (C) EZchip Semiconductor Ltd. 2015.
+# Copyright(c) 2016 Cavium networks. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -12,7 +12,7 @@
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of EZchip Semiconductor nor the names of its
+# * Neither the name of Cavium networks nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
@@ -28,12 +28,26 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+include $(RTE_SDK)/mk/rte.vars.mk
-ARCH ?= tile
-CROSS ?=
+# library name
+LIB = librte_eventdev.a
-CPU_CFLAGS ?=
-CPU_LDFLAGS ?=
-CPU_ASFLAGS ?=
+# library version
+LIBABIVER := 1
-export ARCH CROSS CPU_CFLAGS CPU_LDFLAGS CPU_ASFLAGS
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library source files
+SRCS-y += rte_eventdev.c
+
+# export include files
+SYMLINK-y-include += rte_eventdev.h
+SYMLINK-y-include += rte_eventdev_pmd.h
+
+# versioning export map
+EXPORT_MAP := rte_eventdev_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
new file mode 100644
index 00000000..20afc3f0
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -0,0 +1,1345 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Cavium networks. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+
+#include "rte_eventdev.h"
+#include "rte_eventdev_pmd.h"
+
+struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
+
+struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];
+
+static struct rte_eventdev_global eventdev_globals = {
+ .nb_devs = 0
+};
+
+struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
+
+/* Event dev north bound API implementation */
+
+uint8_t
+rte_event_dev_count(void)
+{
+ return rte_eventdev_globals->nb_devs;
+}
+
+int
+rte_event_dev_get_dev_id(const char *name)
+{
+ int i;
+
+ if (!name)
+ return -EINVAL;
+
+ for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
+ if ((strcmp(rte_event_devices[i].data->name, name)
+ == 0) &&
+ (rte_event_devices[i].attached ==
+ RTE_EVENTDEV_ATTACHED))
+ return i;
+ return -ENODEV;
+}
+
+int
+rte_event_dev_socket_id(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ return dev->data->socket_id;
+}
+
+int
+rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (dev_info == NULL)
+ return -EINVAL;
+
+ memset(dev_info, 0, sizeof(struct rte_event_dev_info));
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+
+ dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
+
+ dev_info->dev = dev->dev;
+ if (dev->driver)
+ dev_info->driver_name = dev->driver->pci_drv.driver.name;
+ return 0;
+}
+
+static inline int
+rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
+{
+ uint8_t old_nb_queues = dev->data->nb_queues;
+ uint8_t *queues_prio;
+ unsigned int i;
+
+ RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
+ dev->data->dev_id);
+
+ /* First time configuration */
+ if (dev->data->queues_prio == NULL && nb_queues != 0) {
+ /* Allocate memory to store queue priority */
+ dev->data->queues_prio = rte_zmalloc_socket(
+ "eventdev->data->queues_prio",
+ sizeof(dev->data->queues_prio[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->queues_prio == NULL) {
+ dev->data->nb_queues = 0;
+ RTE_EDEV_LOG_ERR("failed to get mem for queue priority,"
+ "nb_queues %u", nb_queues);
+ return -(ENOMEM);
+ }
+ /* Re-configure */
+ } else if (dev->data->queues_prio != NULL && nb_queues != 0) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
+
+ for (i = nb_queues; i < old_nb_queues; i++)
+ (*dev->dev_ops->queue_release)(dev, i);
+
+ /* Re allocate memory to store queue priority */
+ queues_prio = dev->data->queues_prio;
+ queues_prio = rte_realloc(queues_prio,
+ sizeof(queues_prio[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (queues_prio == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc queue priority,"
+ " nb_queues %u", nb_queues);
+ return -(ENOMEM);
+ }
+ dev->data->queues_prio = queues_prio;
+
+ if (nb_queues > old_nb_queues) {
+ uint8_t new_qs = nb_queues - old_nb_queues;
+
+ memset(queues_prio + old_nb_queues, 0,
+ sizeof(queues_prio[0]) * new_qs);
+ }
+ } else if (dev->data->queues_prio != NULL && nb_queues == 0) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
+
+ for (i = nb_queues; i < old_nb_queues; i++)
+ (*dev->dev_ops->queue_release)(dev, i);
+ }
+
+ dev->data->nb_queues = nb_queues;
+ return 0;
+}
+
+#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
+
+static inline int
+rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
+{
+ uint8_t old_nb_ports = dev->data->nb_ports;
+ void **ports;
+ uint16_t *links_map;
+ uint8_t *ports_dequeue_depth;
+ uint8_t *ports_enqueue_depth;
+ unsigned int i;
+
+ RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
+ dev->data->dev_id);
+
+ /* First time configuration */
+ if (dev->data->ports == NULL && nb_ports != 0) {
+ dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
+ sizeof(dev->data->ports[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->ports == NULL) {
+ dev->data->nb_ports = 0;
+ RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
+ "nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Allocate memory to store ports dequeue depth */
+ dev->data->ports_dequeue_depth =
+ rte_zmalloc_socket("eventdev->ports_dequeue_depth",
+ sizeof(dev->data->ports_dequeue_depth[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->ports_dequeue_depth == NULL) {
+ dev->data->nb_ports = 0;
+ RTE_EDEV_LOG_ERR("failed to get mem for port deq meta,"
+ "nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Allocate memory to store ports enqueue depth */
+ dev->data->ports_enqueue_depth =
+ rte_zmalloc_socket("eventdev->ports_enqueue_depth",
+ sizeof(dev->data->ports_enqueue_depth[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->ports_enqueue_depth == NULL) {
+ dev->data->nb_ports = 0;
+ RTE_EDEV_LOG_ERR("failed to get mem for port enq meta,"
+ "nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Allocate memory to store queue to port link connection */
+ dev->data->links_map =
+ rte_zmalloc_socket("eventdev->links_map",
+ sizeof(dev->data->links_map[0]) * nb_ports *
+ RTE_EVENT_MAX_QUEUES_PER_DEV,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->links_map == NULL) {
+ dev->data->nb_ports = 0;
+ RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
+ "nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+ for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
+ dev->data->links_map[i] =
+ EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+ } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
+
+ ports = dev->data->ports;
+ ports_dequeue_depth = dev->data->ports_dequeue_depth;
+ ports_enqueue_depth = dev->data->ports_enqueue_depth;
+ links_map = dev->data->links_map;
+
+ for (i = nb_ports; i < old_nb_ports; i++)
+ (*dev->dev_ops->port_release)(ports[i]);
+
+ /* Realloc memory for ports */
+ ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE);
+ if (ports == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
+ " nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Realloc memory for ports_dequeue_depth */
+ ports_dequeue_depth = rte_realloc(ports_dequeue_depth,
+ sizeof(ports_dequeue_depth[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE);
+ if (ports_dequeue_depth == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc port dequeue meta,"
+ " nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Realloc memory for ports_enqueue_depth */
+ ports_enqueue_depth = rte_realloc(ports_enqueue_depth,
+ sizeof(ports_enqueue_depth[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE);
+ if (ports_enqueue_depth == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc port enqueue meta,"
+ " nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Realloc memory to store queue to port link connection */
+ links_map = rte_realloc(links_map,
+ sizeof(dev->data->links_map[0]) * nb_ports *
+ RTE_EVENT_MAX_QUEUES_PER_DEV,
+ RTE_CACHE_LINE_SIZE);
+ if (dev->data->links_map == NULL) {
+ dev->data->nb_ports = 0;
+ RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
+ "nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ if (nb_ports > old_nb_ports) {
+ uint8_t new_ps = nb_ports - old_nb_ports;
+ unsigned int old_links_map_end =
+ old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
+ unsigned int links_map_end =
+ nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
+
+ memset(ports + old_nb_ports, 0,
+ sizeof(ports[0]) * new_ps);
+ memset(ports_dequeue_depth + old_nb_ports, 0,
+ sizeof(ports_dequeue_depth[0]) * new_ps);
+ memset(ports_enqueue_depth + old_nb_ports, 0,
+ sizeof(ports_enqueue_depth[0]) * new_ps);
+ for (i = old_links_map_end; i < links_map_end; i++)
+ links_map[i] =
+ EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+ }
+
+ dev->data->ports = ports;
+ dev->data->ports_dequeue_depth = ports_dequeue_depth;
+ dev->data->ports_enqueue_depth = ports_enqueue_depth;
+ dev->data->links_map = links_map;
+ } else if (dev->data->ports != NULL && nb_ports == 0) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
+
+ ports = dev->data->ports;
+ for (i = nb_ports; i < old_nb_ports; i++)
+ (*dev->dev_ops->port_release)(ports[i]);
+ }
+
+ dev->data->nb_ports = nb_ports;
+ return 0;
+}
+
+int
+rte_event_dev_configure(uint8_t dev_id,
+ const struct rte_event_dev_config *dev_conf)
+{
+ struct rte_eventdev *dev;
+ struct rte_event_dev_info info;
+ int diag;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+ if (dev->data->dev_started) {
+ RTE_EDEV_LOG_ERR(
+ "device %d must be stopped to allow configuration", dev_id);
+ return -EBUSY;
+ }
+
+ if (dev_conf == NULL)
+ return -EINVAL;
+
+ (*dev->dev_ops->dev_infos_get)(dev, &info);
+
+ /* Check dequeue_timeout_ns value is in limit */
+ if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
+ if (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
+ || dev_conf->dequeue_timeout_ns >
+ info.max_dequeue_timeout_ns) {
+ RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
+ " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
+ dev_id, dev_conf->dequeue_timeout_ns,
+ info.min_dequeue_timeout_ns,
+ info.max_dequeue_timeout_ns);
+ return -EINVAL;
+ }
+ }
+
+ /* Check nb_events_limit is in limit */
+ if (dev_conf->nb_events_limit > info.max_num_events) {
+ RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
+ dev_id, dev_conf->nb_events_limit, info.max_num_events);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_queues is in limit */
+ if (!dev_conf->nb_event_queues) {
+ RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
+ dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_queues > info.max_event_queues) {
+ RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
+ dev_id, dev_conf->nb_event_queues, info.max_event_queues);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_ports is in limit */
+ if (!dev_conf->nb_event_ports) {
+ RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_ports > info.max_event_ports) {
+ RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
+ dev_id, dev_conf->nb_event_ports, info.max_event_ports);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_queue_flows is in limit */
+ if (!dev_conf->nb_event_queue_flows) {
+ RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
+ RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
+ dev_id, dev_conf->nb_event_queue_flows,
+ info.max_event_queue_flows);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_port_dequeue_depth is in limit */
+ if (!dev_conf->nb_event_port_dequeue_depth) {
+ RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
+ dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_port_dequeue_depth >
+ info.max_event_port_dequeue_depth) {
+ RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
+ dev_id, dev_conf->nb_event_port_dequeue_depth,
+ info.max_event_port_dequeue_depth);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_port_enqueue_depth is in limit */
+ if (!dev_conf->nb_event_port_enqueue_depth) {
+ RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
+ dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_port_enqueue_depth >
+ info.max_event_port_enqueue_depth) {
+ RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
+ dev_id, dev_conf->nb_event_port_enqueue_depth,
+ info.max_event_port_enqueue_depth);
+ return -EINVAL;
+ }
+
+ /* Copy the dev_conf parameter into the dev structure */
+ memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+
+ /* Setup new number of queues and reconfigure device. */
+ diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
+ if (diag != 0) {
+ RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
+ dev_id, diag);
+ return diag;
+ }
+
+ /* Setup new number of ports and reconfigure device. */
+ diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
+ if (diag != 0) {
+ rte_event_dev_queue_config(dev, 0);
+ RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
+ dev_id, diag);
+ return diag;
+ }
+
+ /* Configure the device */
+ diag = (*dev->dev_ops->dev_configure)(dev);
+ if (diag != 0) {
+ RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
+ rte_event_dev_queue_config(dev, 0);
+ rte_event_dev_port_config(dev, 0);
+ }
+
+ dev->data->event_dev_cap = info.event_dev_cap;
+ return diag;
+}
+
+static inline int
+is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ if (queue_id < dev->data->nb_queues && queue_id <
+ RTE_EVENT_MAX_QUEUES_PER_DEV)
+ return 1;
+ else
+ return 0;
+}
+
+int
+rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (queue_conf == NULL)
+ return -EINVAL;
+
+ if (!is_valid_queue(dev, queue_id)) {
+ RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
+ memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
+ (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
+ return 0;
+}
+
+static inline int
+is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
+{
+ if (queue_conf && (
+ ((queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_TYPE_MASK)
+ == RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ ((queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_TYPE_MASK)
+ == RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
+ ))
+ return 1;
+ else
+ return 0;
+}
+
+static inline int
+is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
+{
+ if (queue_conf && (
+ ((queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_TYPE_MASK)
+ == RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ ((queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_TYPE_MASK)
+ == RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
+ ))
+ return 1;
+ else
+ return 0;
+}
+
+
+int
+rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct rte_eventdev *dev;
+ struct rte_event_queue_conf def_conf;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (!is_valid_queue(dev, queue_id)) {
+ RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+ return -EINVAL;
+ }
+
+ /* Check nb_atomic_flows limit */
+ if (is_valid_atomic_queue_conf(queue_conf)) {
+ if (queue_conf->nb_atomic_flows == 0 ||
+ queue_conf->nb_atomic_flows >
+ dev->data->dev_conf.nb_event_queue_flows) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
+ dev_id, queue_id, queue_conf->nb_atomic_flows,
+ dev->data->dev_conf.nb_event_queue_flows);
+ return -EINVAL;
+ }
+ }
+
+ /* Check nb_atomic_order_sequences limit */
+ if (is_valid_ordered_queue_conf(queue_conf)) {
+ if (queue_conf->nb_atomic_order_sequences == 0 ||
+ queue_conf->nb_atomic_order_sequences >
+ dev->data->dev_conf.nb_event_queue_flows) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
+ dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
+ dev->data->dev_conf.nb_event_queue_flows);
+ return -EINVAL;
+ }
+ }
+
+ if (dev->data->dev_started) {
+ RTE_EDEV_LOG_ERR(
+ "device %d must be stopped to allow queue setup", dev_id);
+ return -EBUSY;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
+
+ if (queue_conf == NULL) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
+ -ENOTSUP);
+ (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
+ queue_conf = &def_conf;
+ }
+
+ dev->data->queues_prio[queue_id] = queue_conf->priority;
+ return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
+}
+
+uint8_t
+rte_event_queue_count(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ dev = &rte_eventdevs[dev_id];
+ return dev->data->nb_queues;
+}
+
+uint8_t
+rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id)
+{
+ struct rte_eventdev *dev;
+
+ dev = &rte_eventdevs[dev_id];
+ if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
+ return dev->data->queues_prio[queue_id];
+ else
+ return RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static inline int
+is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
+{
+ if (port_id < dev->data->nb_ports)
+ return 1;
+ else
+ return 0;
+}
+
+int
+rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (port_conf == NULL)
+ return -EINVAL;
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
+ memset(port_conf, 0, sizeof(struct rte_event_port_conf));
+ (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
+ return 0;
+}
+
+int
+rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct rte_eventdev *dev;
+ struct rte_event_port_conf def_conf;
+ int diag;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ /* Check new_event_threshold limit */
+ if ((port_conf && !port_conf->new_event_threshold) ||
+ (port_conf && port_conf->new_event_threshold >
+ dev->data->dev_conf.nb_events_limit)) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
+ dev_id, port_id, port_conf->new_event_threshold,
+ dev->data->dev_conf.nb_events_limit);
+ return -EINVAL;
+ }
+
+ /* Check dequeue_depth limit */
+ if ((port_conf && !port_conf->dequeue_depth) ||
+ (port_conf && port_conf->dequeue_depth >
+ dev->data->dev_conf.nb_event_port_dequeue_depth)) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
+ dev_id, port_id, port_conf->dequeue_depth,
+ dev->data->dev_conf.nb_event_port_dequeue_depth);
+ return -EINVAL;
+ }
+
+ /* Check enqueue_depth limit */
+ if ((port_conf && !port_conf->enqueue_depth) ||
+ (port_conf && port_conf->enqueue_depth >
+ dev->data->dev_conf.nb_event_port_enqueue_depth)) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
+ dev_id, port_id, port_conf->enqueue_depth,
+ dev->data->dev_conf.nb_event_port_enqueue_depth);
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_started) {
+ RTE_EDEV_LOG_ERR(
+ "device %d must be stopped to allow port setup", dev_id);
+ return -EBUSY;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
+
+ if (port_conf == NULL) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
+ -ENOTSUP);
+ (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
+ port_conf = &def_conf;
+ }
+
+ dev->data->ports_dequeue_depth[port_id] =
+ port_conf->dequeue_depth;
+ dev->data->ports_enqueue_depth[port_id] =
+ port_conf->enqueue_depth;
+
+ diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
+
+ /* Unlink all the queues from this port(default state after setup) */
+ if (!diag)
+ diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
+
+ if (diag < 0)
+ return diag;
+
+ return 0;
+}
+
+uint8_t
+rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id)
+{
+ struct rte_eventdev *dev;
+
+ dev = &rte_eventdevs[dev_id];
+ return dev->data->ports_dequeue_depth[port_id];
+}
+
+uint8_t
+rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id)
+{
+ struct rte_eventdev *dev;
+
+ dev = &rte_eventdevs[dev_id];
+ return dev->data->ports_enqueue_depth[port_id];
+}
+
+uint8_t
+rte_event_port_count(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ dev = &rte_eventdevs[dev_id];
+ return dev->data->nb_ports;
+}
+
+int
+rte_event_port_link(uint8_t dev_id, uint8_t port_id,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct rte_eventdev *dev;
+ uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint16_t *links_map;
+ int i, diag;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ if (queues == NULL) {
+ for (i = 0; i < dev->data->nb_queues; i++)
+ queues_list[i] = i;
+
+ queues = queues_list;
+ nb_links = dev->data->nb_queues;
+ }
+
+ if (priorities == NULL) {
+ for (i = 0; i < nb_links; i++)
+ priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
+
+ priorities = priorities_list;
+ }
+
+ for (i = 0; i < nb_links; i++)
+ if (queues[i] >= dev->data->nb_queues)
+ return -EINVAL;
+
+ diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
+ queues, priorities, nb_links);
+ if (diag < 0)
+ return diag;
+
+ links_map = dev->data->links_map;
+ /* Point links_map to this port specific area */
+ links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+ for (i = 0; i < diag; i++)
+ links_map[queues[i]] = (uint8_t)priorities[i];
+
+ return diag;
+}
+
+int
+rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct rte_eventdev *dev;
+ uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ int i, diag;
+ uint16_t *links_map;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ if (queues == NULL) {
+ for (i = 0; i < dev->data->nb_queues; i++)
+ all_queues[i] = i;
+ queues = all_queues;
+ nb_unlinks = dev->data->nb_queues;
+ }
+
+ for (i = 0; i < nb_unlinks; i++)
+ if (queues[i] >= dev->data->nb_queues)
+ return -EINVAL;
+
+ diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
+ queues, nb_unlinks);
+
+ if (diag < 0)
+ return diag;
+
+ links_map = dev->data->links_map;
+ /* Point links_map to this port specific area */
+ links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+ for (i = 0; i < diag; i++)
+ links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+
+ return diag;
+}
+
+int
+rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
+ uint8_t queues[], uint8_t priorities[])
+{
+ struct rte_eventdev *dev;
+ uint16_t *links_map;
+ int i, count = 0;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ links_map = dev->data->links_map;
+ /* Point links_map to this port specific area */
+ links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+ for (i = 0; i < dev->data->nb_queues; i++) {
+ if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
+ queues[count] = i;
+ priorities[count] = (uint8_t)links_map[i];
+ ++count;
+ }
+ }
+ return count;
+}
+
+int
+rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
+
+ if (timeout_ticks == NULL)
+ return -EINVAL;
+
+ return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
+}
+
+int
+rte_event_dev_dump(uint8_t dev_id, FILE *f)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
+
+ (*dev->dev_ops->dump)(dev, f);
+ return 0;
+
+}
+
+static int
+xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ if (dev->dev_ops->xstats_get_names != NULL)
+ return (*dev->dev_ops->xstats_get_names)(dev, mode,
+ queue_port_id,
+ NULL, NULL, 0);
+ return 0;
+}
+
+int
+rte_event_dev_xstats_names_get(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+ const int cnt_expected_entries = xstats_get_count(dev_id, mode,
+ queue_port_id);
+ if (xstats_names == NULL || cnt_expected_entries < 0 ||
+ (int)size < cnt_expected_entries)
+ return cnt_expected_entries;
+
+ /* dev_id checked above */
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ if (dev->dev_ops->xstats_get_names != NULL)
+ return (*dev->dev_ops->xstats_get_names)(dev, mode,
+ queue_port_id, xstats_names, ids, size);
+
+ return -ENOTSUP;
+}
+
+/* retrieve eventdev extended statistics */
+int
+rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id, const unsigned int ids[],
+ uint64_t values[], unsigned int n)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ /* implemented by the driver */
+ if (dev->dev_ops->xstats_get != NULL)
+ return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
+ ids, values, n);
+ return -ENOTSUP;
+}
+
+uint64_t
+rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
+ unsigned int *id)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ unsigned int temp = -1;
+
+ if (id != NULL)
+ *id = (unsigned int)-1;
+ else
+ id = &temp; /* ensure driver never gets a NULL value */
+
+ /* implemented by driver */
+ if (dev->dev_ops->xstats_get_by_name != NULL)
+ return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
+ return -ENOTSUP;
+}
+
+int rte_event_dev_xstats_reset(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
+ const uint32_t ids[], uint32_t nb_ids)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ if (dev->dev_ops->xstats_reset != NULL)
+ return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
+ ids, nb_ids);
+ return -ENOTSUP;
+}
+
+int
+rte_event_dev_start(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+ int diag;
+
+ RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+
+ if (dev->data->dev_started != 0) {
+ RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already started",
+ dev_id);
+ return 0;
+ }
+
+ diag = (*dev->dev_ops->dev_start)(dev);
+ if (diag == 0)
+ dev->data->dev_started = 1;
+ else
+ return diag;
+
+ return 0;
+}
+
+void
+rte_event_dev_stop(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
+
+ RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+
+ if (dev->data->dev_started == 0) {
+ RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already stopped",
+ dev_id);
+ return;
+ }
+
+ dev->data->dev_started = 0;
+ (*dev->dev_ops->dev_stop)(dev);
+}
+
+int
+rte_event_dev_close(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+
+ /* Device must be stopped before it can be closed */
+ if (dev->data->dev_started == 1) {
+ RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
+ dev_id);
+ return -EBUSY;
+ }
+
+ return (*dev->dev_ops->dev_close)(dev);
+}
+
+static inline int
+rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
+ int socket_id)
+{
+ char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
+ const struct rte_memzone *mz;
+ int n;
+
+ /* Generate memzone name */
+ n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
+ if (n >= (int)sizeof(mz_name))
+ return -EINVAL;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ mz = rte_memzone_reserve(mz_name,
+ sizeof(struct rte_eventdev_data),
+ socket_id, 0);
+ } else
+ mz = rte_memzone_lookup(mz_name);
+
+ if (mz == NULL)
+ return -ENOMEM;
+
+ *data = mz->addr;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ memset(*data, 0, sizeof(struct rte_eventdev_data));
+
+ return 0;
+}
+
+static inline uint8_t
+rte_eventdev_find_free_device_index(void)
+{
+ uint8_t dev_id;
+
+ for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
+ if (rte_eventdevs[dev_id].attached ==
+ RTE_EVENTDEV_DETACHED)
+ return dev_id;
+ }
+ return RTE_EVENT_MAX_DEVS;
+}
+
+struct rte_eventdev *
+rte_event_pmd_allocate(const char *name, int socket_id)
+{
+ struct rte_eventdev *eventdev;
+ uint8_t dev_id;
+
+ if (rte_event_pmd_get_named_dev(name) != NULL) {
+ RTE_EDEV_LOG_ERR("Event device with name %s already "
+ "allocated!", name);
+ return NULL;
+ }
+
+ dev_id = rte_eventdev_find_free_device_index();
+ if (dev_id == RTE_EVENT_MAX_DEVS) {
+ RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
+ return NULL;
+ }
+
+ eventdev = &rte_eventdevs[dev_id];
+
+ if (eventdev->data == NULL) {
+ struct rte_eventdev_data *eventdev_data = NULL;
+
+ int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
+ socket_id);
+
+ if (retval < 0 || eventdev_data == NULL)
+ return NULL;
+
+ eventdev->data = eventdev_data;
+
+ snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
+ "%s", name);
+
+ eventdev->data->dev_id = dev_id;
+ eventdev->data->socket_id = socket_id;
+ eventdev->data->dev_started = 0;
+
+ eventdev->attached = RTE_EVENTDEV_ATTACHED;
+
+ eventdev_globals.nb_devs++;
+ }
+
+ return eventdev;
+}
+
+int
+rte_event_pmd_release(struct rte_eventdev *eventdev)
+{
+ int ret;
+ char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
+ const struct rte_memzone *mz;
+
+ if (eventdev == NULL)
+ return -EINVAL;
+
+ ret = rte_event_dev_close(eventdev->data->dev_id);
+ if (ret < 0)
+ return ret;
+
+ eventdev->attached = RTE_EVENTDEV_DETACHED;
+ eventdev_globals.nb_devs--;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rte_free(eventdev->data->dev_private);
+
+ /* Generate memzone name */
+ ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
+ eventdev->data->dev_id);
+ if (ret >= (int)sizeof(mz_name))
+ return -EINVAL;
+
+ mz = rte_memzone_lookup(mz_name);
+ if (mz == NULL)
+ return -ENOMEM;
+
+ ret = rte_memzone_free(mz);
+ if (ret)
+ return ret;
+ }
+
+ eventdev->data = NULL;
+ return 0;
+}
+
+struct rte_eventdev *
+rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
+ int socket_id)
+{
+ struct rte_eventdev *eventdev;
+
+ /* Allocate device structure */
+ eventdev = rte_event_pmd_allocate(name, socket_id);
+ if (eventdev == NULL)
+ return NULL;
+
+ /* Allocate private device structure */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eventdev->data->dev_private =
+ rte_zmalloc_socket("eventdev device private",
+ dev_private_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ if (eventdev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private device"
+ " data");
+ }
+
+ return eventdev;
+}
+
+int
+rte_event_pmd_vdev_uninit(const char *name)
+{
+ struct rte_eventdev *eventdev;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ eventdev = rte_event_pmd_get_named_dev(name);
+ if (eventdev == NULL)
+ return -ENODEV;
+
+ /* Free the event device */
+ rte_event_pmd_release(eventdev);
+
+ return 0;
+}
+
+int
+rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eventdev_driver *eventdrv;
+ struct rte_eventdev *eventdev;
+
+ char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
+
+ int retval;
+
+ eventdrv = (struct rte_eventdev_driver *)pci_drv;
+ if (eventdrv == NULL)
+ return -ENODEV;
+
+ rte_pci_device_name(&pci_dev->addr, eventdev_name,
+ sizeof(eventdev_name));
+
+ eventdev = rte_event_pmd_allocate(eventdev_name,
+ pci_dev->device.numa_node);
+ if (eventdev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eventdev->data->dev_private =
+ rte_zmalloc_socket(
+ "eventdev private structure",
+ eventdrv->dev_private_size,
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (eventdev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ eventdev->dev = &pci_dev->device;
+ eventdev->driver = eventdrv;
+
+ /* Invoke PMD device initialization function */
+ retval = (*eventdrv->eventdev_init)(eventdev);
+ if (retval == 0)
+ return 0;
+
+ RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
+ " failed", pci_drv->driver.name,
+ (unsigned int) pci_dev->id.vendor_id,
+ (unsigned int) pci_dev->id.device_id);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eventdev->data->dev_private);
+
+ eventdev->attached = RTE_EVENTDEV_DETACHED;
+ eventdev_globals.nb_devs--;
+
+ return -ENXIO;
+}
+
+int
+rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev)
+{
+ const struct rte_eventdev_driver *eventdrv;
+ struct rte_eventdev *eventdev;
+ char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
+ int ret;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, eventdev_name,
+ sizeof(eventdev_name));
+
+ eventdev = rte_event_pmd_get_named_dev(eventdev_name);
+ if (eventdev == NULL)
+ return -ENODEV;
+
+ eventdrv = (const struct rte_eventdev_driver *)pci_dev->driver;
+ if (eventdrv == NULL)
+ return -ENODEV;
+
+ /* Invoke PMD device un-init function */
+ if (*eventdrv->eventdev_uninit) {
+ ret = (*eventdrv->eventdev_uninit)(eventdev);
+ if (ret)
+ return ret;
+ }
+
+ /* Free event device */
+ rte_event_pmd_release(eventdev);
+
+ eventdev->dev = NULL;
+ eventdev->driver = NULL;
+
+ return 0;
+}
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
new file mode 100644
index 00000000..20e7293e
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -0,0 +1,1588 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright 2016 Cavium.
+ * Copyright 2016 Intel Corporation.
+ * Copyright 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_EVENTDEV_H_
+#define _RTE_EVENTDEV_H_
+
+/**
+ * @file
+ *
+ * RTE Event Device API
+ *
+ * In a polling model, lcores poll ethdev ports and associated rx queues
+ * directly to look for packet. In an event driven model, by contrast, lcores
+ * call the scheduler that selects packets for them based on programmer
+ * specified criteria. Eventdev library adds support for event driven
+ * programming model, which offer applications automatic multicore scaling,
+ * dynamic load balancing, pipelining, packet ingress order maintenance and
+ * synchronization services to simplify application packet processing.
+ *
+ * The Event Device API is composed of two parts:
+ *
+ * - The application-oriented Event API that includes functions to setup
+ * an event device (configure it, setup its queues, ports and start it), to
+ * establish the link between queues to port and to receive events, and so on.
+ *
+ * - The driver-oriented Event API that exports a function allowing
+ * an event poll Mode Driver (PMD) to simultaneously register itself as
+ * an event device driver.
+ *
+ * Event device components:
+ *
+ * +-----------------+
+ * | +-------------+ |
+ * +-------+ | | flow 0 | |
+ * |Packet | | +-------------+ |
+ * |event | | +-------------+ |
+ * | | | | flow 1 | |port_link(port0, queue0)
+ * +-------+ | +-------------+ | | +--------+
+ * +-------+ | +-------------+ o-----v-----o |dequeue +------+
+ * |Crypto | | | flow n | | | event +------->|Core 0|
+ * |work | | +-------------+ o----+ | port 0 | | |
+ * |done ev| | event queue 0 | | +--------+ +------+
+ * +-------+ +-----------------+ |
+ * +-------+ |
+ * |Timer | +-----------------+ | +--------+
+ * |expiry | | +-------------+ | +------o |dequeue +------+
+ * |event | | | flow 0 | o-----------o event +------->|Core 1|
+ * +-------+ | +-------------+ | +----o port 1 | | |
+ * Event enqueue | +-------------+ | | +--------+ +------+
+ * o-------------> | | flow 1 | | |
+ * enqueue( | +-------------+ | |
+ * queue_id, | | | +--------+ +------+
+ * flow_id, | +-------------+ | | | |dequeue |Core 2|
+ * sched_type, | | flow n | o-----------o event +------->| |
+ * event_type, | +-------------+ | | | port 2 | +------+
+ * subev_type, | event queue 1 | | +--------+
+ * event) +-----------------+ | +--------+
+ * | | |dequeue +------+
+ * +-------+ +-----------------+ | | event +------->|Core n|
+ * |Core | | +-------------+ o-----------o port n | | |
+ * |(SW) | | | flow 0 | | | +--------+ +--+---+
+ * |event | | +-------------+ | | |
+ * +-------+ | +-------------+ | | |
+ * ^ | | flow 1 | | | |
+ * | | +-------------+ o------+ |
+ * | | +-------------+ | |
+ * | | | flow n | | |
+ * | | +-------------+ | |
+ * | | event queue n | |
+ * | +-----------------+ |
+ * | |
+ * +-----------------------------------------------------------+
+ *
+ * Event device: A hardware or software-based event scheduler.
+ *
+ * Event: A unit of scheduling that encapsulates a packet or other datatype
+ * like SW generated event from the CPU, Crypto work completion notification,
+ * Timer expiry event notification etc as well as metadata.
+ * The metadata includes flow ID, scheduling type, event priority, event_type,
+ * sub_event_type etc.
+ *
+ * Event queue: A queue containing events that are scheduled by the event dev.
+ * An event queue contains events of different flows associated with scheduling
+ * types, such as atomic, ordered, or parallel.
+ *
+ * Event port: An application's interface into the event dev for enqueue and
+ * dequeue operations. Each event port can be linked with one or more
+ * event queues for dequeue operations.
+ *
+ * By default, all the functions of the Event Device API exported by a PMD
+ * are lock-free functions which assume to not be invoked in parallel on
+ * different logical cores to work on the same target object. For instance,
+ * the dequeue function of a PMD cannot be invoked in parallel on two logical
+ * cores to operates on same event port. Of course, this function
+ * can be invoked in parallel by different logical cores on different ports.
+ * It is the responsibility of the upper level application to enforce this rule.
+ *
+ * In all functions of the Event API, the Event device is
+ * designated by an integer >= 0 named the device identifier *dev_id*
+ *
+ * At the Event driver level, Event devices are represented by a generic
+ * data structure of type *rte_event_dev*.
+ *
+ * Event devices are dynamically registered during the PCI/SoC device probing
+ * phase performed at EAL initialization time.
+ * When an Event device is being probed, a *rte_event_dev* structure and
+ * a new device identifier are allocated for that device. Then, the
+ * event_dev_init() function supplied by the Event driver matching the probed
+ * device is invoked to properly initialize the device.
+ *
+ * The role of the device init function consists of resetting the hardware or
+ * software event driver implementations.
+ *
+ * If the device init operation is successful, the correspondence between
+ * the device identifier assigned to the new device and its associated
+ * *rte_event_dev* structure is effectively registered.
+ * Otherwise, both the *rte_event_dev* structure and the device identifier are
+ * freed.
+ *
+ * The functions exported by the application Event API to setup a device
+ * designated by its device identifier must be invoked in the following order:
+ * - rte_event_dev_configure()
+ * - rte_event_queue_setup()
+ * - rte_event_port_setup()
+ * - rte_event_port_link()
+ * - rte_event_dev_start()
+ *
+ * Then, the application can invoke, in any order, the functions
+ * exported by the Event API to schedule events, dequeue events, enqueue events,
+ * change event queue(s) to event port [un]link establishment and so on.
+ *
+ * Application may use rte_event_[queue/port]_default_conf_get() to get the
+ * default configuration to set up an event queue or event port by
+ * overriding few default values.
+ *
+ * If the application wants to change the configuration (i.e. call
+ * rte_event_dev_configure(), rte_event_queue_setup(), or
+ * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the
+ * device and then do the reconfiguration before calling rte_event_dev_start()
+ * again. The schedule, enqueue and dequeue functions should not be invoked
+ * when the device is stopped.
+ *
+ * Finally, an application can close an Event device by invoking the
+ * rte_event_dev_close() function.
+ *
+ * Each function of the application Event API invokes a specific function
+ * of the PMD that controls the target device designated by its device
+ * identifier.
+ *
+ * For this purpose, all device-specific functions of an Event driver are
+ * supplied through a set of pointers contained in a generic structure of type
+ * *event_dev_ops*.
+ * The address of the *event_dev_ops* structure is stored in the *rte_event_dev*
+ * structure by the device init function of the Event driver, which is
+ * invoked during the PCI/SoC device probing phase, as explained earlier.
+ *
+ * In other words, each function of the Event API simply retrieves the
+ * *rte_event_dev* structure associated with the device identifier and
+ * performs an indirect invocation of the corresponding driver function
+ * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure.
+ *
+ * For performance reasons, the address of the fast-path functions of the
+ * Event driver is not contained in the *event_dev_ops* structure.
+ * Instead, they are directly stored at the beginning of the *rte_event_dev*
+ * structure to avoid an extra indirect memory access during their invocation.
+ *
+ * RTE event device drivers do not use interrupts for enqueue or dequeue
+ * operation. Instead, Event drivers export Poll-Mode enqueue and dequeue
+ * functions to applications.
+ *
+ * An event driven based application has following typical workflow on fastpath:
+ * \code{.c}
+ * while (1) {
+ *
+ * rte_event_schedule(dev_id);
+ *
+ * rte_event_dequeue(...);
+ *
+ * (event processing)
+ *
+ * rte_event_enqueue(...);
+ * }
+ * \endcode
+ *
+ * The events are injected to event device through *enqueue* operation by
+ * event producers in the system. The typical event producers are ethdev
+ * subsystem for generating packet events, CPU(SW) for generating events based
+ * on different stages of application processing, cryptodev for generating
+ * crypto work completion notification etc
+ *
+ * The *dequeue* operation gets one or more events from the event ports.
+ * The application process the events and send to downstream event queue through
+ * rte_event_enqueue_burst() if it is an intermediate stage of event processing,
+ * on the final stage, the application may send to different subsystem like
+ * ethdev to send the packet/event on the wire using ethdev
+ * rte_eth_tx_burst() API.
+ *
+ * The point at which events are scheduled to ports depends on the device.
+ * For hardware devices, scheduling occurs asynchronously without any software
+ * intervention. Software schedulers can either be distributed
+ * (each worker thread schedules events to its own port) or centralized
+ * (a dedicated thread schedules to all ports). Distributed software schedulers
+ * perform the scheduling in rte_event_dequeue_burst(), whereas centralized
+ * scheduler logic is located in rte_event_schedule().
+ * The RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag is not set
+ * indicates the device is centralized and thus needs a dedicated scheduling
+ * thread that repeatedly calls rte_event_schedule().
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_errno.h>
+
+struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
+
+/* Event device capability bitmap flags */
+#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
+/**< Event scheduling prioritization is based on the priority associated with
+ * each event queue.
+ *
+ * @see rte_event_queue_setup()
+ */
+#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
+/**< Event scheduling prioritization is based on the priority associated with
+ * each event. Priority of each event is supplied in *rte_event* structure
+ * on each enqueue operation.
+ *
+ * @see rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
+/**< Event device operates in distributed scheduling mode.
+ * In distributed scheduling mode, event scheduling happens in HW or
+ * rte_event_dequeue_burst() or the combination of these two.
+ * If the flag is not set then eventdev is centralized and thus needs a
+ * dedicated scheduling thread that repeatedly calls rte_event_schedule().
+ *
+ * @see rte_event_schedule(), rte_event_dequeue_burst()
+ */
+#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
+/**< Event device is capable of enqueuing events of any type to any queue.
+ * If this capability is not set, the queue only supports events of the
+ * *RTE_EVENT_QUEUE_CFG_* type that it was created with.
+ *
+ * @see RTE_EVENT_QUEUE_CFG_* values
+ */
+
+/* Event device priority levels */
+#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
+/**< Highest priority expressed across eventdev subsystem
+ * @see rte_event_queue_setup(), rte_event_enqueue_burst()
+ * @see rte_event_port_link()
+ */
+#define RTE_EVENT_DEV_PRIORITY_NORMAL 128
+/**< Normal priority expressed across eventdev subsystem
+ * @see rte_event_queue_setup(), rte_event_enqueue_burst()
+ * @see rte_event_port_link()
+ */
+#define RTE_EVENT_DEV_PRIORITY_LOWEST 255
+/**< Lowest priority expressed across eventdev subsystem
+ * @see rte_event_queue_setup(), rte_event_enqueue_burst()
+ * @see rte_event_port_link()
+ */
+
+/**
+ * Get the total number of event devices that have been successfully
+ * initialised.
+ *
+ * @return
+ * The total number of usable event devices.
+ */
+uint8_t
+rte_event_dev_count(void);
+
+/**
+ * Get the device identifier for the named event device.
+ *
+ * @param name
+ * Event device name to select the event device identifier.
+ *
+ * @return
+ * Returns event device identifier on success.
+ * - <0: Failure to find named event device.
+ */
+int
+rte_event_dev_get_dev_id(const char *name);
+
+/**
+ * Return the NUMA socket to which a device is connected.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @return
+ * The NUMA socket id to which the device is connected or
+ * a default of zero if the socket could not be determined.
+ * -(-EINVAL) dev_id value is out of range.
+ */
+int
+rte_event_dev_socket_id(uint8_t dev_id);
+
+/**
+ * Event device information
+ */
+struct rte_event_dev_info {
+ const char *driver_name; /**< Event driver name */
+ struct rte_device *dev; /**< Device information */
+ uint32_t min_dequeue_timeout_ns;
+ /**< Minimum supported global dequeue timeout(ns) by this device */
+ uint32_t max_dequeue_timeout_ns;
+ /**< Maximum supported global dequeue timeout(ns) by this device */
+ uint32_t dequeue_timeout_ns;
+ /**< Configured global dequeue timeout(ns) for this device */
+ uint8_t max_event_queues;
+ /**< Maximum event_queues supported by this device */
+ uint32_t max_event_queue_flows;
+ /**< Maximum supported flows in an event queue by this device*/
+ uint8_t max_event_queue_priority_levels;
+ /**< Maximum number of event queue priority levels by this device.
+ * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
+ */
+ uint8_t max_event_priority_levels;
+ /**< Maximum number of event priority levels by this device.
+ * Valid when the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability
+ */
+ uint8_t max_event_ports;
+ /**< Maximum number of event ports supported by this device */
+ uint8_t max_event_port_dequeue_depth;
+ /**< Maximum number of events can be dequeued at a time from an
+ * event port by this device.
+ * A device that does not support bulk dequeue will set this as 1.
+ */
+ uint32_t max_event_port_enqueue_depth;
+ /**< Maximum number of events can be enqueued at a time from an
+ * event port by this device.
+ * A device that does not support bulk enqueue will set this as 1.
+ */
+ int32_t max_num_events;
+ /**< A *closed system* event dev has a limit on the number of events it
+ * can manage at a time. An *open system* event dev does not have a
+ * limit and will specify this as -1.
+ */
+ uint32_t event_dev_cap;
+ /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+};
+
+/**
+ * Retrieve the contextual information of an event device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param[out] dev_info
+ * A pointer to a structure of type *rte_event_dev_info* to be filled with the
+ * contextual information of the device.
+ *
+ * @return
+ * - 0: Success, driver updates the contextual information of the event device
+ * - <0: Error code returned by the driver info get function.
+ *
+ */
+int
+rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
+
+/* Event device configuration bitmap flags */
+#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
+/**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
+ * @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
+ */
+
+/** Event device configuration structure */
+struct rte_event_dev_config {
+ uint32_t dequeue_timeout_ns;
+ /**< rte_event_dequeue_burst() timeout on this device.
+ * This value should be in the range of *min_dequeue_timeout_ns* and
+ * *max_dequeue_timeout_ns* which previously provided in
+ * rte_event_dev_info_get()
+ * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
+ */
+ int32_t nb_events_limit;
+ /**< In a *closed system* this field is the limit on maximum number of
+ * events that can be inflight in the eventdev at a given time. The
+ * limit is required to ensure that the finite space in a closed system
+ * is not overwhelmed. The value cannot exceed the *max_num_events*
+ * as provided by rte_event_dev_info_get().
+ * This value should be set to -1 for *open system*.
+ */
+ uint8_t nb_event_queues;
+ /**< Number of event queues to configure on this device.
+ * This value cannot exceed the *max_event_queues* which previously
+ * provided in rte_event_dev_info_get()
+ */
+ uint8_t nb_event_ports;
+ /**< Number of event ports to configure on this device.
+ * This value cannot exceed the *max_event_ports* which previously
+ * provided in rte_event_dev_info_get()
+ */
+ uint32_t nb_event_queue_flows;
+ /**< Number of flows for any event queue on this device.
+ * This value cannot exceed the *max_event_queue_flows* which previously
+ * provided in rte_event_dev_info_get()
+ */
+ uint32_t nb_event_port_dequeue_depth;
+ /**< Maximum number of events can be dequeued at a time from an
+ * event port by this device.
+ * This value cannot exceed the *max_event_port_dequeue_depth*
+ * which previously provided in rte_event_dev_info_get()
+ * @see rte_event_port_setup()
+ */
+ uint32_t nb_event_port_enqueue_depth;
+ /**< Maximum number of events can be enqueued at a time from an
+ * event port by this device.
+ * This value cannot exceed the *max_event_port_enqueue_depth*
+ * which previously provided in rte_event_dev_info_get()
+ * @see rte_event_port_setup()
+ */
+ uint32_t event_dev_cfg;
+ /**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
+};
+
+/**
+ * Configure an event device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * The caller may use rte_event_dev_info_get() to get the capability of each
+ * resources available for this event device.
+ *
+ * @param dev_id
+ * The identifier of the device to configure.
+ * @param dev_conf
+ * The event device configuration structure.
+ *
+ * @return
+ * - 0: Success, device configured.
+ * - <0: Error code returned by the driver configuration function.
+ */
+int
+rte_event_dev_configure(uint8_t dev_id,
+ const struct rte_event_dev_config *dev_conf);
+
+
+/* Event queue specific APIs */
+
+/* Event queue configuration bitmap flags */
+#define RTE_EVENT_QUEUE_CFG_TYPE_MASK (3ULL << 0)
+/**< Mask for event queue schedule type configuration request */
+#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (0ULL << 0)
+/**< Allow ATOMIC,ORDERED,PARALLEL schedule type enqueue
+ *
+ * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
+ * @see rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY (1ULL << 0)
+/**< Allow only ATOMIC schedule type enqueue
+ *
+ * The rte_event_enqueue_burst() result is undefined if the queue configured
+ * with ATOMIC only and sched_type != RTE_SCHED_TYPE_ATOMIC
+ *
+ * @see RTE_SCHED_TYPE_ATOMIC, rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_QUEUE_CFG_ORDERED_ONLY (2ULL << 0)
+/**< Allow only ORDERED schedule type enqueue
+ *
+ * The rte_event_enqueue_burst() result is undefined if the queue configured
+ * with ORDERED only and sched_type != RTE_SCHED_TYPE_ORDERED
+ *
+ * @see RTE_SCHED_TYPE_ORDERED, rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY (3ULL << 0)
+/**< Allow only PARALLEL schedule type enqueue
+ *
+ * The rte_event_enqueue_burst() result is undefined if the queue configured
+ * with PARALLEL only and sched_type != RTE_SCHED_TYPE_PARALLEL
+ *
+ * @see RTE_SCHED_TYPE_PARALLEL, rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 2)
+/**< This event queue links only to a single event port.
+ *
+ * @see rte_event_port_setup(), rte_event_port_link()
+ */
+
+/** Event queue configuration structure */
+struct rte_event_queue_conf {
+ uint32_t nb_atomic_flows;
+ /**< The maximum number of active flows this queue can track at any
+ * given time. The value must be in the range of
+ * [1 - nb_event_queue_flows)] which previously provided in
+ * rte_event_dev_info_get().
+ */
+ uint32_t nb_atomic_order_sequences;
+ /**< The maximum number of outstanding events waiting to be
+ * reordered by this queue. In other words, the number of entries in
+ * this queue’s reorder buffer.When the number of events in the
+ * reorder buffer reaches to *nb_atomic_order_sequences* then the
+ * scheduler cannot schedule the events from this queue and invalid
+ * event will be returned from dequeue until one or more entries are
+ * freed up/released.
+ * The value must be in the range of [1 - nb_event_queue_flows)]
+ * which previously supplied to rte_event_dev_configure().
+ */
+ uint32_t event_queue_cfg; /**< Queue cfg flags(EVENT_QUEUE_CFG_) */
+ uint8_t priority;
+ /**< Priority for this event queue relative to other event queues.
+ * The requested priority should in the range of
+ * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
+ * The implementation shall normalize the requested priority to
+ * event device supported priority value.
+ * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
+ */
+};
+
+/**
+ * Retrieve the default configuration information of an event queue designated
+ * by its *queue_id* from the event driver for an event device.
+ *
+ * This function intended to be used in conjunction with rte_event_queue_setup()
+ * where caller needs to set up the queue by overriding few default values.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the event queue to get the configuration information.
+ * The value must be in the range [0, nb_event_queues - 1]
+ * previously supplied to rte_event_dev_configure().
+ * @param[out] queue_conf
+ * The pointer to the default event queue configuration data.
+ * @return
+ * - 0: Success, driver updates the default event queue configuration data.
+ * - <0: Error code returned by the driver info get function.
+ *
+ * @see rte_event_queue_setup()
+ *
+ */
+int
+rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf);
+
+/**
+ * Allocate and set up an event queue for an event device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the event queue to setup. The value must be in the range
+ * [0, nb_event_queues - 1] previously supplied to rte_event_dev_configure().
+ * @param queue_conf
+ * The pointer to the configuration data to be used for the event queue.
+ * NULL value is allowed, in which case default configuration used.
+ *
+ * @see rte_event_queue_default_conf_get()
+ *
+ * @return
+ * - 0: Success, event queue correctly set up.
+ * - <0: event queue configuration failed
+ */
+int
+rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf);
+
+/**
+ * Get the number of event queues on a specific event device
+ *
+ * @param dev_id
+ * Event device identifier.
+ * @return
+ * - The number of configured event queues
+ */
+uint8_t
+rte_event_queue_count(uint8_t dev_id);
+
+/**
+ * Get the priority of the event queue on a specific event device
+ *
+ * @param dev_id
+ * Event device identifier.
+ * @param queue_id
+ * Event queue identifier.
+ * @return
+ * - If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then the
+ * configured priority of the event queue in
+ * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST] range
+ * else the value RTE_EVENT_DEV_PRIORITY_NORMAL
+ */
+uint8_t
+rte_event_queue_priority(uint8_t dev_id, uint8_t queue_id);
+
+/* Event port specific APIs */
+
+/** Event port configuration structure */
+struct rte_event_port_conf {
+ int32_t new_event_threshold;
+ /**< A backpressure threshold for new event enqueues on this port.
+ * Use for *closed system* event dev where event capacity is limited,
+ * and cannot exceed the capacity of the event dev.
+ * Configuring ports with different thresholds can make higher priority
+ * traffic less likely to be backpressured.
+ * For example, a port used to inject NIC Rx packets into the event dev
+ * can have a lower threshold so as not to overwhelm the device,
+ * while ports used for worker pools can have a higher threshold.
+ * This value cannot exceed the *nb_events_limit*
+ * which was previously supplied to rte_event_dev_configure().
+ * This should be set to '-1' for *open system*.
+ */
+ uint16_t dequeue_depth;
+ /**< Configure number of bulk dequeues for this event port.
+ * This value cannot exceed the *nb_event_port_dequeue_depth*
+ * which previously supplied to rte_event_dev_configure()
+ */
+ uint16_t enqueue_depth;
+ /**< Configure number of bulk enqueues for this event port.
+ * This value cannot exceed the *nb_event_port_enqueue_depth*
+ * which previously supplied to rte_event_dev_configure()
+ */
+};
+
+/**
+ * Retrieve the default configuration information of an event port designated
+ * by its *port_id* from the event driver for an event device.
+ *
+ * This function intended to be used in conjunction with rte_event_port_setup()
+ * where caller needs to set up the port by overriding few default values.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The index of the event port to get the configuration information.
+ * The value must be in the range [0, nb_event_ports - 1]
+ * previously supplied to rte_event_dev_configure().
+ * @param[out] port_conf
+ * The pointer to the default event port configuration data
+ * @return
+ * - 0: Success, driver updates the default event port configuration data.
+ * - <0: Error code returned by the driver info get function.
+ *
+ * @see rte_event_port_setup()
+ *
+ */
+int
+rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
+ struct rte_event_port_conf *port_conf);
+
+/**
+ * Allocate and set up an event port for an event device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The index of the event port to setup. The value must be in the range
+ * [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
+ * @param port_conf
+ * The pointer to the configuration data to be used for the queue.
+ * NULL value is allowed, in which case default configuration used.
+ *
+ * @see rte_event_port_default_conf_get()
+ *
+ * @return
+ * - 0: Success, event port correctly set up.
+ * - <0: Port configuration failed
+ * - (-EDQUOT) Quota exceeded(Application tried to link the queue configured
+ * with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event ports)
+ */
+int
+rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf);
+
+/**
+ * Get the number of dequeue queue depth configured for event port designated
+ * by its *port_id* on a specific event device
+ *
+ * @param dev_id
+ * Event device identifier.
+ * @param port_id
+ * Event port identifier.
+ * @return
+ * - The number of configured dequeue queue depth
+ *
+ * @see rte_event_dequeue_burst()
+ */
+uint8_t
+rte_event_port_dequeue_depth(uint8_t dev_id, uint8_t port_id);
+
+/**
+ * Get the number of enqueue queue depth configured for event port designated
+ * by its *port_id* on a specific event device
+ *
+ * @param dev_id
+ * Event device identifier.
+ * @param port_id
+ * Event port identifier.
+ * @return
+ * - The number of configured enqueue queue depth
+ *
+ * @see rte_event_enqueue_burst()
+ */
+uint8_t
+rte_event_port_enqueue_depth(uint8_t dev_id, uint8_t port_id);
+
+/**
+ * Get the number of ports on a specific event device
+ *
+ * @param dev_id
+ * Event device identifier.
+ * @return
+ * - The number of configured ports
+ */
+uint8_t
+rte_event_port_count(uint8_t dev_id);
+
+/**
+ * Start an event device.
+ *
+ * The device start step is the last one and consists of setting the event
+ * queues to start accepting the events and schedules to event ports.
+ *
+ * On success, all basic functions exported by the API (event enqueue,
+ * event dequeue and so on) can be invoked.
+ *
+ * @param dev_id
+ * Event device identifier
+ * @return
+ * - 0: Success, device started.
+ * - -ESTALE : Not all ports of the device are configured
+ * - -ENOLINK: Not all queues are linked, which could lead to deadlock.
+ */
+int
+rte_event_dev_start(uint8_t dev_id);
+
+/**
+ * Stop an event device. The device can be restarted with a call to
+ * rte_event_dev_start()
+ *
+ * @param dev_id
+ * Event device identifier.
+ */
+void
+rte_event_dev_stop(uint8_t dev_id);
+
+/**
+ * Close an event device. The device cannot be restarted!
+ *
+ * @param dev_id
+ * Event device identifier
+ *
+ * @return
+ * - 0 on successfully closing device
+ * - <0 on failure to close device
+ * - (-EAGAIN) if device is busy
+ */
+int
+rte_event_dev_close(uint8_t dev_id);
+
+/* Scheduler type definitions */
+#define RTE_SCHED_TYPE_ORDERED 0
+/**< Ordered scheduling
+ *
+ * Events from an ordered flow of an event queue can be scheduled to multiple
+ * ports for concurrent processing while maintaining the original event order.
+ * This scheme enables the user to achieve high single flow throughput by
+ * avoiding SW synchronization for ordering between ports which bound to cores.
+ *
+ * The source flow ordering from an event queue is maintained when events are
+ * enqueued to their destination queue within the same ordered flow context.
+ * An event port holds the context until application call
+ * rte_event_dequeue_burst() from the same port, which implicitly releases
+ * the context.
+ * User may allow the scheduler to release the context earlier than that
+ * by invoking rte_event_enqueue_burst() with RTE_EVENT_OP_RELEASE operation.
+ *
+ * Events from the source queue appear in their original order when dequeued
+ * from a destination queue.
+ * Event ordering is based on the received event(s), but also other
+ * (newly allocated or stored) events are ordered when enqueued within the same
+ * ordered context. Events not enqueued (e.g. released or stored) within the
+ * context are considered missing from reordering and are skipped at this time
+ * (but can be ordered again within another context).
+ *
+ * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
+ */
+
+#define RTE_SCHED_TYPE_ATOMIC 1
+/**< Atomic scheduling
+ *
+ * Events from an atomic flow of an event queue can be scheduled only to a
+ * single port at a time. The port is guaranteed to have exclusive (atomic)
+ * access to the associated flow context, which enables the user to avoid SW
+ * synchronization. Atomic flows also help to maintain event ordering
+ * since only one port at a time can process events from a flow of an
+ * event queue.
+ *
+ * The atomic queue synchronization context is dedicated to the port until
+ * application call rte_event_dequeue_burst() from the same port,
+ * which implicitly releases the context. User may allow the scheduler to
+ * release the context earlier than that by invoking rte_event_enqueue_burst()
+ * with RTE_EVENT_OP_RELEASE operation.
+ *
+ * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
+ */
+
+#define RTE_SCHED_TYPE_PARALLEL 2
+/**< Parallel scheduling
+ *
+ * The scheduler performs priority scheduling, load balancing, etc. functions
+ * but does not provide additional event synchronization or ordering.
+ * It is free to schedule events from a single parallel flow of an event queue
+ * to multiple events ports for concurrent processing.
+ * The application is responsible for flow context synchronization and
+ * event ordering (SW synchronization).
+ *
+ * @see rte_event_queue_setup(), rte_event_dequeue_burst()
+ */
+
+/* Event types to classify the event source */
+#define RTE_EVENT_TYPE_ETHDEV 0x0
+/**< The event generated from ethdev subsystem */
+#define RTE_EVENT_TYPE_CRYPTODEV 0x1
+/**< The event generated from crypodev subsystem */
+#define RTE_EVENT_TYPE_TIMERDEV 0x2
+/**< The event generated from timerdev subsystem */
+#define RTE_EVENT_TYPE_CPU 0x3
+/**< The event generated from cpu for pipelining.
+ * Application may use *sub_event_type* to further classify the event
+ */
+#define RTE_EVENT_TYPE_MAX 0x10
+/**< Maximum number of event types */
+
+/* Event enqueue operations */
+#define RTE_EVENT_OP_NEW 0
+/**< The event producers use this operation to inject a new event to the
+ * event device.
+ */
+#define RTE_EVENT_OP_FORWARD 1
+/**< The CPU use this operation to forward the event to different event queue or
+ * change to new application specific flow or schedule type to enable
+ * pipelining
+ */
+#define RTE_EVENT_OP_RELEASE 2
+/**< Release the flow context associated with the schedule type.
+ *
+ * If current flow's scheduler type method is *RTE_SCHED_TYPE_ATOMIC*
+ * then this function hints the scheduler that the user has completed critical
+ * section processing in the current atomic context.
+ * The scheduler is now allowed to schedule events from the same flow from
+ * an event queue to another port. However, the context may be still held
+ * until the next rte_event_dequeue_burst() call, this call allows but does not
+ * force the scheduler to release the context early.
+ *
+ * Early atomic context release may increase parallelism and thus system
+ * performance, but the user needs to design carefully the split into critical
+ * vs non-critical sections.
+ *
+ * If current flow's scheduler type method is *RTE_SCHED_TYPE_ORDERED*
+ * then this function hints the scheduler that the user has done all that need
+ * to maintain event order in the current ordered context.
+ * The scheduler is allowed to release the ordered context of this port and
+ * avoid reordering any following enqueues.
+ *
+ * Early ordered context release may increase parallelism and thus system
+ * performance.
+ *
+ * If current flow's scheduler type method is *RTE_SCHED_TYPE_PARALLEL*
+ * or no scheduling context is held then this function may be an NOOP,
+ * depending on the implementation.
+ *
+ */
+
+/**
+ * The generic *rte_event* structure to hold the event attributes
+ * for dequeue and enqueue operation
+ */
+RTE_STD_C11
+struct rte_event {
+ /** WORD0 */
+ union {
+ uint64_t event;
+ /** Event attributes for dequeue or enqueue operation */
+ struct {
+ uint32_t flow_id:20;
+ /**< Targeted flow identifier for the enqueue and
+ * dequeue operation.
+ * The value must be in the range of
+ * [0, nb_event_queue_flows - 1] which
+ * previously supplied to rte_event_dev_configure().
+ */
+ uint32_t sub_event_type:8;
+ /**< Sub-event types based on the event source.
+ * @see RTE_EVENT_TYPE_CPU
+ */
+ uint32_t event_type:4;
+ /**< Event type to classify the event source.
+ * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*)
+ */
+ uint8_t op:2;
+ /**< The type of event enqueue operation - new/forward/
+ * etc.This field is not preserved across an instance
+ * and is undefined on dequeue.
+ * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*)
+ */
+ uint8_t rsvd:4;
+ /**< Reserved for future use */
+ uint8_t sched_type:2;
+ /**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
+ * associated with flow id on a given event queue
+ * for the enqueue and dequeue operation.
+ */
+ uint8_t queue_id;
+ /**< Targeted event queue identifier for the enqueue or
+ * dequeue operation.
+ * The value must be in the range of
+ * [0, nb_event_queues - 1] which previously supplied to
+ * rte_event_dev_configure().
+ */
+ uint8_t priority;
+ /**< Event priority relative to other events in the
+ * event queue. The requested priority should in the
+ * range of [RTE_EVENT_DEV_PRIORITY_HIGHEST,
+ * RTE_EVENT_DEV_PRIORITY_LOWEST].
+ * The implementation shall normalize the requested
+ * priority to supported priority value.
+ * Valid when the device has
+ * RTE_EVENT_DEV_CAP_EVENT_QOS capability.
+ */
+ uint8_t impl_opaque;
+ /**< Implementation specific opaque value.
+ * An implementation may use this field to hold
+ * implementation specific value to share between
+ * dequeue and enqueue operation.
+ * The application should not modify this field.
+ */
+ };
+ };
+ /** WORD1 */
+ union {
+ uint64_t u64;
+ /**< Opaque 64-bit value */
+ void *event_ptr;
+ /**< Opaque event pointer */
+ struct rte_mbuf *mbuf;
+ /**< mbuf pointer if dequeued event is associated with mbuf */
+ };
+};
+
+
+struct rte_eventdev_driver;
+struct rte_eventdev_ops;
+struct rte_eventdev;
+
+typedef void (*event_schedule_t)(struct rte_eventdev *dev);
+/**< @internal Schedule one or more events in the event dev. */
+
+typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
+/**< @internal Enqueue event on port of a device */
+
+typedef uint16_t (*event_enqueue_burst_t)(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
+/**< @internal Enqueue burst of events on port of a device */
+
+typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks);
+/**< @internal Dequeue event from port of a device */
+
+typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks);
+/**< @internal Dequeue burst of events from port of a device */
+
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+ int socket_id;
+ /**< Socket ID where memory is allocated */
+ uint8_t dev_id;
+ /**< Device ID for this instance */
+ uint8_t nb_queues;
+ /**< Number of event queues. */
+ uint8_t nb_ports;
+ /**< Number of event ports. */
+ void **ports;
+ /**< Array of pointers to ports. */
+ uint8_t *ports_dequeue_depth;
+ /**< Array of port dequeue depth. */
+ uint8_t *ports_enqueue_depth;
+ /**< Array of port enqueue depth. */
+ uint8_t *queues_prio;
+ /**< Array of queue priority. */
+ uint16_t *links_map;
+ /**< Memory to store queues to port connections. */
+ void *dev_private;
+ /**< PMD-specific private data */
+ uint32_t event_dev_cap;
+ /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+ struct rte_event_dev_config dev_conf;
+ /**< Configuration applied to device. */
+
+ RTE_STD_C11
+ uint8_t dev_started : 1;
+ /**< Device state: STARTED(1)/STOPPED(0) */
+
+ char name[RTE_EVENTDEV_NAME_MAX_LEN];
+ /**< Unique identifier name */
+} __rte_cache_aligned;
+
+/** @internal The data structure associated with each event device. */
+struct rte_eventdev {
+ event_schedule_t schedule;
+ /**< Pointer to PMD schedule function. */
+ event_enqueue_t enqueue;
+ /**< Pointer to PMD enqueue function. */
+ event_enqueue_burst_t enqueue_burst;
+ /**< Pointer to PMD enqueue burst function. */
+ event_dequeue_t dequeue;
+ /**< Pointer to PMD dequeue function. */
+ event_dequeue_burst_t dequeue_burst;
+ /**< Pointer to PMD dequeue burst function. */
+
+ struct rte_eventdev_data *data;
+ /**< Pointer to device data */
+ const struct rte_eventdev_ops *dev_ops;
+ /**< Functions exported by PMD */
+ struct rte_device *dev;
+ /**< Device info. supplied by probing */
+ const struct rte_eventdev_driver *driver;
+ /**< Driver for this device */
+
+ RTE_STD_C11
+ uint8_t attached : 1;
+ /**< Flag indicating the device is attached */
+} __rte_cache_aligned;
+
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
+
+/**
+ * Schedule one or more events in the event dev.
+ *
+ * An event dev implementation may define this is a NOOP, for instance if
+ * the event dev performs its scheduling in hardware.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ */
+static inline void
+rte_event_schedule(uint8_t dev_id)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ if (*dev->schedule)
+ (*dev->schedule)(dev);
+}
+
+/**
+ * Enqueue a burst of events objects or an event object supplied in *rte_event*
+ * structure on an event device designated by its *dev_id* through the event
+ * port specified by *port_id*. Each event object specifies the event queue on
+ * which it will be enqueued.
+ *
+ * The *nb_events* parameter is the number of event objects to enqueue which are
+ * supplied in the *ev* array of *rte_event* structure.
+ *
+ * The rte_event_enqueue_burst() function returns the number of
+ * events objects it actually enqueued. A return value equal to *nb_events*
+ * means that all event objects have been enqueued.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param ev
+ * Points to an array of *nb_events* objects of type *rte_event* structure
+ * which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ * The number of event objects to enqueue, typically number of
+ * rte_event_port_enqueue_depth() available for this port.
+ *
+ * @return
+ * The number of event objects actually enqueued on the event device. The
+ * return value can be less than the value of the *nb_events* parameter when
+ * the event devices queue is full or if invalid parameters are specified in a
+ * *rte_event*. If the return value is less than *nb_events*, the remaining
+ * events at the end of ev[] are not consumed and the caller has to take care
+ * of them, and rte_errno is set accordingly. Possible errno values include:
+ * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue
+ * ID is invalid, or an event's sched type doesn't match the
+ * capabilities of the destination queue.
+ * - -ENOSPC The event port was backpressured and unable to enqueue
+ * one or more events. This error code is only applicable to
+ * closed systems.
+ * @see rte_event_port_enqueue_depth()
+ */
+static inline uint16_t
+rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event ev[], uint16_t nb_events)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (port_id >= dev->data->nb_ports) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#endif
+
+ /*
+ * Allow zero cost non burst mode routine invocation if application
+ * requests nb_events as const one
+ */
+ if (nb_events == 1)
+ return (*dev->enqueue)(
+ dev->data->ports[port_id], ev);
+ else
+ return (*dev->enqueue_burst)(
+ dev->data->ports[port_id], ev, nb_events);
+}
+
+/**
+ * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
+ *
+ * If the device is configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT flag
+ * then application can use this function to convert timeout value in
+ * nanoseconds to implementations specific timeout value supplied in
+ * rte_event_dequeue_burst()
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param ns
+ * Wait time in nanosecond
+ * @param[out] timeout_ticks
+ * Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
+ *
+ * @return
+ * - 0 on success.
+ * - -ENOTSUP if the device doesn't support timeouts
+ * - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
+ * - other values < 0 on failure.
+ *
+ * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
+ * @see rte_event_dev_configure()
+ *
+ */
+int
+rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
+ uint64_t *timeout_ticks);
+
+/**
+ * Dequeue a burst of events objects or an event object from the event port
+ * designated by its *event_port_id*, on an event device designated
+ * by its *dev_id*.
+ *
+ * rte_event_dequeue_burst() does not dictate the specifics of scheduling
+ * algorithm as each eventdev driver may have different criteria to schedule
+ * an event. However, in general, from an application perspective scheduler may
+ * use the following scheme to dispatch an event to the port.
+ *
+ * 1) Selection of event queue based on
+ * a) The list of event queues are linked to the event port.
+ * b) If the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability then event
+ * queue selection from list is based on event queue priority relative to
+ * other event queue supplied as *priority* in rte_event_queue_setup()
+ * c) If the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability then event
+ * queue selection from the list is based on event priority supplied as
+ * *priority* in rte_event_enqueue_burst()
+ * 2) Selection of event
+ * a) The number of flows available in selected event queue.
+ * b) Schedule type method associated with the event
+ *
+ * The *nb_events* parameter is the maximum number of event objects to dequeue
+ * which are returned in the *ev* array of *rte_event* structure.
+ *
+ * The rte_event_dequeue_burst() function returns the number of events objects
+ * it actually dequeued. A return value equal to *nb_events* means that all
+ * event objects have been dequeued.
+ *
+ * The number of events dequeued is the number of scheduler contexts held by
+ * this port. These contexts are automatically released in the next
+ * rte_event_dequeue_burst() invocation, or invoking rte_event_enqueue_burst()
+ * with RTE_EVENT_OP_RELEASE operation can be used to release the
+ * contexts early.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param[out] ev
+ * Points to an array of *nb_events* objects of type *rte_event* structure
+ * for output to be populated with the dequeued event objects.
+ * @param nb_events
+ * The maximum number of event objects to dequeue, typically number of
+ * rte_event_port_dequeue_depth() available for this port.
+ *
+ * @param timeout_ticks
+ * - 0 no-wait, returns immediately if there is no event.
+ * - >0 wait for the event, if the device is configured with
+ * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
+ * at least one event is available or *timeout_ticks* time.
+ * if the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
+ * then this function will wait until the event available or
+ * *dequeue_timeout_ns* ns which was previously supplied to
+ * rte_event_dev_configure()
+ *
+ * @return
+ * The number of event objects actually dequeued from the port. The return
+ * value can be less than the value of the *nb_events* parameter when the
+ * event port's queue is not full.
+ *
+ * @see rte_event_port_dequeue_depth()
+ */
+static inline uint16_t
+rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (port_id >= dev->data->nb_ports) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#endif
+
+ /*
+ * Allow zero cost non burst mode routine invocation if application
+ * requests nb_events as const one
+ */
+ if (nb_events == 1)
+ return (*dev->dequeue)(
+ dev->data->ports[port_id], ev, timeout_ticks);
+ else
+ return (*dev->dequeue_burst)(
+ dev->data->ports[port_id], ev, nb_events,
+ timeout_ticks);
+}
+
+/**
+ * Link multiple source event queues supplied in *queues* to the destination
+ * event port designated by its *port_id* with associated service priority
+ * supplied in *priorities* on the event device designated by its *dev_id*.
+ *
+ * The link establishment shall enable the event port *port_id* from
+ * receiving events from the specified event queue(s) supplied in *queues*
+ *
+ * An event queue may link to one or more event ports.
+ * The number of links can be established from an event queue to event port is
+ * implementation defined.
+ *
+ * Event queue(s) to event port link establishment can be changed at runtime
+ * without re-configuring the device to support scaling and to reduce the
+ * latency of critical work by establishing the link with more event ports
+ * at runtime.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param port_id
+ * Event port identifier to select the destination port to link.
+ *
+ * @param queues
+ * Points to an array of *nb_links* event queues to be linked
+ * to the event port.
+ * NULL value is allowed, in which case this function links all the configured
+ * event queues *nb_event_queues* which previously supplied to
+ * rte_event_dev_configure() to the event port *port_id*
+ *
+ * @param priorities
+ * Points to an array of *nb_links* service priorities associated with each
+ * event queue link to event port.
+ * The priority defines the event port's servicing priority for
+ * event queue, which may be ignored by an implementation.
+ * The requested priority should in the range of
+ * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
+ * The implementation shall normalize the requested priority to
+ * implementation supported priority value.
+ * NULL value is allowed, in which case this function links the event queues
+ * with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
+ *
+ * @param nb_links
+ * The number of links to establish. This parameter is ignored if queues is
+ * NULL.
+ *
+ * @return
+ * The number of links actually established. The return value can be less than
+ * the value of the *nb_links* parameter when the implementation has the
+ * limitation on specific queue to port link establishment or if invalid
+ * parameters are specified in *queues*
+ * If the return value is less than *nb_links*, the remaining links at the end
+ * of link[] are not established, and the caller has to take care of them.
+ * If return value is less than *nb_links* then implementation shall update the
+ * rte_errno accordingly, Possible rte_errno values are
+ * (-EDQUOT) Quota exceeded(Application tried to link the queue configured with
+ * RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event ports)
+ * (-EINVAL) Invalid parameter
+ *
+ */
+int
+rte_event_port_link(uint8_t dev_id, uint8_t port_id,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links);
+
+/**
+ * Unlink multiple source event queues supplied in *queues* from the destination
+ * event port designated by its *port_id* on the event device designated
+ * by its *dev_id*.
+ *
+ * The unlink establishment shall disable the event port *port_id* from
+ * receiving events from the specified event queue *queue_id*
+ *
+ * Event queue(s) to event port unlink establishment can be changed at runtime
+ * without re-configuring the device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param port_id
+ * Event port identifier to select the destination port to unlink.
+ *
+ * @param queues
+ * Points to an array of *nb_unlinks* event queues to be unlinked
+ * from the event port.
+ * NULL value is allowed, in which case this function unlinks all the
+ * event queue(s) from the event port *port_id*.
+ *
+ * @param nb_unlinks
+ * The number of unlinks to establish. This parameter is ignored if queues is
+ * NULL.
+ *
+ * @return
+ * The number of unlinks actually established. The return value can be less
+ * than the value of the *nb_unlinks* parameter when the implementation has the
+ * limitation on specific queue to port unlink establishment or
+ * if invalid parameters are specified.
+ * If the return value is less than *nb_unlinks*, the remaining queues at the
+ * end of queues[] are not established, and the caller has to take care of them.
+ * If return value is less than *nb_unlinks* then implementation shall update
+ * the rte_errno accordingly, Possible rte_errno values are
+ * (-EINVAL) Invalid parameter
+ *
+ */
+int
+rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
+ uint8_t queues[], uint16_t nb_unlinks);
+
+/**
+ * Retrieve the list of source event queues and its associated service priority
+ * linked to the destination event port designated by its *port_id*
+ * on the event device designated by its *dev_id*.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param port_id
+ * Event port identifier.
+ *
+ * @param[out] queues
+ * Points to an array of *queues* for output.
+ * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
+ * store the event queue(s) linked with event port *port_id*
+ *
+ * @param[out] priorities
+ * Points to an array of *priorities* for output.
+ * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
+ * store the service priority associated with each event queue linked
+ *
+ * @return
+ * The number of links established on the event port designated by its
+ * *port_id*.
+ * - <0 on failure.
+ *
+ */
+int
+rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
+ uint8_t queues[], uint8_t priorities[]);
+
+/**
+ * Dump internal information about *dev_id* to the FILE* provided in *f*.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param f
+ * A pointer to a file for output
+ *
+ * @return
+ * - 0: on success
+ * - <0: on failure.
+ */
+int
+rte_event_dev_dump(uint8_t dev_id, FILE *f);
+
+/** Maximum name length for extended statistics counters */
+#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
+
+/**
+ * Selects the component of the eventdev to retrieve statistics from.
+ */
+enum rte_event_dev_xstats_mode {
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+};
+
+/**
+ * A name-key lookup element for extended statistics.
+ *
+ * This structure is used to map between names and ID numbers
+ * for extended ethdev statistics.
+ */
+struct rte_event_dev_xstats_name {
+ char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
+};
+
+/**
+ * Retrieve names of extended statistics of an event device.
+ *
+ * @param dev_id
+ * The identifier of the event device.
+ * @param mode
+ * The mode of statistics to retrieve. Choices include the device statistics,
+ * port statistics or queue statistics.
+ * @param queue_port_id
+ * Used to specify the port or queue number in queue or port mode, and is
+ * ignored in device mode.
+ * @param[out] xstats_names
+ * Block of memory to insert names into. Must be at least size in capacity.
+ * If set to NULL, function returns required capacity.
+ * @param[out] ids
+ * Block of memory to insert ids into. Must be at least size in capacity.
+ * If set to NULL, function returns required capacity. The id values returned
+ * can be passed to *rte_event_dev_xstats_get* to select statistics.
+ * @param size
+ * Capacity of xstats_names (number of names).
+ * @return
+ * - positive value lower or equal to size: success. The return value
+ * is the number of entries filled in the stats table.
+ * - positive value higher than size: error, the given statistics table
+ * is too small. The return value corresponds to the size that should
+ * be given to succeed. The entries in the table are not valid and
+ * shall not be used by the caller.
+ * - negative value on error:
+ * -ENODEV for invalid *dev_id*
+ * -EINVAL for invalid mode, queue port or id parameters
+ * -ENOTSUP if the device doesn't support this function.
+ */
+int
+rte_event_dev_xstats_names_get(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids,
+ unsigned int size);
+
+/**
+ * Retrieve extended statistics of an event device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param mode
+ * The mode of statistics to retrieve. Choices include the device statistics,
+ * port statistics or queue statistics.
+ * @param queue_port_id
+ * Used to specify the port or queue number in queue or port mode, and is
+ * ignored in device mode.
+ * @param ids
+ * The id numbers of the stats to get. The ids can be got from the stat
+ * position in the stat list from rte_event_dev_get_xstats_names(), or
+ * by using rte_eventdev_get_xstats_by_name()
+ * @param[out] values
+ * The values for each stats request by ID.
+ * @param n
+ * The number of stats requested
+ * @return
+ * - positive value: number of stat entries filled into the values array
+ * - negative value on error:
+ * -ENODEV for invalid *dev_id*
+ * -EINVAL for invalid mode, queue port or id parameters
+ * -ENOTSUP if the device doesn't support this function.
+ */
+int
+rte_event_dev_xstats_get(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id,
+ const unsigned int ids[],
+ uint64_t values[], unsigned int n);
+
+/**
+ * Retrieve the value of a single stat by requesting it by name.
+ *
+ * @param dev_id
+ * The identifier of the device
+ * @param name
+ * The stat name to retrieve
+ * @param[out] id
+ * If non-NULL, the numerical id of the stat will be returned, so that further
+ * requests for the stat can be got using rte_eventdev_xstats_get, which will
+ * be faster as it doesn't need to scan a list of names for the stat.
+ * If the stat cannot be found, the id returned will be (unsigned)-1.
+ * @return
+ * - positive value or zero: the stat value
+ * - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
+ */
+uint64_t
+rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
+ unsigned int *id);
+
+/**
+ * Reset the values of the xstats of the selected component in the device.
+ *
+ * @param dev_id
+ * The identifier of the device
+ * @param mode
+ * The mode of the statistics to reset. Choose from device, queue or port.
+ * @param queue_port_id
+ * The queue or port to reset. 0 and positive values select ports and queues,
+ * while -1 indicates all ports or queues.
+ * @param ids
+ * Selects specific statistics to be reset. When NULL, all statistics selected
+ * by *mode* will be reset. If non-NULL, must point to array of at least
+ * *nb_ids* size.
+ * @param nb_ids
+ * The number of ids available from the *ids* array. Ignored when ids is NULL.
+ * @return
+ * - zero: successfully reset the statistics to zero
+ * - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
+ */
+int
+rte_event_dev_xstats_reset(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EVENTDEV_H_ */
diff --git a/lib/librte_eventdev/rte_eventdev_pmd.h b/lib/librte_eventdev/rte_eventdev_pmd.h
new file mode 100644
index 00000000..4005b3c9
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev_pmd.h
@@ -0,0 +1,599 @@
+/*
+ *
+ * Copyright(c) 2016 Cavium networks. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_EVENTDEV_PMD_H_
+#define _RTE_EVENTDEV_PMD_H_
+
+/** @file
+ * RTE Event PMD APIs
+ *
+ * @note
+ * These API are from event PMD only and user applications should not call
+ * them directly.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_dev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include <rte_common.h>
+
+#include "rte_eventdev.h"
+
+/* Logging Macros */
+#define RTE_EDEV_LOG_ERR(...) \
+ RTE_LOG(ERR, EVENTDEV, \
+ RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+#define RTE_EDEV_LOG_DEBUG(...) \
+ RTE_LOG(DEBUG, EVENTDEV, \
+ RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+#else
+#define RTE_EDEV_LOG_DEBUG(...) (void)0
+#endif
+
+/* Macros to check for valid device */
+#define RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \
+ if (!rte_event_pmd_is_valid_dev((dev_id))) { \
+ RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \
+ return retval; \
+ } \
+} while (0)
+
+#define RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id) do { \
+ if (!rte_event_pmd_is_valid_dev((dev_id))) { \
+ RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \
+ return; \
+ } \
+} while (0)
+
+#define RTE_EVENTDEV_DETACHED (0)
+#define RTE_EVENTDEV_ATTACHED (1)
+
+/**
+ * Initialisation function of a event driver invoked for each matching
+ * event PCI device detected during the PCI probing phase.
+ *
+ * @param dev
+ * The dev pointer is the address of the *rte_eventdev* structure associated
+ * with the matching device and which has been [automatically] allocated in
+ * the *rte_event_devices* array.
+ *
+ * @return
+ * - 0: Success, the device is properly initialised by the driver.
+ * In particular, the driver MUST have set up the *dev_ops* pointer
+ * of the *dev* structure.
+ * - <0: Error code of the device initialisation failure.
+ */
+typedef int (*eventdev_init_t)(struct rte_eventdev *dev);
+
+/**
+ * Finalisation function of a driver invoked for each matching
+ * PCI device detected during the PCI closing phase.
+ *
+ * @param dev
+ * The dev pointer is the address of the *rte_eventdev* structure associated
+ * with the matching device and which has been [automatically] allocated in
+ * the *rte_event_devices* array.
+ *
+ * @return
+ * - 0: Success, the device is properly finalised by the driver.
+ * In particular, the driver MUST free the *dev_ops* pointer
+ * of the *dev* structure.
+ * - <0: Error code of the device initialisation failure.
+ */
+typedef int (*eventdev_uninit_t)(struct rte_eventdev *dev);
+
+/**
+ * The structure associated with a PMD driver.
+ *
+ * Each driver acts as a PCI driver and is represented by a generic
+ * *event_driver* structure that holds:
+ *
+ * - An *rte_pci_driver* structure (which must be the first field).
+ *
+ * - The *eventdev_init* function invoked for each matching PCI device.
+ *
+ * - The size of the private data to allocate for each matching device.
+ */
+struct rte_eventdev_driver {
+ struct rte_pci_driver pci_drv; /**< The PMD is also a PCI driver. */
+ unsigned int dev_private_size; /**< Size of device private data. */
+
+ eventdev_init_t eventdev_init; /**< Device init function. */
+ eventdev_uninit_t eventdev_uninit; /**< Device uninit function. */
+};
+
+/** Global structure used for maintaining state of allocated event devices */
+struct rte_eventdev_global {
+ uint8_t nb_devs; /**< Number of devices found */
+};
+
+extern struct rte_eventdev_global *rte_eventdev_globals;
+/** Pointer to global event devices data structure. */
+extern struct rte_eventdev *rte_eventdevs;
+/** The pool of rte_eventdev structures. */
+
+/**
+ * Get the rte_eventdev structure device pointer for the named device.
+ *
+ * @param name
+ * device name to select the device structure.
+ *
+ * @return
+ * - The rte_eventdev structure pointer for the given device ID.
+ */
+static inline struct rte_eventdev *
+rte_event_pmd_get_named_dev(const char *name)
+{
+ struct rte_eventdev *dev;
+ unsigned int i;
+
+ if (name == NULL)
+ return NULL;
+
+ for (i = 0; i < RTE_EVENT_MAX_DEVS; i++) {
+ dev = &rte_eventdevs[i];
+ if ((dev->attached == RTE_EVENTDEV_ATTACHED) &&
+ (strcmp(dev->data->name, name) == 0))
+ return dev;
+ }
+
+ return NULL;
+}
+
+/**
+ * Validate if the event device index is valid attached event device.
+ *
+ * @param dev_id
+ * Event device index.
+ *
+ * @return
+ * - If the device index is valid (1) or not (0).
+ */
+static inline unsigned
+rte_event_pmd_is_valid_dev(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ if (dev_id >= RTE_EVENT_MAX_DEVS)
+ return 0;
+
+ dev = &rte_eventdevs[dev_id];
+ if (dev->attached != RTE_EVENTDEV_ATTACHED)
+ return 0;
+ else
+ return 1;
+}
+
+/**
+ * Definitions of all functions exported by a driver through the
+ * the generic structure of type *event_dev_ops* supplied in the
+ * *rte_eventdev* structure associated with a device.
+ */
+
+/**
+ * Get device information of a device.
+ *
+ * @param dev
+ * Event device pointer
+ * @param dev_info
+ * Event device information structure
+ *
+ * @return
+ * Returns 0 on success
+ */
+typedef void (*eventdev_info_get_t)(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info);
+
+/**
+ * Configure a device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @return
+ * Returns 0 on success
+ */
+typedef int (*eventdev_configure_t)(const struct rte_eventdev *dev);
+
+/**
+ * Start a configured device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @return
+ * Returns 0 on success
+ */
+typedef int (*eventdev_start_t)(struct rte_eventdev *dev);
+
+/**
+ * Stop a configured device.
+ *
+ * @param dev
+ * Event device pointer
+ */
+typedef void (*eventdev_stop_t)(struct rte_eventdev *dev);
+
+/**
+ * Close a configured device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @return
+ * - 0 on success
+ * - (-EAGAIN) if can't close as device is busy
+ */
+typedef int (*eventdev_close_t)(struct rte_eventdev *dev);
+
+/**
+ * Retrieve the default event queue configuration.
+ *
+ * @param dev
+ * Event device pointer
+ * @param queue_id
+ * Event queue index
+ * @param[out] queue_conf
+ * Event queue configuration structure
+ *
+ */
+typedef void (*eventdev_queue_default_conf_get_t)(struct rte_eventdev *dev,
+ uint8_t queue_id, struct rte_event_queue_conf *queue_conf);
+
+/**
+ * Setup an event queue.
+ *
+ * @param dev
+ * Event device pointer
+ * @param queue_id
+ * Event queue index
+ * @param queue_conf
+ * Event queue configuration structure
+ *
+ * @return
+ * Returns 0 on success.
+ */
+typedef int (*eventdev_queue_setup_t)(struct rte_eventdev *dev,
+ uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf);
+
+/**
+ * Release resources allocated by given event queue.
+ *
+ * @param dev
+ * Event device pointer
+ * @param queue_id
+ * Event queue index
+ *
+ */
+typedef void (*eventdev_queue_release_t)(struct rte_eventdev *dev,
+ uint8_t queue_id);
+
+/**
+ * Retrieve the default event port configuration.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port_id
+ * Event port index
+ * @param[out] port_conf
+ * Event port configuration structure
+ *
+ */
+typedef void (*eventdev_port_default_conf_get_t)(struct rte_eventdev *dev,
+ uint8_t port_id, struct rte_event_port_conf *port_conf);
+
+/**
+ * Setup an event port.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port_id
+ * Event port index
+ * @param port_conf
+ * Event port configuration structure
+ *
+ * @return
+ * Returns 0 on success.
+ */
+typedef int (*eventdev_port_setup_t)(struct rte_eventdev *dev,
+ uint8_t port_id,
+ const struct rte_event_port_conf *port_conf);
+
+/**
+ * Release memory resources allocated by given event port.
+ *
+ * @param port
+ * Event port pointer
+ *
+ */
+typedef void (*eventdev_port_release_t)(void *port);
+
+/**
+ * Link multiple source event queues to destination event port.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port
+ * Event port pointer
+ * @param link
+ * Points to an array of *nb_links* event queues to be linked
+ * to the event port.
+ * @param priorities
+ * Points to an array of *nb_links* service priorities associated with each
+ * event queue link to event port.
+ * @param nb_links
+ * The number of links to establish
+ *
+ * @return
+ * Returns 0 on success.
+ *
+ */
+typedef int (*eventdev_port_link_t)(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links);
+
+/**
+ * Unlink multiple source event queues from destination event port.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port
+ * Event port pointer
+ * @param queues
+ * An array of *nb_unlinks* event queues to be unlinked from the event port.
+ * @param nb_unlinks
+ * The number of unlinks to establish
+ *
+ * @return
+ * Returns 0 on success.
+ *
+ */
+typedef int (*eventdev_port_unlink_t)(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks);
+
+/**
+ * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue()
+ *
+ * @param dev
+ * Event device pointer
+ * @param ns
+ * Wait time in nanosecond
+ * @param[out] timeout_ticks
+ * Value for the *timeout_ticks* parameter in rte_event_dequeue() function
+ *
+ * @return
+ * Returns 0 on success.
+ *
+ */
+typedef int (*eventdev_dequeue_timeout_ticks_t)(struct rte_eventdev *dev,
+ uint64_t ns, uint64_t *timeout_ticks);
+
+/**
+ * Dump internal information
+ *
+ * @param dev
+ * Event device pointer
+ * @param f
+ * A pointer to a file for output
+ *
+ */
+typedef void (*eventdev_dump_t)(struct rte_eventdev *dev, FILE *f);
+
+/**
+ * Retrieve a set of statistics from device
+ *
+ * @param dev
+ * Event device pointer
+ * @param ids
+ * The stat ids to retrieve
+ * @param values
+ * The returned stat values
+ * @param n
+ * The number of id values and entries in the values array
+ * @return
+ * The number of stat values successfully filled into the values array
+ */
+typedef int (*eventdev_xstats_get_t)(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n);
+
+/**
+ * Resets the statistic values in xstats for the device, based on mode.
+ */
+typedef int (*eventdev_xstats_reset_t)(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids);
+
+/**
+ * Get names of extended stats of an event device
+ *
+ * @param dev
+ * Event device pointer
+ * @param xstats_names
+ * Array of name values to be filled in
+ * @param size
+ * Number of values in the xstats_names array
+ * @return
+ * When size >= the number of stats, return the number of stat values filled
+ * into the array.
+ * When size < the number of available stats, return the number of stats
+ * values, and do not fill in any data into xstats_names.
+ */
+typedef int (*eventdev_xstats_get_names_t)(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size);
+
+/**
+ * Get value of one stats and optionally return its id
+ *
+ * @param dev
+ * Event device pointer
+ * @param name
+ * The name of the stat to retrieve
+ * @param id
+ * Pointer to an unsigned int where we store the stat-id for future reference.
+ * This pointer may be null if the id is not required.
+ * @return
+ * The value of the stat, or (uint64_t)-1 if the stat is not found.
+ * If the stat is not found, the id value will be returned as (unsigned)-1,
+ * if id pointer is non-NULL
+ */
+typedef uint64_t (*eventdev_xstats_get_by_name)(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id);
+
+/** Event device operations function pointer table */
+struct rte_eventdev_ops {
+ eventdev_info_get_t dev_infos_get; /**< Get device info. */
+ eventdev_configure_t dev_configure; /**< Configure device. */
+ eventdev_start_t dev_start; /**< Start device. */
+ eventdev_stop_t dev_stop; /**< Stop device. */
+ eventdev_close_t dev_close; /**< Close device. */
+
+ eventdev_queue_default_conf_get_t queue_def_conf;
+ /**< Get default queue configuration. */
+ eventdev_queue_setup_t queue_setup;
+ /**< Set up an event queue. */
+ eventdev_queue_release_t queue_release;
+ /**< Release an event queue. */
+
+ eventdev_port_default_conf_get_t port_def_conf;
+ /**< Get default port configuration. */
+ eventdev_port_setup_t port_setup;
+ /**< Set up an event port. */
+ eventdev_port_release_t port_release;
+ /**< Release an event port. */
+
+ eventdev_port_link_t port_link;
+ /**< Link event queues to an event port. */
+ eventdev_port_unlink_t port_unlink;
+ /**< Unlink event queues from an event port. */
+ eventdev_dequeue_timeout_ticks_t timeout_ticks;
+ /**< Converts ns to *timeout_ticks* value for rte_event_dequeue() */
+ eventdev_dump_t dump;
+ /* Dump internal information */
+
+ eventdev_xstats_get_t xstats_get;
+ /**< Get extended device statistics. */
+ eventdev_xstats_get_names_t xstats_get_names;
+ /**< Get names of extended stats. */
+ eventdev_xstats_get_by_name xstats_get_by_name;
+ /**< Get one value by name. */
+ eventdev_xstats_reset_t xstats_reset;
+ /**< Reset the statistics values in xstats. */
+};
+
+/**
+ * Allocates a new eventdev slot for an event device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ * Unique identifier name for each device
+ * @param socket_id
+ * Socket to allocate resources on.
+ * @return
+ * - Slot in the rte_dev_devices array for a new device;
+ */
+struct rte_eventdev *
+rte_event_pmd_allocate(const char *name, int socket_id);
+
+/**
+ * Release the specified eventdev device.
+ *
+ * @param eventdev
+ * The *eventdev* pointer is the address of the *rte_eventdev* structure.
+ * @return
+ * - 0 on success, negative on error
+ */
+int
+rte_event_pmd_release(struct rte_eventdev *eventdev);
+
+/**
+ * Creates a new virtual event device and returns the pointer to that device.
+ *
+ * @param name
+ * PMD type name
+ * @param dev_private_size
+ * Size of event PMDs private data
+ * @param socket_id
+ * Socket to allocate resources on.
+ *
+ * @return
+ * - Eventdev pointer if device is successfully created.
+ * - NULL if device cannot be created.
+ */
+struct rte_eventdev *
+rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
+ int socket_id);
+
+/**
+ * Destroy the given virtual event device
+ *
+ * @param name
+ * PMD type name
+ * @return
+ * - 0 on success, negative on error
+ */
+int
+rte_event_pmd_vdev_uninit(const char *name);
+
+/**
+ * Wrapper for use by pci drivers as a .probe function to attach to a event
+ * interface.
+ */
+int rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev);
+
+/**
+ * Wrapper for use by pci drivers as a .remove function to detach a event
+ * interface.
+ */
+int rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EVENTDEV_PMD_H_ */
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
new file mode 100644
index 00000000..1fa6b333
--- /dev/null
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -0,0 +1,44 @@
+DPDK_17.05 {
+ global:
+
+ rte_eventdevs;
+
+ rte_event_dev_count;
+ rte_event_dev_get_dev_id;
+ rte_event_dev_socket_id;
+ rte_event_dev_info_get;
+ rte_event_dev_configure;
+ rte_event_dev_start;
+ rte_event_dev_stop;
+ rte_event_dev_close;
+ rte_event_dev_dump;
+ rte_event_dev_xstats_by_name_get;
+ rte_event_dev_xstats_get;
+ rte_event_dev_xstats_names_get;
+ rte_event_dev_xstats_reset;
+
+ rte_event_port_default_conf_get;
+ rte_event_port_setup;
+ rte_event_port_dequeue_depth;
+ rte_event_port_enqueue_depth;
+ rte_event_port_count;
+ rte_event_port_link;
+ rte_event_port_unlink;
+ rte_event_port_links_get;
+
+ rte_event_queue_default_conf_get;
+ rte_event_queue_setup;
+ rte_event_queue_count;
+ rte_event_queue_priority;
+
+ rte_event_dequeue_timeout_ticks;
+
+ rte_event_pmd_allocate;
+ rte_event_pmd_release;
+ rte_event_pmd_vdev_init;
+ rte_event_pmd_vdev_uninit;
+ rte_event_pmd_pci_probe;
+ rte_event_pmd_pci_remove;
+
+ local: *;
+};
diff --git a/lib/librte_hash/Makefile b/lib/librte_hash/Makefile
index bb1ea990..d856aa26 100644
--- a/lib/librte_hash/Makefile
+++ b/lib/librte_hash/Makefile
@@ -55,7 +55,4 @@ SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_jhash.h
SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_thash.h
SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_fbk_hash.h
-# this lib needs eal and ring
-DEPDIRS-$(CONFIG_RTE_LIBRTE_HASH) += lib/librte_eal lib/librte_ring
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_hash/rte_crc_arm64.h b/lib/librte_hash/rte_crc_arm64.h
index 7dd6334e..2abe42ab 100644
--- a/lib/librte_hash/rte_crc_arm64.h
+++ b/lib/librte_hash/rte_crc_arm64.h
@@ -110,8 +110,10 @@ rte_hash_crc_set_alg(uint8_t alg)
case CRC32_ARM64:
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
alg = CRC32_SW;
+ /* fall-through */
case CRC32_SW:
crc32_alg = alg;
+ /* fall-through */
default:
break;
}
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 51db006a..645c0cfa 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -536,7 +536,8 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
if (cached_free_slots->len == 0) {
/* Need to get another burst of free slots from global ring */
n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
- cached_free_slots->objs, LCORE_CACHE_SIZE);
+ cached_free_slots->objs,
+ LCORE_CACHE_SIZE, NULL);
if (n_slots == 0)
return -ENOSPC;
@@ -808,7 +809,7 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
/* Need to enqueue the free slots in global ring. */
n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
cached_free_slots->objs,
- LCORE_CACHE_SIZE);
+ LCORE_CACHE_SIZE, NULL);
cached_free_slots->len -= n_slots;
}
/* Put index of new free slot in cache. */
diff --git a/lib/librte_hash/rte_hash_crc.h b/lib/librte_hash/rte_hash_crc.h
index 63e74aa4..0f485b85 100644
--- a/lib/librte_hash/rte_hash_crc.h
+++ b/lib/librte_hash/rte_hash_crc.h
@@ -476,9 +476,15 @@ rte_hash_crc_set_alg(uint8_t alg)
case CRC32_SSE42_x64:
if (! rte_cpu_get_flag_enabled(RTE_CPUFLAG_EM64T))
alg = CRC32_SSE42;
+#if __GNUC__ >= 7
+ __attribute__ ((fallthrough));
+#endif
case CRC32_SSE42:
if (! rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_2))
alg = CRC32_SW;
+#if __GNUC__ >= 7
+ __attribute__ ((fallthrough));
+#endif
#endif
case CRC32_SW:
crc32_alg = alg;
diff --git a/lib/librte_ip_frag/Makefile b/lib/librte_ip_frag/Makefile
index 43f8b1e3..4e693bf8 100644
--- a/lib/librte_ip_frag/Makefile
+++ b/lib/librte_ip_frag/Makefile
@@ -52,10 +52,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += ip_frag_internal.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_IP_FRAG)-include += rte_ip_frag.h
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += lib/librte_hash
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += lib/librte_mempool
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_jobstats/Makefile b/lib/librte_jobstats/Makefile
index 136a448e..561a0678 100644
--- a/lib/librte_jobstats/Makefile
+++ b/lib/librte_jobstats/Makefile
@@ -47,7 +47,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_JOBSTATS) := rte_jobstats.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_JOBSTATS)-include := rte_jobstats.h
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_JOBSTATS) += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_kni/Makefile b/lib/librte_kni/Makefile
index 09474461..70f1ca8f 100644
--- a/lib/librte_kni/Makefile
+++ b/lib/librte_kni/Makefile
@@ -46,9 +46,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_KNI) := rte_kni.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_KNI)-include := rte_kni.h
-DEPDIRS-$(CONFIG_RTE_LIBRTE_KNI) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_KNI) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_KNI) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_KNI) += lib/librte_ether
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index a80cefd2..c3f9208c 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -451,17 +451,35 @@ kni_free_fifo(struct rte_kni_fifo *fifo)
} while (ret);
}
+static void *
+va2pa(struct rte_mbuf *m)
+{
+ return (void *)((unsigned long)m -
+ ((unsigned long)m->buf_addr -
+ (unsigned long)m->buf_physaddr));
+}
+
+static void
+obj_free(struct rte_mempool *mp __rte_unused, void *opaque, void *obj,
+ unsigned obj_idx __rte_unused)
+{
+ struct rte_mbuf *m = obj;
+ void *mbuf_phys = opaque;
+
+ if (va2pa(m) == mbuf_phys)
+ rte_pktmbuf_free(m);
+}
+
static void
-kni_free_fifo_phy(struct rte_kni_fifo *fifo)
+kni_free_fifo_phy(struct rte_mempool *mp, struct rte_kni_fifo *fifo)
{
void *mbuf_phys;
int ret;
do {
ret = kni_fifo_get(fifo, &mbuf_phys, 1);
- /*
- * TODO: free mbufs
- */
+ if (ret)
+ rte_mempool_obj_iter(mp, obj_free, mbuf_phys);
} while (ret);
}
@@ -470,6 +488,7 @@ rte_kni_release(struct rte_kni *kni)
{
struct rte_kni_device_info dev_info;
uint32_t slot_id;
+ uint32_t retry = 5;
if (!kni || !kni->in_use)
return -1;
@@ -481,9 +500,16 @@ rte_kni_release(struct rte_kni *kni)
}
/* mbufs in all fifo should be released, except request/response */
+
+ /* wait until all rxq packets processed by kernel */
+ while (kni_fifo_count(kni->rx_q) && retry--)
+ usleep(1000);
+
+ if (kni_fifo_count(kni->rx_q))
+ RTE_LOG(ERR, KNI, "Fail to free all Rx-q items\n");
+
+ kni_free_fifo_phy(kni->pktmbuf_pool, kni->alloc_q);
kni_free_fifo(kni->tx_q);
- kni_free_fifo_phy(kni->rx_q);
- kni_free_fifo_phy(kni->alloc_q);
kni_free_fifo(kni->free_q);
slot_id = kni->slot_id;
@@ -549,14 +575,6 @@ rte_kni_handle_request(struct rte_kni *kni)
return 0;
}
-static void *
-va2pa(struct rte_mbuf *m)
-{
- return (void *)((unsigned long)m -
- ((unsigned long)m->buf_addr -
- (unsigned long)m->buf_physaddr));
-}
-
unsigned
rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
{
diff --git a/lib/librte_kni/rte_kni_fifo.h b/lib/librte_kni/rte_kni_fifo.h
index 8cb85873..c7cd5c26 100644
--- a/lib/librte_kni/rte_kni_fifo.h
+++ b/lib/librte_kni/rte_kni_fifo.h
@@ -91,3 +91,12 @@ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
fifo->read = new_read;
return i;
}
+
+/**
+ * Get the num of elements in the fifo
+ */
+static inline uint32_t
+kni_fifo_count(struct rte_kni_fifo *fifo)
+{
+ return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
+}
diff --git a/lib/librte_kvargs/Makefile b/lib/librte_kvargs/Makefile
index 87b09f20..564dd310 100644
--- a/lib/librte_kvargs/Makefile
+++ b/lib/librte_kvargs/Makefile
@@ -49,7 +49,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_KVARGS) := rte_kvargs.c
INCS := rte_kvargs.h
SYMLINK-$(CONFIG_RTE_LIBRTE_KVARGS)-include := $(INCS)
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_KVARGS) += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_kvargs/rte_kvargs.c b/lib/librte_kvargs/rte_kvargs.c
index 8d56abd4..854ac83f 100644
--- a/lib/librte_kvargs/rte_kvargs.c
+++ b/lib/librte_kvargs/rte_kvargs.c
@@ -92,9 +92,9 @@ rte_kvargs_tokenize(struct rte_kvargs *kvlist, const char *params)
* into a list of valid keys.
*/
static int
-is_valid_key(const char *valid[], const char *key_match)
+is_valid_key(const char * const valid[], const char *key_match)
{
- const char **valid_ptr;
+ const char * const *valid_ptr;
for (valid_ptr = valid; *valid_ptr != NULL; valid_ptr++) {
if (strcmp(key_match, *valid_ptr) == 0)
@@ -109,7 +109,7 @@ is_valid_key(const char *valid[], const char *key_match)
*/
static int
check_for_valid_keys(struct rte_kvargs *kvlist,
- const char *valid[])
+ const char * const valid[])
{
unsigned i, ret;
struct rte_kvargs_pair *pair;
@@ -187,7 +187,7 @@ rte_kvargs_free(struct rte_kvargs *kvlist)
* check if only valid keys were used.
*/
struct rte_kvargs *
-rte_kvargs_parse(const char *args, const char *valid_keys[])
+rte_kvargs_parse(const char *args, const char * const valid_keys[])
{
struct rte_kvargs *kvlist;
diff --git a/lib/librte_kvargs/rte_kvargs.h b/lib/librte_kvargs/rte_kvargs.h
index ae9ae79f..5821c726 100644
--- a/lib/librte_kvargs/rte_kvargs.h
+++ b/lib/librte_kvargs/rte_kvargs.h
@@ -97,7 +97,8 @@ struct rte_kvargs {
* - A pointer to an allocated rte_kvargs structure on success
* - NULL on error
*/
-struct rte_kvargs *rte_kvargs_parse(const char *args, const char *valid_keys[]);
+struct rte_kvargs *rte_kvargs_parse(const char *args,
+ const char *const valid_keys[]);
/**
* Free a rte_kvargs structure
diff --git a/lib/librte_latencystats/Makefile b/lib/librte_latencystats/Makefile
new file mode 100644
index 00000000..eaacbb73
--- /dev/null
+++ b/lib/librte_latencystats/Makefile
@@ -0,0 +1,50 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_latencystats.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+LDLIBS += -lm
+LDLIBS += -lpthread
+
+EXPORT_MAP := rte_latencystats_version.map
+
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_LATENCY_STATS) := rte_latencystats.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_LATENCY_STATS)-include := rte_latencystats.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_latencystats/rte_latencystats.c b/lib/librte_latencystats/rte_latencystats.c
new file mode 100644
index 00000000..ce029a12
--- /dev/null
+++ b/lib/librte_latencystats/rte_latencystats.c
@@ -0,0 +1,360 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <stdbool.h>
+#include <math.h>
+
+#include <rte_mbuf.h>
+#include <rte_log.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_metrics.h>
+#include <rte_memzone.h>
+#include <rte_lcore.h>
+
+#include "rte_latencystats.h"
+
+/** Nano seconds per second */
+#define NS_PER_SEC 1E9
+
+/** Clock cycles per nano second */
+static uint64_t
+latencystat_cycles_per_ns(void)
+{
+ return rte_get_timer_hz() / NS_PER_SEC;
+}
+
+/* Macros for printing using RTE_LOG */
+#define RTE_LOGTYPE_LATENCY_STATS RTE_LOGTYPE_USER1
+
+static const char *MZ_RTE_LATENCY_STATS = "rte_latencystats";
+static int latency_stats_index;
+static uint64_t samp_intvl;
+static uint64_t timer_tsc;
+static uint64_t prev_tsc;
+
+struct rte_latency_stats {
+ float min_latency; /**< Minimum latency in nano seconds */
+ float avg_latency; /**< Average latency in nano seconds */
+ float max_latency; /**< Maximum latency in nano seconds */
+ float jitter; /** Latency variation */
+};
+
+static struct rte_latency_stats *glob_stats;
+
+struct rxtx_cbs {
+ struct rte_eth_rxtx_callback *cb;
+};
+
+static struct rxtx_cbs rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
+static struct rxtx_cbs tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
+
+struct latency_stats_nameoff {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
+static const struct latency_stats_nameoff lat_stats_strings[] = {
+ {"min_latency_ns", offsetof(struct rte_latency_stats, min_latency)},
+ {"avg_latency_ns", offsetof(struct rte_latency_stats, avg_latency)},
+ {"max_latency_ns", offsetof(struct rte_latency_stats, max_latency)},
+ {"jitter_ns", offsetof(struct rte_latency_stats, jitter)},
+};
+
+#define NUM_LATENCY_STATS (sizeof(lat_stats_strings) / \
+ sizeof(lat_stats_strings[0]))
+
+int32_t
+rte_latencystats_update(void)
+{
+ unsigned int i;
+ float *stats_ptr = NULL;
+ uint64_t values[NUM_LATENCY_STATS] = {0};
+ int ret;
+
+ for (i = 0; i < NUM_LATENCY_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(glob_stats,
+ lat_stats_strings[i].offset);
+ values[i] = (uint64_t)floor((*stats_ptr)/
+ latencystat_cycles_per_ns());
+ }
+
+ ret = rte_metrics_update_values(RTE_METRICS_GLOBAL,
+ latency_stats_index,
+ values, NUM_LATENCY_STATS);
+ if (ret < 0)
+ RTE_LOG(INFO, LATENCY_STATS, "Failed to push the stats\n");
+
+ return ret;
+}
+
+static void
+rte_latencystats_fill_values(struct rte_metric_value *values)
+{
+ unsigned int i;
+ float *stats_ptr = NULL;
+
+ for (i = 0; i < NUM_LATENCY_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(glob_stats,
+ lat_stats_strings[i].offset);
+ values[i].key = i;
+ values[i].value = (uint64_t)floor((*stats_ptr)/
+ latencystat_cycles_per_ns());
+ }
+}
+
+static uint16_t
+add_time_stamps(uint8_t pid __rte_unused,
+ uint16_t qid __rte_unused,
+ struct rte_mbuf **pkts,
+ uint16_t nb_pkts,
+ uint16_t max_pkts __rte_unused,
+ void *user_cb __rte_unused)
+{
+ unsigned int i;
+ uint64_t diff_tsc, now;
+
+ /*
+ * For every sample interval,
+ * time stamp is marked on one received packet.
+ */
+ now = rte_rdtsc();
+ for (i = 0; i < nb_pkts; i++) {
+ diff_tsc = now - prev_tsc;
+ timer_tsc += diff_tsc;
+ if (timer_tsc >= samp_intvl) {
+ pkts[i]->timestamp = now;
+ timer_tsc = 0;
+ }
+ prev_tsc = now;
+ now = rte_rdtsc();
+ }
+
+ return nb_pkts;
+}
+
+static uint16_t
+calc_latency(uint8_t pid __rte_unused,
+ uint16_t qid __rte_unused,
+ struct rte_mbuf **pkts,
+ uint16_t nb_pkts,
+ void *_ __rte_unused)
+{
+ unsigned int i, cnt = 0;
+ uint64_t now;
+ float latency[nb_pkts];
+ static float prev_latency;
+ /*
+ * Alpha represents degree of weighting decrease in EWMA,
+ * a constant smoothing factor between 0 and 1. The value
+ * is used below for measuring average latency.
+ */
+ const float alpha = 0.2;
+
+ now = rte_rdtsc();
+ for (i = 0; i < nb_pkts; i++) {
+ if (pkts[i]->timestamp)
+ latency[cnt++] = now - pkts[i]->timestamp;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ /*
+ * The jitter is calculated as statistical mean of interpacket
+ * delay variation. The "jitter estimate" is computed by taking
+ * the absolute values of the ipdv sequence and applying an
+ * exponential filter with parameter 1/16 to generate the
+ * estimate. i.e J=J+(|D(i-1,i)|-J)/16. Where J is jitter,
+ * D(i-1,i) is difference in latency of two consecutive packets
+ * i-1 and i.
+ * Reference: Calculated as per RFC 5481, sec 4.1,
+ * RFC 3393 sec 4.5, RFC 1889 sec.
+ */
+ glob_stats->jitter += (fabsf(prev_latency - latency[i])
+ - glob_stats->jitter)/16;
+ if (glob_stats->min_latency == 0)
+ glob_stats->min_latency = latency[i];
+ else if (latency[i] < glob_stats->min_latency)
+ glob_stats->min_latency = latency[i];
+ else if (latency[i] > glob_stats->max_latency)
+ glob_stats->max_latency = latency[i];
+ /*
+ * The average latency is measured using exponential moving
+ * average, i.e. using EWMA
+ * https://en.wikipedia.org/wiki/Moving_average
+ */
+ glob_stats->avg_latency +=
+ alpha * (latency[i] - glob_stats->avg_latency);
+ prev_latency = latency[i];
+ }
+
+ return nb_pkts;
+}
+
+int
+rte_latencystats_init(uint64_t app_samp_intvl,
+ rte_latency_stats_flow_type_fn user_cb)
+{
+ unsigned int i;
+ uint8_t pid;
+ uint16_t qid;
+ struct rxtx_cbs *cbs = NULL;
+ const uint8_t nb_ports = rte_eth_dev_count();
+ const char *ptr_strings[NUM_LATENCY_STATS] = {0};
+ const struct rte_memzone *mz = NULL;
+ const unsigned int flags = 0;
+
+ if (rte_memzone_lookup(MZ_RTE_LATENCY_STATS))
+ return -EEXIST;
+
+ /** Allocate stats in shared memory fo multi process support */
+ mz = rte_memzone_reserve(MZ_RTE_LATENCY_STATS, sizeof(*glob_stats),
+ rte_socket_id(), flags);
+ if (mz == NULL) {
+ RTE_LOG(ERR, LATENCY_STATS, "Cannot reserve memory: %s:%d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ glob_stats = mz->addr;
+ samp_intvl = app_samp_intvl * latencystat_cycles_per_ns();
+
+ /** Register latency stats with stats library */
+ for (i = 0; i < NUM_LATENCY_STATS; i++)
+ ptr_strings[i] = lat_stats_strings[i].name;
+
+ latency_stats_index = rte_metrics_reg_names(ptr_strings,
+ NUM_LATENCY_STATS);
+ if (latency_stats_index < 0) {
+ RTE_LOG(DEBUG, LATENCY_STATS,
+ "Failed to register latency stats names\n");
+ return -1;
+ }
+
+ /** Register Rx/Tx callbacks */
+ for (pid = 0; pid < nb_ports; pid++) {
+ struct rte_eth_dev_info dev_info;
+ rte_eth_dev_info_get(pid, &dev_info);
+ for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
+ cbs = &rx_cbs[pid][qid];
+ cbs->cb = rte_eth_add_first_rx_callback(pid, qid,
+ add_time_stamps, user_cb);
+ if (!cbs->cb)
+ RTE_LOG(INFO, LATENCY_STATS, "Failed to "
+ "register Rx callback for pid=%d, "
+ "qid=%d\n", pid, qid);
+ }
+ for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
+ cbs = &tx_cbs[pid][qid];
+ cbs->cb = rte_eth_add_tx_callback(pid, qid,
+ calc_latency, user_cb);
+ if (!cbs->cb)
+ RTE_LOG(INFO, LATENCY_STATS, "Failed to "
+ "register Tx callback for pid=%d, "
+ "qid=%d\n", pid, qid);
+ }
+ }
+ return 0;
+}
+
+int
+rte_latencystats_uninit(void)
+{
+ uint8_t pid;
+ uint16_t qid;
+ int ret = 0;
+ struct rxtx_cbs *cbs = NULL;
+ const uint8_t nb_ports = rte_eth_dev_count();
+
+ /** De register Rx/Tx callbacks */
+ for (pid = 0; pid < nb_ports; pid++) {
+ struct rte_eth_dev_info dev_info;
+ rte_eth_dev_info_get(pid, &dev_info);
+ for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
+ cbs = &rx_cbs[pid][qid];
+ ret = rte_eth_remove_rx_callback(pid, qid, cbs->cb);
+ if (ret)
+ RTE_LOG(INFO, LATENCY_STATS, "failed to "
+ "remove Rx callback for pid=%d, "
+ "qid=%d\n", pid, qid);
+ }
+ for (qid = 0; qid < dev_info.nb_tx_queues; qid++) {
+ cbs = &tx_cbs[pid][qid];
+ ret = rte_eth_remove_tx_callback(pid, qid, cbs->cb);
+ if (ret)
+ RTE_LOG(INFO, LATENCY_STATS, "failed to "
+ "remove Tx callback for pid=%d, "
+ "qid=%d\n", pid, qid);
+ }
+ }
+
+ return 0;
+}
+
+int
+rte_latencystats_get_names(struct rte_metric_name *names, uint16_t size)
+{
+ unsigned int i;
+
+ if (names == NULL || size < NUM_LATENCY_STATS)
+ return NUM_LATENCY_STATS;
+
+ for (i = 0; i < NUM_LATENCY_STATS; i++)
+ snprintf(names[i].name, sizeof(names[i].name),
+ "%s", lat_stats_strings[i].name);
+
+ return NUM_LATENCY_STATS;
+}
+
+int
+rte_latencystats_get(struct rte_metric_value *values, uint16_t size)
+{
+ if (size < NUM_LATENCY_STATS || values == NULL)
+ return NUM_LATENCY_STATS;
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ const struct rte_memzone *mz;
+ mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
+ if (mz == NULL) {
+ RTE_LOG(ERR, LATENCY_STATS,
+ "Latency stats memzone not found\n");
+ return -ENOMEM;
+ }
+ glob_stats = mz->addr;
+ }
+
+ /* Retrieve latency stats */
+ rte_latencystats_fill_values(values);
+
+ return NUM_LATENCY_STATS;
+}
diff --git a/lib/librte_latencystats/rte_latencystats.h b/lib/librte_latencystats/rte_latencystats.h
new file mode 100644
index 00000000..d85cf3a5
--- /dev/null
+++ b/lib/librte_latencystats/rte_latencystats.h
@@ -0,0 +1,156 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_LATENCYSTATS_H_
+#define _RTE_LATENCYSTATS_H_
+
+/**
+ * @file
+ * RTE latency stats
+ *
+ * library to provide application and flow based latency stats.
+ */
+
+#include <stdint.h>
+#include <rte_metrics.h>
+#include <rte_mbuf.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Note: This function pointer is for future flow based latency stats
+ * implementation.
+ *
+ * Function type used for identifting flow types of a Rx packet.
+ *
+ * The callback function is called on Rx for each packet.
+ * This function is used for flow based latency calculations.
+ *
+ * @param pkt
+ * Packet that has to be identified with its flow types.
+ * @param user_param
+ * The arbitrary user parameter passed in by the application when
+ * the callback was originally configured.
+ * @return
+ * The flow_mask, representing the multiple flow types of a packet.
+ */
+typedef uint16_t (*rte_latency_stats_flow_type_fn)(struct rte_mbuf *pkt,
+ void *user_param);
+
+/**
+ * Registers Rx/Tx callbacks for each active port, queue.
+ *
+ * @param samp_intvl
+ * Sampling time period in nano seconds, at which packet
+ * should be marked with time stamp.
+ * @param user_cb
+ * Note: This param is for future flow based latency stats
+ * implementation.
+ * User callback to be called to get flow types of a packet.
+ * Used for flow based latency calculation.
+ * If the value is NULL, global stats will be calculated,
+ * else flow based latency stats will be calculated.
+ * For now just pass on the NULL value to this param.
+ * @return
+ * -1 : On error
+ * -ENOMEM: On error
+ * 0 : On success
+ */
+int rte_latencystats_init(uint64_t samp_intvl,
+ rte_latency_stats_flow_type_fn user_cb);
+
+/**
+ * Calculates the latency and jitter values internally, exposing the updated
+ * values via *rte_latencystats_get* or the rte_metrics API.
+ * @return:
+ * 0 : on Success
+ * < 0 : Error in updating values.
+ */
+int32_t rte_latencystats_update(void);
+
+/**
+ * Removes registered Rx/Tx callbacks for each active port, queue.
+ *
+ * @return
+ * -1: On error
+ * 0: On success
+ */
+int rte_latencystats_uninit(void);
+
+/**
+ * Retrieve names of latency statistics
+ *
+ * @param names
+ * Block of memory to insert names into. Must be at least size in capacity.
+ * If set to NULL, function returns required capacity.
+ * @param size
+ * Capacity of latency stats names (number of names).
+ * @return
+ * - positive value lower or equal to size: success. The return value
+ * is the number of entries filled in the stats table.
+ * - positive value higher than size: error, the given statistics table
+ * is too small. The return value corresponds to the size that should
+ * be given to succeed. The entries in the table are not valid and
+ * shall not be used by the caller.
+ */
+int rte_latencystats_get_names(struct rte_metric_name *names,
+ uint16_t size);
+
+/**
+ * Retrieve latency statistics.
+ *
+ * @param values
+ * A pointer to a table of structure of type *rte_metric_value*
+ * to be filled with latency statistics ids and values.
+ * This parameter can be set to NULL if size is 0.
+ * @param size
+ * The size of the stats table, which should be large enough to store
+ * all the latency stats.
+ * @return
+ * - positive value lower or equal to size: success. The return value
+ * is the number of entries filled in the stats table.
+ * - positive value higher than size: error, the given statistics table
+ * is too small. The return value corresponds to the size that should
+ * be given to succeed. The entries in the table are not valid and
+ * shall not be used by the caller.
+ * -ENOMEM: On failure.
+ */
+int rte_latencystats_get(struct rte_metric_value *values,
+ uint16_t size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_LATENCYSTATS_H_ */
diff --git a/lib/librte_latencystats/rte_latencystats_version.map b/lib/librte_latencystats/rte_latencystats_version.map
new file mode 100644
index 00000000..ac8403e8
--- /dev/null
+++ b/lib/librte_latencystats/rte_latencystats_version.map
@@ -0,0 +1,11 @@
+DPDK_17.05 {
+ global:
+
+ rte_latencystats_get;
+ rte_latencystats_get_names;
+ rte_latencystats_init;
+ rte_latencystats_uninit;
+ rte_latencystats_update;
+
+ local: *;
+};
diff --git a/lib/librte_lpm/Makefile b/lib/librte_lpm/Makefile
index 3dc549dc..32be46b3 100644
--- a/lib/librte_lpm/Makefile
+++ b/lib/librte_lpm/Makefile
@@ -55,7 +55,4 @@ else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y)
SYMLINK-$(CONFIG_RTE_LIBRTE_LPM)-include += rte_lpm_altivec.h
endif
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_LPM) += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_lpm/rte_lpm6.c b/lib/librte_lpm/rte_lpm6.c
index 32fdba01..9cc7be77 100644
--- a/lib/librte_lpm/rte_lpm6.c
+++ b/lib/librte_lpm/rte_lpm6.c
@@ -97,7 +97,7 @@ struct rte_lpm6_tbl_entry {
/** Rules tbl entry structure. */
struct rte_lpm6_rule {
uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */
- uint8_t next_hop; /**< Rule next hop. */
+ uint32_t next_hop; /**< Rule next hop. */
uint8_t depth; /**< Rule depth. */
};
@@ -297,7 +297,7 @@ rte_lpm6_free(struct rte_lpm6 *lpm)
* the nexthop if so. Otherwise it adds a new rule if enough space is available.
*/
static inline int32_t
-rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t next_hop, uint8_t depth)
+rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint32_t next_hop, uint8_t depth)
{
uint32_t rule_index;
@@ -340,7 +340,7 @@ rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t next_hop, uint8_t depth)
*/
static void
expand_rule(struct rte_lpm6 *lpm, uint32_t tbl8_gindex, uint8_t depth,
- uint8_t next_hop)
+ uint32_t next_hop)
{
uint32_t tbl8_group_end, tbl8_gindex_next, j;
@@ -377,7 +377,7 @@ expand_rule(struct rte_lpm6 *lpm, uint32_t tbl8_gindex, uint8_t depth,
static inline int
add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip, uint8_t bytes,
- uint8_t first_byte, uint8_t depth, uint8_t next_hop)
+ uint8_t first_byte, uint8_t depth, uint32_t next_hop)
{
uint32_t tbl_index, tbl_range, tbl8_group_start, tbl8_group_end, i;
int32_t tbl8_gindex;
@@ -507,9 +507,17 @@ add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl,
* Add a route
*/
int
-rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+rte_lpm6_add_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
uint8_t next_hop)
{
+ return rte_lpm6_add_v1705(lpm, ip, depth, next_hop);
+}
+VERSION_SYMBOL(rte_lpm6_add, _v20, 2.0);
+
+int
+rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+ uint32_t next_hop)
+{
struct rte_lpm6_tbl_entry *tbl;
struct rte_lpm6_tbl_entry *tbl_next;
int32_t rule_index;
@@ -560,6 +568,10 @@ rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
return status;
}
+BIND_DEFAULT_SYMBOL(rte_lpm6_add, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip,
+ uint8_t depth, uint32_t next_hop),
+ rte_lpm6_add_v1705);
/*
* Takes a pointer to a table entry and inspect one level.
@@ -569,7 +581,7 @@ rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
static inline int
lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
const struct rte_lpm6_tbl_entry **tbl_next, uint8_t *ip,
- uint8_t first_byte, uint8_t *next_hop)
+ uint8_t first_byte, uint32_t *next_hop)
{
uint32_t tbl8_index, tbl_entry;
@@ -589,7 +601,7 @@ lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
return 1;
} else {
/* If not extended then we can have a match. */
- *next_hop = (uint8_t)tbl_entry;
+ *next_hop = ((uint32_t)tbl_entry & RTE_LPM6_TBL8_BITMASK);
return (tbl_entry & RTE_LPM6_LOOKUP_SUCCESS) ? 0 : -ENOENT;
}
}
@@ -598,7 +610,26 @@ lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl,
* Looks up an IP
*/
int
-rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop)
+rte_lpm6_lookup_v20(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop)
+{
+ uint32_t next_hop32 = 0;
+ int32_t status;
+
+ /* DEBUG: Check user input arguments. */
+ if (next_hop == NULL)
+ return -EINVAL;
+
+ status = rte_lpm6_lookup_v1705(lpm, ip, &next_hop32);
+ if (status == 0)
+ *next_hop = (uint8_t)next_hop32;
+
+ return status;
+}
+VERSION_SYMBOL(rte_lpm6_lookup, _v20, 2.0);
+
+int
+rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip,
+ uint32_t *next_hop)
{
const struct rte_lpm6_tbl_entry *tbl;
const struct rte_lpm6_tbl_entry *tbl_next = NULL;
@@ -625,20 +656,23 @@ rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop)
return status;
}
+BIND_DEFAULT_SYMBOL(rte_lpm6_lookup, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip,
+ uint32_t *next_hop), rte_lpm6_lookup_v1705);
/*
* Looks up a group of IP addresses
*/
int
-rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
+rte_lpm6_lookup_bulk_func_v20(const struct rte_lpm6 *lpm,
uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
int16_t * next_hops, unsigned n)
{
unsigned i;
const struct rte_lpm6_tbl_entry *tbl;
const struct rte_lpm6_tbl_entry *tbl_next = NULL;
- uint32_t tbl24_index;
- uint8_t first_byte, next_hop;
+ uint32_t tbl24_index, next_hop;
+ uint8_t first_byte;
int status;
/* DEBUG: Check user input arguments. */
@@ -664,11 +698,59 @@ rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
if (status < 0)
next_hops[i] = -1;
else
- next_hops[i] = next_hop;
+ next_hops[i] = (int16_t)next_hop;
+ }
+
+ return 0;
+}
+VERSION_SYMBOL(rte_lpm6_lookup_bulk_func, _v20, 2.0);
+
+int
+rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm,
+ uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
+ int32_t *next_hops, unsigned int n)
+{
+ unsigned int i;
+ const struct rte_lpm6_tbl_entry *tbl;
+ const struct rte_lpm6_tbl_entry *tbl_next = NULL;
+ uint32_t tbl24_index, next_hop;
+ uint8_t first_byte;
+ int status;
+
+ /* DEBUG: Check user input arguments. */
+ if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ first_byte = LOOKUP_FIRST_BYTE;
+ tbl24_index = (ips[i][0] << BYTES2_SIZE) |
+ (ips[i][1] << BYTE_SIZE) | ips[i][2];
+
+ /* Calculate pointer to the first entry to be inspected */
+ tbl = &lpm->tbl24[tbl24_index];
+
+ do {
+ /* Continue inspecting following levels
+ * until success or failure
+ */
+ status = lookup_step(lpm, tbl, &tbl_next, ips[i],
+ first_byte++, &next_hop);
+ tbl = tbl_next;
+ } while (status == 1);
+
+ if (status < 0)
+ next_hops[i] = -1;
+ else
+ next_hops[i] = (int32_t)next_hop;
}
return 0;
}
+BIND_DEFAULT_SYMBOL(rte_lpm6_lookup_bulk_func, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
+ uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
+ int32_t *next_hops, unsigned int n),
+ rte_lpm6_lookup_bulk_func_v1705);
/*
* Finds a rule in rule table.
@@ -698,8 +780,28 @@ rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
* Look for a rule in the high-level rules table
*/
int
-rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
-uint8_t *next_hop)
+rte_lpm6_is_rule_present_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+ uint8_t *next_hop)
+{
+ uint32_t next_hop32 = 0;
+ int32_t status;
+
+ /* DEBUG: Check user input arguments. */
+ if (next_hop == NULL)
+ return -EINVAL;
+
+ status = rte_lpm6_is_rule_present_v1705(lpm, ip, depth, &next_hop32);
+ if (status > 0)
+ *next_hop = (uint8_t)next_hop32;
+
+ return status;
+
+}
+VERSION_SYMBOL(rte_lpm6_is_rule_present, _v20, 2.0);
+
+int
+rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+ uint32_t *next_hop)
{
uint8_t ip_masked[RTE_LPM6_IPV6_ADDR_SIZE];
int32_t rule_index;
@@ -724,6 +826,10 @@ uint8_t *next_hop)
/* If rule is not found return 0. */
return 0;
}
+BIND_DEFAULT_SYMBOL(rte_lpm6_is_rule_present, _v1705, 17.05);
+MAP_STATIC_SYMBOL(int rte_lpm6_is_rule_present(struct rte_lpm6 *lpm,
+ uint8_t *ip, uint8_t depth, uint32_t *next_hop),
+ rte_lpm6_is_rule_present_v1705);
/*
* Delete a rule from the rule table.
diff --git a/lib/librte_lpm/rte_lpm6.h b/lib/librte_lpm/rte_lpm6.h
index 13d027f9..3a3342da 100644
--- a/lib/librte_lpm/rte_lpm6.h
+++ b/lib/librte_lpm/rte_lpm6.h
@@ -39,6 +39,7 @@
*/
#include <stdint.h>
+#include <rte_compat.h>
#ifdef __cplusplus
extern "C" {
@@ -123,7 +124,13 @@ rte_lpm6_free(struct rte_lpm6 *lpm);
*/
int
rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+ uint32_t next_hop);
+int
+rte_lpm6_add_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
uint8_t next_hop);
+int
+rte_lpm6_add_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+ uint32_t next_hop);
/**
* Check if a rule is present in the LPM table,
@@ -142,7 +149,13 @@ rte_lpm6_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
*/
int
rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
-uint8_t *next_hop);
+ uint32_t *next_hop);
+int
+rte_lpm6_is_rule_present_v20(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+ uint8_t *next_hop);
+int
+rte_lpm6_is_rule_present_v1705(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
+ uint32_t *next_hop);
/**
* Delete a rule from the LPM table.
@@ -199,7 +212,12 @@ rte_lpm6_delete_all(struct rte_lpm6 *lpm);
* -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
*/
int
-rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop);
+rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint32_t *next_hop);
+int
+rte_lpm6_lookup_v20(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop);
+int
+rte_lpm6_lookup_v1705(const struct rte_lpm6 *lpm, uint8_t *ip,
+ uint32_t *next_hop);
/**
* Lookup multiple IP addresses in an LPM table.
@@ -220,7 +238,15 @@ rte_lpm6_lookup(const struct rte_lpm6 *lpm, uint8_t *ip, uint8_t *next_hop);
int
rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm,
uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
- int16_t * next_hops, unsigned n);
+ int32_t *next_hops, unsigned int n);
+int
+rte_lpm6_lookup_bulk_func_v20(const struct rte_lpm6 *lpm,
+ uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
+ int16_t *next_hops, unsigned int n);
+int
+rte_lpm6_lookup_bulk_func_v1705(const struct rte_lpm6 *lpm,
+ uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE],
+ int32_t *next_hops, unsigned int n);
#ifdef __cplusplus
}
diff --git a/lib/librte_lpm/rte_lpm_version.map b/lib/librte_lpm/rte_lpm_version.map
index 239b371e..90beac85 100644
--- a/lib/librte_lpm/rte_lpm_version.map
+++ b/lib/librte_lpm/rte_lpm_version.map
@@ -34,3 +34,13 @@ DPDK_16.04 {
rte_lpm_delete_all;
} DPDK_2.0;
+
+DPDK_17.05 {
+ global:
+
+ rte_lpm6_add;
+ rte_lpm6_is_rule_present;
+ rte_lpm6_lookup;
+ rte_lpm6_lookup_bulk_func;
+
+} DPDK_16.04;
diff --git a/lib/librte_mbuf/Makefile b/lib/librte_mbuf/Makefile
index 4ae2e8c8..54827305 100644
--- a/lib/librte_mbuf/Makefile
+++ b/lib/librte_mbuf/Makefile
@@ -38,7 +38,7 @@ CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
EXPORT_MAP := rte_mbuf_version.map
-LIBABIVER := 2
+LIBABIVER := 3
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_MBUF) := rte_mbuf.c rte_mbuf_ptype.c
@@ -46,7 +46,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_MBUF) := rte_mbuf.c rte_mbuf_ptype.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF)-include := rte_mbuf.h rte_mbuf_ptype.h
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF) += lib/librte_eal lib/librte_mempool
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index 63f43c89..0e3e36a5 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -62,7 +62,7 @@
/*
* ctrlmbuf constructor, given as a callback function to
- * rte_mempool_create()
+ * rte_mempool_obj_iter() or rte_mempool_create()
*/
void
rte_ctrlmbuf_init(struct rte_mempool *mp,
@@ -77,7 +77,8 @@ rte_ctrlmbuf_init(struct rte_mempool *mp,
/*
* pktmbuf pool constructor, given as a callback function to
- * rte_mempool_create()
+ * rte_mempool_create(), or called directly if using
+ * rte_mempool_create_empty()/rte_mempool_populate()
*/
void
rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
@@ -110,7 +111,7 @@ rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
/*
* pktmbuf constructor, given as a callback function to
- * rte_mempool_create().
+ * rte_mempool_obj_iter() or rte_mempool_create().
* Set the fields of a packet mbuf to their default values.
*/
void
@@ -145,6 +146,8 @@ rte_pktmbuf_init(struct rte_mempool *mp,
m->pool = mp;
m->nb_segs = 1;
m->port = 0xff;
+ rte_mbuf_refcnt_set(m, 1);
+ m->next = NULL;
}
/* helper to create a mbuf pool */
@@ -320,6 +323,7 @@ const char *rte_get_rx_ol_flag_name(uint64_t mask)
case PKT_RX_IEEE1588_TMST: return "PKT_RX_IEEE1588_TMST";
case PKT_RX_QINQ_STRIPPED: return "PKT_RX_QINQ_STRIPPED";
case PKT_RX_LRO: return "PKT_RX_LRO";
+ case PKT_RX_TIMESTAMP: return "PKT_RX_TIMESTAMP";
default: return NULL;
}
}
@@ -354,6 +358,7 @@ rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
{ PKT_RX_IEEE1588_TMST, PKT_RX_IEEE1588_TMST, NULL },
{ PKT_RX_QINQ_STRIPPED, PKT_RX_QINQ_STRIPPED, NULL },
{ PKT_RX_LRO, PKT_RX_LRO, NULL },
+ { PKT_RX_TIMESTAMP, PKT_RX_TIMESTAMP, NULL },
};
const char *name;
unsigned int i;
@@ -404,6 +409,7 @@ const char *rte_get_tx_ol_flag_name(uint64_t mask)
case PKT_TX_TUNNEL_GRE: return "PKT_TX_TUNNEL_GRE";
case PKT_TX_TUNNEL_IPIP: return "PKT_TX_TUNNEL_IPIP";
case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
+ case PKT_TX_MACSEC: return "PKT_TX_MACSEC";
default: return NULL;
}
}
@@ -434,6 +440,7 @@ rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
"PKT_TX_TUNNEL_NONE" },
{ PKT_TX_TUNNEL_GENEVE, PKT_TX_TUNNEL_MASK,
"PKT_TX_TUNNEL_NONE" },
+ { PKT_TX_MACSEC, PKT_TX_MACSEC, NULL },
};
const char *name;
unsigned int i;
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index ead7c6ea..1cb03109 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -44,6 +44,13 @@
* buffers. The message buffers are stored in a mempool, using the
* RTE mempool library.
*
+ * The preferred way to create a mbuf pool is to use
+ * rte_pktmbuf_pool_create(). However, in some situations, an
+ * application may want to have more control (ex: populate the pool with
+ * specific memory), in this case it is possible to use functions from
+ * rte_mempool. See how rte_pktmbuf_pool_create() is implemented for
+ * details.
+ *
* This library provides an API to allocate/free packet mbufs, which are
* used to carry network packets.
*
@@ -177,11 +184,22 @@ extern "C" {
*/
#define PKT_RX_LRO (1ULL << 16)
+/**
+ * Indicate that the timestamp field in the mbuf is valid.
+ */
+#define PKT_RX_TIMESTAMP (1ULL << 17)
+
/* add new RX flags here */
/* add new TX flags here */
/**
+ * Offload the MACsec. This flag must be set by the application to enable
+ * this offload feature for a packet to be transmitted.
+ */
+#define PKT_TX_MACSEC (1ULL << 44)
+
+/**
* Bits 45:48 used for the tunnel type.
* When doing Tx offload like TSO or checksum, the HW needs to configure the
* tunnel type into the HW descriptors.
@@ -283,6 +301,21 @@ extern "C" {
*/
#define PKT_TX_OUTER_IPV6 (1ULL << 60)
+/**
+ * Bitmask of all supported packet Tx offload features flags,
+ * which can be set for packet.
+ */
+#define PKT_TX_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG | \
+ PKT_TX_IEEE1588_TMST | \
+ PKT_TX_QINQ_PKT | \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_TUNNEL_MASK | \
+ PKT_TX_MACSEC)
+
#define __RESERVED (1ULL << 61) /**< reserved for future mbuf use */
#define IND_ATTACHED_MBUF (1ULL << 62) /**< Indirect attached mbuf */
@@ -370,16 +403,21 @@ struct rte_mbuf {
MARKER cacheline0;
void *buf_addr; /**< Virtual address of segment buffer. */
- phys_addr_t buf_physaddr; /**< Physical address of segment buffer. */
-
- uint16_t buf_len; /**< Length of segment buffer. */
+ /**
+ * Physical address of segment buffer.
+ * Force alignment to 8-bytes, so as to ensure we have the exact
+ * same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
+ * working on vector drivers easier.
+ */
+ phys_addr_t buf_physaddr __rte_aligned(sizeof(phys_addr_t));
- /* next 6 bytes are initialised on RX descriptor rearm */
- MARKER8 rearm_data;
+ /* next 8 bytes are initialised on RX descriptor rearm */
+ MARKER64 rearm_data;
uint16_t data_off;
/**
- * 16-bit Reference counter.
+ * Reference counter. Its size should at least equal to the size
+ * of port field (16 bits), to support zero-copy broadcast.
* It should only be accessed using the following functions:
* rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
* rte_mbuf_refcnt_set(). The functionality of these functions (atomic,
@@ -391,8 +429,10 @@ struct rte_mbuf {
rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
uint16_t refcnt; /**< Non-atomically accessed refcnt */
};
- uint8_t nb_segs; /**< Number of segments. */
- uint8_t port; /**< Input port. */
+ uint16_t nb_segs; /**< Number of segments. */
+
+ /** Input port (16 bits to support more than 256 virtual ports). */
+ uint16_t port;
uint64_t ol_flags; /**< Offload features. */
@@ -448,11 +488,16 @@ struct rte_mbuf {
uint32_t usr; /**< User defined tags. See rte_distributor_process() */
} hash; /**< hash information */
- uint32_t seqn; /**< Sequence number. See also rte_reorder_insert() */
-
/** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ_STRIPPED is set. */
uint16_t vlan_tci_outer;
+ uint16_t buf_len; /**< Length of segment buffer. */
+
+ /** Valid if PKT_RX_TIMESTAMP is set. The unit and time reference
+ * are not normalized but are always the same for a given port.
+ */
+ uint64_t timestamp;
+
/* second cache line - fields only used in slow path or on TX */
MARKER cacheline1 __rte_cache_min_aligned;
@@ -493,6 +538,10 @@ struct rte_mbuf {
/** Timesync flags for use with IEEE1588. */
uint16_t timesync;
+
+ /** Sequence number. See also rte_reorder_insert(). */
+ uint32_t seqn;
+
} __rte_cache_aligned;
/**
@@ -739,6 +788,13 @@ rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
+#define MBUF_RAW_ALLOC_CHECK(m) do { \
+ RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1); \
+ RTE_ASSERT((m)->next == NULL); \
+ RTE_ASSERT((m)->nb_segs == 1); \
+ __rte_mbuf_sanity_check(m, 0); \
+} while (0)
+
/**
* Allocate an unitialized mbuf from mempool *mp*.
*
@@ -747,6 +803,11 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
* initializing all the required fields. See rte_pktmbuf_reset().
* For standard needs, prefer rte_pktmbuf_alloc().
*
+ * The caller can expect that the following fields of the mbuf structure
+ * are initialized: buf_addr, buf_physaddr, buf_len, refcnt=1, nb_segs=1,
+ * next=NULL, pool, priv_size. The other fields must be initialized
+ * by the caller.
+ *
* @param mp
* The mempool from which mbuf is allocated.
* @return
@@ -761,28 +822,43 @@ static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
if (rte_mempool_get(mp, &mb) < 0)
return NULL;
m = (struct rte_mbuf *)mb;
- RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
- rte_mbuf_refcnt_set(m, 1);
- __rte_mbuf_sanity_check(m, 0);
-
+ MBUF_RAW_ALLOC_CHECK(m);
return m;
}
/**
- * @internal Put mbuf back into its original mempool.
- * The use of that function is reserved for RTE internal needs.
- * Please use rte_pktmbuf_free().
+ * Put mbuf back into its original mempool.
+ *
+ * The caller must ensure that the mbuf is direct and properly
+ * reinitialized (refcnt=1, next=NULL, nb_segs=1), as done by
+ * rte_pktmbuf_prefree_seg().
+ *
+ * This function should be used with care, when optimization is
+ * required. For standard needs, prefer rte_pktmbuf_free() or
+ * rte_pktmbuf_free_seg().
*
* @param m
* The mbuf to be freed.
*/
static inline void __attribute__((always_inline))
-__rte_mbuf_raw_free(struct rte_mbuf *m)
+rte_mbuf_raw_free(struct rte_mbuf *m)
{
- RTE_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+ RTE_ASSERT(RTE_MBUF_DIRECT(m));
+ RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
+ RTE_ASSERT(m->next == NULL);
+ RTE_ASSERT(m->nb_segs == 1);
+ __rte_mbuf_sanity_check(m, 0);
rte_mempool_put(m->pool, m);
}
+/* compat with older versions */
+__rte_deprecated
+static inline void
+__rte_mbuf_raw_free(struct rte_mbuf *m)
+{
+ rte_mbuf_raw_free(m);
+}
+
/* Operations on ctrl mbuf */
/**
@@ -791,14 +867,14 @@ __rte_mbuf_raw_free(struct rte_mbuf *m)
* This function initializes some fields in an mbuf structure that are
* not modified by the user once created (mbuf type, origin pool, buffer
* start address, and so on). This function is given as a callback function
- * to rte_mempool_create() at pool creation time.
+ * to rte_mempool_obj_iter() or rte_mempool_create() at pool creation time.
*
* @param mp
* The mempool from which the mbuf is allocated.
* @param opaque_arg
* A pointer that can be used by the user to retrieve useful information
- * for mbuf initialization. This pointer comes from the ``init_arg``
- * parameter of rte_mempool_create().
+ * for mbuf initialization. This pointer is the opaque argument passed to
+ * rte_mempool_obj_iter() or rte_mempool_create().
* @param m
* The mbuf to initialize.
* @param i
@@ -872,14 +948,14 @@ rte_is_ctrlmbuf(struct rte_mbuf *m)
* This function initializes some fields in the mbuf structure that are
* not modified by the user once created (origin pool, buffer start
* address, and so on). This function is given as a callback function to
- * rte_mempool_create() at pool creation time.
+ * rte_mempool_obj_iter() or rte_mempool_create() at pool creation time.
*
* @param mp
* The mempool from which mbufs originate.
* @param opaque_arg
* A pointer that can be used by the user to retrieve useful information
- * for mbuf initialization. This pointer comes from the ``init_arg``
- * parameter of rte_mempool_create().
+ * for mbuf initialization. This pointer is the opaque argument passed to
+ * rte_mempool_obj_iter() or rte_mempool_create().
* @param m
* The mbuf to initialize.
* @param i
@@ -894,7 +970,8 @@ void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
*
* This function initializes the mempool private data in the case of a
* pktmbuf pool. This private data is needed by the driver. The
- * function is given as a callback function to rte_mempool_create() at
+ * function must be called on the mempool before it is used, or it
+ * can be given as a callback function to rte_mempool_create() at
* pool creation. It can be extended by the user, for example, to
* provide another packet size.
*
@@ -902,8 +979,8 @@ void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
* The mempool from which mbufs originate.
* @param opaque_arg
* A pointer that can be used by the user to retrieve useful information
- * for mbuf initialization. This pointer comes from the ``init_arg``
- * parameter of rte_mempool_create().
+ * for mbuf initialization. This pointer is the opaque argument passed to
+ * rte_mempool_create().
*/
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
@@ -911,8 +988,7 @@ void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
* Create a mbuf pool.
*
* This function creates and initializes a packet mbuf pool. It is
- * a wrapper to rte_mempool_create() with the proper packet constructor
- * and mempool constructor.
+ * a wrapper to rte_mempool functions.
*
* @param name
* The name of the mbuf pool.
@@ -1079,25 +1155,25 @@ static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
switch (count % 4) {
case 0:
while (idx != count) {
- RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
- rte_mbuf_refcnt_set(mbufs[idx], 1);
+ MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
+ /* fall-through */
case 3:
- RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
- rte_mbuf_refcnt_set(mbufs[idx], 1);
+ MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
+ /* fall-through */
case 2:
- RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
- rte_mbuf_refcnt_set(mbufs[idx], 1);
+ MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
+ /* fall-through */
case 1:
- RTE_ASSERT(rte_mbuf_refcnt_read(mbufs[idx]) == 0);
- rte_mbuf_refcnt_set(mbufs[idx], 1);
+ MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
rte_pktmbuf_reset(mbufs[idx]);
idx++;
+ /* fall-through */
}
}
return 0;
@@ -1139,7 +1215,6 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
mi->buf_addr = m->buf_addr;
mi->buf_len = m->buf_len;
- mi->next = m->next;
mi->data_off = m->data_off;
mi->data_len = m->data_len;
mi->port = m->port;
@@ -1153,6 +1228,7 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
mi->nb_segs = 1;
mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
mi->packet_type = m->packet_type;
+ mi->timestamp = m->timestamp;
__rte_mbuf_sanity_check(mi, 1);
__rte_mbuf_sanity_check(m, 0);
@@ -1189,24 +1265,71 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
m->data_len = 0;
m->ol_flags = 0;
- if (rte_mbuf_refcnt_update(md, -1) == 0)
- __rte_mbuf_raw_free(md);
+ if (rte_mbuf_refcnt_update(md, -1) == 0) {
+ md->next = NULL;
+ md->nb_segs = 1;
+ rte_mbuf_refcnt_set(md, 1);
+ rte_mbuf_raw_free(md);
+ }
}
-static inline struct rte_mbuf* __attribute__((always_inline))
-__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
+/**
+ * Decrease reference counter and unlink a mbuf segment
+ *
+ * This function does the same than a free, except that it does not
+ * return the segment to its pool.
+ * It decreases the reference counter, and if it reaches 0, it is
+ * detached from its parent for an indirect mbuf.
+ *
+ * @param m
+ * The mbuf to be unlinked
+ * @return
+ * - (m) if it is the last reference. It can be recycled or freed.
+ * - (NULL) if the mbuf still has remaining references on it.
+ */
+__attribute__((always_inline))
+static inline struct rte_mbuf *
+rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
__rte_mbuf_sanity_check(m, 0);
- if (likely(rte_mbuf_refcnt_update(m, -1) == 0)) {
- /* if this is an indirect mbuf, it is detached. */
+ if (likely(rte_mbuf_refcnt_read(m) == 1)) {
+
if (RTE_MBUF_INDIRECT(m))
rte_pktmbuf_detach(m);
+
+ if (m->next != NULL) {
+ m->next = NULL;
+ m->nb_segs = 1;
+ }
+
+ return m;
+
+ } else if (rte_atomic16_add_return(&m->refcnt_atomic, -1) == 0) {
+
+
+ if (RTE_MBUF_INDIRECT(m))
+ rte_pktmbuf_detach(m);
+
+ if (m->next != NULL) {
+ m->next = NULL;
+ m->nb_segs = 1;
+ }
+ rte_mbuf_refcnt_set(m, 1);
+
return m;
}
return NULL;
}
+/* deprecated, replaced by rte_pktmbuf_prefree_seg() */
+__rte_deprecated
+static inline struct rte_mbuf *
+__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
+{
+ return rte_pktmbuf_prefree_seg(m);
+}
+
/**
* Free a segment of a packet mbuf into its original mempool.
*
@@ -1219,10 +1342,9 @@ __rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
static inline void __attribute__((always_inline))
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
- if (likely(NULL != (m = __rte_pktmbuf_prefree_seg(m)))) {
- m->next = NULL;
- __rte_mbuf_raw_free(m);
- }
+ m = rte_pktmbuf_prefree_seg(m);
+ if (likely(m != NULL))
+ rte_mbuf_raw_free(m);
}
/**
@@ -1647,6 +1769,108 @@ static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail
}
/**
+ * Validate general requirements for Tx offload in mbuf.
+ *
+ * This function checks correctness and completeness of Tx offload settings.
+ *
+ * @param m
+ * The packet mbuf to be validated.
+ * @return
+ * 0 if packet is valid
+ */
+static inline int
+rte_validate_tx_offload(const struct rte_mbuf *m)
+{
+ uint64_t ol_flags = m->ol_flags;
+ uint64_t inner_l3_offset = m->l2_len;
+
+ /* Does packet set any of available offloads? */
+ if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
+ return 0;
+
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
+
+ /* Headers are fragmented */
+ if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
+ return -ENOTSUP;
+
+ /* IP checksum can be counted only for IPv4 packet */
+ if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
+ return -EINVAL;
+
+ /* IP type not set when required */
+ if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
+ if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
+ return -EINVAL;
+
+ /* Check requirements for TSO packet */
+ if (ol_flags & PKT_TX_TCP_SEG)
+ if ((m->tso_segsz == 0) ||
+ ((ol_flags & PKT_TX_IPV4) &&
+ !(ol_flags & PKT_TX_IP_CKSUM)))
+ return -EINVAL;
+
+ /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
+ if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
+ !(ol_flags & PKT_TX_OUTER_IPV4))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * Linearize data in mbuf.
+ *
+ * This function moves the mbuf data in the first segment if there is enough
+ * tailroom. The subsequent segments are unchained and freed.
+ *
+ * @param mbuf
+ * mbuf to linearize
+ * @return
+ * - 0, on success
+ * - -1, on error
+ */
+static inline int
+rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
+{
+ int seg_len, copy_len;
+ struct rte_mbuf *m;
+ struct rte_mbuf *m_next;
+ char *buffer;
+
+ if (rte_pktmbuf_is_contiguous(mbuf))
+ return 0;
+
+ /* Extend first segment to the total packet length */
+ copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);
+
+ if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
+ return -1;
+
+ buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
+ mbuf->data_len = (uint16_t)(mbuf->pkt_len);
+
+ /* Append data from next segments to the first one */
+ m = mbuf->next;
+ while (m != NULL) {
+ m_next = m->next;
+
+ seg_len = rte_pktmbuf_data_len(m);
+ rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
+ buffer += seg_len;
+
+ rte_pktmbuf_free_seg(m);
+ m = m_next;
+ }
+
+ mbuf->next = NULL;
+ mbuf->nb_segs = 1;
+
+ return 0;
+}
+
+/**
* Dump an mbuf structure to a file.
*
* Dump all fields for the given packet mbuf and all its associated
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
index ff6de9d1..a3269c4c 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.h
+++ b/lib/librte_mbuf/rte_mbuf_ptype.h
@@ -91,6 +91,9 @@
* RTE_PTYPE_INNER_L4_UDP.
*/
+#include <stddef.h>
+#include <stdint.h>
+
#ifdef __cplusplus
extern "C" {
#endif
diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index 057a6ab4..7b5bdfee 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -43,11 +43,7 @@ LIBABIVER := 2
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c
SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ops.c
-SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ring.c
-SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_stack.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += lib/librte_eal lib/librte_ring
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index aa513b97..f65310f6 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -818,7 +818,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
goto exit_unlock;
}
mp->mz = mz;
- mp->socket_id = socket_id;
mp->size = n;
mp->flags = flags;
mp->socket_id = socket_id;
@@ -869,6 +868,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
int socket_id, unsigned flags)
{
+ int ret;
struct rte_mempool *mp;
mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
@@ -881,13 +881,16 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
* set the correct index into the table of ops structs.
*/
if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
- rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
+ ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
else if (flags & MEMPOOL_F_SP_PUT)
- rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
+ ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
else if (flags & MEMPOOL_F_SC_GET)
- rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
+ ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
else
- rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
+ ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
+
+ if (ret)
+ goto fail;
/* call the mempool priv initializer */
if (mp_init)
@@ -998,12 +1001,6 @@ rte_mempool_in_use_count(const struct rte_mempool *mp)
return mp->size - rte_mempool_avail_count(mp);
}
-unsigned int
-rte_mempool_count(const struct rte_mempool *mp)
-{
- return rte_mempool_avail_count(mp);
-}
-
/* dump the cache status */
static unsigned
rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
@@ -1047,7 +1044,7 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
/* Force to drop the "const" attribute. This is done only when
* DEBUG is enabled */
tmp = (void *) obj_table_const;
- obj_table = (void **) tmp;
+ obj_table = tmp;
while (n--) {
obj = obj_table[n];
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 956ce04b..48bc8ea3 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -51,13 +51,15 @@
* meta-data in the object data and retrieve them when allocating a
* new object.
*
- * Note: the mempool implementation is not preemptable. A lcore must
- * not be interrupted by another task that uses the same mempool
- * (because it uses a ring which is not preemptable). Also, mempool
- * functions must not be used outside the DPDK environment: for
- * example, in linuxapp environment, a thread that is not created by
- * the EAL must not use mempools. This is due to the per-lcore cache
- * that won't work as rte_lcore_id() will not return a correct value.
+ * Note: the mempool implementation is not preemptible. An lcore must not be
+ * interrupted by another task that uses the same mempool (because it uses a
+ * ring which is not preemptible). Also, usual mempool functions like
+ * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
+ * thread due to the internal per-lcore cache. Due to the lack of caching,
+ * rte_mempool_get() or rte_mempool_put() performance will suffer when called
+ * by non-EAL threads. Instead, non-EAL threads should call
+ * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
+ * created with rte_mempool_cache_create().
*/
#include <stdio.h>
@@ -357,7 +359,7 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
* Prototype for implementation specific data provisioning function.
*
* The function should provide the implementation specific memory for
- * for use by the other mempool ops functions in a given mempool ops struct.
+ * use by the other mempool ops functions in a given mempool ops struct.
* E.g. the default ops provides an instance of the rte_ring for this purpose.
* it will most likely point to a different type of data structure, and
* will be transparent to the application programmer.
@@ -551,7 +553,7 @@ int rte_mempool_register_ops(const struct rte_mempool_ops *ops);
/**
* Macro to statically register the ops of a mempool handler.
* Note that the rte_mempool_register_ops fails silently here when
- * more then RTE_MEMPOOL_MAX_OPS_IDX is registered.
+ * more than RTE_MEMPOOL_MAX_OPS_IDX is registered.
*/
#define MEMPOOL_REGISTER_OPS(ops) \
void mp_hdlr_init_##ops(void); \
@@ -654,7 +656,7 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
* when using rte_mempool_get() or rte_mempool_get_bulk() is
* "single-consumer". Otherwise, it is "multi-consumers".
* - MEMPOOL_F_NO_PHYS_CONTIG: If set, allocated objects won't
- * necessarilly be contiguous in physical memory.
+ * necessarily be contiguous in physical memory.
* @return
* The pointer to the new allocated mempool, on success. NULL on error
* with rte_errno set appropriately. Possible rte_errno values include:
@@ -794,7 +796,7 @@ rte_mempool_free(struct rte_mempool *mp);
* Add physically contiguous memory for objects in the pool at init
*
* Add a virtually and physically contiguous memory chunk in the pool
- * where objects can be instanciated.
+ * where objects can be instantiated.
*
* If the given physical address is unknown (paddr = RTE_BAD_PHYS_ADDR),
* the chunk doesn't need to be physically contiguous (only virtually),
@@ -825,7 +827,7 @@ int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
* Add physical memory for objects in the pool at init
*
* Add a virtually contiguous memory chunk in the pool where objects can
- * be instanciated. The physical addresses corresponding to the virtual
+ * be instantiated. The physical addresses corresponding to the virtual
* area are described in paddr[], pg_num, pg_shift.
*
* @param mp
@@ -856,7 +858,7 @@ int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
* Add virtually contiguous memory for objects in the pool at init
*
* Add a virtually contiguous memory chunk in the pool where objects can
- * be instanciated.
+ * be instantiated.
*
* @param mp
* A pointer to the mempool structure.
@@ -1038,19 +1040,15 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
*/
static inline void __attribute__((always_inline))
__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
- unsigned n, struct rte_mempool_cache *cache, int flags)
+ unsigned n, struct rte_mempool_cache *cache)
{
void **cache_objs;
/* increment stat now, adding in mempool always success */
__MEMPOOL_STAT_ADD(mp, put, n);
- /* No cache provided or single producer */
- if (unlikely(cache == NULL || flags & MEMPOOL_F_SP_PUT))
- goto ring_enqueue;
-
- /* Go straight to ring if put would overflow mem allocated for cache */
- if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
+ /* No cache provided or if put would overflow mem allocated for cache */
+ if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE))
goto ring_enqueue;
cache_objs = &cache->objs[cache->len];
@@ -1104,50 +1102,11 @@ ring_enqueue:
*/
static inline void __attribute__((always_inline))
rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
- unsigned n, struct rte_mempool_cache *cache, int flags)
+ unsigned n, struct rte_mempool_cache *cache,
+ __rte_unused int flags)
{
__mempool_check_cookies(mp, obj_table, n, 0);
- __mempool_generic_put(mp, obj_table, n, cache, flags);
-}
-
-/**
- * @deprecated
- * Put several objects back in the mempool (multi-producers safe).
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
- * @param n
- * The number of objects to add in the mempool from the obj_table.
- */
-__rte_deprecated
-static inline void __attribute__((always_inline))
-rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
- unsigned n)
-{
- struct rte_mempool_cache *cache;
- cache = rte_mempool_default_cache(mp, rte_lcore_id());
- rte_mempool_generic_put(mp, obj_table, n, cache, 0);
-}
-
-/**
- * @deprecated
- * Put several objects back in the mempool (NOT multi-producers safe).
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
- * @param n
- * The number of objects to add in the mempool from obj_table.
- */
-__rte_deprecated
-static inline void __attribute__((always_inline))
-rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
- unsigned n)
-{
- rte_mempool_generic_put(mp, obj_table, n, NULL, MEMPOOL_F_SP_PUT);
+ __mempool_generic_put(mp, obj_table, n, cache);
}
/**
@@ -1174,40 +1133,6 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
}
/**
- * @deprecated
- * Put one object in the mempool (multi-producers safe).
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj
- * A pointer to the object to be added.
- */
-__rte_deprecated
-static inline void __attribute__((always_inline))
-rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
-{
- struct rte_mempool_cache *cache;
- cache = rte_mempool_default_cache(mp, rte_lcore_id());
- rte_mempool_generic_put(mp, &obj, 1, cache, 0);
-}
-
-/**
- * @deprecated
- * Put one object back in the mempool (NOT multi-producers safe).
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj
- * A pointer to the object to be added.
- */
-__rte_deprecated
-static inline void __attribute__((always_inline))
-rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
-{
- rte_mempool_generic_put(mp, &obj, 1, NULL, MEMPOOL_F_SP_PUT);
-}
-
-/**
* Put one object back in the mempool.
*
* This function calls the multi-producer or the single-producer
@@ -1244,15 +1169,14 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
*/
static inline int __attribute__((always_inline))
__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
- unsigned n, struct rte_mempool_cache *cache, int flags)
+ unsigned n, struct rte_mempool_cache *cache)
{
int ret;
uint32_t index, len;
void **cache_objs;
- /* No cache provided or single consumer */
- if (unlikely(cache == NULL || flags & MEMPOOL_F_SC_GET ||
- n >= cache->size))
+ /* No cache provided or cannot be satisfied from cache */
+ if (unlikely(cache == NULL || n >= cache->size))
goto ring_dequeue;
cache_objs = cache->objs;
@@ -1326,72 +1250,16 @@ ring_dequeue:
*/
static inline int __attribute__((always_inline))
rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
- struct rte_mempool_cache *cache, int flags)
+ struct rte_mempool_cache *cache, __rte_unused int flags)
{
int ret;
- ret = __mempool_generic_get(mp, obj_table, n, cache, flags);
+ ret = __mempool_generic_get(mp, obj_table, n, cache);
if (ret == 0)
__mempool_check_cookies(mp, obj_table, n, 1);
return ret;
}
/**
- * @deprecated
- * Get several objects from the mempool (multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
- * @param n
- * The number of objects to get from mempool to obj_table.
- * @return
- * - 0: Success; objects taken.
- * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
- */
-__rte_deprecated
-static inline int __attribute__((always_inline))
-rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
-{
- struct rte_mempool_cache *cache;
- cache = rte_mempool_default_cache(mp, rte_lcore_id());
- return rte_mempool_generic_get(mp, obj_table, n, cache, 0);
-}
-
-/**
- * @deprecated
- * Get several objects from the mempool (NOT multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
- * @param n
- * The number of objects to get from the mempool to obj_table.
- * @return
- * - 0: Success; objects taken.
- * - -ENOENT: Not enough entries in the mempool; no object is
- * retrieved.
- */
-__rte_deprecated
-static inline int __attribute__((always_inline))
-rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
-{
- return rte_mempool_generic_get(mp, obj_table, n, NULL,
- MEMPOOL_F_SC_GET);
-}
-
-/**
* Get several objects from the mempool.
*
* This function calls the multi-consumers or the single-consumer
@@ -1422,56 +1290,6 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
}
/**
- * @deprecated
- * Get one object from the mempool (multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj_p
- * A pointer to a void * pointer (object) that will be filled.
- * @return
- * - 0: Success; objects taken.
- * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
- */
-__rte_deprecated
-static inline int __attribute__((always_inline))
-rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
-{
- struct rte_mempool_cache *cache;
- cache = rte_mempool_default_cache(mp, rte_lcore_id());
- return rte_mempool_generic_get(mp, obj_p, 1, cache, 0);
-}
-
-/**
- * @deprecated
- * Get one object from the mempool (NOT multi-consumers safe).
- *
- * If cache is enabled, objects will be retrieved first from cache,
- * subsequently from the common pool. Note that it can return -ENOENT when
- * the local cache and common pool are empty, even if cache from other
- * lcores are full.
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param obj_p
- * A pointer to a void * pointer (object) that will be filled.
- * @return
- * - 0: Success; objects taken.
- * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
- */
-__rte_deprecated
-static inline int __attribute__((always_inline))
-rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
-{
- return rte_mempool_generic_get(mp, obj_p, 1, NULL, MEMPOOL_F_SC_GET);
-}
-
-/**
* Get one object from the mempool.
*
* This function calls the multi-consumers or the single-consumer
@@ -1512,22 +1330,6 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);
/**
- * @deprecated
- * Return the number of entries in the mempool.
- *
- * When cache is enabled, this function has to browse the length of
- * all lcores, so it should not be used in a data path, but only for
- * debug purposes.
- *
- * @param mp
- * A pointer to the mempool structure.
- * @return
- * The number of entries in the mempool.
- */
-__rte_deprecated
-unsigned rte_mempool_count(const struct rte_mempool *mp);
-
-/**
* Return the number of elements which have been allocated from the mempool
*
* When cache is enabled, this function has to browse the length of
@@ -1543,31 +1345,6 @@ unsigned int
rte_mempool_in_use_count(const struct rte_mempool *mp);
/**
- * @deprecated
- * Return the number of free entries in the mempool ring.
- * i.e. how many entries can be freed back to the mempool.
- *
- * NOTE: This corresponds to the number of elements *allocated* from the
- * memory pool, not the number of elements in the pool itself. To count
- * the number elements currently available in the pool, use "rte_mempool_count"
- *
- * When cache is enabled, this function has to browse the length of
- * all lcores, so it should not be used in a data path, but only for
- * debug purposes. User-owned mempool caches are not accounted for.
- *
- * @param mp
- * A pointer to the mempool structure.
- * @return
- * The number of free entries in the mempool.
- */
-__rte_deprecated
-static inline unsigned
-rte_mempool_free_count(const struct rte_mempool *mp)
-{
- return rte_mempool_in_use_count(mp);
-}
-
-/**
* Test if the mempool is full.
*
* When cache is enabled, this function has to browse the length of all
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index dee1c990..f9c07944 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -3,7 +3,6 @@ DPDK_2.0 {
rte_mempool_audit;
rte_mempool_calc_obj_size;
- rte_mempool_count;
rte_mempool_create;
rte_mempool_dump;
rte_mempool_list_dump;
diff --git a/lib/librte_meter/Makefile b/lib/librte_meter/Makefile
index f07fced7..539bfddd 100644
--- a/lib/librte_meter/Makefile
+++ b/lib/librte_meter/Makefile
@@ -53,7 +53,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_METER) := rte_meter.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_METER)-include := rte_meter.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_METER) += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_metrics/Makefile b/lib/librte_metrics/Makefile
new file mode 100644
index 00000000..d4990e83
--- /dev/null
+++ b/lib/librte_metrics/Makefile
@@ -0,0 +1,49 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_metrics.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+EXPORT_MAP := rte_metrics_version.map
+
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_METRICS) := rte_metrics.c
+
+# Install header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_METRICS)-include += rte_metrics.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_metrics/rte_metrics.c b/lib/librte_metrics/rte_metrics.c
new file mode 100644
index 00000000..e9a122c1
--- /dev/null
+++ b/lib/librte_metrics/rte_metrics.c
@@ -0,0 +1,302 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_metrics.h>
+#include <rte_lcore.h>
+#include <rte_memzone.h>
+#include <rte_spinlock.h>
+
+#define RTE_METRICS_MAX_METRICS 256
+#define RTE_METRICS_MEMZONE_NAME "RTE_METRICS"
+
+/**
+ * Internal stats metadata and value entry.
+ *
+ * @internal
+ */
+struct rte_metrics_meta_s {
+ /** Name of metric */
+ char name[RTE_METRICS_MAX_NAME_LEN];
+ /** Current value for metric */
+ uint64_t value[RTE_MAX_ETHPORTS];
+ /** Used for global metrics */
+ uint64_t global_value;
+ /** Index of next root element (zero for none) */
+ uint16_t idx_next_set;
+ /** Index of next metric in set (zero for none) */
+ uint16_t idx_next_stat;
+};
+
+/**
+ * Internal stats info structure.
+ *
+ * @internal
+ * Offsets into metadata are used instead of pointers because ASLR
+ * means that having the same physical addresses in different
+ * processes is not guaranteed.
+ */
+struct rte_metrics_data_s {
+ /** Index of last metadata entry with valid data.
+ * This value is not valid if cnt_stats is zero.
+ */
+ uint16_t idx_last_set;
+ /** Number of metrics. */
+ uint16_t cnt_stats;
+ /** Metric data memory block. */
+ struct rte_metrics_meta_s metadata[RTE_METRICS_MAX_METRICS];
+ /** Metric data access lock */
+ rte_spinlock_t lock;
+};
+
+void
+rte_metrics_init(int socket_id)
+{
+ struct rte_metrics_data_s *stats;
+ const struct rte_memzone *memzone;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ memzone = rte_memzone_lookup(RTE_METRICS_MEMZONE_NAME);
+ if (memzone != NULL)
+ return;
+ memzone = rte_memzone_reserve(RTE_METRICS_MEMZONE_NAME,
+ sizeof(struct rte_metrics_data_s), socket_id, 0);
+ if (memzone == NULL)
+ rte_exit(EXIT_FAILURE, "Unable to allocate stats memzone\n");
+ stats = memzone->addr;
+ memset(stats, 0, sizeof(struct rte_metrics_data_s));
+ rte_spinlock_init(&stats->lock);
+}
+
+int
+rte_metrics_reg_name(const char *name)
+{
+ const char * const list_names[] = {name};
+
+ return rte_metrics_reg_names(list_names, 1);
+}
+
+int
+rte_metrics_reg_names(const char * const *names, uint16_t cnt_names)
+{
+ struct rte_metrics_meta_s *entry;
+ struct rte_metrics_data_s *stats;
+ const struct rte_memzone *memzone;
+ uint16_t idx_name;
+ uint16_t idx_base;
+
+ /* Some sanity checks */
+ if (cnt_names < 1 || names == NULL)
+ return -EINVAL;
+
+ memzone = rte_memzone_lookup(RTE_METRICS_MEMZONE_NAME);
+ if (memzone == NULL)
+ return -EIO;
+ stats = memzone->addr;
+
+ if (stats->cnt_stats + cnt_names >= RTE_METRICS_MAX_METRICS)
+ return -ENOMEM;
+
+ rte_spinlock_lock(&stats->lock);
+
+ /* Overwritten later if this is actually first set.. */
+ stats->metadata[stats->idx_last_set].idx_next_set = stats->cnt_stats;
+
+ stats->idx_last_set = idx_base = stats->cnt_stats;
+
+ for (idx_name = 0; idx_name < cnt_names; idx_name++) {
+ entry = &stats->metadata[idx_name + stats->cnt_stats];
+ strncpy(entry->name, names[idx_name],
+ RTE_METRICS_MAX_NAME_LEN);
+ memset(entry->value, 0, sizeof(entry->value));
+ entry->idx_next_stat = idx_name + stats->cnt_stats + 1;
+ }
+ entry->idx_next_stat = 0;
+ entry->idx_next_set = 0;
+ stats->cnt_stats += cnt_names;
+
+ rte_spinlock_unlock(&stats->lock);
+
+ return idx_base;
+}
+
+int
+rte_metrics_update_value(int port_id, uint16_t key, const uint64_t value)
+{
+ return rte_metrics_update_values(port_id, key, &value, 1);
+}
+
+int
+rte_metrics_update_values(int port_id,
+ uint16_t key,
+ const uint64_t *values,
+ uint32_t count)
+{
+ struct rte_metrics_meta_s *entry;
+ struct rte_metrics_data_s *stats;
+ const struct rte_memzone *memzone;
+ uint16_t idx_metric;
+ uint16_t idx_value;
+ uint16_t cnt_setsize;
+
+ if (port_id != RTE_METRICS_GLOBAL &&
+ (port_id < 0 || port_id > RTE_MAX_ETHPORTS))
+ return -EINVAL;
+
+ if (values == NULL)
+ return -EINVAL;
+
+ memzone = rte_memzone_lookup(RTE_METRICS_MEMZONE_NAME);
+ if (memzone == NULL)
+ return -EIO;
+ stats = memzone->addr;
+
+ rte_spinlock_lock(&stats->lock);
+ idx_metric = key;
+ cnt_setsize = 1;
+ while (idx_metric < stats->cnt_stats) {
+ entry = &stats->metadata[idx_metric];
+ if (entry->idx_next_stat == 0)
+ break;
+ cnt_setsize++;
+ idx_metric++;
+ }
+ /* Check update does not cross set border */
+ if (count > cnt_setsize) {
+ rte_spinlock_unlock(&stats->lock);
+ return -ERANGE;
+ }
+
+ if (port_id == RTE_METRICS_GLOBAL)
+ for (idx_value = 0; idx_value < count; idx_value++) {
+ idx_metric = key + idx_value;
+ stats->metadata[idx_metric].global_value =
+ values[idx_value];
+ }
+ else
+ for (idx_value = 0; idx_value < count; idx_value++) {
+ idx_metric = key + idx_value;
+ stats->metadata[idx_metric].value[port_id] =
+ values[idx_value];
+ }
+ rte_spinlock_unlock(&stats->lock);
+ return 0;
+}
+
+int
+rte_metrics_get_names(struct rte_metric_name *names,
+ uint16_t capacity)
+{
+ struct rte_metrics_data_s *stats;
+ const struct rte_memzone *memzone;
+ uint16_t idx_name;
+ int return_value;
+
+ memzone = rte_memzone_lookup(RTE_METRICS_MEMZONE_NAME);
+ /* If not allocated, fail silently */
+ if (memzone == NULL)
+ return 0;
+
+ stats = memzone->addr;
+ rte_spinlock_lock(&stats->lock);
+ if (names != NULL) {
+ if (capacity < stats->cnt_stats) {
+ return_value = stats->cnt_stats;
+ rte_spinlock_unlock(&stats->lock);
+ return return_value;
+ }
+ for (idx_name = 0; idx_name < stats->cnt_stats; idx_name++)
+ strncpy(names[idx_name].name,
+ stats->metadata[idx_name].name,
+ RTE_METRICS_MAX_NAME_LEN);
+ }
+ return_value = stats->cnt_stats;
+ rte_spinlock_unlock(&stats->lock);
+ return return_value;
+}
+
+int
+rte_metrics_get_values(int port_id,
+ struct rte_metric_value *values,
+ uint16_t capacity)
+{
+ struct rte_metrics_meta_s *entry;
+ struct rte_metrics_data_s *stats;
+ const struct rte_memzone *memzone;
+ uint16_t idx_name;
+ int return_value;
+
+ if (port_id != RTE_METRICS_GLOBAL &&
+ (port_id < 0 || port_id > RTE_MAX_ETHPORTS))
+ return -EINVAL;
+
+ memzone = rte_memzone_lookup(RTE_METRICS_MEMZONE_NAME);
+ /* If not allocated, fail silently */
+ if (memzone == NULL)
+ return 0;
+ stats = memzone->addr;
+ rte_spinlock_lock(&stats->lock);
+
+ if (values != NULL) {
+ if (capacity < stats->cnt_stats) {
+ return_value = stats->cnt_stats;
+ rte_spinlock_unlock(&stats->lock);
+ return return_value;
+ }
+ if (port_id == RTE_METRICS_GLOBAL)
+ for (idx_name = 0;
+ idx_name < stats->cnt_stats;
+ idx_name++) {
+ entry = &stats->metadata[idx_name];
+ values[idx_name].key = idx_name;
+ values[idx_name].value = entry->global_value;
+ }
+ else
+ for (idx_name = 0;
+ idx_name < stats->cnt_stats;
+ idx_name++) {
+ entry = &stats->metadata[idx_name];
+ values[idx_name].key = idx_name;
+ values[idx_name].value = entry->value[port_id];
+ }
+ }
+ return_value = stats->cnt_stats;
+ rte_spinlock_unlock(&stats->lock);
+ return return_value;
+}
diff --git a/lib/librte_metrics/rte_metrics.h b/lib/librte_metrics/rte_metrics.h
new file mode 100644
index 00000000..0fa3104e
--- /dev/null
+++ b/lib/librte_metrics/rte_metrics.h
@@ -0,0 +1,250 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * DPDK Metrics module
+ *
+ * Metrics are statistics that are not generated by PMDs, and hence
+ * are better reported through a mechanism that is independent from
+ * the ethdev-based extended statistics. Providers will typically
+ * be other libraries and consumers will typically be applications.
+ *
+ * Metric information is populated using a push model, where producers
+ * update the values contained within the metric library by calling
+ * an update function on the relevant metrics. Consumers receive
+ * metric information by querying the central metric data, which is
+ * held in shared memory. Currently only bulk querying of metrics
+ * by consumers is supported.
+ */
+
+#ifndef _RTE_METRICS_H_
+#define _RTE_METRICS_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Maximum length of metric name (including null-terminator) */
+#define RTE_METRICS_MAX_NAME_LEN 64
+
+/**
+ * Global metric special id.
+ *
+ * When used for the port_id parameter when calling
+ * rte_metrics_update_metric() or rte_metrics_update_metric(),
+ * the global metric, which are not associated with any specific
+ * port (i.e. device), are updated.
+ */
+#define RTE_METRICS_GLOBAL -1
+
+
+/**
+ * A name-key lookup for metrics.
+ *
+ * An array of this structure is returned by rte_metrics_get_names().
+ * The struct rte_metric_value references these names via their array index.
+ */
+struct rte_metric_name {
+ /** String describing metric */
+ char name[RTE_METRICS_MAX_NAME_LEN];
+};
+
+
+/**
+ * Metric value structure.
+ *
+ * This structure is used by rte_metrics_get_values() to return metrics,
+ * which are statistics that are not generated by PMDs. It maps a name key,
+ * which corresponds to an index in the array returned by
+ * rte_metrics_get_names().
+ */
+struct rte_metric_value {
+ /** Numeric identifier of metric. */
+ uint16_t key;
+ /** Value for metric */
+ uint64_t value;
+};
+
+
+/**
+ * Initializes metric module. This function must be called from
+ * a primary process before metrics are used.
+ *
+ * @param socket_id
+ * Socket to use for shared memory allocation.
+ */
+void rte_metrics_init(int socket_id);
+
+/**
+ * Register a metric, making it available as a reporting parameter.
+ *
+ * Registering a metric is the way producers declare a parameter
+ * that they wish to be reported. Once registered, the associated
+ * numeric key can be obtained via rte_metrics_get_names(), which
+ * is required for updating said metric's value.
+ *
+ * @param name
+ * Metric name
+ *
+ * @return
+ * - Zero or positive: Success (index key of new metric)
+ * - -EIO: Error, unable to access metrics shared memory
+ * (rte_metrics_init() not called)
+ * - -EINVAL: Error, invalid parameters
+ * - -ENOMEM: Error, maximum metrics reached
+ */
+int rte_metrics_reg_name(const char *name);
+
+/**
+ * Register a set of metrics.
+ *
+ * This is a bulk version of rte_metrics_reg_names() and aside from
+ * handling multiple keys at once is functionally identical.
+ *
+ * @param names
+ * List of metric names
+ *
+ * @param cnt_names
+ * Number of metrics in set
+ *
+ * @return
+ * - Zero or positive: Success (index key of start of set)
+ * - -EIO: Error, unable to access metrics shared memory
+ * (rte_metrics_init() not called)
+ * - -EINVAL: Error, invalid parameters
+ * - -ENOMEM: Error, maximum metrics reached
+ */
+int rte_metrics_reg_names(const char * const *names, uint16_t cnt_names);
+
+/**
+ * Get metric name-key lookup table.
+ *
+ * @param names
+ * A struct rte_metric_name array of at least *capacity* in size to
+ * receive key names. If this is NULL, function returns the required
+ * number of elements for this array.
+ *
+ * @param capacity
+ * Size (number of elements) of struct rte_metric_name array.
+ * Disregarded if names is NULL.
+ *
+ * @return
+ * - Positive value above capacity: error, *names* is too small.
+ * Return value is required size.
+ * - Positive value equal or less than capacity: Success. Return
+ * value is number of elements filled in.
+ * - Negative value: error.
+ */
+int rte_metrics_get_names(
+ struct rte_metric_name *names,
+ uint16_t capacity);
+
+/**
+ * Get metric value table.
+ *
+ * @param port_id
+ * Port id to query
+ *
+ * @param values
+ * A struct rte_metric_value array of at least *capacity* in size to
+ * receive metric ids and values. If this is NULL, function returns
+ * the required number of elements for this array.
+ *
+ * @param capacity
+ * Size (number of elements) of struct rte_metric_value array.
+ * Disregarded if names is NULL.
+ *
+ * @return
+ * - Positive value above capacity: error, *values* is too small.
+ * Return value is required size.
+ * - Positive value equal or less than capacity: Success. Return
+ * value is number of elements filled in.
+ * - Negative value: error.
+ */
+int rte_metrics_get_values(
+ int port_id,
+ struct rte_metric_value *values,
+ uint16_t capacity);
+
+/**
+ * Updates a metric
+ *
+ * @param port_id
+ * Port to update metrics for
+ * @param key
+ * Id of metric to update
+ * @param value
+ * New value
+ *
+ * @return
+ * - -EIO if unable to access shared metrics memory
+ * - Zero on success
+ */
+int rte_metrics_update_value(
+ int port_id,
+ uint16_t key,
+ const uint64_t value);
+
+/**
+ * Updates a metric set. Note that it is an error to try to
+ * update across a set boundary.
+ *
+ * @param port_id
+ * Port to update metrics for
+ * @param key
+ * Base id of metrics set to update
+ * @param values
+ * Set of new values
+ * @param count
+ * Number of new values
+ *
+ * @return
+ * - -ERANGE if count exceeds metric set size
+ * - -EIO if unable to access shared metrics memory
+ * - Zero on success
+ */
+int rte_metrics_update_values(
+ int port_id,
+ uint16_t key,
+ const uint64_t *values,
+ uint32_t count);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/librte_metrics/rte_metrics_version.map b/lib/librte_metrics/rte_metrics_version.map
new file mode 100644
index 00000000..4c5234cd
--- /dev/null
+++ b/lib/librte_metrics/rte_metrics_version.map
@@ -0,0 +1,13 @@
+DPDK_17.05 {
+ global:
+
+ rte_metrics_get_names;
+ rte_metrics_get_values;
+ rte_metrics_init;
+ rte_metrics_reg_name;
+ rte_metrics_reg_names;
+ rte_metrics_update_value;
+ rte_metrics_update_values;
+
+ local: *;
+};
diff --git a/lib/librte_net/Makefile b/lib/librte_net/Makefile
index 20cf6644..56727c4d 100644
--- a/lib/librte_net/Makefile
+++ b/lib/librte_net/Makefile
@@ -39,12 +39,12 @@ EXPORT_MAP := rte_net_version.map
LIBABIVER := 1
SRCS-$(CONFIG_RTE_LIBRTE_NET) := rte_net.c
+SRCS-$(CONFIG_RTE_LIBRTE_NET) += rte_net_crc.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_NET)-include := rte_ip.h rte_tcp.h rte_udp.h
SYMLINK-$(CONFIG_RTE_LIBRTE_NET)-include += rte_sctp.h rte_icmp.h rte_arp.h
SYMLINK-$(CONFIG_RTE_LIBRTE_NET)-include += rte_ether.h rte_gre.h rte_net.h
-
-DEPDIRS-$(CONFIG_RTE_LIBRTE_NET) += lib/librte_eal lib/librte_mbuf
+SYMLINK-$(CONFIG_RTE_LIBRTE_NET)-include += rte_net_crc.h
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_net/net_crc_sse.h b/lib/librte_net/net_crc_sse.h
new file mode 100644
index 00000000..8bce522a
--- /dev/null
+++ b/lib/librte_net/net_crc_sse.h
@@ -0,0 +1,363 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_NET_CRC_SSE_H_
+#define _RTE_NET_CRC_SSE_H_
+
+#include <rte_branch_prediction.h>
+
+#include <x86intrin.h>
+#include <cpuid.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** PCLMULQDQ CRC computation context structure */
+struct crc_pclmulqdq_ctx {
+ __m128i rk1_rk2;
+ __m128i rk5_rk6;
+ __m128i rk7_rk8;
+};
+
+struct crc_pclmulqdq_ctx crc32_eth_pclmulqdq __rte_aligned(16);
+struct crc_pclmulqdq_ctx crc16_ccitt_pclmulqdq __rte_aligned(16);
+/**
+ * @brief Performs one folding round
+ *
+ * Logically function operates as follows:
+ * DATA = READ_NEXT_16BYTES();
+ * F1 = LSB8(FOLD)
+ * F2 = MSB8(FOLD)
+ * T1 = CLMUL(F1, RK1)
+ * T2 = CLMUL(F2, RK2)
+ * FOLD = XOR(T1, T2, DATA)
+ *
+ * @param data_block
+ * 16 byte data block
+ * @param precomp
+ * Precomputed rk1 constanst
+ * @param fold
+ * Current16 byte folded data
+ *
+ * @return
+ * New 16 byte folded data
+ */
+static inline __attribute__((always_inline)) __m128i
+crcr32_folding_round(__m128i data_block,
+ __m128i precomp,
+ __m128i fold)
+{
+ __m128i tmp0 = _mm_clmulepi64_si128(fold, precomp, 0x01);
+ __m128i tmp1 = _mm_clmulepi64_si128(fold, precomp, 0x10);
+
+ return _mm_xor_si128(tmp1, _mm_xor_si128(data_block, tmp0));
+}
+
+/**
+ * Performs reduction from 128 bits to 64 bits
+ *
+ * @param data128
+ * 128 bits data to be reduced
+ * @param precomp
+ * precomputed constants rk5, rk6
+ *
+ * @return
+ * 64 bits reduced data
+ */
+
+static inline __attribute__((always_inline)) __m128i
+crcr32_reduce_128_to_64(__m128i data128, __m128i precomp)
+{
+ __m128i tmp0, tmp1, tmp2;
+
+ /* 64b fold */
+ tmp0 = _mm_clmulepi64_si128(data128, precomp, 0x00);
+ tmp1 = _mm_srli_si128(data128, 8);
+ tmp0 = _mm_xor_si128(tmp0, tmp1);
+
+ /* 32b fold */
+ tmp2 = _mm_slli_si128(tmp0, 4);
+ tmp1 = _mm_clmulepi64_si128(tmp2, precomp, 0x10);
+
+ return _mm_xor_si128(tmp1, tmp0);
+}
+
+/**
+ * Performs Barret's reduction from 64 bits to 32 bits
+ *
+ * @param data64
+ * 64 bits data to be reduced
+ * @param precomp
+ * rk7 precomputed constant
+ *
+ * @return
+ * reduced 32 bits data
+ */
+
+static inline __attribute__((always_inline)) uint32_t
+crcr32_reduce_64_to_32(__m128i data64, __m128i precomp)
+{
+ static const uint32_t mask1[4] __rte_aligned(16) = {
+ 0xffffffff, 0xffffffff, 0x00000000, 0x00000000
+ };
+
+ static const uint32_t mask2[4] __rte_aligned(16) = {
+ 0x00000000, 0xffffffff, 0xffffffff, 0xffffffff
+ };
+ __m128i tmp0, tmp1, tmp2;
+
+ tmp0 = _mm_and_si128(data64, _mm_load_si128((const __m128i *)mask2));
+
+ tmp1 = _mm_clmulepi64_si128(tmp0, precomp, 0x00);
+ tmp1 = _mm_xor_si128(tmp1, tmp0);
+ tmp1 = _mm_and_si128(tmp1, _mm_load_si128((const __m128i *)mask1));
+
+ tmp2 = _mm_clmulepi64_si128(tmp1, precomp, 0x10);
+ tmp2 = _mm_xor_si128(tmp2, tmp1);
+ tmp2 = _mm_xor_si128(tmp2, tmp0);
+
+ return _mm_extract_epi32(tmp2, 2);
+}
+
+static const uint8_t crc_xmm_shift_tab[48] __rte_aligned(16) = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+
+/**
+ * Shifts left 128 bit register by specified number of bytes
+ *
+ * @param reg
+ * 128 bit value
+ * @param num
+ * number of bytes to shift left reg by (0-16)
+ *
+ * @return
+ * reg << (num * 8)
+ */
+
+static inline __attribute__((always_inline)) __m128i
+xmm_shift_left(__m128i reg, const unsigned int num)
+{
+ const __m128i *p = (const __m128i *)(crc_xmm_shift_tab + 16 - num);
+
+ return _mm_shuffle_epi8(reg, _mm_loadu_si128(p));
+}
+
+static inline __attribute__((always_inline)) uint32_t
+crc32_eth_calc_pclmulqdq(
+ const uint8_t *data,
+ uint32_t data_len,
+ uint32_t crc,
+ const struct crc_pclmulqdq_ctx *params)
+{
+ __m128i temp, fold, k;
+ uint32_t n;
+
+ /* Get CRC init value */
+ temp = _mm_insert_epi32(_mm_setzero_si128(), crc, 0);
+
+ /**
+ * Folding all data into single 16 byte data block
+ * Assumes: fold holds first 16 bytes of data
+ */
+
+ if (unlikely(data_len < 32)) {
+ if (unlikely(data_len == 16)) {
+ /* 16 bytes */
+ fold = _mm_loadu_si128((const __m128i *)data);
+ fold = _mm_xor_si128(fold, temp);
+ goto reduction_128_64;
+ }
+
+ if (unlikely(data_len < 16)) {
+ /* 0 to 15 bytes */
+ uint8_t buffer[16] __rte_aligned(16);
+
+ memset(buffer, 0, sizeof(buffer));
+ memcpy(buffer, data, data_len);
+
+ fold = _mm_load_si128((const __m128i *)buffer);
+ fold = _mm_xor_si128(fold, temp);
+ if (unlikely(data_len < 4)) {
+ fold = xmm_shift_left(fold, 8 - data_len);
+ goto barret_reduction;
+ }
+ fold = xmm_shift_left(fold, 16 - data_len);
+ goto reduction_128_64;
+ }
+ /* 17 to 31 bytes */
+ fold = _mm_loadu_si128((const __m128i *)data);
+ fold = _mm_xor_si128(fold, temp);
+ n = 16;
+ k = params->rk1_rk2;
+ goto partial_bytes;
+ }
+
+ /** At least 32 bytes in the buffer */
+ /** Apply CRC initial value */
+ fold = _mm_loadu_si128((const __m128i *)data);
+ fold = _mm_xor_si128(fold, temp);
+
+ /** Main folding loop - the last 16 bytes is processed separately */
+ k = params->rk1_rk2;
+ for (n = 16; (n + 16) <= data_len; n += 16) {
+ temp = _mm_loadu_si128((const __m128i *)&data[n]);
+ fold = crcr32_folding_round(temp, k, fold);
+ }
+
+partial_bytes:
+ if (likely(n < data_len)) {
+
+ const uint32_t mask3[4] __rte_aligned(16) = {
+ 0x80808080, 0x80808080, 0x80808080, 0x80808080
+ };
+
+ const uint8_t shf_table[32] __rte_aligned(16) = {
+ 0x00, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+ };
+
+ __m128i last16, a, b;
+
+ last16 = _mm_loadu_si128((const __m128i *)&data[data_len - 16]);
+
+ temp = _mm_loadu_si128((const __m128i *)
+ &shf_table[data_len & 15]);
+ a = _mm_shuffle_epi8(fold, temp);
+
+ temp = _mm_xor_si128(temp,
+ _mm_load_si128((const __m128i *)mask3));
+ b = _mm_shuffle_epi8(fold, temp);
+ b = _mm_blendv_epi8(b, last16, temp);
+
+ /* k = rk1 & rk2 */
+ temp = _mm_clmulepi64_si128(a, k, 0x01);
+ fold = _mm_clmulepi64_si128(a, k, 0x10);
+
+ fold = _mm_xor_si128(fold, temp);
+ fold = _mm_xor_si128(fold, b);
+ }
+
+ /** Reduction 128 -> 32 Assumes: fold holds 128bit folded data */
+reduction_128_64:
+ k = params->rk5_rk6;
+ fold = crcr32_reduce_128_to_64(fold, k);
+
+barret_reduction:
+ k = params->rk7_rk8;
+ n = crcr32_reduce_64_to_32(fold, k);
+
+ return n;
+}
+
+
+static inline void
+rte_net_crc_sse42_init(void)
+{
+ uint64_t k1, k2, k5, k6;
+ uint64_t p = 0, q = 0;
+
+ /** Initialize CRC16 data */
+ k1 = 0x189aeLLU;
+ k2 = 0x8e10LLU;
+ k5 = 0x189aeLLU;
+ k6 = 0x114aaLLU;
+ q = 0x11c581910LLU;
+ p = 0x10811LLU;
+
+ /** Save the params in context structure */
+ crc16_ccitt_pclmulqdq.rk1_rk2 =
+ _mm_setr_epi64(_mm_cvtsi64_m64(k1), _mm_cvtsi64_m64(k2));
+ crc16_ccitt_pclmulqdq.rk5_rk6 =
+ _mm_setr_epi64(_mm_cvtsi64_m64(k5), _mm_cvtsi64_m64(k6));
+ crc16_ccitt_pclmulqdq.rk7_rk8 =
+ _mm_setr_epi64(_mm_cvtsi64_m64(q), _mm_cvtsi64_m64(p));
+
+ /** Initialize CRC32 data */
+ k1 = 0xccaa009eLLU;
+ k2 = 0x1751997d0LLU;
+ k5 = 0xccaa009eLLU;
+ k6 = 0x163cd6124LLU;
+ q = 0x1f7011640LLU;
+ p = 0x1db710641LLU;
+
+ /** Save the params in context structure */
+ crc32_eth_pclmulqdq.rk1_rk2 =
+ _mm_setr_epi64(_mm_cvtsi64_m64(k1), _mm_cvtsi64_m64(k2));
+ crc32_eth_pclmulqdq.rk5_rk6 =
+ _mm_setr_epi64(_mm_cvtsi64_m64(k5), _mm_cvtsi64_m64(k6));
+ crc32_eth_pclmulqdq.rk7_rk8 =
+ _mm_setr_epi64(_mm_cvtsi64_m64(q), _mm_cvtsi64_m64(p));
+
+ /**
+ * Reset the register as following calculation may
+ * use other data types such as float, double, etc.
+ */
+ _mm_empty();
+
+}
+
+static inline uint32_t
+rte_crc16_ccitt_sse42_handler(const uint8_t *data,
+ uint32_t data_len)
+{
+ /** return 16-bit CRC value */
+ return (uint16_t)~crc32_eth_calc_pclmulqdq(data,
+ data_len,
+ 0xffff,
+ &crc16_ccitt_pclmulqdq);
+}
+
+static inline uint32_t
+rte_crc32_eth_sse42_handler(const uint8_t *data,
+ uint32_t data_len)
+{
+ return ~crc32_eth_calc_pclmulqdq(data,
+ data_len,
+ 0xffffffffUL,
+ &crc32_eth_pclmulqdq);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_NET_CRC_SSE_H_ */
diff --git a/lib/librte_net/rte_ether.h b/lib/librte_net/rte_ether.h
index ff3d0654..917d42a1 100644
--- a/lib/librte_net/rte_ether.h
+++ b/lib/librte_net/rte_ether.h
@@ -333,6 +333,7 @@ struct vxlan_hdr {
#define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */
#define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */
#define ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */
+#define ETHER_TYPE_LLDP 0x88CC /**< LLDP Protocol. */
#define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr))
/**< VXLAN tunnel header length. */
@@ -357,7 +358,7 @@ static inline int rte_vlan_strip(struct rte_mbuf *m)
return -1;
struct vlan_hdr *vh = (struct vlan_hdr *)(eh + 1);
- m->ol_flags |= PKT_RX_VLAN_PKT;
+ m->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci);
/* Copy ether header over rather than moving whole packet */
@@ -407,6 +408,8 @@ static inline int rte_vlan_insert(struct rte_mbuf **m)
vh = (struct vlan_hdr *) (nh + 1);
vh->vlan_tci = rte_cpu_to_be_16((*m)->vlan_tci);
+ (*m)->ol_flags &= ~PKT_RX_VLAN_STRIPPED;
+
return 0;
}
diff --git a/lib/librte_net/rte_net.h b/lib/librte_net/rte_net.h
index d4156aea..79c764ad 100644
--- a/lib/librte_net/rte_net.h
+++ b/lib/librte_net/rte_net.h
@@ -38,6 +38,11 @@
extern "C" {
#endif
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+
/**
* Structure containing header lengths associated to a packet, filled
* by rte_net_get_ptype().
@@ -86,6 +91,112 @@ struct rte_net_hdr_lens {
uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
struct rte_net_hdr_lens *hdr_lens, uint32_t layers);
+/**
+ * Prepare pseudo header checksum
+ *
+ * This function prepares pseudo header checksum for TSO and non-TSO tcp/udp in
+ * provided mbufs packet data and based on the requested offload flags.
+ *
+ * - for non-TSO tcp/udp packets full pseudo-header checksum is counted and set
+ * in packet data,
+ * - for TSO the IP payload length is not included in pseudo header.
+ *
+ * This function expects that used headers are in the first data segment of
+ * mbuf, are not fragmented and can be safely modified.
+ *
+ * @param m
+ * The packet mbuf to be fixed.
+ * @param ol_flags
+ * TX offloads flags to use with this packet.
+ * @return
+ * 0 if checksum is initialized properly
+ */
+static inline int
+rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ struct tcp_hdr *tcp_hdr;
+ struct udp_hdr *udp_hdr;
+ uint64_t inner_l3_offset = m->l2_len;
+
+ if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
+ (ol_flags & PKT_TX_OUTER_IPV6))
+ inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
+
+ if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+ if (ol_flags & PKT_TX_IPV4) {
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ inner_l3_offset);
+
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ ipv4_hdr->hdr_checksum = 0;
+
+ udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
+ m->l3_len);
+ udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
+ ol_flags);
+ } else {
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ inner_l3_offset);
+ /* non-TSO udp */
+ udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
+ inner_l3_offset + m->l3_len);
+ udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
+ ol_flags);
+ }
+ } else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
+ (ol_flags & PKT_TX_TCP_SEG)) {
+ if (ol_flags & PKT_TX_IPV4) {
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ inner_l3_offset);
+
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ ipv4_hdr->hdr_checksum = 0;
+
+ /* non-TSO tcp or TSO */
+ tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
+ m->l3_len);
+ tcp_hdr->cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
+ ol_flags);
+ } else {
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *,
+ inner_l3_offset);
+ /* non-TSO tcp or TSO */
+ tcp_hdr = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *,
+ inner_l3_offset + m->l3_len);
+ tcp_hdr->cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
+ ol_flags);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Prepare pseudo header checksum
+ *
+ * This function prepares pseudo header checksum for TSO and non-TSO tcp/udp in
+ * provided mbufs packet data.
+ *
+ * - for non-TSO tcp/udp packets full pseudo-header checksum is counted and set
+ * in packet data,
+ * - for TSO the IP payload length is not included in pseudo header.
+ *
+ * This function expects that used headers are in the first data segment of
+ * mbuf, are not fragmented and can be safely modified.
+ *
+ * @param m
+ * The packet mbuf to be fixed.
+ * @return
+ * 0 if checksum is initialized properly
+ */
+static inline int
+rte_net_intel_cksum_prepare(struct rte_mbuf *m)
+{
+ return rte_net_intel_cksum_flags_prepare(m, m->ol_flags);
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_net/rte_net_crc.c b/lib/librte_net/rte_net_crc.c
new file mode 100644
index 00000000..9d1ee63f
--- /dev/null
+++ b/lib/librte_net/rte_net_crc.c
@@ -0,0 +1,207 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <string.h>
+#include <stdint.h>
+
+#include <rte_cpuflags.h>
+#include <rte_common.h>
+#include <rte_net_crc.h>
+
+#if defined(RTE_ARCH_X86_64) \
+ && defined(RTE_MACHINE_CPUFLAG_SSE4_2) \
+ && defined(RTE_MACHINE_CPUFLAG_PCLMULQDQ)
+#define X86_64_SSE42_PCLMULQDQ 1
+#endif
+
+#ifdef X86_64_SSE42_PCLMULQDQ
+#include <net_crc_sse.h>
+#endif
+
+/* crc tables */
+static uint32_t crc32_eth_lut[CRC_LUT_SIZE];
+static uint32_t crc16_ccitt_lut[CRC_LUT_SIZE];
+
+static uint32_t
+rte_crc16_ccitt_handler(const uint8_t *data, uint32_t data_len);
+
+static uint32_t
+rte_crc32_eth_handler(const uint8_t *data, uint32_t data_len);
+
+typedef uint32_t
+(*rte_net_crc_handler)(const uint8_t *data, uint32_t data_len);
+
+static rte_net_crc_handler *handlers;
+
+static rte_net_crc_handler handlers_scalar[] = {
+ [RTE_NET_CRC16_CCITT] = rte_crc16_ccitt_handler,
+ [RTE_NET_CRC32_ETH] = rte_crc32_eth_handler,
+};
+
+#ifdef X86_64_SSE42_PCLMULQDQ
+static rte_net_crc_handler handlers_sse42[] = {
+ [RTE_NET_CRC16_CCITT] = rte_crc16_ccitt_sse42_handler,
+ [RTE_NET_CRC32_ETH] = rte_crc32_eth_sse42_handler,
+};
+#endif
+
+/**
+ * Reflect the bits about the middle
+ *
+ * @param val
+ * value to be reflected
+ *
+ * @return
+ * reflected value
+ */
+static uint32_t
+reflect_32bits(uint32_t val)
+{
+ uint32_t i, res = 0;
+
+ for (i = 0; i < 32; i++)
+ if ((val & (1 << i)) != 0)
+ res |= (uint32_t)(1 << (31 - i));
+
+ return res;
+}
+
+static void
+crc32_eth_init_lut(uint32_t poly,
+ uint32_t *lut)
+{
+ uint32_t i, j;
+
+ for (i = 0; i < CRC_LUT_SIZE; i++) {
+ uint32_t crc = reflect_32bits(i);
+
+ for (j = 0; j < 8; j++) {
+ if (crc & 0x80000000L)
+ crc = (crc << 1) ^ poly;
+ else
+ crc <<= 1;
+ }
+ lut[i] = reflect_32bits(crc);
+ }
+}
+
+static inline __attribute__((always_inline)) uint32_t
+crc32_eth_calc_lut(const uint8_t *data,
+ uint32_t data_len,
+ uint32_t crc,
+ const uint32_t *lut)
+{
+ while (data_len--)
+ crc = lut[(crc ^ *data++) & 0xffL] ^ (crc >> 8);
+
+ return crc;
+}
+
+static void
+rte_net_crc_scalar_init(void)
+{
+ /* 32-bit crc init */
+ crc32_eth_init_lut(CRC32_ETH_POLYNOMIAL, crc32_eth_lut);
+
+ /* 16-bit CRC init */
+ crc32_eth_init_lut(CRC16_CCITT_POLYNOMIAL << 16, crc16_ccitt_lut);
+}
+
+static inline uint32_t
+rte_crc16_ccitt_handler(const uint8_t *data, uint32_t data_len)
+{
+ /* return 16-bit CRC value */
+ return (uint16_t)~crc32_eth_calc_lut(data,
+ data_len,
+ 0xffff,
+ crc16_ccitt_lut);
+}
+
+static inline uint32_t
+rte_crc32_eth_handler(const uint8_t *data, uint32_t data_len)
+{
+ /* return 32-bit CRC value */
+ return ~crc32_eth_calc_lut(data,
+ data_len,
+ 0xffffffffUL,
+ crc32_eth_lut);
+}
+
+void
+rte_net_crc_set_alg(enum rte_net_crc_alg alg)
+{
+ switch (alg) {
+ case RTE_NET_CRC_SSE42:
+#ifdef X86_64_SSE42_PCLMULQDQ
+ handlers = handlers_sse42;
+#else
+ alg = RTE_NET_CRC_SCALAR;
+#endif
+ break;
+ case RTE_NET_CRC_SCALAR:
+ default:
+ handlers = handlers_scalar;
+ break;
+ }
+}
+
+uint32_t
+rte_net_crc_calc(const void *data,
+ uint32_t data_len,
+ enum rte_net_crc_type type)
+{
+ uint32_t ret;
+ rte_net_crc_handler f_handle;
+
+ f_handle = handlers[type];
+ ret = f_handle(data, data_len);
+
+ return ret;
+}
+
+/* Select highest available crc algorithm as default one */
+static inline void __attribute__((constructor))
+rte_net_crc_init(void)
+{
+ enum rte_net_crc_alg alg = RTE_NET_CRC_SCALAR;
+
+ rte_net_crc_scalar_init();
+
+#ifdef X86_64_SSE42_PCLMULQDQ
+ alg = RTE_NET_CRC_SSE42;
+ rte_net_crc_sse42_init();
+#endif
+
+ rte_net_crc_set_alg(alg);
+}
diff --git a/examples/dpdk_qat/crypto.h b/lib/librte_net/rte_net_crc.h
index f68b0b65..d22286c6 100644
--- a/examples/dpdk_qat/crypto.h
+++ b/lib/librte_net/rte_net_crc.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 Intel Corporation.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,60 +31,68 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CRYPTO_H_
-#define CRYPTO_H_
+#ifndef _RTE_NET_CRC_H_
+#define _RTE_NET_CRC_H_
-/* Pass Labels/Values to crypto units */
-enum cipher_alg {
- /* Option to not do any cryptography */
- NO_CIPHER,
- CIPHER_DES,
- CIPHER_DES_CBC,
- CIPHER_DES3,
- CIPHER_DES3_CBC,
- CIPHER_AES,
- CIPHER_AES_CBC_128,
- CIPHER_KASUMI_F8,
- NUM_CRYPTO,
-};
+#include <stdint.h>
-enum hash_alg {
- /* Option to not do any hash */
- NO_HASH,
- HASH_MD5,
- HASH_SHA1,
- HASH_SHA1_96,
- HASH_SHA224,
- HASH_SHA256,
- HASH_SHA384,
- HASH_SHA512,
- HASH_AES_XCBC,
- HASH_AES_XCBC_96,
- HASH_KASUMI_F9,
- NUM_HMAC,
-};
+#ifdef __cplusplus
+extern "C" {
+#endif
-/* Return value from crypto_{encrypt/decrypt} */
-enum crypto_result {
- /* Packet was successfully put into crypto queue */
- CRYPTO_RESULT_IN_PROGRESS,
- /* Cryptography has failed in some way */
- CRYPTO_RESULT_FAIL,
-};
+/** CRC polynomials */
+#define CRC32_ETH_POLYNOMIAL 0x04c11db7UL
+#define CRC16_CCITT_POLYNOMIAL 0x1021U
-extern enum crypto_result crypto_encrypt(struct rte_mbuf *pkt, enum cipher_alg c,
- enum hash_alg h);
-extern enum crypto_result crypto_decrypt(struct rte_mbuf *pkt, enum cipher_alg c,
- enum hash_alg h);
+#define CRC_LUT_SIZE 256
-extern int crypto_init(void);
+/** CRC types */
+enum rte_net_crc_type {
+ RTE_NET_CRC16_CCITT = 0,
+ RTE_NET_CRC32_ETH,
+ RTE_NET_CRC_REQS
+};
-extern int per_core_crypto_init(uint32_t lcore_id);
+/** CRC compute algorithm */
+enum rte_net_crc_alg {
+ RTE_NET_CRC_SCALAR = 0,
+ RTE_NET_CRC_SSE42,
+};
-extern void crypto_exit(void);
+/**
+ * This API set the CRC computation algorithm (i.e. scalar version,
+ * x86 64-bit sse4.2 intrinsic version, etc.) and internal data
+ * structure.
+ *
+ * @param alg
+ * This parameter is used to select the CRC implementation version.
+ * - RTE_NET_CRC_SCALAR
+ * - RTE_NET_CRC_SSE42 (Use 64-bit SSE4.2 intrinsic)
+ */
+void
+rte_net_crc_set_alg(enum rte_net_crc_alg alg);
+
+/**
+ * CRC compute API
+ *
+ * @param data
+ * Pointer to the packet data for CRC computation
+ * @param data_len
+ * Data length for CRC computation
+ * @param type
+ * CRC type (enum rte_net_crc_type)
+ *
+ * @return
+ * CRC value
+ */
+uint32_t
+rte_net_crc_calc(const void *data,
+ uint32_t data_len,
+ enum rte_net_crc_type type);
-extern void *crypto_get_next_response(void);
+#ifdef __cplusplus
+}
+#endif
-extern void crypto_flush_tx_queue(uint32_t lcore_id);
-#endif /* CRYPTO_H_ */
+#endif /* _RTE_NET_CRC_H_ */
diff --git a/lib/librte_net/rte_net_version.map b/lib/librte_net/rte_net_version.map
index 3b15e651..687c40ea 100644
--- a/lib/librte_net/rte_net_version.map
+++ b/lib/librte_net/rte_net_version.map
@@ -4,3 +4,11 @@ DPDK_16.11 {
local: *;
};
+
+DPDK_17.05 {
+ global:
+
+ rte_net_crc_calc;
+ rte_net_crc_set_alg;
+
+} DPDK_16.11;
diff --git a/lib/librte_pdump/Makefile b/lib/librte_pdump/Makefile
index 166441a2..1c03bcbb 100644
--- a/lib/librte_pdump/Makefile
+++ b/lib/librte_pdump/Makefile
@@ -48,10 +48,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PDUMP) := rte_pdump.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_PDUMP)-include := rte_pdump.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += lib/librte_ether
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
index 59686837..b599d65d 100644
--- a/lib/librte_pdump/rte_pdump.c
+++ b/lib/librte_pdump/rte_pdump.c
@@ -197,7 +197,7 @@ pdump_copy(struct rte_mbuf **pkts, uint16_t nb_pkts, void *user_params)
dup_bufs[d_pkts++] = p;
}
- ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts);
+ ring_enq = rte_ring_enqueue_burst(ring, (void *)dup_bufs, d_pkts, NULL);
if (unlikely(ring_enq < d_pkts)) {
RTE_LOG(DEBUG, PDUMP,
"only %d of packets enqueued to ring\n", ring_enq);
@@ -337,7 +337,7 @@ pdump_regitser_tx_callbacks(uint16_t end_q, uint8_t port, uint16_t queue,
static int
set_pdump_rxtx_cbs(struct pdump_request *p)
{
- uint16_t nb_rx_q, nb_tx_q = 0, end_q, queue;
+ uint16_t nb_rx_q = 0, nb_tx_q = 0, end_q, queue;
uint8_t port;
int ret = 0;
uint32_t flags;
@@ -740,7 +740,7 @@ pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
rte_errno = EINVAL;
return -1;
}
- if (ring->prod.sp_enqueue || ring->cons.sc_dequeue) {
+ if (ring->prod.single || ring->cons.single) {
RTE_LOG(ERR, PDUMP, "ring with either SP or SC settings"
" is not valid for pdump, should have MP and MC settings\n");
rte_errno = EINVAL;
diff --git a/lib/librte_pdump/rte_pdump.h b/lib/librte_pdump/rte_pdump.h
index 924b8043..ba6e39b0 100644
--- a/lib/librte_pdump/rte_pdump.h
+++ b/lib/librte_pdump/rte_pdump.h
@@ -201,7 +201,7 @@ rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
*
* @param path
* directory path for server or client socket.
- * @type
+ * @param type
* specifies RTE_PDUMP_SOCKET_SERVER if socket path is for server.
* (or)
* specifies RTE_PDUMP_SOCKET_CLIENT if socket path is for client.
diff --git a/lib/librte_pipeline/Makefile b/lib/librte_pipeline/Makefile
index 05d64ff8..7a835fd5 100644
--- a/lib/librte_pipeline/Makefile
+++ b/lib/librte_pipeline/Makefile
@@ -51,11 +51,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := rte_pipeline.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_PIPELINE)-include += rte_pipeline.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib/librte_table
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += lib/librte_port
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_port/Makefile b/lib/librte_port/Makefile
index 44fa7352..76629a13 100644
--- a/lib/librte_port/Makefile
+++ b/lib/librte_port/Makefile
@@ -77,15 +77,4 @@ SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_kni.h
endif
SYMLINK-$(CONFIG_RTE_LIBRTE_PORT)-include += rte_port_source_sink.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) := lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_ip_frag
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_sched
-ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PORT) += lib/librte_kni
-endif
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_port/rte_port_ethdev.c b/lib/librte_port/rte_port_ethdev.c
index 73e5f185..d5c5fba5 100644
--- a/lib/librte_port/rte_port_ethdev.c
+++ b/lib/librte_port/rte_port_ethdev.c
@@ -67,7 +67,7 @@ static void *
rte_port_ethdev_reader_create(void *params, int socket_id)
{
struct rte_port_ethdev_reader_params *conf =
- (struct rte_port_ethdev_reader_params *) params;
+ params;
struct rte_port_ethdev_reader *port;
/* Check input parameters */
@@ -95,7 +95,7 @@ static int
rte_port_ethdev_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
struct rte_port_ethdev_reader *p =
- (struct rte_port_ethdev_reader *) port;
+ port;
uint16_t rx_pkt_cnt;
rx_pkt_cnt = rte_eth_rx_burst(p->port_id, p->queue_id, pkts, n_pkts);
@@ -120,7 +120,7 @@ static int rte_port_ethdev_reader_stats_read(void *port,
struct rte_port_in_stats *stats, int clear)
{
struct rte_port_ethdev_reader *p =
- (struct rte_port_ethdev_reader *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
@@ -163,7 +163,7 @@ static void *
rte_port_ethdev_writer_create(void *params, int socket_id)
{
struct rte_port_ethdev_writer_params *conf =
- (struct rte_port_ethdev_writer_params *) params;
+ params;
struct rte_port_ethdev_writer *port;
/* Check input parameters */
@@ -212,7 +212,7 @@ static int
rte_port_ethdev_writer_tx(void *port, struct rte_mbuf *pkt)
{
struct rte_port_ethdev_writer *p =
- (struct rte_port_ethdev_writer *) port;
+ port;
p->tx_buf[p->tx_buf_count++] = pkt;
RTE_PORT_ETHDEV_WRITER_STATS_PKTS_IN_ADD(p, 1);
@@ -228,7 +228,7 @@ rte_port_ethdev_writer_tx_bulk(void *port,
uint64_t pkts_mask)
{
struct rte_port_ethdev_writer *p =
- (struct rte_port_ethdev_writer *) port;
+ port;
uint64_t bsz_mask = p->bsz_mask;
uint32_t tx_buf_count = p->tx_buf_count;
uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
@@ -274,7 +274,7 @@ static int
rte_port_ethdev_writer_flush(void *port)
{
struct rte_port_ethdev_writer *p =
- (struct rte_port_ethdev_writer *) port;
+ port;
if (p->tx_buf_count > 0)
send_burst(p);
@@ -300,7 +300,7 @@ static int rte_port_ethdev_writer_stats_read(void *port,
struct rte_port_out_stats *stats, int clear)
{
struct rte_port_ethdev_writer *p =
- (struct rte_port_ethdev_writer *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
@@ -344,7 +344,7 @@ static void *
rte_port_ethdev_writer_nodrop_create(void *params, int socket_id)
{
struct rte_port_ethdev_writer_nodrop_params *conf =
- (struct rte_port_ethdev_writer_nodrop_params *) params;
+ params;
struct rte_port_ethdev_writer_nodrop *port;
/* Check input parameters */
@@ -418,7 +418,7 @@ static int
rte_port_ethdev_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
{
struct rte_port_ethdev_writer_nodrop *p =
- (struct rte_port_ethdev_writer_nodrop *) port;
+ port;
p->tx_buf[p->tx_buf_count++] = pkt;
RTE_PORT_ETHDEV_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
@@ -434,7 +434,7 @@ rte_port_ethdev_writer_nodrop_tx_bulk(void *port,
uint64_t pkts_mask)
{
struct rte_port_ethdev_writer_nodrop *p =
- (struct rte_port_ethdev_writer_nodrop *) port;
+ port;
uint64_t bsz_mask = p->bsz_mask;
uint32_t tx_buf_count = p->tx_buf_count;
@@ -456,8 +456,8 @@ rte_port_ethdev_writer_nodrop_tx_bulk(void *port,
return 0;
/*
- * If we didnt manage to send all packets in single burst, move
- * remaining packets to the buffer and call send burst.
+ * If we did not manage to send all packets in single burst,
+ * move remaining packets to the buffer and call send burst.
*/
for (; n_pkts_ok < n_pkts; n_pkts_ok++) {
struct rte_mbuf *pkt = pkts[n_pkts_ok];
@@ -487,7 +487,7 @@ static int
rte_port_ethdev_writer_nodrop_flush(void *port)
{
struct rte_port_ethdev_writer_nodrop *p =
- (struct rte_port_ethdev_writer_nodrop *) port;
+ port;
if (p->tx_buf_count > 0)
send_burst_nodrop(p);
@@ -513,7 +513,7 @@ static int rte_port_ethdev_writer_nodrop_stats_read(void *port,
struct rte_port_out_stats *stats, int clear)
{
struct rte_port_ethdev_writer_nodrop *p =
- (struct rte_port_ethdev_writer_nodrop *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
diff --git a/lib/librte_port/rte_port_fd.c b/lib/librte_port/rte_port_fd.c
index 0d640f34..b5b37291 100644
--- a/lib/librte_port/rte_port_fd.c
+++ b/lib/librte_port/rte_port_fd.c
@@ -67,7 +67,7 @@ static void *
rte_port_fd_reader_create(void *params, int socket_id)
{
struct rte_port_fd_reader_params *conf =
- (struct rte_port_fd_reader_params *) params;
+ params;
struct rte_port_fd_reader *port;
/* Check input parameters */
@@ -107,18 +107,13 @@ rte_port_fd_reader_create(void *params, int socket_id)
static int
rte_port_fd_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
- struct rte_port_fd_reader *p = (struct rte_port_fd_reader *) port;
- uint32_t i;
+ struct rte_port_fd_reader *p = port;
+ uint32_t i, j;
- if (rte_mempool_get_bulk(p->mempool, (void **) pkts, n_pkts) != 0)
+ if (rte_pktmbuf_alloc_bulk(p->mempool, pkts, n_pkts) != 0)
return 0;
for (i = 0; i < n_pkts; i++) {
- rte_mbuf_refcnt_set(pkts[i], 1);
- rte_pktmbuf_reset(pkts[i]);
- }
-
- for (i = 0; i < n_pkts; i++) {
struct rte_mbuf *pkt = pkts[i];
void *pkt_data = rte_pktmbuf_mtod(pkt, void *);
ssize_t n_bytes;
@@ -131,12 +126,12 @@ rte_port_fd_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
pkt->pkt_len = n_bytes;
}
- for ( ; i < n_pkts; i++)
- rte_pktmbuf_free(pkts[i]);
+ for (j = i; j < n_pkts; j++)
+ rte_pktmbuf_free(pkts[j]);
RTE_PORT_FD_READER_STATS_PKTS_IN_ADD(p, i);
- return n_pkts;
+ return i;
}
static int
@@ -156,7 +151,7 @@ static int rte_port_fd_reader_stats_read(void *port,
struct rte_port_in_stats *stats, int clear)
{
struct rte_port_fd_reader *p =
- (struct rte_port_fd_reader *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
@@ -197,7 +192,7 @@ static void *
rte_port_fd_writer_create(void *params, int socket_id)
{
struct rte_port_fd_writer_params *conf =
- (struct rte_port_fd_writer_params *) params;
+ params;
struct rte_port_fd_writer *port;
/* Check input parameters */
@@ -253,7 +248,7 @@ static int
rte_port_fd_writer_tx(void *port, struct rte_mbuf *pkt)
{
struct rte_port_fd_writer *p =
- (struct rte_port_fd_writer *) port;
+ port;
p->tx_buf[p->tx_buf_count++] = pkt;
RTE_PORT_FD_WRITER_STATS_PKTS_IN_ADD(p, 1);
@@ -269,7 +264,7 @@ rte_port_fd_writer_tx_bulk(void *port,
uint64_t pkts_mask)
{
struct rte_port_fd_writer *p =
- (struct rte_port_fd_writer *) port;
+ port;
uint32_t tx_buf_count = p->tx_buf_count;
if ((pkts_mask & (pkts_mask + 1)) == 0) {
@@ -301,7 +296,7 @@ static int
rte_port_fd_writer_flush(void *port)
{
struct rte_port_fd_writer *p =
- (struct rte_port_fd_writer *) port;
+ port;
if (p->tx_buf_count > 0)
send_burst(p);
@@ -327,7 +322,7 @@ static int rte_port_fd_writer_stats_read(void *port,
struct rte_port_out_stats *stats, int clear)
{
struct rte_port_fd_writer *p =
- (struct rte_port_fd_writer *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
@@ -369,7 +364,7 @@ static void *
rte_port_fd_writer_nodrop_create(void *params, int socket_id)
{
struct rte_port_fd_writer_nodrop_params *conf =
- (struct rte_port_fd_writer_nodrop_params *) params;
+ params;
struct rte_port_fd_writer_nodrop *port;
/* Check input parameters */
@@ -438,7 +433,7 @@ static int
rte_port_fd_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
{
struct rte_port_fd_writer_nodrop *p =
- (struct rte_port_fd_writer_nodrop *) port;
+ port;
p->tx_buf[p->tx_buf_count++] = pkt;
RTE_PORT_FD_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
@@ -454,7 +449,7 @@ rte_port_fd_writer_nodrop_tx_bulk(void *port,
uint64_t pkts_mask)
{
struct rte_port_fd_writer_nodrop *p =
- (struct rte_port_fd_writer_nodrop *) port;
+ port;
uint32_t tx_buf_count = p->tx_buf_count;
if ((pkts_mask & (pkts_mask + 1)) == 0) {
@@ -486,7 +481,7 @@ static int
rte_port_fd_writer_nodrop_flush(void *port)
{
struct rte_port_fd_writer_nodrop *p =
- (struct rte_port_fd_writer_nodrop *) port;
+ port;
if (p->tx_buf_count > 0)
send_burst_nodrop(p);
@@ -512,7 +507,7 @@ static int rte_port_fd_writer_nodrop_stats_read(void *port,
struct rte_port_out_stats *stats, int clear)
{
struct rte_port_fd_writer_nodrop *p =
- (struct rte_port_fd_writer_nodrop *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
diff --git a/lib/librte_port/rte_port_frag.c b/lib/librte_port/rte_port_frag.c
index 0fcace99..a00c9ae1 100644
--- a/lib/librte_port/rte_port_frag.c
+++ b/lib/librte_port/rte_port_frag.c
@@ -88,7 +88,7 @@ static void *
rte_port_ring_reader_frag_create(void *params, int socket_id, int is_ipv4)
{
struct rte_port_ring_reader_frag_params *conf =
- (struct rte_port_ring_reader_frag_params *) params;
+ params;
struct rte_port_ring_reader_frag *port;
/* Check input parameters */
@@ -159,7 +159,7 @@ rte_port_ring_reader_frag_rx(void *port,
uint32_t n_pkts)
{
struct rte_port_ring_reader_frag *p =
- (struct rte_port_ring_reader_frag *) port;
+ port;
uint32_t n_pkts_out;
n_pkts_out = 0;
@@ -186,7 +186,8 @@ rte_port_ring_reader_frag_rx(void *port,
/* If "pkts" buffer is empty, read packet burst from ring */
if (p->n_pkts == 0) {
p->n_pkts = rte_ring_sc_dequeue_burst(p->ring,
- (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX);
+ (void **) p->pkts, RTE_PORT_IN_BURST_SIZE_MAX,
+ NULL);
RTE_PORT_RING_READER_FRAG_STATS_PKTS_IN_ADD(p, p->n_pkts);
if (p->n_pkts == 0)
return n_pkts_out;
@@ -276,7 +277,7 @@ rte_port_frag_reader_stats_read(void *port,
struct rte_port_in_stats *stats, int clear)
{
struct rte_port_ring_reader_frag *p =
- (struct rte_port_ring_reader_frag *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
diff --git a/lib/librte_port/rte_port_kni.c b/lib/librte_port/rte_port_kni.c
index 08f4ac2a..2515fb2a 100644
--- a/lib/librte_port/rte_port_kni.c
+++ b/lib/librte_port/rte_port_kni.c
@@ -66,7 +66,7 @@ static void *
rte_port_kni_reader_create(void *params, int socket_id)
{
struct rte_port_kni_reader_params *conf =
- (struct rte_port_kni_reader_params *) params;
+ params;
struct rte_port_kni_reader *port;
/* Check input parameters */
@@ -93,7 +93,7 @@ static int
rte_port_kni_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
struct rte_port_kni_reader *p =
- (struct rte_port_kni_reader *) port;
+ port;
uint16_t rx_pkt_cnt;
rx_pkt_cnt = rte_kni_rx_burst(p->kni, pkts, n_pkts);
@@ -118,7 +118,7 @@ static int rte_port_kni_reader_stats_read(void *port,
struct rte_port_in_stats *stats, int clear)
{
struct rte_port_kni_reader *p =
- (struct rte_port_kni_reader *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
@@ -160,7 +160,7 @@ static void *
rte_port_kni_writer_create(void *params, int socket_id)
{
struct rte_port_kni_writer_params *conf =
- (struct rte_port_kni_writer_params *) params;
+ params;
struct rte_port_kni_writer *port;
/* Check input parameters */
@@ -207,7 +207,7 @@ static int
rte_port_kni_writer_tx(void *port, struct rte_mbuf *pkt)
{
struct rte_port_kni_writer *p =
- (struct rte_port_kni_writer *) port;
+ port;
p->tx_buf[p->tx_buf_count++] = pkt;
RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, 1);
@@ -223,7 +223,7 @@ rte_port_kni_writer_tx_bulk(void *port,
uint64_t pkts_mask)
{
struct rte_port_kni_writer *p =
- (struct rte_port_kni_writer *) port;
+ port;
uint64_t bsz_mask = p->bsz_mask;
uint32_t tx_buf_count = p->tx_buf_count;
uint64_t expr = (pkts_mask & (pkts_mask + 1)) |
@@ -268,7 +268,7 @@ static int
rte_port_kni_writer_flush(void *port)
{
struct rte_port_kni_writer *p =
- (struct rte_port_kni_writer *) port;
+ port;
if (p->tx_buf_count > 0)
send_burst(p);
@@ -294,7 +294,7 @@ static int rte_port_kni_writer_stats_read(void *port,
struct rte_port_out_stats *stats, int clear)
{
struct rte_port_kni_writer *p =
- (struct rte_port_kni_writer *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
@@ -337,7 +337,7 @@ static void *
rte_port_kni_writer_nodrop_create(void *params, int socket_id)
{
struct rte_port_kni_writer_nodrop_params *conf =
- (struct rte_port_kni_writer_nodrop_params *) params;
+ params;
struct rte_port_kni_writer_nodrop *port;
/* Check input parameters */
@@ -410,7 +410,7 @@ static int
rte_port_kni_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
{
struct rte_port_kni_writer_nodrop *p =
- (struct rte_port_kni_writer_nodrop *) port;
+ port;
p->tx_buf[p->tx_buf_count++] = pkt;
RTE_PORT_KNI_WRITER_STATS_PKTS_IN_ADD(p, 1);
@@ -426,7 +426,7 @@ rte_port_kni_writer_nodrop_tx_bulk(void *port,
uint64_t pkts_mask)
{
struct rte_port_kni_writer_nodrop *p =
- (struct rte_port_kni_writer_nodrop *) port;
+ port;
uint64_t bsz_mask = p->bsz_mask;
uint32_t tx_buf_count = p->tx_buf_count;
@@ -478,7 +478,7 @@ static int
rte_port_kni_writer_nodrop_flush(void *port)
{
struct rte_port_kni_writer_nodrop *p =
- (struct rte_port_kni_writer_nodrop *) port;
+ port;
if (p->tx_buf_count > 0)
send_burst_nodrop(p);
@@ -504,7 +504,7 @@ static int rte_port_kni_writer_nodrop_stats_read(void *port,
struct rte_port_out_stats *stats, int clear)
{
struct rte_port_kni_writer_nodrop *p =
- (struct rte_port_kni_writer_nodrop *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
diff --git a/lib/librte_port/rte_port_ras.c b/lib/librte_port/rte_port_ras.c
index c4bb5081..415fadd5 100644
--- a/lib/librte_port/rte_port_ras.c
+++ b/lib/librte_port/rte_port_ras.c
@@ -93,7 +93,7 @@ static void *
rte_port_ring_writer_ras_create(void *params, int socket_id, int is_ipv4)
{
struct rte_port_ring_writer_ras_params *conf =
- (struct rte_port_ring_writer_ras_params *) params;
+ params;
struct rte_port_ring_writer_ras *port;
uint64_t frag_cycles;
@@ -167,7 +167,7 @@ send_burst(struct rte_port_ring_writer_ras *p)
uint32_t nb_tx;
nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);
RTE_PORT_RING_WRITER_RAS_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
@@ -243,7 +243,7 @@ static int
rte_port_ring_writer_ras_tx(void *port, struct rte_mbuf *pkt)
{
struct rte_port_ring_writer_ras *p =
- (struct rte_port_ring_writer_ras *) port;
+ port;
RTE_PORT_RING_WRITER_RAS_STATS_PKTS_IN_ADD(p, 1);
p->f_ras(p, pkt);
@@ -259,7 +259,7 @@ rte_port_ring_writer_ras_tx_bulk(void *port,
uint64_t pkts_mask)
{
struct rte_port_ring_writer_ras *p =
- (struct rte_port_ring_writer_ras *) port;
+ port;
if ((pkts_mask & (pkts_mask + 1)) == 0) {
uint64_t n_pkts = __builtin_popcountll(pkts_mask);
@@ -295,7 +295,7 @@ static int
rte_port_ring_writer_ras_flush(void *port)
{
struct rte_port_ring_writer_ras *p =
- (struct rte_port_ring_writer_ras *) port;
+ port;
if (p->tx_buf_count > 0)
send_burst(p);
@@ -307,7 +307,7 @@ static int
rte_port_ring_writer_ras_free(void *port)
{
struct rte_port_ring_writer_ras *p =
- (struct rte_port_ring_writer_ras *) port;
+ port;
if (port == NULL) {
RTE_LOG(ERR, PORT, "%s: Parameter port is NULL\n", __func__);
@@ -326,7 +326,7 @@ rte_port_ras_writer_stats_read(void *port,
struct rte_port_out_stats *stats, int clear)
{
struct rte_port_ring_writer_ras *p =
- (struct rte_port_ring_writer_ras *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
diff --git a/lib/librte_port/rte_port_ring.c b/lib/librte_port/rte_port_ring.c
index 3b9d3d08..64bd965f 100644
--- a/lib/librte_port/rte_port_ring.c
+++ b/lib/librte_port/rte_port_ring.c
@@ -67,14 +67,14 @@ rte_port_ring_reader_create_internal(void *params, int socket_id,
uint32_t is_multi)
{
struct rte_port_ring_reader_params *conf =
- (struct rte_port_ring_reader_params *) params;
+ params;
struct rte_port_ring_reader *port;
/* Check input parameters */
if ((conf == NULL) ||
(conf->ring == NULL) ||
- (conf->ring->cons.sc_dequeue && is_multi) ||
- (!(conf->ring->cons.sc_dequeue) && !is_multi)) {
+ (conf->ring->cons.single && is_multi) ||
+ (!(conf->ring->cons.single) && !is_multi)) {
RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
return NULL;
}
@@ -108,10 +108,11 @@ rte_port_ring_multi_reader_create(void *params, int socket_id)
static int
rte_port_ring_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
- struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
+ struct rte_port_ring_reader *p = port;
uint32_t nb_rx;
- nb_rx = rte_ring_sc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+ nb_rx = rte_ring_sc_dequeue_burst(p->ring, (void **) pkts,
+ n_pkts, NULL);
RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, nb_rx);
return nb_rx;
@@ -121,10 +122,11 @@ static int
rte_port_ring_multi_reader_rx(void *port, struct rte_mbuf **pkts,
uint32_t n_pkts)
{
- struct rte_port_ring_reader *p = (struct rte_port_ring_reader *) port;
+ struct rte_port_ring_reader *p = port;
uint32_t nb_rx;
- nb_rx = rte_ring_mc_dequeue_burst(p->ring, (void **) pkts, n_pkts);
+ nb_rx = rte_ring_mc_dequeue_burst(p->ring, (void **) pkts,
+ n_pkts, NULL);
RTE_PORT_RING_READER_STATS_PKTS_IN_ADD(p, nb_rx);
return nb_rx;
@@ -148,7 +150,7 @@ rte_port_ring_reader_stats_read(void *port,
struct rte_port_in_stats *stats, int clear)
{
struct rte_port_ring_reader *p =
- (struct rte_port_ring_reader *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
@@ -192,14 +194,14 @@ rte_port_ring_writer_create_internal(void *params, int socket_id,
uint32_t is_multi)
{
struct rte_port_ring_writer_params *conf =
- (struct rte_port_ring_writer_params *) params;
+ params;
struct rte_port_ring_writer *port;
/* Check input parameters */
if ((conf == NULL) ||
(conf->ring == NULL) ||
- (conf->ring->prod.sp_enqueue && is_multi) ||
- (!(conf->ring->prod.sp_enqueue) && !is_multi) ||
+ (conf->ring->prod.single && is_multi) ||
+ (!(conf->ring->prod.single) && !is_multi) ||
(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
return NULL;
@@ -241,7 +243,7 @@ send_burst(struct rte_port_ring_writer *p)
uint32_t nb_tx;
nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);
RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
@@ -256,7 +258,7 @@ send_burst_mp(struct rte_port_ring_writer *p)
uint32_t nb_tx;
nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);
RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, p->tx_buf_count - nb_tx);
for ( ; nb_tx < p->tx_buf_count; nb_tx++)
@@ -268,7 +270,7 @@ send_burst_mp(struct rte_port_ring_writer *p)
static int
rte_port_ring_writer_tx(void *port, struct rte_mbuf *pkt)
{
- struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
+ struct rte_port_ring_writer *p = port;
p->tx_buf[p->tx_buf_count++] = pkt;
RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, 1);
@@ -281,7 +283,7 @@ rte_port_ring_writer_tx(void *port, struct rte_mbuf *pkt)
static int
rte_port_ring_multi_writer_tx(void *port, struct rte_mbuf *pkt)
{
- struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
+ struct rte_port_ring_writer *p = port;
p->tx_buf[p->tx_buf_count++] = pkt;
RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, 1);
@@ -298,7 +300,7 @@ rte_port_ring_writer_tx_bulk_internal(void *port,
uint32_t is_multi)
{
struct rte_port_ring_writer *p =
- (struct rte_port_ring_writer *) port;
+ port;
uint64_t bsz_mask = p->bsz_mask;
uint32_t tx_buf_count = p->tx_buf_count;
@@ -318,11 +320,11 @@ rte_port_ring_writer_tx_bulk_internal(void *port,
RTE_PORT_RING_WRITER_STATS_PKTS_IN_ADD(p, n_pkts);
if (is_multi)
- n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring, (void **)pkts,
- n_pkts);
+ n_pkts_ok = rte_ring_mp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
else
- n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring, (void **)pkts,
- n_pkts);
+ n_pkts_ok = rte_ring_sp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
RTE_PORT_RING_WRITER_STATS_PKTS_DROP_ADD(p, n_pkts - n_pkts_ok);
for ( ; n_pkts_ok < n_pkts; n_pkts_ok++) {
@@ -372,7 +374,7 @@ rte_port_ring_multi_writer_tx_bulk(void *port,
static int
rte_port_ring_writer_flush(void *port)
{
- struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
+ struct rte_port_ring_writer *p = port;
if (p->tx_buf_count > 0)
send_burst(p);
@@ -383,7 +385,7 @@ rte_port_ring_writer_flush(void *port)
static int
rte_port_ring_multi_writer_flush(void *port)
{
- struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
+ struct rte_port_ring_writer *p = port;
if (p->tx_buf_count > 0)
send_burst_mp(p);
@@ -394,7 +396,7 @@ rte_port_ring_multi_writer_flush(void *port)
static int
rte_port_ring_writer_free(void *port)
{
- struct rte_port_ring_writer *p = (struct rte_port_ring_writer *) port;
+ struct rte_port_ring_writer *p = port;
if (port == NULL) {
RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
@@ -416,7 +418,7 @@ rte_port_ring_writer_stats_read(void *port,
struct rte_port_out_stats *stats, int clear)
{
struct rte_port_ring_writer *p =
- (struct rte_port_ring_writer *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
@@ -461,14 +463,14 @@ rte_port_ring_writer_nodrop_create_internal(void *params, int socket_id,
uint32_t is_multi)
{
struct rte_port_ring_writer_nodrop_params *conf =
- (struct rte_port_ring_writer_nodrop_params *) params;
+ params;
struct rte_port_ring_writer_nodrop *port;
/* Check input parameters */
if ((conf == NULL) ||
(conf->ring == NULL) ||
- (conf->ring->prod.sp_enqueue && is_multi) ||
- (!(conf->ring->prod.sp_enqueue) && !is_multi) ||
+ (conf->ring->prod.single && is_multi) ||
+ (!(conf->ring->prod.single) && !is_multi) ||
(conf->tx_burst_sz > RTE_PORT_IN_BURST_SIZE_MAX)) {
RTE_LOG(ERR, PORT, "%s: Invalid Parameters\n", __func__);
return NULL;
@@ -517,7 +519,7 @@ send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)
uint32_t nb_tx = 0, i;
nb_tx = rte_ring_sp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);
/* We sent all the packets in a first try */
if (nb_tx >= p->tx_buf_count) {
@@ -527,7 +529,8 @@ send_burst_nodrop(struct rte_port_ring_writer_nodrop *p)
for (i = 0; i < p->n_retries; i++) {
nb_tx += rte_ring_sp_enqueue_burst(p->ring,
- (void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+ (void **) (p->tx_buf + nb_tx),
+ p->tx_buf_count - nb_tx, NULL);
/* We sent all the packets in more than one try */
if (nb_tx >= p->tx_buf_count) {
@@ -550,7 +553,7 @@ send_burst_mp_nodrop(struct rte_port_ring_writer_nodrop *p)
uint32_t nb_tx = 0, i;
nb_tx = rte_ring_mp_enqueue_burst(p->ring, (void **)p->tx_buf,
- p->tx_buf_count);
+ p->tx_buf_count, NULL);
/* We sent all the packets in a first try */
if (nb_tx >= p->tx_buf_count) {
@@ -560,7 +563,8 @@ send_burst_mp_nodrop(struct rte_port_ring_writer_nodrop *p)
for (i = 0; i < p->n_retries; i++) {
nb_tx += rte_ring_mp_enqueue_burst(p->ring,
- (void **) (p->tx_buf + nb_tx), p->tx_buf_count - nb_tx);
+ (void **) (p->tx_buf + nb_tx),
+ p->tx_buf_count - nb_tx, NULL);
/* We sent all the packets in more than one try */
if (nb_tx >= p->tx_buf_count) {
@@ -581,7 +585,7 @@ static int
rte_port_ring_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
{
struct rte_port_ring_writer_nodrop *p =
- (struct rte_port_ring_writer_nodrop *) port;
+ port;
p->tx_buf[p->tx_buf_count++] = pkt;
RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
@@ -595,7 +599,7 @@ static int
rte_port_ring_multi_writer_nodrop_tx(void *port, struct rte_mbuf *pkt)
{
struct rte_port_ring_writer_nodrop *p =
- (struct rte_port_ring_writer_nodrop *) port;
+ port;
p->tx_buf[p->tx_buf_count++] = pkt;
RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_IN_ADD(p, 1);
@@ -612,7 +616,7 @@ rte_port_ring_writer_nodrop_tx_bulk_internal(void *port,
uint32_t is_multi)
{
struct rte_port_ring_writer_nodrop *p =
- (struct rte_port_ring_writer_nodrop *) port;
+ port;
uint64_t bsz_mask = p->bsz_mask;
uint32_t tx_buf_count = p->tx_buf_count;
@@ -633,10 +637,12 @@ rte_port_ring_writer_nodrop_tx_bulk_internal(void *port,
RTE_PORT_RING_WRITER_NODROP_STATS_PKTS_IN_ADD(p, n_pkts);
if (is_multi)
n_pkts_ok =
- rte_ring_mp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ rte_ring_mp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
else
n_pkts_ok =
- rte_ring_sp_enqueue_burst(p->ring, (void **)pkts, n_pkts);
+ rte_ring_sp_enqueue_burst(p->ring,
+ (void **)pkts, n_pkts, NULL);
if (n_pkts_ok >= n_pkts)
return 0;
@@ -699,7 +705,7 @@ static int
rte_port_ring_writer_nodrop_flush(void *port)
{
struct rte_port_ring_writer_nodrop *p =
- (struct rte_port_ring_writer_nodrop *) port;
+ port;
if (p->tx_buf_count > 0)
send_burst_nodrop(p);
@@ -711,7 +717,7 @@ static int
rte_port_ring_multi_writer_nodrop_flush(void *port)
{
struct rte_port_ring_writer_nodrop *p =
- (struct rte_port_ring_writer_nodrop *) port;
+ port;
if (p->tx_buf_count > 0)
send_burst_mp_nodrop(p);
@@ -723,7 +729,7 @@ static int
rte_port_ring_writer_nodrop_free(void *port)
{
struct rte_port_ring_writer_nodrop *p =
- (struct rte_port_ring_writer_nodrop *) port;
+ port;
if (port == NULL) {
RTE_LOG(ERR, PORT, "%s: Port is NULL\n", __func__);
@@ -745,7 +751,7 @@ rte_port_ring_writer_nodrop_stats_read(void *port,
struct rte_port_out_stats *stats, int clear)
{
struct rte_port_ring_writer_nodrop *p =
- (struct rte_port_ring_writer_nodrop *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
diff --git a/lib/librte_port/rte_port_sched.c b/lib/librte_port/rte_port_sched.c
index 25217d62..9100a197 100644
--- a/lib/librte_port/rte_port_sched.c
+++ b/lib/librte_port/rte_port_sched.c
@@ -64,7 +64,7 @@ static void *
rte_port_sched_reader_create(void *params, int socket_id)
{
struct rte_port_sched_reader_params *conf =
- (struct rte_port_sched_reader_params *) params;
+ params;
struct rte_port_sched_reader *port;
/* Check input parameters */
@@ -91,7 +91,7 @@ rte_port_sched_reader_create(void *params, int socket_id)
static int
rte_port_sched_reader_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
- struct rte_port_sched_reader *p = (struct rte_port_sched_reader *) port;
+ struct rte_port_sched_reader *p = port;
uint32_t nb_rx;
nb_rx = rte_sched_port_dequeue(p->sched, pkts, n_pkts);
@@ -118,7 +118,7 @@ rte_port_sched_reader_stats_read(void *port,
struct rte_port_in_stats *stats, int clear)
{
struct rte_port_sched_reader *p =
- (struct rte_port_sched_reader *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
@@ -160,7 +160,7 @@ static void *
rte_port_sched_writer_create(void *params, int socket_id)
{
struct rte_port_sched_writer_params *conf =
- (struct rte_port_sched_writer_params *) params;
+ params;
struct rte_port_sched_writer *port;
/* Check input parameters */
@@ -292,7 +292,7 @@ rte_port_sched_writer_stats_read(void *port,
struct rte_port_out_stats *stats, int clear)
{
struct rte_port_sched_writer *p =
- (struct rte_port_sched_writer *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
diff --git a/lib/librte_port/rte_port_source_sink.c b/lib/librte_port/rte_port_source_sink.c
index 4cad7109..a79f2f64 100644
--- a/lib/librte_port/rte_port_source_sink.c
+++ b/lib/librte_port/rte_port_source_sink.c
@@ -228,7 +228,7 @@ static void *
rte_port_source_create(void *params, int socket_id)
{
struct rte_port_source_params *p =
- (struct rte_port_source_params *) params;
+ params;
struct rte_port_source *port;
/* Check input arguments*/
@@ -265,7 +265,7 @@ static int
rte_port_source_free(void *port)
{
struct rte_port_source *p =
- (struct rte_port_source *)port;
+ port;
/* Check input parameters */
if (p == NULL)
@@ -286,17 +286,12 @@ rte_port_source_free(void *port)
static int
rte_port_source_rx(void *port, struct rte_mbuf **pkts, uint32_t n_pkts)
{
- struct rte_port_source *p = (struct rte_port_source *) port;
+ struct rte_port_source *p = port;
uint32_t i;
- if (rte_mempool_get_bulk(p->mempool, (void **) pkts, n_pkts) != 0)
+ if (rte_pktmbuf_alloc_bulk(p->mempool, pkts, n_pkts) != 0)
return 0;
- for (i = 0; i < n_pkts; i++) {
- rte_mbuf_refcnt_set(pkts[i], 1);
- rte_pktmbuf_reset(pkts[i]);
- }
-
if (p->pkt_buff != NULL) {
for (i = 0; i < n_pkts; i++) {
uint8_t *pkt_data = rte_pktmbuf_mtod(pkts[i],
@@ -323,7 +318,7 @@ rte_port_source_stats_read(void *port,
struct rte_port_in_stats *stats, int clear)
{
struct rte_port_source *p =
- (struct rte_port_source *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
@@ -400,7 +395,7 @@ pcap_sink_open(struct rte_port_sink *port,
static void
pcap_sink_write_pkt(struct rte_port_sink *port, struct rte_mbuf *mbuf)
{
- uint8_t *pcap_dumper = (uint8_t *)(port->dumper);
+ uint8_t *pcap_dumper = (port->dumper);
struct pcap_pkthdr pcap_hdr;
uint8_t jumbo_pkt_buf[ETHER_MAX_JUMBO_FRAME_LEN];
uint8_t *pkt;
@@ -524,7 +519,7 @@ rte_port_sink_create(void *params, int socket_id)
static int
rte_port_sink_tx(void *port, struct rte_mbuf *pkt)
{
- struct rte_port_sink *p = (struct rte_port_sink *) port;
+ struct rte_port_sink *p = port;
RTE_PORT_SINK_STATS_PKTS_IN_ADD(p, 1);
if (p->dumper != NULL)
@@ -539,7 +534,7 @@ static int
rte_port_sink_tx_bulk(void *port, struct rte_mbuf **pkts,
uint64_t pkts_mask)
{
- struct rte_port_sink *p = (struct rte_port_sink *) port;
+ struct rte_port_sink *p = port;
if ((pkts_mask & (pkts_mask + 1)) == 0) {
uint64_t n_pkts = __builtin_popcountll(pkts_mask);
@@ -591,7 +586,7 @@ static int
rte_port_sink_flush(void *port)
{
struct rte_port_sink *p =
- (struct rte_port_sink *)port;
+ port;
if (p == NULL)
return 0;
@@ -605,7 +600,7 @@ static int
rte_port_sink_free(void *port)
{
struct rte_port_sink *p =
- (struct rte_port_sink *)port;
+ port;
if (p == NULL)
return 0;
@@ -622,7 +617,7 @@ rte_port_sink_stats_read(void *port, struct rte_port_out_stats *stats,
int clear)
{
struct rte_port_sink *p =
- (struct rte_port_sink *) port;
+ port;
if (stats != NULL)
memcpy(stats, &p->stats, sizeof(p->stats));
diff --git a/lib/librte_power/Makefile b/lib/librte_power/Makefile
index cee95cd8..06cd10e8 100644
--- a/lib/librte_power/Makefile
+++ b/lib/librte_power/Makefile
@@ -47,7 +47,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_POWER) += rte_power_kvm_vm.c guest_channel.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_POWER)-include := rte_power.h
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_POWER) += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_reorder/Makefile b/lib/librte_reorder/Makefile
index 0d111aad..4e44e72f 100644
--- a/lib/librte_reorder/Makefile
+++ b/lib/librte_reorder/Makefile
@@ -47,9 +47,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_REORDER) := rte_reorder.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_REORDER)-include := rte_reorder.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_REORDER) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_REORDER) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_REORDER) += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_ring/Makefile b/lib/librte_ring/Makefile
index 4b1112e4..3e2f4b87 100644
--- a/lib/librte_ring/Makefile
+++ b/lib/librte_ring/Makefile
@@ -46,6 +46,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_RING) := rte_ring.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_RING)-include := rte_ring.h
-DEPDIRS-$(CONFIG_RTE_LIBRTE_RING) += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
index ca0a1082..5f98c33f 100644
--- a/lib/librte_ring/rte_ring.c
+++ b/lib/librte_ring/rte_ring.c
@@ -127,18 +127,10 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
/* compilation-time checks */
RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_RING_SPLIT_PROD_CONS
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
RTE_CACHE_LINE_MASK) != 0);
-#endif
RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
RTE_CACHE_LINE_MASK) != 0);
-#ifdef RTE_LIBRTE_RING_DEBUG
- RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
- RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
- RTE_CACHE_LINE_MASK) != 0);
-#endif
/* init the ring structure */
memset(r, 0, sizeof(*r));
@@ -146,11 +138,10 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned count,
if (ret < 0 || ret >= (int)sizeof(r->name))
return -ENAMETOOLONG;
r->flags = flags;
- r->prod.watermark = count;
- r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
- r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
- r->prod.size = r->cons.size = count;
- r->prod.mask = r->cons.mask = count-1;
+ r->prod.single = (flags & RING_F_SP_ENQ) ? __IS_SP : __IS_MP;
+ r->cons.single = (flags & RING_F_SC_DEQ) ? __IS_SC : __IS_MC;
+ r->size = count;
+ r->mask = count - 1;
r->prod.head = r->cons.head = 0;
r->prod.tail = r->cons.tail = 0;
@@ -264,76 +255,19 @@ rte_ring_free(struct rte_ring *r)
rte_free(te);
}
-/*
- * change the high water mark. If *count* is 0, water marking is
- * disabled
- */
-int
-rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
-{
- if (count >= r->prod.size)
- return -EINVAL;
-
- /* if count is 0, disable the watermarking */
- if (count == 0)
- count = r->prod.size;
-
- r->prod.watermark = count;
- return 0;
-}
-
/* dump the status of the ring on the console */
void
rte_ring_dump(FILE *f, const struct rte_ring *r)
{
-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats sum;
- unsigned lcore_id;
-#endif
-
fprintf(f, "ring <%s>@%p\n", r->name, r);
fprintf(f, " flags=%x\n", r->flags);
- fprintf(f, " size=%"PRIu32"\n", r->prod.size);
+ fprintf(f, " size=%"PRIu32"\n", r->size);
fprintf(f, " ct=%"PRIu32"\n", r->cons.tail);
fprintf(f, " ch=%"PRIu32"\n", r->cons.head);
fprintf(f, " pt=%"PRIu32"\n", r->prod.tail);
fprintf(f, " ph=%"PRIu32"\n", r->prod.head);
fprintf(f, " used=%u\n", rte_ring_count(r));
fprintf(f, " avail=%u\n", rte_ring_free_count(r));
- if (r->prod.watermark == r->prod.size)
- fprintf(f, " watermark=0\n");
- else
- fprintf(f, " watermark=%"PRIu32"\n", r->prod.watermark);
-
- /* sum and dump statistics */
-#ifdef RTE_LIBRTE_RING_DEBUG
- memset(&sum, 0, sizeof(sum));
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- sum.enq_success_bulk += r->stats[lcore_id].enq_success_bulk;
- sum.enq_success_objs += r->stats[lcore_id].enq_success_objs;
- sum.enq_quota_bulk += r->stats[lcore_id].enq_quota_bulk;
- sum.enq_quota_objs += r->stats[lcore_id].enq_quota_objs;
- sum.enq_fail_bulk += r->stats[lcore_id].enq_fail_bulk;
- sum.enq_fail_objs += r->stats[lcore_id].enq_fail_objs;
- sum.deq_success_bulk += r->stats[lcore_id].deq_success_bulk;
- sum.deq_success_objs += r->stats[lcore_id].deq_success_objs;
- sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
- sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
- }
- fprintf(f, " size=%"PRIu32"\n", r->prod.size);
- fprintf(f, " enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
- fprintf(f, " enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
- fprintf(f, " enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
- fprintf(f, " enq_quota_objs=%"PRIu64"\n", sum.enq_quota_objs);
- fprintf(f, " enq_fail_bulk=%"PRIu64"\n", sum.enq_fail_bulk);
- fprintf(f, " enq_fail_objs=%"PRIu64"\n", sum.enq_fail_objs);
- fprintf(f, " deq_success_bulk=%"PRIu64"\n", sum.deq_success_bulk);
- fprintf(f, " deq_success_objs=%"PRIu64"\n", sum.deq_success_objs);
- fprintf(f, " deq_fail_bulk=%"PRIu64"\n", sum.deq_fail_bulk);
- fprintf(f, " deq_fail_objs=%"PRIu64"\n", sum.deq_fail_objs);
-#else
- fprintf(f, " no statistics available\n");
-#endif
}
/* dump the status of all rings on the console */
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 32b8c8d2..97f025a1 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -106,38 +106,30 @@ extern "C" {
enum rte_ring_queue_behavior {
RTE_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring */
- RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items a possible from ring */
+ RTE_RING_QUEUE_VARIABLE /* Enq/Deq as many items as possible from ring */
};
-#ifdef RTE_LIBRTE_RING_DEBUG
-/**
- * A structure that stores the ring statistics (per-lcore).
- */
-struct rte_ring_debug_stats {
- uint64_t enq_success_bulk; /**< Successful enqueues number. */
- uint64_t enq_success_objs; /**< Objects successfully enqueued. */
- uint64_t enq_quota_bulk; /**< Successful enqueues above watermark. */
- uint64_t enq_quota_objs; /**< Objects enqueued above watermark. */
- uint64_t enq_fail_bulk; /**< Failed enqueues number. */
- uint64_t enq_fail_objs; /**< Objects that failed to be enqueued. */
- uint64_t deq_success_bulk; /**< Successful dequeues number. */
- uint64_t deq_success_objs; /**< Objects successfully dequeued. */
- uint64_t deq_fail_bulk; /**< Failed dequeues number. */
- uint64_t deq_fail_objs; /**< Objects that failed to be dequeued. */
-} __rte_cache_aligned;
-#endif
-
#define RTE_RING_MZ_PREFIX "RG_"
/**< The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \
sizeof(RTE_RING_MZ_PREFIX) + 1)
-#ifndef RTE_RING_PAUSE_REP_COUNT
-#define RTE_RING_PAUSE_REP_COUNT 0 /**< Yield after pause num of times, no yield
- * if RTE_RING_PAUSE_REP not defined. */
+struct rte_memzone; /* forward declaration, so as not to require memzone.h */
+
+#if RTE_CACHE_LINE_SIZE < 128
+#define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2)
+#define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2)
+#else
+#define PROD_ALIGN RTE_CACHE_LINE_SIZE
+#define CONS_ALIGN RTE_CACHE_LINE_SIZE
#endif
-struct rte_memzone; /* forward declaration, so as not to require memzone.h */
+/* structure to hold a pair of head/tail values and other metadata */
+struct rte_ring_headtail {
+ volatile uint32_t head; /**< Prod/consumer head. */
+ volatile uint32_t tail; /**< Prod/consumer tail. */
+ uint32_t single; /**< True if single prod/cons */
+};
/**
* An RTE ring structure.
@@ -155,68 +147,29 @@ struct rte_ring {
* compatibility requirements, it could be changed to RTE_RING_NAMESIZE
* next time the ABI changes
*/
- char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the ring. */
- int flags; /**< Flags supplied at creation. */
+ char name[RTE_MEMZONE_NAMESIZE] __rte_cache_aligned; /**< Name of the ring. */
+ int flags; /**< Flags supplied at creation. */
const struct rte_memzone *memzone;
/**< Memzone, if any, containing the rte_ring */
+ uint32_t size; /**< Size of ring. */
+ uint32_t mask; /**< Mask (size-1) of ring. */
/** Ring producer status. */
- struct prod {
- uint32_t watermark; /**< Maximum items before EDQUOT. */
- uint32_t sp_enqueue; /**< True, if single producer. */
- uint32_t size; /**< Size of ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Producer head. */
- volatile uint32_t tail; /**< Producer tail. */
- } prod __rte_cache_aligned;
+ struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
/** Ring consumer status. */
- struct cons {
- uint32_t sc_dequeue; /**< True, if single consumer. */
- uint32_t size; /**< Size of the ring. */
- uint32_t mask; /**< Mask (size-1) of ring. */
- volatile uint32_t head; /**< Consumer head. */
- volatile uint32_t tail; /**< Consumer tail. */
-#ifdef RTE_RING_SPLIT_PROD_CONS
- } cons __rte_cache_aligned;
-#else
- } cons;
-#endif
-
-#ifdef RTE_LIBRTE_RING_DEBUG
- struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
-#endif
-
- void *ring[] __rte_cache_aligned; /**< Memory space of ring starts here.
- * not volatile so need to be careful
- * about compiler re-ordering */
+ struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);
};
#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
-#define RTE_RING_QUOT_EXCEED (1 << 31) /**< Quota exceed for burst ops */
#define RTE_RING_SZ_MASK (unsigned)(0x0fffffff) /**< Ring size mask */
-/**
- * @internal When debug is enabled, store ring statistics.
- * @param r
- * A pointer to the ring.
- * @param name
- * The name of the statistics field to increment in the ring.
- * @param n
- * The number to add to the object-oriented statistics.
- */
-#ifdef RTE_LIBRTE_RING_DEBUG
-#define __RING_STAT_ADD(r, name, n) do { \
- unsigned __lcore_id = rte_lcore_id(); \
- if (__lcore_id < RTE_MAX_LCORE) { \
- r->stats[__lcore_id].name##_objs += n; \
- r->stats[__lcore_id].name##_bulk += 1; \
- } \
- } while(0)
-#else
-#define __RING_STAT_ADD(r, name, n) do {} while(0)
-#endif
+/* @internal defines for passing to the enqueue dequeue worker functions */
+#define __IS_SP 1
+#define __IS_MP 0
+#define __IS_SC 1
+#define __IS_MC 0
/**
* Calculate the memory size needed for a ring
@@ -321,26 +274,6 @@ struct rte_ring *rte_ring_create(const char *name, unsigned count,
void rte_ring_free(struct rte_ring *r);
/**
- * Change the high water mark.
- *
- * If *count* is 0, water marking is disabled. Otherwise, it is set to the
- * *count* value. The *count* value must be greater than 0 and less
- * than the ring size.
- *
- * This function can be called at any time (not necessarily at
- * initialization).
- *
- * @param r
- * A pointer to the ring structure.
- * @param count
- * The new water mark value.
- * @return
- * - 0: Success; water mark changed.
- * - -EINVAL: Invalid water mark value.
- */
-int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
-
-/**
* Dump the status of the ring to a file.
*
* @param f
@@ -353,171 +286,147 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
/* the actual enqueue of pointers on the ring.
* Placed here since identical code needed in both
* single and multi producer enqueue functions */
-#define ENQUEUE_PTRS() do { \
- const uint32_t size = r->prod.size; \
- uint32_t idx = prod_head & mask; \
+#define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
+ unsigned int i; \
+ const uint32_t size = (r)->size; \
+ uint32_t idx = prod_head & (r)->mask; \
+ obj_type *ring = (obj_type *)ring_start; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \
- r->ring[idx] = obj_table[i]; \
- r->ring[idx+1] = obj_table[i+1]; \
- r->ring[idx+2] = obj_table[i+2]; \
- r->ring[idx+3] = obj_table[i+3]; \
+ ring[idx] = obj_table[i]; \
+ ring[idx+1] = obj_table[i+1]; \
+ ring[idx+2] = obj_table[i+2]; \
+ ring[idx+3] = obj_table[i+3]; \
} \
switch (n & 0x3) { \
- case 3: r->ring[idx++] = obj_table[i++]; \
- case 2: r->ring[idx++] = obj_table[i++]; \
- case 1: r->ring[idx++] = obj_table[i++]; \
+ case 3: \
+ ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 2: \
+ ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 1: \
+ ring[idx++] = obj_table[i++]; \
} \
} else { \
for (i = 0; idx < size; i++, idx++)\
- r->ring[idx] = obj_table[i]; \
+ ring[idx] = obj_table[i]; \
for (idx = 0; i < n; i++, idx++) \
- r->ring[idx] = obj_table[i]; \
+ ring[idx] = obj_table[i]; \
} \
-} while(0)
+} while (0)
/* the actual copy of pointers on the ring to obj_table.
* Placed here since identical code needed in both
* single and multi consumer dequeue functions */
-#define DEQUEUE_PTRS() do { \
- uint32_t idx = cons_head & mask; \
- const uint32_t size = r->cons.size; \
+#define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
+ unsigned int i; \
+ uint32_t idx = cons_head & (r)->mask; \
+ const uint32_t size = (r)->size; \
+ obj_type *ring = (obj_type *)ring_start; \
if (likely(idx + n < size)) { \
for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\
- obj_table[i] = r->ring[idx]; \
- obj_table[i+1] = r->ring[idx+1]; \
- obj_table[i+2] = r->ring[idx+2]; \
- obj_table[i+3] = r->ring[idx+3]; \
+ obj_table[i] = ring[idx]; \
+ obj_table[i+1] = ring[idx+1]; \
+ obj_table[i+2] = ring[idx+2]; \
+ obj_table[i+3] = ring[idx+3]; \
} \
switch (n & 0x3) { \
- case 3: obj_table[i++] = r->ring[idx++]; \
- case 2: obj_table[i++] = r->ring[idx++]; \
- case 1: obj_table[i++] = r->ring[idx++]; \
+ case 3: \
+ obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ case 2: \
+ obj_table[i++] = ring[idx++]; /* fallthrough */ \
+ case 1: \
+ obj_table[i++] = ring[idx++]; \
} \
} else { \
for (i = 0; idx < size; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
+ obj_table[i] = ring[idx]; \
for (idx = 0; i < n; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
+ obj_table[i] = ring[idx]; \
} \
} while (0)
+static inline __attribute__((always_inline)) void
+update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
+ uint32_t single)
+{
+ /*
+ * If there are other enqueues/dequeues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ if (!single)
+ while (unlikely(ht->tail != old_val))
+ rte_pause();
+
+ ht->tail = new_val;
+}
+
/**
- * @internal Enqueue several objects on the ring (multi-producers safe).
- *
- * This function uses a "compare and set" instruction to move the
- * producer index atomically.
+ * @internal This function updates the producer head for enqueue
*
* @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to the ring structure
+ * @param is_sp
+ * Indicates whether multi-producer path is needed or not
* @param n
- * The number of objects to add in the ring from the obj_table.
+ * The number of elements we will want to enqueue, i.e. how far should the
+ * head be moved
* @param behavior
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where enqueue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where enqueue finishes
+ * @param free_entries
+ * Returns the amount of free space in the ring BEFORE head was moved
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
-__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *free_entries)
{
- uint32_t prod_head, prod_next;
- uint32_t cons_tail, free_entries;
- const unsigned max = n;
+ const uint32_t mask = r->mask;
+ unsigned int max = n;
int success;
- unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
- int ret;
-
- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
- /* move prod.head atomically */
do {
/* Reset n to the initial burst count */
n = max;
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
+ *old_head = r->prod.head;
+ const uint32_t cons_tail = r->cons.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
+ * *old_head > cons_tail). So 'free_entries' is always between 0
* and size(ring)-1. */
- free_entries = (mask + cons_tail - prod_head);
+ *free_entries = (mask + cons_tail - *old_head);
/* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
- return -ENOBUFS;
- }
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
- return 0;
- }
-
- n = free_entries;
- }
- }
-
- prod_next = prod_head + n;
- success = rte_atomic32_cmpset(&r->prod.head, prod_head,
- prod_next);
+ if (unlikely(n > *free_entries))
+ n = (behavior == RTE_RING_QUEUE_FIXED) ?
+ 0 : *free_entries;
+
+ if (n == 0)
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sp)
+ r->prod.head = *new_head, success = 1;
+ else
+ success = rte_atomic32_cmpset(&r->prod.head,
+ *old_head, *new_head);
} while (unlikely(success == 0));
-
- /* write entries in ring */
- ENQUEUE_PTRS();
- rte_smp_wmb();
-
- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }
-
- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->prod.tail != prod_head)) {
- rte_pause();
-
- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
- r->prod.tail = prod_next;
- return ret;
+ return n;
}
/**
- * @internal Enqueue several objects on a ring (NOT multi-producers safe).
+ * @internal Enqueue several objects on the ring
*
- * @param r
+ * @param r
* A pointer to the ring structure.
* @param obj_table
* A pointer to a table of void * pointers (objects).
@@ -525,242 +434,142 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* The number of objects to add in the ring from the obj_table.
* @param behavior
* RTE_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible from ring
+ * @param is_sp
+ * Indicates whether to use single producer or multi-producer head update
+ * @param free_space
+ * returns the amount of space after the enqueue operation has finished
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
+ * Actual number of objects enqueued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
-__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ int is_sp, unsigned int *free_space)
{
- uint32_t prod_head, cons_tail;
- uint32_t prod_next, free_entries;
- unsigned i;
- uint32_t mask = r->prod.mask;
- int ret;
-
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- free_entries = mask + cons_tail - prod_head;
-
- /* check that we have enough room in ring */
- if (unlikely(n > free_entries)) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, enq_fail, n);
- return -ENOBUFS;
- }
- else {
- /* No free entry available */
- if (unlikely(free_entries == 0)) {
- __RING_STAT_ADD(r, enq_fail, n);
- return 0;
- }
-
- n = free_entries;
- }
- }
-
- prod_next = prod_head + n;
- r->prod.head = prod_next;
-
- /* write entries in ring */
- ENQUEUE_PTRS();
+ uint32_t prod_head, prod_next;
+ uint32_t free_entries;
+
+ n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
+ &prod_head, &prod_next, &free_entries);
+ if (n == 0)
+ goto end;
+
+ ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);
rte_smp_wmb();
- /* if we exceed the watermark */
- if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | RTE_RING_QUOT_EXCEED);
- __RING_STAT_ADD(r, enq_quota, n);
- }
- else {
- ret = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : n;
- __RING_STAT_ADD(r, enq_success, n);
- }
-
- r->prod.tail = prod_next;
- return ret;
+ update_tail(&r->prod, prod_head, prod_next, is_sp);
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
+ return n;
}
/**
- * @internal Dequeue several objects from a ring (multi-consumers safe). When
- * the request objects are more than the available objects, only dequeue the
- * actual number of objects
- *
- * This function uses a "compare and set" instruction to move the
- * consumer index atomically.
+ * @internal This function updates the consumer head for dequeue
*
* @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to the ring structure
+ * @param is_sc
+ * Indicates whether multi-consumer path is needed or not
* @param n
- * The number of objects to dequeue from the ring to the obj_table.
+ * The number of elements we will want to enqueue, i.e. how far should the
+ * head be moved
* @param behavior
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param old_head
+ * Returns head value as it was before the move, i.e. where dequeue starts
+ * @param new_head
+ * Returns the current/new head value i.e. where dequeue finishes
+ * @param entries
+ * Returns the number of entries in the ring BEFORE head was moved
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-
-static inline int __attribute__((always_inline))
-__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ uint32_t *old_head, uint32_t *new_head,
+ uint32_t *entries)
{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- const unsigned max = n;
+ unsigned int max = n;
int success;
- unsigned i, rep = 0;
- uint32_t mask = r->prod.mask;
-
- /* Avoid the unnecessary cmpset operation below, which is also
- * potentially harmful when n equals 0. */
- if (n == 0)
- return 0;
/* move cons.head atomically */
do {
/* Restore n as it may change every loop */
n = max;
- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
+ *old_head = r->cons.head;
+ const uint32_t prod_tail = r->prod.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
* cons_head > prod_tail). So 'entries' is always between 0
* and size(ring)-1. */
- entries = (prod_tail - cons_head);
+ *entries = (prod_tail - *old_head);
/* Set the actual entries for dequeue */
- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
- return -ENOENT;
- }
- else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
- return 0;
- }
-
- n = entries;
- }
- }
-
- cons_next = cons_head + n;
- success = rte_atomic32_cmpset(&r->cons.head, cons_head,
- cons_next);
+ if (n > *entries)
+ n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
+
+ if (unlikely(n == 0))
+ return 0;
+
+ *new_head = *old_head + n;
+ if (is_sc)
+ r->cons.head = *new_head, success = 1;
+ else
+ success = rte_atomic32_cmpset(&r->cons.head, *old_head,
+ *new_head);
} while (unlikely(success == 0));
-
- /* copy in table */
- DEQUEUE_PTRS();
- rte_smp_rmb();
-
- /*
- * If there are other dequeues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (unlikely(r->cons.tail != cons_head)) {
- rte_pause();
-
- /* Set RTE_RING_PAUSE_REP_COUNT to avoid spin too long waiting
- * for other thread finish. It gives pre-empted thread a chance
- * to proceed and finish with ring dequeue operation. */
- if (RTE_RING_PAUSE_REP_COUNT &&
- ++rep == RTE_RING_PAUSE_REP_COUNT) {
- rep = 0;
- sched_yield();
- }
- }
- __RING_STAT_ADD(r, deq_success, n);
- r->cons.tail = cons_next;
-
- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ return n;
}
/**
- * @internal Dequeue several objects from a ring (NOT multi-consumers safe).
- * When the request objects are more than the available objects, only dequeue
- * the actual number of objects
+ * @internal Dequeue several objects from the ring
*
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of void * pointers (objects).
* @param n
- * The number of objects to dequeue from the ring to the obj_table.
+ * The number of objects to pull from the ring.
* @param behavior
* RTE_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * RTE_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
+ * RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from ring
+ * @param is_sc
+ * Indicates whether to use single consumer or multi-consumer head update
+ * @param available
+ * returns the number of remaining ring entries after the dequeue has finished
* @return
- * Depend on the behavior value
- * if behavior = RTE_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = RTE_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
+ * - Actual number of objects dequeued.
+ * If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
-static inline int __attribute__((always_inline))
-__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
- unsigned n, enum rte_ring_queue_behavior behavior)
+static inline __attribute__((always_inline)) unsigned int
+__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
+ unsigned int n, enum rte_ring_queue_behavior behavior,
+ int is_sc, unsigned int *available)
{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- unsigned i;
- uint32_t mask = r->prod.mask;
-
- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * cons_head > prod_tail). So 'entries' is always between 0
- * and size(ring)-1. */
- entries = prod_tail - cons_head;
-
- if (n > entries) {
- if (behavior == RTE_RING_QUEUE_FIXED) {
- __RING_STAT_ADD(r, deq_fail, n);
- return -ENOENT;
- }
- else {
- if (unlikely(entries == 0)){
- __RING_STAT_ADD(r, deq_fail, n);
- return 0;
- }
-
- n = entries;
- }
- }
-
- cons_next = cons_head + n;
- r->cons.head = cons_next;
-
- /* copy in table */
- DEQUEUE_PTRS();
+ uint32_t cons_head, cons_next;
+ uint32_t entries;
+
+ n = __rte_ring_move_cons_head(r, is_sc, n, behavior,
+ &cons_head, &cons_next, &entries);
+ if (n == 0)
+ goto end;
+
+ DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);
rte_smp_rmb();
- __RING_STAT_ADD(r, deq_success, n);
- r->cons.tail = cons_next;
- return behavior == RTE_RING_QUEUE_FIXED ? 0 : n;
+ update_tail(&r->cons, cons_head, cons_next, is_sc);
+
+end:
+ if (available != NULL)
+ *available = entries - n;
+ return n;
}
/**
@@ -775,17 +584,18 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
- * - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_MP, free_space);
}
/**
@@ -797,17 +607,18 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
- * - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_SP, free_space);
}
/**
@@ -823,20 +634,18 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
- * - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ * The number of objects enqueued, either 0 or n
*/
-static inline int __attribute__((always_inline))
+static inline unsigned int __attribute__((always_inline))
rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_bulk(r, obj_table, n);
- else
- return rte_ring_mp_enqueue_bulk(r, obj_table, n);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ r->prod.single, free_space);
}
/**
@@ -851,14 +660,12 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_mp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}
/**
@@ -870,14 +677,12 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1);
+ return rte_ring_sp_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}
/**
@@ -893,17 +698,12 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
* A pointer to the object to be added.
* @return
* - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
rte_ring_enqueue(struct rte_ring *r, void *obj)
{
- if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue(r, obj);
- else
- return rte_ring_mp_enqueue(r, obj);
+ return rte_ring_enqueue_bulk(r, &obj, 1, NULL) ? 0 : -ENOBUFS;
}
/**
@@ -918,15 +718,18 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
-rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+static inline unsigned int __attribute__((always_inline))
+rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_MC, available);
}
/**
@@ -939,15 +742,18 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* @param n
* The number of objects to dequeue from the ring to the obj_table,
* must be strictly positive.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
-rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+static inline unsigned int __attribute__((always_inline))
+rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ __IS_SC, available);
}
/**
@@ -963,18 +769,18 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue, no object is
- * dequeued.
+ * The number of objects dequeued, either 0 or n
*/
-static inline int __attribute__((always_inline))
-rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+static inline unsigned int __attribute__((always_inline))
+rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n,
+ unsigned int *available)
{
- if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_bulk(r, obj_table, n);
- else
- return rte_ring_mc_dequeue_bulk(r, obj_table, n);
+ return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
+ r->cons.single, available);
}
/**
@@ -995,7 +801,7 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
static inline int __attribute__((always_inline))
rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_mc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}
/**
@@ -1013,7 +819,7 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
+ return rte_ring_sc_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOBUFS;
}
/**
@@ -1035,10 +841,7 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
static inline int __attribute__((always_inline))
rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
- if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue(r, obj_p);
- else
- return rte_ring_mc_dequeue(r, obj_p);
+ return rte_ring_dequeue_bulk(r, obj_p, 1, NULL) ? 0 : -ENOENT;
}
/**
@@ -1055,7 +858,7 @@ rte_ring_full(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return ((cons_tail - prod_tail - 1) & r->prod.mask) == 0;
+ return ((cons_tail - prod_tail - 1) & r->mask) == 0;
}
/**
@@ -1088,7 +891,7 @@ rte_ring_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (prod_tail - cons_tail) & r->prod.mask;
+ return (prod_tail - cons_tail) & r->mask;
}
/**
@@ -1104,7 +907,21 @@ rte_ring_free_count(const struct rte_ring *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
- return (cons_tail - prod_tail - 1) & r->prod.mask;
+ return (cons_tail - prod_tail - 1) & r->mask;
+}
+
+/**
+ * Return the size of the ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * The number of elements which can be stored in the ring.
+ */
+static inline unsigned int
+rte_ring_get_size(const struct rte_ring *r)
+{
+ return r->size;
}
/**
@@ -1139,14 +956,18 @@ struct rte_ring *rte_ring_lookup(const char *name);
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
* - n: Actual number of objects enqueued.
*/
static inline unsigned __attribute__((always_inline))
rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
}
/**
@@ -1158,14 +979,18 @@ rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
* - n: Actual number of objects enqueued.
*/
static inline unsigned __attribute__((always_inline))
rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_do_enqueue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
}
/**
@@ -1181,17 +1006,18 @@ rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
+ * @param free_space
+ * if non-NULL, returns the amount of space in the ring after the
+ * enqueue operation has finished.
* @return
* - n: Actual number of objects enqueued.
*/
static inline unsigned __attribute__((always_inline))
rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
- unsigned n)
+ unsigned int n, unsigned int *free_space)
{
- if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_burst(r, obj_table, n);
- else
- return rte_ring_mp_enqueue_burst(r, obj_table, n);
+ return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
+ r->prod.single, free_space);
}
/**
@@ -1208,13 +1034,18 @@ rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
}
/**
@@ -1228,13 +1059,18 @@ rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
}
/**
@@ -1250,16 +1086,19 @@ rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
* A pointer to a table of void * pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
+ * @param available
+ * If non-NULL, returns the number of remaining ring entries after the
+ * dequeue has finished.
* @return
* - Number of objects dequeued
*/
static inline unsigned __attribute__((always_inline))
-rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned n)
+rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table,
+ unsigned int n, unsigned int *available)
{
- if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_burst(r, obj_table, n);
- else
- return rte_ring_mc_dequeue_burst(r, obj_table, n);
+ return __rte_ring_do_dequeue(r, obj_table, n,
+ RTE_RING_QUEUE_VARIABLE,
+ r->cons.single, available);
}
#ifdef __cplusplus
diff --git a/lib/librte_sched/Makefile b/lib/librte_sched/Makefile
index 44cb780f..18274e73 100644
--- a/lib/librte_sched/Makefile
+++ b/lib/librte_sched/Makefile
@@ -58,9 +58,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_SCHED) += rte_reciprocal.c
SYMLINK-$(CONFIG_RTE_LIBRTE_SCHED)-include := rte_sched.h rte_bitmap.h rte_sched_common.h rte_red.h rte_approx.h
SYMLINK-$(CONFIG_RTE_LIBRTE_SCHED)-include += rte_reciprocal.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_SCHED) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_SCHED) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_SCHED) += lib/librte_net lib/librte_timer
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_table/Makefile b/lib/librte_table/Makefile
index c82c7696..0d06d36a 100644
--- a/lib/librte_table/Makefile
+++ b/lib/librte_table/Makefile
@@ -72,15 +72,4 @@ SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_lru.h
SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_array.h
SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_stub.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) := lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_port
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_lpm
-ifeq ($(CONFIG_RTE_LIBRTE_ACL),y)
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_acl
-endif
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_hash
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_table/rte_table_acl.c b/lib/librte_table/rte_table_acl.c
index 8f1f8ceb..3c05e4a8 100644
--- a/lib/librte_table/rte_table_acl.c
+++ b/lib/librte_table/rte_table_acl.c
@@ -87,7 +87,7 @@ rte_table_acl_create(
int socket_id,
uint32_t entry_size)
{
- struct rte_table_acl_params *p = (struct rte_table_acl_params *) params;
+ struct rte_table_acl_params *p = params;
struct rte_table_acl *acl;
uint32_t action_table_size, acl_rule_list_size, acl_rule_memory_size;
uint32_t total_size;
@@ -168,7 +168,7 @@ rte_table_acl_create(
static int
rte_table_acl_free(void *table)
{
- struct rte_table_acl *acl = (struct rte_table_acl *) table;
+ struct rte_table_acl *acl = table;
/* Check input parameters */
if (table == NULL) {
@@ -248,9 +248,9 @@ rte_table_acl_entry_add(
int *key_found,
void **entry_ptr)
{
- struct rte_table_acl *acl = (struct rte_table_acl *) table;
+ struct rte_table_acl *acl = table;
struct rte_table_acl_rule_add_params *rule =
- (struct rte_table_acl_rule_add_params *) key;
+ key;
struct rte_pipeline_acl_rule acl_rule;
struct rte_acl_rule *rule_location;
struct rte_acl_ctx *ctx;
@@ -366,9 +366,9 @@ rte_table_acl_entry_delete(
int *key_found,
void *entry)
{
- struct rte_table_acl *acl = (struct rte_table_acl *) table;
+ struct rte_table_acl *acl = table;
struct rte_table_acl_rule_delete_params *rule =
- (struct rte_table_acl_rule_delete_params *) key;
+ key;
struct rte_acl_rule *deleted_rule = NULL;
struct rte_acl_ctx *ctx;
uint32_t pos, pos_valid, i;
@@ -450,7 +450,7 @@ rte_table_acl_entry_add_bulk(
int *key_found,
void **entries_ptr)
{
- struct rte_table_acl *acl = (struct rte_table_acl *) table;
+ struct rte_table_acl *acl = table;
struct rte_acl_ctx *ctx;
uint32_t rule_pos[n_keys];
uint32_t i;
@@ -507,7 +507,7 @@ rte_table_acl_entry_add_bulk(
return -EINVAL;
}
- rule = (struct rte_table_acl_rule_add_params *) keys[i];
+ rule = keys[i];
if (rule->priority > RTE_ACL_MAX_PRIORITY) {
RTE_LOG(ERR, TABLE, "%s: Priority is too high\n", __func__);
return -EINVAL;
@@ -518,7 +518,7 @@ rte_table_acl_entry_add_bulk(
memset(key_found, 0, n_keys * sizeof(int));
for (i = 0; i < n_keys; i++) {
struct rte_table_acl_rule_add_params *rule =
- (struct rte_table_acl_rule_add_params *) keys[i];
+ keys[i];
struct rte_pipeline_acl_rule acl_rule;
struct rte_acl_rule *rule_location;
uint32_t free_pos, free_pos_valid, j;
@@ -636,7 +636,7 @@ rte_table_acl_entry_delete_bulk(
int *key_found,
void **entries)
{
- struct rte_table_acl *acl = (struct rte_table_acl *) table;
+ struct rte_table_acl *acl = table;
struct rte_acl_rule *deleted_rules[n_keys];
uint32_t rule_pos[n_keys];
struct rte_acl_ctx *ctx;
@@ -675,7 +675,7 @@ rte_table_acl_entry_delete_bulk(
memset(rule_pos, 0, n_keys * sizeof(uint32_t));
for (i = 0; i < n_keys; i++) {
struct rte_table_acl_rule_delete_params *rule =
- (struct rte_table_acl_rule_delete_params *) keys[i];
+ keys[i];
uint32_t pos_valid, j;
/* Look for the rule in the table */
@@ -792,7 +792,7 @@ rte_table_acl_lookup(
pkts_mask &= ~pkt_mask;
- if (action_table_pos != RTE_ACL_INVALID_USERDATA) {
+ if (action_table_pos != 0) {
pkts_out_mask |= pkt_mask;
entries[pkt_pos] = (void *)
&acl->memory[action_table_pos *
@@ -810,7 +810,7 @@ rte_table_acl_lookup(
static int
rte_table_acl_stats_read(void *table, struct rte_table_stats *stats, int clear)
{
- struct rte_table_acl *acl = (struct rte_table_acl *) table;
+ struct rte_table_acl *acl = table;
if (stats != NULL)
memcpy(stats, &acl->stats, sizeof(acl->stats));
diff --git a/lib/librte_table/rte_table_array.c b/lib/librte_table/rte_table_array.c
index 3bb68d11..cf7be88a 100644
--- a/lib/librte_table/rte_table_array.c
+++ b/lib/librte_table/rte_table_array.c
@@ -74,8 +74,7 @@ struct rte_table_array {
static void *
rte_table_array_create(void *params, int socket_id, uint32_t entry_size)
{
- struct rte_table_array_params *p =
- (struct rte_table_array_params *) params;
+ struct rte_table_array_params *p = params;
struct rte_table_array *t;
uint32_t total_cl_size, total_size;
@@ -111,7 +110,7 @@ rte_table_array_create(void *params, int socket_id, uint32_t entry_size)
static int
rte_table_array_free(void *table)
{
- struct rte_table_array *t = (struct rte_table_array *) table;
+ struct rte_table_array *t = table;
/* Check input parameters */
if (t == NULL) {
@@ -133,8 +132,8 @@ rte_table_array_entry_add(
int *key_found,
void **entry_ptr)
{
- struct rte_table_array *t = (struct rte_table_array *) table;
- struct rte_table_array_key *k = (struct rte_table_array_key *) key;
+ struct rte_table_array *t = table;
+ struct rte_table_array_key *k = key;
uint8_t *table_entry;
/* Check input parameters */
@@ -214,7 +213,7 @@ rte_table_array_lookup(
static int
rte_table_array_stats_read(void *table, struct rte_table_stats *stats, int clear)
{
- struct rte_table_array *array = (struct rte_table_array *) table;
+ struct rte_table_array *array = table;
if (stats != NULL)
memcpy(stats, &array->stats, sizeof(array->stats));
diff --git a/lib/librte_table/rte_table_hash_cuckoo.c b/lib/librte_table/rte_table_hash_cuckoo.c
index ff7baee3..da1597fa 100644
--- a/lib/librte_table/rte_table_hash_cuckoo.c
+++ b/lib/librte_table/rte_table_hash_cuckoo.c
@@ -190,7 +190,7 @@ rte_table_hash_cuckoo_free(void *table) {
return -EINVAL;
}
- struct rte_table_hash *t = (struct rte_table_hash *)table;
+ struct rte_table_hash *t = table;
rte_hash_free(t->h_table);
rte_free(t);
@@ -218,7 +218,7 @@ rte_table_hash_cuckoo_entry_add(void *table, void *key, void *entry,
return -EINVAL;
}
- struct rte_table_hash *t = (struct rte_table_hash *)table;
+ struct rte_table_hash *t = table;
/* Find Existing entries */
pos = rte_hash_lookup(t->h_table, key);
@@ -268,7 +268,7 @@ rte_table_hash_cuckoo_entry_delete(void *table, void *key,
return -EINVAL;
}
- struct rte_table_hash *t = (struct rte_table_hash *)table;
+ struct rte_table_hash *t = table;
pos = rte_hash_del_key(t->h_table, key);
if (pos >= 0) {
@@ -359,7 +359,7 @@ static int
rte_table_hash_cuckoo_stats_read(void *table, struct rte_table_stats *stats,
int clear)
{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct rte_table_hash *t = table;
if (stats != NULL)
memcpy(stats, &t->stats, sizeof(t->stats));
diff --git a/lib/librte_table/rte_table_hash_ext.c b/lib/librte_table/rte_table_hash_ext.c
index e283a3d1..e7181026 100644
--- a/lib/librte_table/rte_table_hash_ext.c
+++ b/lib/librte_table/rte_table_hash_ext.c
@@ -172,7 +172,7 @@ static void *
rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
{
struct rte_table_hash_ext_params *p =
- (struct rte_table_hash_ext_params *) params;
+ params;
struct rte_table_hash *t;
uint32_t total_size, table_meta_sz;
uint32_t bucket_sz, bucket_ext_sz, key_sz;
@@ -258,7 +258,7 @@ rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
static int
rte_table_hash_ext_free(void *table)
{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct rte_table_hash *t = table;
/* Check input parameters */
if (t == NULL)
@@ -272,7 +272,7 @@ static int
rte_table_hash_ext_entry_add(void *table, void *key, void *entry,
int *key_found, void **entry_ptr)
{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct rte_table_hash *t = table;
struct bucket *bkt0, *bkt, *bkt_prev;
uint64_t sig;
uint32_t bkt_index, i;
@@ -373,7 +373,7 @@ static int
rte_table_hash_ext_entry_delete(void *table, void *key, int *key_found,
void *entry)
{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct rte_table_hash *t = table;
struct bucket *bkt0, *bkt, *bkt_prev;
uint64_t sig;
uint32_t bkt_index, i;
@@ -444,7 +444,6 @@ static int rte_table_hash_ext_lookup_unoptimized(
uint64_t pkts_mask_out = 0;
__rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
- RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(t, n_pkts_in);
for ( ; pkts_mask; ) {
struct bucket *bkt0, *bkt;
@@ -490,7 +489,6 @@ static int rte_table_hash_ext_lookup_unoptimized(
}
*lookup_hit_mask = pkts_mask_out;
- RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - __builtin_popcountll(pkts_mask_out));
return 0;
}
@@ -874,9 +872,13 @@ static int rte_table_hash_ext_lookup(
RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(t, n_pkts_in);
/* Cannot run the pipeline with less than 7 packets */
- if (__builtin_popcountll(pkts_mask) < 7)
- return rte_table_hash_ext_lookup_unoptimized(table, pkts,
+ if (__builtin_popcountll(pkts_mask) < 7) {
+ status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
pkts_mask, lookup_hit_mask, entries, 0);
+ RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
+ __builtin_popcountll(*lookup_hit_mask));
+ return status;
+ }
/* Pipeline stage 0 */
lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
@@ -1007,9 +1009,13 @@ static int rte_table_hash_ext_lookup_dosig(
RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(t, n_pkts_in);
/* Cannot run the pipeline with less than 7 packets */
- if (__builtin_popcountll(pkts_mask) < 7)
- return rte_table_hash_ext_lookup_unoptimized(table, pkts,
+ if (__builtin_popcountll(pkts_mask) < 7) {
+ status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
pkts_mask, lookup_hit_mask, entries, 1);
+ RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
+ __builtin_popcountll(*lookup_hit_mask));
+ return status;
+ }
/* Pipeline stage 0 */
lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
@@ -1125,7 +1131,7 @@ static int rte_table_hash_ext_lookup_dosig(
static int
rte_table_hash_ext_stats_read(void *table, struct rte_table_stats *stats, int clear)
{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct rte_table_hash *t = table;
if (stats != NULL)
memcpy(stats, &t->stats, sizeof(t->stats));
diff --git a/lib/librte_table/rte_table_hash_key16.c b/lib/librte_table/rte_table_hash_key16.c
index 08d4d77e..ce057b78 100644
--- a/lib/librte_table/rte_table_hash_key16.c
+++ b/lib/librte_table/rte_table_hash_key16.c
@@ -187,7 +187,7 @@ rte_table_hash_create_key16_lru(void *params,
static int
rte_table_hash_free_key16_lru(void *table)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
/* Check input parameters */
if (f == NULL) {
@@ -207,7 +207,7 @@ rte_table_hash_entry_add_key16_lru(
int *key_found,
void **entry_ptr)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
struct rte_bucket_4_16 *bucket;
uint64_t signature, pos;
uint32_t bucket_index, i;
@@ -273,7 +273,7 @@ rte_table_hash_entry_delete_key16_lru(
int *key_found,
void *entry)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
struct rte_bucket_4_16 *bucket;
uint64_t signature;
uint32_t bucket_index, i;
@@ -407,7 +407,7 @@ rte_table_hash_create_key16_ext(void *params,
static int
rte_table_hash_free_key16_ext(void *table)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
/* Check input parameters */
if (f == NULL) {
@@ -427,7 +427,7 @@ rte_table_hash_entry_add_key16_ext(
int *key_found,
void **entry_ptr)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
uint64_t signature;
uint32_t bucket_index, i;
@@ -504,7 +504,7 @@ rte_table_hash_entry_delete_key16_ext(
int *key_found,
void *entry)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
struct rte_bucket_4_16 *bucket0, *bucket, *bucket_prev;
uint64_t signature;
uint32_t bucket_index, i;
@@ -1463,7 +1463,7 @@ grind_next_buckets:
static int
rte_table_hash_key16_stats_read(void *table, struct rte_table_stats *stats, int clear)
{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct rte_table_hash *t = table;
if (stats != NULL)
memcpy(stats, &t->stats, sizeof(t->stats));
diff --git a/lib/librte_table/rte_table_hash_key32.c b/lib/librte_table/rte_table_hash_key32.c
index 161f6b7a..31fe6fda 100644
--- a/lib/librte_table/rte_table_hash_key32.c
+++ b/lib/librte_table/rte_table_hash_key32.c
@@ -179,7 +179,7 @@ rte_table_hash_create_key32_lru(void *params,
static int
rte_table_hash_free_key32_lru(void *table)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
/* Check input parameters */
if (f == NULL) {
@@ -199,7 +199,7 @@ rte_table_hash_entry_add_key32_lru(
int *key_found,
void **entry_ptr)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
struct rte_bucket_4_32 *bucket;
uint64_t signature, pos;
uint32_t bucket_index, i;
@@ -265,7 +265,7 @@ rte_table_hash_entry_delete_key32_lru(
int *key_found,
void *entry)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
struct rte_bucket_4_32 *bucket;
uint64_t signature;
uint32_t bucket_index, i;
@@ -329,7 +329,7 @@ rte_table_hash_create_key32_ext(void *params,
uint32_t entry_size)
{
struct rte_table_hash_key32_ext_params *p =
- (struct rte_table_hash_key32_ext_params *) params;
+ params;
struct rte_table_hash *f;
uint32_t n_buckets, n_buckets_ext, n_entries_per_bucket;
uint32_t key_size, bucket_size_cl, stack_size_cl, total_size, i;
@@ -392,7 +392,7 @@ rte_table_hash_create_key32_ext(void *params,
static int
rte_table_hash_free_key32_ext(void *table)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
/* Check input parameters */
if (f == NULL) {
@@ -412,7 +412,7 @@ rte_table_hash_entry_add_key32_ext(
int *key_found,
void **entry_ptr)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
uint64_t signature;
uint32_t bucket_index, i;
@@ -492,7 +492,7 @@ rte_table_hash_entry_delete_key32_ext(
int *key_found,
void *entry)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
struct rte_bucket_4_32 *bucket0, *bucket, *bucket_prev;
uint64_t signature;
uint32_t bucket_index, i;
@@ -1110,7 +1110,7 @@ grind_next_buckets:
static int
rte_table_hash_key32_stats_read(void *table, struct rte_table_stats *stats, int clear)
{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct rte_table_hash *t = table;
if (stats != NULL)
memcpy(stats, &t->stats, sizeof(t->stats));
diff --git a/lib/librte_table/rte_table_hash_key8.c b/lib/librte_table/rte_table_hash_key8.c
index b04f60dc..5f0c6566 100644
--- a/lib/librte_table/rte_table_hash_key8.c
+++ b/lib/librte_table/rte_table_hash_key8.c
@@ -180,7 +180,7 @@ rte_table_hash_create_key8_lru(void *params, int socket_id, uint32_t entry_size)
static int
rte_table_hash_free_key8_lru(void *table)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
/* Check input parameters */
if (f == NULL) {
@@ -200,7 +200,7 @@ rte_table_hash_entry_add_key8_lru(
int *key_found,
void **entry_ptr)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
struct rte_bucket_4_8 *bucket;
uint64_t signature, mask, pos;
uint32_t bucket_index, i;
@@ -263,7 +263,7 @@ rte_table_hash_entry_delete_key8_lru(
int *key_found,
void *entry)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
struct rte_bucket_4_8 *bucket;
uint64_t signature, mask;
uint32_t bucket_index, i;
@@ -392,7 +392,7 @@ rte_table_hash_create_key8_ext(void *params, int socket_id, uint32_t entry_size)
static int
rte_table_hash_free_key8_ext(void *table)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
/* Check input parameters */
if (f == NULL) {
@@ -412,7 +412,7 @@ rte_table_hash_entry_add_key8_ext(
int *key_found,
void **entry_ptr)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
struct rte_bucket_4_8 *bucket0, *bucket, *bucket_prev;
uint64_t signature;
uint32_t bucket_index, i;
@@ -493,7 +493,7 @@ rte_table_hash_entry_delete_key8_ext(
int *key_found,
void *entry)
{
- struct rte_table_hash *f = (struct rte_table_hash *) table;
+ struct rte_table_hash *f = table;
struct rte_bucket_4_8 *bucket0, *bucket, *bucket_prev;
uint64_t signature;
uint32_t bucket_index, i;
@@ -1415,7 +1415,7 @@ grind_next_buckets:
static int
rte_table_hash_key8_stats_read(void *table, struct rte_table_stats *stats, int clear)
{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct rte_table_hash *t = table;
if (stats != NULL)
memcpy(stats, &t->stats, sizeof(t->stats));
diff --git a/lib/librte_table/rte_table_hash_lru.c b/lib/librte_table/rte_table_hash_lru.c
index 407c62ab..5a4864e2 100644
--- a/lib/librte_table/rte_table_hash_lru.c
+++ b/lib/librte_table/rte_table_hash_lru.c
@@ -149,7 +149,7 @@ static void *
rte_table_hash_lru_create(void *params, int socket_id, uint32_t entry_size)
{
struct rte_table_hash_lru_params *p =
- (struct rte_table_hash_lru_params *) params;
+ params;
struct rte_table_hash *t;
uint32_t total_size, table_meta_sz;
uint32_t bucket_sz, key_sz, key_stack_sz, data_sz;
@@ -227,7 +227,7 @@ rte_table_hash_lru_create(void *params, int socket_id, uint32_t entry_size)
static int
rte_table_hash_lru_free(void *table)
{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct rte_table_hash *t = table;
/* Check input parameters */
if (t == NULL)
@@ -241,7 +241,7 @@ static int
rte_table_hash_lru_entry_add(void *table, void *key, void *entry,
int *key_found, void **entry_ptr)
{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct rte_table_hash *t = table;
struct bucket *bkt;
uint64_t sig;
uint32_t bkt_index, i;
@@ -325,7 +325,7 @@ static int
rte_table_hash_lru_entry_delete(void *table, void *key, int *key_found,
void *entry)
{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct rte_table_hash *t = table;
struct bucket *bkt;
uint64_t sig;
uint32_t bkt_index, i;
@@ -1068,7 +1068,7 @@ static int rte_table_hash_lru_lookup_dosig(
static int
rte_table_hash_lru_stats_read(void *table, struct rte_table_stats *stats, int clear)
{
- struct rte_table_hash *t = (struct rte_table_hash *) table;
+ struct rte_table_hash *t = table;
if (stats != NULL)
memcpy(stats, &t->stats, sizeof(t->stats));
diff --git a/lib/librte_table/rte_table_lpm.c b/lib/librte_table/rte_table_lpm.c
index 598b79f5..2f472b97 100644
--- a/lib/librte_table/rte_table_lpm.c
+++ b/lib/librte_table/rte_table_lpm.c
@@ -82,7 +82,7 @@ struct rte_table_lpm {
static void *
rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
{
- struct rte_table_lpm_params *p = (struct rte_table_lpm_params *) params;
+ struct rte_table_lpm_params *p = params;
struct rte_table_lpm *lpm;
struct rte_lpm_config lpm_config;
@@ -154,7 +154,7 @@ rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
static int
rte_table_lpm_free(void *table)
{
- struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
+ struct rte_table_lpm *lpm = table;
/* Check input parameters */
if (lpm == NULL) {
@@ -210,8 +210,8 @@ rte_table_lpm_entry_add(
int *key_found,
void **entry_ptr)
{
- struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
- struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
+ struct rte_table_lpm *lpm = table;
+ struct rte_table_lpm_key *ip_prefix = key;
uint32_t nht_pos, nht_pos0_valid;
int status;
uint32_t nht_pos0 = 0;
@@ -277,8 +277,8 @@ rte_table_lpm_entry_delete(
int *key_found,
void *entry)
{
- struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
- struct rte_table_lpm_key *ip_prefix = (struct rte_table_lpm_key *) key;
+ struct rte_table_lpm *lpm = table;
+ struct rte_table_lpm_key *ip_prefix = key;
uint32_t nht_pos;
int status;
@@ -372,7 +372,7 @@ rte_table_lpm_lookup(
static int
rte_table_lpm_stats_read(void *table, struct rte_table_stats *stats, int clear)
{
- struct rte_table_lpm *t = (struct rte_table_lpm *) table;
+ struct rte_table_lpm *t = table;
if (stats != NULL)
memcpy(stats, &t->stats, sizeof(t->stats));
diff --git a/lib/librte_table/rte_table_lpm_ipv6.c b/lib/librte_table/rte_table_lpm_ipv6.c
index 836f4cf6..5def2ad9 100644
--- a/lib/librte_table/rte_table_lpm_ipv6.c
+++ b/lib/librte_table/rte_table_lpm_ipv6.c
@@ -81,7 +81,7 @@ static void *
rte_table_lpm_ipv6_create(void *params, int socket_id, uint32_t entry_size)
{
struct rte_table_lpm_ipv6_params *p =
- (struct rte_table_lpm_ipv6_params *) params;
+ params;
struct rte_table_lpm_ipv6 *lpm;
struct rte_lpm6_config lpm6_config;
uint32_t total_size, nht_size;
@@ -152,7 +152,7 @@ rte_table_lpm_ipv6_create(void *params, int socket_id, uint32_t entry_size)
static int
rte_table_lpm_ipv6_free(void *table)
{
- struct rte_table_lpm_ipv6 *lpm = (struct rte_table_lpm_ipv6 *) table;
+ struct rte_table_lpm_ipv6 *lpm = table;
/* Check input parameters */
if (lpm == NULL) {
@@ -208,12 +208,11 @@ rte_table_lpm_ipv6_entry_add(
int *key_found,
void **entry_ptr)
{
- struct rte_table_lpm_ipv6 *lpm = (struct rte_table_lpm_ipv6 *) table;
+ struct rte_table_lpm_ipv6 *lpm = table;
struct rte_table_lpm_ipv6_key *ip_prefix =
- (struct rte_table_lpm_ipv6_key *) key;
- uint32_t nht_pos, nht_pos0_valid;
+ key;
+ uint32_t nht_pos, nht_pos0, nht_pos0_valid;
int status;
- uint8_t nht_pos0;
/* Check input parameters */
if (lpm == NULL) {
@@ -256,7 +255,7 @@ rte_table_lpm_ipv6_entry_add(
/* Add rule to low level LPM table */
if (rte_lpm6_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth,
- (uint8_t) nht_pos) < 0) {
+ nht_pos) < 0) {
RTE_LOG(ERR, TABLE, "%s: LPM IPv6 rule add failed\n", __func__);
return -1;
}
@@ -277,10 +276,10 @@ rte_table_lpm_ipv6_entry_delete(
int *key_found,
void *entry)
{
- struct rte_table_lpm_ipv6 *lpm = (struct rte_table_lpm_ipv6 *) table;
+ struct rte_table_lpm_ipv6 *lpm = table;
struct rte_table_lpm_ipv6_key *ip_prefix =
- (struct rte_table_lpm_ipv6_key *) key;
- uint8_t nht_pos;
+ key;
+ uint32_t nht_pos;
int status;
/* Check input parameters */
@@ -356,7 +355,7 @@ rte_table_lpm_ipv6_lookup(
uint8_t *ip = RTE_MBUF_METADATA_UINT8_PTR(pkt,
lpm->offset);
int status;
- uint8_t nht_pos;
+ uint32_t nht_pos;
status = rte_lpm6_lookup(lpm->lpm, ip, &nht_pos);
if (status == 0) {
@@ -375,7 +374,7 @@ rte_table_lpm_ipv6_lookup(
static int
rte_table_lpm_ipv6_stats_read(void *table, struct rte_table_stats *stats, int clear)
{
- struct rte_table_lpm_ipv6 *t = (struct rte_table_lpm_ipv6 *) table;
+ struct rte_table_lpm_ipv6 *t = table;
if (stats != NULL)
memcpy(stats, &t->stats, sizeof(t->stats));
diff --git a/lib/librte_table/rte_table_stub.c b/lib/librte_table/rte_table_stub.c
index 691d681a..1ee26bf2 100644
--- a/lib/librte_table/rte_table_stub.c
+++ b/lib/librte_table/rte_table_stub.c
@@ -98,7 +98,7 @@ rte_table_stub_lookup(
static int
rte_table_stub_stats_read(void *table, struct rte_table_stats *stats, int clear)
{
- struct rte_table_stub *t = (struct rte_table_stub *) table;
+ struct rte_table_stub *t = table;
if (stats != NULL)
memcpy(stats, &t->stats, sizeof(t->stats));
diff --git a/lib/librte_timer/Makefile b/lib/librte_timer/Makefile
index 2aabef85..03a15390 100644
--- a/lib/librte_timer/Makefile
+++ b/lib/librte_timer/Makefile
@@ -46,7 +46,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_TIMER) := rte_timer.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_TIMER)-include := rte_timer.h
-# this lib needs eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_TIMER) += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_vhost/Makefile b/lib/librte_vhost/Makefile
index 415ffc6e..4a116fe3 100644
--- a/lib/librte_vhost/Makefile
+++ b/lib/librte_vhost/Makefile
@@ -36,7 +36,7 @@ LIB = librte_vhost.a
EXPORT_MAP := rte_vhost_version.map
-LIBABIVER := 3
+LIBABIVER := 4
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 -D_FILE_OFFSET_BITS=64
CFLAGS += -I vhost_user
@@ -51,13 +51,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_VHOST) := fd_man.c socket.c vhost.c vhost_user.c \
virtio_net.c
# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_VHOST)-include += rte_virtio_net.h
-
-# dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VHOST) += lib/librte_net
+SYMLINK-$(CONFIG_RTE_LIBRTE_VHOST)-include += rte_vhost.h
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_vhost/fd_man.c b/lib/librte_vhost/fd_man.c
index 8a075da2..2ceacc9a 100644
--- a/lib/librte_vhost/fd_man.c
+++ b/lib/librte_vhost/fd_man.c
@@ -65,17 +65,12 @@ fdset_move(struct fdset *pfdset, int dst, int src)
pfdset->rwfds[dst] = pfdset->rwfds[src];
}
-/*
- * Find deleted fd entries and remove them
- */
static void
-fdset_shrink(struct fdset *pfdset)
+fdset_shrink_nolock(struct fdset *pfdset)
{
int i;
int last_valid_idx = get_last_valid_idx(pfdset, pfdset->num - 1);
- pthread_mutex_lock(&pfdset->fd_mutex);
-
for (i = 0; i < last_valid_idx; i++) {
if (pfdset->fd[i].fd != -1)
continue;
@@ -84,7 +79,16 @@ fdset_shrink(struct fdset *pfdset)
last_valid_idx = get_last_valid_idx(pfdset, last_valid_idx - 1);
}
pfdset->num = last_valid_idx + 1;
+}
+/*
+ * Find deleted fd entries and remove them
+ */
+static void
+fdset_shrink(struct fdset *pfdset)
+{
+ pthread_mutex_lock(&pfdset->fd_mutex);
+ fdset_shrink_nolock(pfdset);
pthread_mutex_unlock(&pfdset->fd_mutex);
}
@@ -151,8 +155,12 @@ fdset_add(struct fdset *pfdset, int fd, fd_cb rcb, fd_cb wcb, void *dat)
pthread_mutex_lock(&pfdset->fd_mutex);
i = pfdset->num < MAX_FDS ? pfdset->num++ : -1;
if (i == -1) {
- pthread_mutex_unlock(&pfdset->fd_mutex);
- return -2;
+ fdset_shrink_nolock(pfdset);
+ i = pfdset->num < MAX_FDS ? pfdset->num++ : -1;
+ if (i == -1) {
+ pthread_mutex_unlock(&pfdset->fd_mutex);
+ return -2;
+ }
}
fdset_add_fd(pfdset, i, fd, rcb, wcb, dat);
@@ -202,8 +210,8 @@ fdset_del(struct fdset *pfdset, int fd)
* will wait until the flag is reset to zero(which indicates the callback is
* finished), then it could free the context after fdset_del.
*/
-void
-fdset_event_dispatch(struct fdset *pfdset)
+void *
+fdset_event_dispatch(void *arg)
{
int i;
struct pollfd *pfd;
@@ -213,9 +221,10 @@ fdset_event_dispatch(struct fdset *pfdset)
int fd, numfds;
int remove1, remove2;
int need_shrink;
+ struct fdset *pfdset = arg;
if (pfdset == NULL)
- return;
+ return NULL;
while (1) {
@@ -286,4 +295,6 @@ fdset_event_dispatch(struct fdset *pfdset)
if (need_shrink)
fdset_shrink(pfdset);
}
+
+ return NULL;
}
diff --git a/lib/librte_vhost/fd_man.h b/lib/librte_vhost/fd_man.h
index d319cac6..90d34db1 100644
--- a/lib/librte_vhost/fd_man.h
+++ b/lib/librte_vhost/fd_man.h
@@ -64,6 +64,6 @@ int fdset_add(struct fdset *pfdset, int fd,
void *fdset_del(struct fdset *pfdset, int fd);
-void fdset_event_dispatch(struct fdset *pfdset);
+void *fdset_event_dispatch(void *arg);
#endif
diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
new file mode 100644
index 00000000..605e47cb
--- /dev/null
+++ b/lib/librte_vhost/rte_vhost.h
@@ -0,0 +1,439 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_VHOST_H_
+#define _RTE_VHOST_H_
+
+/**
+ * @file
+ * Interface to vhost-user
+ */
+
+#include <stdint.h>
+#include <sys/eventfd.h>
+
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* These are not C++-aware. */
+#include <linux/vhost.h>
+#include <linux/virtio_ring.h>
+
+#define RTE_VHOST_USER_CLIENT (1ULL << 0)
+#define RTE_VHOST_USER_NO_RECONNECT (1ULL << 1)
+#define RTE_VHOST_USER_DEQUEUE_ZERO_COPY (1ULL << 2)
+
+/**
+ * Information relating to memory regions including offsets to
+ * addresses in QEMUs memory file.
+ */
+struct rte_vhost_mem_region {
+ uint64_t guest_phys_addr;
+ uint64_t guest_user_addr;
+ uint64_t host_user_addr;
+ uint64_t size;
+ void *mmap_addr;
+ uint64_t mmap_size;
+ int fd;
+};
+
+/**
+ * Memory structure includes region and mapping information.
+ */
+struct rte_vhost_memory {
+ uint32_t nregions;
+ struct rte_vhost_mem_region regions[];
+};
+
+struct rte_vhost_vring {
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+ uint64_t log_guest_addr;
+
+ int callfd;
+ int kickfd;
+ uint16_t size;
+};
+
+/**
+ * Device and vring operations.
+ */
+struct vhost_device_ops {
+ int (*new_device)(int vid); /**< Add device. */
+ void (*destroy_device)(int vid); /**< Remove device. */
+
+ int (*vring_state_changed)(int vid, uint16_t queue_id, int enable); /**< triggered when a vring is enabled or disabled */
+
+ /**
+ * Features could be changed after the feature negotiation.
+ * For example, VHOST_F_LOG_ALL will be set/cleared at the
+ * start/end of live migration, respectively. This callback
+ * is used to inform the application on such change.
+ */
+ int (*features_changed)(int vid, uint64_t features);
+
+ void *reserved[4]; /**< Reserved for future extension */
+};
+
+/**
+ * Convert guest physical address to host virtual address
+ *
+ * @param mem
+ * the guest memory regions
+ * @param gpa
+ * the guest physical address for querying
+ * @return
+ * the host virtual address on success, 0 on failure
+ */
+static inline uint64_t __attribute__((always_inline))
+rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
+{
+ struct rte_vhost_mem_region *reg;
+ uint32_t i;
+
+ for (i = 0; i < mem->nregions; i++) {
+ reg = &mem->regions[i];
+ if (gpa >= reg->guest_phys_addr &&
+ gpa < reg->guest_phys_addr + reg->size) {
+ return gpa - reg->guest_phys_addr +
+ reg->host_user_addr;
+ }
+ }
+
+ return 0;
+}
+
+#define RTE_VHOST_NEED_LOG(features) ((features) & (1ULL << VHOST_F_LOG_ALL))
+
+/**
+ * Log the memory write start with given address.
+ *
+ * This function only need be invoked when the live migration starts.
+ * Therefore, we won't need call it at all in the most of time. For
+ * making the performance impact be minimum, it's suggested to do a
+ * check before calling it:
+ *
+ * if (unlikely(RTE_VHOST_NEED_LOG(features)))
+ * rte_vhost_log_write(vid, addr, len);
+ *
+ * @param vid
+ * vhost device ID
+ * @param addr
+ * the starting address for write
+ * @param len
+ * the length to write
+ */
+void rte_vhost_log_write(int vid, uint64_t addr, uint64_t len);
+
+/**
+ * Log the used ring update start at given offset.
+ *
+ * Same as rte_vhost_log_write, it's suggested to do a check before
+ * calling it:
+ *
+ * if (unlikely(RTE_VHOST_NEED_LOG(features)))
+ * rte_vhost_log_used_vring(vid, vring_idx, offset, len);
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * the vring index
+ * @param offset
+ * the offset inside the used ring
+ * @param len
+ * the length to write
+ */
+void rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
+ uint64_t offset, uint64_t len);
+
+int rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable);
+
+/**
+ * Register vhost driver. path could be different for multiple
+ * instance support.
+ */
+int rte_vhost_driver_register(const char *path, uint64_t flags);
+
+/* Unregister vhost driver. This is only meaningful to vhost user. */
+int rte_vhost_driver_unregister(const char *path);
+
+/**
+ * Set the feature bits the vhost-user driver supports.
+ *
+ * @param path
+ * The vhost-user socket file path
+ * @param features
+ * Supported features
+ * @return
+ * 0 on success, -1 on failure
+ */
+int rte_vhost_driver_set_features(const char *path, uint64_t features);
+
+/**
+ * Enable vhost-user driver features.
+ *
+ * Note that
+ * - the param features should be a subset of the feature bits provided
+ * by rte_vhost_driver_set_features().
+ * - it must be invoked before vhost-user negotiation starts.
+ *
+ * @param path
+ * The vhost-user socket file path
+ * @param features
+ * Features to enable
+ * @return
+ * 0 on success, -1 on failure
+ */
+int rte_vhost_driver_enable_features(const char *path, uint64_t features);
+
+/**
+ * Disable vhost-user driver features.
+ *
+ * The two notes at rte_vhost_driver_enable_features() also apply here.
+ *
+ * @param path
+ * The vhost-user socket file path
+ * @param features
+ * Features to disable
+ * @return
+ * 0 on success, -1 on failure
+ */
+int rte_vhost_driver_disable_features(const char *path, uint64_t features);
+
+/**
+ * Get the feature bits before feature negotiation.
+ *
+ * @param path
+ * The vhost-user socket file path
+ * @param features
+ * A pointer to store the queried feature bits
+ * @return
+ * 0 on success, -1 on failure
+ */
+int rte_vhost_driver_get_features(const char *path, uint64_t *features);
+
+/**
+ * Get the feature bits after negotiation
+ *
+ * @param vid
+ * Vhost device ID
+ * @param features
+ * A pointer to store the queried feature bits
+ * @return
+ * 0 on success, -1 on failure
+ */
+int rte_vhost_get_negotiated_features(int vid, uint64_t *features);
+
+/* Register callbacks. */
+int rte_vhost_driver_callback_register(const char *path,
+ struct vhost_device_ops const * const ops);
+
+/**
+ *
+ * Start the vhost-user driver.
+ *
+ * This function triggers the vhost-user negotiation.
+ *
+ * @param path
+ * The vhost-user socket file path
+ * @return
+ * 0 on success, -1 on failure
+ */
+int rte_vhost_driver_start(const char *path);
+
+/**
+ * Get the MTU value of the device if set in QEMU.
+ *
+ * @param vid
+ * virtio-net device ID
+ * @param mtu
+ * The variable to store the MTU value
+ *
+ * @return
+ * 0: success
+ * -EAGAIN: device not yet started
+ * -ENOTSUP: device does not support MTU feature
+ */
+int rte_vhost_get_mtu(int vid, uint16_t *mtu);
+
+/**
+ * Get the numa node from which the virtio net device's memory
+ * is allocated.
+ *
+ * @param vid
+ * vhost device ID
+ *
+ * @return
+ * The numa node, -1 on failure
+ */
+int rte_vhost_get_numa_node(int vid);
+
+/**
+ * @deprecated
+ * Get the number of queues the device supports.
+ *
+ * Note this function is deprecated, as it returns a queue pair number,
+ * which is vhost specific. Instead, rte_vhost_get_vring_num should
+ * be used.
+ *
+ * @param vid
+ * vhost device ID
+ *
+ * @return
+ * The number of queues, 0 on failure
+ */
+__rte_deprecated
+uint32_t rte_vhost_get_queue_num(int vid);
+
+/**
+ * Get the number of vrings the device supports.
+ *
+ * @param vid
+ * vhost device ID
+ *
+ * @return
+ * The number of vrings, 0 on failure
+ */
+uint16_t rte_vhost_get_vring_num(int vid);
+
+/**
+ * Get the virtio net device's ifname, which is the vhost-user socket
+ * file path.
+ *
+ * @param vid
+ * vhost device ID
+ * @param buf
+ * The buffer to stored the queried ifname
+ * @param len
+ * The length of buf
+ *
+ * @return
+ * 0 on success, -1 on failure
+ */
+int rte_vhost_get_ifname(int vid, char *buf, size_t len);
+
+/**
+ * Get how many avail entries are left in the queue
+ *
+ * @param vid
+ * vhost device ID
+ * @param queue_id
+ * virtio queue index
+ *
+ * @return
+ * num of avail entires left
+ */
+uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);
+
+struct rte_mbuf;
+struct rte_mempool;
+/**
+ * This function adds buffers to the virtio devices RX virtqueue. Buffers can
+ * be received from the physical port or from another virtual device. A packet
+ * count is returned to indicate the number of packets that were succesfully
+ * added to the RX queue.
+ * @param vid
+ * vhost device ID
+ * @param queue_id
+ * virtio queue index in mq case
+ * @param pkts
+ * array to contain packets to be enqueued
+ * @param count
+ * packets num to be enqueued
+ * @return
+ * num of packets enqueued
+ */
+uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count);
+
+/**
+ * This function gets guest buffers from the virtio device TX virtqueue,
+ * construct host mbufs, copies guest buffer content to host mbufs and
+ * store them in pkts to be processed.
+ * @param vid
+ * vhost device ID
+ * @param queue_id
+ * virtio queue index in mq case
+ * @param mbuf_pool
+ * mbuf_pool where host mbuf is allocated.
+ * @param pkts
+ * array to contain packets to be dequeued
+ * @param count
+ * packets num to be dequeued
+ * @return
+ * num of packets dequeued
+ */
+uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
+
+/**
+ * Get guest mem table: a list of memory regions.
+ *
+ * An rte_vhost_vhost_memory object will be allocated internaly, to hold the
+ * guest memory regions. Application should free it at destroy_device()
+ * callback.
+ *
+ * @param vid
+ * vhost device ID
+ * @param mem
+ * To store the returned mem regions
+ * @return
+ * 0 on success, -1 on failure
+ */
+int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
+
+/**
+ * Get guest vring info, including the vring address, vring size, etc.
+ *
+ * @param vid
+ * vhost device ID
+ * @param vring_idx
+ * vring index
+ * @param vring
+ * the structure to hold the requested vring info
+ * @return
+ * 0 on success, -1 on failure
+ */
+int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
+ struct rte_vhost_vring *vring);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_VHOST_H_ */
diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
index 5ceaa8a5..07858732 100644
--- a/lib/librte_vhost/rte_vhost_version.map
+++ b/lib/librte_vhost/rte_vhost_version.map
@@ -4,12 +4,8 @@ DPDK_2.0 {
rte_vhost_dequeue_burst;
rte_vhost_driver_callback_register;
rte_vhost_driver_register;
- rte_vhost_driver_session_start;
rte_vhost_enable_guest_notification;
rte_vhost_enqueue_burst;
- rte_vhost_feature_disable;
- rte_vhost_feature_enable;
- rte_vhost_feature_get;
local: *;
};
@@ -30,3 +26,22 @@ DPDK_16.07 {
rte_vhost_get_queue_num;
} DPDK_2.1;
+
+DPDK_17.05 {
+ global:
+
+ rte_vhost_driver_disable_features;
+ rte_vhost_driver_enable_features;
+ rte_vhost_driver_get_features;
+ rte_vhost_driver_set_features;
+ rte_vhost_driver_start;
+ rte_vhost_get_mem_table;
+ rte_vhost_get_mtu;
+ rte_vhost_get_negotiated_features;
+ rte_vhost_get_vhost_vring;
+ rte_vhost_get_vring_num;
+ rte_vhost_gpa_to_vva;
+ rte_vhost_log_used_vring;
+ rte_vhost_log_write;
+
+} DPDK_16.07;
diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
deleted file mode 100644
index 926039c5..00000000
--- a/lib/librte_vhost/rte_virtio_net.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _VIRTIO_NET_H_
-#define _VIRTIO_NET_H_
-
-/**
- * @file
- * Interface to vhost net
- */
-
-#include <stdint.h>
-#include <linux/vhost.h>
-#include <linux/virtio_ring.h>
-#include <linux/virtio_net.h>
-#include <sys/eventfd.h>
-#include <sys/socket.h>
-#include <linux/if.h>
-
-#include <rte_memory.h>
-#include <rte_mempool.h>
-#include <rte_ether.h>
-
-#define RTE_VHOST_USER_CLIENT (1ULL << 0)
-#define RTE_VHOST_USER_NO_RECONNECT (1ULL << 1)
-#define RTE_VHOST_USER_DEQUEUE_ZERO_COPY (1ULL << 2)
-
-/* Enum for virtqueue management. */
-enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
-
-/**
- * Device and vring operations.
- */
-struct virtio_net_device_ops {
- int (*new_device)(int vid); /**< Add device. */
- void (*destroy_device)(int vid); /**< Remove device. */
-
- int (*vring_state_changed)(int vid, uint16_t queue_id, int enable); /**< triggered when a vring is enabled or disabled */
-
- void *reserved[5]; /**< Reserved for future extension */
-};
-
-/**
- * Disable features in feature_mask. Returns 0 on success.
- */
-int rte_vhost_feature_disable(uint64_t feature_mask);
-
-/**
- * Enable features in feature_mask. Returns 0 on success.
- */
-int rte_vhost_feature_enable(uint64_t feature_mask);
-
-/* Returns currently supported vhost features */
-uint64_t rte_vhost_feature_get(void);
-
-int rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable);
-
-/**
- * Register vhost driver. path could be different for multiple
- * instance support.
- */
-int rte_vhost_driver_register(const char *path, uint64_t flags);
-
-/* Unregister vhost driver. This is only meaningful to vhost user. */
-int rte_vhost_driver_unregister(const char *path);
-
-/* Register callbacks. */
-int rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const);
-/* Start vhost driver session blocking loop. */
-int rte_vhost_driver_session_start(void);
-
-/**
- * Get the numa node from which the virtio net device's memory
- * is allocated.
- *
- * @param vid
- * virtio-net device ID
- *
- * @return
- * The numa node, -1 on failure
- */
-int rte_vhost_get_numa_node(int vid);
-
-/**
- * Get the number of queues the device supports.
- *
- * @param vid
- * virtio-net device ID
- *
- * @return
- * The number of queues, 0 on failure
- */
-uint32_t rte_vhost_get_queue_num(int vid);
-
-/**
- * Get the virtio net device's ifname, which is the vhost-user socket
- * file path.
- *
- * @param vid
- * virtio-net device ID
- * @param buf
- * The buffer to stored the queried ifname
- * @param len
- * The length of buf
- *
- * @return
- * 0 on success, -1 on failure
- */
-int rte_vhost_get_ifname(int vid, char *buf, size_t len);
-
-/**
- * Get how many avail entries are left in the queue
- *
- * @param vid
- * virtio-net device ID
- * @param queue_id
- * virtio queue index
- *
- * @return
- * num of avail entires left
- */
-uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);
-
-/**
- * This function adds buffers to the virtio devices RX virtqueue. Buffers can
- * be received from the physical port or from another virtual device. A packet
- * count is returned to indicate the number of packets that were succesfully
- * added to the RX queue.
- * @param vid
- * virtio-net device ID
- * @param queue_id
- * virtio queue index in mq case
- * @param pkts
- * array to contain packets to be enqueued
- * @param count
- * packets num to be enqueued
- * @return
- * num of packets enqueued
- */
-uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
- struct rte_mbuf **pkts, uint16_t count);
-
-/**
- * This function gets guest buffers from the virtio device TX virtqueue,
- * construct host mbufs, copies guest buffer content to host mbufs and
- * store them in pkts to be processed.
- * @param vid
- * virtio-net device
- * @param queue_id
- * virtio queue index in mq case
- * @param mbuf_pool
- * mbuf_pool where host mbuf is allocated.
- * @param pkts
- * array to contain packets to be dequeued
- * @param count
- * packets num to be dequeued
- * @return
- * num of packets dequeued
- */
-uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
- struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
-
-#endif /* _VIRTIO_NET_H_ */
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index aaa9c270..c7f99b08 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -52,22 +52,42 @@
#include "vhost.h"
#include "vhost_user.h"
+
+TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
+
/*
* Every time rte_vhost_driver_register() is invoked, an associated
* vhost_user_socket struct will be created.
*/
struct vhost_user_socket {
+ struct vhost_user_connection_list conn_list;
+ pthread_mutex_t conn_mutex;
char *path;
- int listenfd;
- int connfd;
+ int socket_fd;
+ struct sockaddr_un un;
bool is_server;
bool reconnect;
bool dequeue_zero_copy;
+
+ /*
+ * The "supported_features" indicates the feature bits the
+ * vhost driver supports. The "features" indicates the feature
+ * bits after the rte_vhost_driver_features_disable/enable().
+ * It is also the final feature bits used for vhost-user
+ * features negotiation.
+ */
+ uint64_t supported_features;
+ uint64_t features;
+
+ struct vhost_device_ops const *notify_ops;
};
struct vhost_user_connection {
struct vhost_user_socket *vsocket;
+ int connfd;
int vid;
+
+ TAILQ_ENTRY(vhost_user_connection) next;
};
#define MAX_VHOST_SOCKET 1024
@@ -82,7 +102,8 @@ struct vhost_user {
static void vhost_user_server_new_connection(int fd, void *data, int *remove);
static void vhost_user_read_cb(int fd, void *dat, int *remove);
-static int vhost_user_create_client(struct vhost_user_socket *vsocket);
+static int create_unix_socket(struct vhost_user_socket *vsocket);
+static int vhost_user_start_client(struct vhost_user_socket *vsocket);
static struct vhost_user vhost_user = {
.fdset = {
@@ -209,19 +230,24 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid);
- vsocket->connfd = fd;
+ conn->connfd = fd;
conn->vsocket = vsocket;
conn->vid = vid;
ret = fdset_add(&vhost_user.fdset, fd, vhost_user_read_cb,
NULL, conn);
if (ret < 0) {
- vsocket->connfd = -1;
+ conn->connfd = -1;
free(conn);
close(fd);
RTE_LOG(ERR, VHOST_CONFIG,
"failed to add fd %d into vhost server fdset\n",
fd);
+ return;
}
+
+ pthread_mutex_lock(&vsocket->conn_mutex);
+ TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
+ pthread_mutex_unlock(&vsocket->conn_mutex);
}
/* call back when there is new vhost-user connection from client */
@@ -247,29 +273,36 @@ vhost_user_read_cb(int connfd, void *dat, int *remove)
ret = vhost_user_msg_handler(conn->vid, connfd);
if (ret < 0) {
- vsocket->connfd = -1;
close(connfd);
*remove = 1;
vhost_destroy_device(conn->vid);
+
+ pthread_mutex_lock(&vsocket->conn_mutex);
+ TAILQ_REMOVE(&vsocket->conn_list, conn, next);
+ pthread_mutex_unlock(&vsocket->conn_mutex);
+
free(conn);
- if (vsocket->reconnect)
- vhost_user_create_client(vsocket);
+ if (vsocket->reconnect) {
+ create_unix_socket(vsocket);
+ vhost_user_start_client(vsocket);
+ }
}
}
static int
-create_unix_socket(const char *path, struct sockaddr_un *un, bool is_server)
+create_unix_socket(struct vhost_user_socket *vsocket)
{
int fd;
+ struct sockaddr_un *un = &vsocket->un;
fd = socket(AF_UNIX, SOCK_STREAM, 0);
if (fd < 0)
return -1;
RTE_LOG(INFO, VHOST_CONFIG, "vhost-user %s: socket created, fd: %d\n",
- is_server ? "server" : "client", fd);
+ vsocket->is_server ? "server" : "client", fd);
- if (!is_server && fcntl(fd, F_SETFL, O_NONBLOCK)) {
+ if (!vsocket->is_server && fcntl(fd, F_SETFL, O_NONBLOCK)) {
RTE_LOG(ERR, VHOST_CONFIG,
"vhost-user: can't set nonblocking mode for socket, fd: "
"%d (%s)\n", fd, strerror(errno));
@@ -279,25 +312,21 @@ create_unix_socket(const char *path, struct sockaddr_un *un, bool is_server)
memset(un, 0, sizeof(*un));
un->sun_family = AF_UNIX;
- strncpy(un->sun_path, path, sizeof(un->sun_path));
+ strncpy(un->sun_path, vsocket->path, sizeof(un->sun_path));
un->sun_path[sizeof(un->sun_path) - 1] = '\0';
- return fd;
+ vsocket->socket_fd = fd;
+ return 0;
}
static int
-vhost_user_create_server(struct vhost_user_socket *vsocket)
+vhost_user_start_server(struct vhost_user_socket *vsocket)
{
- int fd;
int ret;
- struct sockaddr_un un;
+ int fd = vsocket->socket_fd;
const char *path = vsocket->path;
- fd = create_unix_socket(path, &un, vsocket->is_server);
- if (fd < 0)
- return -1;
-
- ret = bind(fd, (struct sockaddr *)&un, sizeof(un));
+ ret = bind(fd, (struct sockaddr *)&vsocket->un, sizeof(vsocket->un));
if (ret < 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"failed to bind to %s: %s; remove it and try again\n",
@@ -310,7 +339,6 @@ vhost_user_create_server(struct vhost_user_socket *vsocket)
if (ret < 0)
goto err;
- vsocket->listenfd = fd;
ret = fdset_add(&vhost_user.fdset, fd, vhost_user_server_new_connection,
NULL, vsocket);
if (ret < 0) {
@@ -429,26 +457,21 @@ vhost_user_reconnect_init(void)
}
static int
-vhost_user_create_client(struct vhost_user_socket *vsocket)
+vhost_user_start_client(struct vhost_user_socket *vsocket)
{
- int fd;
int ret;
- struct sockaddr_un un;
+ int fd = vsocket->socket_fd;
const char *path = vsocket->path;
struct vhost_user_reconnect *reconn;
- fd = create_unix_socket(path, &un, vsocket->is_server);
- if (fd < 0)
- return -1;
-
- ret = vhost_user_connect_nonblock(fd, (struct sockaddr *)&un,
- sizeof(un));
+ ret = vhost_user_connect_nonblock(fd, (struct sockaddr *)&vsocket->un,
+ sizeof(vsocket->un));
if (ret == 0) {
vhost_user_add_connection(fd, vsocket);
return 0;
}
- RTE_LOG(ERR, VHOST_CONFIG,
+ RTE_LOG(WARNING, VHOST_CONFIG,
"failed to connect to %s: %s\n",
path, strerror(errno));
@@ -457,7 +480,7 @@ vhost_user_create_client(struct vhost_user_socket *vsocket)
return -1;
}
- RTE_LOG(ERR, VHOST_CONFIG, "%s: reconnecting...\n", path);
+ RTE_LOG(INFO, VHOST_CONFIG, "%s: reconnecting...\n", path);
reconn = malloc(sizeof(*reconn));
if (reconn == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
@@ -465,7 +488,7 @@ vhost_user_create_client(struct vhost_user_socket *vsocket)
close(fd);
return -1;
}
- reconn->un = un;
+ reconn->un = vsocket->un;
reconn->fd = fd;
reconn->vsocket = vsocket;
pthread_mutex_lock(&reconn_list.mutex);
@@ -475,6 +498,94 @@ vhost_user_create_client(struct vhost_user_socket *vsocket)
return 0;
}
+static struct vhost_user_socket *
+find_vhost_user_socket(const char *path)
+{
+ int i;
+
+ for (i = 0; i < vhost_user.vsocket_cnt; i++) {
+ struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
+
+ if (!strcmp(vsocket->path, path))
+ return vsocket;
+ }
+
+ return NULL;
+}
+
+int
+rte_vhost_driver_disable_features(const char *path, uint64_t features)
+{
+ struct vhost_user_socket *vsocket;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
+ if (vsocket)
+ vsocket->features &= ~features;
+ pthread_mutex_unlock(&vhost_user.mutex);
+
+ return vsocket ? 0 : -1;
+}
+
+int
+rte_vhost_driver_enable_features(const char *path, uint64_t features)
+{
+ struct vhost_user_socket *vsocket;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
+ if (vsocket) {
+ if ((vsocket->supported_features & features) != features) {
+ /*
+ * trying to enable features the driver doesn't
+ * support.
+ */
+ pthread_mutex_unlock(&vhost_user.mutex);
+ return -1;
+ }
+ vsocket->features |= features;
+ }
+ pthread_mutex_unlock(&vhost_user.mutex);
+
+ return vsocket ? 0 : -1;
+}
+
+int
+rte_vhost_driver_set_features(const char *path, uint64_t features)
+{
+ struct vhost_user_socket *vsocket;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
+ if (vsocket) {
+ vsocket->supported_features = features;
+ vsocket->features = features;
+ }
+ pthread_mutex_unlock(&vhost_user.mutex);
+
+ return vsocket ? 0 : -1;
+}
+
+int
+rte_vhost_driver_get_features(const char *path, uint64_t *features)
+{
+ struct vhost_user_socket *vsocket;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
+ if (vsocket)
+ *features = vsocket->features;
+ pthread_mutex_unlock(&vhost_user.mutex);
+
+ if (!vsocket) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "socket file %s is not registered yet.\n", path);
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
/*
* Register a new vhost-user socket; here we could act as server
* (the default case), or client (when RTE_VHOST_USER_CLIENT) flag
@@ -502,9 +613,25 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
goto out;
memset(vsocket, 0, sizeof(struct vhost_user_socket));
vsocket->path = strdup(path);
- vsocket->connfd = -1;
+ TAILQ_INIT(&vsocket->conn_list);
+ pthread_mutex_init(&vsocket->conn_mutex, NULL);
vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
+ /*
+ * Set the supported features correctly for the builtin vhost-user
+ * net driver.
+ *
+ * Applications know nothing about features the builtin virtio net
+ * driver (virtio_net.c) supports, thus it's not possible for them
+ * to invoke rte_vhost_driver_set_features(). To workaround it, here
+ * we set it unconditionally. If the application want to implement
+ * another vhost-user driver (say SCSI), it should call the
+ * rte_vhost_driver_set_features(), which will overwrite following
+ * two values.
+ */
+ vsocket->supported_features = VIRTIO_NET_SUPPORTED_FEATURES;
+ vsocket->features = VIRTIO_NET_SUPPORTED_FEATURES;
+
if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
if (vsocket->reconnect && reconn_tid == 0) {
@@ -514,11 +641,10 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
goto out;
}
}
- ret = vhost_user_create_client(vsocket);
} else {
vsocket->is_server = true;
- ret = vhost_user_create_server(vsocket);
}
+ ret = create_unix_socket(vsocket);
if (ret < 0) {
free(vsocket->path);
free(vsocket);
@@ -565,7 +691,7 @@ rte_vhost_driver_unregister(const char *path)
{
int i;
int count;
- struct vhost_user_connection *conn;
+ struct vhost_user_connection *conn, *next;
pthread_mutex_lock(&vhost_user.mutex);
@@ -574,22 +700,29 @@ rte_vhost_driver_unregister(const char *path)
if (!strcmp(vsocket->path, path)) {
if (vsocket->is_server) {
- fdset_del(&vhost_user.fdset, vsocket->listenfd);
- close(vsocket->listenfd);
+ fdset_del(&vhost_user.fdset, vsocket->socket_fd);
+ close(vsocket->socket_fd);
unlink(path);
} else if (vsocket->reconnect) {
vhost_user_remove_reconnect(vsocket);
}
- conn = fdset_del(&vhost_user.fdset, vsocket->connfd);
- if (conn) {
+ pthread_mutex_lock(&vsocket->conn_mutex);
+ for (conn = TAILQ_FIRST(&vsocket->conn_list);
+ conn != NULL;
+ conn = next) {
+ next = TAILQ_NEXT(conn, next);
+
+ fdset_del(&vhost_user.fdset, conn->connfd);
RTE_LOG(INFO, VHOST_CONFIG,
"free connfd = %d for device '%s'\n",
- vsocket->connfd, path);
- close(vsocket->connfd);
+ conn->connfd, path);
+ close(conn->connfd);
vhost_destroy_device(conn->vid);
+ TAILQ_REMOVE(&vsocket->conn_list, conn, next);
free(conn);
}
+ pthread_mutex_unlock(&vsocket->conn_mutex);
free(vsocket->path);
free(vsocket);
@@ -607,9 +740,59 @@ rte_vhost_driver_unregister(const char *path)
return -1;
}
+/*
+ * Register ops so that we can add/remove device to data core.
+ */
+int
+rte_vhost_driver_callback_register(const char *path,
+ struct vhost_device_ops const * const ops)
+{
+ struct vhost_user_socket *vsocket;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
+ if (vsocket)
+ vsocket->notify_ops = ops;
+ pthread_mutex_unlock(&vhost_user.mutex);
+
+ return vsocket ? 0 : -1;
+}
+
+struct vhost_device_ops const *
+vhost_driver_callback_get(const char *path)
+{
+ struct vhost_user_socket *vsocket;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
+ pthread_mutex_unlock(&vhost_user.mutex);
+
+ return vsocket ? vsocket->notify_ops : NULL;
+}
+
int
-rte_vhost_driver_session_start(void)
+rte_vhost_driver_start(const char *path)
{
- fdset_event_dispatch(&vhost_user.fdset);
- return 0;
+ struct vhost_user_socket *vsocket;
+ static pthread_t fdset_tid;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
+ pthread_mutex_unlock(&vhost_user.mutex);
+
+ if (!vsocket)
+ return -1;
+
+ if (fdset_tid == 0) {
+ int ret = pthread_create(&fdset_tid, NULL, fdset_event_dispatch,
+ &vhost_user.fdset);
+ if (ret < 0)
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to create fdset handling thread");
+ }
+
+ if (vsocket->is_server)
+ return vhost_user_start_server(vsocket);
+ else
+ return vhost_user_start_client(vsocket);
}
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index e4150934..0b19d2eb 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -45,36 +45,12 @@
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
#include "vhost.h"
-#define VHOST_USER_F_PROTOCOL_FEATURES 30
-
-/* Features supported by this lib. */
-#define VHOST_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
- (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
- (1ULL << VIRTIO_NET_F_CTRL_RX) | \
- (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
- (VHOST_SUPPORTS_MQ) | \
- (1ULL << VIRTIO_F_VERSION_1) | \
- (1ULL << VHOST_F_LOG_ALL) | \
- (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
- (1ULL << VIRTIO_NET_F_HOST_TSO4) | \
- (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
- (1ULL << VIRTIO_NET_F_CSUM) | \
- (1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
- (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
- (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
- (1ULL << VIRTIO_RING_F_INDIRECT_DESC))
-
-uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
-
struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
-/* device ops to add/remove device to/from data core. */
-struct virtio_net_device_ops const *notify_ops;
-
struct virtio_net *
get_device(int vid)
{
@@ -108,10 +84,8 @@ cleanup_device(struct virtio_net *dev, int destroy)
vhost_backend_cleanup(dev);
- for (i = 0; i < dev->virt_qp_nb; i++) {
- cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ], destroy);
- cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ], destroy);
- }
+ for (i = 0; i < dev->nr_vring; i++)
+ cleanup_vq(dev->virtqueue[i], destroy);
}
/*
@@ -121,24 +95,21 @@ static void
free_device(struct virtio_net *dev)
{
uint32_t i;
- struct vhost_virtqueue *rxq, *txq;
+ struct vhost_virtqueue *vq;
- for (i = 0; i < dev->virt_qp_nb; i++) {
- rxq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ];
- txq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ];
+ for (i = 0; i < dev->nr_vring; i++) {
+ vq = dev->virtqueue[i];
- rte_free(rxq->shadow_used_ring);
- rte_free(txq->shadow_used_ring);
+ rte_free(vq->shadow_used_ring);
- /* rxq and txq are allocated together as queue-pair */
- rte_free(rxq);
+ rte_free(vq);
}
rte_free(dev);
}
static void
-init_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
+init_vring_queue(struct vhost_virtqueue *vq)
{
memset(vq, 0, sizeof(struct vhost_virtqueue));
@@ -148,69 +119,48 @@ init_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
/* Backends are set to -1 indicating an inactive device. */
vq->backend = -1;
- /* always set the default vq pair to enabled */
- if (qp_idx == 0)
- vq->enabled = 1;
+ /*
+ * always set the vq to enabled; this is to keep compatibility
+ * with the old QEMU, whereas there is no SET_VRING_ENABLE message.
+ */
+ vq->enabled = 1;
TAILQ_INIT(&vq->zmbuf_list);
}
static void
-init_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
-{
- uint32_t base_idx = qp_idx * VIRTIO_QNUM;
-
- init_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx);
- init_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx);
-}
-
-static void
-reset_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
+reset_vring_queue(struct vhost_virtqueue *vq)
{
int callfd;
callfd = vq->callfd;
- init_vring_queue(vq, qp_idx);
+ init_vring_queue(vq);
vq->callfd = callfd;
}
-static void
-reset_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
-{
- uint32_t base_idx = qp_idx * VIRTIO_QNUM;
-
- reset_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx);
- reset_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx);
-}
-
int
-alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
+alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
- struct vhost_virtqueue *virtqueue = NULL;
- uint32_t virt_rx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_RXQ;
- uint32_t virt_tx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_TXQ;
+ struct vhost_virtqueue *vq;
- virtqueue = rte_malloc(NULL,
- sizeof(struct vhost_virtqueue) * VIRTIO_QNUM, 0);
- if (virtqueue == NULL) {
+ vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
+ if (vq == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
- "Failed to allocate memory for virt qp:%d.\n", qp_idx);
+ "Failed to allocate memory for vring:%u.\n", vring_idx);
return -1;
}
- dev->virtqueue[virt_rx_q_idx] = virtqueue;
- dev->virtqueue[virt_tx_q_idx] = virtqueue + VIRTIO_TXQ;
-
- init_vring_queue_pair(dev, qp_idx);
+ dev->virtqueue[vring_idx] = vq;
+ init_vring_queue(vq);
- dev->virt_qp_nb += 1;
+ dev->nr_vring += 1;
return 0;
}
/*
* Reset some variables in device structure, while keeping few
- * others untouched, such as vid, ifname, virt_qp_nb: they
+ * others untouched, such as vid, ifname, nr_vring: they
* should be same unless the device is removed.
*/
void
@@ -222,8 +172,8 @@ reset_device(struct virtio_net *dev)
dev->protocol_features = 0;
dev->flags = 0;
- for (i = 0; i < dev->virt_qp_nb; i++)
- reset_vring_queue_pair(dev, i);
+ for (i = 0; i < dev->nr_vring; i++)
+ reset_vring_queue(dev->virtqueue[i]);
}
/*
@@ -274,7 +224,7 @@ vhost_destroy_device(int vid)
if (dev->flags & VIRTIO_DEV_RUNNING) {
dev->flags &= ~VIRTIO_DEV_RUNNING;
- notify_ops->destroy_device(vid);
+ dev->notify_ops->destroy_device(vid);
}
cleanup_device(dev, 1);
@@ -312,6 +262,25 @@ vhost_enable_dequeue_zero_copy(int vid)
}
int
+rte_vhost_get_mtu(int vid, uint16_t *mtu)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return -ENODEV;
+
+ if (!(dev->flags & VIRTIO_DEV_READY))
+ return -EAGAIN;
+
+ if (!(dev->features & VIRTIO_NET_F_MTU))
+ return -ENOTSUP;
+
+ *mtu = dev->mtu;
+
+ return 0;
+}
+
+int
rte_vhost_get_numa_node(int vid)
{
#ifdef RTE_LIBRTE_VHOST_NUMA
@@ -345,7 +314,18 @@ rte_vhost_get_queue_num(int vid)
if (dev == NULL)
return 0;
- return dev->virt_qp_nb;
+ return dev->nr_vring / 2;
+}
+
+uint16_t
+rte_vhost_get_vring_num(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return 0;
+
+ return dev->nr_vring;
}
int
@@ -364,6 +344,72 @@ rte_vhost_get_ifname(int vid, char *buf, size_t len)
return 0;
}
+int
+rte_vhost_get_negotiated_features(int vid, uint64_t *features)
+{
+ struct virtio_net *dev;
+
+ dev = get_device(vid);
+ if (!dev)
+ return -1;
+
+ *features = dev->features;
+ return 0;
+}
+
+int
+rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
+{
+ struct virtio_net *dev;
+ struct rte_vhost_memory *m;
+ size_t size;
+
+ dev = get_device(vid);
+ if (!dev)
+ return -1;
+
+ size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
+ m = malloc(size);
+ if (!m)
+ return -1;
+
+ m->nregions = dev->mem->nregions;
+ memcpy(m->regions, dev->mem->regions, size);
+ *mem = m;
+
+ return 0;
+}
+
+int
+rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
+ struct rte_vhost_vring *vring)
+{
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (!dev)
+ return -1;
+
+ if (vring_idx >= VHOST_MAX_VRING)
+ return -1;
+
+ vq = dev->virtqueue[vring_idx];
+ if (!vq)
+ return -1;
+
+ vring->desc = vq->desc;
+ vring->avail = vq->avail;
+ vring->used = vq->used;
+ vring->log_guest_addr = vq->log_guest_addr;
+
+ vring->callfd = vq->callfd;
+ vring->kickfd = vq->kickfd;
+ vring->size = vq->size;
+
+ return 0;
+}
+
uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
@@ -399,33 +445,33 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
return 0;
}
-uint64_t rte_vhost_feature_get(void)
+void
+rte_vhost_log_write(int vid, uint64_t addr, uint64_t len)
{
- return VHOST_FEATURES;
-}
+ struct virtio_net *dev = get_device(vid);
-int rte_vhost_feature_disable(uint64_t feature_mask)
-{
- VHOST_FEATURES = VHOST_FEATURES & ~feature_mask;
- return 0;
-}
+ if (dev == NULL)
+ return;
-int rte_vhost_feature_enable(uint64_t feature_mask)
-{
- if ((feature_mask & VHOST_SUPPORTED_FEATURES) == feature_mask) {
- VHOST_FEATURES = VHOST_FEATURES | feature_mask;
- return 0;
- }
- return -1;
+ vhost_log_write(dev, addr, len);
}
-/*
- * Register ops so that we can add/remove device to data core.
- */
-int
-rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const ops)
+void
+rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
+ uint64_t offset, uint64_t len)
{
- notify_ops = ops;
+ struct virtio_net *dev;
+ struct vhost_virtqueue *vq;
- return 0;
+ dev = get_device(vid);
+ if (dev == NULL)
+ return;
+
+ if (vring_idx >= VHOST_MAX_VRING)
+ return;
+ vq = dev->virtqueue[vring_idx];
+ if (!vq)
+ return;
+
+ vhost_log_used_vring(dev, vq, offset, len);
}
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 22564f1c..ddd8a9c4 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -39,13 +39,19 @@
#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>
+#include <linux/virtio_net.h>
+#include <sys/socket.h>
+#include <linux/if.h>
#include <rte_log.h>
+#include <rte_ether.h>
-#include "rte_virtio_net.h"
+#include "rte_vhost.h"
/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1
+/* Used to indicate that the device is ready to operate */
+#define VIRTIO_DEV_READY 2
/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1
@@ -110,24 +116,20 @@ struct vhost_virtqueue {
uint16_t shadow_used_idx;
} __rte_cache_aligned;
-/* Old kernels have no such macro defined */
+/* Old kernels have no such macros defined */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif
+#ifndef VIRTIO_NET_F_MQ
+ #define VIRTIO_NET_F_MQ 22
+#endif
-/*
- * Make an extra wrapper for VIRTIO_NET_F_MQ and
- * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX as they are
- * introduced since kernel v3.8. This makes our
- * code buildable for older kernel.
- */
-#ifdef VIRTIO_NET_F_MQ
- #define VHOST_MAX_QUEUE_PAIRS VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX
- #define VHOST_SUPPORTS_MQ (1ULL << VIRTIO_NET_F_MQ)
-#else
- #define VHOST_MAX_QUEUE_PAIRS 1
- #define VHOST_SUPPORTS_MQ 0
+#define VHOST_MAX_VRING 0x100
+#define VHOST_MAX_QUEUE_PAIRS 0x80
+
+#ifndef VIRTIO_NET_F_MTU
+ #define VIRTIO_NET_F_MTU 3
#endif
/*
@@ -137,6 +139,27 @@ struct vhost_virtqueue {
#define VIRTIO_F_VERSION_1 32
#endif
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+
+/* Features supported by this builtin vhost-user net driver. */
+#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
+ (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
+ (1ULL << VIRTIO_NET_F_CTRL_RX) | \
+ (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
+ (1ULL << VIRTIO_NET_F_MQ) | \
+ (1ULL << VIRTIO_F_VERSION_1) | \
+ (1ULL << VHOST_F_LOG_ALL) | \
+ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) | \
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
+ (1ULL << VIRTIO_NET_F_CSUM) | \
+ (1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
+ (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
+ (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
+ (1ULL << VIRTIO_NET_F_MTU))
+
+
struct guest_page {
uint64_t guest_phys_addr;
uint64_t host_phys_addr;
@@ -149,7 +172,7 @@ struct guest_page {
*/
struct virtio_net {
/* Frontend (QEMU) memory and memory region information */
- struct virtio_memory *mem;
+ struct rte_vhost_memory *mem;
uint64_t features;
uint64_t protocol_features;
int vid;
@@ -157,7 +180,7 @@ struct virtio_net {
uint16_t vhost_hlen;
/* to tell if we need broadcast rarp packet */
rte_atomic16_t broadcast_rarp;
- uint32_t virt_qp_nb;
+ uint32_t nr_vring;
int dequeue_zero_copy;
struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
@@ -166,35 +189,52 @@ struct virtio_net {
uint64_t log_base;
uint64_t log_addr;
struct ether_addr mac;
+ uint16_t mtu;
+
+ struct vhost_device_ops const *notify_ops;
uint32_t nr_guest_pages;
uint32_t max_guest_pages;
struct guest_page *guest_pages;
} __rte_cache_aligned;
-/**
- * Information relating to memory regions including offsets to
- * addresses in QEMUs memory file.
- */
-struct virtio_memory_region {
- uint64_t guest_phys_addr;
- uint64_t guest_user_addr;
- uint64_t host_user_addr;
- uint64_t size;
- void *mmap_addr;
- uint64_t mmap_size;
- int fd;
-};
+#define VHOST_LOG_PAGE 4096
-/**
- * Memory structure includes region and mapping information.
- */
-struct virtio_memory {
- uint32_t nregions;
- struct virtio_memory_region regions[0];
-};
+static inline void __attribute__((always_inline))
+vhost_log_page(uint8_t *log_base, uint64_t page)
+{
+ log_base[page / 8] |= 1 << (page % 8);
+}
+
+static inline void __attribute__((always_inline))
+vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
+{
+ uint64_t page;
+
+ if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
+ !dev->log_base || !len))
+ return;
+
+ if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
+ return;
+
+ /* To make sure guest memory updates are committed before logging */
+ rte_smp_wmb();
+
+ page = addr / VHOST_LOG_PAGE;
+ while (page * VHOST_LOG_PAGE < addr + len) {
+ vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+ page += 1;
+ }
+}
+static inline void __attribute__((always_inline))
+vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t offset, uint64_t len)
+{
+ vhost_log_write(dev, vq->log_guest_addr + offset, len);
+}
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
@@ -231,25 +271,6 @@ extern uint64_t VHOST_FEATURES;
#define MAX_VHOST_DEVICE 1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
-/* Convert guest physical Address to host virtual address */
-static inline uint64_t __attribute__((always_inline))
-gpa_to_vva(struct virtio_net *dev, uint64_t gpa)
-{
- struct virtio_memory_region *reg;
- uint32_t i;
-
- for (i = 0; i < dev->mem->nregions; i++) {
- reg = &dev->mem->regions[i];
- if (gpa >= reg->guest_phys_addr &&
- gpa < reg->guest_phys_addr + reg->size) {
- return gpa - reg->guest_phys_addr +
- reg->host_user_addr;
- }
- }
-
- return 0;
-}
-
/* Convert guest physical address to host physical address */
static inline phys_addr_t __attribute__((always_inline))
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
@@ -270,7 +291,6 @@ gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
return 0;
}
-struct virtio_net_device_ops const *notify_ops;
struct virtio_net *get_device(int vid);
int vhost_new_device(void);
@@ -278,11 +298,13 @@ void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int);
-int alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx);
+int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
+struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
+
/*
* Backend-specific cleanup.
*
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 0cb1c677..5c8058b6 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -51,6 +51,9 @@
#include "vhost.h"
#include "vhost_user.h"
+#define VIRTIO_MIN_MTU 68
+#define VIRTIO_MAX_MTU 65535
+
static const char *vhost_message_str[VHOST_USER_MAX] = {
[VHOST_USER_NONE] = "VHOST_USER_NONE",
[VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
@@ -72,6 +75,7 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
[VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
[VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
[VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP",
+ [VHOST_USER_NET_SET_MTU] = "VHOST_USER_NET_SET_MTU",
};
static uint64_t
@@ -88,7 +92,7 @@ static void
free_mem_region(struct virtio_net *dev)
{
uint32_t i;
- struct virtio_memory_region *reg;
+ struct rte_vhost_mem_region *reg;
if (!dev || !dev->mem)
return;
@@ -131,7 +135,7 @@ vhost_user_reset_owner(struct virtio_net *dev)
{
if (dev->flags & VIRTIO_DEV_RUNNING) {
dev->flags &= ~VIRTIO_DEV_RUNNING;
- notify_ops->destroy_device(dev->vid);
+ dev->notify_ops->destroy_device(dev->vid);
}
cleanup_device(dev, 0);
@@ -143,9 +147,12 @@ vhost_user_reset_owner(struct virtio_net *dev)
* The features that we support are requested.
*/
static uint64_t
-vhost_user_get_features(void)
+vhost_user_get_features(struct virtio_net *dev)
{
- return VHOST_FEATURES;
+ uint64_t features = 0;
+
+ rte_vhost_driver_get_features(dev->ifname, &features);
+ return features;
}
/*
@@ -154,9 +161,17 @@ vhost_user_get_features(void)
static int
vhost_user_set_features(struct virtio_net *dev, uint64_t features)
{
- if (features & ~VHOST_FEATURES)
+ uint64_t vhost_features = 0;
+
+ rte_vhost_driver_get_features(dev->ifname, &vhost_features);
+ if (features & ~vhost_features)
return -1;
+ if ((dev->flags & VIRTIO_DEV_RUNNING) && dev->features != features) {
+ if (dev->notify_ops->features_changed)
+ dev->notify_ops->features_changed(dev->vid, features);
+ }
+
dev->features = features;
if (dev->features &
((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
@@ -223,12 +238,7 @@ numa_realloc(struct virtio_net *dev, int index)
struct vhost_virtqueue *old_vq, *vq;
int ret;
- /*
- * vq is allocated on pairs, we should try to do realloc
- * on first queue of one queue pair only.
- */
- if (index % VIRTIO_QNUM != 0)
- return dev;
+ enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
old_dev = dev;
vq = old_vq = dev->virtqueue[index];
@@ -247,8 +257,7 @@ numa_realloc(struct virtio_net *dev, int index)
if (oldnode != newnode) {
RTE_LOG(INFO, VHOST_CONFIG,
"reallocate vq from %d to %d node\n", oldnode, newnode);
- vq = rte_malloc_socket(NULL, sizeof(*vq) * VIRTIO_QNUM, 0,
- newnode);
+ vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
if (!vq)
return dev;
@@ -280,7 +289,6 @@ numa_realloc(struct virtio_net *dev, int index)
out:
dev->virtqueue[index] = vq;
- dev->virtqueue[index + 1] = vq + 1;
vhost_devices[dev->vid] = dev;
return dev;
@@ -300,7 +308,7 @@ numa_realloc(struct virtio_net *dev, int index __rte_unused)
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva)
{
- struct virtio_memory_region *reg;
+ struct rte_vhost_mem_region *reg;
uint32_t i;
/* Find the region where the address lives. */
@@ -428,7 +436,7 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
}
static void
-add_guest_pages(struct virtio_net *dev, struct virtio_memory_region *reg,
+add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
uint64_t page_size)
{
uint64_t reg_size = reg->size;
@@ -488,7 +496,7 @@ static int
vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
struct VhostUserMemory memory = pmsg->payload.memory;
- struct virtio_memory_region *reg;
+ struct rte_vhost_mem_region *reg;
void *mmap_addr;
uint64_t mmap_size;
uint64_t mmap_offset;
@@ -496,12 +504,6 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
uint32_t i;
int fd;
- /* Remove from the data plane. */
- if (dev->flags & VIRTIO_DEV_RUNNING) {
- dev->flags &= ~VIRTIO_DEV_RUNNING;
- notify_ops->destroy_device(dev->vid);
- }
-
if (dev->mem) {
free_mem_region(dev);
rte_free(dev->mem);
@@ -515,8 +517,8 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
sizeof(struct guest_page));
}
- dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct virtio_memory) +
- sizeof(struct virtio_memory_region) * memory.nregions, 0);
+ dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
+ sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
if (dev->mem == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to allocate memory for dev->mem\n",
@@ -611,18 +613,17 @@ vq_is_ready(struct vhost_virtqueue *vq)
static int
virtio_is_ready(struct virtio_net *dev)
{
- struct vhost_virtqueue *rvq, *tvq;
+ struct vhost_virtqueue *vq;
uint32_t i;
- for (i = 0; i < dev->virt_qp_nb; i++) {
- rvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ];
- tvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ];
+ if (dev->nr_vring == 0)
+ return 0;
- if (!vq_is_ready(rvq) || !vq_is_ready(tvq)) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "virtio is not ready for processing.\n");
+ for (i = 0; i < dev->nr_vring; i++) {
+ vq = dev->virtqueue[i];
+
+ if (!vq_is_ready(vq))
return 0;
- }
}
RTE_LOG(INFO, VHOST_CONFIG,
@@ -635,7 +636,6 @@ vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
struct vhost_vring_file file;
struct vhost_virtqueue *vq;
- uint32_t cur_qp_idx;
file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
@@ -645,29 +645,13 @@ vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
RTE_LOG(INFO, VHOST_CONFIG,
"vring call idx:%d file:%d\n", file.index, file.fd);
- /*
- * FIXME: VHOST_SET_VRING_CALL is the first per-vring message
- * we get, so we do vring queue pair allocation here.
- */
- cur_qp_idx = file.index / VIRTIO_QNUM;
- if (cur_qp_idx + 1 > dev->virt_qp_nb) {
- if (alloc_vring_queue_pair(dev, cur_qp_idx) < 0)
- return;
- }
-
vq = dev->virtqueue[file.index];
- assert(vq != NULL);
-
if (vq->callfd >= 0)
close(vq->callfd);
vq->callfd = file.fd;
}
-/*
- * In vhost-user, when we receive kick message, will test whether virtio
- * device is ready for packet processing.
- */
static void
vhost_user_set_vring_kick(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
@@ -686,16 +670,6 @@ vhost_user_set_vring_kick(struct virtio_net *dev, struct VhostUserMsg *pmsg)
if (vq->kickfd >= 0)
close(vq->kickfd);
vq->kickfd = file.fd;
-
- if (virtio_is_ready(dev) && !(dev->flags & VIRTIO_DEV_RUNNING)) {
- if (dev->dequeue_zero_copy) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "dequeue zero copy is enabled\n");
- }
-
- if (notify_ops->new_device(dev->vid) == 0)
- dev->flags |= VIRTIO_DEV_RUNNING;
- }
}
static void
@@ -726,9 +700,11 @@ vhost_user_get_vring_base(struct virtio_net *dev,
/* We have to stop the queue (virtio) if it is running. */
if (dev->flags & VIRTIO_DEV_RUNNING) {
dev->flags &= ~VIRTIO_DEV_RUNNING;
- notify_ops->destroy_device(dev->vid);
+ dev->notify_ops->destroy_device(dev->vid);
}
+ dev->flags &= ~VIRTIO_DEV_READY;
+
/* Here we are safe to get the last used index */
state->num = vq->last_used_idx;
@@ -766,8 +742,8 @@ vhost_user_set_vring_enable(struct virtio_net *dev,
"set queue enable: %d to qp idx: %d\n",
enable, state->index);
- if (notify_ops->vring_state_changed)
- notify_ops->vring_state_changed(dev->vid, state->index, enable);
+ if (dev->notify_ops->vring_state_changed)
+ dev->notify_ops->vring_state_changed(dev->vid, state->index, enable);
dev->virtqueue[state->index]->enabled = enable;
@@ -865,6 +841,22 @@ vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
return 0;
}
+static int
+vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
+{
+ if (msg->payload.u64 < VIRTIO_MIN_MTU ||
+ msg->payload.u64 > VIRTIO_MAX_MTU) {
+ RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
+ msg->payload.u64);
+
+ return -1;
+ }
+
+ dev->mtu = msg->payload.u64;
+
+ return 0;
+}
+
/* return bytes# of read on success or negative val on failure. */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
@@ -904,6 +896,7 @@ send_vhost_message(int sockfd, struct VhostUserMsg *msg)
return 0;
msg->flags &= ~VHOST_USER_VERSION_MASK;
+ msg->flags &= ~VHOST_USER_NEED_REPLY;
msg->flags |= VHOST_USER_VERSION;
msg->flags |= VHOST_USER_REPLY_MASK;
@@ -913,6 +906,44 @@ send_vhost_message(int sockfd, struct VhostUserMsg *msg)
return ret;
}
+/*
+ * Allocate a queue pair if it hasn't been allocated yet
+ */
+static int
+vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
+{
+ uint16_t vring_idx;
+
+ switch (msg->request) {
+ case VHOST_USER_SET_VRING_KICK:
+ case VHOST_USER_SET_VRING_CALL:
+ case VHOST_USER_SET_VRING_ERR:
+ vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+ break;
+ case VHOST_USER_SET_VRING_NUM:
+ case VHOST_USER_SET_VRING_BASE:
+ case VHOST_USER_SET_VRING_ENABLE:
+ vring_idx = msg->payload.state.index;
+ break;
+ case VHOST_USER_SET_VRING_ADDR:
+ vring_idx = msg->payload.addr.index;
+ break;
+ default:
+ return 0;
+ }
+
+ if (vring_idx >= VHOST_MAX_VRING) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "invalid vring index: %u\n", vring_idx);
+ return -1;
+ }
+
+ if (dev->virtqueue[vring_idx])
+ return 0;
+
+ return alloc_vring_queue(dev, vring_idx);
+}
+
int
vhost_user_msg_handler(int vid, int fd)
{
@@ -924,6 +955,16 @@ vhost_user_msg_handler(int vid, int fd)
if (dev == NULL)
return -1;
+ if (!dev->notify_ops) {
+ dev->notify_ops = vhost_driver_callback_get(dev->ifname);
+ if (!dev->notify_ops) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to get callback ops for driver %s\n",
+ dev->ifname);
+ return -1;
+ }
+ }
+
ret = read_vhost_message(fd, &msg);
if (ret <= 0 || msg.request >= VHOST_USER_MAX) {
if (ret < 0)
@@ -939,11 +980,20 @@ vhost_user_msg_handler(int vid, int fd)
return -1;
}
+ ret = 0;
RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
vhost_message_str[msg.request]);
+
+ ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
+ if (ret < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to alloc queue\n");
+ return -1;
+ }
+
switch (msg.request) {
case VHOST_USER_GET_FEATURES:
- msg.payload.u64 = vhost_user_get_features();
+ msg.payload.u64 = vhost_user_get_features(dev);
msg.size = sizeof(msg.payload.u64);
send_vhost_message(fd, &msg);
break;
@@ -968,7 +1018,7 @@ vhost_user_msg_handler(int vid, int fd)
break;
case VHOST_USER_SET_MEM_TABLE:
- vhost_user_set_mem_table(dev, &msg);
+ ret = vhost_user_set_mem_table(dev, &msg);
break;
case VHOST_USER_SET_LOG_BASE:
@@ -994,7 +1044,7 @@ vhost_user_msg_handler(int vid, int fd)
break;
case VHOST_USER_GET_VRING_BASE:
- ret = vhost_user_get_vring_base(dev, &msg.payload.state);
+ vhost_user_get_vring_base(dev, &msg.payload.state);
msg.size = sizeof(msg.payload.state);
send_vhost_message(fd, &msg);
break;
@@ -1025,10 +1075,35 @@ vhost_user_msg_handler(int vid, int fd)
vhost_user_send_rarp(dev, &msg);
break;
+ case VHOST_USER_NET_SET_MTU:
+ ret = vhost_user_net_set_mtu(dev, &msg);
+ break;
+
default:
+ ret = -1;
break;
}
+ if (msg.flags & VHOST_USER_NEED_REPLY) {
+ msg.payload.u64 = !!ret;
+ msg.size = sizeof(msg.payload.u64);
+ send_vhost_message(fd, &msg);
+ }
+
+ if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
+ dev->flags |= VIRTIO_DEV_READY;
+
+ if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
+ if (dev->dequeue_zero_copy) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "dequeue zero copy is enabled\n");
+ }
+
+ if (dev->notify_ops->new_device(dev->vid) == 0)
+ dev->flags |= VIRTIO_DEV_RUNNING;
+ }
+ }
+
return 0;
}
diff --git a/lib/librte_vhost/vhost_user.h b/lib/librte_vhost/vhost_user.h
index ba78d326..35ebd719 100644
--- a/lib/librte_vhost/vhost_user.h
+++ b/lib/librte_vhost/vhost_user.h
@@ -37,7 +37,7 @@
#include <stdint.h>
#include <linux/vhost.h>
-#include "rte_virtio_net.h"
+#include "rte_vhost.h"
/* refer to hw/virtio/vhost-user.c */
@@ -46,10 +46,18 @@
#define VHOST_USER_PROTOCOL_F_MQ 0
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#define VHOST_USER_PROTOCOL_F_RARP 2
+#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
+#define VHOST_USER_PROTOCOL_F_NET_MTU 4
+/*
+ * disable REPLY_ACK feature to workaround the buggy QEMU implementation.
+ * Proved buggy QEMU includes v2.7 - v2.9.
+ */
#define VHOST_USER_PROTOCOL_FEATURES ((1ULL << VHOST_USER_PROTOCOL_F_MQ) | \
(1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |\
- (1ULL << VHOST_USER_PROTOCOL_F_RARP))
+ (1ULL << VHOST_USER_PROTOCOL_F_RARP) | \
+ (0ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) | \
+ (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))
typedef enum VhostUserRequest {
VHOST_USER_NONE = 0,
@@ -72,6 +80,7 @@ typedef enum VhostUserRequest {
VHOST_USER_GET_QUEUE_NUM = 17,
VHOST_USER_SET_VRING_ENABLE = 18,
VHOST_USER_SEND_RARP = 19,
+ VHOST_USER_NET_SET_MTU = 20,
VHOST_USER_MAX
} VhostUserRequest;
@@ -98,6 +107,7 @@ typedef struct VhostUserMsg {
#define VHOST_USER_VERSION_MASK 0x3
#define VHOST_USER_REPLY_MASK (0x1 << 2)
+#define VHOST_USER_NEED_REPLY (0x1 << 3)
uint32_t flags;
uint32_t size; /* the following payload size */
union {
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 337470d6..48219e05 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -39,7 +39,7 @@
#include <rte_memcpy.h>
#include <rte_ether.h>
#include <rte_ip.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
@@ -48,47 +48,11 @@
#include "vhost.h"
#define MAX_PKT_BURST 32
-#define VHOST_LOG_PAGE 4096
-
-static inline void __attribute__((always_inline))
-vhost_log_page(uint8_t *log_base, uint64_t page)
-{
- log_base[page / 8] |= 1 << (page % 8);
-}
-
-static inline void __attribute__((always_inline))
-vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
-{
- uint64_t page;
-
- if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
- !dev->log_base || !len))
- return;
-
- if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
- return;
-
- /* To make sure guest memory updates are committed before logging */
- rte_smp_wmb();
-
- page = addr / VHOST_LOG_PAGE;
- while (page * VHOST_LOG_PAGE < addr + len) {
- vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
- page += 1;
- }
-}
-
-static inline void __attribute__((always_inline))
-vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint64_t offset, uint64_t len)
-{
- vhost_log_write(dev, vq->log_guest_addr + offset, len);
-}
static bool
-is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t qp_nb)
+is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
- return (is_tx ^ (idx & 1)) == 0 && idx < qp_nb * VIRTIO_QNUM;
+ return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}
static inline void __attribute__((always_inline))
@@ -141,6 +105,12 @@ update_shadow_used_ring(struct vhost_virtqueue *vq,
vq->shadow_used_ring[i].len = len;
}
+/* avoid write operation when necessary, to lessen cache issues */
+#define ASSIGN_UNLESS_EQUAL(var, val) do { \
+ if ((var) != (val)) \
+ (var) = (val); \
+} while (0)
+
static void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
@@ -162,6 +132,10 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
cksum));
break;
}
+ } else {
+ ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
}
if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
@@ -172,19 +146,13 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
net_hdr->gso_size = m_buf->tso_segsz;
net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
+ m_buf->l4_len;
+ } else {
+ ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
}
}
-static inline void
-copy_virtio_net_hdr(struct virtio_net *dev, uint64_t desc_addr,
- struct virtio_net_hdr_mrg_rxbuf hdr)
-{
- if (dev->vhost_hlen == sizeof(struct virtio_net_hdr_mrg_rxbuf))
- *(struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)desc_addr = hdr;
- else
- *(struct virtio_net_hdr *)(uintptr_t)desc_addr = hdr.hdr;
-}
-
static inline int __attribute__((always_inline))
copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
@@ -194,12 +162,11 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
uint32_t cpy_len;
struct vring_desc *desc;
uint64_t desc_addr;
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
/* A counter to avoid desc dead loop chain */
uint16_t nr_desc = 1;
desc = &descs[desc_idx];
- desc_addr = gpa_to_vva(dev, desc->addr);
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
/*
* Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
* performance issue with some versions of gcc (4.8.4 and 5.3.0) which
@@ -210,8 +177,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
rte_prefetch0((void *)(uintptr_t)desc_addr);
- virtio_enqueue_offload(m, &virtio_hdr.hdr);
- copy_virtio_net_hdr(dev, desc_addr, virtio_hdr);
+ virtio_enqueue_offload(m, (struct virtio_net_hdr *)(uintptr_t)desc_addr);
vhost_log_write(dev, desc->addr, dev->vhost_hlen);
PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
@@ -239,7 +205,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
return -1;
desc = &descs[desc->next];
- desc_addr = gpa_to_vva(dev, desc->addr);
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
if (unlikely(!desc_addr))
return -1;
@@ -283,7 +249,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
uint32_t i, sz;
LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
dev->vid, __func__, queue_id);
return 0;
@@ -323,7 +289,8 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
int err;
if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
- descs = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev,
+ descs = (struct vring_desc *)(uintptr_t)
+ rte_vhost_gpa_to_vva(dev->mem,
vq->desc[desc_idx].addr);
if (unlikely(!descs)) {
count = i;
@@ -383,7 +350,7 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
descs = (struct vring_desc *)(uintptr_t)
- gpa_to_vva(dev, vq->desc[idx].addr);
+ rte_vhost_gpa_to_vva(dev->mem, vq->desc[idx].addr);
if (unlikely(!descs))
return -1;
@@ -461,7 +428,6 @@ static inline int __attribute__((always_inline))
copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
struct buf_vector *buf_vec, uint16_t num_buffers)
{
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
uint32_t vec_idx = 0;
uint64_t desc_addr;
uint32_t mbuf_offset, mbuf_avail;
@@ -473,7 +439,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
if (unlikely(m == NULL))
return -1;
- desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, buf_vec[vec_idx].buf_addr);
if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr)
return -1;
@@ -482,7 +448,6 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
hdr_phys_addr = buf_vec[vec_idx].buf_addr;
rte_prefetch0((void *)(uintptr_t)hdr_addr);
- virtio_hdr.num_buffers = num_buffers;
LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
dev->vid, num_buffers);
@@ -495,7 +460,8 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
/* done with current desc buf, get the next one */
if (desc_avail == 0) {
vec_idx++;
- desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem,
+ buf_vec[vec_idx].buf_addr);
if (unlikely(!desc_addr))
return -1;
@@ -514,8 +480,13 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
}
if (hdr_addr) {
- virtio_enqueue_offload(hdr_mbuf, &virtio_hdr.hdr);
- copy_virtio_net_hdr(dev, hdr_addr, virtio_hdr);
+ struct virtio_net_hdr_mrg_rxbuf *hdr;
+
+ hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)
+ hdr_addr;
+ virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
+ ASSIGN_UNLESS_EQUAL(hdr->num_buffers, num_buffers);
+
vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen);
PRINT_PACKET(dev, (uintptr_t)hdr_addr,
dev->vhost_hlen, 0);
@@ -552,7 +523,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
uint16_t avail_head;
LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
dev->vid, __func__, queue_id);
return 0;
@@ -663,14 +634,14 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
switch (ethertype) {
case ETHER_TYPE_IPv4:
- ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+ ipv4_hdr = l3_hdr;
*l4_proto = ipv4_hdr->next_proto_id;
m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
*l4_hdr = (char *)l3_hdr + m->l3_len;
m->ol_flags |= PKT_TX_IPV4;
break;
case ETHER_TYPE_IPv6:
- ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+ ipv6_hdr = l3_hdr;
*l4_proto = ipv6_hdr->proto;
m->l3_len = sizeof(struct ipv6_hdr);
*l4_hdr = (char *)l3_hdr + m->l3_len;
@@ -720,7 +691,7 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
case VIRTIO_NET_HDR_GSO_TCPV6:
- tcp_hdr = (struct tcp_hdr *)l4_hdr;
+ tcp_hdr = l4_hdr;
m->ol_flags |= PKT_TX_TCP_SEG;
m->tso_segsz = hdr->gso_size;
m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
@@ -798,7 +769,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
(desc->flags & VRING_DESC_F_INDIRECT))
return -1;
- desc_addr = gpa_to_vva(dev, desc->addr);
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
if (unlikely(!desc_addr))
return -1;
@@ -818,7 +789,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
return -1;
- desc_addr = gpa_to_vva(dev, desc->addr);
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
if (unlikely(!desc_addr))
return -1;
@@ -882,7 +853,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
return -1;
- desc_addr = gpa_to_vva(dev, desc->addr);
+ desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
if (unlikely(!desc_addr))
return -1;
@@ -905,6 +876,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
"allocate memory for mbuf.\n");
return -1;
}
+ if (unlikely(dev->dequeue_zero_copy))
+ rte_mbuf_refcnt_update(cur, 1);
prev->next = cur;
prev->data_len = mbuf_offset;
@@ -1017,7 +990,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
if (!dev)
return 0;
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
dev->vid, __func__, queue_id);
return 0;
@@ -1056,9 +1029,21 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
* array, to looks like that guest actually send such packet.
*
* Check user_send_rarp() for more information.
+ *
+ * broadcast_rarp shares a cacheline in the virtio_net structure
+ * with some fields that are accessed during enqueue and
+ * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
+ * result in false sharing between enqueue and dequeue.
+ *
+ * Prevent unnecessary false sharing by reading broadcast_rarp first
+ * and only performing cmpset if the read indicates it is likely to
+ * be set.
*/
- if (unlikely(rte_atomic16_cmpset((volatile uint16_t *)
- &dev->broadcast_rarp.cnt, 1, 0))) {
+
+ if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
+ rte_atomic16_cmpset((volatile uint16_t *)
+ &dev->broadcast_rarp.cnt, 1, 0))) {
+
rarp_mbuf = rte_pktmbuf_alloc(mbuf_pool);
if (rarp_mbuf == NULL) {
RTE_LOG(ERR, VHOST_DATA,
@@ -1113,7 +1098,8 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
- desc = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev,
+ desc = (struct vring_desc *)(uintptr_t)
+ rte_vhost_gpa_to_vva(dev->mem,
vq->desc[desc_indexes[i]].addr);
if (unlikely(!desc))
break;
diff --git a/mk/internal/rte.install-post.mk b/mk/internal/rte.install-post.mk
index 77addee2..b99e2b2f 100644
--- a/mk/internal/rte.install-post.mk
+++ b/mk/internal/rte.install-post.mk
@@ -59,7 +59,7 @@ define symlink_rule
$(addprefix $(RTE_OUTPUT)/$(1)/,$(notdir $(2))): $(2)
@echo " SYMLINK-FILE $(addprefix $(1)/,$(notdir $(2)))"
@[ -d $(RTE_OUTPUT)/$(1) ] || mkdir -p $(RTE_OUTPUT)/$(1)
- $(Q)ln -nsf `$(RTE_SDK)/scripts/relpath.sh $$(<) $(RTE_OUTPUT)/$(1)` \
+ $(Q)ln -nsf `$(RTE_SDK)/buildtools/relpath.sh $$(<) $(RTE_OUTPUT)/$(1)` \
$(RTE_OUTPUT)/$(1)
endef
diff --git a/mk/machine/armv7a/rte.vars.mk b/mk/machine/armv7a/rte.vars.mk
index 36fa3dea..41c4c408 100644
--- a/mk/machine/armv7a/rte.vars.mk
+++ b/mk/machine/armv7a/rte.vars.mk
@@ -57,7 +57,7 @@
MACHINE_CFLAGS += -march=armv7-a
ifdef CONFIG_RTE_ARCH_ARM_TUNE
-MACHINE_CFLAGS += -mtune=$(CONFIG_RTE_ARCH_ARM_TUNE)
+MACHINE_CFLAGS += -mtune=$(CONFIG_RTE_ARCH_ARM_TUNE:"%"=%)
endif
MACHINE_CFLAGS += -mfpu=neon
diff --git a/mk/machine/dpaa2/rte.vars.mk b/mk/machine/dpaa2/rte.vars.mk
index 8541633c..a60819f3 100644
--- a/mk/machine/dpaa2/rte.vars.mk
+++ b/mk/machine/dpaa2/rte.vars.mk
@@ -1,6 +1,7 @@
# BSD LICENSE
#
-# Copyright(c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright (c) 2016 NXP. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -53,8 +54,8 @@
# CPU_CFLAGS =
# CPU_LDFLAGS =
# CPU_ASFLAGS =
-MACHINE_CFLAGS += -march=armv8-a
+MACHINE_CFLAGS += -march=armv8-a+crc
ifdef CONFIG_RTE_ARCH_ARM_TUNE
-MACHINE_CFLAGS += -mcpu=$(CONFIG_RTE_ARCH_ARM_TUNE)
+MACHINE_CFLAGS += -mcpu=$(CONFIG_RTE_ARCH_ARM_TUNE:"%"=%)
endif
diff --git a/mk/machine/thunderx/rte.vars.mk b/mk/machine/thunderx/rte.vars.mk
index 81da1b4c..ad5a379b 100644
--- a/mk/machine/thunderx/rte.vars.mk
+++ b/mk/machine/thunderx/rte.vars.mk
@@ -47,7 +47,7 @@
#
# ARCH =
-CROSS ?= aarch64-thunderx-linux-gnu-
+# CROSS =
# MACHINE_CFLAGS =
# MACHINE_LDFLAGS =
# MACHINE_ASFLAGS =
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index f75f0e24..bcaf1b38 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
# Copyright(c) 2014-2015 6WIND S.A.
# All rights reserved.
#
@@ -34,7 +34,6 @@ include $(RTE_SDK)/mk/internal/rte.compile-pre.mk
include $(RTE_SDK)/mk/internal/rte.install-pre.mk
include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
include $(RTE_SDK)/mk/internal/rte.build-pre.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
# VPATH contains at least SRCDIR
VPATH += $(SRCDIR)
@@ -59,18 +58,12 @@ _LDLIBS-y += -L$(RTE_SDK_BIN)/lib
#
# Order is important: from higher level to lower level
#
-
-ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
-_LDLIBS-$(CONFIG_RTE_LIBRTE_KNI) += -lrte_kni
-endif
-
_LDLIBS-$(CONFIG_RTE_LIBRTE_PIPELINE) += -lrte_pipeline
_LDLIBS-$(CONFIG_RTE_LIBRTE_TABLE) += -lrte_table
_LDLIBS-$(CONFIG_RTE_LIBRTE_PORT) += -lrte_port
_LDLIBS-$(CONFIG_RTE_LIBRTE_PDUMP) += -lrte_pdump
_LDLIBS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += -lrte_distributor
-_LDLIBS-$(CONFIG_RTE_LIBRTE_REORDER) += -lrte_reorder
_LDLIBS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += -lrte_ip_frag
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lrte_meter
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrte_sched
@@ -80,62 +73,81 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_ACL) += --whole-archive
_LDLIBS-$(CONFIG_RTE_LIBRTE_ACL) += -lrte_acl
_LDLIBS-$(CONFIG_RTE_LIBRTE_ACL) += --no-whole-archive
_LDLIBS-$(CONFIG_RTE_LIBRTE_JOBSTATS) += -lrte_jobstats
+_LDLIBS-$(CONFIG_RTE_LIBRTE_METRICS) += -lrte_metrics
+_LDLIBS-$(CONFIG_RTE_LIBRTE_BITRATE) += -lrte_bitratestats
+_LDLIBS-$(CONFIG_RTE_LIBRTE_LATENCY_STATS) += -lrte_latencystats
_LDLIBS-$(CONFIG_RTE_LIBRTE_POWER) += -lrte_power
+_LDLIBS-$(CONFIG_RTE_LIBRTE_TIMER) += -lrte_timer
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EFD) += -lrte_efd
+_LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE) += -lrte_cfgfile
+
_LDLIBS-y += --whole-archive
-_LDLIBS-$(CONFIG_RTE_LIBRTE_TIMER) += -lrte_timer
_LDLIBS-$(CONFIG_RTE_LIBRTE_HASH) += -lrte_hash
_LDLIBS-$(CONFIG_RTE_LIBRTE_VHOST) += -lrte_vhost
-
_LDLIBS-$(CONFIG_RTE_LIBRTE_KVARGS) += -lrte_kvargs
_LDLIBS-$(CONFIG_RTE_LIBRTE_MBUF) += -lrte_mbuf
_LDLIBS-$(CONFIG_RTE_LIBRTE_NET) += -lrte_net
_LDLIBS-$(CONFIG_RTE_LIBRTE_ETHER) += -lrte_ethdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += -lrte_cryptodev
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += -lrte_eventdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += -lrte_mempool
+_LDLIBS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += -lrte_mempool_ring
_LDLIBS-$(CONFIG_RTE_LIBRTE_RING) += -lrte_ring
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrte_eal
_LDLIBS-$(CONFIG_RTE_LIBRTE_CMDLINE) += -lrte_cmdline
-_LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE) += -lrte_cfgfile
+_LDLIBS-$(CONFIG_RTE_LIBRTE_REORDER) += -lrte_reorder
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lrte_pmd_xenvirt -lxenstore
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_KNI) += -lrte_kni
+endif
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# plugins (link only if static libraries)
+_LDLIBS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += -lrte_mempool_stack
+
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += -lrte_pmd_af_packet
+_LDLIBS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += -lrte_pmd_ark
+_LDLIBS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += -lrte_pmd_avp
_LDLIBS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += -lrte_pmd_bnx2x -lz
_LDLIBS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += -lrte_pmd_bnxt
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond
_LDLIBS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += -lrte_pmd_cxgbe
+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += -lrte_pmd_dpaa2
_LDLIBS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += -lrte_pmd_e1000
_LDLIBS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += -lrte_pmd_ena
_LDLIBS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += -lrte_pmd_enic
_LDLIBS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += -lrte_pmd_fm10k
_LDLIBS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += -lrte_pmd_i40e
_LDLIBS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += -lrte_pmd_ixgbe
+ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KNI) += -lrte_pmd_kni
+endif
+_LDLIBS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += -lrte_pmd_lio
_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += -lrte_pmd_mlx4 -libverbs
_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += -lrte_pmd_mlx5 -libverbs
-_LDLIBS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += -lrte_pmd_mpipe -lgxio
_LDLIBS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += -lrte_pmd_nfp -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += -lrte_pmd_null
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += -lrte_pmd_pcap -lpcap
_LDLIBS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += -lrte_pmd_qede
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_RING) += -lrte_pmd_ring
+_LDLIBS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += -lrte_pmd_sfc_efx
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += -lrte_pmd_szedata2 -lsze2
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += -lrte_pmd_tap
_LDLIBS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += -lrte_pmd_thunderx_nicvf -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += -lrte_pmd_virtio
ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += -lrte_pmd_vhost
endif # $(CONFIG_RTE_LIBRTE_VHOST)
_LDLIBS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += -lrte_pmd_vmxnet3_uio
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += -lrte_pmd_xenvirt -lxenstore
ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -lrte_pmd_aesni_mb
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -lrte_pmd_aesni_gcm -lcrypto
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -lrte_pmd_aesni_gcm -lisal_crypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += -lrte_pmd_openssl -lcrypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += -lrte_pmd_null_crypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += -lrte_pmd_qat -lcrypto
@@ -145,8 +157,27 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += -lrte_pmd_kasumi
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += -lrte_pmd_zuc
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += -lrte_pmd_armv8
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += -L$(ARMV8_CRYPTO_LIB_PATH) -larmv8_crypto
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += -lrte_pmd_crypto_scheduler
+ifeq ($(CONFIG_RTE_LIBRTE_FSLMC_BUS),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += -lrte_pmd_dpaa2_sec
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += -lrte_mempool_dpaa2
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += -lrte_bus_fslmc
+endif # CONFIG_RTE_LIBRTE_FSLMC_BUS
endif # CONFIG_RTE_LIBRTE_CRYPTODEV
+ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += -lrte_pmd_skeleton_event
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += -lrte_pmd_sw_event
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += -lrte_pmd_octeontx_ssovf
+endif # CONFIG_RTE_LIBRTE_EVENTDEV
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += -lrte_bus_fslmc
+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += -lrte_mempool_dpaa2
+endif # CONFIG_RTE_LIBRTE_DPAA2_PMD
+
endif # !CONFIG_RTE_BUILD_SHARED_LIBS
_LDLIBS-y += --no-whole-archive
@@ -168,10 +199,21 @@ _LDLIBS-y += $(EXECENV_LDLIBS)
LDLIBS += $(_LDLIBS-y) $(CPU_LDLIBS) $(EXTRA_LDLIBS)
-# Eliminate duplicates without sorting
-LDLIBS := $(shell echo $(LDLIBS) | \
- awk '{for (i = 1; i <= NF; i++) { \
- if ($$i !~ /^-l.*/ || !seen[$$i]++) print $$i }}')
+# all the words except the first one
+allbutfirst = $(wordlist 2,$(words $(1)),$(1))
+
+# Eliminate duplicates without sorting, only keep the last occurrence
+filter-libs = \
+ $(if $(1),$(strip\
+ $(if \
+ $(and \
+ $(filter $(firstword $(1)),$(call allbutfirst,$(1))),\
+ $(filter -l%,$(firstword $(1)))),\
+ ,\
+ $(firstword $(1))) \
+ $(call filter-libs,$(call allbutfirst,$(1)))))
+
+LDLIBS := $(call filter-libs,$(LDLIBS))
ifeq ($(RTE_DEVEL_BUILD)$(CONFIG_RTE_BUILD_SHARED_LIB),yy)
LDFLAGS += -rpath=$(RTE_SDK_BIN)/lib
@@ -193,7 +235,7 @@ build: _postbuild
exe2cmd = $(strip $(call dotfile,$(patsubst %,%.cmd,$(1))))
ifeq ($(LINK_USING_CC),1)
-O_TO_EXE = $(CC) -o $@ $(CFLAGS) $(OBJS-y) $(call linkerprefix, \
+O_TO_EXE = $(CC) -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $(OBJS-y) $(call linkerprefix, \
$(LDLIBS) $(LDFLAGS) $(LDFLAGS_$(@)) $(EXTRA_LDFLAGS) \
$(MAPFLAGS))
else
@@ -268,14 +310,13 @@ clean: _postclean
.PHONY: doclean
doclean:
$(Q)rm -rf $(APP) $(OBJS-all) $(DEPS-all) $(DEPSTMP-all) \
- $(CMDS-all) $(INSTALL-FILES-all) .$(APP).cmd
+ $(CMDS-all) $(INSTALL-FILES-all) .$(APP).cmd $(APP).map
include $(RTE_SDK)/mk/internal/rte.compile-post.mk
include $(RTE_SDK)/mk/internal/rte.install-post.mk
include $(RTE_SDK)/mk/internal/rte.clean-post.mk
include $(RTE_SDK)/mk/internal/rte.build-post.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
ifneq ($(wildcard $(RTE_SDK)/mk/target/$(RTE_TARGET)/rte.app.mk),)
include $(RTE_SDK)/mk/target/$(RTE_TARGET)/rte.app.mk
diff --git a/mk/rte.bsdmodule.mk b/mk/rte.bsdmodule.mk
index 86b92ff5..6fc137ad 100644
--- a/mk/rte.bsdmodule.mk
+++ b/mk/rte.bsdmodule.mk
@@ -43,7 +43,6 @@ else
include $(RTE_SDK)/mk/internal/rte.install-pre.mk
include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
include $(RTE_SDK)/mk/internal/rte.build-pre.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
# DPDK uses a more up-to-date gcc, so clear the override here.
unexport CC
@@ -111,7 +110,6 @@ doclean:
include $(RTE_SDK)/mk/internal/rte.install-post.mk
include $(RTE_SDK)/mk/internal/rte.clean-post.mk
include $(RTE_SDK)/mk/internal/rte.build-post.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
.PHONY: FORCE
FORCE:
diff --git a/mk/rte.cpuflags.mk b/mk/rte.cpuflags.mk
index e634abc3..4288c147 100644
--- a/mk/rte.cpuflags.mk
+++ b/mk/rte.cpuflags.mk
@@ -70,8 +70,10 @@ CPUFLAGS += PCLMULQDQ
endif
ifneq ($(filter $(AUTO_CPUFLAGS),__AVX__),)
+ifeq ($(CONFIG_RTE_ENABLE_AVX),y)
CPUFLAGS += AVX
endif
+endif
ifneq ($(filter $(AUTO_CPUFLAGS),__RDRND__),)
CPUFLAGS += RDRAND
@@ -86,12 +88,16 @@ CPUFLAGS += F16C
endif
ifneq ($(filter $(AUTO_CPUFLAGS),__AVX2__),)
+ifeq ($(CONFIG_RTE_ENABLE_AVX),y)
CPUFLAGS += AVX2
endif
+endif
ifneq ($(filter $(AUTO_CPUFLAGS),__AVX512F__),)
+ifeq ($(CONFIG_RTE_ENABLE_AVX512),y)
CPUFLAGS += AVX512F
endif
+endif
# IBM Power CPU flags
ifneq ($(filter $(AUTO_CPUFLAGS),__PPC64__),)
diff --git a/mk/rte.extsubdir.mk b/mk/rte.extsubdir.mk
index f50f0062..d21791b0 100644
--- a/mk/rte.extsubdir.mk
+++ b/mk/rte.extsubdir.mk
@@ -30,9 +30,11 @@
MAKEFLAGS += --no-print-directory
+ALL_DEPDIRS := $(patsubst DEPDIRS-%,%,$(filter DEPDIRS-%,$(.VARIABLES)))
+
# output directory
-O ?= .
-BASE_OUTPUT ?= $(O)
+O ?= $(CURDIR)
+BASE_OUTPUT ?= $(abspath $(O))
CUR_SUBDIR ?= .
.PHONY: all
@@ -50,4 +52,16 @@ $(DIRS-y):
BASE_OUTPUT=$(BASE_OUTPUT) \
CUR_SUBDIR=$(CUR_SUBDIR)/$(@) \
S=$(CURDIR)/$(@) \
+ DEPDIRS="$(DEPDIRS-$@)" \
$(filter-out $(DIRS-y),$(MAKECMDGOALS))
+
+define depdirs_rule
+$(DEPDIRS-$(1)):
+
+$(1): | $(DEPDIRS-$(1))
+
+$(if $(D),$(info $(1) depends on $(DEPDIRS-$(1))))
+endef
+
+$(foreach dir,$(ALL_DEPDIRS),\
+ $(eval $(call depdirs_rule,$(dir))))
diff --git a/mk/rte.gnuconfigure.mk b/mk/rte.gnuconfigure.mk
index 65b658c1..b5c8df0f 100644
--- a/mk/rte.gnuconfigure.mk
+++ b/mk/rte.gnuconfigure.mk
@@ -32,7 +32,6 @@
include $(RTE_SDK)/mk/internal/rte.build-pre.mk
include $(RTE_SDK)/mk/internal/rte.install-pre.mk
include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
# VPATH contains at least SRCDIR
VPATH += $(SRCDIR)
@@ -68,7 +67,6 @@ doclean:
include $(RTE_SDK)/mk/internal/rte.build-post.mk
include $(RTE_SDK)/mk/internal/rte.install-post.mk
include $(RTE_SDK)/mk/internal/rte.clean-post.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
.PHONY: FORCE
FORCE:
diff --git a/mk/rte.hostapp.mk b/mk/rte.hostapp.mk
index 07b391c2..5cb4909c 100644
--- a/mk/rte.hostapp.mk
+++ b/mk/rte.hostapp.mk
@@ -35,7 +35,6 @@ include $(RTE_SDK)/mk/internal/rte.compile-pre.mk
include $(RTE_SDK)/mk/internal/rte.install-pre.mk
include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
include $(RTE_SDK)/mk/internal/rte.build-pre.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
# VPATH contains at least SRCDIR
VPATH += $(SRCDIR)
@@ -117,7 +116,6 @@ include $(RTE_SDK)/mk/internal/rte.compile-post.mk
include $(RTE_SDK)/mk/internal/rte.install-post.mk
include $(RTE_SDK)/mk/internal/rte.clean-post.mk
include $(RTE_SDK)/mk/internal/rte.build-post.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
.PHONY: FORCE
FORCE:
diff --git a/mk/rte.hostlib.mk b/mk/rte.hostlib.mk
index fe24049f..7709cff7 100644
--- a/mk/rte.hostlib.mk
+++ b/mk/rte.hostlib.mk
@@ -35,7 +35,6 @@ include $(RTE_SDK)/mk/internal/rte.compile-pre.mk
include $(RTE_SDK)/mk/internal/rte.install-pre.mk
include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
include $(RTE_SDK)/mk/internal/rte.build-pre.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
# VPATH contains at least SRCDIR
VPATH += $(SRCDIR)
@@ -110,7 +109,6 @@ include $(RTE_SDK)/mk/internal/rte.compile-post.mk
include $(RTE_SDK)/mk/internal/rte.install-post.mk
include $(RTE_SDK)/mk/internal/rte.clean-post.mk
include $(RTE_SDK)/mk/internal/rte.build-post.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
.PHONY: FORCE
FORCE:
diff --git a/mk/rte.install.mk b/mk/rte.install.mk
index e7ac4d5b..96144fbb 100644
--- a/mk/rte.install.mk
+++ b/mk/rte.install.mk
@@ -33,7 +33,6 @@
include $(RTE_SDK)/mk/internal/rte.install-pre.mk
include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
# VPATH contains at least SRCDIR
VPATH += $(SRCDIR)
@@ -55,4 +54,3 @@ doclean:
include $(RTE_SDK)/mk/internal/rte.install-post.mk
include $(RTE_SDK)/mk/internal/rte.clean-post.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
diff --git a/mk/rte.lib.mk b/mk/rte.lib.mk
index 33a5f5a1..bc2f2fbe 100644
--- a/mk/rte.lib.mk
+++ b/mk/rte.lib.mk
@@ -33,19 +33,26 @@ include $(RTE_SDK)/mk/internal/rte.compile-pre.mk
include $(RTE_SDK)/mk/internal/rte.install-pre.mk
include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
include $(RTE_SDK)/mk/internal/rte.build-pre.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
EXTLIB_BUILD ?= n
# VPATH contains at least SRCDIR
VPATH += $(SRCDIR)
+ifneq ($(CONFIG_RTE_MAJOR_ABI),)
+ifneq ($(LIBABIVER),)
+LIBABIVER := $(CONFIG_RTE_MAJOR_ABI)
+endif
+endif
+
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
LIB := $(patsubst %.a,%.so.$(LIBABIVER),$(LIB))
ifeq ($(EXTLIB_BUILD),n)
+ifeq ($(CONFIG_RTE_MAJOR_ABI),)
ifeq ($(CONFIG_RTE_NEXT_ABI),y)
LIB := $(LIB).1
endif
+endif
CPU_LDFLAGS += --version-script=$(SRCDIR)/$(EXPORT_MAP)
endif
endif
@@ -77,12 +84,12 @@ else
_CPU_LDFLAGS := $(CPU_LDFLAGS)
endif
-# Translate DEPDIRS-y into LDLIBS
+# Translate DEPDIRS into LDLIBS
# Ignore (sub)directory dependencies which do not provide an actual library
-_IGNORE_DIRS = lib/librte_eal/% lib/librte_compat
-_DEPDIRS = $(filter-out $(_IGNORE_DIRS),$(DEPDIRS-y))
+_IGNORE_DIRS = librte_eal/% librte_compat
+_DEPDIRS = $(filter-out $(_IGNORE_DIRS),$(DEPDIRS))
_LDDIRS = $(subst librte_ether,librte_ethdev,$(_DEPDIRS))
-LDLIBS += $(subst lib/lib,-l,$(_LDDIRS))
+LDLIBS += $(subst lib,-l,$(_LDDIRS))
O_TO_A = $(AR) crDs $(LIB) $(OBJS-y)
O_TO_A_STR = $(subst ','\'',$(O_TO_A)) #'# fix syntax highlight
@@ -156,11 +163,7 @@ $(RTE_OUTPUT)/lib/$(LIB): $(LIB)
@[ -d $(RTE_OUTPUT)/lib ] || mkdir -p $(RTE_OUTPUT)/lib
$(Q)cp -f $(LIB) $(RTE_OUTPUT)/lib
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
-ifeq ($(CONFIG_RTE_NEXT_ABI)$(EXTLIB_BUILD),yn)
- $(Q)ln -s -f $< $(basename $(basename $@))
-else
- $(Q)ln -s -f $< $(basename $@)
-endif
+ $(Q)ln -s -f $< $(shell echo $@ | sed 's/\.so.*/.so/')
endif
#
@@ -172,14 +175,13 @@ clean: _postclean
.PHONY: doclean
doclean:
$(Q)rm -rf $(LIB) $(OBJS-all) $(DEPS-all) $(DEPSTMP-all) \
- $(CMDS-all) $(INSTALL-FILES-all)
+ $(CMDS-all) .$(LIB).cmd $(INSTALL-FILES-all) *.pmd.c *.pmd.o
$(Q)rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS)
include $(RTE_SDK)/mk/internal/rte.compile-post.mk
include $(RTE_SDK)/mk/internal/rte.install-post.mk
include $(RTE_SDK)/mk/internal/rte.clean-post.mk
include $(RTE_SDK)/mk/internal/rte.build-post.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
.PHONY: FORCE
FORCE:
diff --git a/mk/rte.module.mk b/mk/rte.module.mk
index 53ed4fe9..3dd9ac78 100644
--- a/mk/rte.module.mk
+++ b/mk/rte.module.mk
@@ -43,7 +43,6 @@ else
include $(RTE_SDK)/mk/internal/rte.install-pre.mk
include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
include $(RTE_SDK)/mk/internal/rte.build-pre.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
# VPATH contains at least SRCDIR
VPATH += $(SRCDIR)
@@ -108,7 +107,6 @@ doclean:
include $(RTE_SDK)/mk/internal/rte.install-post.mk
include $(RTE_SDK)/mk/internal/rte.clean-post.mk
include $(RTE_SDK)/mk/internal/rte.build-post.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
.PHONY: FORCE
FORCE:
diff --git a/mk/rte.obj.mk b/mk/rte.obj.mk
index 5982227d..9336d5f8 100644
--- a/mk/rte.obj.mk
+++ b/mk/rte.obj.mk
@@ -33,7 +33,6 @@ include $(RTE_SDK)/mk/internal/rte.compile-pre.mk
include $(RTE_SDK)/mk/internal/rte.install-pre.mk
include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
include $(RTE_SDK)/mk/internal/rte.build-pre.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
# VPATH contains at least SRCDIR
VPATH += $(SRCDIR)
@@ -106,7 +105,6 @@ include $(RTE_SDK)/mk/internal/rte.compile-post.mk
include $(RTE_SDK)/mk/internal/rte.install-post.mk
include $(RTE_SDK)/mk/internal/rte.clean-post.mk
include $(RTE_SDK)/mk/internal/rte.build-post.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
.PHONY: FORCE
FORCE:
diff --git a/mk/rte.sdkbuild.mk b/mk/rte.sdkbuild.mk
index 23fcf1e1..0bf909e9 100644
--- a/mk/rte.sdkbuild.mk
+++ b/mk/rte.sdkbuild.mk
@@ -38,18 +38,10 @@ else
include $(RTE_SDK)/mk/rte.vars.mk
endif
-#
-# include .depdirs and define rules to order priorities between build
-# of directories.
-#
--include $(RTE_OUTPUT)/.depdirs
-
-define depdirs_rule
-$(1): $(sort $(LOCAL_DEPDIRS-$(1)))
-endef
-
-$(foreach d,$(ROOTDIRS-y),$(eval $(call depdirs_rule,$(d))))
-drivers: | buildtools
+buildtools: | lib
+drivers: | lib buildtools
+app: | lib buildtools drivers
+test: | lib buildtools drivers
#
# build and clean targets
@@ -67,14 +59,17 @@ clean: $(CLEANDIRS)
$(RTE_OUTPUT)/lib \
$(RTE_OUTPUT)/hostlib $(RTE_OUTPUT)/kmod
@[ -d $(RTE_OUTPUT)/include ] || mkdir -p $(RTE_OUTPUT)/include
- @$(RTE_SDK)/scripts/gen-config-h.sh $(RTE_OUTPUT)/.config \
+ @$(RTE_SDK)/buildtools/gen-config-h.sh $(RTE_OUTPUT)/.config \
> $(RTE_OUTPUT)/include/rte_config.h
$(Q)$(MAKE) -f $(RTE_SDK)/GNUmakefile gcovclean
@echo Clean complete
+.PHONY: test-build
+test-build: test
+
.SECONDEXPANSION:
-.PHONY: $(ROOTDIRS-y)
-$(ROOTDIRS-y):
+.PHONY: $(ROOTDIRS-y) $(ROOTDIRS-)
+$(ROOTDIRS-y) $(ROOTDIRS-):
@[ -d $(BUILDDIR)/$@ ] || mkdir -p $(BUILDDIR)/$@
@echo "== Build $@"
$(Q)$(MAKE) S=$@ -f $(RTE_SRCDIR)/$@/Makefile -C $(BUILDDIR)/$@ all
@@ -90,8 +85,8 @@ $(ROOTDIRS-y):
RTE_MAKE_SUBTARGET ?= all
-%_sub: $(addsuffix _sub,$(FULL_DEPDIRS-$(*)))
- @echo $(addsuffix _sub,$(FULL_DEPDIRS-$(*)))
+%_sub: $(addsuffix _sub,$(*))
+ @echo $(addsuffix _sub,$(*))
@[ -d $(BUILDDIR)/$* ] || mkdir -p $(BUILDDIR)/$*
@echo "== Build $*"
$(Q)$(MAKE) S=$* -f $(RTE_SRCDIR)/$*/Makefile -C $(BUILDDIR)/$* \
diff --git a/mk/rte.sdkconfig.mk b/mk/rte.sdkconfig.mk
index 5c94edf8..1f2d6bdf 100644
--- a/mk/rte.sdkconfig.mk
+++ b/mk/rte.sdkconfig.mk
@@ -69,7 +69,6 @@ ifeq ($(RTE_CONFIG_TEMPLATE),)
config: notemplate
else
config: $(RTE_OUTPUT)/include/rte_config.h $(RTE_OUTPUT)/Makefile
- $(Q)$(MAKE) depdirs
@echo "Configuration done"
endif
@@ -107,12 +106,12 @@ endif
# generate a Makefile for this build directory
# use a relative path so it will continue to work even if we move the directory
-SDK_RELPATH=$(shell $(RTE_SDK)/scripts/relpath.sh $(abspath $(RTE_SRCDIR)) \
+SDK_RELPATH=$(shell $(RTE_SDK)/buildtools/relpath.sh $(abspath $(RTE_SRCDIR)) \
$(abspath $(RTE_OUTPUT)))
-OUTPUT_RELPATH=$(shell $(RTE_SDK)/scripts/relpath.sh $(abspath $(RTE_OUTPUT)) \
+OUTPUT_RELPATH=$(shell $(RTE_SDK)/buildtools/relpath.sh $(abspath $(RTE_OUTPUT)) \
$(abspath $(RTE_SRCDIR)))
$(RTE_OUTPUT)/Makefile: | $(RTE_OUTPUT)
- $(Q)$(RTE_SDK)/scripts/gen-build-mk.sh $(SDK_RELPATH) $(OUTPUT_RELPATH) \
+ $(Q)$(RTE_SDK)/buildtools/gen-build-mk.sh $(SDK_RELPATH) $(OUTPUT_RELPATH) \
> $(RTE_OUTPUT)/Makefile
# clean installed files, and generate a new config header file
@@ -122,7 +121,7 @@ $(RTE_OUTPUT)/include/rte_config.h: $(RTE_OUTPUT)/.config
$(RTE_OUTPUT)/lib \
$(RTE_OUTPUT)/hostlib $(RTE_OUTPUT)/kmod $(RTE_OUTPUT)/build
$(Q)mkdir -p $(RTE_OUTPUT)/include
- $(Q)$(RTE_SDK)/scripts/gen-config-h.sh $(RTE_OUTPUT)/.config \
+ $(Q)$(RTE_SDK)/buildtools/gen-config-h.sh $(RTE_OUTPUT)/.config \
> $(RTE_OUTPUT)/include/rte_config.h
# generate the rte_config.h
@@ -140,7 +139,6 @@ checkconfig:
fi
$(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkconfig.mk \
headerconfig NODOTCONF=1
- $(Q)$(MAKE) -s depdirs
.PHONY: FORCE
FORCE:
diff --git a/mk/rte.sdkdepdirs.mk b/mk/rte.sdkdepdirs.mk
index bebaf2a2..1f27697b 100644
--- a/mk/rte.sdkdepdirs.mk
+++ b/mk/rte.sdkdepdirs.mk
@@ -35,30 +35,3 @@ endif
ifeq (,$(wildcard $(RTE_OUTPUT)/Makefile))
$(error "need a make config first")
endif
-
-# use a "for" in a shell to process dependencies: we don't want this
-# task to be run in parallel.
-.PHONY: depdirs
-depdirs: $(RTE_OUTPUT)/.depdirs
-$(RTE_OUTPUT)/.depdirs: $(RTE_OUTPUT)/.config
- @rm -f $(RTE_OUTPUT)/.depdirs ; \
- for d in $(ROOTDIRS-y); do \
- if [ -f $(RTE_SRCDIR)/$$d/Makefile ]; then \
- [ -d $(BUILDDIR)/$$d ] || mkdir -p $(BUILDDIR)/$$d ; \
- $(MAKE) S=$$d -f $(RTE_SRCDIR)/$$d/Makefile depdirs \
- >> $(RTE_OUTPUT)/.depdirs ; \
- fi ; \
- done
-
-.PHONY: depgraph
-depgraph:
- @echo "digraph unix {" ; \
- echo " size=\"6,6\";" ; \
- echo " node [color=lightblue2, style=filled];" ; \
- for d in $(ROOTDIRS-y); do \
- echo " \"root\" -> \"$$d\"" ; \
- if [ -f $(RTE_SRCDIR)/$$d/Makefile ]; then \
- $(MAKE) S=$$d -f $(RTE_SRCDIR)/$$d/Makefile depgraph ; \
- fi ; \
- done ; \
- echo "}"
diff --git a/mk/rte.sdkdoc.mk b/mk/rte.sdkdoc.mk
index 21d9bdf2..fb8f9155 100644
--- a/mk/rte.sdkdoc.mk
+++ b/mk/rte.sdkdoc.mk
@@ -54,6 +54,8 @@ RTE_PDF_DPI ?= 300
RTE_GUIDES := $(filter %/, $(wildcard $(RTE_SDK)/doc/guides/*/))
+API_EXAMPLES := $(RTE_OUTPUT)/doc/html/examples.dox
+
.PHONY: help
help:
@cat $(RTE_SDK)/doc/build-sdk-quick.txt
@@ -66,12 +68,13 @@ all: api-html guides-html guides-pdf
clean: api-html-clean guides-html-clean guides-pdf-clean guides-man-clean
.PHONY: api-html
-api-html: api-html-clean
+api-html: $(API_EXAMPLES)
@echo 'doxygen for API...'
$(Q)mkdir -p $(RTE_OUTPUT)/doc/html
$(Q)(cat $(RTE_SDK)/doc/api/doxy-api.conf && \
printf 'PROJECT_NUMBER = ' && \
$(MAKE) -rR showversion && \
+ echo INPUT += $(API_EXAMPLES) && \
echo OUTPUT_DIRECTORY = $(RTE_OUTPUT)/doc && \
echo HTML_OUTPUT = html/api && \
echo GENERATE_HTML = YES && \
@@ -82,9 +85,17 @@ api-html: api-html-clean
.PHONY: api-html-clean
api-html-clean:
+ $(Q)rm -f $(API_EXAMPLES)
$(Q)rm -f $(RTE_OUTPUT)/doc/html/api/*
$(Q)rmdir -p --ignore-fail-on-non-empty $(RTE_OUTPUT)/doc/html/api 2>&- || true
+$(API_EXAMPLES): api-html-clean
+ $(Q)mkdir -p $(@D)
+ @printf '/**\n' > $(API_EXAMPLES)
+ @printf '@page examples DPDK Example Programs\n\n' >> $(API_EXAMPLES)
+ @find examples -type f -name '*.c' -printf '@example %p\n' >> $(API_EXAMPLES)
+ @printf '*/\n' >> $(API_EXAMPLES)
+
guides-pdf-clean: guides-pdf-img-clean
guides-pdf-img-clean:
$(Q)rm -f $(RTE_SDK)/doc/guides/*/img/*.pdf
diff --git a/mk/rte.sdkinstall.mk b/mk/rte.sdkinstall.mk
index 7b0d8b52..dbac2a27 100644
--- a/mk/rte.sdkinstall.mk
+++ b/mk/rte.sdkinstall.mk
@@ -77,7 +77,7 @@ rte_mkdir = test -d $1 || mkdir -p $1
# Create the relative symbolic link $2 -> $1
# May be replaced with --relative option of ln from coreutils-8.16
-rte_symlink = ln -snf $$($(RTE_SDK)/scripts/relpath.sh $1 $(dir $2)) $2
+rte_symlink = ln -snf $$($(RTE_SDK)/buildtools/relpath.sh $1 $(dir $2)) $2
.PHONY: pre_install
pre_install:
@@ -124,15 +124,11 @@ install-runtime:
tar -xf - -C $(DESTDIR)$(bindir) --strip-components=1 \
--keep-newer-files
$(Q)$(call rte_mkdir, $(DESTDIR)$(datadir))
- $(Q)cp -a $(RTE_SDK)/tools $(DESTDIR)$(datadir)
- $(Q)$(call rte_symlink, $(DESTDIR)$(datadir)/tools/dpdk-setup.sh, \
- $(DESTDIR)$(datadir)/tools/setup.sh)
- $(Q)$(call rte_symlink, $(DESTDIR)$(datadir)/tools/dpdk-devbind.py, \
- $(DESTDIR)$(datadir)/tools/dpdk_nic_bind.py)
+ $(Q)cp -a $(RTE_SDK)/usertools $(DESTDIR)$(datadir)
$(Q)$(call rte_mkdir, $(DESTDIR)$(sbindir))
- $(Q)$(call rte_symlink, $(DESTDIR)$(datadir)/tools/dpdk-devbind.py, \
+ $(Q)$(call rte_symlink, $(DESTDIR)$(datadir)/usertools/dpdk-devbind.py, \
$(DESTDIR)$(sbindir)/dpdk-devbind)
- $(Q)$(call rte_symlink, $(DESTDIR)$(datadir)/tools/dpdk-pmdinfo.py, \
+ $(Q)$(call rte_symlink, $(DESTDIR)$(datadir)/usertools/dpdk-pmdinfo.py, \
$(DESTDIR)$(bindir)/dpdk-pmdinfo)
ifneq ($(wildcard $O/doc/man/*/*.1),)
$(Q)$(call rte_mkdir, $(DESTDIR)$(mandir)/man1)
@@ -156,7 +152,7 @@ install-sdk:
--keep-newer-files
$(Q)$(call rte_mkdir, $(DESTDIR)$(sdkdir))
$(Q)cp -a $(RTE_SDK)/mk $(DESTDIR)$(sdkdir)
- $(Q)cp -a $(RTE_SDK)/scripts $(DESTDIR)$(sdkdir)
+ $(Q)cp -a $(RTE_SDK)/buildtools $(DESTDIR)$(sdkdir)
$(Q)$(call rte_mkdir, $(DESTDIR)$(targetdir)/app)
$(Q)cp -a $O/.config $(DESTDIR)$(targetdir)
$(Q)cp -a $O/app/dpdk-pmdinfogen $(DESTDIR)$(targetdir)/app
diff --git a/mk/rte.sdkroot.mk b/mk/rte.sdkroot.mk
index 04ad523b..2843b7de 100644
--- a/mk/rte.sdkroot.mk
+++ b/mk/rte.sdkroot.mk
@@ -92,10 +92,16 @@ default: all
config showconfigs showversion showversionum:
$(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkconfig.mk $@
-.PHONY: test fast_test ring_test mempool_test perf_test coverage
-test fast_test ring_test mempool_test perf_test coverage:
+.PHONY: cscope gtags tags etags
+cscope gtags tags etags:
+ $(Q)$(RTE_SDK)/devtools/build-tags.sh $@ $T
+
+.PHONY: test test-basic test-fast test-ring test-mempool test-perf coverage
+test test-basic test-fast test-ring test-mempool test-perf coverage:
$(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdktest.mk $@
+test: test-build
+
.PHONY: install
install:
$(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkinstall.mk pre_install
@@ -109,10 +115,6 @@ help: doc-help
doc-%:
$(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdoc.mk $*
-.PHONY: depdirs depgraph
-depdirs depgraph:
- $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdepdirs.mk $@
-
.PHONY: gcov gcovclean
gcov gcovclean:
$(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkgcov.mk $@
diff --git a/mk/rte.sdktest.mk b/mk/rte.sdktest.mk
index ddbbbf6b..daeea90f 100644
--- a/mk/rte.sdktest.mk
+++ b/mk/rte.sdktest.mk
@@ -46,23 +46,23 @@ DIR := $(shell basename $(RTE_OUTPUT))
#
# test: launch auto-tests, very simple for now.
#
-.PHONY: test fast_test perf_test coverage
+.PHONY: test test-basic test-fast test-perf coverage
PERFLIST=ring_perf,mempool_perf,memcpy_perf,hash_perf,timer_perf
coverage: BLACKLIST=-$(PERFLIST)
-fast_test: BLACKLIST=-$(PERFLIST)
-perf_test: WHITELIST=$(PERFLIST)
+test-fast: BLACKLIST=-$(PERFLIST)
+test-perf: WHITELIST=$(PERFLIST)
-test fast_test perf_test:
+test test-basic test-fast test-perf:
@mkdir -p $(AUTOTEST_DIR) ; \
cd $(AUTOTEST_DIR) ; \
if [ -f $(RTE_OUTPUT)/app/test ]; then \
- python $(RTE_SDK)/app/test/autotest.py \
+ python $(RTE_SDK)/test/test/autotest.py \
$(RTE_OUTPUT)/app/test \
$(RTE_TARGET) \
$(BLACKLIST) $(WHITELIST); \
else \
- echo "No test found, please do a 'make build' first, or specify O=" ; \
+ echo "No test found, please do a 'make test-build' first, or specify O=" ; \
fi
# this is a special target to ease the pain of running coverage tests
@@ -71,14 +71,14 @@ coverage:
@mkdir -p $(AUTOTEST_DIR) ; \
cd $(AUTOTEST_DIR) ; \
if [ -f $(RTE_OUTPUT)/app/test ]; then \
- python $(RTE_SDK)/app/cmdline_test/cmdline_test.py \
+ python $(RTE_SDK)/test/cmdline_test/cmdline_test.py \
$(RTE_OUTPUT)/app/cmdline_test; \
ulimit -S -n 100 ; \
- python $(RTE_SDK)/app/test/autotest.py \
+ python $(RTE_SDK)/test/test/autotest.py \
$(RTE_OUTPUT)/app/test \
$(RTE_TARGET) \
$(BLACKLIST) $(WHITELIST) ; \
$(RTE_OUTPUT)/app/dpdk-procinfo --file-prefix=ring_perf -- -m; \
else \
- echo "No test found, please do a 'make build' first, or specify O=" ;\
+ echo "No test found, please do a 'make test-build' first, or specify O=" ;\
fi
diff --git a/mk/rte.shared.mk b/mk/rte.shared.mk
index fc6b0b44..87ccf0ba 100644
--- a/mk/rte.shared.mk
+++ b/mk/rte.shared.mk
@@ -32,7 +32,6 @@ include $(RTE_SDK)/mk/internal/rte.compile-pre.mk
include $(RTE_SDK)/mk/internal/rte.install-pre.mk
include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
include $(RTE_SDK)/mk/internal/rte.build-pre.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
# VPATH contains at least SRCDIR
VPATH += $(SRCDIR)
@@ -131,7 +130,6 @@ include $(RTE_SDK)/mk/internal/rte.compile-post.mk
include $(RTE_SDK)/mk/internal/rte.install-post.mk
include $(RTE_SDK)/mk/internal/rte.clean-post.mk
include $(RTE_SDK)/mk/internal/rte.build-post.mk
-include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
.PHONY: FORCE
FORCE:
diff --git a/mk/rte.subdir.mk b/mk/rte.subdir.mk
index 256e64e7..92f5de4c 100644
--- a/mk/rte.subdir.mk
+++ b/mk/rte.subdir.mk
@@ -37,6 +37,8 @@ include $(RTE_SDK)/mk/internal/rte.install-pre.mk
include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
include $(RTE_SDK)/mk/internal/rte.build-pre.mk
+ALL_DEPDIRS := $(patsubst DEPDIRS-%,%,$(filter DEPDIRS-%,$(.VARIABLES)))
+
CLEANDIRS = $(addsuffix _clean,$(DIRS-y) $(DIRS-n) $(DIRS-))
VPATH += $(SRCDIR)
@@ -60,7 +62,8 @@ build: _postbuild
$(DIRS-y):
@[ -d $(CURDIR)/$@ ] || mkdir -p $(CURDIR)/$@
@echo "== Build $S/$@"
- @$(MAKE) S=$S/$@ -f $(SRCDIR)/$@/Makefile -C $(CURDIR)/$@ all
+ @$(MAKE) S=$S/$@ -f $(SRCDIR)/$@/Makefile -C $(CURDIR)/$@ \
+ DEPDIRS="$(DEPDIRS-$@)" all
.PHONY: clean
clean: _postclean
@@ -72,37 +75,16 @@ clean: _postclean
fi
@rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS)
-#
-# include .depdirs and define rules to order priorities between build
-# of directories.
-#
-include $(RTE_OUTPUT)/.depdirs
-
define depdirs_rule
-$(1): $(sort $(patsubst $(S)/%,%,$(LOCAL_DEPDIRS-$(S)/$(1))))
+$(DEPDIRS-$(1)):
+
+$(1): | $(DEPDIRS-$(1))
+
+$(if $(D),$(info $(1) depends on $(DEPDIRS-$(1))))
endef
-$(foreach d,$(DIRS-y),$(eval $(call depdirs_rule,$(d))))
-
-
-# use a "for" in a shell to process dependencies: we don't want this
-# task to be run in parallel.
-.PHONY: depdirs
-depdirs:
- @for d in $(DIRS-y); do \
- if [ -f $(SRCDIR)/$$d/Makefile ]; then \
- $(MAKE) S=$S/$$d -f $(SRCDIR)/$$d/Makefile depdirs ; \
- fi ; \
- done
-
-.PHONY: depgraph
-depgraph:
- @for d in $(DIRS-y); do \
- echo " \"$(S)\" -> \"$(S)/$$d\"" ; \
- if [ -f $(SRCDIR)/$$d/Makefile ]; then \
- $(MAKE) S=$S/$$d -f $(SRCDIR)/$$d/Makefile depgraph ; \
- fi ; \
- done
+$(foreach dir,$(ALL_DEPDIRS),\
+ $(eval $(call depdirs_rule,$(dir))))
include $(RTE_SDK)/mk/internal/rte.install-post.mk
include $(RTE_SDK)/mk/internal/rte.clean-post.mk
diff --git a/mk/target/generic/rte.vars.mk b/mk/target/generic/rte.vars.mk
index b31e4266..5d22a6a6 100644
--- a/mk/target/generic/rte.vars.mk
+++ b/mk/target/generic/rte.vars.mk
@@ -125,9 +125,6 @@ LDFLAGS += -L$(RTE_OUTPUT)/lib
ifeq ($(BUILDING_RTE_SDK),1)
# building sdk
CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
-ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y)
-CFLAGS += -include rte_warnings.h
-endif
else
# if we are building an external application, include SDK's lib and
# includes too
@@ -136,9 +133,6 @@ ifneq ($(wildcard $(RTE_OUTPUT)/include/rte_config.h),)
CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
endif
CFLAGS += -include $(RTE_SDK_BIN)/include/rte_config.h
-ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y)
-CFLAGS += -include rte_warnings.h
-endif
LDFLAGS += -L$(RTE_SDK_BIN)/lib
endif
diff --git a/mk/toolchain/clang/rte.toolchain-compat.mk b/mk/toolchain/clang/rte.toolchain-compat.mk
index b734413b..9e095d38 100644
--- a/mk/toolchain/clang/rte.toolchain-compat.mk
+++ b/mk/toolchain/clang/rte.toolchain-compat.mk
@@ -38,7 +38,8 @@
# find out CLANG version
-CLANG_VERSION := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/")
+CLANG_VERSION := $(shell $(CC) -v 2>&1 | \
+ sed -n "s/.*version \([0-9]*\.[0-9]*\).*/\1/p")
CLANG_MAJOR_VERSION := $(shell echo $(CLANG_VERSION) | cut -f1 -d.)
diff --git a/mk/toolchain/clang/rte.vars.mk b/mk/toolchain/clang/rte.vars.mk
index 7749b991..af34c10a 100644
--- a/mk/toolchain/clang/rte.vars.mk
+++ b/mk/toolchain/clang/rte.vars.mk
@@ -79,5 +79,10 @@ include $(RTE_SDK)/mk/toolchain/$(RTE_TOOLCHAIN)/rte.toolchain-compat.mk
# workaround clang bug with warning "missing field initializer" for "= {0}"
WERROR_FLAGS += -Wno-missing-field-initializers
+# disable packed member unalign warnings
+ifeq ($(shell test $(CLANG_MAJOR_VERSION) -ge 4 && echo 1), 1)
+WERROR_FLAGS += -Wno-address-of-packed-member
+endif
+
export CC AS AR LD OBJCOPY OBJDUMP STRIP READELF
export TOOLCHAIN_CFLAGS TOOLCHAIN_LDFLAGS TOOLCHAIN_ASFLAGS
diff --git a/mk/toolchain/gcc/rte.toolchain-compat.mk b/mk/toolchain/gcc/rte.toolchain-compat.mk
index 6eed20cc..280dde2a 100644
--- a/mk/toolchain/gcc/rte.toolchain-compat.mk
+++ b/mk/toolchain/gcc/rte.toolchain-compat.mk
@@ -89,4 +89,9 @@ else
ifeq ($(shell test $(GCC_VERSION) -lt 42 && echo 1), 1)
MACHINE_CFLAGS := $(filter-out -march% -mtune% -msse%,$(MACHINE_CFLAGS))
endif
+
+ # Disable thunderx PMD for gcc < 4.7
+ ifeq ($(shell test $(GCC_VERSION) -lt 47 && echo 1), 1)
+ CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=d
+ endif
endif
diff --git a/mk/toolchain/gcc/rte.vars.mk b/mk/toolchain/gcc/rte.vars.mk
index ff70f3d9..3834e00c 100644
--- a/mk/toolchain/gcc/rte.vars.mk
+++ b/mk/toolchain/gcc/rte.vars.mk
@@ -81,9 +81,9 @@ ifeq ($(RTE_DEVEL_BUILD),y)
WERROR_FLAGS += -Werror
endif
-# There are many issues reported for ARMv7 architecture
+# There are many issues reported for strict alignment architectures
# which are not necessarily fatal. Report as warnings.
-ifeq ($(CONFIG_RTE_ARCH_ARMv7),y)
+ifeq ($(CONFIG_RTE_ARCH_STRICT_ALIGN),y)
WERROR_FLAGS += -Wno-error=cast-align
endif
@@ -99,5 +99,12 @@ ifeq ($(shell test $(GCC_VERSION) -lt 47 && echo 1), 1)
WERROR_FLAGS += -Wno-uninitialized
endif
+ifeq ($(shell test $(GCC_VERSION) -gt 70 && echo 1), 1)
+# Tell GCC only to error for switch fallthroughs without a suitable comment
+WERROR_FLAGS += -Wimplicit-fallthrough=2
+# Ignore errors for snprintf truncation
+WERROR_FLAGS += -Wno-format-truncation
+endif
+
export CC AS AR LD OBJCOPY OBJDUMP STRIP READELF
export TOOLCHAIN_CFLAGS TOOLCHAIN_LDFLAGS TOOLCHAIN_ASFLAGS
diff --git a/mk/toolchain/icc/rte.toolchain-compat.mk b/mk/toolchain/icc/rte.toolchain-compat.mk
index 41344668..88f1ac92 100644
--- a/mk/toolchain/icc/rte.toolchain-compat.mk
+++ b/mk/toolchain/icc/rte.toolchain-compat.mk
@@ -72,4 +72,9 @@ else
# remove march options
MACHINE_CFLAGS := $(patsubst -march=%,-xSSE3,$(MACHINE_CFLAGS))
endif
+
+ # Disable thunderx PMD for icc <= 16.0
+ ifeq ($(shell test $(ICC_MAJOR_VERSION) -le 16 && echo 1), 1)
+ CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=d
+ endif
endif
diff --git a/mk/toolchain/icc/rte.vars.mk b/mk/toolchain/icc/rte.vars.mk
index ba69f1f8..dd336451 100644
--- a/mk/toolchain/icc/rte.vars.mk
+++ b/mk/toolchain/icc/rte.vars.mk
@@ -69,8 +69,11 @@ TOOLCHAIN_ASFLAGS =
# error #13368: loop was not vectorized with "vector always assert"
# error #15527: loop was not vectorized: function call to fprintf cannot be vectorize
# was declared "deprecated"
+# Warning #11074, 11076: to prevent "inline-max-size" warnings.
WERROR_FLAGS := -Wall -w2 -diag-disable 271 -diag-warning 1478
WERROR_FLAGS += -diag-disable 13368 -diag-disable 15527
+WERROR_FLAGS += -diag-disable 188
+WERROR_FLAGS += -diag-disable 11074 -diag-disable 11076
ifeq ($(RTE_DEVEL_BUILD),y)
WERROR_FLAGS += -Werror-all
@@ -78,10 +81,6 @@ endif
# process cpu flags
include $(RTE_SDK)/mk/toolchain/$(RTE_TOOLCHAIN)/rte.toolchain-compat.mk
-# disable max-inline params boundaries for ICC compiler for version 15 and greater
-ifeq ($(shell test $(ICC_MAJOR_VERSION) -ge 14 && echo 1), 1)
- TOOLCHAIN_CFLAGS += -no-inline-max-size -no-inline-max-total-size
-endif
export CC AS AR LD OBJCOPY OBJDUMP STRIP READELF
export TOOLCHAIN_CFLAGS TOOLCHAIN_LDFLAGS TOOLCHAIN_ASFLAGS
diff --git a/pkg/dpdk.spec b/pkg/dpdk.spec
index 134c2b41..e38e2a2d 100644
--- a/pkg/dpdk.spec
+++ b/pkg/dpdk.spec
@@ -30,7 +30,7 @@
# OF THE POSSIBILITY OF SUCH DAMAGE.
Name: dpdk
-Version: 16.11.1
+Version: 17.05
Release: 1
Packager: packaging@6wind.com
URL: http://dpdk.org
@@ -40,12 +40,21 @@ Summary: Data Plane Development Kit core
Group: System Environment/Libraries
License: BSD and LGPLv2 and GPLv2
-ExclusiveArch: i686, x86_64
+ExclusiveArch: i686 x86_64 aarch64
+%ifarch aarch64
+%global machine armv8a
+%global target arm64-%{machine}-linuxapp-gcc
+%global config arm64-%{machine}-linuxapp-gcc
+%else
%global machine default
%global target %{_arch}-%{machine}-linuxapp-gcc
%global config %{_arch}-native-linuxapp-gcc
+%endif
-BuildRequires: kernel-devel, kernel-headers, libpcap-devel, xen-devel
+BuildRequires: kernel-devel, kernel-headers, libpcap-devel
+%ifarch i686 x86_64
+BuildRequires: xen-devel
+%endif
BuildRequires: doxygen, python-sphinx, inkscape
BuildRequires: texlive-collection-latexextra
@@ -81,7 +90,9 @@ sed -ri 's,(RTE_BUILD_SHARED_LIB=).*,\1y,' %{target}/.config
sed -ri 's,(RTE_NEXT_ABI=).*,\1n,' %{target}/.config
sed -ri 's,(LIBRTE_VHOST=).*,\1y,' %{target}/.config
sed -ri 's,(LIBRTE_PMD_PCAP=).*,\1y,' %{target}/.config
+%ifarch i686 x86_64
sed -ri 's,(LIBRTE_PMD_XENVIRT=).*,\1y,' %{target}/.config
+%endif
make O=%{target} %{?_smp_mflags}
make O=%{target} doc
@@ -94,7 +105,7 @@ make install O=%{target} DESTDIR=%{buildroot} \
%files
%dir %{_datadir}/dpdk
-%{_datadir}/dpdk/tools
+%{_datadir}/dpdk/usertools
/lib/modules/%(uname -r)/extra/*
%{_sbindir}/*
%{_bindir}/*
@@ -103,7 +114,7 @@ make install O=%{target} DESTDIR=%{buildroot} \
%files devel
%{_includedir}/dpdk
%{_datadir}/dpdk/mk
-%{_datadir}/dpdk/scripts
+%{_datadir}/dpdk/buildtools
%{_datadir}/dpdk/%{target}
%{_datadir}/dpdk/examples
diff --git a/mk/internal/rte.depdirs-post.mk b/test/Makefile
index fc6904dd..e996fd8c 100644
--- a/mk/internal/rte.depdirs-post.mk
+++ b/test/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -29,14 +29,11 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-.PHONY: depdirs
-depdirs:
- @for d in $(DEPDIRS-y); do \
- $(RTE_SDK)/scripts/depdirs-rule.sh $(S) $$d ; \
- done
+include $(RTE_SDK)/mk/rte.vars.mk
-.PHONY: depgraph
-depgraph:
- @for d in $(DEPDIRS-y); do \
- echo " \"$(S)\" -> \"$$d\"" ; \
- done
+DIRS-$(CONFIG_RTE_APP_TEST) += test
+DIRS-$(CONFIG_RTE_LIBRTE_ACL) += test-acl
+DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += test-pipeline
+DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_test
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/app/cmdline_test/Makefile b/test/cmdline_test/Makefile
index c6169f56..e9eafd2d 100644
--- a/app/cmdline_test/Makefile
+++ b/test/cmdline_test/Makefile
@@ -47,9 +47,6 @@ SRCS-y += commands.c
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-# this application needs libraries first
-DEPDIRS-y += lib drivers
-
include $(RTE_SDK)/mk/rte.app.mk
endif
diff --git a/app/cmdline_test/cmdline_test.c b/test/cmdline_test/cmdline_test.c
index 716b5f16..716b5f16 100644
--- a/app/cmdline_test/cmdline_test.c
+++ b/test/cmdline_test/cmdline_test.c
diff --git a/app/cmdline_test/cmdline_test.h b/test/cmdline_test/cmdline_test.h
index 1c9af122..1c9af122 100644
--- a/app/cmdline_test/cmdline_test.h
+++ b/test/cmdline_test/cmdline_test.h
diff --git a/app/cmdline_test/cmdline_test.py b/test/cmdline_test/cmdline_test.py
index 8efc5ead..229f71f3 100755
--- a/app/cmdline_test/cmdline_test.py
+++ b/test/cmdline_test/cmdline_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# BSD LICENSE
#
@@ -32,17 +32,22 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Script that runs cmdline_test app and feeds keystrokes into it.
+from __future__ import print_function
+import cmdline_test_data
+import os
+import pexpect
+import sys
-import sys, pexpect, string, os, cmdline_test_data
#
# function to run test
#
-def runTest(child,test):
- child.send(test["Sequence"])
- if test["Result"] == None:
- return 0
- child.expect(test["Result"],1)
+def runTest(child, test):
+ child.send(test["Sequence"])
+ if test["Result"] is None:
+ return 0
+ child.expect(test["Result"], 1)
+
#
# history test is a special case
@@ -57,57 +62,57 @@ def runTest(child,test):
# This is a self-contained test, it needs only a pexpect child
#
def runHistoryTest(child):
- # find out history size
- child.sendline(cmdline_test_data.CMD_GET_BUFSIZE)
- child.expect("History buffer size: \\d+", timeout=1)
- history_size = int(child.after[len(cmdline_test_data.BUFSIZE_TEMPLATE):])
- i = 0
+ # find out history size
+ child.sendline(cmdline_test_data.CMD_GET_BUFSIZE)
+ child.expect("History buffer size: \\d+", timeout=1)
+ history_size = int(child.after[len(cmdline_test_data.BUFSIZE_TEMPLATE):])
+ i = 0
- # fill the history with numbers
- while i < history_size / 10:
- # add 1 to prevent from parsing as octals
- child.send("1" + str(i).zfill(8) + cmdline_test_data.ENTER)
- # the app will simply print out the number
- child.expect(str(i + 100000000), timeout=1)
- i += 1
- # scroll back history
- child.send(cmdline_test_data.UP * (i + 2) + cmdline_test_data.ENTER)
- child.expect("100000000", timeout=1)
+ # fill the history with numbers
+ while i < history_size / 10:
+ # add 1 to prevent from parsing as octals
+ child.send("1" + str(i).zfill(8) + cmdline_test_data.ENTER)
+ # the app will simply print out the number
+ child.expect(str(i + 100000000), timeout=1)
+ i += 1
+ # scroll back history
+ child.send(cmdline_test_data.UP * (i + 2) + cmdline_test_data.ENTER)
+ child.expect("100000000", timeout=1)
# the path to cmdline_test executable is supplied via command-line.
if len(sys.argv) < 2:
- print "Error: please supply cmdline_test app path"
- sys.exit(1)
+ print("Error: please supply cmdline_test app path")
+ sys.exit(1)
test_app_path = sys.argv[1]
if not os.path.exists(test_app_path):
- print "Error: please supply cmdline_test app path"
- sys.exit(1)
+ print("Error: please supply cmdline_test app path")
+ sys.exit(1)
child = pexpect.spawn(test_app_path)
-print "Running command-line tests..."
+print("Running command-line tests...")
for test in cmdline_test_data.tests:
- print (test["Name"] + ":").ljust(30),
- try:
- runTest(child,test)
- print "PASS"
- except:
- print "FAIL"
- print child
- sys.exit(1)
+ testname = (test["Name"] + ":").ljust(30)
+ try:
+ runTest(child, test)
+ print(testname, "PASS")
+ except:
+ print(testname, "FAIL")
+ print(child)
+ sys.exit(1)
# since last test quits the app, run new instance
child = pexpect.spawn(test_app_path)
-print ("History fill test:").ljust(30),
+testname = ("History fill test:").ljust(30)
try:
- runHistoryTest(child)
- print "PASS"
+ runHistoryTest(child)
+ print(testname, "PASS")
except:
- print "FAIL"
- print child
- sys.exit(1)
+ print(testname, "FAIL")
+ print(child)
+ sys.exit(1)
child.close()
sys.exit(0)
diff --git a/test/cmdline_test/cmdline_test_data.py b/test/cmdline_test/cmdline_test_data.py
new file mode 100644
index 00000000..28dfefe1
--- /dev/null
+++ b/test/cmdline_test/cmdline_test_data.py
@@ -0,0 +1,310 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# collection of static data
+
+# keycode constants
+CTRL_A = chr(1)
+CTRL_B = chr(2)
+CTRL_C = chr(3)
+CTRL_D = chr(4)
+CTRL_E = chr(5)
+CTRL_F = chr(6)
+CTRL_K = chr(11)
+CTRL_L = chr(12)
+CTRL_N = chr(14)
+CTRL_P = chr(16)
+CTRL_W = chr(23)
+CTRL_Y = chr(25)
+ALT_B = chr(27) + chr(98)
+ALT_D = chr(27) + chr(100)
+ALT_F = chr(27) + chr(102)
+ALT_BKSPACE = chr(27) + chr(127)
+DEL = chr(27) + chr(91) + chr(51) + chr(126)
+TAB = chr(9)
+HELP = chr(63)
+BKSPACE = chr(127)
+RIGHT = chr(27) + chr(91) + chr(67)
+DOWN = chr(27) + chr(91) + chr(66)
+LEFT = chr(27) + chr(91) + chr(68)
+UP = chr(27) + chr(91) + chr(65)
+ENTER2 = '\r'
+ENTER = '\n'
+
+# expected result constants
+NOT_FOUND = "Command not found"
+BAD_ARG = "Bad arguments"
+AMBIG = "Ambiguous command"
+CMD1 = "Command 1 parsed!"
+CMD2 = "Command 2 parsed!"
+SINGLE = "Single word command parsed!"
+SINGLE_LONG = "Single long word command parsed!"
+AUTO1 = "Autocomplete command 1 parsed!"
+AUTO2 = "Autocomplete command 2 parsed!"
+
+# misc defines
+CMD_QUIT = "quit"
+CMD_GET_BUFSIZE = "get_history_bufsize"
+BUFSIZE_TEMPLATE = "History buffer size: "
+PROMPT = "CMDLINE_TEST>>"
+
+# test defines
+# each test tests progressively diverse set of keys. this way for example
+# if we want to use some key sequence in the test, we first need to test
+# that it itself does what it is expected to do. Most of the tests are
+# designed that way.
+#
+# example: "arrows & delete test 1". we enter a partially valid command,
+# then move 3 chars left and use delete three times. this way we get to
+# know that "delete", "left" and "ctrl+B" all work (because if any of
+# them fails, the whole test will fail and next tests won't be run).
+#
+# each test consists of name, character sequence to send to child,
+# and expected output (if any).
+
+tests = [
+ # test basic commands
+ {"Name": "command test 1",
+ "Sequence": "ambiguous first" + ENTER,
+ "Result": CMD1},
+ {"Name": "command test 2",
+ "Sequence": "ambiguous second" + ENTER,
+ "Result": CMD2},
+ {"Name": "command test 3",
+ "Sequence": "ambiguous ambiguous" + ENTER,
+ "Result": AMBIG},
+ {"Name": "command test 4",
+ "Sequence": "ambiguous ambiguous2" + ENTER,
+ "Result": AMBIG},
+
+ {"Name": "invalid command test 1",
+ "Sequence": "ambiguous invalid" + ENTER,
+ "Result": BAD_ARG},
+ # test invalid commands
+ {"Name": "invalid command test 2",
+ "Sequence": "invalid" + ENTER,
+ "Result": NOT_FOUND},
+ {"Name": "invalid command test 3",
+ "Sequence": "ambiguousinvalid" + ENTER2,
+ "Result": NOT_FOUND},
+
+ # test arrows and deletes
+ {"Name": "arrows & delete test 1",
+ "Sequence": "singlebad" + LEFT*2 + CTRL_B + DEL*3 + ENTER,
+ "Result": SINGLE},
+ {"Name": "arrows & delete test 2",
+ "Sequence": "singlebad" + LEFT*5 + RIGHT + CTRL_F + DEL*3 + ENTER,
+ "Result": SINGLE},
+
+ # test backspace
+ {"Name": "backspace test",
+ "Sequence": "singlebad" + BKSPACE*3 + ENTER,
+ "Result": SINGLE},
+
+ # test goto left and goto right
+ {"Name": "goto left test",
+ "Sequence": "biguous first" + CTRL_A + "am" + ENTER,
+ "Result": CMD1},
+ {"Name": "goto right test",
+ "Sequence": "biguous fir" + CTRL_A + "am" + CTRL_E + "st" + ENTER,
+ "Result": CMD1},
+
+ # test goto words
+ {"Name": "goto left word test",
+ "Sequence": "ambiguous st" + ALT_B + "fir" + ENTER,
+ "Result": CMD1},
+ {"Name": "goto right word test",
+ "Sequence": "ambig first" + CTRL_A + ALT_F + "uous" + ENTER,
+ "Result": CMD1},
+
+ # test removing words
+ {"Name": "remove left word 1",
+ "Sequence": "single invalid" + CTRL_W + ENTER,
+ "Result": SINGLE},
+ {"Name": "remove left word 2",
+ "Sequence": "single invalid" + ALT_BKSPACE + ENTER,
+ "Result": SINGLE},
+ {"Name": "remove right word",
+ "Sequence": "single invalid" + ALT_B + ALT_D + ENTER,
+ "Result": SINGLE},
+
+ # test kill buffer (copy and paste)
+ {"Name": "killbuffer test 1",
+ "Sequence": "ambiguous" + CTRL_A + CTRL_K + " first" + CTRL_A +
+ CTRL_Y + ENTER,
+ "Result": CMD1},
+ {"Name": "killbuffer test 2",
+ "Sequence": "ambiguous" + CTRL_A + CTRL_K + CTRL_Y*26 + ENTER,
+ "Result": NOT_FOUND},
+
+ # test newline
+ {"Name": "newline test",
+ "Sequence": "invalid" + CTRL_C + "single" + ENTER,
+ "Result": SINGLE},
+
+ # test redisplay (nothing should really happen)
+ {"Name": "redisplay test",
+ "Sequence": "single" + CTRL_L + ENTER,
+ "Result": SINGLE},
+
+ # test autocomplete
+ {"Name": "autocomplete test 1",
+ "Sequence": "si" + TAB + ENTER,
+ "Result": SINGLE},
+ {"Name": "autocomplete test 2",
+ "Sequence": "si" + TAB + "_" + TAB + ENTER,
+ "Result": SINGLE_LONG},
+ {"Name": "autocomplete test 3",
+ "Sequence": "in" + TAB + ENTER,
+ "Result": NOT_FOUND},
+ {"Name": "autocomplete test 4",
+ "Sequence": "am" + TAB + ENTER,
+ "Result": BAD_ARG},
+ {"Name": "autocomplete test 5",
+ "Sequence": "am" + TAB + "fir" + TAB + ENTER,
+ "Result": CMD1},
+ {"Name": "autocomplete test 6",
+ "Sequence": "am" + TAB + "fir" + TAB + TAB + ENTER,
+ "Result": CMD1},
+ {"Name": "autocomplete test 7",
+ "Sequence": "am" + TAB + "fir" + TAB + " " + TAB + ENTER,
+ "Result": CMD1},
+ {"Name": "autocomplete test 8",
+ "Sequence": "am" + TAB + " am" + TAB + " " + ENTER,
+ "Result": AMBIG},
+ {"Name": "autocomplete test 9",
+ "Sequence": "am" + TAB + "inv" + TAB + ENTER,
+ "Result": BAD_ARG},
+ {"Name": "autocomplete test 10",
+ "Sequence": "au" + TAB + ENTER,
+ "Result": NOT_FOUND},
+ {"Name": "autocomplete test 11",
+ "Sequence": "au" + TAB + "1" + ENTER,
+ "Result": AUTO1},
+ {"Name": "autocomplete test 12",
+ "Sequence": "au" + TAB + "2" + ENTER,
+ "Result": AUTO2},
+ {"Name": "autocomplete test 13",
+ "Sequence": "au" + TAB + "2" + TAB + ENTER,
+ "Result": AUTO2},
+ {"Name": "autocomplete test 14",
+ "Sequence": "au" + TAB + "2 " + TAB + ENTER,
+ "Result": AUTO2},
+ {"Name": "autocomplete test 15",
+ "Sequence": "24" + TAB + ENTER,
+ "Result": "24"},
+
+ # test history
+ {"Name": "history test 1",
+ "Sequence": "invalid" + ENTER + "single" + ENTER + "invalid" +
+ ENTER + UP + CTRL_P + ENTER,
+ "Result": SINGLE},
+ {"Name": "history test 2",
+ "Sequence": "invalid" + ENTER + "ambiguous first" + ENTER + "invalid" +
+ ENTER + "single" + ENTER + UP * 3 + CTRL_N + DOWN + ENTER,
+ "Result": SINGLE},
+
+ #
+ # tests that improve coverage
+ #
+
+ # empty space tests
+ {"Name": "empty space test 1",
+ "Sequence": RIGHT + LEFT + CTRL_B + CTRL_F + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 2",
+ "Sequence": BKSPACE + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 3",
+ "Sequence": CTRL_E*2 + CTRL_A*2 + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 4",
+ "Sequence": ALT_F*2 + ALT_B*2 + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 5",
+ "Sequence": " " + CTRL_E*2 + CTRL_A*2 + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 6",
+ "Sequence": " " + CTRL_A + ALT_F*2 + ALT_B*2 + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 7",
+ "Sequence": " " + CTRL_A + CTRL_D + CTRL_E + CTRL_D + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 8",
+ "Sequence": " space" + CTRL_W*2 + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 9",
+ "Sequence": " space" + ALT_BKSPACE*2 + ENTER,
+ "Result": PROMPT},
+ {"Name": "empty space test 10",
+ "Sequence": " space " + CTRL_A + ALT_D*3 + ENTER,
+ "Result": PROMPT},
+
+ # non-printable char tests
+ {"Name": "non-printable test 1",
+ "Sequence": chr(27) + chr(47) + ENTER,
+ "Result": PROMPT},
+ {"Name": "non-printable test 2",
+ "Sequence": chr(27) + chr(128) + ENTER*7,
+ "Result": PROMPT},
+ {"Name": "non-printable test 3",
+ "Sequence": chr(27) + chr(91) + chr(127) + ENTER*6,
+ "Result": PROMPT},
+
+ # miscellaneous tests
+ {"Name": "misc test 1",
+ "Sequence": ENTER,
+ "Result": PROMPT},
+ {"Name": "misc test 2",
+ "Sequence": "single #comment" + ENTER,
+ "Result": SINGLE},
+ {"Name": "misc test 3",
+ "Sequence": "#empty line" + ENTER,
+ "Result": PROMPT},
+ {"Name": "misc test 4",
+ "Sequence": " single " + ENTER,
+ "Result": SINGLE},
+ {"Name": "misc test 5",
+ "Sequence": "single#" + ENTER,
+ "Result": SINGLE},
+ {"Name": "misc test 6",
+ "Sequence": 'a' * 257 + ENTER,
+ "Result": NOT_FOUND},
+ {"Name": "misc test 7",
+ "Sequence": "clear_history" + UP*5 + DOWN*5 + ENTER,
+ "Result": PROMPT},
+ {"Name": "misc test 8",
+ "Sequence": "a" + HELP + CTRL_C,
+ "Result": PROMPT},
+ {"Name": "misc test 9",
+ "Sequence": CTRL_D*3,
+ "Result": None},
+]
diff --git a/app/cmdline_test/commands.c b/test/cmdline_test/commands.c
index 404f51af..404f51af 100644
--- a/app/cmdline_test/commands.c
+++ b/test/cmdline_test/commands.c
diff --git a/app/test-acl/Makefile b/test/test-acl/Makefile
index 43dfdcbd..29de80a3 100644
--- a/app/test-acl/Makefile
+++ b/test/test-acl/Makefile
@@ -40,9 +40,6 @@ CFLAGS += $(WERROR_FLAGS)
# all source are stored in SRCS-y
SRCS-y := main.c
-# this application needs libraries first
-DEPDIRS-y += lib
-
include $(RTE_SDK)/mk/rte.app.mk
endif
diff --git a/app/test-acl/main.c b/test/test-acl/main.c
index 1b2b1760..1b2b1760 100644
--- a/app/test-acl/main.c
+++ b/test/test-acl/main.c
diff --git a/app/test-pipeline/Makefile b/test/test-pipeline/Makefile
index 4bab6dc6..520a319f 100644
--- a/app/test-pipeline/Makefile
+++ b/test/test-pipeline/Makefile
@@ -56,9 +56,6 @@ SRCS-y += pipeline_lpm_ipv6.c
# include ACL lib if available
SRCS-$(CONFIG_RTE_LIBRTE_ACL) += pipeline_acl.c
-# this application needs libraries first
-DEPDIRS-y += lib drivers
-
include $(RTE_SDK)/mk/rte.app.mk
endif
diff --git a/app/test-pipeline/config.c b/test/test-pipeline/config.c
index dd80ed69..1b397c03 100644
--- a/app/test-pipeline/config.c
+++ b/test/test-pipeline/config.c
@@ -259,6 +259,6 @@ app_parse_args(int argc, char **argv)
argv[optind - 1] = prgname;
ret = optind - 1;
- optind = 0; /* reset getopt lib */
+ optind = 1; /* reset getopt lib */
return ret;
}
diff --git a/app/test-pipeline/init.c b/test/test-pipeline/init.c
index aef082fc..00dbc279 100644
--- a/app/test-pipeline/init.c
+++ b/test/test-pipeline/init.c
@@ -105,7 +105,7 @@ static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 1, /* IP checksum offload enabled */
.hw_vlan_filter = 0, /* VLAN filtering disabled */
.jumbo_frame = 0, /* Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /* CRC stripped by hardware */
+ .hw_strip_crc = 1, /* CRC stripped by hardware */
},
.rx_adv_conf = {
.rss_conf = {
diff --git a/app/test-pipeline/main.c b/test/test-pipeline/main.c
index 71ab6ad9..71ab6ad9 100644
--- a/app/test-pipeline/main.c
+++ b/test/test-pipeline/main.c
diff --git a/app/test-pipeline/main.h b/test/test-pipeline/main.h
index 36858492..36858492 100644
--- a/app/test-pipeline/main.h
+++ b/test/test-pipeline/main.h
diff --git a/app/test-pipeline/pipeline_acl.c b/test/test-pipeline/pipeline_acl.c
index 22d5f362..22d5f362 100644
--- a/app/test-pipeline/pipeline_acl.c
+++ b/test/test-pipeline/pipeline_acl.c
diff --git a/app/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c
index 10d28695..991e381e 100644
--- a/app/test-pipeline/pipeline_hash.c
+++ b/test/test-pipeline/pipeline_hash.c
@@ -480,7 +480,7 @@ uint64_t test_hash(
__attribute__((unused)) uint32_t key_size,
__attribute__((unused)) uint64_t seed)
{
- uint32_t *k32 = (uint32_t *) key;
+ uint32_t *k32 = key;
uint32_t ip_dst = rte_be_to_cpu_32(k32[0]);
uint64_t signature = (ip_dst >> 2) | ((ip_dst & 0x3) << 30);
@@ -546,7 +546,8 @@ app_main_loop_rx_metadata(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_rx[i],
(void **) app.mbuf_rx.array,
- n_mbufs);
- } while (ret < 0);
+ n_mbufs,
+ NULL);
+ } while (ret == 0);
}
}
diff --git a/app/test-pipeline/pipeline_lpm.c b/test/test-pipeline/pipeline_lpm.c
index ecea6b3b..ecea6b3b 100644
--- a/app/test-pipeline/pipeline_lpm.c
+++ b/test/test-pipeline/pipeline_lpm.c
diff --git a/app/test-pipeline/pipeline_lpm_ipv6.c b/test/test-pipeline/pipeline_lpm_ipv6.c
index 3352e89d..3352e89d 100644
--- a/app/test-pipeline/pipeline_lpm_ipv6.c
+++ b/test/test-pipeline/pipeline_lpm_ipv6.c
diff --git a/app/test-pipeline/pipeline_stub.c b/test/test-pipeline/pipeline_stub.c
index ba710ca6..ba710ca6 100644
--- a/app/test-pipeline/pipeline_stub.c
+++ b/test/test-pipeline/pipeline_stub.c
diff --git a/app/test-pipeline/runtime.c b/test/test-pipeline/runtime.c
index 42a6142c..8970e1c3 100644
--- a/app/test-pipeline/runtime.c
+++ b/test/test-pipeline/runtime.c
@@ -97,8 +97,8 @@ app_main_loop_rx(void) {
ret = rte_ring_sp_enqueue_bulk(
app.rings_rx[i],
(void **) app.mbuf_rx.array,
- n_mbufs);
- } while (ret < 0);
+ n_mbufs, NULL);
+ } while (ret == 0);
}
}
@@ -121,17 +121,19 @@ app_main_loop_worker(void) {
ret = rte_ring_sc_dequeue_bulk(
app.rings_rx[i],
(void **) worker_mbuf->array,
- app.burst_size_worker_read);
+ app.burst_size_worker_read,
+ NULL);
- if (ret == -ENOENT)
+ if (ret == 0)
continue;
do {
ret = rte_ring_sp_enqueue_bulk(
app.rings_tx[i ^ 1],
(void **) worker_mbuf->array,
- app.burst_size_worker_write);
- } while (ret < 0);
+ app.burst_size_worker_write,
+ NULL);
+ } while (ret == 0);
}
}
@@ -150,9 +152,10 @@ app_main_loop_tx(void) {
ret = rte_ring_sc_dequeue_bulk(
app.rings_tx[i],
(void **) &app.mbuf_tx[i].array[n_mbufs],
- app.burst_size_tx_read);
+ app.burst_size_tx_read,
+ NULL);
- if (ret == -ENOENT)
+ if (ret == 0)
continue;
n_mbufs += app.burst_size_tx_read;
diff --git a/app/test/Makefile b/test/test/Makefile
index 5be023a0..ee240be4 100644
--- a/app/test/Makefile
+++ b/test/test/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -83,8 +83,8 @@ test_resource.res: test_resource.c
@ cp $< $@
$(eval $(call linked_resource,test_resource_c,test_resource.res))
$(eval $(call linked_tar_resource,test_resource_tar,test_resource.c))
-SRCS-$(CONFIG_RTE_APP_TEST_RESOURCE_TAR) += test_pci.c
-$(eval $(call linked_tar_resource,test_pci_sysfs,test_pci_sysfs))
+SRCS-$(CONFIG_RTE_APP_TEST_RESOURCE_TAR) += test_cfgfile.c
+$(eval $(call linked_tar_resource,test_cfgfiles,test_cfgfiles))
SRCS-y += test_prefetch.c
SRCS-y += test_byteorder.c
SRCS-y += test_per_lcore.c
@@ -123,6 +123,9 @@ SRCS-y += test_logs.c
SRCS-y += test_memcpy.c
SRCS-y += test_memcpy_perf.c
+SRCS-$(CONFIG_RTE_LIBRTE_EFD) += test_efd.c
+SRCS-$(CONFIG_RTE_LIBRTE_EFD) += test_efd_perf.c
+
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_thash.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_perf.c
@@ -157,6 +160,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline_cirbuf.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline_string.c
SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += test_cmdline_lib.c
+SRCS-$(CONFIG_RTE_LIBRTE_NET) += test_crc.c
+
ifeq ($(CONFIG_RTE_LIBRTE_SCHED),y)
SRCS-y += test_red.c
SRCS-y += test_sched.c
@@ -185,9 +190,6 @@ endif
ifeq ($(CONFIG_RTE_LIBRTE_PMD_NULL),y)
SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += test_link_bonding_rssconf.c
-ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
-LDLIBS += -lrte_pmd_null
-endif
endif
SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += test_pmd_ring.c
@@ -197,6 +199,12 @@ SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_blockcipher.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev.c
+ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
+SRCS-y += test_eventdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += test_eventdev_sw.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += test_eventdev_octeontx.c
+endif
+
SRCS-$(CONFIG_RTE_LIBRTE_KVARGS) += test_kvargs.c
CFLAGS += -O3
@@ -211,21 +219,39 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
CFLAGS_test_memcpy.o += -fno-var-tracking-assignments
CFLAGS_test_memcpy_perf.o += -fno-var-tracking-assignments
+# for older GCC versions, allow us to initialize an event using
+# designated initializers.
+ifeq ($(shell test $(GCC_VERSION) -le 50 && echo 1), 1)
+CFLAGS_test_eventdev_sw.o += -Wno-missing-field-initializers
+endif
endif
endif
-
-# this application needs libraries first
-DEPDIRS-y += lib drivers
# Link against shared libraries when needed
ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y)
ifneq ($(CONFIG_RTE_LIBRTE_PMD_RING),y)
$(error Link bonding tests require CONFIG_RTE_LIBRTE_PMD_RING=y)
-else
+endif
+endif
+
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
+
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y)
+LDLIBS += -lrte_pmd_bond
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_NULL),y)
+LDLIBS += -lrte_pmd_null
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_RING),y)
LDLIBS += -lrte_pmd_ring
endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER),y)
+LDLIBS += -lrte_pmd_crypto_scheduler
endif
+
endif
ifeq ($(CONFIG_RTE_APP_TEST_RESOURCE_TAR),y)
diff --git a/app/test/autotest.py b/test/test/autotest.py
index b9fd6b6f..5c19a022 100644
--- a/app/test/autotest.py
+++ b/test/test/autotest.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# BSD LICENSE
#
@@ -32,45 +32,47 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Script that uses either test app or qemu controlled by python-pexpect
-
-import sys, autotest_data, autotest_runner
-
+from __future__ import print_function
+import autotest_data
+import autotest_runner
+import sys
def usage():
- print"Usage: autotest.py [test app|test iso image]",
- print "[target] [whitelist|-blacklist]"
+ print("Usage: autotest.py [test app|test iso image] ",
+ "[target] [whitelist|-blacklist]")
if len(sys.argv) < 3:
- usage()
- sys.exit(1)
+ usage()
+ sys.exit(1)
target = sys.argv[2]
-test_whitelist=None
-test_blacklist=None
+test_whitelist = None
+test_blacklist = None
# get blacklist/whitelist
if len(sys.argv) > 3:
- testlist = sys.argv[3].split(',')
- testlist = [test.lower() for test in testlist]
- if testlist[0].startswith('-'):
- testlist[0] = testlist[0].lstrip('-')
- test_blacklist = testlist
- else:
- test_whitelist = testlist
+ testlist = sys.argv[3].split(',')
+ testlist = [test.lower() for test in testlist]
+ if testlist[0].startswith('-'):
+ testlist[0] = testlist[0].lstrip('-')
+ test_blacklist = testlist
+ else:
+ test_whitelist = testlist
-cmdline = "%s -c f -n 4"%(sys.argv[1])
+cmdline = "%s -c f -n 4" % (sys.argv[1])
-print cmdline
+print(cmdline)
-runner = autotest_runner.AutotestRunner(cmdline, target, test_blacklist, test_whitelist)
+runner = autotest_runner.AutotestRunner(cmdline, target, test_blacklist,
+ test_whitelist)
for test_group in autotest_data.parallel_test_group_list:
- runner.add_parallel_test_group(test_group)
+ runner.add_parallel_test_group(test_group)
for test_group in autotest_data.non_parallel_test_group_list:
- runner.add_non_parallel_test_group(test_group)
+ runner.add_non_parallel_test_group(test_group)
num_fails = runner.run_all_tests()
diff --git a/test/test/autotest_data.py b/test/test/autotest_data.py
new file mode 100644
index 00000000..165ed6c5
--- /dev/null
+++ b/test/test/autotest_data.py
@@ -0,0 +1,495 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Test data for autotests
+
+from glob import glob
+from autotest_test_funcs import *
+
+
+# quick and dirty function to find out number of sockets
+def num_sockets():
+ result = len(glob("/sys/devices/system/node/node*"))
+ if result == 0:
+ return 1
+ return result
+
+
+# Assign given number to each socket
+# e.g. 32 becomes 32,32 or 32,32,32,32
+def per_sockets(num):
+ return ",".join([str(num)] * num_sockets())
+
+# groups of tests that can be run in parallel
+# the grouping has been found largely empirically
+parallel_test_group_list = [
+ {
+ "Prefix": "group_1",
+ "Memory": per_sockets(8),
+ "Tests":
+ [
+ {
+ "Name": "Cycles autotest",
+ "Command": "cycles_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Timer autotest",
+ "Command": "timer_autotest",
+ "Func": timer_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Debug autotest",
+ "Command": "debug_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Errno autotest",
+ "Command": "errno_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Meter autotest",
+ "Command": "meter_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Common autotest",
+ "Command": "common_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Resource autotest",
+ "Command": "resource_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "group_2",
+ "Memory": "16",
+ "Tests":
+ [
+ {
+ "Name": "Memory autotest",
+ "Command": "memory_autotest",
+ "Func": memory_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Read/write lock autotest",
+ "Command": "rwlock_autotest",
+ "Func": rwlock_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Logs autotest",
+ "Command": "logs_autotest",
+ "Func": logs_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "CPU flags autotest",
+ "Command": "cpuflags_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Version autotest",
+ "Command": "version_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "EAL filesystem autotest",
+ "Command": "eal_fs_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "EAL flags autotest",
+ "Command": "eal_flags_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Hash autotest",
+ "Command": "hash_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ],
+ },
+ {
+ "Prefix": "group_3",
+ "Memory": per_sockets(512),
+ "Tests":
+ [
+ {
+ "Name": "LPM autotest",
+ "Command": "lpm_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "LPM6 autotest",
+ "Command": "lpm6_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Memcpy autotest",
+ "Command": "memcpy_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Memzone autotest",
+ "Command": "memzone_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "String autotest",
+ "Command": "string_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Alarm autotest",
+ "Command": "alarm_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "group_4",
+ "Memory": per_sockets(128),
+ "Tests":
+ [
+ {
+ "Name": "PCI autotest",
+ "Command": "pci_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Malloc autotest",
+ "Command": "malloc_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Multi-process autotest",
+ "Command": "multiprocess_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Mbuf autotest",
+ "Command": "mbuf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Per-lcore autotest",
+ "Command": "per_lcore_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Ring autotest",
+ "Command": "ring_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "group_5",
+ "Memory": "32",
+ "Tests":
+ [
+ {
+ "Name": "Spinlock autotest",
+ "Command": "spinlock_autotest",
+ "Func": spinlock_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Byte order autotest",
+ "Command": "byteorder_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "TAILQ autotest",
+ "Command": "tailq_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Command-line autotest",
+ "Command": "cmdline_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Interrupts autotest",
+ "Command": "interrupt_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "group_6",
+ "Memory": per_sockets(512),
+ "Tests":
+ [
+ {
+ "Name": "Function reentrancy autotest",
+ "Command": "func_reentrancy_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Mempool autotest",
+ "Command": "mempool_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Atomics autotest",
+ "Command": "atomic_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Prefetch autotest",
+ "Command": "prefetch_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Red autotest",
+ "Command": "red_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "group_7",
+ "Memory": "64",
+ "Tests":
+ [
+ {
+ "Name": "PMD ring autotest",
+ "Command": "ring_pmd_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Access list control autotest",
+ "Command": "acl_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Sched autotest",
+ "Command": "sched_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+]
+
+# tests that should not be run when any other tests are running
+non_parallel_test_group_list = [
+
+ {
+ "Prefix": "eventdev",
+ "Memory": "512",
+ "Tests":
+ [
+ {
+ "Name": "Eventdev common autotest",
+ "Command": "eventdev_common_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "eventdev_sw",
+ "Memory": "512",
+ "Tests":
+ [
+ {
+ "Name": "Eventdev sw autotest",
+ "Command": "eventdev_sw_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "kni",
+ "Memory": "512",
+ "Tests":
+ [
+ {
+ "Name": "KNI autotest",
+ "Command": "kni_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "mempool_perf",
+ "Memory": per_sockets(256),
+ "Tests":
+ [
+ {
+ "Name": "Mempool performance autotest",
+ "Command": "mempool_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "memcpy_perf",
+ "Memory": per_sockets(512),
+ "Tests":
+ [
+ {
+ "Name": "Memcpy performance autotest",
+ "Command": "memcpy_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "hash_perf",
+ "Memory": per_sockets(512),
+ "Tests":
+ [
+ {
+ "Name": "Hash performance autotest",
+ "Command": "hash_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "power",
+ "Memory": "16",
+ "Tests":
+ [
+ {
+ "Name": "Power autotest",
+ "Command": "power_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "power_acpi_cpufreq",
+ "Memory": "16",
+ "Tests":
+ [
+ {
+ "Name": "Power ACPI cpufreq autotest",
+ "Command": "power_acpi_cpufreq_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "power_kvm_vm",
+ "Memory": "16",
+ "Tests":
+ [
+ {
+ "Name": "Power KVM VM autotest",
+ "Command": "power_kvm_vm_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+ {
+ "Prefix": "timer_perf",
+ "Memory": per_sockets(512),
+ "Tests":
+ [
+ {
+ "Name": "Timer performance autotest",
+ "Command": "timer_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+
+ #
+ # Please always make sure that ring_perf is the last test!
+ #
+ {
+ "Prefix": "ring_perf",
+ "Memory": per_sockets(512),
+ "Tests":
+ [
+ {
+ "Name": "Ring performance autotest",
+ "Command": "ring_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ ]
+ },
+]
diff --git a/test/test/autotest_runner.py b/test/test/autotest_runner.py
new file mode 100644
index 00000000..fc882ec0
--- /dev/null
+++ b/test/test/autotest_runner.py
@@ -0,0 +1,428 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# The main logic behind running autotests in parallel
+
+import StringIO
+import csv
+import multiprocessing
+import pexpect
+import re
+import subprocess
+import sys
+import time
+
+# wait for prompt
+
+
+def wait_prompt(child):
+ try:
+ child.sendline()
+ result = child.expect(["RTE>>", pexpect.TIMEOUT, pexpect.EOF],
+ timeout=120)
+ except:
+ return False
+ if result == 0:
+ return True
+ else:
+ return False
+
+# run a test group
+# each result tuple in results list consists of:
+# result value (0 or -1)
+# result string
+# test name
+# total test run time (double)
+# raw test log
+# test report (if not available, should be None)
+#
+# this function needs to be outside AutotestRunner class
+# because otherwise Pool won't work (or rather it will require
+# quite a bit of effort to make it work).
+
+
+def run_test_group(cmdline, test_group):
+ results = []
+ child = None
+ start_time = time.time()
+ startuplog = None
+
+ # run test app
+ try:
+ # prepare logging of init
+ startuplog = StringIO.StringIO()
+
+ print >>startuplog, "\n%s %s\n" % ("=" * 20, test_group["Prefix"])
+ print >>startuplog, "\ncmdline=%s" % cmdline
+
+ child = pexpect.spawn(cmdline, logfile=startuplog)
+
+ # wait for target to boot
+ if not wait_prompt(child):
+ child.close()
+
+ results.append((-1,
+ "Fail [No prompt]",
+ "Start %s" % test_group["Prefix"],
+ time.time() - start_time,
+ startuplog.getvalue(),
+ None))
+
+ # mark all tests as failed
+ for test in test_group["Tests"]:
+ results.append((-1, "Fail [No prompt]", test["Name"],
+ time.time() - start_time, "", None))
+ # exit test
+ return results
+
+ except:
+ results.append((-1,
+ "Fail [Can't run]",
+ "Start %s" % test_group["Prefix"],
+ time.time() - start_time,
+ startuplog.getvalue(),
+ None))
+
+ # mark all tests as failed
+ for t in test_group["Tests"]:
+ results.append((-1, "Fail [Can't run]", t["Name"],
+ time.time() - start_time, "", None))
+ # exit test
+ return results
+
+ # startup was successful
+ results.append((0, "Success", "Start %s" % test_group["Prefix"],
+ time.time() - start_time, startuplog.getvalue(), None))
+
+ # parse the binary for available test commands
+ binary = cmdline.split()[0]
+ stripped = 'not stripped' not in subprocess.check_output(['file', binary])
+ if not stripped:
+ symbols = subprocess.check_output(['nm', binary]).decode('utf-8')
+ avail_cmds = re.findall('test_register_(\w+)', symbols)
+
+ # run all tests in test group
+ for test in test_group["Tests"]:
+
+ # create log buffer for each test
+ # in multiprocessing environment, the logging would be
+ # interleaved and will create a mess, hence the buffering
+ logfile = StringIO.StringIO()
+ child.logfile = logfile
+
+ result = ()
+
+ # make a note when the test started
+ start_time = time.time()
+
+ try:
+ # print test name to log buffer
+ print >>logfile, "\n%s %s\n" % ("-" * 20, test["Name"])
+
+ # run test function associated with the test
+ if stripped or test["Command"] in avail_cmds:
+ result = test["Func"](child, test["Command"])
+ else:
+ result = (0, "Skipped [Not Available]")
+
+ # make a note when the test was finished
+ end_time = time.time()
+
+ # append test data to the result tuple
+ result += (test["Name"], end_time - start_time,
+ logfile.getvalue())
+
+ # call report function, if any defined, and supply it with
+ # target and complete log for test run
+ if test["Report"]:
+ report = test["Report"](self.target, log)
+
+ # append report to results tuple
+ result += (report,)
+ else:
+ # report is None
+ result += (None,)
+ except:
+ # make a note when the test crashed
+ end_time = time.time()
+
+ # mark test as failed
+ result = (-1, "Fail [Crash]", test["Name"],
+ end_time - start_time, logfile.getvalue(), None)
+ finally:
+ # append the results to the results list
+ results.append(result)
+
+ # regardless of whether test has crashed, try quitting it
+ try:
+ child.sendline("quit")
+ child.close()
+ # if the test crashed, just do nothing instead
+ except:
+ # nop
+ pass
+
+ # return test results
+ return results
+
+
+# class representing an instance of autotests run
+class AutotestRunner:
+ cmdline = ""
+ parallel_test_groups = []
+ non_parallel_test_groups = []
+ logfile = None
+ csvwriter = None
+ target = ""
+ start = None
+ n_tests = 0
+ fails = 0
+ log_buffers = []
+ blacklist = []
+ whitelist = []
+
+ def __init__(self, cmdline, target, blacklist, whitelist):
+ self.cmdline = cmdline
+ self.target = target
+ self.blacklist = blacklist
+ self.whitelist = whitelist
+
+ # log file filename
+ logfile = "%s.log" % target
+ csvfile = "%s.csv" % target
+
+ self.logfile = open(logfile, "w")
+ csvfile = open(csvfile, "w")
+ self.csvwriter = csv.writer(csvfile)
+
+ # prepare results table
+ self.csvwriter.writerow(["test_name", "test_result", "result_str"])
+
+ # set up cmdline string
+ def __get_cmdline(self, test):
+ cmdline = self.cmdline
+
+ # append memory limitations for each test
+ # otherwise tests won't run in parallel
+ if "i686" not in self.target:
+ cmdline += " --socket-mem=%s" % test["Memory"]
+ else:
+ # affinitize startup so that tests don't fail on i686
+ cmdline = "taskset 1 " + cmdline
+ cmdline += " -m " + str(sum(map(int, test["Memory"].split(","))))
+
+ # set group prefix for autotest group
+ # otherwise they won't run in parallel
+ cmdline += " --file-prefix=%s" % test["Prefix"]
+
+ return cmdline
+
+ def add_parallel_test_group(self, test_group):
+ self.parallel_test_groups.append(test_group)
+
+ def add_non_parallel_test_group(self, test_group):
+ self.non_parallel_test_groups.append(test_group)
+
+ def __process_results(self, results):
+ # this iterates over individual test results
+ for i, result in enumerate(results):
+
+ # increase total number of tests that were run
+ # do not include "start" test
+ if i > 0:
+ self.n_tests += 1
+
+ # unpack result tuple
+ test_result, result_str, test_name, \
+ test_time, log, report = result
+
+ # get total run time
+ cur_time = time.time()
+ total_time = int(cur_time - self.start)
+
+ # print results, test run time and total time since start
+ result = ("%s:" % test_name).ljust(30)
+ result += result_str.ljust(29)
+ result += "[%02dm %02ds]" % (test_time / 60, test_time % 60)
+
+ # don't print out total time every line, it's the same anyway
+ if i == len(results) - 1:
+ print(result,
+ "[%02dm %02ds]" % (total_time / 60, total_time % 60))
+ else:
+ print(result)
+
+ # if test failed and it wasn't a "start" test
+ if test_result < 0 and not i == 0:
+ self.fails += 1
+
+ # collect logs
+ self.log_buffers.append(log)
+
+ # create report if it exists
+ if report:
+ try:
+ f = open("%s_%s_report.rst" %
+ (self.target, test_name), "w")
+ except IOError:
+ print("Report for %s could not be created!" % test_name)
+ else:
+ with f:
+ f.write(report)
+
+ # write test result to CSV file
+ if i != 0:
+ self.csvwriter.writerow([test_name, test_result, result_str])
+
+ # this function iterates over test groups and removes each
+ # test that is not in whitelist/blacklist
+ def __filter_groups(self, test_groups):
+ groups_to_remove = []
+
+ # filter out tests from parallel test groups
+ for i, test_group in enumerate(test_groups):
+
+ # iterate over a copy so that we could safely delete individual
+ # tests
+ for test in test_group["Tests"][:]:
+ test_id = test["Command"]
+
+ # dump tests are specified in full e.g. "Dump_mempool"
+ if "_autotest" in test_id:
+ test_id = test_id[:-len("_autotest")]
+
+ # filter out blacklisted/whitelisted tests
+ if self.blacklist and test_id in self.blacklist:
+ test_group["Tests"].remove(test)
+ continue
+ if self.whitelist and test_id not in self.whitelist:
+ test_group["Tests"].remove(test)
+ continue
+
+ # modify or remove original group
+ if len(test_group["Tests"]) > 0:
+ test_groups[i] = test_group
+ else:
+ # remember which groups should be deleted
+ # put the numbers backwards so that we start
+ # deleting from the end, not from the beginning
+ groups_to_remove.insert(0, i)
+
+ # remove test groups that need to be removed
+ for i in groups_to_remove:
+ del test_groups[i]
+
+ return test_groups
+
+ # iterate over test groups and run tests associated with them
+ def run_all_tests(self):
+ # filter groups
+ self.parallel_test_groups = \
+ self.__filter_groups(self.parallel_test_groups)
+ self.non_parallel_test_groups = \
+ self.__filter_groups(self.non_parallel_test_groups)
+
+ # create a pool of worker threads
+ pool = multiprocessing.Pool(processes=1)
+
+ results = []
+
+ # whatever happens, try to save as much logs as possible
+ try:
+
+ # create table header
+ print("")
+ print("Test name".ljust(30), "Test result".ljust(29),
+ "Test".center(9), "Total".center(9))
+ print("=" * 80)
+
+ # make a note of tests start time
+ self.start = time.time()
+
+ # assign worker threads to run test groups
+ for test_group in self.parallel_test_groups:
+ result = pool.apply_async(run_test_group,
+ [self.__get_cmdline(test_group),
+ test_group])
+ results.append(result)
+
+ # iterate while we have group execution results to get
+ while len(results) > 0:
+
+ # iterate over a copy to be able to safely delete results
+ # this iterates over a list of group results
+ for group_result in results[:]:
+
+ # if the thread hasn't finished yet, continue
+ if not group_result.ready():
+ continue
+
+ res = group_result.get()
+
+ self.__process_results(res)
+
+ # remove result from results list once we're done with it
+ results.remove(group_result)
+
+ # run non_parallel tests. they are run one by one, synchronously
+ for test_group in self.non_parallel_test_groups:
+ group_result = run_test_group(
+ self.__get_cmdline(test_group), test_group)
+
+ self.__process_results(group_result)
+
+ # get total run time
+ cur_time = time.time()
+ total_time = int(cur_time - self.start)
+
+ # print out summary
+ print("=" * 80)
+ print("Total run time: %02dm %02ds" % (total_time / 60,
+ total_time % 60))
+ if self.fails != 0:
+ print("Number of failed tests: %s" % str(self.fails))
+
+ # write summary to logfile
+ self.logfile.write("Summary\n")
+ self.logfile.write("Target: ".ljust(15) + "%s\n" % self.target)
+ self.logfile.write("Tests: ".ljust(15) + "%i\n" % self.n_tests)
+ self.logfile.write("Failed tests: ".ljust(
+ 15) + "%i\n" % self.fails)
+ except:
+ print("Exception occurred")
+ print(sys.exc_info())
+ self.fails = 1
+
+ # drop logs from all executions to a logfile
+ for buf in self.log_buffers:
+ self.logfile.write(buf.replace("\r", ""))
+
+ return self.fails
diff --git a/test/test/autotest_test_funcs.py b/test/test/autotest_test_funcs.py
new file mode 100644
index 00000000..8da8fcd7
--- /dev/null
+++ b/test/test/autotest_test_funcs.py
@@ -0,0 +1,295 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Test functions
+
+import pexpect
+
+# default autotest, used to run most tests
+# waits for "Test OK"
+
+
+def default_autotest(child, test_name):
+ child.sendline(test_name)
+ result = child.expect(["Test OK", "Test Failed",
+ "Command not found", pexpect.TIMEOUT], timeout=900)
+ if result == 1:
+ return -1, "Fail"
+ elif result == 2:
+ return -1, "Fail [Not found]"
+ elif result == 3:
+ return -1, "Fail [Timeout]"
+ return 0, "Success"
+
+# autotest used to run dump commands
+# just fires the command
+
+
+def dump_autotest(child, test_name):
+ child.sendline(test_name)
+ return 0, "Success"
+
+# memory autotest
+# reads output and waits for Test OK
+
+
+def memory_autotest(child, test_name):
+ child.sendline(test_name)
+ regexp = "phys:0x[0-9a-f]*, len:([0-9]*), virt:0x[0-9a-f]*, " \
+ "socket_id:[0-9]*"
+ index = child.expect([regexp, pexpect.TIMEOUT], timeout=180)
+ if index != 0:
+ return -1, "Fail [Timeout]"
+ size = int(child.match.groups()[0], 16)
+ if size <= 0:
+ return -1, "Fail [Bad size]"
+ index = child.expect(["Test OK", "Test Failed",
+ pexpect.TIMEOUT], timeout=10)
+ if index == 1:
+ return -1, "Fail"
+ elif index == 2:
+ return -1, "Fail [Timeout]"
+ return 0, "Success"
+
+
+def spinlock_autotest(child, test_name):
+ i = 0
+ ir = 0
+ child.sendline(test_name)
+ while True:
+ index = child.expect(["Test OK",
+ "Test Failed",
+ "Hello from core ([0-9]*) !",
+ "Hello from within recursive locks "
+ "from ([0-9]*) !",
+ pexpect.TIMEOUT], timeout=5)
+ # ok
+ if index == 0:
+ break
+
+ # message, check ordering
+ elif index == 2:
+ if int(child.match.groups()[0]) < i:
+ return -1, "Fail [Bad order]"
+ i = int(child.match.groups()[0])
+ elif index == 3:
+ if int(child.match.groups()[0]) < ir:
+ return -1, "Fail [Bad order]"
+ ir = int(child.match.groups()[0])
+
+ # fail
+ elif index == 4:
+ return -1, "Fail [Timeout]"
+ elif index == 1:
+ return -1, "Fail"
+
+ return 0, "Success"
+
+
+def rwlock_autotest(child, test_name):
+ i = 0
+ child.sendline(test_name)
+ while True:
+ index = child.expect(["Test OK",
+ "Test Failed",
+ "Hello from core ([0-9]*) !",
+ "Global write lock taken on master "
+ "core ([0-9]*)",
+ pexpect.TIMEOUT], timeout=10)
+ # ok
+ if index == 0:
+ if i != 0xffff:
+ return -1, "Fail [Message is missing]"
+ break
+
+ # message, check ordering
+ elif index == 2:
+ if int(child.match.groups()[0]) < i:
+ return -1, "Fail [Bad order]"
+ i = int(child.match.groups()[0])
+
+ # must be the last message, check ordering
+ elif index == 3:
+ i = 0xffff
+
+ elif index == 4:
+ return -1, "Fail [Timeout]"
+
+ # fail
+ else:
+ return -1, "Fail"
+
+ return 0, "Success"
+
+
+def logs_autotest(child, test_name):
+ child.sendline(test_name)
+
+ log_list = [
+ "TESTAPP1: error message",
+ "TESTAPP1: critical message",
+ "TESTAPP2: critical message",
+ "TESTAPP1: error message",
+ ]
+
+ for log_msg in log_list:
+ index = child.expect([log_msg,
+ "Test OK",
+ "Test Failed",
+ pexpect.TIMEOUT], timeout=10)
+
+ if index == 3:
+ return -1, "Fail [Timeout]"
+ # not ok
+ elif index != 0:
+ return -1, "Fail"
+
+ index = child.expect(["Test OK",
+ "Test Failed",
+ pexpect.TIMEOUT], timeout=10)
+
+ return 0, "Success"
+
+
+def timer_autotest(child, test_name):
+ child.sendline(test_name)
+
+ index = child.expect(["Start timer stress tests",
+ "Test Failed",
+ pexpect.TIMEOUT], timeout=5)
+
+ if index == 1:
+ return -1, "Fail"
+ elif index == 2:
+ return -1, "Fail [Timeout]"
+
+ index = child.expect(["Start timer stress tests 2",
+ "Test Failed",
+ pexpect.TIMEOUT], timeout=5)
+
+ if index == 1:
+ return -1, "Fail"
+ elif index == 2:
+ return -1, "Fail [Timeout]"
+
+ index = child.expect(["Start timer basic tests",
+ "Test Failed",
+ pexpect.TIMEOUT], timeout=5)
+
+ if index == 1:
+ return -1, "Fail"
+ elif index == 2:
+ return -1, "Fail [Timeout]"
+
+ lcore_tim0 = -1
+ lcore_tim1 = -1
+ lcore_tim2 = -1
+ lcore_tim3 = -1
+
+ while True:
+ index = child.expect(["TESTTIMER: ([0-9]*): callback id=([0-9]*) "
+ "count=([0-9]*) on core ([0-9]*)",
+ "Test OK",
+ "Test Failed",
+ pexpect.TIMEOUT], timeout=10)
+
+ if index == 1:
+ break
+
+ if index == 2:
+ return -1, "Fail"
+ elif index == 3:
+ return -1, "Fail [Timeout]"
+
+ try:
+ id = int(child.match.groups()[1])
+ cnt = int(child.match.groups()[2])
+ lcore = int(child.match.groups()[3])
+ except:
+ return -1, "Fail [Cannot parse]"
+
+ # timer0 always expires on the same core when cnt < 20
+ if id == 0:
+ if lcore_tim0 == -1:
+ lcore_tim0 = lcore
+ elif lcore != lcore_tim0 and cnt < 20:
+ return -1, "Fail [lcore != lcore_tim0 (%d, %d)]" \
+ % (lcore, lcore_tim0)
+ if cnt > 21:
+ return -1, "Fail [tim0 cnt > 21]"
+
+ # timer1 each time expires on a different core
+ if id == 1:
+ if lcore == lcore_tim1:
+ return -1, "Fail [lcore == lcore_tim1 (%d, %d)]" \
+ % (lcore, lcore_tim1)
+ lcore_tim1 = lcore
+ if cnt > 10:
+ return -1, "Fail [tim1 cnt > 30]"
+
+ # timer0 always expires on the same core
+ if id == 2:
+ if lcore_tim2 == -1:
+ lcore_tim2 = lcore
+ elif lcore != lcore_tim2:
+ return -1, "Fail [lcore != lcore_tim2 (%d, %d)]" \
+ % (lcore, lcore_tim2)
+ if cnt > 30:
+ return -1, "Fail [tim2 cnt > 30]"
+
+ # timer0 always expires on the same core
+ if id == 3:
+ if lcore_tim3 == -1:
+ lcore_tim3 = lcore
+ elif lcore != lcore_tim3:
+ return -1, "Fail [lcore_tim3 changed (%d -> %d)]" \
+ % (lcore, lcore_tim3)
+ if cnt > 30:
+ return -1, "Fail [tim3 cnt > 30]"
+
+ # must be 2 different cores
+ if lcore_tim0 == lcore_tim3:
+ return -1, "Fail [lcore_tim0 (%d) == lcore_tim3 (%d)]" \
+ % (lcore_tim0, lcore_tim3)
+
+ return 0, "Success"
+
+
+def ring_autotest(child, test_name):
+ child.sendline(test_name)
+ index = child.expect(["Test OK", "Test Failed",
+ pexpect.TIMEOUT], timeout=2)
+ if index == 1:
+ return -1, "Fail"
+ elif index == 2:
+ return -1, "Fail [Timeout]"
+
+ return 0, "Success"
diff --git a/app/test/commands.c b/test/test/commands.c
index 2df46b05..4097a331 100644
--- a/app/test/commands.c
+++ b/test/test/commands.c
@@ -158,13 +158,15 @@ static void cmd_dump_parsed(void *parsed_result,
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
rte_eal_devargs_dump(stdout);
+ else if (!strcmp(res->dump, "dump_log_types"))
+ rte_log_dump(stdout);
}
cmdline_parse_token_string_t cmd_dump_dump =
TOKEN_STRING_INITIALIZER(struct cmd_dump_result, dump,
"dump_physmem#dump_memzone#"
"dump_struct_sizes#dump_ring#dump_mempool#"
- "dump_devargs");
+ "dump_devargs#dump_log_types");
cmdline_parse_inst_t cmd_dump = {
.f = cmd_dump_parsed, /* function to call */
@@ -228,57 +230,6 @@ cmdline_parse_inst_t cmd_dump_one = {
/****************/
-struct cmd_set_ring_result {
- cmdline_fixed_string_t set;
- cmdline_fixed_string_t name;
- uint32_t value;
-};
-
-static void cmd_set_ring_parsed(void *parsed_result, struct cmdline *cl,
- __attribute__((unused)) void *data)
-{
- struct cmd_set_ring_result *res = parsed_result;
- struct rte_ring *r;
- int ret;
-
- r = rte_ring_lookup(res->name);
- if (r == NULL) {
- cmdline_printf(cl, "Cannot find ring\n");
- return;
- }
-
- if (!strcmp(res->set, "set_watermark")) {
- ret = rte_ring_set_water_mark(r, res->value);
- if (ret != 0)
- cmdline_printf(cl, "Cannot set water mark\n");
- }
-}
-
-cmdline_parse_token_string_t cmd_set_ring_set =
- TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, set,
- "set_watermark");
-
-cmdline_parse_token_string_t cmd_set_ring_name =
- TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, name, NULL);
-
-cmdline_parse_token_num_t cmd_set_ring_value =
- TOKEN_NUM_INITIALIZER(struct cmd_set_ring_result, value, UINT32);
-
-cmdline_parse_inst_t cmd_set_ring = {
- .f = cmd_set_ring_parsed, /* function to call */
- .data = NULL, /* 2nd arg of func */
- .help_str = "set watermark: "
- "set_watermark <ring_name> <value>",
- .tokens = { /* token list, NULL terminated */
- (void *)&cmd_set_ring_set,
- (void *)&cmd_set_ring_name,
- (void *)&cmd_set_ring_value,
- NULL,
- },
-};
-
-/****************/
-
struct cmd_quit_result {
cmdline_fixed_string_t quit;
};
@@ -419,7 +370,6 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_autotest,
(cmdline_parse_inst_t *)&cmd_dump,
(cmdline_parse_inst_t *)&cmd_dump_one,
- (cmdline_parse_inst_t *)&cmd_set_ring,
(cmdline_parse_inst_t *)&cmd_quit,
(cmdline_parse_inst_t *)&cmd_set_rxtx,
(cmdline_parse_inst_t *)&cmd_set_rxtx_anchor,
diff --git a/app/test/packet_burst_generator.c b/test/test/packet_burst_generator.c
index a93c3b59..a93c3b59 100644
--- a/app/test/packet_burst_generator.c
+++ b/test/test/packet_burst_generator.c
diff --git a/app/test/packet_burst_generator.h b/test/test/packet_burst_generator.h
index edc10441..edc10441 100644
--- a/app/test/packet_burst_generator.h
+++ b/test/test/packet_burst_generator.h
diff --git a/app/test/process.h b/test/test/process.h
index 4f8d1211..4f8d1211 100644
--- a/app/test/process.h
+++ b/test/test/process.h
diff --git a/app/test/resource.c b/test/test/resource.c
index 0e2b62cd..0e2b62cd 100644
--- a/app/test/resource.c
+++ b/test/test/resource.c
diff --git a/app/test/resource.h b/test/test/resource.h
index 1e961221..1e961221 100644
--- a/app/test/resource.h
+++ b/test/test/resource.h
diff --git a/app/test/test.c b/test/test/test.c
index cd0e7845..c561eb56 100644
--- a/app/test/test.c
+++ b/test/test/test.c
@@ -157,7 +157,9 @@ int
unit_test_suite_runner(struct unit_test_suite *suite)
{
int test_success;
- unsigned total = 0, executed = 0, skipped = 0, succeeded = 0, failed = 0;
+ unsigned int total = 0, executed = 0, skipped = 0;
+ unsigned int succeeded = 0, failed = 0, unsupported = 0;
+ const char *status;
if (suite->suite_name) {
printf(" + ------------------------------------------------------- +\n");
@@ -190,8 +192,12 @@ unit_test_suite_runner(struct unit_test_suite *suite)
test_success = suite->unit_test_cases[total].testcase();
if (test_success == TEST_SUCCESS)
succeeded++;
+ else if (test_success == -ENOTSUP)
+ unsupported++;
else
failed++;
+ } else if (test_success == -ENOTSUP) {
+ unsupported++;
} else {
failed++;
}
@@ -201,15 +207,14 @@ unit_test_suite_runner(struct unit_test_suite *suite)
suite->unit_test_cases[total].teardown();
if (test_success == TEST_SUCCESS)
- printf(" + TestCase [%2d] : %s\n", total,
- suite->unit_test_cases[total].success_msg ?
- suite->unit_test_cases[total].success_msg :
- "passed");
+ status = "succeeded";
+ else if (test_success == -ENOTSUP)
+ status = "unsupported";
else
- printf(" + TestCase [%2d] : %s\n", total,
- suite->unit_test_cases[total].fail_msg ?
- suite->unit_test_cases[total].fail_msg :
- "failed");
+ status = "failed";
+
+ printf(" + TestCase [%2d] : %s %s\n", total,
+ suite->unit_test_cases[total].name, status);
total++;
}
@@ -226,6 +231,7 @@ suite_summary:
printf(" + Tests Total : %2d\n", total);
printf(" + Tests Skipped : %2d\n", skipped);
printf(" + Tests Executed : %2d\n", executed);
+ printf(" + Tests Unsupported: %2d\n", unsupported);
printf(" + Tests Passed : %2d\n", succeeded);
printf(" + Tests Failed : %2d\n", failed);
printf(" + ------------------------------------------------------- +\n");
diff --git a/app/test/test.h b/test/test/test.h
index 82831f4e..08ffe949 100644
--- a/app/test/test.h
+++ b/test/test/test.h
@@ -185,29 +185,24 @@ struct unit_test_case {
int (*setup)(void);
void (*teardown)(void);
int (*testcase)(void);
- const char *success_msg;
- const char *fail_msg;
+ const char *name;
unsigned enabled;
};
-#define TEST_CASE(fn) { NULL, NULL, fn, #fn " succeeded", #fn " failed", 1 }
+#define TEST_CASE(fn) { NULL, NULL, fn, #fn, 1 }
-#define TEST_CASE_NAMED(name, fn) { NULL, NULL, fn, name " succeeded", \
- name " failed", 1 }
+#define TEST_CASE_NAMED(name, fn) { NULL, NULL, fn, name, 1 }
-#define TEST_CASE_ST(setup, teardown, testcase) \
- { setup, teardown, testcase, #testcase " succeeded", \
- #testcase " failed ", 1 }
+#define TEST_CASE_ST(setup, teardown, testcase) \
+ { setup, teardown, testcase, #testcase, 1 }
-#define TEST_CASE_DISABLED(fn) { NULL, NULL, fn, #fn " succeeded", \
- #fn " failed", 0 }
+#define TEST_CASE_DISABLED(fn) { NULL, NULL, fn, #fn, 0 }
-#define TEST_CASE_ST_DISABLED(setup, teardown, testcase) \
- { setup, teardown, testcase, #testcase " succeeded", \
- #testcase " failed ", 0 }
+#define TEST_CASE_ST_DISABLED(setup, teardown, testcase) \
+ { setup, teardown, testcase, #testcase, 0 }
-#define TEST_CASES_END() { NULL, NULL, NULL, NULL, NULL, 0 }
+#define TEST_CASES_END() { NULL, NULL, NULL, NULL, 0 }
#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG
#define TEST_HEXDUMP(file, title, buf, len) rte_hexdump(file, title, buf, len)
@@ -233,9 +228,6 @@ extern const char *prgname;
int commands_init(void);
-int test_pci(void);
-int test_pci_run;
-
int test_mp_secondary(void);
int test_set_rxtx_conf(cmdline_fixed_string_t mode);
diff --git a/app/test/test_acl.c b/test/test/test_acl.c
index 28955f08..c6b511fb 100644
--- a/app/test/test_acl.c
+++ b/test/test/test_acl.c
@@ -1357,19 +1357,6 @@ test_invalid_rules(void)
goto err;
}
- rule.dst_mask_len = 0;
- rule.src_mask_len = 0;
- rule.data.userdata = 0;
-
- /* try adding this rule (it should fail because userdata is invalid) */
- ret = rte_acl_ipv4vlan_add_rules(acx, &rule, 1);
- if (ret == 0) {
- printf("Line %i: Adding a rule with invalid user data "
- "should have failed!\n", __LINE__);
- rte_acl_free(acx);
- return -1;
- }
-
rte_acl_free(acx);
return 0;
@@ -1515,26 +1502,6 @@ test_invalid_parameters(void)
/* free ACL context */
rte_acl_free(acx);
- /* set wrong rule_size so that adding any rules would fail */
- param.rule_size = RTE_ACL_IPV4VLAN_RULE_SZ + 4;
- acx = rte_acl_create(&param);
- if (acx == NULL) {
- printf("Line %i: ACL context creation failed!\n", __LINE__);
- return -1;
- }
-
- /* try adding a rule with size different from context rule_size */
- result = rte_acl_ipv4vlan_add_rules(acx, &rule, 1);
- if (result == 0) {
- printf("Line %i: Adding an invalid sized rule "
- "should have failed!\n", __LINE__);
- rte_acl_free(acx);
- return -1;
- }
-
- /* free ACL context */
- rte_acl_free(acx);
-
/**
* rte_acl_ipv4vlan_build
diff --git a/app/test/test_acl.h b/test/test/test_acl.h
index 421f3109..421f3109 100644
--- a/app/test/test_acl.h
+++ b/test/test/test_acl.h
diff --git a/app/test/test_alarm.c b/test/test/test_alarm.c
index ecb2f6d4..ecb2f6d4 100644
--- a/app/test/test_alarm.c
+++ b/test/test/test_alarm.c
diff --git a/app/test/test_atomic.c b/test/test/test_atomic.c
index b5e7e1b7..b5e7e1b7 100644
--- a/app/test/test_atomic.c
+++ b/test/test/test_atomic.c
diff --git a/app/test/test_byteorder.c b/test/test/test_byteorder.c
index 8ae31142..8ae31142 100644
--- a/app/test/test_byteorder.c
+++ b/test/test/test_byteorder.c
diff --git a/test/test/test_cfgfile.c b/test/test/test_cfgfile.c
new file mode 100644
index 00000000..4cc9b14d
--- /dev/null
+++ b/test/test/test_cfgfile.c
@@ -0,0 +1,322 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Wind River Systems Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_cfgfile.h>
+
+#include "test.h"
+#include "resource.h"
+
+
+#define CFG_FILES_ETC "test_cfgfiles/etc"
+
+REGISTER_LINKED_RESOURCE(test_cfgfiles);
+
+static int
+test_cfgfile_setup(void)
+{
+ const struct resource *r;
+ int ret;
+
+ r = resource_find("test_cfgfiles");
+ TEST_ASSERT_NOT_NULL(r, "missing resource test_cfgfiles");
+
+ ret = resource_untar(r);
+ TEST_ASSERT_SUCCESS(ret, "failed to untar %s", r->name);
+
+ return 0;
+}
+
+static int
+test_cfgfile_cleanup(void)
+{
+ const struct resource *r;
+ int ret;
+
+ r = resource_find("test_cfgfiles");
+ TEST_ASSERT_NOT_NULL(r, "missing resource test_cfgfiles");
+
+ ret = resource_rm_by_tar(r);
+ TEST_ASSERT_SUCCESS(ret, "Failed to delete resource %s", r->name);
+
+ return 0;
+}
+
+static int
+_test_cfgfile_sample(struct rte_cfgfile *cfgfile)
+{
+ const char *value;
+ int ret;
+
+ ret = rte_cfgfile_num_sections(cfgfile, NULL, 0);
+ TEST_ASSERT(ret == 2, "Unexpected number of sections: %d", ret);
+
+ ret = rte_cfgfile_has_section(cfgfile, "section1");
+ TEST_ASSERT(ret, "section1 section missing");
+
+ ret = rte_cfgfile_section_num_entries(cfgfile, "section1");
+ TEST_ASSERT(ret == 1, "section1 unexpected number of entries: %d", ret);
+
+ value = rte_cfgfile_get_entry(cfgfile, "section1", "key1");
+ TEST_ASSERT(strcmp("value1", value) == 0,
+ "key1 unexpected value: %s", value);
+
+ ret = rte_cfgfile_has_section(cfgfile, "section2");
+ TEST_ASSERT(ret, "section2 section missing");
+
+ ret = rte_cfgfile_section_num_entries(cfgfile, "section2");
+ TEST_ASSERT(ret == 2, "section2 unexpected number of entries: %d", ret);
+
+ value = rte_cfgfile_get_entry(cfgfile, "section2", "key2");
+ TEST_ASSERT(strcmp("value2", value) == 0,
+ "key2 unexpected value: %s", value);
+
+ value = rte_cfgfile_get_entry(cfgfile, "section2", "key3");
+ TEST_ASSERT(strcmp("value3", value) == 0,
+ "key3 unexpected value: %s", value);
+
+ return 0;
+}
+
+static int
+test_cfgfile_sample1(void)
+{
+ struct rte_cfgfile *cfgfile;
+ int ret;
+
+ cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/sample1.ini", 0);
+ TEST_ASSERT_NOT_NULL(cfgfile, "Failed to load config file");
+
+ ret = _test_cfgfile_sample(cfgfile);
+ TEST_ASSERT_SUCCESS(ret, "Failed to validate sample file: %d", ret);
+
+ ret = rte_cfgfile_close(cfgfile);
+ TEST_ASSERT_SUCCESS(ret, "Failed to close cfgfile");
+
+ return 0;
+}
+
+static int
+test_cfgfile_sample2(void)
+{
+ struct rte_cfgfile_parameters params;
+ struct rte_cfgfile *cfgfile;
+ int ret;
+
+ /* override comment character */
+ memset(&params, 0, sizeof(params));
+ params.comment_character = '#';
+
+ cfgfile = rte_cfgfile_load_with_params(CFG_FILES_ETC "/sample2.ini", 0,
+ &params);
+ TEST_ASSERT_NOT_NULL(cfgfile, "Failed to parse sample2.ini");
+
+ ret = _test_cfgfile_sample(cfgfile);
+ TEST_ASSERT_SUCCESS(ret, "Failed to validate sample file: %d", ret);
+
+ ret = rte_cfgfile_close(cfgfile);
+ TEST_ASSERT_SUCCESS(ret, "Failed to close cfgfile");
+
+ return 0;
+}
+
+static int
+test_cfgfile_invalid_section_header(void)
+{
+ struct rte_cfgfile *cfgfile;
+
+ cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/invalid_section.ini", 0);
+ TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur");
+
+ return 0;
+}
+
+static int
+test_cfgfile_invalid_comment(void)
+{
+ struct rte_cfgfile_parameters params;
+ struct rte_cfgfile *cfgfile;
+
+ /* override comment character with an invalid one */
+ memset(&params, 0, sizeof(params));
+ params.comment_character = '$';
+
+ cfgfile = rte_cfgfile_load_with_params(CFG_FILES_ETC "/sample2.ini", 0,
+ &params);
+ TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur");
+
+ return 0;
+}
+
+static int
+test_cfgfile_invalid_key_value_pair(void)
+{
+ struct rte_cfgfile *cfgfile;
+
+ cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/empty_key_value.ini", 0);
+ TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur");
+
+ return 0;
+}
+
+static int
+test_cfgfile_empty_key_value_pair(void)
+{
+ struct rte_cfgfile *cfgfile;
+ const char *value;
+ int ret;
+
+ cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/empty_key_value.ini",
+ CFG_FLAG_EMPTY_VALUES);
+ TEST_ASSERT_NOT_NULL(cfgfile, "Failed to parse empty_key_value.ini");
+
+ ret = rte_cfgfile_num_sections(cfgfile, NULL, 0);
+ TEST_ASSERT(ret == 1, "Unexpected number of sections: %d", ret);
+
+ ret = rte_cfgfile_has_section(cfgfile, "section1");
+ TEST_ASSERT(ret, "section1 missing");
+
+ ret = rte_cfgfile_section_num_entries(cfgfile, "section1");
+ TEST_ASSERT(ret == 1, "section1 unexpected number of entries: %d", ret);
+
+ value = rte_cfgfile_get_entry(cfgfile, "section1", "key");
+ TEST_ASSERT(strlen(value) == 0, "key unexpected value: %s", value);
+
+ ret = rte_cfgfile_close(cfgfile);
+ TEST_ASSERT_SUCCESS(ret, "Failed to close cfgfile");
+
+ return 0;
+}
+
+static int
+test_cfgfile_missing_section(void)
+{
+ struct rte_cfgfile *cfgfile;
+
+ cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/missing_section.ini", 0);
+ TEST_ASSERT_NULL(cfgfile, "Expected failured did not occur");
+
+ return 0;
+}
+
+static int
+test_cfgfile_global_properties(void)
+{
+ struct rte_cfgfile *cfgfile;
+ const char *value;
+ int ret;
+
+ cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/missing_section.ini",
+ CFG_FLAG_GLOBAL_SECTION);
+ TEST_ASSERT_NOT_NULL(cfgfile, "Failed to load config file");
+
+ ret = rte_cfgfile_num_sections(cfgfile, NULL, 0);
+ TEST_ASSERT(ret == 1, "Unexpected number of sections: %d", ret);
+
+ ret = rte_cfgfile_has_section(cfgfile, "GLOBAL");
+ TEST_ASSERT(ret, "global section missing");
+
+ ret = rte_cfgfile_section_num_entries(cfgfile, "GLOBAL");
+ TEST_ASSERT(ret == 1, "GLOBAL unexpected number of entries: %d", ret);
+
+ value = rte_cfgfile_get_entry(cfgfile, "GLOBAL", "key");
+ TEST_ASSERT(strcmp("value", value) == 0,
+ "key unexpected value: %s", value);
+
+ ret = rte_cfgfile_close(cfgfile);
+ TEST_ASSERT_SUCCESS(ret, "Failed to close cfgfile");
+
+ return 0;
+}
+
+static int
+test_cfgfile_empty_file(void)
+{
+ struct rte_cfgfile *cfgfile;
+ int ret;
+
+ cfgfile = rte_cfgfile_load(CFG_FILES_ETC "/empty.ini", 0);
+ TEST_ASSERT_NOT_NULL(cfgfile, "Failed to load config file");
+
+ ret = rte_cfgfile_num_sections(cfgfile, NULL, 0);
+ TEST_ASSERT(ret == 0, "Unexpected number of sections: %d", ret);
+
+ ret = rte_cfgfile_close(cfgfile);
+ TEST_ASSERT_SUCCESS(ret, "Failed to close cfgfile");
+
+ return 0;
+}
+
+static int
+test_cfgfile(void)
+{
+ if (test_cfgfile_setup())
+ return -1;
+
+ if (test_cfgfile_sample1())
+ return -1;
+
+ if (test_cfgfile_sample2())
+ return -1;
+
+ if (test_cfgfile_invalid_section_header())
+ return -1;
+
+ if (test_cfgfile_invalid_comment())
+ return -1;
+
+ if (test_cfgfile_invalid_key_value_pair())
+ return -1;
+
+ if (test_cfgfile_empty_key_value_pair())
+ return -1;
+
+ if (test_cfgfile_missing_section())
+ return -1;
+
+ if (test_cfgfile_global_properties())
+ return -1;
+
+ if (test_cfgfile_empty_file())
+ return -1;
+
+ if (test_cfgfile_cleanup())
+ return -1;
+
+ return 0;
+}
+
+REGISTER_TEST_COMMAND(cfgfile_autotest, test_cfgfile);
diff --git a/test/test/test_cfgfiles/etc/empty.ini b/test/test/test_cfgfiles/etc/empty.ini
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/test/test_cfgfiles/etc/empty.ini
diff --git a/test/test/test_cfgfiles/etc/empty_key_value.ini b/test/test/test_cfgfiles/etc/empty_key_value.ini
new file mode 100644
index 00000000..53284467
--- /dev/null
+++ b/test/test/test_cfgfiles/etc/empty_key_value.ini
@@ -0,0 +1,3 @@
+[section1]
+; this is section 1
+key=
diff --git a/test/test/test_cfgfiles/etc/invalid_section.ini b/test/test/test_cfgfiles/etc/invalid_section.ini
new file mode 100644
index 00000000..95d68039
--- /dev/null
+++ b/test/test/test_cfgfiles/etc/invalid_section.ini
@@ -0,0 +1,3 @@
+[invalid
+; this is section 1
+key1=value1
diff --git a/test/test/test_cfgfiles/etc/line_too_long.ini b/test/test/test_cfgfiles/etc/line_too_long.ini
new file mode 100644
index 00000000..1dce1648
--- /dev/null
+++ b/test/test/test_cfgfiles/etc/line_too_long.ini
@@ -0,0 +1,3 @@
+[section1]
+; this is section 1
+012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
diff --git a/test/test/test_cfgfiles/etc/missing_section.ini b/test/test/test_cfgfiles/etc/missing_section.ini
new file mode 100644
index 00000000..c78e131b
--- /dev/null
+++ b/test/test/test_cfgfiles/etc/missing_section.ini
@@ -0,0 +1,2 @@
+; no section
+key=value
diff --git a/test/test/test_cfgfiles/etc/sample1.ini b/test/test/test_cfgfiles/etc/sample1.ini
new file mode 100644
index 00000000..aef91c24
--- /dev/null
+++ b/test/test/test_cfgfiles/etc/sample1.ini
@@ -0,0 +1,12 @@
+; this is a global comment
+
+[section1]
+; this is section 1
+key1=value1
+
+[section2]
+; this is section 2
+;key1=value1
+key2=value2
+key3=value3 ; this is key3
+ignore-missing-separator
diff --git a/test/test/test_cfgfiles/etc/sample2.ini b/test/test/test_cfgfiles/etc/sample2.ini
new file mode 100644
index 00000000..21075e97
--- /dev/null
+++ b/test/test/test_cfgfiles/etc/sample2.ini
@@ -0,0 +1,12 @@
+# this is a global comment
+
+[section1]
+# this is section 1
+key1=value1
+
+[section2]
+# this is section 2
+#key1=value1
+key2=value2
+key3=value3 # this is key3
+ignore-missing-separator
diff --git a/app/test/test_cmdline.c b/test/test/test_cmdline.c
index 38c7256f..38c7256f 100644
--- a/app/test/test_cmdline.c
+++ b/test/test/test_cmdline.c
diff --git a/app/test/test_cmdline.h b/test/test/test_cmdline.h
index 0ee91c17..0ee91c17 100644
--- a/app/test/test_cmdline.h
+++ b/test/test/test_cmdline.h
diff --git a/app/test/test_cmdline_cirbuf.c b/test/test/test_cmdline_cirbuf.c
index 87f83cc6..87f83cc6 100644
--- a/app/test/test_cmdline_cirbuf.c
+++ b/test/test/test_cmdline_cirbuf.c
diff --git a/app/test/test_cmdline_etheraddr.c b/test/test/test_cmdline_etheraddr.c
index e4f42317..e4f42317 100644
--- a/app/test/test_cmdline_etheraddr.c
+++ b/test/test/test_cmdline_etheraddr.c
diff --git a/app/test/test_cmdline_ipaddr.c b/test/test/test_cmdline_ipaddr.c
index 471d2ff1..471d2ff1 100644
--- a/app/test/test_cmdline_ipaddr.c
+++ b/test/test/test_cmdline_ipaddr.c
diff --git a/app/test/test_cmdline_lib.c b/test/test/test_cmdline_lib.c
index 65b823a7..65b823a7 100644
--- a/app/test/test_cmdline_lib.c
+++ b/test/test/test_cmdline_lib.c
diff --git a/app/test/test_cmdline_num.c b/test/test/test_cmdline_num.c
index 04263d39..e8f60cfa 100644
--- a/app/test/test_cmdline_num.c
+++ b/test/test/test_cmdline_num.c
@@ -315,6 +315,7 @@ can_parse_signed(int64_t expected_result, enum cmdline_numtype type)
case UINT64:
if (expected_result < 0)
return 0;
+ break;
case INT8:
if (expected_result > INT8_MAX || expected_result < INT8_MIN)
return 0;
diff --git a/app/test/test_cmdline_portlist.c b/test/test/test_cmdline_portlist.c
index b9664b0e..b9664b0e 100644
--- a/app/test/test_cmdline_portlist.c
+++ b/test/test/test_cmdline_portlist.c
diff --git a/app/test/test_cmdline_string.c b/test/test/test_cmdline_string.c
index c5bb9c0c..c5bb9c0c 100644
--- a/app/test/test_cmdline_string.c
+++ b/test/test/test_cmdline_string.c
diff --git a/app/test/test_common.c b/test/test/test_common.c
index 8effa2f9..8effa2f9 100644
--- a/app/test/test_common.c
+++ b/test/test/test_common.c
diff --git a/app/test/test_cpuflags.c b/test/test/test_cpuflags.c
index 0e5ebe78..0e5ebe78 100644
--- a/app/test/test_cpuflags.c
+++ b/test/test/test_cpuflags.c
diff --git a/test/test/test_crc.c b/test/test/test_crc.c
new file mode 100644
index 00000000..cd5af69a
--- /dev/null
+++ b/test/test/test_crc.c
@@ -0,0 +1,184 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include <rte_hexdump.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_net_crc.h>
+
+#define CRC_VEC_LEN 32
+#define CRC32_VEC_LEN1 1512
+#define CRC32_VEC_LEN2 348
+#define CRC16_VEC_LEN1 12
+#define CRC16_VEC_LEN2 2
+#define LINE_LEN 75
+
+/* CRC test vector */
+static const uint8_t crc_vec[CRC_VEC_LEN] = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f',
+ 'g', 'h', 'i', 'j', 'A', 'B', 'C', 'D',
+ 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L',
+};
+
+/* 32-bit CRC test vector */
+static const uint8_t crc32_vec1[12] = {
+ 0xBE, 0xD7, 0x23, 0x47, 0x6B, 0x8F,
+ 0xB3, 0x14, 0x5E, 0xFB, 0x35, 0x59,
+};
+
+/* 16-bit CRC test vector 1 */
+static const uint8_t crc16_vec1[CRC16_VEC_LEN1] = {
+ 0x0D, 0x01, 0x01, 0x23, 0x45, 0x67,
+ 0x89, 0x01, 0x23, 0x45, 0x00, 0x01,
+};
+
+/* 16-bit CRC test vector 2 */
+static const uint8_t crc16_vec2[CRC16_VEC_LEN2] = {
+ 0x03, 0x3f,
+};
+/** CRC results */
+static const uint32_t crc32_vec_res = 0xb491aab4;
+static const uint32_t crc32_vec1_res = 0xac54d294;
+static const uint32_t crc32_vec2_res = 0xefaae02f;
+static const uint32_t crc16_vec_res = 0x6bec;
+static const uint16_t crc16_vec1_res = 0x8cdd;
+static const uint16_t crc16_vec2_res = 0xec5b;
+
+static int
+crc_calc(const uint8_t *vec,
+ uint32_t vec_len,
+ enum rte_net_crc_type type)
+{
+ /* compute CRC */
+ uint32_t ret = rte_net_crc_calc(vec, vec_len, type);
+
+ /* dump data on console */
+ TEST_HEXDUMP(stdout, NULL, vec, vec_len);
+
+ return ret;
+}
+
+static int
+test_crc_calc(void)
+{
+ uint32_t i;
+ enum rte_net_crc_type type;
+ uint8_t *test_data;
+ uint32_t result;
+ int error;
+
+ /* 32-bit ethernet CRC: Test 1 */
+ type = RTE_NET_CRC32_ETH;
+
+ result = crc_calc(crc_vec, CRC_VEC_LEN, type);
+ if (result != crc32_vec_res)
+ return -1;
+
+ /* 32-bit ethernet CRC: Test 2 */
+ test_data = rte_zmalloc(NULL, CRC32_VEC_LEN1, 0);
+
+ for (i = 0; i < CRC32_VEC_LEN1; i += 12)
+ rte_memcpy(&test_data[i], crc32_vec1, 12);
+
+ result = crc_calc(test_data, CRC32_VEC_LEN1, type);
+ if (result != crc32_vec1_res) {
+ error = -2;
+ goto fail;
+ }
+
+ /* 32-bit ethernet CRC: Test 3 */
+ for (i = 0; i < CRC32_VEC_LEN2; i += 12)
+ rte_memcpy(&test_data[i], crc32_vec1, 12);
+
+ result = crc_calc(test_data, CRC32_VEC_LEN2, type);
+ if (result != crc32_vec2_res) {
+ error = -3;
+ goto fail;
+ }
+
+ /* 16-bit CCITT CRC: Test 4 */
+ type = RTE_NET_CRC16_CCITT;
+ result = crc_calc(crc_vec, CRC_VEC_LEN, type);
+ if (result != crc16_vec_res) {
+ error = -4;
+ goto fail;
+ }
+ /* 16-bit CCITT CRC: Test 5 */
+ result = crc_calc(crc16_vec1, CRC16_VEC_LEN1, type);
+ if (result != crc16_vec1_res) {
+ error = -5;
+ goto fail;
+ }
+ /* 16-bit CCITT CRC: Test 6 */
+ result = crc_calc(crc16_vec2, CRC16_VEC_LEN2, type);
+ if (result != crc16_vec2_res) {
+ error = -6;
+ goto fail;
+ }
+
+ rte_free(test_data);
+ return 0;
+
+fail:
+ rte_free(test_data);
+ return error;
+}
+
+static int
+test_crc(void)
+{
+ int ret;
+ /* set CRC scalar mode */
+ rte_net_crc_set_alg(RTE_NET_CRC_SCALAR);
+
+ ret = test_crc_calc();
+ if (ret < 0) {
+ printf("test_crc (scalar): failed (%d)\n", ret);
+ return ret;
+ }
+ /* set CRC sse4.2 mode */
+ rte_net_crc_set_alg(RTE_NET_CRC_SSE42);
+
+ ret = test_crc_calc();
+ if (ret < 0) {
+ printf("test_crc (x86_64_SSE4.2): failed (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+REGISTER_TEST_COMMAND(crc_autotest, test_crc);
diff --git a/app/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 872f8b43..029ce8a0 100644
--- a/app/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -40,6 +40,11 @@
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+#include <rte_cryptodev_scheduler.h>
+#include <rte_cryptodev_scheduler_operations.h>
+#endif
+
#include "test.h"
#include "test_cryptodev.h"
@@ -52,7 +57,6 @@
#include "test_cryptodev_snow3g_test_vectors.h"
#include "test_cryptodev_snow3g_hash_test_vectors.h"
#include "test_cryptodev_zuc_test_vectors.h"
-#include "test_cryptodev_zuc_hash_test_vectors.h"
#include "test_cryptodev_gcm_test_vectors.h"
#include "test_cryptodev_hmac_test_vectors.h"
@@ -159,7 +163,7 @@ testsuite_setup(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct rte_cryptodev_info info;
- unsigned i, nb_devs, dev_id;
+ uint32_t i = 0, nb_devs, dev_id;
int ret;
uint16_t qp_id;
@@ -205,7 +209,7 @@ testsuite_setup(void)
return TEST_FAILED;
}
- /* Create 2 AESNI MB devices if required */
+ /* Create an AESNI MB device if required */
if (gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_MB_PMD) {
#ifndef RTE_LIBRTE_PMD_AESNI_MB
RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_AESNI_MB must be"
@@ -214,20 +218,18 @@ testsuite_setup(void)
#endif
nb_devs = rte_cryptodev_count_devtype(
RTE_CRYPTODEV_AESNI_MB_PMD);
- if (nb_devs < 2) {
- for (i = nb_devs; i < 2; i++) {
- ret = rte_eal_vdev_init(
- RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), NULL);
-
- TEST_ASSERT(ret == 0,
- "Failed to create instance %u of"
- " pmd : %s",
- i, RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
- }
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), NULL);
+
+ TEST_ASSERT(ret == 0,
+ "Failed to create instance of"
+ " pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
}
}
- /* Create 2 AESNI GCM devices if required */
+ /* Create an AESNI GCM device if required */
if (gbl_cryptodev_type == RTE_CRYPTODEV_AESNI_GCM_PMD) {
#ifndef RTE_LIBRTE_PMD_AESNI_GCM
RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_AESNI_GCM must be"
@@ -236,18 +238,16 @@ testsuite_setup(void)
#endif
nb_devs = rte_cryptodev_count_devtype(
RTE_CRYPTODEV_AESNI_GCM_PMD);
- if (nb_devs < 2) {
- for (i = nb_devs; i < 2; i++) {
- TEST_ASSERT_SUCCESS(rte_eal_vdev_init(
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), NULL),
- "Failed to create instance %u of"
- " pmd : %s",
- i, RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
- }
+ if (nb_devs < 1) {
+ TEST_ASSERT_SUCCESS(rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), NULL),
+ "Failed to create instance of"
+ " pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
}
}
- /* Create 2 SNOW 3G devices if required */
+ /* Create a SNOW 3G device if required */
if (gbl_cryptodev_type == RTE_CRYPTODEV_SNOW3G_PMD) {
#ifndef RTE_LIBRTE_PMD_SNOW3G
RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_SNOW3G must be"
@@ -255,18 +255,16 @@ testsuite_setup(void)
return TEST_FAILED;
#endif
nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_SNOW3G_PMD);
- if (nb_devs < 2) {
- for (i = nb_devs; i < 2; i++) {
- TEST_ASSERT_SUCCESS(rte_eal_vdev_init(
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), NULL),
- "Failed to create instance %u of"
- " pmd : %s",
- i, RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD));
- }
+ if (nb_devs < 1) {
+ TEST_ASSERT_SUCCESS(rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), NULL),
+ "Failed to create instance of"
+ " pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD));
}
}
- /* Create 2 KASUMI devices if required */
+ /* Create a KASUMI device if required */
if (gbl_cryptodev_type == RTE_CRYPTODEV_KASUMI_PMD) {
#ifndef RTE_LIBRTE_PMD_KASUMI
RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_KASUMI must be"
@@ -274,18 +272,16 @@ testsuite_setup(void)
return TEST_FAILED;
#endif
nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_KASUMI_PMD);
- if (nb_devs < 2) {
- for (i = nb_devs; i < 2; i++) {
- TEST_ASSERT_SUCCESS(rte_eal_vdev_init(
- RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), NULL),
- "Failed to create instance %u of"
- " pmd : %s",
- i, RTE_STR(CRYPTODEV_NAME_KASUMI_PMD));
- }
+ if (nb_devs < 1) {
+ TEST_ASSERT_SUCCESS(rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), NULL),
+ "Failed to create instance of"
+ " pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_KASUMI_PMD));
}
}
- /* Create 2 ZUC devices if required */
+ /* Create a ZUC device if required */
if (gbl_cryptodev_type == RTE_CRYPTODEV_ZUC_PMD) {
#ifndef RTE_LIBRTE_PMD_ZUC
RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_ZUC must be"
@@ -293,18 +289,16 @@ testsuite_setup(void)
return TEST_FAILED;
#endif
nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_ZUC_PMD);
- if (nb_devs < 2) {
- for (i = nb_devs; i < 2; i++) {
- TEST_ASSERT_SUCCESS(rte_eal_vdev_init(
- RTE_STR(CRYPTODEV_NAME_ZUC_PMD), NULL),
- "Failed to create instance %u of"
- " pmd : %s",
- i, RTE_STR(CRYPTODEV_NAME_ZUC_PMD));
- }
+ if (nb_devs < 1) {
+ TEST_ASSERT_SUCCESS(rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD), NULL),
+ "Failed to create instance of"
+ " pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD));
}
}
- /* Create 2 NULL devices if required */
+ /* Create a NULL device if required */
if (gbl_cryptodev_type == RTE_CRYPTODEV_NULL_PMD) {
#ifndef RTE_LIBRTE_PMD_NULL_CRYPTO
RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO must be"
@@ -313,20 +307,18 @@ testsuite_setup(void)
#endif
nb_devs = rte_cryptodev_count_devtype(
RTE_CRYPTODEV_NULL_PMD);
- if (nb_devs < 2) {
- for (i = nb_devs; i < 2; i++) {
- int dev_id = rte_eal_vdev_init(
- RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL);
-
- TEST_ASSERT(dev_id >= 0,
- "Failed to create instance %u of"
- " pmd : %s",
- i, RTE_STR(CRYPTODEV_NAME_NULL_PMD));
- }
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL);
+
+ TEST_ASSERT(ret == 0,
+ "Failed to create instance of"
+ " pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_NULL_PMD));
}
}
- /* Create 2 OPENSSL devices if required */
+ /* Create an OPENSSL device if required */
if (gbl_cryptodev_type == RTE_CRYPTODEV_OPENSSL_PMD) {
#ifndef RTE_LIBRTE_PMD_OPENSSL
RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_OPENSSL must be"
@@ -335,19 +327,60 @@ testsuite_setup(void)
#endif
nb_devs = rte_cryptodev_count_devtype(
RTE_CRYPTODEV_OPENSSL_PMD);
- if (nb_devs < 2) {
- for (i = nb_devs; i < 2; i++) {
- ret = rte_eal_vdev_init(
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD),
- NULL);
-
- TEST_ASSERT(ret == 0, "Failed to create "
- "instance %u of pmd : %s", i,
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
- }
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD),
+ NULL);
+
+ TEST_ASSERT(ret == 0, "Failed to create "
+ "instance of pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
}
}
+ /* Create a ARMv8 device if required */
+ if (gbl_cryptodev_type == RTE_CRYPTODEV_ARMV8_PMD) {
+#ifndef RTE_LIBRTE_PMD_ARMV8_CRYPTO
+ RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO must be"
+ " enabled in config file to run this testsuite.\n");
+ return TEST_FAILED;
+#endif
+ nb_devs = rte_cryptodev_count_devtype(
+ RTE_CRYPTODEV_ARMV8_PMD);
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_ARMV8_PMD),
+ NULL);
+
+ TEST_ASSERT(ret == 0, "Failed to create "
+ "instance of pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_ARMV8_PMD));
+ }
+ }
+
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+ if (gbl_cryptodev_type == RTE_CRYPTODEV_SCHEDULER_PMD) {
+
+#ifndef RTE_LIBRTE_PMD_AESNI_MB
+ RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_AESNI_MB must be"
+ " enabled in config file to run this testsuite.\n");
+ return TEST_FAILED;
+#endif
+ nb_devs = rte_cryptodev_count_devtype(
+ RTE_CRYPTODEV_SCHEDULER_PMD);
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
+ NULL);
+
+ TEST_ASSERT(ret == 0,
+ "Failed to create instance %u of"
+ " pmd : %s",
+ i, RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
+ }
+ }
+#endif /* RTE_LIBRTE_PMD_CRYPTO_SCHEDULER */
+
#ifndef RTE_LIBRTE_PMD_QAT
if (gbl_cryptodev_type == RTE_CRYPTODEV_QAT_SYM_PMD) {
RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_QAT must be enabled "
@@ -1466,6 +1499,86 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
}
static int
+test_AES_cipheronly_mb_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_AESNI_MB_PMD,
+ BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_AES_docsis_mb_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_AESNI_MB_PMD,
+ BLKCIPHER_AES_DOCSIS_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_AES_docsis_qat_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_QAT_SYM_PMD,
+ BLKCIPHER_AES_DOCSIS_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_DES_docsis_qat_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_QAT_SYM_PMD,
+ BLKCIPHER_DES_DOCSIS_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_authonly_mb_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_AESNI_MB_PMD,
+ BLKCIPHER_AUTHONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_AES_chain_mb_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1481,6 +1594,58 @@ test_AES_chain_mb_all(void)
return TEST_SUCCESS;
}
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+
+static int
+test_AES_cipheronly_scheduler_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_SCHEDULER_PMD,
+ BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_AES_chain_scheduler_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_SCHEDULER_PMD,
+ BLKCIPHER_AES_CHAIN_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_authonly_scheduler_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_SCHEDULER_PMD,
+ BLKCIPHER_AUTHONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+#endif /* RTE_LIBRTE_PMD_CRYPTO_SCHEDULER */
+
static int
test_AES_chain_openssl_all(void)
{
@@ -1530,6 +1695,54 @@ test_AES_chain_qat_all(void)
}
static int
+test_AES_cipheronly_qat_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_QAT_SYM_PMD,
+ BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_AES_chain_dpaa2_sec_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_DPAA2_SEC_PMD,
+ BLKCIPHER_AES_CHAIN_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_AES_cipheronly_dpaa2_sec_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_DPAA2_SEC_PMD,
+ BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_authonly_openssl_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1545,6 +1758,22 @@ test_authonly_openssl_all(void)
return TEST_SUCCESS;
}
+static int
+test_AES_chain_armv8_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_ARMV8_PMD,
+ BLKCIPHER_AES_CHAIN_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
/* ***** SNOW 3G Tests ***** */
static int
create_wireless_algo_hash_session(uint8_t dev_id,
@@ -1688,6 +1917,10 @@ create_wireless_algo_cipher_operation_oop(const uint8_t *iv, const uint8_t iv_le
TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
+ /* For OOP operation both buffers must have the same size */
+ if (ut_params->obuf)
+ rte_pktmbuf_prepend(ut_params->obuf, iv_pad_len);
+
memset(sym_op->cipher.iv.data, 0, iv_pad_len);
sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
sym_op->cipher.iv.length = iv_pad_len;
@@ -1746,6 +1979,65 @@ create_wireless_algo_cipher_auth_session(uint8_t dev_id,
}
static int
+create_wireless_cipher_auth_session(uint8_t dev_id,
+ enum rte_crypto_cipher_operation cipher_op,
+ enum rte_crypto_auth_operation auth_op,
+ enum rte_crypto_auth_algorithm auth_algo,
+ enum rte_crypto_cipher_algorithm cipher_algo,
+ const struct wireless_test_data *tdata)
+{
+ const uint8_t key_len = tdata->key.len;
+ uint8_t cipher_auth_key[key_len];
+
+ struct crypto_unittest_params *ut_params = &unittest_params;
+ const uint8_t *key = tdata->key.data;
+ const uint8_t aad_len = tdata->aad.len;
+ const uint8_t auth_len = tdata->digest.len;
+
+ memcpy(cipher_auth_key, key, key_len);
+
+ /* Setup Authentication Parameters */
+ ut_params->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ ut_params->auth_xform.next = NULL;
+
+ ut_params->auth_xform.auth.op = auth_op;
+ ut_params->auth_xform.auth.algo = auth_algo;
+ ut_params->auth_xform.auth.key.length = key_len;
+ /* Hash key = cipher key */
+ ut_params->auth_xform.auth.key.data = cipher_auth_key;
+ ut_params->auth_xform.auth.digest_length = auth_len;
+ ut_params->auth_xform.auth.add_auth_data_length = aad_len;
+
+ /* Setup Cipher Parameters */
+ ut_params->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ ut_params->cipher_xform.next = &ut_params->auth_xform;
+
+ ut_params->cipher_xform.cipher.algo = cipher_algo;
+ ut_params->cipher_xform.cipher.op = cipher_op;
+ ut_params->cipher_xform.cipher.key.data = cipher_auth_key;
+ ut_params->cipher_xform.cipher.key.length = key_len;
+
+ TEST_HEXDUMP(stdout, "key:", key, key_len);
+
+ /* Create Crypto session*/
+ ut_params->sess = rte_cryptodev_sym_session_create(dev_id,
+ &ut_params->cipher_xform);
+
+ TEST_ASSERT_NOT_NULL(ut_params->sess, "Session creation failed");
+ return 0;
+}
+
+static int
+create_zuc_cipher_auth_encrypt_generate_session(uint8_t dev_id,
+ const struct wireless_test_data *tdata)
+{
+ return create_wireless_cipher_auth_session(dev_id,
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_AUTH_OP_GENERATE, RTE_CRYPTO_AUTH_ZUC_EIA3,
+ RTE_CRYPTO_CIPHER_ZUC_EEA3, tdata);
+}
+
+static int
create_wireless_algo_auth_cipher_session(uint8_t dev_id,
enum rte_crypto_cipher_operation cipher_op,
enum rte_crypto_auth_operation auth_op,
@@ -1870,6 +2162,120 @@ create_wireless_algo_hash_operation(const uint8_t *auth_tag,
}
static int
+create_wireless_cipher_hash_operation(const struct wireless_test_data *tdata,
+ enum rte_crypto_auth_operation op,
+ enum rte_crypto_auth_algorithm auth_algo,
+ enum rte_crypto_cipher_algorithm cipher_algo)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ const uint8_t *auth_tag = tdata->digest.data;
+ const unsigned int auth_tag_len = tdata->digest.len;
+ const uint8_t *aad = tdata->aad.data;
+ const uint8_t aad_len = tdata->aad.len;
+ unsigned int plaintext_len = ceil_byte_length(tdata->plaintext.len);
+ unsigned int data_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
+
+ const uint8_t *iv = tdata->iv.data;
+ const uint8_t iv_len = tdata->iv.len;
+ const unsigned int cipher_len = tdata->validCipherLenInBits.len;
+ const unsigned int cipher_offset =
+ tdata->validCipherOffsetLenInBits.len;
+ const unsigned int auth_len = tdata->validAuthLenInBits.len;
+ const unsigned int auth_offset = tdata->validAuthOffsetLenInBits.len;
+
+ unsigned int iv_pad_len = 0;
+ unsigned int aad_buffer_len;
+
+ /* Generate Crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate pktmbuf offload");
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ /* set crypto operation source mbuf */
+ sym_op->m_src = ut_params->ibuf;
+
+ /* digest */
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ ut_params->ibuf, auth_tag_len);
+
+ TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ "no room to append auth tag");
+ ut_params->digest = sym_op->auth.digest.data;
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ ut_params->ibuf, data_pad_len);
+ sym_op->auth.digest.length = auth_tag_len;
+ if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ memset(sym_op->auth.digest.data, 0, auth_tag_len);
+ else
+ rte_memcpy(sym_op->auth.digest.data, auth_tag, auth_tag_len);
+
+ TEST_HEXDUMP(stdout, "digest:",
+ sym_op->auth.digest.data,
+ sym_op->auth.digest.length);
+
+ /* aad */
+ /*
+ * Always allocate the aad up to the block size.
+ * The cryptodev API calls out -
+ * - the array must be big enough to hold the AAD, plus any
+ * space to round this up to the nearest multiple of the
+ * block size (8 bytes for KASUMI and 16 bytes for SNOW 3G).
+ */
+ if (auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9)
+ aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 8);
+ else
+ aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
+ sym_op->auth.aad.data =
+ (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, aad_buffer_len);
+ TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
+ "no room to prepend aad");
+ sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(
+ ut_params->ibuf);
+ sym_op->auth.aad.length = aad_len;
+ memset(sym_op->auth.aad.data, 0, aad_buffer_len);
+ rte_memcpy(sym_op->auth.aad.data, aad, aad_len);
+ TEST_HEXDUMP(stdout, "aad:", sym_op->auth.aad.data, aad_len);
+
+ /* iv */
+ if (cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8)
+ iv_pad_len = RTE_ALIGN_CEIL(iv_len, 8);
+ else
+ iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, iv_pad_len);
+
+ TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
+ memset(sym_op->cipher.iv.data, 0, iv_pad_len);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = iv_pad_len;
+ rte_memcpy(sym_op->cipher.iv.data, iv, iv_len);
+ sym_op->cipher.data.length = cipher_len;
+ sym_op->cipher.data.offset = cipher_offset + auth_offset;
+ sym_op->auth.data.length = auth_len;
+ sym_op->auth.data.offset = auth_offset + cipher_offset;
+
+ return 0;
+}
+
+static int
+create_zuc_cipher_hash_generate_operation(
+ const struct wireless_test_data *tdata)
+{
+ return create_wireless_cipher_hash_operation(tdata,
+ RTE_CRYPTO_AUTH_OP_GENERATE,
+ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ RTE_CRYPTO_CIPHER_ZUC_EEA3);
+}
+
+static int
create_wireless_algo_cipher_hash_operation(const uint8_t *auth_tag,
const unsigned auth_tag_len,
const uint8_t *aad, const uint8_t aad_len,
@@ -2509,6 +2915,83 @@ test_kasumi_encryption(const struct kasumi_test_data *tdata)
}
static int
+test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+
+ unsigned int plaintext_pad_len;
+ unsigned int plaintext_len;
+
+ uint8_t buffer[10000];
+ const uint8_t *ciphertext;
+
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+ if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
+ printf("Device doesn't support scatter-gather. "
+ "Test Skipped.\n");
+ return 0;
+ }
+
+ /* Create KASUMI session */
+ retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ tdata->key.data, tdata->key.len);
+ if (retval < 0)
+ return retval;
+
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+
+
+ /* Append data which is padded to a multiple */
+ /* of the algorithms block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 8);
+
+ ut_params->ibuf = create_segmented_mbuf(ts_params->mbuf_pool,
+ plaintext_pad_len, 10, 0);
+
+ pktmbuf_write(ut_params->ibuf, 0, plaintext_len, tdata->plaintext.data);
+
+ /* Create KASUMI operation */
+ retval = create_wireless_algo_cipher_operation(tdata->iv.data,
+ tdata->iv.len,
+ tdata->plaintext.len,
+ tdata->validCipherOffsetLenInBits.len,
+ RTE_CRYPTO_CIPHER_KASUMI_F8);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
+
+ ut_params->obuf = ut_params->op->sym->m_dst;
+
+ if (ut_params->obuf)
+ ciphertext = rte_pktmbuf_read(ut_params->obuf, tdata->iv.len,
+ plaintext_len, buffer);
+ else
+ ciphertext = rte_pktmbuf_read(ut_params->ibuf, tdata->iv.len,
+ plaintext_len, buffer);
+
+ /* Validate obuf */
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len);
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
+ ciphertext,
+ tdata->ciphertext.data,
+ tdata->validCipherLenInBits.len,
+ "KASUMI Ciphertext data not as expected");
+ return 0;
+}
+
+static int
test_kasumi_encryption_oop(const struct kasumi_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -2577,6 +3060,81 @@ test_kasumi_encryption_oop(const struct kasumi_test_data *tdata)
}
static int
+test_kasumi_encryption_oop_sgl(const struct kasumi_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+ unsigned int plaintext_pad_len;
+ unsigned int plaintext_len;
+
+ const uint8_t *ciphertext;
+ uint8_t buffer[2048];
+
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+ if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
+ printf("Device doesn't support scatter-gather. "
+ "Test Skipped.\n");
+ return 0;
+ }
+
+ /* Create KASUMI session */
+ retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_CIPHER_KASUMI_F8,
+ tdata->key.data, tdata->key.len);
+ if (retval < 0)
+ return retval;
+
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+ /* Append data which is padded to a multiple */
+ /* of the algorithms block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 8);
+
+ ut_params->ibuf = create_segmented_mbuf(ts_params->mbuf_pool,
+ plaintext_pad_len, 10, 0);
+ ut_params->obuf = create_segmented_mbuf(ts_params->mbuf_pool,
+ plaintext_pad_len, 3, 0);
+
+ /* Append data which is padded to a multiple */
+ /* of the algorithms block size */
+ pktmbuf_write(ut_params->ibuf, 0, plaintext_len, tdata->plaintext.data);
+
+ /* Create KASUMI operation */
+ retval = create_wireless_algo_cipher_operation_oop(tdata->iv.data,
+ tdata->iv.len,
+ tdata->plaintext.len,
+ tdata->validCipherOffsetLenInBits.len,
+ RTE_CRYPTO_CIPHER_KASUMI_F8);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
+
+ ut_params->obuf = ut_params->op->sym->m_dst;
+ if (ut_params->obuf)
+ ciphertext = rte_pktmbuf_read(ut_params->obuf, tdata->iv.len,
+ plaintext_pad_len, buffer);
+ else
+ ciphertext = rte_pktmbuf_read(ut_params->ibuf, tdata->iv.len,
+ plaintext_pad_len, buffer);
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
+ ciphertext,
+ tdata->ciphertext.data,
+ tdata->validCipherLenInBits.len,
+ "KASUMI Ciphertext data not as expected");
+ return 0;
+}
+
+
+static int
test_kasumi_decryption_oop(const struct kasumi_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -2849,6 +3407,85 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata)
return 0;
}
+static int
+test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+ unsigned int plaintext_pad_len;
+ unsigned int plaintext_len;
+ uint8_t buffer[10000];
+ const uint8_t *ciphertext;
+
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+ if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
+ printf("Device doesn't support scatter-gather. "
+ "Test Skipped.\n");
+ return 0;
+ }
+
+ /* Create SNOW 3G session */
+ retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ tdata->key.data, tdata->key.len);
+ if (retval < 0)
+ return retval;
+
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+ /* Append data which is padded to a multiple of */
+ /* the algorithms block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
+
+ ut_params->ibuf = create_segmented_mbuf(ts_params->mbuf_pool,
+ plaintext_pad_len, 10, 0);
+ ut_params->obuf = create_segmented_mbuf(ts_params->mbuf_pool,
+ plaintext_pad_len, 3, 0);
+
+ TEST_ASSERT_NOT_NULL(ut_params->ibuf,
+ "Failed to allocate input buffer in mempool");
+ TEST_ASSERT_NOT_NULL(ut_params->obuf,
+ "Failed to allocate output buffer in mempool");
+
+ pktmbuf_write(ut_params->ibuf, 0, plaintext_len, tdata->plaintext.data);
+
+ /* Create SNOW 3G operation */
+ retval = create_wireless_algo_cipher_operation_oop(tdata->iv.data,
+ tdata->iv.len,
+ tdata->validCipherLenInBits.len,
+ tdata->validCipherOffsetLenInBits.len,
+ RTE_CRYPTO_CIPHER_SNOW3G_UEA2);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
+
+ ut_params->obuf = ut_params->op->sym->m_dst;
+ if (ut_params->obuf)
+ ciphertext = rte_pktmbuf_read(ut_params->obuf, tdata->iv.len,
+ plaintext_len, buffer);
+ else
+ ciphertext = rte_pktmbuf_read(ut_params->ibuf, tdata->iv.len,
+ plaintext_len, buffer);
+
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len);
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
+ ciphertext,
+ tdata->ciphertext.data,
+ tdata->validDataLenInBits.len,
+ "SNOW 3G Ciphertext data not as expected");
+
+ return 0;
+}
+
/* Shift right a buffer by "offset" bits, "offset" < 8 */
static void
buffer_shift_right(uint8_t *buffer, uint32_t length, uint8_t offset)
@@ -3102,6 +3739,93 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata)
}
static int
+test_zuc_cipher_auth(const struct wireless_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+
+ uint8_t *plaintext, *ciphertext;
+ unsigned int plaintext_pad_len;
+ unsigned int plaintext_len;
+
+ struct rte_cryptodev_sym_capability_idx cap_idx;
+
+ /* Check if device supports ZUC EEA3 */
+ cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_ZUC_EEA3;
+
+ if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+ &cap_idx) == NULL)
+ return -ENOTSUP;
+
+ /* Check if device supports ZUC EIA3 */
+ cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ cap_idx.algo.auth = RTE_CRYPTO_AUTH_ZUC_EIA3;
+
+ if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+ &cap_idx) == NULL)
+ return -ENOTSUP;
+
+ /* Create ZUC session */
+ retval = create_zuc_cipher_auth_encrypt_generate_session(
+ ts_params->valid_devs[0],
+ tdata);
+ if (retval < 0)
+ return retval;
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ /* clear mbuf payload */
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+ /* Append data which is padded to a multiple of */
+ /* the algorithms block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ plaintext_pad_len);
+ memcpy(plaintext, tdata->plaintext.data, plaintext_len);
+
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, plaintext_len);
+
+ /* Create ZUC operation */
+ retval = create_zuc_cipher_hash_generate_operation(tdata);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
+ ut_params->obuf = ut_params->op->sym->m_src;
+ if (ut_params->obuf)
+ ciphertext = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
+ + tdata->iv.len + tdata->aad.len;
+ else
+ ciphertext = plaintext;
+
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len);
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
+ ciphertext,
+ tdata->ciphertext.data,
+ tdata->validDataLenInBits.len,
+ "ZUC Ciphertext data not as expected");
+
+ ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *)
+ + plaintext_pad_len + tdata->aad.len + tdata->iv.len;
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ ut_params->digest,
+ tdata->digest.data,
+ 4,
+ "ZUC Generated auth tag not as expected");
+ return 0;
+}
+
+static int
test_snow3g_cipher_auth(const struct snow3g_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -3439,7 +4163,7 @@ test_kasumi_cipher_auth(const struct kasumi_test_data *tdata)
}
static int
-test_zuc_encryption(const struct zuc_test_data *tdata)
+test_zuc_encryption(const struct wireless_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -3449,6 +4173,16 @@ test_zuc_encryption(const struct zuc_test_data *tdata)
unsigned plaintext_pad_len;
unsigned plaintext_len;
+ struct rte_cryptodev_sym_capability_idx cap_idx;
+
+ /* Check if device supports ZUC EEA3 */
+ cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_ZUC_EEA3;
+
+ if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+ &cap_idx) == NULL)
+ return -ENOTSUP;
+
/* Create ZUC session */
retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
RTE_CRYPTO_CIPHER_OP_ENCRYPT,
@@ -3504,7 +4238,95 @@ test_zuc_encryption(const struct zuc_test_data *tdata)
}
static int
-test_zuc_authentication(const struct zuc_hash_test_data *tdata)
+test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+
+ unsigned int plaintext_pad_len;
+ unsigned int plaintext_len;
+ const uint8_t *ciphertext;
+ uint8_t ciphertext_buffer[2048];
+ struct rte_cryptodev_info dev_info;
+
+ struct rte_cryptodev_sym_capability_idx cap_idx;
+
+ /* Check if device supports ZUC EEA3 */
+ cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_ZUC_EEA3;
+
+ if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+ &cap_idx) == NULL)
+ return -ENOTSUP;
+
+ rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
+ if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
+ printf("Device doesn't support scatter-gather. "
+ "Test Skipped.\n");
+ return -ENOTSUP;
+ }
+
+ plaintext_len = ceil_byte_length(tdata->plaintext.len);
+
+ /* Append data which is padded to a multiple */
+ /* of the algorithms block size */
+ plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 8);
+
+ ut_params->ibuf = create_segmented_mbuf(ts_params->mbuf_pool,
+ plaintext_pad_len, 10, 0);
+
+ pktmbuf_write(ut_params->ibuf, 0, plaintext_len,
+ tdata->plaintext.data);
+
+ /* Create ZUC session */
+ retval = create_wireless_algo_cipher_session(ts_params->valid_devs[0],
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ tdata->key.data, tdata->key.len);
+ if (retval < 0)
+ return retval;
+
+ /* Clear mbuf payload */
+
+ pktmbuf_write(ut_params->ibuf, 0, plaintext_len, tdata->plaintext.data);
+
+ /* Create ZUC operation */
+ retval = create_wireless_algo_cipher_operation(tdata->iv.data,
+ tdata->iv.len, tdata->plaintext.len,
+ tdata->validCipherOffsetLenInBits.len,
+ RTE_CRYPTO_CIPHER_ZUC_EEA3);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op = process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op);
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed to retrieve obuf");
+
+ ut_params->obuf = ut_params->op->sym->m_dst;
+ if (ut_params->obuf)
+ ciphertext = rte_pktmbuf_read(ut_params->obuf,
+ tdata->iv.len, plaintext_len, ciphertext_buffer);
+ else
+ ciphertext = rte_pktmbuf_read(ut_params->ibuf,
+ tdata->iv.len, plaintext_len, ciphertext_buffer);
+
+ /* Validate obuf */
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len);
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(
+ ciphertext,
+ tdata->ciphertext.data,
+ tdata->validCipherLenInBits.len,
+ "ZUC Ciphertext data not as expected");
+
+ return 0;
+}
+
+static int
+test_zuc_authentication(const struct wireless_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
@@ -3514,6 +4336,16 @@ test_zuc_authentication(const struct zuc_hash_test_data *tdata)
unsigned plaintext_len;
uint8_t *plaintext;
+ struct rte_cryptodev_sym_capability_idx cap_idx;
+
+ /* Check if device supports ZUC EIA3 */
+ cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ cap_idx.algo.auth = RTE_CRYPTO_AUTH_ZUC_EIA3;
+
+ if (rte_cryptodev_sym_capability_get(ts_params->valid_devs[0],
+ &cap_idx) == NULL)
+ return -ENOTSUP;
+
/* Create ZUC session */
retval = create_wireless_algo_hash_session(ts_params->valid_devs[0],
tdata->key.data, tdata->key.len,
@@ -3571,12 +4403,24 @@ test_kasumi_encryption_test_case_1(void)
}
static int
+test_kasumi_encryption_test_case_1_sgl(void)
+{
+ return test_kasumi_encryption_sgl(&kasumi_test_case_1);
+}
+
+static int
test_kasumi_encryption_test_case_1_oop(void)
{
return test_kasumi_encryption_oop(&kasumi_test_case_1);
}
static int
+test_kasumi_encryption_test_case_1_oop_sgl(void)
+{
+ return test_kasumi_encryption_oop_sgl(&kasumi_test_case_1);
+}
+
+static int
test_kasumi_encryption_test_case_2(void)
{
return test_kasumi_encryption(&kasumi_test_case_2);
@@ -3648,6 +4492,13 @@ test_snow3g_encryption_test_case_1_oop(void)
}
static int
+test_snow3g_encryption_test_case_1_oop_sgl(void)
+{
+ return test_snow3g_encryption_oop_sgl(&snow3g_test_case_1);
+}
+
+
+static int
test_snow3g_encryption_test_case_1_offset_oop(void)
{
return test_snow3g_encryption_offset_oop(&snow3g_test_case_1);
@@ -3739,61 +4590,97 @@ test_kasumi_cipher_auth_test_case_1(void)
static int
test_zuc_encryption_test_case_1(void)
{
- return test_zuc_encryption(&zuc_test_case_1);
+ return test_zuc_encryption(&zuc_test_case_cipher_193b);
}
static int
test_zuc_encryption_test_case_2(void)
{
- return test_zuc_encryption(&zuc_test_case_2);
+ return test_zuc_encryption(&zuc_test_case_cipher_800b);
}
static int
test_zuc_encryption_test_case_3(void)
{
- return test_zuc_encryption(&zuc_test_case_3);
+ return test_zuc_encryption(&zuc_test_case_cipher_1570b);
}
static int
test_zuc_encryption_test_case_4(void)
{
- return test_zuc_encryption(&zuc_test_case_4);
+ return test_zuc_encryption(&zuc_test_case_cipher_2798b);
}
static int
test_zuc_encryption_test_case_5(void)
{
- return test_zuc_encryption(&zuc_test_case_5);
+ return test_zuc_encryption(&zuc_test_case_cipher_4019b);
+}
+
+static int
+test_zuc_encryption_test_case_6_sgl(void)
+{
+ return test_zuc_encryption_sgl(&zuc_test_case_cipher_193b);
}
static int
test_zuc_hash_generate_test_case_1(void)
{
- return test_zuc_authentication(&zuc_hash_test_case_1);
+ return test_zuc_authentication(&zuc_test_case_auth_1b);
}
static int
test_zuc_hash_generate_test_case_2(void)
{
- return test_zuc_authentication(&zuc_hash_test_case_2);
+ return test_zuc_authentication(&zuc_test_case_auth_90b);
}
static int
test_zuc_hash_generate_test_case_3(void)
{
- return test_zuc_authentication(&zuc_hash_test_case_3);
+ return test_zuc_authentication(&zuc_test_case_auth_577b);
}
static int
test_zuc_hash_generate_test_case_4(void)
{
- return test_zuc_authentication(&zuc_hash_test_case_4);
+ return test_zuc_authentication(&zuc_test_case_auth_2079b);
}
static int
test_zuc_hash_generate_test_case_5(void)
{
- return test_zuc_authentication(&zuc_hash_test_case_5);
+ return test_zuc_authentication(&zuc_test_auth_5670b);
+}
+
+static int
+test_zuc_hash_generate_test_case_6(void)
+{
+ return test_zuc_authentication(&zuc_test_case_auth_128b);
+}
+
+static int
+test_zuc_hash_generate_test_case_7(void)
+{
+ return test_zuc_authentication(&zuc_test_case_auth_2080b);
+}
+
+static int
+test_zuc_hash_generate_test_case_8(void)
+{
+ return test_zuc_authentication(&zuc_test_case_auth_584b);
+}
+
+static int
+test_zuc_cipher_auth_test_case_1(void)
+{
+ return test_zuc_cipher_auth(&zuc_test_case_cipher_200b_auth_200b);
+}
+
+static int
+test_zuc_cipher_auth_test_case_2(void)
+{
+ return test_zuc_cipher_auth(&zuc_test_case_cipher_800b_auth_120b);
}
static int
@@ -3813,6 +4700,70 @@ test_3DES_chain_qat_all(void)
}
static int
+test_DES_cipheronly_qat_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_QAT_SYM_PMD,
+ BLKCIPHER_DES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_DES_docsis_openssl_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_OPENSSL_PMD,
+ BLKCIPHER_DES_DOCSIS_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_3DES_chain_dpaa2_sec_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_DPAA2_SEC_PMD,
+ BLKCIPHER_3DES_CHAIN_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_3DES_cipheronly_dpaa2_sec_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_DPAA2_SEC_PMD,
+ BLKCIPHER_3DES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_3DES_cipheronly_qat_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -3915,16 +4866,48 @@ create_gcm_session(uint8_t dev_id, enum rte_crypto_cipher_operation op,
}
static int
+create_gcm_xforms(struct rte_crypto_op *op,
+ enum rte_crypto_cipher_operation cipher_op,
+ uint8_t *key, const uint8_t key_len,
+ const uint8_t aad_len, const uint8_t auth_len,
+ enum rte_crypto_auth_operation auth_op)
+{
+ TEST_ASSERT_NOT_NULL(rte_crypto_op_sym_xforms_alloc(op, 2),
+ "failed to allocate space for crypto transforms");
+
+ struct rte_crypto_sym_op *sym_op = op->sym;
+
+ /* Setup Cipher Parameters */
+ sym_op->xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ sym_op->xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM;
+ sym_op->xform->cipher.op = cipher_op;
+ sym_op->xform->cipher.key.data = key;
+ sym_op->xform->cipher.key.length = key_len;
+
+ TEST_HEXDUMP(stdout, "key:", key, key_len);
+
+ /* Setup Authentication Parameters */
+ sym_op->xform->next->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ sym_op->xform->next->auth.algo = RTE_CRYPTO_AUTH_AES_GCM;
+ sym_op->xform->next->auth.op = auth_op;
+ sym_op->xform->next->auth.digest_length = auth_len;
+ sym_op->xform->next->auth.add_auth_data_length = aad_len;
+ sym_op->xform->next->auth.key.length = 0;
+ sym_op->xform->next->auth.key.data = NULL;
+ sym_op->xform->next->next = NULL;
+
+ return 0;
+}
+
+static int
create_gcm_operation(enum rte_crypto_cipher_operation op,
- const uint8_t *auth_tag, const unsigned auth_tag_len,
- const uint8_t *iv, const unsigned iv_len,
- const uint8_t *aad, const unsigned aad_len,
- const unsigned data_len, unsigned data_pad_len)
+ const struct gcm_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
- unsigned iv_pad_len = 0, aad_buffer_len;
+ uint8_t *plaintext, *ciphertext;
+ unsigned int iv_pad_len, aad_pad_len, plaintext_pad_len;
/* Generate Crypto op data structure */
ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
@@ -3934,63 +4917,118 @@ create_gcm_operation(enum rte_crypto_cipher_operation op,
struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
- sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
- ut_params->ibuf, auth_tag_len);
- TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
- "no room to append digest");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
- ut_params->ibuf, data_pad_len);
- sym_op->auth.digest.length = auth_tag_len;
-
- if (op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
- rte_memcpy(sym_op->auth.digest.data, auth_tag, auth_tag_len);
- TEST_HEXDUMP(stdout, "digest:",
- sym_op->auth.digest.data,
- sym_op->auth.digest.length);
- }
+ /* Append aad data */
+ aad_pad_len = RTE_ALIGN_CEIL(tdata->aad.len, 16);
+ sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ aad_pad_len);
+ TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
+ "no room to append aad");
- /* iv */
- iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
+ sym_op->auth.aad.length = tdata->aad.len;
+ sym_op->auth.aad.phys_addr =
+ rte_pktmbuf_mtophys(ut_params->ibuf);
+ memcpy(sym_op->auth.aad.data, tdata->aad.data, tdata->aad.len);
+ TEST_HEXDUMP(stdout, "aad:", sym_op->auth.aad.data,
+ sym_op->auth.aad.length);
+ /* Prepend iv */
+ iv_pad_len = RTE_ALIGN_CEIL(tdata->iv.len, 16);
sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
ut_params->ibuf, iv_pad_len);
TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data, "no room to prepend iv");
memset(sym_op->cipher.iv.data, 0, iv_pad_len);
sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
- sym_op->cipher.iv.length = iv_len;
+ sym_op->cipher.iv.length = tdata->iv.len;
- rte_memcpy(sym_op->cipher.iv.data, iv, iv_len);
+ rte_memcpy(sym_op->cipher.iv.data, tdata->iv.data, tdata->iv.len);
+ TEST_HEXDUMP(stdout, "iv:", sym_op->cipher.iv.data,
+ sym_op->cipher.iv.length);
- /*
- * Always allocate the aad up to the block size.
- * The cryptodev API calls out -
- * - the array must be big enough to hold the AAD, plus any
- * space to round this up to the nearest multiple of the
- * block size (16 bytes).
- */
- aad_buffer_len = ALIGN_POW2_ROUNDUP(aad_len, 16);
+ /* Append plaintext/ciphertext */
+ if (op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ plaintext_pad_len);
+ TEST_ASSERT_NOT_NULL(plaintext, "no room to append plaintext");
- sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_prepend(
- ut_params->ibuf, aad_buffer_len);
- TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
- "no room to prepend aad");
- sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(
- ut_params->ibuf);
- sym_op->auth.aad.length = aad_len;
+ memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext,
+ tdata->plaintext.len);
- memset(sym_op->auth.aad.data, 0, aad_buffer_len);
- rte_memcpy(sym_op->auth.aad.data, aad, aad_len);
+ if (ut_params->obuf) {
+ ciphertext = (uint8_t *)rte_pktmbuf_append(
+ ut_params->obuf,
+ plaintext_pad_len + aad_pad_len +
+ iv_pad_len);
+ TEST_ASSERT_NOT_NULL(ciphertext,
+ "no room to append ciphertext");
- TEST_HEXDUMP(stdout, "iv:", sym_op->cipher.iv.data, iv_pad_len);
- TEST_HEXDUMP(stdout, "aad:",
- sym_op->auth.aad.data, aad_len);
+ memset(ciphertext + aad_pad_len + iv_pad_len, 0,
+ tdata->ciphertext.len);
+ }
+ } else {
+ plaintext_pad_len = RTE_ALIGN_CEIL(tdata->ciphertext.len, 16);
+ ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ plaintext_pad_len);
+ TEST_ASSERT_NOT_NULL(ciphertext,
+ "no room to append ciphertext");
+
+ memcpy(ciphertext, tdata->ciphertext.data,
+ tdata->ciphertext.len);
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext,
+ tdata->ciphertext.len);
- sym_op->cipher.data.length = data_len;
- sym_op->cipher.data.offset = aad_buffer_len + iv_pad_len;
+ if (ut_params->obuf) {
+ plaintext = (uint8_t *)rte_pktmbuf_append(
+ ut_params->obuf,
+ plaintext_pad_len + aad_pad_len +
+ iv_pad_len);
+ TEST_ASSERT_NOT_NULL(plaintext,
+ "no room to append plaintext");
- sym_op->auth.data.offset = aad_buffer_len + iv_pad_len;
- sym_op->auth.data.length = data_len;
+ memset(plaintext + aad_pad_len + iv_pad_len, 0,
+ tdata->plaintext.len);
+ }
+ }
+
+ /* Append digest data */
+ if (op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ ut_params->obuf ? ut_params->obuf :
+ ut_params->ibuf,
+ tdata->auth_tag.len);
+ TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ "no room to append digest");
+ memset(sym_op->auth.digest.data, 0, tdata->auth_tag.len);
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ ut_params->obuf ? ut_params->obuf :
+ ut_params->ibuf,
+ plaintext_pad_len +
+ aad_pad_len + iv_pad_len);
+ sym_op->auth.digest.length = tdata->auth_tag.len;
+ } else {
+ sym_op->auth.digest.data = (uint8_t *)rte_pktmbuf_append(
+ ut_params->ibuf, tdata->auth_tag.len);
+ TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ "no room to append digest");
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ ut_params->ibuf,
+ plaintext_pad_len + aad_pad_len + iv_pad_len);
+ sym_op->auth.digest.length = tdata->auth_tag.len;
+
+ rte_memcpy(sym_op->auth.digest.data, tdata->auth_tag.data,
+ tdata->auth_tag.len);
+ TEST_HEXDUMP(stdout, "digest:",
+ sym_op->auth.digest.data,
+ sym_op->auth.digest.length);
+ }
+
+ sym_op->cipher.data.length = tdata->plaintext.len;
+ sym_op->cipher.data.offset = aad_pad_len + iv_pad_len;
+
+ sym_op->auth.data.length = tdata->plaintext.len;
+ sym_op->auth.data.offset = aad_pad_len + iv_pad_len;
return 0;
}
@@ -4002,9 +5040,9 @@ test_mb_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
struct crypto_unittest_params *ut_params = &unittest_params;
int retval;
-
- uint8_t *plaintext, *ciphertext, *auth_tag;
+ uint8_t *ciphertext, *auth_tag;
uint16_t plaintext_pad_len;
+ uint32_t i;
/* Create GCM session */
retval = create_gcm_session(ts_params->valid_devs[0],
@@ -4015,31 +5053,20 @@ test_mb_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
if (retval < 0)
return retval;
-
- ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ if (tdata->aad.len > MBUF_SIZE) {
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->large_mbuf_pool);
+ /* Populate full size of add data */
+ for (i = 32; i < GCM_MAX_AAD_LENGTH; i += 32)
+ memcpy(&tdata->aad.data[i], &tdata->aad.data[0], 32);
+ } else
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
/* clear mbuf payload */
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
- /*
- * Append data which is padded to a multiple
- * of the algorithms block size
- */
- plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
-
- plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- plaintext_pad_len);
- memcpy(plaintext, tdata->plaintext.data, tdata->plaintext.len);
-
- TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len);
-
- /* Create GCM opertaion */
- retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_ENCRYPT,
- tdata->auth_tag.data, tdata->auth_tag.len,
- tdata->iv.data, tdata->iv.len,
- tdata->aad.data, tdata->aad.len,
- tdata->plaintext.len, plaintext_pad_len);
+ /* Create GCM operation */
+ retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_ENCRYPT, tdata);
if (retval < 0)
return retval;
@@ -4054,14 +5081,18 @@ test_mb_AES_GCM_authenticated_encryption(const struct gcm_test_data *tdata)
TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
"crypto op processing failed");
+ plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
+
if (ut_params->op->sym->m_dst) {
ciphertext = rte_pktmbuf_mtod(ut_params->op->sym->m_dst,
uint8_t *);
auth_tag = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_dst,
uint8_t *, plaintext_pad_len);
} else {
- ciphertext = plaintext;
- auth_tag = plaintext + plaintext_pad_len;
+ ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *,
+ ut_params->op->sym->cipher.data.offset);
+ auth_tag = ciphertext + plaintext_pad_len;
}
TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
@@ -4127,15 +5158,68 @@ test_mb_AES_GCM_authenticated_encryption_test_case_7(void)
}
static int
+test_mb_AES_GCM_auth_encryption_test_case_256_1(void)
+{
+ return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_1);
+}
+
+static int
+test_mb_AES_GCM_auth_encryption_test_case_256_2(void)
+{
+ return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_2);
+}
+
+static int
+test_mb_AES_GCM_auth_encryption_test_case_256_3(void)
+{
+ return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_3);
+}
+
+static int
+test_mb_AES_GCM_auth_encryption_test_case_256_4(void)
+{
+ return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_4);
+}
+
+static int
+test_mb_AES_GCM_auth_encryption_test_case_256_5(void)
+{
+ return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_5);
+}
+
+static int
+test_mb_AES_GCM_auth_encryption_test_case_256_6(void)
+{
+ return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_6);
+}
+
+static int
+test_mb_AES_GCM_auth_encryption_test_case_256_7(void)
+{
+ return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_256_7);
+}
+
+static int
+test_mb_AES_GCM_auth_encryption_test_case_aad_1(void)
+{
+ return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_aad_1);
+}
+
+static int
+test_mb_AES_GCM_auth_encryption_test_case_aad_2(void)
+{
+ return test_mb_AES_GCM_authenticated_encryption(&gcm_test_case_aad_2);
+}
+
+static int
test_mb_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
struct crypto_unittest_params *ut_params = &unittest_params;
int retval;
-
- uint8_t *plaintext, *ciphertext;
- uint16_t ciphertext_pad_len;
+ uint8_t *plaintext;
+ uint32_t i;
/* Create GCM session */
retval = create_gcm_session(ts_params->valid_devs[0],
@@ -4146,31 +5230,23 @@ test_mb_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
if (retval < 0)
return retval;
-
/* alloc mbuf and set payload */
- ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ if (tdata->aad.len > MBUF_SIZE) {
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->large_mbuf_pool);
+ /* Populate full size of add data */
+ for (i = 32; i < GCM_MAX_AAD_LENGTH; i += 32)
+ memcpy(&tdata->aad.data[i], &tdata->aad.data[0], 32);
+ } else
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
rte_pktmbuf_tailroom(ut_params->ibuf));
- ciphertext_pad_len = RTE_ALIGN_CEIL(tdata->ciphertext.len, 16);
-
- ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
- ciphertext_pad_len);
- memcpy(ciphertext, tdata->ciphertext.data, tdata->ciphertext.len);
-
- TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
-
- /* Create GCM opertaion */
- retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_DECRYPT,
- tdata->auth_tag.data, tdata->auth_tag.len,
- tdata->iv.data, tdata->iv.len,
- tdata->aad.data, tdata->aad.len,
- tdata->ciphertext.len, ciphertext_pad_len);
+ /* Create GCM operation */
+ retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_DECRYPT, tdata);
if (retval < 0)
return retval;
-
rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
ut_params->op->sym->m_src = ut_params->ibuf;
@@ -4186,7 +5262,9 @@ test_mb_AES_GCM_authenticated_decryption(const struct gcm_test_data *tdata)
plaintext = rte_pktmbuf_mtod(ut_params->op->sym->m_dst,
uint8_t *);
else
- plaintext = ciphertext;
+ plaintext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *,
+ ut_params->op->sym->cipher.data.offset);
TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->ciphertext.len);
@@ -4246,6 +5324,358 @@ test_mb_AES_GCM_authenticated_decryption_test_case_7(void)
}
static int
+test_mb_AES_GCM_auth_decryption_test_case_256_1(void)
+{
+ return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_1);
+}
+
+static int
+test_mb_AES_GCM_auth_decryption_test_case_256_2(void)
+{
+ return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_2);
+}
+
+static int
+test_mb_AES_GCM_auth_decryption_test_case_256_3(void)
+{
+ return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_3);
+}
+
+static int
+test_mb_AES_GCM_auth_decryption_test_case_256_4(void)
+{
+ return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_4);
+}
+
+static int
+test_mb_AES_GCM_auth_decryption_test_case_256_5(void)
+{
+ return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_5);
+}
+
+static int
+test_mb_AES_GCM_auth_decryption_test_case_256_6(void)
+{
+ return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_6);
+}
+
+static int
+test_mb_AES_GCM_auth_decryption_test_case_256_7(void)
+{
+ return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_256_7);
+}
+
+static int
+test_mb_AES_GCM_auth_decryption_test_case_aad_1(void)
+{
+ return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_aad_1);
+}
+
+static int
+test_mb_AES_GCM_auth_decryption_test_case_aad_2(void)
+{
+ return test_mb_AES_GCM_authenticated_decryption(&gcm_test_case_aad_2);
+}
+
+static int
+test_AES_GCM_authenticated_encryption_oop(const struct gcm_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+ uint8_t *ciphertext, *auth_tag;
+ uint16_t plaintext_pad_len;
+
+ /* Create GCM session */
+ retval = create_gcm_session(ts_params->valid_devs[0],
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ tdata->key.data, tdata->key.len,
+ tdata->aad.len, tdata->auth_tag.len,
+ RTE_CRYPTO_AUTH_OP_GENERATE);
+ if (retval < 0)
+ return retval;
+
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ /* clear mbuf payload */
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+ memset(rte_pktmbuf_mtod(ut_params->obuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->obuf));
+
+ /* Create GCM operation */
+ retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_ENCRYPT, tdata);
+ if (retval < 0)
+ return retval;
+
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ ut_params->op->sym->m_src = ut_params->ibuf;
+ ut_params->op->sym->m_dst = ut_params->obuf;
+
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
+
+ ciphertext = rte_pktmbuf_mtod_offset(ut_params->obuf, uint8_t *,
+ ut_params->op->sym->cipher.data.offset);
+ auth_tag = ciphertext + plaintext_pad_len;
+
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+ TEST_HEXDUMP(stdout, "auth tag:", auth_tag, tdata->auth_tag.len);
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ ciphertext,
+ tdata->ciphertext.data,
+ tdata->ciphertext.len,
+ "GCM Ciphertext data not as expected");
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ auth_tag,
+ tdata->auth_tag.data,
+ tdata->auth_tag.len,
+ "GCM Generated auth tag not as expected");
+
+ return 0;
+
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_oop(void)
+{
+ return test_AES_GCM_authenticated_encryption_oop(&gcm_test_case_5);
+}
+
+static int
+test_AES_GCM_authenticated_decryption_oop(const struct gcm_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+ uint8_t *plaintext;
+
+ /* Create GCM session */
+ retval = create_gcm_session(ts_params->valid_devs[0],
+ RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ tdata->key.data, tdata->key.len,
+ tdata->aad.len, tdata->auth_tag.len,
+ RTE_CRYPTO_AUTH_OP_VERIFY);
+ if (retval < 0)
+ return retval;
+
+ /* alloc mbuf and set payload */
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+ memset(rte_pktmbuf_mtod(ut_params->obuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->obuf));
+
+ /* Create GCM operation */
+ retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_DECRYPT, tdata);
+ if (retval < 0)
+ return retval;
+
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ ut_params->op->sym->m_src = ut_params->ibuf;
+ ut_params->op->sym->m_dst = ut_params->obuf;
+
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+ plaintext = rte_pktmbuf_mtod_offset(ut_params->obuf, uint8_t *,
+ ut_params->op->sym->cipher.data.offset);
+
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->ciphertext.len);
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ plaintext,
+ tdata->plaintext.data,
+ tdata->plaintext.len,
+ "GCM plaintext data not as expected");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status,
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "GCM authentication failed");
+ return 0;
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_oop(void)
+{
+ return test_AES_GCM_authenticated_decryption_oop(&gcm_test_case_5);
+}
+
+static int
+test_AES_GCM_authenticated_encryption_sessionless(
+ const struct gcm_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+ uint8_t *ciphertext, *auth_tag;
+ uint16_t plaintext_pad_len;
+ uint8_t key[tdata->key.len + 1];
+
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ /* clear mbuf payload */
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+
+ /* Create GCM operation */
+ retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_ENCRYPT, tdata);
+ if (retval < 0)
+ return retval;
+
+ /* Create GCM xforms */
+ memcpy(key, tdata->key.data, tdata->key.len);
+ retval = create_gcm_xforms(ut_params->op,
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ key, tdata->key.len,
+ tdata->aad.len, tdata->auth_tag.len,
+ RTE_CRYPTO_AUTH_OP_GENERATE);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op->sym->m_src = ut_params->ibuf;
+
+ TEST_ASSERT_EQUAL(ut_params->op->sym->sess_type,
+ RTE_CRYPTO_SYM_OP_SESSIONLESS,
+ "crypto op session type not sessionless");
+
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed crypto process");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op status not success");
+
+ plaintext_pad_len = RTE_ALIGN_CEIL(tdata->plaintext.len, 16);
+
+ ciphertext = rte_pktmbuf_mtod_offset(ut_params->ibuf, uint8_t *,
+ ut_params->op->sym->cipher.data.offset);
+ auth_tag = ciphertext + plaintext_pad_len;
+
+ TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len);
+ TEST_HEXDUMP(stdout, "auth tag:", auth_tag, tdata->auth_tag.len);
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ ciphertext,
+ tdata->ciphertext.data,
+ tdata->ciphertext.len,
+ "GCM Ciphertext data not as expected");
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ auth_tag,
+ tdata->auth_tag.data,
+ tdata->auth_tag.len,
+ "GCM Generated auth tag not as expected");
+
+ return 0;
+
+}
+
+static int
+test_mb_AES_GCM_authenticated_encryption_sessionless(void)
+{
+ return test_AES_GCM_authenticated_encryption_sessionless(
+ &gcm_test_case_5);
+}
+
+static int
+test_AES_GCM_authenticated_decryption_sessionless(
+ const struct gcm_test_data *tdata)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ int retval;
+ uint8_t *plaintext;
+ uint8_t key[tdata->key.len + 1];
+
+ /* alloc mbuf and set payload */
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+
+ /* Create GCM operation */
+ retval = create_gcm_operation(RTE_CRYPTO_CIPHER_OP_DECRYPT, tdata);
+ if (retval < 0)
+ return retval;
+
+ /* Create GCM xforms */
+ memcpy(key, tdata->key.data, tdata->key.len);
+ retval = create_gcm_xforms(ut_params->op,
+ RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ key, tdata->key.len,
+ tdata->aad.len, tdata->auth_tag.len,
+ RTE_CRYPTO_AUTH_OP_VERIFY);
+ if (retval < 0)
+ return retval;
+
+ ut_params->op->sym->m_src = ut_params->ibuf;
+
+ TEST_ASSERT_EQUAL(ut_params->op->sym->sess_type,
+ RTE_CRYPTO_SYM_OP_SESSIONLESS,
+ "crypto op session type not sessionless");
+
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_NOT_NULL(ut_params->op, "failed crypto process");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op status not success");
+
+ plaintext = rte_pktmbuf_mtod_offset(ut_params->ibuf, uint8_t *,
+ ut_params->op->sym->cipher.data.offset);
+
+ TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->ciphertext.len);
+
+ /* Validate obuf */
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ plaintext,
+ tdata->plaintext.data,
+ tdata->plaintext.len,
+ "GCM plaintext data not as expected");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status,
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "GCM authentication failed");
+ return 0;
+}
+
+static int
+test_mb_AES_GCM_authenticated_decryption_sessionless(void)
+{
+ return test_AES_GCM_authenticated_decryption_sessionless(
+ &gcm_test_case_5);
+}
+
+static int
test_stats(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -5908,6 +7338,362 @@ test_authenticated_decryption_fail_when_corruption(
}
static int
+create_gcm_operation_SGL(enum rte_crypto_cipher_operation op,
+ const struct gcm_test_data *tdata,
+ void *digest_mem, uint64_t digest_phys)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+
+ const unsigned int auth_tag_len = tdata->auth_tag.len;
+ const unsigned int iv_len = tdata->iv.len;
+ const unsigned int aad_len = tdata->aad.len;
+
+ unsigned int iv_pad_len = 0;
+
+ /* Generate Crypto op data structure */
+ ut_params->op = rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(ut_params->op,
+ "Failed to allocate symmetric crypto operation struct");
+
+ struct rte_crypto_sym_op *sym_op = ut_params->op->sym;
+
+ sym_op->auth.digest.data = digest_mem;
+
+ TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
+ "no room to append digest");
+
+ sym_op->auth.digest.phys_addr = digest_phys;
+ sym_op->auth.digest.length = auth_tag_len;
+
+ if (op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ rte_memcpy(sym_op->auth.digest.data, tdata->auth_tag.data,
+ auth_tag_len);
+ TEST_HEXDUMP(stdout, "digest:",
+ sym_op->auth.digest.data,
+ sym_op->auth.digest.length);
+ }
+
+ iv_pad_len = RTE_ALIGN_CEIL(iv_len, 16);
+
+ sym_op->cipher.iv.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, iv_pad_len);
+
+ TEST_ASSERT_NOT_NULL(sym_op->cipher.iv.data,
+ "no room to prepend iv");
+
+ memset(sym_op->cipher.iv.data, 0, iv_pad_len);
+ sym_op->cipher.iv.phys_addr = rte_pktmbuf_mtophys(ut_params->ibuf);
+ sym_op->cipher.iv.length = iv_len;
+
+ rte_memcpy(sym_op->cipher.iv.data, tdata->iv.data, iv_pad_len);
+
+ sym_op->auth.aad.data = (uint8_t *)rte_pktmbuf_prepend(
+ ut_params->ibuf, aad_len);
+ TEST_ASSERT_NOT_NULL(sym_op->auth.aad.data,
+ "no room to prepend aad");
+ sym_op->auth.aad.phys_addr = rte_pktmbuf_mtophys(
+ ut_params->ibuf);
+ sym_op->auth.aad.length = aad_len;
+
+ memset(sym_op->auth.aad.data, 0, aad_len);
+ rte_memcpy(sym_op->auth.aad.data, tdata->aad.data, aad_len);
+
+ TEST_HEXDUMP(stdout, "iv:", sym_op->cipher.iv.data, iv_pad_len);
+ TEST_HEXDUMP(stdout, "aad:",
+ sym_op->auth.aad.data, aad_len);
+
+ sym_op->cipher.data.length = tdata->plaintext.len;
+ sym_op->cipher.data.offset = aad_len + iv_pad_len;
+
+ sym_op->auth.data.offset = aad_len + iv_pad_len;
+ sym_op->auth.data.length = tdata->plaintext.len;
+
+ return 0;
+}
+
+#define SGL_MAX_NO 16
+
+static int
+test_AES_GCM_authenticated_encryption_SGL(const struct gcm_test_data *tdata,
+ const int oop, uint32_t fragsz, uint32_t fragsz_oop)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct crypto_unittest_params *ut_params = &unittest_params;
+ struct rte_mbuf *buf, *buf_oop = NULL, *buf_last_oop = NULL;
+ int retval;
+ int to_trn = 0;
+ int to_trn_tbl[SGL_MAX_NO];
+ int segs = 1;
+ unsigned int trn_data = 0;
+ uint8_t *plaintext, *ciphertext, *auth_tag;
+
+ if (fragsz > tdata->plaintext.len)
+ fragsz = tdata->plaintext.len;
+
+ uint16_t plaintext_len = fragsz;
+ uint16_t frag_size_oop = fragsz_oop ? fragsz_oop : fragsz;
+
+ if (fragsz_oop > tdata->plaintext.len)
+ frag_size_oop = tdata->plaintext.len;
+
+ int ecx = 0;
+ void *digest_mem = NULL;
+
+ uint32_t prepend_len = ALIGN_POW2_ROUNDUP(tdata->iv.len, 16)
+ + tdata->aad.len;
+
+ if (tdata->plaintext.len % fragsz != 0) {
+ if (tdata->plaintext.len / fragsz + 1 > SGL_MAX_NO)
+ return 1;
+ } else {
+ if (tdata->plaintext.len / fragsz > SGL_MAX_NO)
+ return 1;
+ }
+
+ /*
+ * For out-op-place we need to alloc another mbuf
+ */
+ if (oop) {
+ ut_params->obuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ rte_pktmbuf_append(ut_params->obuf,
+ frag_size_oop + prepend_len);
+ buf_oop = ut_params->obuf;
+ }
+
+ /* Create GCM session */
+ retval = create_gcm_session(ts_params->valid_devs[0],
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ tdata->key.data, tdata->key.len,
+ tdata->aad.len, tdata->auth_tag.len,
+ RTE_CRYPTO_AUTH_OP_GENERATE);
+ if (retval < 0)
+ return retval;
+
+ ut_params->ibuf = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+
+ /* clear mbuf payload */
+ memset(rte_pktmbuf_mtod(ut_params->ibuf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(ut_params->ibuf));
+
+ plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ plaintext_len);
+
+ memcpy(plaintext, tdata->plaintext.data, plaintext_len);
+
+ trn_data += plaintext_len;
+
+ buf = ut_params->ibuf;
+
+ /*
+ * Loop until no more fragments
+ */
+
+ while (trn_data < tdata->plaintext.len) {
+ ++segs;
+ to_trn = (tdata->plaintext.len - trn_data < fragsz) ?
+ (tdata->plaintext.len - trn_data) : fragsz;
+
+ to_trn_tbl[ecx++] = to_trn;
+
+ buf->next = rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ buf = buf->next;
+
+ memset(rte_pktmbuf_mtod(buf, uint8_t *), 0,
+ rte_pktmbuf_tailroom(buf));
+
+ /* OOP */
+ if (oop && !fragsz_oop) {
+ buf_last_oop = buf_oop->next =
+ rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ buf_oop = buf_oop->next;
+ memset(rte_pktmbuf_mtod(buf_oop, uint8_t *),
+ 0, rte_pktmbuf_tailroom(buf_oop));
+ rte_pktmbuf_append(buf_oop, to_trn);
+ }
+
+ plaintext = (uint8_t *)rte_pktmbuf_append(buf,
+ to_trn);
+
+ memcpy(plaintext, tdata->plaintext.data + trn_data,
+ to_trn);
+ trn_data += to_trn;
+ if (trn_data == tdata->plaintext.len) {
+ if (oop) {
+ if (!fragsz_oop)
+ digest_mem = rte_pktmbuf_append(buf_oop,
+ tdata->auth_tag.len);
+ } else
+ digest_mem = (uint8_t *)rte_pktmbuf_append(buf,
+ tdata->auth_tag.len);
+ }
+ }
+
+ uint64_t digest_phys = 0;
+
+ ut_params->ibuf->nb_segs = segs;
+
+ segs = 1;
+ if (fragsz_oop && oop) {
+ to_trn = 0;
+ ecx = 0;
+
+ if (frag_size_oop == tdata->plaintext.len) {
+ digest_mem = rte_pktmbuf_append(ut_params->obuf,
+ tdata->auth_tag.len);
+
+ digest_phys = rte_pktmbuf_mtophys_offset(
+ ut_params->obuf,
+ tdata->plaintext.len + prepend_len);
+ }
+
+ trn_data = frag_size_oop;
+ while (trn_data < tdata->plaintext.len) {
+ ++segs;
+ to_trn =
+ (tdata->plaintext.len - trn_data <
+ frag_size_oop) ?
+ (tdata->plaintext.len - trn_data) :
+ frag_size_oop;
+
+ to_trn_tbl[ecx++] = to_trn;
+
+ buf_last_oop = buf_oop->next =
+ rte_pktmbuf_alloc(ts_params->mbuf_pool);
+ buf_oop = buf_oop->next;
+ memset(rte_pktmbuf_mtod(buf_oop, uint8_t *),
+ 0, rte_pktmbuf_tailroom(buf_oop));
+ rte_pktmbuf_append(buf_oop, to_trn);
+
+ trn_data += to_trn;
+
+ if (trn_data == tdata->plaintext.len) {
+ digest_mem = rte_pktmbuf_append(buf_oop,
+ tdata->auth_tag.len);
+ }
+ }
+
+ ut_params->obuf->nb_segs = segs;
+ }
+
+ /*
+ * Place digest at the end of the last buffer
+ */
+ if (!digest_phys)
+ digest_phys = rte_pktmbuf_mtophys(buf) + to_trn;
+ if (oop && buf_last_oop)
+ digest_phys = rte_pktmbuf_mtophys(buf_last_oop) + to_trn;
+
+ if (!digest_mem && !oop) {
+ digest_mem = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ + tdata->auth_tag.len);
+ digest_phys = rte_pktmbuf_mtophys_offset(ut_params->ibuf,
+ tdata->plaintext.len);
+ }
+
+ /* Create GCM opertaion */
+ retval = create_gcm_operation_SGL(RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ tdata, digest_mem, digest_phys);
+
+ if (retval < 0)
+ return retval;
+
+ rte_crypto_op_attach_sym_session(ut_params->op, ut_params->sess);
+
+ ut_params->op->sym->m_src = ut_params->ibuf;
+ if (oop)
+ ut_params->op->sym->m_dst = ut_params->obuf;
+
+ /* Process crypto operation */
+ TEST_ASSERT_NOT_NULL(process_crypto_request(ts_params->valid_devs[0],
+ ut_params->op), "failed to process sym crypto op");
+
+ TEST_ASSERT_EQUAL(ut_params->op->status, RTE_CRYPTO_OP_STATUS_SUCCESS,
+ "crypto op processing failed");
+
+
+ ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_src,
+ uint8_t *, prepend_len);
+ if (oop) {
+ ciphertext = rte_pktmbuf_mtod_offset(ut_params->op->sym->m_dst,
+ uint8_t *, prepend_len);
+ }
+
+ if (fragsz_oop)
+ fragsz = fragsz_oop;
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ ciphertext,
+ tdata->ciphertext.data,
+ fragsz,
+ "GCM Ciphertext data not as expected");
+
+ buf = ut_params->op->sym->m_src->next;
+ if (oop)
+ buf = ut_params->op->sym->m_dst->next;
+
+ unsigned int off = fragsz;
+
+ ecx = 0;
+ while (buf) {
+ ciphertext = rte_pktmbuf_mtod(buf,
+ uint8_t *);
+
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ ciphertext,
+ tdata->ciphertext.data + off,
+ to_trn_tbl[ecx],
+ "GCM Ciphertext data not as expected");
+
+ off += to_trn_tbl[ecx++];
+ buf = buf->next;
+ }
+
+ auth_tag = digest_mem;
+ TEST_ASSERT_BUFFERS_ARE_EQUAL(
+ auth_tag,
+ tdata->auth_tag.data,
+ tdata->auth_tag.len,
+ "GCM Generated auth tag not as expected");
+
+ return 0;
+}
+
+#define IN_PLACE 0
+#define OUT_OF_PLACE 1
+
+static int
+test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_400B(void)
+{
+ return test_AES_GCM_authenticated_encryption_SGL(
+ &gcm_test_case_SGL_1, OUT_OF_PLACE, 400, 400);
+}
+
+static int
+test_AES_GCM_auth_encrypt_SGL_out_of_place_1500B_2000B(void)
+{
+ return test_AES_GCM_authenticated_encryption_SGL(
+ &gcm_test_case_SGL_1, OUT_OF_PLACE, 1500, 2000);
+}
+
+static int
+test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_1seg(void)
+{
+ return test_AES_GCM_authenticated_encryption_SGL(
+ &gcm_test_case_8, OUT_OF_PLACE, 400,
+ gcm_test_case_8.plaintext.len);
+}
+
+static int
+test_AES_GCM_auth_encrypt_SGL_in_place_1500B(void)
+{
+
+ return test_AES_GCM_authenticated_encryption_SGL(
+ &gcm_test_case_SGL_1, IN_PLACE, 1500, 0);
+}
+
+static int
test_authentication_verify_fail_when_data_corrupted(
struct crypto_testsuite_params *ts_params,
struct crypto_unittest_params *ut_params,
@@ -6017,6 +7803,131 @@ auth_decryption_AES128CBC_HMAC_SHA1_fail_tag_corrupt(void)
&aes128cbc_hmac_sha1_test_vector);
}
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+
+/* global AESNI slave IDs for the scheduler test */
+uint8_t aesni_ids[2];
+
+static int
+test_scheduler_attach_slave_op(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ uint8_t sched_id = ts_params->valid_devs[0];
+ uint32_t nb_devs, i, nb_devs_attached = 0;
+ int ret;
+ char vdev_name[32];
+
+ /* create 2 AESNI_MB if necessary */
+ nb_devs = rte_cryptodev_count_devtype(
+ RTE_CRYPTODEV_AESNI_MB_PMD);
+ if (nb_devs < 2) {
+ for (i = nb_devs; i < 2; i++) {
+ snprintf(vdev_name, sizeof(vdev_name), "%s_%u",
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD),
+ i);
+ ret = rte_vdev_init(vdev_name, NULL);
+
+ TEST_ASSERT(ret == 0,
+ "Failed to create instance %u of"
+ " pmd : %s",
+ i, RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
+ }
+ }
+
+ /* attach 2 AESNI_MB cdevs */
+ for (i = 0; i < rte_cryptodev_count() && nb_devs_attached < 2;
+ i++) {
+ struct rte_cryptodev_info info;
+
+ rte_cryptodev_info_get(i, &info);
+ if (info.dev_type != RTE_CRYPTODEV_AESNI_MB_PMD)
+ continue;
+
+ ret = rte_cryptodev_scheduler_slave_attach(sched_id,
+ (uint8_t)i);
+
+ TEST_ASSERT(ret == 0,
+ "Failed to attach device %u of pmd : %s", i,
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
+
+ aesni_ids[nb_devs_attached] = (uint8_t)i;
+
+ nb_devs_attached++;
+ }
+
+ return 0;
+}
+
+static int
+test_scheduler_detach_slave_op(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ uint8_t sched_id = ts_params->valid_devs[0];
+ uint32_t i;
+ int ret;
+
+ for (i = 0; i < 2; i++) {
+ ret = rte_cryptodev_scheduler_slave_detach(sched_id,
+ aesni_ids[i]);
+ TEST_ASSERT(ret == 0,
+ "Failed to detach device %u", aesni_ids[i]);
+ }
+
+ return 0;
+}
+
+static int
+test_scheduler_mode_op(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ uint8_t sched_id = ts_params->valid_devs[0];
+ struct rte_cryptodev_scheduler_ops op = {0};
+ struct rte_cryptodev_scheduler dummy_scheduler = {
+ .description = "dummy scheduler to test mode",
+ .name = "dummy scheduler",
+ .mode = CDEV_SCHED_MODE_USERDEFINED,
+ .ops = &op
+ };
+ int ret;
+
+ /* set user defined mode */
+ ret = rte_cryptodev_scheduler_load_user_scheduler(sched_id,
+ &dummy_scheduler);
+ TEST_ASSERT(ret == 0,
+ "Failed to set cdev %u to user defined mode", sched_id);
+
+ /* set round robin mode */
+ ret = rte_cryptodev_scheduler_mode_set(sched_id,
+ CDEV_SCHED_MODE_ROUNDROBIN);
+ TEST_ASSERT(ret == 0,
+ "Failed to set cdev %u to round-robin mode", sched_id);
+ TEST_ASSERT(rte_cryptodev_scheduler_mode_get(sched_id) ==
+ CDEV_SCHED_MODE_ROUNDROBIN, "Scheduling Mode "
+ "not match");
+
+ return 0;
+}
+
+static struct unit_test_suite cryptodev_scheduler_testsuite = {
+ .suite_name = "Crypto Device Scheduler Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(NULL, NULL, test_scheduler_attach_slave_op),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_mode_op),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_chain_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_authonly_scheduler_all),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_detach_slave_op),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+#endif /* RTE_LIBRTE_PMD_CRYPTO_SCHEDULER */
+
static struct unit_test_suite cryptodev_qat_testsuite = {
.suite_name = "Crypto QAT Unit Test Suite",
.setup = testsuite_setup,
@@ -6032,13 +7943,27 @@ static struct unit_test_suite cryptodev_qat_testsuite = {
test_multi_session),
TEST_CASE_ST(ut_setup, ut_teardown, test_AES_chain_qat_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_qat_all),
TEST_CASE_ST(ut_setup, ut_teardown, test_3DES_chain_qat_all),
TEST_CASE_ST(ut_setup, ut_teardown,
test_3DES_cipheronly_qat_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_DES_cipheronly_qat_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_docsis_qat_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_DES_docsis_qat_all),
TEST_CASE_ST(ut_setup, ut_teardown, test_stats),
/** AES GCM Authenticated Encryption */
TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encrypt_SGL_in_place_1500B),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_400B),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encrypt_SGL_out_of_place_1500B_2000B),
+ TEST_CASE_ST(ut_setup, ut_teardown,
test_mb_AES_GCM_authenticated_encryption_test_case_1),
TEST_CASE_ST(ut_setup, ut_teardown,
test_mb_AES_GCM_authenticated_encryption_test_case_2),
@@ -6128,6 +8053,32 @@ static struct unit_test_suite cryptodev_qat_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_snow3g_auth_cipher_test_case_1),
+ /** ZUC encrypt only (EEA3) */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_zuc_encryption_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_zuc_encryption_test_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_zuc_encryption_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_zuc_encryption_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_zuc_encryption_test_case_5),
+
+ /** ZUC authenticate (EIA3) */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_zuc_hash_generate_test_case_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_zuc_hash_generate_test_case_7),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_zuc_hash_generate_test_case_8),
+
+ /** ZUC alg-chain (EEA3/EIA3) */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_zuc_cipher_auth_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_zuc_cipher_auth_test_case_2),
+
/** HMAC_MD5 Authentication */
TEST_CASE_ST(ut_setup, ut_teardown,
test_MD5_HMAC_generate_case_1),
@@ -6185,6 +8136,9 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
.teardown = testsuite_teardown,
.unit_test_cases = {
TEST_CASE_ST(ut_setup, ut_teardown, test_AES_chain_mb_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_cipheronly_mb_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_docsis_mb_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_authonly_mb_all),
TEST_CASES_END() /**< NULL terminate unit test array */
}
@@ -6207,6 +8161,8 @@ static struct unit_test_suite cryptodev_openssl_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_3DES_cipheronly_openssl_all),
TEST_CASE_ST(ut_setup, ut_teardown,
+ test_DES_docsis_openssl_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
test_authonly_openssl_all),
/** AES GCM Authenticated Encryption */
@@ -6259,6 +8215,10 @@ static struct unit_test_suite cryptodev_openssl_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_AES_GMAC_authentication_verify_test_case_4),
+ /** Scatter-Gather */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_1seg),
+
/** Negative tests */
TEST_CASE_ST(ut_setup, ut_teardown,
authentication_verify_HMAC_SHA1_fail_data_corrupt),
@@ -6314,6 +8274,86 @@ static struct unit_test_suite cryptodev_aesni_gcm_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_mb_AES_GCM_authenticated_decryption_test_case_7),
+ /** AES GCM Authenticated Encryption 256 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_encryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_encryption_test_case_256_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_encryption_test_case_256_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_encryption_test_case_256_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_encryption_test_case_256_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_encryption_test_case_256_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_encryption_test_case_256_7),
+
+ /** AES GCM Authenticated Decryption 256 bits key */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_decryption_test_case_256_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_decryption_test_case_256_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_decryption_test_case_256_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_decryption_test_case_256_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_decryption_test_case_256_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_decryption_test_case_256_6),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_decryption_test_case_256_7),
+
+ /** AES GCM Authenticated Encryption big aad size */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_encryption_test_case_aad_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_encryption_test_case_aad_2),
+
+ /** AES GCM Authenticated Decryption big aad size */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_decryption_test_case_aad_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_auth_decryption_test_case_aad_2),
+
+ /** AES GMAC Authentication */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GMAC_authentication_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GMAC_authentication_verify_test_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GMAC_authentication_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GMAC_authentication_verify_test_case_3),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GMAC_authentication_test_case_4),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GMAC_authentication_verify_test_case_4),
+
+ /** Negative tests */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ authentication_verify_AES128_GMAC_fail_data_corrupt),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ authentication_verify_AES128_GMAC_fail_tag_corrupt),
+
+ /** Out of place tests */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_authenticated_encryption_oop),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_authenticated_decryption_oop),
+
+ /** Session-less tests */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_authenticated_encryption_sessionless),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_mb_AES_GCM_authenticated_decryption_sessionless),
+
+ /** Scatter-Gather */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_GCM_auth_encrypt_SGL_out_of_place_400B_1seg),
+
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
@@ -6327,6 +8367,8 @@ static struct unit_test_suite cryptodev_sw_kasumi_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_kasumi_encryption_test_case_1),
TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_1_sgl),
+ TEST_CASE_ST(ut_setup, ut_teardown,
test_kasumi_encryption_test_case_2),
TEST_CASE_ST(ut_setup, ut_teardown,
test_kasumi_encryption_test_case_3),
@@ -6349,6 +8391,10 @@ static struct unit_test_suite cryptodev_sw_kasumi_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_kasumi_encryption_test_case_1_oop),
TEST_CASE_ST(ut_setup, ut_teardown,
+ test_kasumi_encryption_test_case_1_oop_sgl),
+
+
+ TEST_CASE_ST(ut_setup, ut_teardown,
test_kasumi_decryption_test_case_1_oop),
/** KASUMI hash only (UIA1) */
@@ -6401,6 +8447,8 @@ static struct unit_test_suite cryptodev_sw_snow3g_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_snow3g_encryption_test_case_1_oop),
TEST_CASE_ST(ut_setup, ut_teardown,
+ test_snow3g_encryption_test_case_1_oop_sgl),
+ TEST_CASE_ST(ut_setup, ut_teardown,
test_snow3g_decryption_test_case_1_oop),
TEST_CASE_ST(ut_setup, ut_teardown,
@@ -6478,6 +8526,41 @@ static struct unit_test_suite cryptodev_sw_zuc_testsuite = {
test_zuc_hash_generate_test_case_4),
TEST_CASE_ST(ut_setup, ut_teardown,
test_zuc_hash_generate_test_case_5),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_zuc_encryption_test_case_6_sgl),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static struct unit_test_suite cryptodev_dpaa2_sec_testsuite = {
+ .suite_name = "Crypto DPAA2_SEC Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_device_configure_invalid_dev_id),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_multi_session),
+
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_chain_dpaa2_sec_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_3DES_chain_dpaa2_sec_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_dpaa2_sec_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_3DES_cipheronly_dpaa2_sec_all),
+
+ /** HMAC_MD5 Authentication */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_MD5_HMAC_generate_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_MD5_HMAC_verify_case_1),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_MD5_HMAC_generate_case_2),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_MD5_HMAC_verify_case_2),
+
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
@@ -6504,6 +8587,23 @@ static struct unit_test_suite cryptodev_null_testsuite = {
}
};
+static struct unit_test_suite cryptodev_armv8_testsuite = {
+ .suite_name = "Crypto Device ARMv8 Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown, test_AES_chain_armv8_all),
+
+ /** Negative tests */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ auth_decryption_AES128CBC_HMAC_SHA1_fail_data_corrupt),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ auth_decryption_AES128CBC_HMAC_SHA1_fail_tag_corrupt),
+
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
{
@@ -6567,6 +8667,34 @@ test_cryptodev_sw_zuc(void /*argv __rte_unused, int argc __rte_unused*/)
return unit_test_suite_runner(&cryptodev_sw_zuc_testsuite);
}
+static int
+test_cryptodev_armv8(void)
+{
+ gbl_cryptodev_type = RTE_CRYPTODEV_ARMV8_PMD;
+
+ return unit_test_suite_runner(&cryptodev_armv8_testsuite);
+}
+
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+
+static int
+test_cryptodev_scheduler(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ gbl_cryptodev_type = RTE_CRYPTODEV_SCHEDULER_PMD;
+ return unit_test_suite_runner(&cryptodev_scheduler_testsuite);
+}
+
+REGISTER_TEST_COMMAND(cryptodev_scheduler_autotest, test_cryptodev_scheduler);
+
+#endif
+
+static int
+test_cryptodev_dpaa2_sec(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ gbl_cryptodev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+ return unit_test_suite_runner(&cryptodev_dpaa2_sec_testsuite);
+}
+
REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl);
@@ -6575,3 +8703,5 @@ REGISTER_TEST_COMMAND(cryptodev_null_autotest, test_cryptodev_null);
REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_autotest, test_cryptodev_sw_snow3g);
REGISTER_TEST_COMMAND(cryptodev_sw_kasumi_autotest, test_cryptodev_sw_kasumi);
REGISTER_TEST_COMMAND(cryptodev_sw_zuc_autotest, test_cryptodev_sw_zuc);
+REGISTER_TEST_COMMAND(cryptodev_sw_armv8_autotest, test_cryptodev_armv8);
+REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_autotest, test_cryptodev_dpaa2_sec);
diff --git a/app/test/test_cryptodev.h b/test/test/test_cryptodev.h
index a9089aae..67354a95 100644
--- a/app/test/test_cryptodev.h
+++ b/test/test/test_cryptodev.h
@@ -71,4 +71,142 @@
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA384 (24)
#define TRUNCATED_DIGEST_BYTE_LENGTH_SHA512 (32)
+/**
+ * Write (spread) data from buffer to mbuf data
+ *
+ * @param mbuf
+ * Destination mbuf
+ * @param offset
+ * Start offset in mbuf
+ * @param len
+ * Number of bytes to copy
+ * @param buffer
+ * Continuous source buffer
+ */
+static inline void
+pktmbuf_write(struct rte_mbuf *mbuf, int offset, int len, const uint8_t *buffer)
+{
+ int n = len;
+ int l;
+ struct rte_mbuf *m;
+ char *dst;
+
+ for (m = mbuf; (m != NULL) && (offset > m->data_len); m = m->next)
+ offset -= m->data_len;
+
+ l = m->data_len - offset;
+
+ /* copy data from first segment */
+ dst = rte_pktmbuf_mtod_offset(m, char *, offset);
+ if (len <= l) {
+ rte_memcpy(dst, buffer, len);
+ return;
+ }
+
+ rte_memcpy(dst, buffer, l);
+ buffer += l;
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ dst = rte_pktmbuf_mtod(m, char *);
+ l = m->data_len;
+ if (n < l) {
+ rte_memcpy(dst, buffer, n);
+ return;
+ }
+ rte_memcpy(dst, buffer, l);
+ buffer += l;
+ n -= l;
+ }
+}
+
+static inline uint8_t *
+pktmbuf_mtod_offset(struct rte_mbuf *mbuf, int offset) {
+ struct rte_mbuf *m;
+
+ for (m = mbuf; (m != NULL) && (offset > m->data_len); m = m->next)
+ offset -= m->data_len;
+
+ if (m == NULL) {
+ printf("pktmbuf_mtod_offset: offset out of buffer\n");
+ return NULL;
+ }
+ return rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+}
+
+static inline phys_addr_t
+pktmbuf_mtophys_offset(struct rte_mbuf *mbuf, int offset) {
+ struct rte_mbuf *m;
+
+ for (m = mbuf; (m != NULL) && (offset > m->data_len); m = m->next)
+ offset -= m->data_len;
+
+ if (m == NULL) {
+ printf("pktmbuf_mtophys_offset: offset out of buffer\n");
+ return 0;
+ }
+ return rte_pktmbuf_mtophys_offset(m, offset);
+}
+
+static inline struct rte_mbuf *
+create_segmented_mbuf(struct rte_mempool *mbuf_pool, int pkt_len,
+ int nb_segs, uint8_t pattern) {
+
+ struct rte_mbuf *m = NULL, *mbuf = NULL;
+ uint8_t *dst;
+ int data_len = 0;
+ int i, size;
+ int t_len;
+
+ if (pkt_len < 1) {
+ printf("Packet size must be 1 or more (is %d)\n", pkt_len);
+ return NULL;
+ }
+
+ if (nb_segs < 1) {
+ printf("Number of segments must be 1 or more (is %d)\n",
+ nb_segs);
+ return NULL;
+ }
+
+ t_len = pkt_len >= nb_segs ? pkt_len / nb_segs : 1;
+ size = pkt_len;
+
+ /* Create chained mbuf_src and fill it generated data */
+ for (i = 0; size > 0; i++) {
+
+ m = rte_pktmbuf_alloc(mbuf_pool);
+ if (i == 0)
+ mbuf = m;
+
+ if (m == NULL) {
+ printf("Cannot create segment for source mbuf");
+ goto fail;
+ }
+
+ /* Make sure if tailroom is zeroed */
+ memset(m->buf_addr, pattern, m->buf_len);
+
+ data_len = size > t_len ? t_len : size;
+ dst = (uint8_t *)rte_pktmbuf_append(m, data_len);
+ if (dst == NULL) {
+ printf("Cannot append %d bytes to the mbuf\n",
+ data_len);
+ goto fail;
+ }
+
+ if (mbuf != m)
+ rte_pktmbuf_chain(mbuf, m);
+
+ size -= data_len;
+
+ }
+ return mbuf;
+
+fail:
+ if (mbuf)
+ rte_pktmbuf_free(mbuf);
+ return NULL;
+}
+
#endif /* TEST_CRYPTODEV_H_ */
diff --git a/app/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 1c68f93e..07d6eab2 100644
--- a/app/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -56,6 +56,26 @@ static const uint8_t ciphertext64_aes128ctr[] = {
0x79, 0x21, 0x70, 0xA0, 0xF3, 0x00, 0x9C, 0xEE
};
+static const uint8_t plaintext_aes_docsis_bpi_cfb[] = {
+ 0x00, 0x01, 0x02, 0x88, 0xEE, 0x59, 0x7E
+};
+
+static const uint8_t ciphertext_aes_docsis_bpi_cfb[] = {
+ 0xFC, 0x68, 0xA3, 0x55, 0x60, 0x37, 0xDC
+};
+
+static const uint8_t plaintext_aes_docsis_bpi_cbc_cfb[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x91,
+ 0xD2, 0xD1, 0x9F
+};
+
+static const uint8_t ciphertext_aes_docsis_bpi_cbc_cfb[] = {
+ 0x9D, 0xD1, 0x67, 0x4B, 0xBA, 0x61, 0x10, 0x1B,
+ 0x56, 0x75, 0x64, 0x74, 0x36, 0x4F, 0x10, 0x1D,
+ 0x44, 0xD4, 0x73
+};
+
static const uint8_t plaintext_aes192ctr[] = {
0x01, 0x0F, 0x10, 0x1F, 0x20, 0x1C, 0x0E, 0xB8,
0xFB, 0x5C, 0xCD, 0xCC, 0x1F, 0xF9, 0xAF, 0x0B,
@@ -825,6 +845,181 @@ static const struct blockcipher_test_data aes_test_data_11 = {
}
};
+/** AES-128-CBC SHA256 HMAC test vector (160 bytes) */
+static const struct blockcipher_test_data aes_test_data_12 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key = {
+ .data = {
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
+ 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = plaintext_aes_common,
+ .len = 160
+ },
+ .ciphertext = {
+ .data = ciphertext512_aes128cbc,
+ .len = 160
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .auth_key = {
+ .data = {
+ 0x42, 0x1A, 0x7D, 0x3D, 0xF5, 0x82, 0x80, 0xF1,
+ 0xF1, 0x35, 0x5C, 0x3B, 0xDD, 0x9A, 0x65, 0xBA,
+ 0x58, 0x34, 0x85, 0x61, 0x1C, 0x42, 0x10, 0x76,
+ 0x9A, 0x4F, 0x88, 0x1B, 0xB6, 0x8F, 0xD8, 0x60
+ },
+ .len = 32
+ },
+ .digest = {
+ .data = {
+ 0x92, 0xEC, 0x65, 0x9A, 0x52, 0xCC, 0x50, 0xA5,
+ 0xEE, 0x0E, 0xDF, 0x1E, 0xA4, 0xC9, 0xC1, 0x04,
+ 0xD5, 0xDC, 0x78, 0x90, 0xF4, 0xE3, 0x35, 0x62,
+ 0xAD, 0x95, 0x45, 0x28, 0x5C, 0xF8, 0x8C, 0x0B
+ },
+ .len = 32,
+ .truncated_len = 16
+ }
+};
+
+/** AES-128-CBC SHA1 HMAC test vector (160 bytes) */
+static const struct blockcipher_test_data aes_test_data_13 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key = {
+ .data = {
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
+ 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = plaintext_aes_common,
+ .len = 160
+ },
+ .ciphertext = {
+ .data = ciphertext512_aes128cbc,
+ .len = 160
+ },
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .auth_key = {
+ .data = {
+ 0xF8, 0x2A, 0xC7, 0x54, 0xDB, 0x96, 0x18, 0xAA,
+ 0xC3, 0xA1, 0x53, 0xF6, 0x1F, 0x17, 0x60, 0xBD,
+ 0xDE, 0xF4, 0xDE, 0xAD
+ },
+ .len = 20
+ },
+ .digest = {
+ .data = {
+ 0x4F, 0x16, 0xEA, 0xF7, 0x4A, 0x88, 0xD3, 0xE0,
+ 0x0E, 0x12, 0x8B, 0xE7, 0x05, 0xD0, 0x86, 0x48,
+ 0x22, 0x43, 0x30, 0xA7
+ },
+ .len = 20,
+ .truncated_len = 12
+ }
+};
+
+/* AES-DOCSIS-BPI test vectors */
+
+/* Multiple of AES block size */
+static const struct blockcipher_test_data aes_test_data_docsis_1 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+ .cipher_key = {
+ .data = {
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2,
+ 0x49, 0x03, 0xDD, 0xC6, 0xB8, 0xCA, 0x55, 0x7A
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = plaintext_aes_common,
+ .len = 512
+ },
+ .ciphertext = {
+ .data = ciphertext512_aes128cbc,
+ .len = 512
+ }
+};
+
+/* Less than AES block size */
+static const struct blockcipher_test_data aes_test_data_docsis_2 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+ .cipher_key = {
+ .data = {
+ 0xE6, 0x60, 0x0F, 0xD8, 0x85, 0x2E, 0xF5, 0xAB,
+ 0xE6, 0x60, 0x0F, 0xD8, 0x85, 0x2E, 0xF5, 0xAB
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x81, 0x0E, 0x52, 0x8E, 0x1C, 0x5F, 0xDA, 0x1A,
+ 0x81, 0x0E, 0x52, 0x8E, 0x1C, 0x5F, 0xDA, 0x1A
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = plaintext_aes_docsis_bpi_cfb,
+ .len = 7
+ },
+ .ciphertext = {
+ .data = ciphertext_aes_docsis_bpi_cfb,
+ .len = 7
+ }
+};
+
+/* Not multiple of AES block size */
+static const struct blockcipher_test_data aes_test_data_docsis_3 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+ .cipher_key = {
+ .data = {
+ 0xE6, 0x60, 0x0F, 0xD8, 0x85, 0x2E, 0xF5, 0xAB,
+ 0xE6, 0x60, 0x0F, 0xD8, 0x85, 0x2E, 0xF5, 0xAB
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x81, 0x0E, 0x52, 0x8E, 0x1C, 0x5F, 0xDA, 0x1A,
+ 0x81, 0x0E, 0x52, 0x8E, 0x1C, 0x5F, 0xDA, 0x1A
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = plaintext_aes_docsis_bpi_cbc_cfb,
+ .len = 19
+ },
+ .ciphertext = {
+ .data = ciphertext_aes_docsis_bpi_cbc_cfb,
+ .len = 19
+ }
+};
+
static const struct blockcipher_test_case aes_chain_test_cases[] = {
{
.test_descr = "AES-128-CTR HMAC-SHA1 Encryption Digest",
@@ -832,7 +1027,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
@@ -841,21 +1037,35 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-192-CTR XCBC Encryption Digest",
.test_data = &aes_test_data_2,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-192-CTR XCBC Decryption Digest Verify",
.test_data = &aes_test_data_2,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ },
+ {
+ .test_descr = "AES-192-CTR XCBC Decryption Digest Verify "
+ "Scatter Gather",
+ .test_data = &aes_test_data_2,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_SG |
+ BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-256-CTR HMAC-SHA1 Encryption Digest",
@@ -863,7 +1073,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
@@ -872,41 +1083,90 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
.test_data = &aes_test_data_4,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
+ "(short buffers)",
+ .test_data = &aes_test_data_13,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
+ "Scatter Gather",
+ .test_data = &aes_test_data_4,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_SG |
+ BLOCKCIPHER_TEST_FEATURE_OOP,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
"Verify",
.test_data = &aes_test_data_4,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
+ "Verify (short buffers)",
+ .test_data = &aes_test_data_13,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest",
.test_data = &aes_test_data_5,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
+ "(short buffers)",
+ .test_data = &aes_test_data_12,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
"Verify",
.test_data = &aes_test_data_5,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
+ "Verify (short buffers)",
+ .test_data = &aes_test_data_12,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest",
@@ -914,7 +1174,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -926,27 +1187,53 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
},
{
+ .test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
+ "Scatter Gather Sessionless",
+ .test_data = &aes_test_data_6,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS |
+ BLOCKCIPHER_TEST_FEATURE_SG |
+ BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ },
+ {
.test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
"Verify",
.test_data = &aes_test_data_6,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
+ "Verify Scatter Gather",
+ .test_data = &aes_test_data_6,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_SG |
+ BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-128-CBC XCBC Encryption Digest",
.test_data = &aes_test_data_7,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-128-CBC XCBC Decryption Digest Verify",
.test_data = &aes_test_data_7,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -954,7 +1241,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.test_data = &aes_test_data_4,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
},
{
@@ -963,7 +1251,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.test_data = &aes_test_data_4,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
},
{
@@ -972,7 +1261,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
@@ -981,7 +1271,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
@@ -989,7 +1280,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
@@ -998,7 +1290,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1006,7 +1299,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.test_data = &aes_test_data_4,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
},
{
.test_descr =
@@ -1015,7 +1309,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.test_data = &aes_test_data_4,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
},
};
@@ -1024,74 +1319,221 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.test_descr = "AES-128-CBC Encryption",
.test_data = &aes_test_data_4,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-128-CBC Decryption",
.test_data = &aes_test_data_4,
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-192-CBC Encryption",
.test_data = &aes_test_data_10,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ },
+ {
+ .test_descr = "AES-192-CBC Encryption Scater gather",
+ .test_data = &aes_test_data_10,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_SG |
+ BLOCKCIPHER_TEST_FEATURE_OOP,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
},
{
.test_descr = "AES-192-CBC Decryption",
.test_data = &aes_test_data_10,
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-256-CBC Encryption",
.test_data = &aes_test_data_11,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-256-CBC Decryption",
.test_data = &aes_test_data_11,
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
+ },
+ {
+ .test_descr = "AES-256-CBC OOP Encryption",
+ .test_data = &aes_test_data_11,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-256-CBC OOP Decryption",
+ .test_data = &aes_test_data_11,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "AES-128-CTR Encryption",
.test_data = &aes_test_data_1,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-128-CTR Decryption",
.test_data = &aes_test_data_1,
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-192-CTR Encryption",
.test_data = &aes_test_data_2,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-192-CTR Decryption",
.test_data = &aes_test_data_2,
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-256-CTR Encryption",
.test_data = &aes_test_data_3,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "AES-256-CTR Decryption",
.test_data = &aes_test_data_3,
.op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
};
+static const struct blockcipher_test_case aes_docsis_test_cases[] = {
+
+ {
+ .test_descr = "AES-DOCSIS-BPI Full Block Encryption",
+ .test_data = &aes_test_data_docsis_1,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-DOCSIS-BPI Runt Block Encryption",
+ .test_data = &aes_test_data_docsis_2,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-DOCSIS-BPI Uneven Encryption",
+ .test_data = &aes_test_data_docsis_3,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-DOCSIS-BPI Full Block Decryption",
+ .test_data = &aes_test_data_docsis_1,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-DOCSIS-BPI Runt Block Decryption",
+ .test_data = &aes_test_data_docsis_2,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-DOCSIS-BPI Uneven Decryption",
+ .test_data = &aes_test_data_docsis_3,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-DOCSIS-BPI OOP Full Block Encryption",
+ .test_data = &aes_test_data_docsis_1,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-DOCSIS-BPI OOP Runt Block Encryption",
+ .test_data = &aes_test_data_docsis_2,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-DOCSIS-BPI OOP Uneven Block Encryption",
+ .test_data = &aes_test_data_docsis_3,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-DOCSIS-BPI OOP Full Block Decryption",
+ .test_data = &aes_test_data_docsis_1,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-DOCSIS-BPI OOP Runt Block Decryption",
+ .test_data = &aes_test_data_docsis_2,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "AES-DOCSIS-BPI OOP Uneven Block Decryption",
+ .test_data = &aes_test_data_docsis_3,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ }
+};
#endif /* TEST_CRYPTODEV_AES_TEST_VECTORS_H_ */
diff --git a/app/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index 37b10cf3..603c7765 100644
--- a/app/test/test_cryptodev_blockcipher.c
+++ b/test/test/test_cryptodev_blockcipher.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -41,10 +41,12 @@
#include <rte_cryptodev_pmd.h>
#include "test.h"
+#include "test_cryptodev.h"
#include "test_cryptodev_blockcipher.h"
#include "test_cryptodev_aes_test_vectors.h"
#include "test_cryptodev_des_test_vectors.h"
#include "test_cryptodev_hash_test_vectors.h"
+#include "test_cryptodev.h"
static int
test_blockcipher_one_case(const struct blockcipher_test_case *t,
@@ -63,6 +65,7 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
struct rte_crypto_sym_op *sym_op = NULL;
struct rte_crypto_op *op = NULL;
struct rte_cryptodev_sym_session *sess = NULL;
+ struct rte_cryptodev_info dev_info;
int status = TEST_SUCCESS;
const struct blockcipher_test_data *tdata = t->test_data;
@@ -71,6 +74,23 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
uint32_t buf_len = tdata->ciphertext.len;
uint32_t digest_len = 0;
char *buf_p = NULL;
+ uint8_t src_pattern = 0xa5;
+ uint8_t dst_pattern = 0xb6;
+ uint8_t tmp_src_buf[MBUF_SIZE];
+ uint8_t tmp_dst_buf[MBUF_SIZE];
+
+ int nb_segs = 1;
+
+ if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SG) {
+ rte_cryptodev_info_get(dev_id, &dev_info);
+ if (!(dev_info.feature_flags &
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
+ printf("Device doesn't support scatter-gather. "
+ "Test Skipped.\n");
+ return 0;
+ }
+ nb_segs = 3;
+ }
if (tdata->cipher_key.len)
memcpy(cipher_key, tdata->cipher_key.data,
@@ -82,9 +102,11 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
switch (cryptodev_type) {
case RTE_CRYPTODEV_QAT_SYM_PMD:
case RTE_CRYPTODEV_OPENSSL_PMD:
+ case RTE_CRYPTODEV_ARMV8_PMD: /* Fall through */
digest_len = tdata->digest.len;
break;
case RTE_CRYPTODEV_AESNI_MB_PMD:
+ case RTE_CRYPTODEV_SCHEDULER_PMD:
digest_len = tdata->digest.truncated_len;
break;
default:
@@ -96,48 +118,38 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
/* preparing data */
- ibuf = rte_pktmbuf_alloc(mbuf_pool);
- if (!ibuf) {
- snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
- "line %u FAILED: %s",
- __LINE__, "Allocation of rte_mbuf failed");
- status = TEST_FAILED;
- goto error_exit;
- }
-
if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
buf_len += tdata->iv.len;
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH)
buf_len += digest_len;
- buf_p = rte_pktmbuf_append(ibuf, buf_len);
- if (!buf_p) {
+ /* for contiguous mbuf, nb_segs is 1 */
+ ibuf = create_segmented_mbuf(mbuf_pool,
+ tdata->ciphertext.len, nb_segs, src_pattern);
+ if (ibuf == NULL) {
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
"line %u FAILED: %s",
- __LINE__, "No room to append mbuf");
+ __LINE__, "Cannot create source mbuf");
status = TEST_FAILED;
goto error_exit;
}
- if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
- rte_memcpy(buf_p, tdata->iv.data, tdata->iv.len);
- buf_p += tdata->iv.len;
- }
-
/* only encryption requires plaintext.data input,
* decryption/(digest gen)/(digest verify) use ciphertext.data
* to be computed
*/
- if (t->op_mask & BLOCKCIPHER_TEST_OP_ENCRYPT) {
- rte_memcpy(buf_p, tdata->plaintext.data,
- tdata->plaintext.len);
- buf_p += tdata->plaintext.len;
- } else {
- rte_memcpy(buf_p, tdata->ciphertext.data,
- tdata->ciphertext.len);
- buf_p += tdata->ciphertext.len;
- }
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_ENCRYPT)
+ pktmbuf_write(ibuf, 0, tdata->plaintext.len,
+ tdata->plaintext.data);
+ else
+ pktmbuf_write(ibuf, 0, tdata->ciphertext.len,
+ tdata->ciphertext.data);
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
+ rte_memcpy(rte_pktmbuf_prepend(ibuf, tdata->iv.len),
+ tdata->iv.data, tdata->iv.len);
+ }
+ buf_p = rte_pktmbuf_append(ibuf, digest_len);
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_VERIFY)
rte_memcpy(buf_p, tdata->digest.data, digest_len);
else
@@ -152,6 +164,7 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
status = TEST_FAILED;
goto error_exit;
}
+ memset(obuf->buf_addr, dst_pattern, obuf->buf_len);
buf_p = rte_pktmbuf_append(obuf, buf_len);
if (!buf_p) {
@@ -307,17 +320,17 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN) {
auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- sym_op->auth.digest.data = rte_pktmbuf_mtod_offset
- (iobuf, uint8_t *, digest_offset);
+ sym_op->auth.digest.data = pktmbuf_mtod_offset
+ (iobuf, digest_offset);
sym_op->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(iobuf,
+ pktmbuf_mtophys_offset(iobuf,
digest_offset);
} else {
auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
- sym_op->auth.digest.data = rte_pktmbuf_mtod_offset
- (sym_op->m_src, uint8_t *, digest_offset);
+ sym_op->auth.digest.data = pktmbuf_mtod_offset
+ (sym_op->m_src, digest_offset);
sym_op->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(sym_op->m_src,
+ pktmbuf_mtophys_offset(sym_op->m_src,
digest_offset);
}
@@ -342,6 +355,17 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
rte_crypto_op_attach_sym_session(op, sess);
}
+ TEST_HEXDUMP(stdout, "m_src(before):",
+ sym_op->m_src->buf_addr, sym_op->m_src->buf_len);
+ rte_memcpy(tmp_src_buf, sym_op->m_src->buf_addr,
+ sym_op->m_src->buf_len);
+ if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
+ TEST_HEXDUMP(stdout, "m_dst(before):",
+ sym_op->m_dst->buf_addr, sym_op->m_dst->buf_len);
+ rte_memcpy(tmp_dst_buf, sym_op->m_dst->buf_addr,
+ sym_op->m_dst->buf_len);
+ }
+
/* Process crypto operation */
if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
@@ -364,12 +388,11 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
goto error_exit;
}
- TEST_HEXDUMP(stdout, "m_src:",
- rte_pktmbuf_mtod(sym_op->m_src, uint8_t *), buf_len);
+ TEST_HEXDUMP(stdout, "m_src(after):",
+ sym_op->m_src->buf_addr, sym_op->m_src->buf_len);
if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP)
- TEST_HEXDUMP(stdout, "m_dst:",
- rte_pktmbuf_mtod(sym_op->m_dst, uint8_t *),
- buf_len);
+ TEST_HEXDUMP(stdout, "m_dst(after):",
+ sym_op->m_dst->buf_addr, sym_op->m_dst->buf_len);
/* Verify results */
if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
@@ -386,13 +409,10 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
- uint8_t *crypto_res;
+ uint8_t buffer[2048];
const uint8_t *compare_ref;
uint32_t compare_len;
- crypto_res = rte_pktmbuf_mtod_offset(iobuf, uint8_t *,
- tdata->iv.len);
-
if (t->op_mask & BLOCKCIPHER_TEST_OP_ENCRYPT) {
compare_ref = tdata->ciphertext.data;
compare_len = tdata->ciphertext.len;
@@ -401,7 +421,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
compare_len = tdata->plaintext.len;
}
- if (memcmp(crypto_res, compare_ref, compare_len)) {
+ if (memcmp(rte_pktmbuf_read(iobuf, tdata->iv.len, compare_len,
+ buffer), compare_ref, compare_len)) {
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "line %u "
"FAILED: %s", __LINE__,
"Crypto data not as expected");
@@ -414,12 +435,11 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
uint8_t *auth_res;
if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER)
- auth_res = rte_pktmbuf_mtod_offset(iobuf,
- uint8_t *,
- tdata->iv.len + tdata->ciphertext.len);
+ auth_res = pktmbuf_mtod_offset(iobuf,
+ tdata->iv.len + tdata->ciphertext.len);
else
- auth_res = rte_pktmbuf_mtod_offset(iobuf,
- uint8_t *, tdata->ciphertext.len);
+ auth_res = pktmbuf_mtod_offset(iobuf,
+ tdata->ciphertext.len);
if (memcmp(auth_res, tdata->digest.data, digest_len)) {
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "line %u "
@@ -430,6 +450,120 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
}
}
+ /* The only parts that should have changed in the buffer are
+ * plaintext/ciphertext and digest.
+ * In OOP only the dest buffer should change.
+ */
+ if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
+ struct rte_mbuf *mbuf;
+ uint8_t value;
+ uint32_t head_unchanged_len = 0, changed_len = 0;
+ uint32_t i;
+
+ mbuf = sym_op->m_src;
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_VERIFY) {
+ /* white-box test: PMDs use some of the
+ * tailroom as temp storage in verify case
+ */
+ head_unchanged_len = rte_pktmbuf_headroom(mbuf)
+ + rte_pktmbuf_data_len(mbuf);
+ changed_len = digest_len;
+ } else {
+ head_unchanged_len = mbuf->buf_len;
+ changed_len = 0;
+ }
+
+ for (i = 0; i < mbuf->buf_len; i++) {
+ if (i == head_unchanged_len)
+ i += changed_len;
+ value = *((uint8_t *)(mbuf->buf_addr)+i);
+ if (value != tmp_src_buf[i]) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: OOP src outer mbuf data (0x%x) not as expected (0x%x)",
+ __LINE__, value, tmp_src_buf[i]);
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ }
+
+ mbuf = sym_op->m_dst;
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH) {
+ head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
+ sym_op->auth.data.offset;
+ changed_len = sym_op->auth.data.length;
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN)
+ changed_len += sym_op->auth.digest.length;
+ } else {
+ /* cipher-only */
+ head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
+ sym_op->cipher.data.offset;
+ changed_len = sym_op->cipher.data.length;
+ }
+
+ for (i = 0; i < mbuf->buf_len; i++) {
+ if (i == head_unchanged_len)
+ i += changed_len;
+ value = *((uint8_t *)(mbuf->buf_addr)+i);
+ if (value != tmp_dst_buf[i]) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: OOP dst outer mbuf data "
+ "(0x%x) not as expected (0x%x)",
+ __LINE__, value, tmp_dst_buf[i]);
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ }
+ } else {
+ /* In-place operation */
+ struct rte_mbuf *mbuf;
+ uint8_t value;
+ uint32_t head_unchanged_len = 0, changed_len = 0;
+ uint32_t i;
+
+ mbuf = sym_op->m_src;
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
+ head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
+ sym_op->cipher.data.offset;
+ changed_len = sym_op->cipher.data.length;
+ } else {
+ /* auth-only */
+ head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
+ sym_op->auth.data.offset +
+ sym_op->auth.data.length;
+ changed_len = 0;
+ }
+
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN)
+ changed_len += sym_op->auth.digest.length;
+
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_VERIFY) {
+ /* white-box test: PMDs use some of the
+ * tailroom as temp storage in verify case
+ */
+ if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
+ /* This is simplified, not checking digest*/
+ changed_len += digest_len*2;
+ } else {
+ head_unchanged_len += digest_len;
+ changed_len += digest_len;
+ }
+ }
+
+ for (i = 0; i < mbuf->buf_len; i++) {
+ if (i == head_unchanged_len)
+ i += changed_len;
+ value = *((uint8_t *)(mbuf->buf_addr)+i);
+ if (value != tmp_src_buf[i]) {
+ snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
+ "line %u FAILED: outer mbuf data (0x%x) "
+ "not as expected (0x%x)",
+ __LINE__, value, tmp_src_buf[i]);
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ }
+ }
+
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN, "PASS");
error_exit:
@@ -479,6 +613,11 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
sizeof(aes_cipheronly_test_cases[0]);
tcs = aes_cipheronly_test_cases;
break;
+ case BLKCIPHER_AES_DOCSIS_TYPE:
+ n_test_cases = sizeof(aes_docsis_test_cases) /
+ sizeof(aes_docsis_test_cases[0]);
+ tcs = aes_docsis_test_cases;
+ break;
case BLKCIPHER_3DES_CHAIN_TYPE:
n_test_cases = sizeof(triple_des_chain_test_cases) /
sizeof(triple_des_chain_test_cases[0]);
@@ -489,6 +628,16 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
sizeof(triple_des_cipheronly_test_cases[0]);
tcs = triple_des_cipheronly_test_cases;
break;
+ case BLKCIPHER_DES_CIPHERONLY_TYPE:
+ n_test_cases = sizeof(des_cipheronly_test_cases) /
+ sizeof(des_cipheronly_test_cases[0]);
+ tcs = des_cipheronly_test_cases;
+ break;
+ case BLKCIPHER_DES_DOCSIS_TYPE:
+ n_test_cases = sizeof(des_docsis_test_cases) /
+ sizeof(des_docsis_test_cases[0]);
+ tcs = des_docsis_test_cases;
+ break;
case BLKCIPHER_AUTHONLY_TYPE:
n_test_cases = sizeof(hash_test_cases) /
sizeof(hash_test_cases[0]);
@@ -508,6 +657,15 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
case RTE_CRYPTODEV_OPENSSL_PMD:
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL;
break;
+ case RTE_CRYPTODEV_ARMV8_PMD:
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8;
+ break;
+ case RTE_CRYPTODEV_SCHEDULER_PMD:
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER;
+ break;
+ case RTE_CRYPTODEV_DPAA2_SEC_PMD:
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC;
+ break;
default:
TEST_ASSERT(0, "Unrecognized cryptodev type");
break;
diff --git a/app/test/test_cryptodev_blockcipher.h b/test/test/test_cryptodev_blockcipher.h
index 04ff1ee3..004122f2 100644
--- a/app/test/test_cryptodev_blockcipher.h
+++ b/test/test/test_cryptodev_blockcipher.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -45,10 +45,14 @@
#define BLOCKCIPHER_TEST_FEATURE_OOP 0x01
#define BLOCKCIPHER_TEST_FEATURE_SESSIONLESS 0x02
#define BLOCKCIPHER_TEST_FEATURE_STOPPER 0x04 /* stop upon failing */
+#define BLOCKCIPHER_TEST_FEATURE_SG 0x08 /* Scatter Gather */
#define BLOCKCIPHER_TEST_TARGET_PMD_MB 0x0001 /* Multi-buffer flag */
#define BLOCKCIPHER_TEST_TARGET_PMD_QAT 0x0002 /* QAT flag */
#define BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL 0x0004 /* SW OPENSSL flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 0x0008 /* ARMv8 flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER 0x0010 /* Scheduler */
+#define BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC 0x0020 /* DPAA2_SEC flag */
#define BLOCKCIPHER_TEST_OP_CIPHER (BLOCKCIPHER_TEST_OP_ENCRYPT | \
BLOCKCIPHER_TEST_OP_DECRYPT)
@@ -65,9 +69,12 @@
enum blockcipher_test_type {
BLKCIPHER_AES_CHAIN_TYPE, /* use aes_chain_test_cases[] */
BLKCIPHER_AES_CIPHERONLY_TYPE, /* use aes_cipheronly_test_cases[] */
+ BLKCIPHER_AES_DOCSIS_TYPE, /* use aes_docsis_test_cases[] */
BLKCIPHER_3DES_CHAIN_TYPE, /* use triple_des_chain_test_cases[] */
BLKCIPHER_3DES_CIPHERONLY_TYPE, /* triple_des_cipheronly_test_cases[] */
- BLKCIPHER_AUTHONLY_TYPE /* use hash_test_cases[] */
+ BLKCIPHER_AUTHONLY_TYPE, /* use hash_test_cases[] */
+ BLKCIPHER_DES_CIPHERONLY_TYPE, /* use des_cipheronly_test_cases[] */
+ BLKCIPHER_DES_DOCSIS_TYPE /* use des_docsis_test_cases[] */
};
struct blockcipher_test_case {
diff --git a/app/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
index a1d2d978..b226794f 100644
--- a/app/test/test_cryptodev_des_test_vectors.h
+++ b/test/test/test_cryptodev_des_test_vectors.h
@@ -474,6 +474,75 @@ static const uint8_t ciphertext512_des128cbc[] = {
0x43, 0xfe, 0xf1, 0x10, 0x14, 0xf1, 0x91, 0xcc
};
+
+static const uint8_t ciphertext512_des[] = {
+ 0x1A, 0x46, 0xDB, 0x69, 0x43, 0x45, 0x0F, 0x2F,
+ 0xDC, 0x27, 0xF9, 0x41, 0x0E, 0x01, 0x58, 0xB4,
+ 0x5E, 0xCC, 0x13, 0xF5, 0x92, 0x99, 0xE4, 0xF2,
+ 0xD5, 0xF9, 0x16, 0xFE, 0x0F, 0x7E, 0xDE, 0xA0,
+ 0xF5, 0x32, 0xFE, 0x20, 0x67, 0x93, 0xCA, 0xE1,
+ 0x8E, 0x4D, 0x72, 0xA3, 0x50, 0x72, 0x14, 0x15,
+ 0x70, 0xE7, 0xAB, 0x49, 0x25, 0x88, 0x0E, 0x01,
+ 0x5C, 0x52, 0x87, 0xE2, 0x27, 0xDC, 0xD4, 0xD1,
+ 0x14, 0x1B, 0x08, 0x9F, 0x42, 0x48, 0x93, 0xA9,
+ 0xD1, 0x2F, 0x2C, 0x69, 0x48, 0x16, 0x59, 0xCF,
+ 0x8B, 0xF6, 0x8B, 0xD9, 0x34, 0xD4, 0xD7, 0xE4,
+ 0xAE, 0x35, 0xFD, 0xDA, 0x73, 0xBE, 0xDC, 0x6B,
+ 0x10, 0x90, 0x75, 0x2D, 0x4C, 0x14, 0x37, 0x8B,
+ 0xC8, 0xC7, 0xDF, 0x6E, 0x6F, 0xED, 0xF3, 0xE3,
+ 0xD3, 0x21, 0x29, 0xCD, 0x06, 0xB6, 0x5B, 0xF4,
+ 0xB9, 0xBD, 0x77, 0xA2, 0xF7, 0x91, 0xF4, 0x95,
+ 0xF0, 0xE0, 0x62, 0x03, 0x46, 0xAE, 0x1B, 0xEB,
+ 0xE2, 0xA9, 0xCF, 0xB9, 0x0E, 0x3B, 0xB9, 0xDA,
+ 0x5C, 0x1B, 0x45, 0x3F, 0xDD, 0xCC, 0xCC, 0xB3,
+ 0xF0, 0xDD, 0x36, 0x26, 0x11, 0x57, 0x97, 0xA7,
+ 0xF6, 0xF4, 0xE1, 0x4F, 0xBB, 0x31, 0xBB, 0x07,
+ 0x4B, 0xA3, 0xB4, 0x83, 0xF9, 0x23, 0xA1, 0xCD,
+ 0x8C, 0x1C, 0x76, 0x92, 0x45, 0xA5, 0xEB, 0x7D,
+ 0xEB, 0x22, 0x88, 0xB1, 0x9F, 0xFB, 0xE9, 0x06,
+ 0x8F, 0x67, 0xA6, 0x8A, 0xB7, 0x0B, 0xCD, 0x8F,
+ 0x34, 0x40, 0x4F, 0x4F, 0xAD, 0xA0, 0xF2, 0xDC,
+ 0x2C, 0x53, 0xE1, 0xCA, 0xA5, 0x7A, 0x03, 0xEF,
+ 0x08, 0x00, 0xCC, 0x52, 0xA6, 0xAB, 0x56, 0xD2,
+ 0xF1, 0xCD, 0xC7, 0xED, 0xBE, 0xCB, 0x78, 0x37,
+ 0x4B, 0x61, 0xA9, 0xD2, 0x3C, 0x8D, 0xCC, 0xFD,
+ 0x21, 0xFD, 0x0F, 0xE4, 0x4E, 0x3D, 0x6F, 0x8F,
+ 0x2A, 0xEC, 0x69, 0xFA, 0x20, 0x50, 0x99, 0x35,
+ 0xA1, 0xCC, 0x3B, 0xFD, 0xD6, 0xAC, 0xE9, 0xBE,
+ 0x14, 0xF1, 0xBC, 0x71, 0x70, 0xFE, 0x13, 0xD1,
+ 0x48, 0xCC, 0xBE, 0x7B, 0xCB, 0xC0, 0x20, 0xD9,
+ 0x28, 0xD7, 0xD4, 0x0F, 0x66, 0x7A, 0x60, 0xAB,
+ 0x20, 0xA9, 0x23, 0x41, 0x03, 0x34, 0xC3, 0x63,
+ 0x91, 0x69, 0x02, 0xD5, 0xBC, 0x41, 0xDA, 0xA8,
+ 0xD1, 0x48, 0xC9, 0x8E, 0x4F, 0xCD, 0x0F, 0x21,
+ 0x5B, 0x4D, 0x5F, 0xF5, 0x1B, 0x2A, 0x44, 0x10,
+ 0x16, 0xA7, 0xFD, 0xC0, 0x55, 0xE1, 0x98, 0xBB,
+ 0x76, 0xB5, 0xAB, 0x39, 0x6B, 0x9B, 0xAB, 0x85,
+ 0x45, 0x4B, 0x9C, 0x64, 0x7D, 0x78, 0x3F, 0x61,
+ 0x22, 0xB1, 0xDE, 0x0E, 0x39, 0x2B, 0x21, 0x26,
+ 0xE2, 0x1D, 0x5A, 0xD7, 0xAC, 0xDF, 0xD4, 0x12,
+ 0x69, 0xD1, 0xE8, 0x9B, 0x1A, 0xCE, 0x6C, 0xA0,
+ 0x3B, 0x23, 0xDC, 0x03, 0x2B, 0x97, 0x16, 0xD0,
+ 0xD0, 0x46, 0x98, 0x36, 0x53, 0xCE, 0x88, 0x6E,
+ 0xCA, 0x2C, 0x15, 0x0E, 0x49, 0xED, 0xBE, 0xE5,
+ 0xBF, 0xBD, 0x7B, 0xC2, 0x21, 0xE1, 0x09, 0xFF,
+ 0x71, 0xA8, 0xBE, 0x8F, 0xB4, 0x1D, 0x25, 0x5C,
+ 0x37, 0xCA, 0x26, 0xD2, 0x1E, 0x63, 0xE1, 0x7F,
+ 0x0D, 0x89, 0x10, 0xEF, 0x78, 0xB0, 0xDB, 0xD0,
+ 0x72, 0x44, 0x60, 0x1D, 0xCF, 0x7C, 0x25, 0x1A,
+ 0xBB, 0xC3, 0x92, 0x53, 0x8E, 0x9F, 0x27, 0xC7,
+ 0xE8, 0x08, 0xFC, 0x5D, 0x50, 0x3E, 0xFC, 0xB0,
+ 0x00, 0xE2, 0x48, 0xB2, 0x4B, 0xF8, 0xF2, 0xE3,
+ 0xD3, 0x8B, 0x71, 0x64, 0xB8, 0xF0, 0x6E, 0x4A,
+ 0x23, 0xA0, 0xA4, 0x88, 0xA4, 0x36, 0x45, 0x6B,
+ 0x5A, 0xE7, 0x57, 0x65, 0xEA, 0xC9, 0xF8, 0xE8,
+ 0x7A, 0x80, 0x22, 0x67, 0x1A, 0x05, 0xF2, 0x78,
+ 0x81, 0x17, 0xCD, 0x87, 0xFB, 0x0D, 0x25, 0x84,
+ 0x49, 0x06, 0x25, 0xCE, 0xFC, 0x38, 0x06, 0x18,
+ 0x2E, 0x1D, 0xE1, 0x33, 0x97, 0xB6, 0x7E, 0xAB,
+};
+
+
static const struct blockcipher_test_data
triple_des128cbc_test_vector = {
.crypto_algo = RTE_CRYPTO_CIPHER_3DES_CBC,
@@ -752,6 +821,237 @@ triple_des192cbc_hmac_sha1_test_vector = {
}
};
+static const struct blockcipher_test_data
+des_cbc_test_vector = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_DES_CBC,
+ .cipher_key = {
+ .data = {
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2
+ },
+ .len = 8
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = plaintext_des,
+ .len = 512
+ },
+ .ciphertext = {
+ .data = ciphertext512_des,
+ .len = 512
+ },
+};
+
+static const struct blockcipher_test_case des_cipheronly_test_cases[] = {
+ {
+ .test_descr = "DES-CBC Encryption",
+ .test_data = &des_cbc_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "DES-CBC Decryption",
+ .test_data = &des_cbc_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+
+};
+
+/* DES-DOCSIS-BPI test vectors */
+
+static const uint8_t plaintext_des_docsis_bpi_cfb[] = {
+ 0x00, 0x01, 0x02, 0x88, 0xEE, 0x59, 0x7E
+};
+
+static const uint8_t ciphertext_des_docsis_bpi_cfb[] = {
+ 0x17, 0x86, 0xA8, 0x03, 0xA0, 0x85, 0x75
+};
+
+static const uint8_t plaintext_des_docsis_bpi_cbc_cfb[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x91,
+ 0xD2, 0xD1, 0x9F
+};
+
+static const uint8_t ciphertext_des_docsis_bpi_cbc_cfb[] = {
+ 0x0D, 0xDA, 0x5A, 0xCB, 0xD0, 0x5E, 0x55, 0x67,
+ 0x51, 0x47, 0x46, 0x86, 0x8A, 0x71, 0xE5, 0x77,
+ 0xEF, 0xAC, 0x88
+};
+
+/* Multiple of DES block size */
+static const struct blockcipher_test_data des_test_data_1 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+ .cipher_key = {
+ .data = {
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2
+ },
+ .len = 8
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = plaintext_des,
+ .len = 512
+ },
+ .ciphertext = {
+ .data = ciphertext512_des,
+ .len = 512
+ },
+};
+
+/* Less than DES block size */
+static const struct blockcipher_test_data des_test_data_2 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+ .cipher_key = {
+ .data = {
+
+ 0xE6, 0x60, 0x0F, 0xD8, 0x85, 0x2E, 0xF5, 0xAB
+ },
+ .len = 8
+ },
+ .iv = {
+ .data = {
+ 0x81, 0x0E, 0x52, 0x8E, 0x1C, 0x5F, 0xDA, 0x1A
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = plaintext_des_docsis_bpi_cfb,
+ .len = 7
+ },
+ .ciphertext = {
+ .data = ciphertext_des_docsis_bpi_cfb,
+ .len = 7
+ }
+};
+
+/* Not multiple of DES block size */
+static const struct blockcipher_test_data des_test_data_3 = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+ .cipher_key = {
+ .data = {
+ 0xE6, 0x60, 0x0F, 0xD8, 0x85, 0x2E, 0xF5, 0xAB
+ },
+ .len = 8
+ },
+ .iv = {
+ .data = {
+ 0x81, 0x0E, 0x52, 0x8E, 0x1C, 0x5F, 0xDA, 0x1A
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = plaintext_des_docsis_bpi_cbc_cfb,
+ .len = 19
+ },
+ .ciphertext = {
+ .data = ciphertext_des_docsis_bpi_cbc_cfb,
+ .len = 19
+ }
+};
+static const struct blockcipher_test_case des_docsis_test_cases[] = {
+ {
+ .test_descr = "DES-DOCSIS-BPI Full Block Encryption",
+ .test_data = &des_test_data_1,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "DES-DOCSIS-BPI Runt Block Encryption",
+ .test_data = &des_test_data_2,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "DES-DOCSIS-BPI Uneven Encryption",
+ .test_data = &des_test_data_3,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "DES-DOCSIS-BPI Full Block Decryption",
+ .test_data = &des_test_data_1,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "DES-DOCSIS-BPI Runt Block Decryption",
+ .test_data = &des_test_data_2,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "DES-DOCSIS-BPI Uneven Decryption",
+ .test_data = &des_test_data_3,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "DES-DOCSIS-BPI OOP Full Block Encryption",
+ .test_data = &des_test_data_1,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "DES-DOCSIS-BPI OOP Runt Block Encryption",
+ .test_data = &des_test_data_2,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "DES-DOCSIS-BPI OOP Uneven Encryption",
+ .test_data = &des_test_data_3,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "DES-DOCSIS-BPI OOP Full Block Decryption",
+ .test_data = &des_test_data_1,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "DES-DOCSIS-BPI OOP Runt Block Decryption",
+ .test_data = &des_test_data_2,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "DES-DOCSIS-BPI OOP Uneven Decryption",
+ .test_data = &des_test_data_3,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_OOP,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ }
+};
+
static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
{
.test_descr = "3DES-128-CBC HMAC-SHA1 Encryption Digest",
diff --git a/test/test/test_cryptodev_gcm_test_vectors.h b/test/test/test_cryptodev_gcm_test_vectors.h
new file mode 100644
index 00000000..5764edb1
--- /dev/null
+++ b/test/test/test_cryptodev_gcm_test_vectors.h
@@ -0,0 +1,3051 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_GCM_TEST_VECTORS_H_
+
+#define GMAC_LARGE_PLAINTEXT_LENGTH 65344
+#define GCM_MAX_AAD_LENGTH 65536
+#define GCM_LARGE_AAD_LENGTH 65296
+
+static uint8_t gcm_aad_zero_text[GCM_MAX_AAD_LENGTH] = { 0 };
+
+static uint8_t gcm_aad_text[GCM_MAX_AAD_LENGTH] = {
+ 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
+ 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
+ 0x00, 0xf1, 0xe2, 0xd3, 0xc4, 0xb5, 0xa6, 0x97,
+ 0x88, 0x79, 0x6a, 0x5b, 0x4c, 0x3d, 0x2e, 0x1f };
+
+
+struct gcm_test_data {
+ struct {
+ uint8_t data[64];
+ unsigned len;
+ } key;
+
+ struct {
+ uint8_t data[64] __rte_aligned(16);
+ unsigned len;
+ } iv;
+
+ struct {
+ uint8_t *data;
+ unsigned len;
+ } aad;
+
+ struct {
+ uint8_t data[8096];
+ unsigned len;
+ } plaintext;
+
+ struct {
+ uint8_t data[8096];
+ unsigned len;
+ } ciphertext;
+
+ struct {
+ uint8_t data[16];
+ unsigned len;
+ } auth_tag;
+};
+
+struct gmac_test_data {
+ struct {
+ uint8_t data[64];
+ unsigned len;
+ } key;
+
+ struct {
+ uint8_t data[64] __rte_aligned(16);
+ unsigned len;
+ } iv;
+
+ struct {
+ uint8_t *data;
+ unsigned len;
+ } aad;
+
+ struct {
+ uint8_t *data;
+ unsigned len;
+ } plaintext;
+
+ struct {
+ uint8_t data[16];
+ unsigned len;
+ } gmac_tag;
+
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_1 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 0
+ },
+ .plaintext = {
+ .data = {
+ 0x00 },
+ .len = 0
+ },
+ .ciphertext = {
+ .data = {
+ 0x00
+ },
+ .len = 0
+ },
+ .auth_tag = {
+ .data = {
+ 0x58, 0xe2, 0xfc, 0xce, 0xfa, 0x7e, 0x30, 0x61,
+ 0x36, 0x7f, 0x1d, 0x57, 0xa4, 0xe7, 0x45, 0x5a },
+ .len = 16
+ }
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_2 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 0
+ },
+ .plaintext = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .len = 16
+ },
+ .ciphertext = {
+ .data = {
+ 0x03, 0x88, 0xda, 0xce, 0x60, 0xb6, 0xa3, 0x92,
+ 0xf3, 0x28, 0xc2, 0xb9, 0x71, 0xb2, 0xfe, 0x78 },
+ .len = 16
+ },
+ .auth_tag = {
+ .data = {
+ 0xab, 0x6e, 0x47, 0xd4, 0x2c, 0xec, 0x13, 0xbd,
+ 0xf5, 0x3a, 0x67, 0xb2, 0x12, 0x57, 0xbd, 0xdf },
+ .len = 16
+ }
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_3 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 0
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
+ .len = 64
+ },
+ .ciphertext = {
+ .data = {
+ 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+ 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+ 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+ 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+ 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+ 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+ 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+ 0x3d, 0x58, 0xe0, 0x91, 0x47, 0x3f, 0x59, 0x85
+ },
+ .len = 64
+ },
+ .auth_tag = {
+ .data = {
+ 0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6,
+ 0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 },
+ .len = 16
+ }
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_4 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39
+ },
+ .len = 60
+ },
+ .ciphertext = {
+ .data = {
+ 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+ 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+ 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+ 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+ 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+ 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+ 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+ 0x3d, 0x58, 0xe0, 0x91
+ },
+ .len = 60
+ },
+ .auth_tag = {
+ .data = {
+ 0xA2, 0xA4, 0x35, 0x75, 0xDC, 0xB0, 0x57, 0x74,
+ 0x07, 0x02, 0x30, 0xC2, 0xE7, 0x52, 0x02, 0x00
+ },
+ .len = 16
+ }
+
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_5 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_text,
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39
+ },
+ .len = 60
+ },
+ .ciphertext = {
+ .data = {
+ 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+ 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+ 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+ 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+ 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+ 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+ 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+ 0x3d, 0x58, 0xe0, 0x91
+ },
+ .len = 60
+ },
+ .auth_tag = {
+ .data = {
+ 0xC5, 0x2D, 0xFB, 0x54, 0xAF, 0xBB, 0x07, 0xA1,
+ 0x9A, 0xFF, 0xBE, 0xE0, 0x61, 0x4C, 0xE7, 0xA5
+ },
+ .len = 16
+ }
+
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_6 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88
+ },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 12
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39
+ },
+ .len = 60
+ },
+ .ciphertext = {
+ .data = {
+ 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+ 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+ 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+ 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+ 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+ 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+ 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+ 0x3d, 0x58, 0xe0, 0x91
+ },
+ .len = 60
+ },
+ .auth_tag = {
+ .data = {
+ 0x74, 0xFC, 0xFA, 0x29, 0x3E, 0x60, 0xCC, 0x66,
+ 0x09, 0xD6, 0xFD, 0x00, 0xC8, 0x86, 0xD5, 0x42
+ },
+ .len = 16
+ }
+};
+
+/** AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_7 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88
+ },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_text,
+ .len = 12
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39
+ },
+ .len = 60
+ },
+ .ciphertext = {
+ .data = {
+ 0x42, 0x83, 0x1e, 0xc2, 0x21, 0x77, 0x74, 0x24,
+ 0x4b, 0x72, 0x21, 0xb7, 0x84, 0xd0, 0xd4, 0x9c,
+ 0xe3, 0xaa, 0x21, 0x2f, 0x2c, 0x02, 0xa4, 0xe0,
+ 0x35, 0xc1, 0x7e, 0x23, 0x29, 0xac, 0xa1, 0x2e,
+ 0x21, 0xd5, 0x14, 0xb2, 0x54, 0x66, 0x93, 0x1c,
+ 0x7d, 0x8f, 0x6a, 0x5a, 0xac, 0x84, 0xaa, 0x05,
+ 0x1b, 0xa3, 0x0b, 0x39, 0x6a, 0x0a, 0xac, 0x97,
+ 0x3d, 0x58, 0xe0, 0x91
+ },
+ .len = 60
+ },
+ .auth_tag = {
+ .data = {
+ 0xE9, 0xE4, 0xAB, 0x76, 0xB7, 0xFF, 0xEA, 0xDC,
+ 0x69, 0x79, 0x38, 0xA2, 0x0D, 0xCA, 0xF5, 0x92
+ },
+ .len = 16
+ }
+};
+
+static const struct gcm_test_data gcm_test_case_8 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88
+ },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_text,
+ .len = 12
+ },
+ .plaintext = {
+ .data = {
+ 0xC5, 0x34, 0x2E, 0x83, 0xEB, 0x4C, 0x02, 0x03,
+ 0xF7, 0xB2, 0x57, 0x35, 0x26, 0x81, 0x63, 0xAE,
+ 0x1F, 0xCD, 0x2D, 0x02, 0x91, 0x5A, 0xDB, 0x3A,
+ 0xF1, 0x38, 0xD8, 0x75, 0x86, 0x20, 0xCC, 0x1E,
+ 0xE6, 0xDC, 0xFF, 0xB5, 0xEA, 0x0E, 0x18, 0x7A,
+ 0x86, 0x6C, 0xAB, 0x39, 0x2D, 0x90, 0xAC, 0x77,
+ 0x5D, 0xED, 0x65, 0xB3, 0x05, 0x29, 0xBB, 0x09,
+ 0xD0, 0x21, 0x74, 0x6A, 0x67, 0x1C, 0x95, 0x42,
+ 0x55, 0xAD, 0xC8, 0x91, 0x28, 0xFE, 0x16, 0x9A,
+ 0xE1, 0xCB, 0xCD, 0x68, 0x3B, 0xDF, 0x3E, 0x3A,
+ 0x34, 0xFE, 0x9B, 0xFB, 0xF5, 0x15, 0x2A, 0x29,
+ 0x18, 0x99, 0x24, 0xBF, 0xB6, 0x43, 0xDB, 0xD1,
+ 0x69, 0x26, 0x1E, 0x31, 0x2C, 0x8C, 0x3C, 0x6B,
+ 0x7F, 0x06, 0xA6, 0x03, 0xE2, 0x1A, 0x50, 0xFE,
+ 0x7C, 0x69, 0xE5, 0x5F, 0x35, 0x93, 0xE9, 0x20,
+ 0x14, 0xB1, 0xCA, 0x61, 0xE7, 0x9C, 0x89, 0x08,
+ 0xD6, 0xB1, 0xC2, 0x63, 0x1B, 0x86, 0x5E, 0xF1,
+ 0xF5, 0x23, 0x0E, 0x9B, 0xE5, 0xBD, 0x5D, 0x04,
+ 0xF7, 0xEF, 0x8E, 0x46, 0xB0, 0x11, 0x4F, 0x69,
+ 0x62, 0x35, 0x51, 0xB7, 0x24, 0xA2, 0x31, 0xD0,
+ 0x32, 0x4E, 0xB8, 0x44, 0xC7, 0x59, 0xDE, 0x25,
+ 0xEA, 0x2D, 0x00, 0x0E, 0xF1, 0x07, 0xBA, 0xBB,
+ 0x9A, 0xBC, 0x4F, 0x57, 0xB7, 0x13, 0x57, 0xEF,
+ 0xD9, 0xF6, 0x80, 0x69, 0xEA, 0xE8, 0x47, 0x9C,
+ 0x51, 0x71, 0xE6, 0x8F, 0x69, 0x29, 0xB4, 0x60,
+ 0xE8, 0x50, 0xE5, 0xD0, 0x9B, 0xD2, 0x62, 0x6F,
+ 0x09, 0x5C, 0xD1, 0x4B, 0x85, 0xE2, 0xFD, 0xD3,
+ 0xEB, 0x28, 0x55, 0x77, 0x97, 0xCA, 0xD6, 0xA8,
+ 0xDC, 0x35, 0x68, 0xF7, 0x6A, 0xCF, 0x48, 0x3F,
+ 0x49, 0x31, 0x00, 0x65, 0xB7, 0x31, 0x1A, 0x49,
+ 0x75, 0xDE, 0xCE, 0x7F, 0x18, 0xB5, 0x31, 0x9A,
+ 0x64, 0x6D, 0xE5, 0x49, 0x1D, 0x6D, 0xF2, 0x21,
+ 0x9F, 0xF5, 0xFF, 0x7C, 0x41, 0x30, 0x33, 0x06,
+ 0x7B, 0xA4, 0xD8, 0x99, 0xF6, 0xCC, 0xDF, 0xC4,
+ 0x3F, 0xF3, 0xCD, 0xE7, 0x74, 0xC4, 0x4A, 0x19,
+ 0x5C, 0xCA, 0x42, 0x31, 0xF1, 0x3B, 0x65, 0x1C,
+ 0x3D, 0x56, 0x08, 0xBE, 0x15, 0x37, 0x23, 0x50,
+ 0xD6, 0xA3, 0x57, 0x64, 0x25, 0xBE, 0xDA, 0xC2,
+ 0x4E, 0xF5, 0x1A, 0xAD, 0x6F, 0x43, 0x78, 0x21,
+ 0xF9, 0x36, 0x39, 0x1F, 0x5F, 0xF7, 0x1B, 0xA0,
+ 0xEE, 0x8B, 0x4F, 0x8A, 0x9D, 0xD8, 0xED, 0x37,
+ 0xCE, 0x0D, 0x70, 0xE0, 0x3F, 0xE7, 0x11, 0x30,
+ 0x17, 0x1D, 0x03, 0x5E, 0xA0, 0x3D, 0x3F, 0x9E,
+ 0xF5, 0xD3, 0x74, 0x2E, 0xC1, 0xD6, 0xFF, 0xF7,
+ 0x2E, 0xE7, 0x80, 0x88, 0xCF, 0x0E, 0x7F, 0x12,
+ 0x71, 0x62, 0xC7, 0xF1, 0xC4, 0x2B, 0x64, 0x5D,
+ 0x1C, 0x9A, 0xB4, 0xCB, 0xB8, 0x24, 0xB3, 0x0B,
+ 0x33, 0xF2, 0x8A, 0x8F, 0x76, 0xC8, 0x81, 0xDA,
+ 0x1A, 0x10, 0xB5, 0xA9, 0xCD, 0xDC, 0x1A, 0x02,
+ 0xC1, 0xAE, 0x4F, 0x02, 0x1B, 0x13, 0x96, 0x5A,
+ 0x2E, 0x03, 0xA2, 0x68, 0xB2, 0x29, 0xAC, 0x28,
+ 0xB8, 0xDC, 0xD5, 0x27, 0x55, 0xEC, 0x43, 0xDC,
+ 0xB7, 0x49, 0x1D, 0xE1, 0x30, 0x25, 0x81, 0xA6,
+ 0x90, 0x1F, 0x75, 0xBA, 0x19, 0x1E, 0xF7, 0xC5,
+ 0x77, 0x35, 0xEE, 0x68, 0x71, 0x22, 0xA0, 0xB4,
+ 0xCC, 0x99, 0x86, 0x1B, 0x1B, 0xC8, 0x27, 0xFC,
+ 0x6D, 0x8D, 0xE7, 0x8B, 0xC3, 0x40, 0x3D, 0xA8,
+ 0xCB, 0x9B, 0xC4, 0x12, 0x07, 0xDD, 0xA1, 0x92,
+ 0xE5, 0x80, 0x7A, 0xF4, 0xDB, 0x4C, 0xE6, 0xEE,
+ 0xF9, 0xD5, 0x1C, 0x20, 0x18, 0xD3, 0x8F, 0xDF,
+ 0x1C, 0xD3, 0x51, 0x4E, 0x0E, 0xED, 0x06, 0x61,
+ 0xF7, 0xBA, 0x81, 0x3A, 0x2F, 0xEA, 0xED, 0x70,
+ 0xA9, 0xD9, 0x54, 0x4D, 0xFC, 0x1D, 0x19, 0xEA,
+ 0xA6, 0x39, 0x8C, 0x6C, 0x78, 0xA8, 0x05, 0xEB,
+ 0xF2, 0xB5, 0xDE, 0x06, 0x9D, 0x8A, 0x78, 0x2A,
+ 0xF5, 0x50, 0xA4, 0xBD, 0x9B, 0xDA, 0xCA, 0x66,
+ 0xC0, 0x23, 0xAB, 0xE8, 0x95, 0x7E, 0xC9, 0xD2,
+ 0x6F, 0x09, 0xF2, 0x9A, 0x17, 0x89, 0xDA, 0x47,
+ 0x65, 0x8C, 0x20, 0xFA, 0x4E, 0x86, 0x18, 0xEB,
+ 0x7C, 0x08, 0xEC, 0x8A, 0x05, 0x54, 0x96, 0xD2,
+ 0x7A, 0x8A, 0x81, 0x58, 0x75, 0x8C, 0x7B, 0x02,
+ 0xEE, 0x1F, 0x51, 0x88, 0xD0, 0xD1, 0x90, 0x99,
+ 0x0C, 0xAE, 0x51, 0x2E, 0x54, 0x3E, 0xB1, 0x7D,
+ 0xBC, 0xE8, 0x54, 0x93, 0x6D, 0x10, 0x3C, 0xC6,
+ 0x71, 0xF6, 0xF5, 0x0B, 0x07, 0x0A, 0x6E, 0x59,
+ 0x20, 0x45, 0x21, 0x7D, 0x37, 0x64, 0x92, 0x09,
+ 0xA7, 0xE2, 0x34, 0x6F, 0xFC, 0xCC, 0x66, 0x0E,
+ 0x88, 0x1B, 0x19, 0x86, 0x11, 0xD7, 0x81, 0x25,
+ 0xF1, 0x8A, 0x03, 0xB7, 0x7A, 0xF0, 0x98, 0x4A,
+ 0x5C, 0xA1, 0x6D, 0x85, 0xA4, 0x8C, 0x4B, 0x65,
+ 0x9F, 0x72, 0x64, 0x14, 0xBA, 0x74, 0xEE, 0xA3,
+ 0x88, 0xFE, 0x1B, 0xCF, 0x11, 0x4F, 0xD1, 0xAC,
+ 0xFA, 0x14, 0xC3, 0xA7, 0xDD, 0x06, 0x85, 0x4E,
+ 0x64, 0x06, 0x92, 0x9C, 0xDF, 0x06, 0x09, 0xF1,
+ 0x4D, 0xE8, 0xF8, 0x2F, 0x69, 0xB6, 0x8A, 0xAF,
+ 0x25, 0x21, 0xB5, 0x48, 0x59, 0xF8, 0x9D, 0x60,
+ 0xAE, 0x42, 0x11, 0x7A, 0x68, 0x4D, 0x7E, 0x76,
+ 0xB0, 0xD2, 0xE3, 0xD9, 0x24, 0x16, 0x20, 0x0A,
+ 0xEB, 0xE0, 0x68, 0xCB, 0xBC, 0xAB, 0x67, 0xE4,
+ 0xF3, 0x25, 0x1F, 0xD3, 0x85, 0xA7, 0x1D, 0x7E,
+ 0x3C, 0x63, 0xCB, 0xC2, 0x50, 0x90, 0x0F, 0x4B,
+ 0x6E, 0x68, 0x06, 0x84, 0x65, 0xF7, 0xD0, 0xD4,
+ 0x12, 0xED, 0xFA, 0xC9, 0x40, 0xE2, 0xC0, 0xC9,
+ 0x46, 0x22, 0x47, 0x5E, 0x6D, 0xC1, 0x63, 0xDB,
+ 0x51, 0x98, 0xDA, 0x1A, 0xC4, 0xB9, 0xED, 0xE9,
+ 0x09, 0xB9, 0xCF, 0x91, 0x04, 0x1C, 0x63, 0xD8,
+ 0xC5, 0xA5, 0xAE, 0x53, 0x7B, 0xA1, 0x29, 0x83,
+ 0x37, 0xFB, 0xBF, 0x96, 0xBB, 0x24, 0x3D, 0x77,
+ 0x8C, 0x0F, 0xB3, 0x4B, 0x66, 0x9C, 0x54, 0xBB,
+ 0xF6, 0xDD, 0xD1, 0xB4, 0xD2, 0xF6, 0xAA, 0xED,
+ 0x18, 0x56, 0x63, 0x3E, 0x0B, 0xCA, 0xAB, 0x70,
+ 0xBB, 0x63, 0xEA, 0xB1, 0x00, 0x65, 0x90, 0x18,
+ 0xB8, 0x63, 0xA2, 0xF2, 0xB6, 0x1E, 0x61, 0x7B,
+ 0xD5, 0x01, 0xD9, 0x4D, 0xC9, 0x9D, 0x99, 0xC1,
+ 0x57, 0x9D, 0x6F, 0xAE, 0x64, 0xE4, 0x0C, 0x7E,
+ 0xFA, 0x15, 0x5E, 0xB6, 0x43, 0xB8, 0x8B, 0x89,
+ 0x87, 0xCD, 0x4F, 0xAD, 0x30, 0x1E, 0xA5, 0x03,
+ 0x7A, 0xC2, 0x10, 0x42, 0x14, 0x88, 0xD6, 0x7A,
+ 0x6D, 0x56, 0x52, 0x2E, 0x8D, 0x1B, 0x5D, 0x36,
+ 0x27, 0xA0, 0x21, 0x4B, 0x64, 0xF0, 0xC5, 0x41,
+ 0xAD, 0x05, 0x4A, 0x24, 0xE4, 0x70, 0x88, 0x63,
+ 0x12, 0xD0, 0xBC, 0x05, 0x38, 0xD9, 0x41, 0x68,
+ 0x9F, 0x16, 0x9A, 0x54, 0x09, 0x21, 0x64, 0x36,
+ 0x63, 0x97, 0x3A, 0xB5, 0xE0, 0x25, 0x43, 0x8A,
+ 0x6A, 0x59, 0x97, 0xC1, 0x31, 0xA5, 0x66, 0xD2,
+ 0xF0, 0x1C, 0xDF, 0x97, 0x51, 0xD0, 0x61, 0xBA,
+ 0x55, 0x5F, 0xD7, 0x0D, 0xD4, 0x75, 0x8E, 0x79,
+ 0x04, 0x75, 0x00, 0xB9, 0xC0, 0x7A, 0x66, 0x05,
+ 0x9F, 0x2B, 0x44, 0x42, 0x75, 0x0F, 0xD5, 0x15,
+ 0xD6, 0x16, 0x8F, 0x6C, 0x6E, 0xD4, 0x37, 0xCF,
+ 0xB4, 0xDA, 0x93, 0x00, 0x11, 0xFB, 0xBE, 0xEE,
+ 0x3B, 0x6D, 0x1D, 0xBA, 0x33, 0xD1, 0x52, 0x8B,
+ 0x16, 0x39, 0x42, 0x27, 0xE6, 0x56, 0x4C, 0x41,
+ 0x91, 0xB0, 0x98, 0xAE, 0x9B, 0x2D, 0x9B, 0x23,
+ 0x80, 0x4C, 0xEA, 0x98, 0x57, 0x95, 0x28, 0x94,
+ 0x43, 0xD3, 0x88, 0x12, 0xDF, 0x89, 0x5A, 0x7B,
+ 0xC5, 0xCB, 0x36, 0x54, 0x65, 0x74, 0xB8, 0x4E,
+ 0xE2, 0x4D, 0x01, 0xD5, 0x9C, 0x82, 0xB9, 0x1A,
+ 0x09, 0xD2, 0xCE, 0x04, 0x36, 0xD8, 0x41, 0xAC,
+ 0x4C, 0xAD, 0xC6, 0x52, 0x91, 0x1A, 0x06, 0x6D,
+ 0xFC, 0xAB, 0x29, 0x93, 0x87, 0x88, 0xB9, 0x8C,
+ 0xFA, 0x57, 0x2B, 0x05, 0x03, 0xD0, 0x18, 0xED,
+ 0x7A, 0x7B, 0x81, 0x6A, 0x97, 0x65, 0x5B, 0x90,
+ 0xDE, 0xA9, 0xFC, 0x8F, 0xFC, 0xBB, 0x98, 0xD8,
+ 0xFA, 0x32, 0x3F, 0x3F, 0x7F, 0x74, 0x65, 0x38,
+ 0xC4, 0x28, 0xEC, 0x27, 0x1F, 0x28, 0x01, 0xB1,
+ 0xAF, 0x2B, 0x8A, 0x05, 0x38, 0x7B, 0x77, 0xC9,
+ 0x61, 0x77, 0x34, 0x2C, 0x22, 0xE5, 0xEB, 0xDC,
+ 0x9D, 0x18, 0x6E, 0x23, 0x25, 0x52, 0x69, 0xB7,
+ 0x05, 0xDB, 0x66, 0x5D, 0xEA, 0x76, 0x83, 0x82,
+ 0x97, 0x39, 0xAF, 0xC0, 0x50, 0x81, 0x18, 0x0D,
+ 0x22, 0xFA, 0xB7, 0x44, 0x5C, 0x3F, 0x69, 0xF3,
+ 0xAC, 0xC5, 0x63, 0x9F, 0xD8, 0x72, 0x7E, 0x9A,
+ 0xC2, 0xEB, 0x79, 0xD0, 0x74, 0x65, 0xE8, 0xCA,
+ 0xFD, 0xA8, 0x7D, 0x23, 0x07, 0x99, 0x3E, 0xAF,
+ 0xDB, 0x67, 0x10, 0xC0, 0xE5, 0x61, 0x77, 0xC6,
+ 0x8D, 0xC4, 0x0E, 0xAA, 0x55, 0xE3, 0xC0, 0xC7,
+ 0xA5, 0x36, 0x28, 0x61, 0xDB, 0x16, 0x96, 0x5E,
+ 0x01, 0x47, 0x82, 0xE3, 0xEB, 0x20, 0x3F, 0x10,
+ 0xFA, 0x5A, 0xBC, 0xD3, 0xF9, 0xCE, 0x04, 0x87,
+ 0x51, 0x07, 0xF9, 0xD0, 0xE7, 0x6D, 0xCB, 0xCC,
+ 0xC4, 0x15, 0x00, 0xE2, 0xDC, 0x8E, 0x7B, 0x5C,
+ 0x9A, 0xF2, 0x78, 0x70, 0x4D, 0xA1, 0xAA, 0xB5,
+ 0x13, 0xCC, 0x71, 0x66, 0x5A, 0x79, 0x13, 0x3B,
+ 0x12, 0xCD, 0x40, 0x30, 0x5A, 0x49, 0xD4, 0x20,
+ 0xED, 0xCF, 0x4A, 0x75, 0xE6, 0xD5, 0xDD, 0x0F,
+ 0xD4, 0xBE, 0x98, 0x9F, 0xD7, 0x1F, 0xC0, 0x02,
+ 0x31, 0xFA, 0x67, 0x37, 0x25, 0x86, 0x56, 0x85,
+ 0x2B, 0xA2, 0x57, 0xCD, 0x8E, 0x74, 0xE7, 0x69,
+ 0xEE, 0x33, 0x5A, 0x3F, 0xCD, 0x1E, 0xE3, 0xB9,
+ 0xAA, 0x52, 0xF5, 0x22, 0x4E, 0xE3, 0xFF, 0xC8,
+ 0xE3, 0x13, 0xA3, 0x9A, 0x63, 0x23, 0xC3, 0xD7,
+ 0xE5, 0x88, 0x3E, 0x0A, 0x4B, 0xA5, 0x01, 0xE6,
+ 0x13, 0xCF, 0xED, 0xEE, 0x2A, 0x58, 0x09, 0x3F,
+ 0x2F, 0x28, 0xE7, 0xC4, 0x6B, 0xEC, 0x49, 0x51,
+ 0x79, 0x8F, 0xD5, 0x19, 0x5D, 0xA5, 0x10, 0xCE,
+ 0x8E, 0xF6, 0x26, 0x78, 0x7A, 0xA8, 0x11, 0x52,
+ 0x5F, 0x97, 0x14, 0xC9, 0x29, 0x87, 0xB8, 0xA0,
+ 0x2D, 0xE6, 0xA7, 0x2A, 0xD4, 0xFF, 0xEB, 0xBA,
+ 0xFD, 0x58, 0x39, 0x33, 0xB1, 0xCE, 0x0E, 0x78,
+ 0x67, 0x1E, 0xA1, 0x92, 0x77, 0x63, 0xF8, 0xC0,
+ 0x02, 0x49, 0x73, 0xC0, 0xA1, 0x26, 0x83, 0x04,
+ 0x9A, 0x5D, 0x85, 0x68, 0x2A, 0x2F, 0xCB, 0x88,
+ 0x8D, 0x14, 0xB1, 0x33, 0xFA, 0xFB, 0xE9, 0x05,
+ 0xBE, 0x24, 0x1A, 0x6B, 0x29, 0x2B, 0x3F, 0x52,
+ 0x8F, 0xFB, 0xE6, 0x02, 0x77, 0x50, 0x71, 0xDB,
+ 0xE9, 0x92, 0x3F, 0xE1, 0x20, 0x62, 0x80, 0xAE,
+ 0xA4, 0x98, 0xC6, 0xCD, 0xE0, 0xB1, 0xC3, 0x33,
+ 0xB1, 0xC5, 0x91, 0x3C, 0x19, 0x34, 0xA8, 0xD9,
+ 0xB3, 0x25, 0x69, 0xE3, 0x9C, 0x5F, 0x78, 0xD0,
+ 0x83, 0x1F, 0xAB, 0x85, 0x13, 0x56, 0x69, 0xB5,
+ 0x06, 0x47, 0x62, 0x37, 0x27, 0x15, 0x14, 0x05,
+ 0x4A, 0xF4, 0x6A, 0x68, 0x2A, 0x6A, 0xC3, 0x5A,
+ 0xDF, 0xB5, 0xAE, 0x2F, 0x8D, 0x8F, 0x21, 0xDB,
+ 0x33, 0x00, 0x9B, 0xD4, 0xC4, 0x08, 0x3B, 0x81,
+ 0x63, 0x4C, 0xB0, 0x39, 0x4C, 0x0A, 0xD5, 0x71,
+ 0x3E, 0x5A, 0x50, 0x58, 0x9C, 0x07, 0x89, 0x79,
+ 0x79, 0x2F, 0x0B, 0xD9, 0x50, 0xBC, 0xCF, 0x46,
+ 0x7A, 0x68, 0x5C, 0xBF, 0x1E, 0x49, 0x77, 0x92,
+ 0x85, 0x11, 0x39, 0xA6, 0x2F, 0xDA, 0x7B, 0xFA,
+ 0x72, 0x87, 0x06, 0xCD, 0x84, 0x41, 0x20, 0x1B,
+ 0x66, 0x3F, 0x42, 0x0C, 0x9E, 0x19, 0xD3, 0x18,
+ 0x57, 0xA0, 0xEE, 0x16, 0x3A, 0xC7, 0xF9, 0xD3,
+ 0x8B, 0xC9, 0x24, 0x70, 0x70, 0x51, 0x7C, 0x06,
+ 0x68, 0xD3, 0x29, 0xC9, 0x85, 0x9A, 0x1C, 0xE6,
+ 0x8C, 0x17, 0xF4, 0x88, 0xDF, 0xEA, 0xFF, 0x44,
+ 0x8D, 0x54, 0xBE, 0x22, 0x07, 0xA5, 0x7C, 0x0C,
+ 0xF4, 0x8D, 0xB1, 0x0C, 0x07, 0xED, 0xBD, 0x28,
+ 0x19, 0xDA, 0x07, 0x71, 0xA8, 0xA1, 0xE0, 0xDD,
+ 0xEE, 0x08, 0x18, 0xA5, 0xBD, 0xDD, 0x32, 0x0B,
+ 0x70, 0x1C, 0xD9, 0xEE, 0x19, 0xC2, 0xAE, 0x5C,
+ 0xE3, 0x02, 0x74, 0x70, 0x96, 0x61, 0xB1, 0x73,
+ 0x3B, 0xD6, 0x74, 0xC0, 0x82, 0xA9, 0x1F, 0xE0,
+ 0xF1, 0x22, 0x50, 0xF3, 0x9F, 0xE5, 0x13, 0x92,
+ 0xFC, 0x0A, 0x1A, 0x3C, 0xB4, 0x46, 0xFB, 0x81,
+ 0x00, 0x84, 0xA4, 0x5E, 0x6B, 0x8C, 0x25, 0x6E,
+ 0xD7, 0xB7, 0x3B, 0x01, 0x65, 0xFB, 0x0B, 0x46,
+ 0x67, 0x27, 0x2D, 0x51, 0xAD, 0xB5, 0xE0, 0x85,
+ 0xC2, 0x95, 0xA3, 0xE3, 0x68, 0x4D, 0x9E, 0x8C,
+ 0x11, 0x53, 0xF0, 0xB2, 0x85, 0xFA, 0x52, 0x4E,
+ 0xEC, 0xF9, 0xB7, 0x3C, 0x89, 0x2C, 0x4D, 0x32,
+ 0x9A, 0xCB, 0x17, 0xF3, 0x16, 0xBF, 0x44, 0x40,
+ 0xE9, 0x5E, 0x51, 0x8C, 0x1E, 0x52, 0x0A, 0xC2,
+ 0xCD, 0xA5, 0xAA, 0x03, 0x27, 0xB0, 0x8F, 0x64,
+ 0xDB, 0xD7, 0x03, 0x01, 0x8A, 0x24, 0x28, 0x7E,
+ 0x53, 0x6F, 0x24, 0xFD, 0xAA, 0xE3, 0x78, 0xB6,
+ 0xA5, 0x5D, 0x5A, 0x67, 0x20, 0xE2, 0xBE, 0x3A,
+ 0x2B, 0xE7, 0x86, 0x11, 0xDD, 0x96, 0xCB, 0x09,
+ 0x65, 0xA0, 0x36, 0xF9, 0xB0, 0x20, 0x21, 0x8E,
+ 0xDB, 0xC0, 0x73, 0xC7, 0x79, 0xD8, 0xDA, 0xC2,
+ 0x66, 0x13, 0x64, 0x34, 0x0C, 0xE1, 0x22, 0x24,
+ 0x61, 0x67, 0x08, 0x39, 0x97, 0x3F, 0x33, 0x96,
+ 0xF2, 0x44, 0x18, 0x75, 0xBB, 0xF5, 0x6A, 0x5C,
+ 0x2C, 0xAE, 0x2A, 0x79, 0x3D, 0x47, 0x19, 0x53,
+ 0x50, 0x6C, 0x9F, 0xB3, 0x82, 0x55, 0x09, 0x78,
+ 0x7B, 0xAD, 0xBC, 0x05, 0x6F, 0xC8, 0x3D, 0xB6,
+ 0x7B, 0x30, 0xE6, 0xBB, 0x8B, 0xD0, 0x2F, 0xA6,
+ 0x15, 0xCC, 0x77, 0x8C, 0x21, 0xBA, 0x03, 0xED,
+ 0x56, 0x85, 0x82, 0x4F, 0x97, 0x8C, 0x59, 0x4F,
+ 0x53, 0x5A, 0xD2, 0x70, 0xD9, 0x07, 0xB3, 0xBD,
+ 0x1D, 0x3E, 0x97, 0xD4, 0x7D, 0x93, 0x35, 0xA4,
+ 0x82, 0x6E, 0xEA, 0x4B, 0xC8, 0x6C, 0xF5, 0xE6,
+ 0xEB, 0xAF, 0x11, 0xB0, 0xB4, 0x71, 0x8F, 0x7B,
+ 0xC4, 0x8C, 0xE2, 0x66, 0x51, 0x31, 0x99, 0x01,
+ 0x5B, 0xE7, 0x48, 0xF8, 0x4C, 0xE3, 0x9A, 0x77,
+ 0xF1, 0xC6, 0x09, 0xDE, 0x76, 0xD4, 0xE3, 0x5C,
+ 0xDF, 0xA3, 0xEC, 0x3C, 0x86, 0x7C, 0xA5, 0x3F,
+ 0x8D, 0x2A, 0xF3, 0x0B, 0x54, 0xB7, 0x54, 0xA2,
+ 0xC1, 0x69, 0xC0, 0x6F, 0x1C, 0x1C, 0x76, 0xD8,
+ 0x9F, 0x7A, 0x32, 0xB0, 0xA1, 0xA6, 0x9B, 0xB7,
+ 0x21, 0x56, 0x28, 0x2D, 0xB6, 0x97, 0x03, 0x5E,
+ 0x65, 0xE3, 0x74, 0x9A, 0x96, 0x7A, 0xF9, 0xF5,
+ 0xDD, 0x85, 0xCA, 0x4C, 0xB4, 0x03, 0x6A, 0xCD,
+ 0xB6, 0x01, 0xDC, 0x8B, 0xD8, 0x73, 0x8F, 0x4D,
+ 0x7F, 0xD6, 0x71, 0xEC, 0xD7, 0xC6, 0x0B, 0x5F,
+ 0x09, 0x21, 0xB2, 0x78, 0xA8, 0xAF, 0xAD, 0x2C,
+ 0xD4, 0x93, 0x9F, 0x71, 0xF7, 0x05, 0x89, 0x42,
+ 0xC9, 0x15, 0x6F, 0x2D, 0xE0, 0xBA, 0xC3, 0xD6,
+ 0xBF, 0xAC, 0xF8, 0x24, 0x58, 0x79, 0xA9, 0xC4,
+ 0xB4, 0x49, 0x3E, 0x0B, 0x9E, 0x5E, 0xE4, 0xA6,
+ 0x8B, 0xE8, 0xDE, 0xFB, 0x4A, 0xF1, 0x69, 0x9D,
+ 0x4F, 0x77, 0x83, 0x78, 0x55, 0x19, 0x42, 0x45,
+ 0xBF, 0xBD, 0xBD, 0x12, 0x0F, 0xEF, 0x8D, 0x04,
+ 0xD8, 0x5C, 0xF2, 0xC9, 0xF1, 0xA6, 0xE0, 0x3E,
+ 0x22, 0xA8, 0xA2, 0x5E, 0x66, 0xE9, 0xAB, 0xB4,
+ 0x71, 0xBE, 0x4B, 0x3F, 0xBE, 0xC4, 0xBA, 0x4A
+ },
+ .len = 2048
+ },
+ .ciphertext = {
+ .data = {
+ 0x5E, 0x86, 0x02, 0x64, 0x32, 0xBF, 0x70, 0xC2,
+ 0x19, 0x99, 0x7F, 0x47, 0x0D, 0xA4, 0x91, 0xA8,
+ 0x7A, 0xC0, 0xA5, 0x7E, 0xA8, 0x6C, 0x88, 0x00,
+ 0xEA, 0xB5, 0x96, 0x6B, 0x25, 0xBD, 0xE7, 0x42,
+ 0xDB, 0x35, 0xE7, 0x92, 0x2B, 0x00, 0x82, 0x35,
+ 0xD4, 0x2C, 0xCF, 0x47, 0xC8, 0xB2, 0xB3, 0x57,
+ 0xF7, 0x24, 0x83, 0x7F, 0xC5, 0x2E, 0xF1, 0xC9,
+ 0x57, 0x1A, 0xEF, 0xC2, 0x3A, 0x8C, 0x1E, 0x92,
+ 0x88, 0x05, 0xAF, 0x55, 0xE6, 0x0C, 0xA7, 0x6B,
+ 0x59, 0x62, 0x32, 0x21, 0xF1, 0xFF, 0xB5, 0x5B,
+ 0x22, 0x26, 0x6F, 0x0A, 0x36, 0xDC, 0x0D, 0x16,
+ 0x3B, 0x4E, 0x7C, 0xA3, 0x75, 0x30, 0x3F, 0xB0,
+ 0x99, 0x38, 0x42, 0x8E, 0x89, 0xA3, 0x7C, 0x99,
+ 0x2F, 0x0A, 0xA1, 0xC7, 0xFD, 0x2D, 0x21, 0x8F,
+ 0xBD, 0xD4, 0x11, 0xEA, 0x55, 0xF5, 0x6A, 0x50,
+ 0x90, 0x3B, 0x60, 0x57, 0xE1, 0x86, 0x1E, 0x50,
+ 0x28, 0x67, 0x3F, 0xD2, 0xF3, 0xBD, 0xFA, 0xEE,
+ 0xD6, 0x5A, 0x38, 0x30, 0xA3, 0xDD, 0x78, 0xC4,
+ 0x37, 0x59, 0x52, 0xC0, 0x92, 0x54, 0xC7, 0x53,
+ 0xF0, 0xE6, 0xA9, 0x63, 0x1F, 0x9B, 0x97, 0xFB,
+ 0x40, 0x23, 0xFE, 0x52, 0x6A, 0xF0, 0x3A, 0x94,
+ 0xEB, 0x6A, 0x9E, 0x8F, 0xC5, 0x05, 0x9C, 0x04,
+ 0x1B, 0x00, 0x34, 0x96, 0x12, 0xDA, 0x60, 0xC6,
+ 0xAA, 0x1A, 0x3E, 0xEB, 0x70, 0x17, 0x10, 0xBC,
+ 0xF5, 0xC2, 0xE2, 0x71, 0xF3, 0xB8, 0x1D, 0xCE,
+ 0x47, 0x94, 0x21, 0x71, 0x34, 0x8C, 0xCC, 0xDD,
+ 0x27, 0xCE, 0x6F, 0x68, 0xFF, 0x91, 0x4E, 0xC4,
+ 0xA0, 0xCA, 0xB0, 0x4F, 0x17, 0x53, 0x73, 0x92,
+ 0x6C, 0xA8, 0x16, 0x06, 0xE3, 0xD9, 0x92, 0x99,
+ 0xBE, 0xB0, 0x7D, 0x56, 0xF2, 0x72, 0x30, 0xDA,
+ 0xC4, 0x4E, 0xF4, 0xA6, 0x8F, 0xD2, 0xC7, 0x8A,
+ 0xA2, 0xFC, 0xF5, 0x63, 0x17, 0x48, 0x56, 0x4D,
+ 0xBE, 0x94, 0xFE, 0xF5, 0xB1, 0xA9, 0x96, 0xAB,
+ 0x3F, 0x2D, 0xD4, 0x15, 0xEE, 0x4F, 0xFA, 0x2C,
+ 0xBE, 0x91, 0xB7, 0xBC, 0x18, 0xC8, 0xDB, 0x02,
+ 0x20, 0x29, 0xF1, 0xC1, 0x88, 0x8C, 0x8D, 0xD1,
+ 0xB3, 0x4E, 0x93, 0x96, 0xDD, 0x22, 0xAB, 0x55,
+ 0xB5, 0x9F, 0x8B, 0x20, 0xAE, 0xC6, 0x0E, 0x26,
+ 0xC6, 0xFE, 0x2D, 0x5F, 0x95, 0x89, 0x06, 0x15,
+ 0x3D, 0x88, 0x16, 0xEC, 0x9B, 0x4A, 0x1B, 0x5D,
+ 0x2E, 0xB2, 0x13, 0x56, 0x9F, 0x33, 0xB3, 0x45,
+ 0xBF, 0x5F, 0x25, 0x7E, 0x75, 0x22, 0xD2, 0xE6,
+ 0x9F, 0xAC, 0x2D, 0xFD, 0x99, 0xC2, 0x9B, 0xFC,
+ 0xD7, 0x7A, 0x9B, 0x05, 0x30, 0x0F, 0xB7, 0x4A,
+ 0xFE, 0x24, 0xDD, 0x39, 0x9B, 0xBB, 0x2F, 0xDD,
+ 0xF9, 0xFB, 0xCA, 0x6C, 0x87, 0xBA, 0x73, 0xD4,
+ 0x85, 0x7B, 0xB2, 0x6F, 0x5C, 0xD8, 0xFB, 0xE9,
+ 0x41, 0x24, 0x3A, 0x3B, 0x4F, 0x91, 0x77, 0xA2,
+ 0x35, 0x78, 0xE5, 0x4C, 0xFE, 0x8B, 0x04, 0x03,
+ 0xD3, 0x84, 0xA9, 0x1C, 0xA7, 0x7C, 0x45, 0x13,
+ 0x7D, 0xC5, 0x0A, 0x2F, 0x02, 0xF8, 0x56, 0xD5,
+ 0x5F, 0x35, 0xED, 0x06, 0xBF, 0x67, 0xBA, 0x51,
+ 0x02, 0x95, 0x36, 0xF2, 0x9A, 0xBA, 0x9D, 0xF6,
+ 0xD6, 0x77, 0x50, 0xC9, 0xFC, 0x1E, 0x32, 0xB5,
+ 0x2F, 0xEA, 0x3C, 0x76, 0xB4, 0xE1, 0xCC, 0x42,
+ 0xEB, 0x71, 0x79, 0xD3, 0x7D, 0xB7, 0xC0, 0x88,
+ 0x25, 0x81, 0xE8, 0xC0, 0xB8, 0x38, 0x7E, 0x7B,
+ 0xFD, 0x18, 0xAB, 0x08, 0xB2, 0x71, 0xA5, 0xAD,
+ 0xA7, 0xBE, 0x48, 0x5F, 0x86, 0xE2, 0x41, 0x3D,
+ 0x7C, 0x37, 0x7A, 0xAB, 0xDB, 0xE0, 0x3B, 0x3D,
+ 0xB6, 0xE8, 0x23, 0x7C, 0xF1, 0x8F, 0xBA, 0xB7,
+ 0xE9, 0x78, 0x0B, 0xCA, 0x67, 0xA8, 0x10, 0x36,
+ 0xEB, 0x72, 0xED, 0xDD, 0xF0, 0x5C, 0x74, 0x8E,
+ 0xE5, 0x2A, 0xAE, 0x6E, 0xC4, 0xF1, 0xFC, 0xD8,
+ 0xEE, 0x56, 0x07, 0x88, 0x02, 0xDC, 0x9D, 0xB7,
+ 0xF9, 0x13, 0xE1, 0xE1, 0x9D, 0x89, 0x26, 0x0B,
+ 0x23, 0x74, 0x4A, 0x43, 0xAA, 0xA0, 0xA8, 0x97,
+ 0x85, 0x15, 0x58, 0xAB, 0x2B, 0xB5, 0xDA, 0x1A,
+ 0xBA, 0x29, 0x62, 0xCF, 0xDD, 0xA3, 0xBA, 0x9D,
+ 0x7D, 0x83, 0xA5, 0x18, 0xD4, 0x03, 0x0F, 0x61,
+ 0x9F, 0xB1, 0x7E, 0xEC, 0xD2, 0x6E, 0xAF, 0xCF,
+ 0x1E, 0xC1, 0x88, 0x97, 0x99, 0xD6, 0xBF, 0x47,
+ 0xB9, 0x0A, 0x69, 0x11, 0x3A, 0x55, 0x8B, 0x1D,
+ 0x2D, 0xFF, 0x78, 0xC8, 0xDE, 0x82, 0x29, 0xD6,
+ 0x08, 0x3C, 0xC4, 0xCB, 0x2F, 0x01, 0xD0, 0xE8,
+ 0xB1, 0x75, 0x5E, 0x23, 0xE0, 0x37, 0x7C, 0x1C,
+ 0xB6, 0xD9, 0x47, 0xDE, 0x23, 0x87, 0xD3, 0x68,
+ 0x47, 0x46, 0x78, 0xF3, 0xBF, 0x54, 0xA3, 0xB9,
+ 0x54, 0xD5, 0xC5, 0x0A, 0x7C, 0x92, 0x2A, 0xC2,
+ 0x14, 0x76, 0xA6, 0x5C, 0x6D, 0x0B, 0x94, 0x56,
+ 0x00, 0x6B, 0x5C, 0x27, 0xDE, 0x77, 0x9B, 0xF1,
+ 0xB1, 0x8C, 0xA7, 0x49, 0x77, 0xFC, 0x4E, 0x29,
+ 0x23, 0x8F, 0x2F, 0xF7, 0x83, 0x8D, 0x36, 0xD9,
+ 0xAB, 0x0E, 0x78, 0xF5, 0x90, 0x05, 0xB9, 0x79,
+ 0x70, 0x88, 0x59, 0x6F, 0xE2, 0xC5, 0xD7, 0x80,
+ 0x95, 0x04, 0x29, 0xE0, 0xFA, 0x37, 0xE8, 0x8B,
+ 0xC5, 0x21, 0x51, 0x1A, 0x62, 0xCE, 0x93, 0xAF,
+ 0x1A, 0xFE, 0xC3, 0x6F, 0x86, 0x94, 0x5E, 0x13,
+ 0xA6, 0x9A, 0x26, 0xF0, 0xB5, 0x7C, 0x41, 0x9A,
+ 0x80, 0xB8, 0x84, 0x5A, 0x55, 0xA9, 0xB0, 0x6A,
+ 0xFA, 0xEB, 0x46, 0x32, 0x0B, 0xE2, 0x9C, 0x65,
+ 0x86, 0x11, 0x39, 0x7E, 0xAF, 0x93, 0x19, 0x09,
+ 0x70, 0x40, 0x80, 0x14, 0xBA, 0x1D, 0xB3, 0x62,
+ 0x5B, 0xF3, 0x9A, 0x21, 0x98, 0x7E, 0x63, 0xB6,
+ 0x1A, 0xBD, 0x65, 0x98, 0x35, 0x2A, 0xA9, 0x76,
+ 0x29, 0x59, 0x84, 0x25, 0x81, 0xB8, 0xDE, 0x25,
+ 0x32, 0x10, 0x50, 0xB7, 0xD3, 0xB3, 0x69, 0xC8,
+ 0xE1, 0x33, 0xCB, 0x9E, 0x9C, 0x7A, 0x7C, 0xD2,
+ 0x6C, 0x92, 0x97, 0xA9, 0xFA, 0xAF, 0x30, 0xBA,
+ 0x9A, 0xB3, 0x3D, 0x9A, 0xE5, 0x0A, 0x9B, 0x8D,
+ 0x89, 0xE2, 0x2B, 0xB8, 0xBC, 0xF0, 0x23, 0xFF,
+ 0x7B, 0x0D, 0x00, 0x36, 0xEE, 0x79, 0xCB, 0xA5,
+ 0x70, 0x4C, 0x66, 0x02, 0x79, 0x2E, 0x5B, 0x83,
+ 0xCE, 0x55, 0x8B, 0x89, 0xD6, 0xE3, 0x71, 0x63,
+ 0xBC, 0xB1, 0x5F, 0x67, 0xB4, 0x7E, 0x05, 0x0D,
+ 0xAC, 0x6D, 0x4E, 0x2C, 0xA5, 0xF4, 0x47, 0x89,
+ 0xAC, 0x5E, 0xBE, 0x2F, 0xFC, 0x9B, 0x2F, 0x0B,
+ 0xBE, 0x63, 0x54, 0x97, 0xBB, 0x23, 0x27, 0xCD,
+ 0xB9, 0xB2, 0x28, 0x0D, 0xA4, 0x78, 0x2C, 0xAB,
+ 0xD1, 0xC9, 0x94, 0x40, 0x54, 0xF2, 0x35, 0x61,
+ 0x49, 0x01, 0x87, 0x55, 0xA5, 0xB5, 0x1E, 0x84,
+ 0x92, 0x9E, 0xC1, 0xA4, 0x0B, 0x66, 0x2B, 0xF8,
+ 0xAF, 0xC3, 0x1E, 0xAF, 0x66, 0x3F, 0x6F, 0x5F,
+ 0x70, 0xEC, 0x25, 0x29, 0xE4, 0x65, 0xB2, 0x04,
+ 0x47, 0xF6, 0x3C, 0xB5, 0x5F, 0x66, 0x9F, 0xA4,
+ 0x1B, 0xFC, 0xA2, 0xD5, 0x3E, 0x84, 0xBA, 0x88,
+ 0x0D, 0xF1, 0x6A, 0xF2, 0xF6, 0x1D, 0xF1, 0xA3,
+ 0x45, 0xB2, 0x51, 0xD8, 0xA2, 0x8F, 0x55, 0xA6,
+ 0x89, 0xC4, 0x15, 0xD5, 0x73, 0xA8, 0xB1, 0x31,
+ 0x66, 0x9E, 0xC1, 0x43, 0xE1, 0x5D, 0x4E, 0x04,
+ 0x84, 0x8F, 0xF2, 0xBC, 0xE1, 0x4E, 0x4D, 0x60,
+ 0x81, 0xCA, 0x53, 0x34, 0x95, 0x17, 0x3B, 0xAE,
+ 0x8F, 0x95, 0xA7, 0xC6, 0x47, 0xC6, 0xAC, 0x32,
+ 0x12, 0x39, 0xCA, 0xEF, 0xE0, 0x07, 0xBF, 0x17,
+ 0x4F, 0xDC, 0x1B, 0x4E, 0x3C, 0x84, 0xF1, 0x9F,
+ 0x43, 0x70, 0x19, 0xE6, 0xF3, 0x8B, 0x8B, 0x5D,
+ 0xDB, 0xD2, 0x9D, 0xD4, 0xB2, 0x30, 0x45, 0x55,
+ 0xA2, 0x67, 0xA2, 0x76, 0x4A, 0x74, 0xAD, 0x88,
+ 0x71, 0xE6, 0x3E, 0x13, 0x06, 0x30, 0x17, 0xE1,
+ 0xEF, 0xAC, 0x71, 0xFB, 0x43, 0xCD, 0xF6, 0xFA,
+ 0x0E, 0x4C, 0x4E, 0x16, 0xF6, 0x6A, 0x09, 0x86,
+ 0x6B, 0xEA, 0x47, 0x6C, 0x70, 0xE7, 0xAD, 0xA2,
+ 0xE0, 0xFD, 0x7F, 0xF0, 0x5C, 0x21, 0x53, 0x0F,
+ 0x28, 0xA1, 0x43, 0xE1, 0x06, 0xCA, 0x0B, 0x31,
+ 0x88, 0x22, 0xA6, 0xE6, 0x34, 0x5B, 0xE6, 0xCF,
+ 0x25, 0x81, 0x63, 0xFF, 0x78, 0x66, 0x85, 0x19,
+ 0xE2, 0x0A, 0x7E, 0x81, 0x8A, 0x17, 0x1A, 0x18,
+ 0x8A, 0x5F, 0x5D, 0x9E, 0x82, 0x13, 0x10, 0xB9,
+ 0xD3, 0xE6, 0x93, 0x1C, 0xE4, 0x2C, 0xCB, 0x49,
+ 0x1E, 0xB6, 0x36, 0x13, 0xBF, 0x28, 0xEE, 0xCC,
+ 0x49, 0xF5, 0x79, 0xFC, 0x20, 0x65, 0xBD, 0xE8,
+ 0xF0, 0x1B, 0x4E, 0xC0, 0x0D, 0x3E, 0x89, 0x91,
+ 0xCC, 0x64, 0x10, 0xC0, 0x2A, 0x2B, 0xA3, 0xFA,
+ 0x60, 0x3D, 0xC3, 0x52, 0x2F, 0x93, 0xDE, 0xB7,
+ 0x6E, 0x8A, 0xDF, 0x6C, 0x08, 0xCC, 0x8B, 0x3B,
+ 0xC8, 0x50, 0xEF, 0x58, 0x64, 0x9A, 0x3D, 0x16,
+ 0x70, 0x94, 0x11, 0xD8, 0x94, 0x2B, 0x70, 0x91,
+ 0x10, 0x70, 0x88, 0xF0, 0x40, 0x75, 0x9A, 0x2B,
+ 0x39, 0xA1, 0x27, 0x3F, 0x2E, 0x91, 0xEA, 0xA1,
+ 0xCC, 0x12, 0xC1, 0x7F, 0x73, 0x8C, 0x5C, 0x6B,
+ 0xFC, 0xC5, 0x6A, 0x1C, 0x05, 0xF1, 0x3D, 0x30,
+ 0x82, 0x4A, 0x65, 0x35, 0xCE, 0x80, 0x10, 0xBB,
+ 0x41, 0x94, 0xFB, 0x84, 0x80, 0x7B, 0x91, 0xC4,
+ 0x4D, 0xA3, 0x5F, 0xB9, 0xFB, 0xF9, 0xC9, 0x1D,
+ 0x4F, 0x99, 0x1C, 0x1F, 0x47, 0x44, 0x89, 0x0E,
+ 0xED, 0x6D, 0xB5, 0x85, 0x41, 0x94, 0xEF, 0xF9,
+ 0x2E, 0xA0, 0xC8, 0xCA, 0xFB, 0x44, 0x02, 0xC6,
+ 0xBF, 0x96, 0x87, 0x80, 0x1D, 0xEF, 0x2A, 0x81,
+ 0xAB, 0xB2, 0x56, 0xDF, 0x54, 0x8B, 0xAB, 0xAF,
+ 0xFE, 0x18, 0x8C, 0xAA, 0xD4, 0x00, 0x17, 0xBE,
+ 0xCF, 0x06, 0xE5, 0xA6, 0xBF, 0x5A, 0x52, 0x3B,
+ 0x4E, 0xF5, 0x65, 0x60, 0x95, 0xDE, 0x8A, 0x25,
+ 0x88, 0xA5, 0x24, 0x96, 0x29, 0x13, 0x0D, 0x19,
+ 0x45, 0x95, 0x91, 0x08, 0xD2, 0x9C, 0x4C, 0x34,
+ 0x42, 0xF0, 0xA5, 0x72, 0xEB, 0xFB, 0x5E, 0xAA,
+ 0x68, 0x80, 0x82, 0xAC, 0x34, 0xAD, 0x89, 0xF6,
+ 0xAF, 0x54, 0x82, 0xCF, 0x98, 0x8C, 0x75, 0x63,
+ 0x8D, 0xBD, 0x1C, 0x2A, 0xD7, 0x00, 0xA7, 0x8E,
+ 0xB9, 0x33, 0xB6, 0x3B, 0x95, 0x9A, 0x59, 0x1D,
+ 0x3F, 0x23, 0x6B, 0x18, 0xF8, 0x4F, 0x1A, 0x8D,
+ 0xC0, 0x26, 0x9F, 0x87, 0x61, 0xB6, 0xC6, 0x60,
+ 0x38, 0x22, 0x73, 0x1C, 0x99, 0x23, 0xEF, 0xD9,
+ 0xFD, 0xCB, 0x54, 0x74, 0xBB, 0x77, 0x14, 0xA3,
+ 0xA9, 0xE6, 0x7C, 0x7E, 0x03, 0x3A, 0x13, 0x6E,
+ 0x1D, 0x6F, 0x64, 0xB3, 0xFA, 0xFB, 0x52, 0xDE,
+ 0xDF, 0x08, 0xFB, 0x6F, 0xC5, 0xFA, 0x51, 0x6A,
+ 0x69, 0x29, 0x9B, 0x96, 0xE8, 0x16, 0xC8, 0xD1,
+ 0xE4, 0x19, 0xBD, 0x14, 0x74, 0x27, 0xE7, 0x10,
+ 0xF0, 0xC3, 0xE2, 0xA7, 0x60, 0x48, 0xBF, 0xDD,
+ 0xC4, 0x0D, 0xD0, 0xF2, 0xEF, 0xA6, 0xC9, 0xA2,
+ 0x73, 0xD1, 0xCF, 0x41, 0xE1, 0x3B, 0xE5, 0x49,
+ 0x91, 0x5D, 0x09, 0xFD, 0x1D, 0x95, 0x29, 0xDB,
+ 0x52, 0x48, 0xEB, 0xF5, 0x1D, 0xF8, 0x06, 0x67,
+ 0x75, 0xF2, 0x57, 0xA4, 0x20, 0x60, 0xEA, 0xB0,
+ 0x85, 0x93, 0x7C, 0xDD, 0x52, 0x01, 0xD4, 0x57,
+ 0xA8, 0x31, 0x2D, 0xF9, 0x0A, 0xD2, 0x2A, 0xD1,
+ 0x34, 0x18, 0x35, 0x16, 0xB6, 0x8B, 0x0F, 0x0B,
+ 0xCF, 0x50, 0x80, 0xFE, 0x76, 0xCC, 0x4F, 0x30,
+ 0x98, 0x19, 0x16, 0x3D, 0x01, 0xEA, 0x8D, 0x8A,
+ 0x3D, 0xDC, 0xFB, 0x1F, 0x77, 0x8D, 0x72, 0x76,
+ 0x02, 0x3C, 0x5D, 0xEE, 0x55, 0x13, 0x5B, 0x6E,
+ 0x5A, 0x2D, 0xD5, 0x77, 0xD7, 0x01, 0x84, 0x7D,
+ 0x21, 0x8C, 0xDD, 0x94, 0x7D, 0x31, 0x3D, 0xF0,
+ 0xE7, 0x28, 0xF5, 0x72, 0x36, 0x60, 0xE0, 0x59,
+ 0x5F, 0xFE, 0x38, 0xF8, 0x2F, 0xDB, 0x9E, 0x55,
+ 0x5A, 0xD6, 0xBA, 0x6C, 0x87, 0xF3, 0xC0, 0x76,
+ 0x5F, 0xA3, 0x0A, 0xC3, 0xA3, 0x8D, 0x0E, 0x52,
+ 0xA8, 0xDA, 0x26, 0x3A, 0xF9, 0x3E, 0x36, 0xB1,
+ 0x06, 0xF8, 0x20, 0x2D, 0x1C, 0x0B, 0x93, 0xBB,
+ 0xD3, 0x64, 0x77, 0xCE, 0x11, 0xFC, 0xA2, 0x0E,
+ 0x1B, 0x5B, 0x9E, 0x13, 0x9F, 0x20, 0x8B, 0xAA,
+ 0xCD, 0x72, 0xD7, 0xA6, 0xF3, 0x1E, 0x4F, 0x72,
+ 0xC6, 0x49, 0x0F, 0x7B, 0xF0, 0x4C, 0x61, 0x1F,
+ 0x43, 0x0D, 0x4F, 0x0D, 0x33, 0x13, 0xED, 0x63,
+ 0xE5, 0xDB, 0x71, 0xAB, 0xA4, 0x83, 0xEF, 0xDC,
+ 0x86, 0x9D, 0x4B, 0xBD, 0x1B, 0x8A, 0xFE, 0x39,
+ 0xA8, 0x8B, 0xBA, 0x4C, 0x85, 0x28, 0xFC, 0xB3,
+ 0x62, 0x85, 0xD2, 0xF0, 0x38, 0xD0, 0x4B, 0xA4,
+ 0xD1, 0x3B, 0xD4, 0xD0, 0x2C, 0x78, 0x6C, 0x6A,
+ 0xC2, 0x64, 0x2C, 0x31, 0x4A, 0xD8, 0x69, 0x24,
+ 0xED, 0x77, 0x7D, 0x68, 0x9A, 0xA1, 0x78, 0x81,
+ 0xD9, 0x7E, 0x6C, 0xFE, 0x0A, 0x0D, 0x76, 0xF7,
+ 0x4B, 0x58, 0xE7, 0xC9, 0xB5, 0x11, 0x07, 0x87,
+ 0x88, 0x6A, 0x9F, 0x3D, 0xE0, 0xEE, 0xCC, 0x60,
+ 0x6B, 0x6B, 0xE6, 0xB5, 0x54, 0x8B, 0x32, 0x1F,
+ 0x04, 0x1D, 0x0E, 0x9E, 0xFA, 0x6D, 0xB0, 0xE0,
+ 0x6D, 0xF9, 0x79, 0xB4, 0xAB, 0x5E, 0xDF, 0x23,
+ 0x7F, 0x95, 0xAD, 0x80, 0x17, 0x23, 0x90, 0x1F,
+ 0xF0, 0xC3, 0xD9, 0x2D, 0xAC, 0x3F, 0x63, 0xF5,
+ 0x77, 0xC5, 0x05, 0xAC, 0x06, 0xB6, 0xA1, 0xB4,
+ 0xA2, 0x40, 0xB3, 0x99, 0x34, 0x7D, 0x31, 0xD4,
+ 0xB1, 0xD4, 0xC1, 0xBB, 0x71, 0x1E, 0xDA, 0x3F,
+ 0xA9, 0x12, 0x68, 0xFA, 0x5B, 0x20, 0x24, 0x6D,
+ 0x4D, 0x72, 0x43, 0x18, 0xBF, 0x66, 0x71, 0x69,
+ 0x26, 0x7D, 0x77, 0x78, 0xF8, 0xE5, 0x20, 0xAE,
+ 0x56, 0x6C, 0x0F, 0x72, 0x94, 0x42, 0x85, 0x4F,
+ 0xE4, 0xFB, 0x32, 0x26, 0x1B, 0x1C, 0x6E, 0x0B,
+ 0xF0, 0xB8, 0x58, 0x00, 0xD2, 0x36, 0x64, 0xAD,
+ 0xA9, 0x00, 0xCE, 0x35, 0x3C, 0x88, 0x79, 0x94,
+ 0x0C, 0x0C, 0x9B, 0xF2, 0xDA, 0xBD, 0xCA, 0x93,
+ 0x37, 0x26, 0xD3, 0x08, 0x54, 0xD2, 0x0D, 0xBC,
+ 0x5D, 0x43, 0x5F, 0xCF, 0x28, 0xB5, 0xAA, 0x15,
+ 0x28, 0x46, 0x45, 0x6B, 0xE8, 0xDF, 0xE8, 0xCE,
+ 0x8F, 0xC0, 0x1A, 0x53, 0x63, 0x3B, 0x53, 0x75,
+ 0xDD, 0x43, 0x1F, 0x07, 0x0A, 0xD5, 0xA1, 0x2A,
+ 0x6E, 0x28, 0xE1, 0xD7, 0xD0, 0x09, 0xCF, 0x62,
+ 0xC1, 0x5F, 0x21, 0xDB, 0xC5, 0x40, 0x99, 0x48,
+ 0x87, 0x6E, 0x11, 0xF5, 0x5A, 0x4E, 0xBC, 0xF9,
+ 0xA8, 0x02, 0x7C, 0x47, 0x39, 0xA5, 0xD8, 0x52,
+ 0xB1, 0x80, 0xDC, 0xFE, 0x08, 0x4B, 0x5D, 0x09,
+ 0xDE, 0x06, 0xF3, 0x2A, 0xAD, 0x14, 0x76, 0x40,
+ 0x2F, 0x82, 0x28, 0x6A, 0xB6, 0x43, 0xEF, 0x71,
+ 0x63, 0xC2, 0x56, 0xEB, 0x3B, 0x4B, 0x52, 0x2F,
+ 0x93, 0xD3, 0x18, 0x3E, 0x18, 0xA8, 0xF7, 0x58,
+ 0xFC, 0x8B, 0x3D, 0x4D, 0x4B, 0x72, 0xBD, 0xF7,
+ 0x04, 0xC9, 0xB8, 0xD7, 0x6C, 0x8C, 0x67, 0xBB,
+ 0x4C, 0x9B, 0x57, 0xF7, 0x22, 0x4E, 0x41, 0xB6,
+ 0xFD, 0xD9, 0xF8, 0x41, 0x62, 0x0F, 0xFF, 0xAA,
+ 0xC6, 0x87, 0x95, 0xFF, 0xFD, 0x58, 0xD9, 0xB2,
+ 0xBA, 0x47, 0x61, 0x24, 0xEA, 0x92, 0x6E, 0x74,
+ 0xB3, 0xDA, 0xE5, 0x83, 0x99, 0x24, 0xB1, 0x71,
+ 0x2A, 0x33, 0xB2, 0xD5, 0x8F, 0xF0, 0x32, 0xCE,
+ 0x37, 0xCF, 0xC7, 0x1C, 0xE8, 0xDE, 0x46, 0x78,
+ 0x96, 0x97, 0xF6, 0x73, 0x90, 0xE5, 0x71, 0x05,
+ 0xEA, 0x0D, 0xC2, 0x1D, 0x9E, 0x43, 0x34, 0xBC,
+ 0x8F, 0x45, 0xE5, 0x08, 0xCA, 0x20, 0x0C, 0x84
+ },
+ .len = 2048
+ },
+ .auth_tag = {
+ .data = {
+ 0xD0, 0x62, 0x1F, 0x20, 0x1C, 0xE8, 0xDD, 0x36,
+ 0x00, 0x74, 0xF6, 0xD7, 0xFD, 0x2C, 0xA0, 0xAF
+ },
+ .len = 16
+ }
+};
+
+/** AES-256 Test Vectors */
+static const struct gcm_test_data gcm_test_case_256_1 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 0
+ },
+ .plaintext = {
+ .data = { 0x00 },
+ .len = 0
+ },
+ .ciphertext = {
+ .data = { 0x00 },
+ .len = 0
+ },
+ .auth_tag = {
+ .data = {
+ 0x53, 0x0F, 0x8A, 0xFB, 0xC7, 0x45, 0x36, 0xB9,
+ 0xA9, 0x63, 0xB4, 0xF1, 0xC4, 0xCB, 0x73, 0x8B },
+ .len = 16
+ }
+};
+
+/** AES-256 Test Vectors */
+static const struct gcm_test_data gcm_test_case_256_2 = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 0
+ },
+ .plaintext = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .len = 16
+ },
+ .ciphertext = {
+ .data = {
+ 0xCE, 0xA7, 0x40, 0x3D, 0x4D, 0x60, 0x6B, 0x6E,
+ 0x07, 0x4E, 0xC5, 0xD3, 0xBA, 0xF3, 0x9D, 0x18 },
+ .len = 16
+ },
+ .auth_tag = {
+ .data = {
+ 0xD0, 0xD1, 0xC8, 0xA7, 0x99, 0x99, 0x6B, 0xF0,
+ 0x26, 0x5B, 0x98, 0xB5, 0xD4, 0x8A, 0xB9, 0x19 },
+ .len = 16
+ }
+};
+
+/** AES-256 Test Vectors */
+static const struct gcm_test_data gcm_test_case_256_3 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 0
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
+ .len = 64
+ },
+ .ciphertext = {
+ .data = {
+ 0x05, 0xA2, 0x39, 0xA5, 0xE1, 0x1A, 0x74, 0xEA,
+ 0x6B, 0x2A, 0x55, 0xF6, 0xD7, 0x88, 0x44, 0x7E,
+ 0x93, 0x7E, 0x23, 0x64, 0x8D, 0xF8, 0xD4, 0x04,
+ 0x3B, 0x40, 0xEF, 0x6D, 0x7C, 0x6B, 0xF3, 0xB9,
+ 0x50, 0x15, 0x97, 0x5D, 0xB8, 0x28, 0xA1, 0xD5,
+ 0x22, 0xDE, 0x36, 0x26, 0xD0, 0x6A, 0x7A, 0xC0,
+ 0xB5, 0x14, 0x36, 0xAF, 0x3A, 0xC6, 0x50, 0xAB,
+ 0xFA, 0x47, 0xC8, 0x2E, 0xF0, 0x68, 0xE1, 0x3E },
+ .len = 64
+ },
+ .auth_tag = {
+ .data = {
+ 0x64, 0xAF, 0x1D, 0xFB, 0xE8, 0x0D, 0x37, 0xD8,
+ 0x92, 0xC3, 0xB9, 0x1D, 0xD3, 0x08, 0xAB, 0xFC },
+ .len = 16
+ }
+};
+
+/** AES-256 Test Vectors */
+static const struct gcm_test_data gcm_test_case_256_4 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39 },
+ .len = 60
+ },
+ .ciphertext = {
+ .data = {
+ 0x05, 0xA2, 0x39, 0xA5, 0xE1, 0x1A, 0x74, 0xEA,
+ 0x6B, 0x2A, 0x55, 0xF6, 0xD7, 0x88, 0x44, 0x7E,
+ 0x93, 0x7E, 0x23, 0x64, 0x8D, 0xF8, 0xD4, 0x04,
+ 0x3B, 0x40, 0xEF, 0x6D, 0x7C, 0x6B, 0xF3, 0xB9,
+ 0x50, 0x15, 0x97, 0x5D, 0xB8, 0x28, 0xA1, 0xD5,
+ 0x22, 0xDE, 0x36, 0x26, 0xD0, 0x6A, 0x7A, 0xC0,
+ 0xB5, 0x14, 0x36, 0xAF, 0x3A, 0xC6, 0x50, 0xAB,
+ 0xFA, 0x47, 0xC8, 0x2E },
+ .len = 60
+ },
+ .auth_tag = {
+ .data = {
+ 0x63, 0x16, 0x91, 0xAE, 0x17, 0x05, 0x5E, 0xA6,
+ 0x6D, 0x0A, 0x51, 0xE2, 0x50, 0x21, 0x85, 0x4A },
+ .len = 16
+ }
+
+};
+
+/** AES-256 Test Vectors */
+static const struct gcm_test_data gcm_test_case_256_5 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_text,
+ .len = 8
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39 },
+ .len = 60
+ },
+ .ciphertext = {
+ .data = {
+ 0x05, 0xA2, 0x39, 0xA5, 0xE1, 0x1A, 0x74, 0xEA,
+ 0x6B, 0x2A, 0x55, 0xF6, 0xD7, 0x88, 0x44, 0x7E,
+ 0x93, 0x7E, 0x23, 0x64, 0x8D, 0xF8, 0xD4, 0x04,
+ 0x3B, 0x40, 0xEF, 0x6D, 0x7C, 0x6B, 0xF3, 0xB9,
+ 0x50, 0x15, 0x97, 0x5D, 0xB8, 0x28, 0xA1, 0xD5,
+ 0x22, 0xDE, 0x36, 0x26, 0xD0, 0x6A, 0x7A, 0xC0,
+ 0xB5, 0x14, 0x36, 0xAF, 0x3A, 0xC6, 0x50, 0xAB,
+ 0xFA, 0x47, 0xC8, 0x2E },
+ .len = 60
+ },
+ .auth_tag = {
+ .data = {
+ 0xA7, 0x99, 0xAC, 0xB8, 0x27, 0xDA, 0xB1, 0x82,
+ 0x79, 0xFD, 0x83, 0x73, 0x52, 0x4D, 0xDB, 0xF1 },
+ .len = 16
+ }
+
+};
+
+/** AES-256 Test Vectors */
+static const struct gcm_test_data gcm_test_case_256_6 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 12
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39 },
+ .len = 60
+ },
+ .ciphertext = {
+ .data = {
+ 0x05, 0xA2, 0x39, 0xA5, 0xE1, 0x1A, 0x74, 0xEA,
+ 0x6B, 0x2A, 0x55, 0xF6, 0xD7, 0x88, 0x44, 0x7E,
+ 0x93, 0x7E, 0x23, 0x64, 0x8D, 0xF8, 0xD4, 0x04,
+ 0x3B, 0x40, 0xEF, 0x6D, 0x7C, 0x6B, 0xF3, 0xB9,
+ 0x50, 0x15, 0x97, 0x5D, 0xB8, 0x28, 0xA1, 0xD5,
+ 0x22, 0xDE, 0x36, 0x26, 0xD0, 0x6A, 0x7A, 0xC0,
+ 0xB5, 0x14, 0x36, 0xAF, 0x3A, 0xC6, 0x50, 0xAB,
+ 0xFA, 0x47, 0xC8, 0x2E },
+ .len = 60
+ },
+ .auth_tag = {
+ .data = {
+ 0x5D, 0xA5, 0x0E, 0x53, 0x64, 0x7F, 0x3F, 0xAE,
+ 0x1A, 0x1F, 0xC0, 0xB0, 0xD8, 0xBE, 0xF2, 0x64 },
+ .len = 16
+ }
+};
+
+/** AES-256 Test Vectors */
+static const struct gcm_test_data gcm_test_case_256_7 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_text,
+ .len = 12
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39 },
+ .len = 60
+ },
+ .ciphertext = {
+ .data = {
+ 0x05, 0xA2, 0x39, 0xA5, 0xE1, 0x1A, 0x74, 0xEA,
+ 0x6B, 0x2A, 0x55, 0xF6, 0xD7, 0x88, 0x44, 0x7E,
+ 0x93, 0x7E, 0x23, 0x64, 0x8D, 0xF8, 0xD4, 0x04,
+ 0x3B, 0x40, 0xEF, 0x6D, 0x7C, 0x6B, 0xF3, 0xB9,
+ 0x50, 0x15, 0x97, 0x5D, 0xB8, 0x28, 0xA1, 0xD5,
+ 0x22, 0xDE, 0x36, 0x26, 0xD0, 0x6A, 0x7A, 0xC0,
+ 0xB5, 0x14, 0x36, 0xAF, 0x3A, 0xC6, 0x50, 0xAB,
+ 0xFA, 0x47, 0xC8, 0x2E },
+ .len = 60
+ },
+ .auth_tag = {
+ .data = {
+ 0x4E, 0xD0, 0x91, 0x95, 0x83, 0xA9, 0x38, 0x72,
+ 0x09, 0xA9, 0xCE, 0x5F, 0x89, 0x06, 0x4E, 0xC8 },
+ .len = 16
+ }
+};
+
+/** variable AAD AES-128 Test Vectors */
+static const struct gcm_test_data gcm_test_case_aad_1 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_text,
+ .len = GCM_LARGE_AAD_LENGTH
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
+ .len = 64
+ },
+ .ciphertext = {
+ .data = {
+ 0x42, 0x83, 0x1E, 0xC2, 0x21, 0x77, 0x74, 0x24,
+ 0x4B, 0x72, 0x21, 0xB7, 0x84, 0xD0, 0xD4, 0x9C,
+ 0xE3, 0xAA, 0x21, 0x2F, 0x2C, 0x02, 0xA4, 0xE0,
+ 0x35, 0xC1, 0x7E, 0x23, 0x29, 0xAC, 0xA1, 0x2E,
+ 0x21, 0xD5, 0x14, 0xB2, 0x54, 0x66, 0x93, 0x1C,
+ 0x7D, 0x8F, 0x6A, 0x5A, 0xAC, 0x84, 0xAA, 0x05,
+ 0x1B, 0xA3, 0x0B, 0x39, 0x6A, 0x0A, 0xAC, 0x97,
+ 0x3D, 0x58, 0xE0, 0x91, 0x47, 0x3F, 0x59, 0x85
+ },
+ .len = 64
+ },
+ .auth_tag = {
+ .data = {
+ 0xCA, 0x70, 0xAF, 0x96, 0xA8, 0x5D, 0x40, 0x47,
+ 0x0C, 0x3C, 0x48, 0xF5, 0xF0, 0xF5, 0xA5, 0x7D
+ },
+ .len = 16
+ }
+};
+
+/** variable AAD AES-256 Test Vectors */
+static const struct gcm_test_data gcm_test_case_aad_2 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_text,
+ .len = GCM_LARGE_AAD_LENGTH
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55 },
+ .len = 64
+ },
+ .ciphertext = {
+ .data = {
+ 0x05, 0xA2, 0x39, 0xA5, 0xE1, 0x1A, 0x74, 0xEA,
+ 0x6B, 0x2A, 0x55, 0xF6, 0xD7, 0x88, 0x44, 0x7E,
+ 0x93, 0x7E, 0x23, 0x64, 0x8D, 0xF8, 0xD4, 0x04,
+ 0x3B, 0x40, 0xEF, 0x6D, 0x7C, 0x6B, 0xF3, 0xB9,
+ 0x50, 0x15, 0x97, 0x5D, 0xB8, 0x28, 0xA1, 0xD5,
+ 0x22, 0xDE, 0x36, 0x26, 0xD0, 0x6A, 0x7A, 0xC0,
+ 0xB5, 0x14, 0x36, 0xAF, 0x3A, 0xC6, 0x50, 0xAB,
+ 0xFA, 0x47, 0xC8, 0x2E, 0xF0, 0x68, 0xE1, 0x3E
+ },
+ .len = 64
+ },
+ .auth_tag = {
+ .data = {
+ 0xBA, 0x06, 0xDA, 0xA1, 0x91, 0xE1, 0xFE, 0x22,
+ 0x59, 0xDA, 0x67, 0xAF, 0x9D, 0xA5, 0x43, 0x94
+ },
+ .len = 16
+ }
+};
+
+/** GMAC Test Vectors */
+static uint8_t gmac_plaintext[GMAC_LARGE_PLAINTEXT_LENGTH] = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10,
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10
+};
+
+static const struct gmac_test_data gmac_test_case_1 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ .len = 12
+ },
+ .aad = {
+ .data = gmac_plaintext,
+ .len = 160
+ },
+ .plaintext = {
+ .data = NULL,
+ .len = 0
+ },
+ .gmac_tag = {
+ .data = {
+ 0x4C, 0x0C, 0x4F, 0x47, 0x2D, 0x78, 0xF6, 0xD8,
+ 0x03, 0x53, 0x20, 0x2F, 0x1A, 0xDF, 0x90, 0xD0
+ },
+ .len = 16
+ },
+};
+
+static const struct gmac_test_data gmac_test_case_2 = {
+ .key = {
+ .data = {
+ 0xaa, 0x74, 0x0a, 0xbf, 0xad, 0xcd, 0xa7, 0x79,
+ 0x22, 0x0d, 0x3b, 0x40, 0x6c, 0x5d, 0x7e, 0xc0,
+ 0x9a, 0x77, 0xfe, 0x9d, 0x94, 0x10, 0x45, 0x39,
+ },
+ .len = 24
+ },
+ .iv = {
+ .data = {
+ 0xab, 0x22, 0x65, 0xb4, 0xc1, 0x68, 0x95,
+ 0x55, 0x61, 0xf0, 0x43, 0x15, },
+ .len = 12
+ },
+ .aad = {
+ .data = gmac_plaintext,
+ .len = 80
+ },
+ .plaintext = {
+ .data = NULL,
+ .len = 0
+ },
+ .gmac_tag = {
+ .data = {
+ 0xCF, 0x82, 0x80, 0x64, 0x02, 0x46, 0xF4, 0xFB,
+ 0x33, 0xAE, 0x1D, 0x90, 0xEA, 0x48, 0x83, 0xDB
+ },
+ .len = 16
+ },
+};
+
+static const struct gmac_test_data gmac_test_case_3 = {
+ .key = {
+ .data = {
+ 0xb5, 0x48, 0xe4, 0x93, 0x4f, 0x5c, 0x64, 0xd3,
+ 0xc0, 0xf0, 0xb7, 0x8f, 0x7b, 0x4d, 0x88, 0x24,
+ 0xaa, 0xc4, 0x6b, 0x3c, 0x8d, 0x2c, 0xc3, 0x5e,
+ 0xe4, 0xbf, 0xb2, 0x54, 0xe4, 0xfc, 0xba, 0xf7,
+ },
+ .len = 32
+ },
+ .iv = {
+ .data = {
+ 0x2e, 0xed, 0xe1, 0xdc, 0x64, 0x47, 0xc7,
+ 0xaf, 0xc4, 0x41, 0x53, 0x58,
+ },
+ .len = 12
+ },
+ .aad = {
+ .data = gmac_plaintext,
+ .len = 65
+ },
+ .plaintext = {
+ .data = NULL,
+ .len = 0
+ },
+ .gmac_tag = {
+ .data = {
+ 0x77, 0x46, 0x0D, 0x6F, 0xB1, 0x87, 0xDB, 0xA9,
+ 0x46, 0xAD, 0xCD, 0xFB, 0xB7, 0xF9, 0x13, 0xA1
+ },
+ .len = 16
+ },
+};
+
+/******* GCM PERF VECTORS ***********/
+
+struct cryptodev_perf_test_data {
+ struct {
+ uint8_t data[64];
+ unsigned len;
+ } key;
+
+ struct {
+ uint8_t data[64] __rte_aligned(16);
+ unsigned len;
+ } iv;
+
+ struct {
+ uint8_t data[64];
+ unsigned len;
+ } aad;
+
+ struct {
+ uint8_t data[2048];
+ unsigned len;
+ } plaintext;
+
+ struct {
+ uint8_t data[2048];
+ unsigned len;
+ } ciphertext;
+
+ struct {
+ uint8_t data[16];
+ unsigned len;
+ } auth_tag;
+
+ struct {
+ uint32_t size;
+ uint8_t data[16];
+ unsigned len;
+ } auth_tags[7];
+
+};
+
+/* 2048B */
+static const struct cryptodev_perf_test_data AES_GCM_128_12IV_0AAD = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ .len = 12
+ },
+ .aad = {
+ .data = { 0 },
+ .len = 0
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55
+ },
+ .len = 2048
+ },
+ .ciphertext = {
+ .data = {
+ 0x42, 0x83, 0x1E, 0xC2, 0x21, 0x77, 0x74, 0x24,
+ 0x4B, 0x72, 0x21, 0xB7, 0x84, 0xD0, 0xD4, 0x9C,
+ 0xE3, 0xAA, 0x21, 0x2F, 0x2C, 0x02, 0xA4, 0xE0,
+ 0x35, 0xC1, 0x7E, 0x23, 0x29, 0xAC, 0xA1, 0x2E,
+ 0x21, 0xD5, 0x14, 0xB2, 0x54, 0x66, 0x93, 0x1C,
+ 0x7D, 0x8F, 0x6A, 0x5A, 0xAC, 0x84, 0xAA, 0x05,
+ 0x1B, 0xA3, 0x0B, 0x39, 0x6A, 0x0A, 0xAC, 0x97,
+ 0x3D, 0x58, 0xE0, 0x91, 0x47, 0x3F, 0x59, 0x85,
+ 0x04, 0x99, 0x55, 0xE1, 0x36, 0x76, 0xB7, 0x14,
+ 0x1D, 0xF0, 0xF6, 0x8C, 0x65, 0xD5, 0xAD, 0xFB,
+ 0x90, 0x7F, 0x5D, 0xA2, 0xD6, 0xFD, 0xD0, 0xE5,
+ 0x0D, 0x9B, 0x68, 0x21, 0x49, 0x42, 0x6E, 0x13,
+ 0xEC, 0x22, 0x50, 0x2A, 0x30, 0x47, 0x49, 0xA1,
+ 0x7F, 0xC3, 0x09, 0xE0, 0x56, 0x91, 0xC4, 0x54,
+ 0x70, 0xD7, 0x19, 0x40, 0xCA, 0x6B, 0x65, 0x27,
+ 0x3E, 0xE9, 0xD1, 0x0F, 0x1C, 0xB5, 0x45, 0x0D,
+ 0x27, 0xE7, 0xCF, 0x94, 0x10, 0xBF, 0xA2, 0xFA,
+ 0x86, 0x20, 0x3F, 0x6E, 0xE9, 0x95, 0x03, 0x5A,
+ 0x46, 0x11, 0x75, 0xD5, 0x37, 0x71, 0x7F, 0xE0,
+ 0xBC, 0x9F, 0xC8, 0xE9, 0xB1, 0x08, 0x2C, 0x59,
+ 0x6E, 0x51, 0x4A, 0x83, 0x38, 0xC1, 0xED, 0xE2,
+ 0x2E, 0x88, 0x90, 0xA5, 0x7D, 0xA4, 0x93, 0x9A,
+ 0x30, 0xD6, 0x96, 0x34, 0x0F, 0xC4, 0xD1, 0x7E,
+ 0xC9, 0x8F, 0xC5, 0xBB, 0x80, 0x50, 0x85, 0x75,
+ 0x7D, 0x82, 0x36, 0xDB, 0x62, 0x15, 0xAF, 0x4B,
+ 0x0A, 0x9D, 0xCD, 0x64, 0x00, 0xAB, 0x88, 0x28,
+ 0xA8, 0x35, 0x17, 0x70, 0x6F, 0x47, 0x44, 0xCD,
+ 0x65, 0xAE, 0xD5, 0x05, 0x0A, 0xA8, 0x2F, 0x48,
+ 0xAC, 0xA1, 0x72, 0x64, 0x1C, 0x7E, 0xD3, 0xF5,
+ 0xD8, 0x4E, 0x73, 0x17, 0x0C, 0xE5, 0x9F, 0xB6,
+ 0x00, 0xFA, 0xD7, 0x2C, 0x3D, 0x6A, 0x10, 0x47,
+ 0x7C, 0xF2, 0x6B, 0x13, 0x10, 0x8A, 0x76, 0x39,
+ 0xF8, 0x50, 0x33, 0xAC, 0x08, 0x1D, 0xA3, 0x48,
+ 0xE1, 0xD0, 0x05, 0x49, 0xB7, 0x76, 0x03, 0x72,
+ 0x07, 0xC5, 0xD3, 0x08, 0x79, 0x38, 0x66, 0xC1,
+ 0x52, 0xAF, 0x83, 0xCD, 0xF3, 0x86, 0x62, 0xBF,
+ 0x92, 0x24, 0x97, 0xBD, 0x5D, 0x7D, 0x81, 0x56,
+ 0x4C, 0xF3, 0xD2, 0x60, 0xC2, 0xDE, 0x61, 0xC1,
+ 0x39, 0x61, 0xDA, 0x07, 0x50, 0xC7, 0x98, 0x63,
+ 0x7E, 0xDD, 0x54, 0xCA, 0xDE, 0x12, 0xD2, 0xA8,
+ 0x19, 0x08, 0x6E, 0xF9, 0xFA, 0x6F, 0x58, 0x97,
+ 0xD4, 0x0B, 0x5C, 0x5B, 0xE5, 0x30, 0xE5, 0x4C,
+ 0x0E, 0x16, 0x87, 0xF0, 0x2C, 0xCB, 0x53, 0xB8,
+ 0x0C, 0xE5, 0xDF, 0x16, 0x7B, 0xE8, 0xC2, 0xCF,
+ 0xCC, 0xFF, 0x51, 0x24, 0xC1, 0xDD, 0x59, 0x9C,
+ 0xA7, 0x56, 0x03, 0xB9, 0x0A, 0x37, 0xA2, 0xAC,
+ 0x28, 0x8B, 0xEB, 0x51, 0x4E, 0xF1, 0xAE, 0xB5,
+ 0xC8, 0xB5, 0xCB, 0x8D, 0x23, 0xF6, 0x24, 0x2D,
+ 0xF6, 0x59, 0x62, 0xC0, 0xCB, 0xD3, 0x18, 0xE4,
+ 0xB7, 0x73, 0xEF, 0xDB, 0x13, 0x9A, 0xF5, 0xD3,
+ 0xD5, 0x61, 0x01, 0x14, 0xA5, 0xE5, 0x0D, 0x27,
+ 0xC9, 0xA5, 0x08, 0x1C, 0x60, 0xBA, 0x73, 0xFF,
+ 0xA9, 0xE0, 0x27, 0x86, 0x3F, 0xF7, 0x15, 0x03,
+ 0x69, 0xA7, 0x2B, 0x57, 0xAC, 0xA6, 0x70, 0x55,
+ 0xE9, 0xB5, 0x3F, 0xEB, 0x6F, 0xCE, 0x8A, 0xA1,
+ 0x9D, 0x8B, 0x84, 0xF1, 0x7C, 0xD0, 0x35, 0x21,
+ 0x91, 0x3D, 0x3D, 0x6E, 0x83, 0xFC, 0x45, 0x36,
+ 0x93, 0xDA, 0x66, 0xDF, 0x1A, 0x59, 0x22, 0xA5,
+ 0xC4, 0x99, 0x9B, 0xF8, 0x48, 0x9A, 0x50, 0x09,
+ 0xAB, 0xAE, 0x56, 0xB6, 0x49, 0x02, 0x3E, 0x90,
+ 0xB6, 0x07, 0x7E, 0xA7, 0x6A, 0x0A, 0xB5, 0x85,
+ 0x31, 0x0D, 0x84, 0xD4, 0x01, 0xE4, 0x48, 0x63,
+ 0xF3, 0xC1, 0x54, 0x65, 0xA6, 0x4C, 0x8B, 0x33,
+ 0xF9, 0x70, 0x59, 0x3B, 0xA6, 0xF6, 0x2B, 0x66,
+ 0xC5, 0xD2, 0xEB, 0xAB, 0x67, 0xD2, 0xE3, 0x78,
+ 0xA9, 0x1A, 0x4C, 0x99, 0xA9, 0xA6, 0xCA, 0xF7,
+ 0x65, 0xF0, 0x48, 0xF8, 0x2A, 0xEA, 0x96, 0x9F,
+ 0xC4, 0x50, 0x9A, 0x0C, 0xB6, 0x0D, 0x8A, 0x2F,
+ 0xC3, 0x99, 0x4E, 0xA0, 0x06, 0x4D, 0xAB, 0x25,
+ 0x2E, 0x44, 0x47, 0xB6, 0x98, 0xF1, 0x2C, 0x96,
+ 0x54, 0x51, 0x12, 0x41, 0x0D, 0xEF, 0x32, 0x9A,
+ 0x4A, 0xBD, 0xA2, 0x26, 0x53, 0xA8, 0xFD, 0x8B,
+ 0x6C, 0x95, 0x0A, 0x1A, 0x96, 0xEF, 0x3C, 0x85,
+ 0x34, 0x4E, 0x25, 0x9E, 0x1C, 0x67, 0x33, 0x8A,
+ 0xFF, 0x6D, 0x98, 0x93, 0x3D, 0x3F, 0x49, 0x6B,
+ 0xBF, 0x7C, 0x4F, 0x63, 0x5D, 0x62, 0x64, 0x67,
+ 0x0D, 0x07, 0x7F, 0x24, 0x4A, 0x23, 0xBC, 0x35,
+ 0xE0, 0x92, 0x6F, 0x51, 0xE7, 0x25, 0x97, 0xB9,
+ 0x14, 0x35, 0x2B, 0x48, 0xAC, 0x6F, 0x54, 0xDF,
+ 0xF2, 0xB4, 0xB0, 0xE0, 0xD3, 0x28, 0x0D, 0x66,
+ 0x46, 0x28, 0x0A, 0x16, 0x9C, 0x87, 0x73, 0xB7,
+ 0x9C, 0x2B, 0xB5, 0x43, 0xC9, 0x46, 0xB9, 0x1F,
+ 0x5F, 0x3C, 0x45, 0x03, 0x4B, 0xBF, 0x44, 0x4D,
+ 0xE1, 0x44, 0xDA, 0x54, 0xC5, 0x32, 0x3A, 0xFA,
+ 0x21, 0x5C, 0xAD, 0xD5, 0x1E, 0x1B, 0x54, 0x7C,
+ 0x9F, 0xEA, 0x92, 0x8C, 0xEA, 0x69, 0xC0, 0xCE,
+ 0xDA, 0x09, 0xAD, 0x95, 0xA0, 0x8E, 0x0B, 0x8E,
+ 0x10, 0x4F, 0x5B, 0x8F, 0xB8, 0x2D, 0xAC, 0x4C,
+ 0x94, 0x4B, 0x7C, 0x1E, 0xF1, 0x53, 0x20, 0x9B,
+ 0xD6, 0xC4, 0x92, 0x4C, 0x7F, 0xFB, 0x8B, 0x8E,
+ 0x40, 0x2F, 0x24, 0xA3, 0x4E, 0x46, 0x64, 0xF4,
+ 0xC6, 0x35, 0x0F, 0xC7, 0x40, 0x55, 0x43, 0xAF,
+ 0x7E, 0x91, 0x76, 0x48, 0x6F, 0x97, 0x7A, 0xF8,
+ 0x32, 0x1E, 0xD3, 0x5B, 0xBC, 0x19, 0xB5, 0x48,
+ 0xFA, 0x4F, 0x52, 0x77, 0x5B, 0x9E, 0xA2, 0xC8,
+ 0x9A, 0x83, 0x30, 0x8D, 0x9F, 0x0B, 0x6F, 0xA8,
+ 0x2E, 0x84, 0xCC, 0xC1, 0x50, 0x96, 0x46, 0xAE,
+ 0x73, 0x91, 0x7D, 0xCD, 0x88, 0xAB, 0x67, 0x3F,
+ 0x66, 0x3A, 0x8D, 0xB1, 0x89, 0x07, 0x93, 0xDB,
+ 0x42, 0x22, 0xDC, 0x13, 0xBD, 0xCD, 0xBB, 0x12,
+ 0x8D, 0x88, 0x44, 0x13, 0x22, 0x52, 0x81, 0xDC,
+ 0xEF, 0xA1, 0xE4, 0xA3, 0xA7, 0xBA, 0xEE, 0x98,
+ 0x79, 0x45, 0x29, 0x05, 0x65, 0x3D, 0xDC, 0xAF,
+ 0xA1, 0x37, 0x29, 0xFD, 0x05, 0xD1, 0x3A, 0xF7,
+ 0x32, 0x1D, 0x02, 0xEC, 0x28, 0x1E, 0x0F, 0x96,
+ 0xF3, 0x21, 0x19, 0x5F, 0x49, 0xB9, 0xEA, 0x9A,
+ 0xAD, 0x34, 0x58, 0xD1, 0xD9, 0xB1, 0x7D, 0xD2,
+ 0xEA, 0xED, 0x74, 0xE8, 0x25, 0x9A, 0x7B, 0xC5,
+ 0xC8, 0xD8, 0x76, 0xB6, 0xBC, 0x0B, 0x78, 0xCE,
+ 0xD9, 0xA6, 0xBB, 0x2F, 0x79, 0xA4, 0x45, 0x05,
+ 0x55, 0x6E, 0x20, 0x84, 0xEB, 0xC8, 0x70, 0xB0,
+ 0x3A, 0x2D, 0x06, 0x98, 0x29, 0x10, 0xB8, 0xC5,
+ 0xE9, 0xE4, 0xB6, 0xDE, 0x97, 0x9A, 0x0D, 0x8C,
+ 0xB6, 0x22, 0x16, 0x59, 0xAB, 0xB5, 0xD7, 0x14,
+ 0xAB, 0x08, 0x02, 0x27, 0x7B, 0xF7, 0x0E, 0xAC,
+ 0xC5, 0xAC, 0x4D, 0x7F, 0xE5, 0x65, 0x51, 0x40,
+ 0x44, 0x92, 0xB1, 0x6A, 0xB7, 0x00, 0x76, 0x89,
+ 0x6E, 0x08, 0x5F, 0x45, 0x2B, 0x53, 0x86, 0x86,
+ 0xA7, 0x85, 0xBC, 0x62, 0xAC, 0xAA, 0x82, 0x73,
+ 0x0A, 0xEB, 0x35, 0x16, 0x95, 0x26, 0xAB, 0x9E,
+ 0xE9, 0x64, 0x53, 0x99, 0x08, 0x31, 0xF5, 0x6B,
+ 0x1F, 0xFE, 0x47, 0x4B, 0x09, 0x33, 0x4F, 0xBF,
+ 0x1F, 0x0B, 0x4C, 0xB2, 0xB4, 0xA4, 0x17, 0xA9,
+ 0xAD, 0xC5, 0x62, 0x7C, 0xF1, 0x1B, 0xAE, 0x46,
+ 0xD3, 0xAC, 0xFD, 0x43, 0xFE, 0x79, 0xD0, 0x58,
+ 0x2F, 0x6C, 0x9F, 0xD0, 0x65, 0xA4, 0x64, 0x03,
+ 0xAF, 0x73, 0x46, 0x75, 0x7D, 0x49, 0x1B, 0x4C,
+ 0xFA, 0x49, 0xD8, 0x9A, 0xCC, 0x59, 0xC6, 0xC7,
+ 0xA1, 0x05, 0xC2, 0x32, 0xC8, 0x6C, 0x50, 0xA8,
+ 0x06, 0x58, 0xBE, 0x6C, 0x7D, 0x22, 0xD6, 0x0D,
+ 0x74, 0x40, 0xCE, 0xD6, 0x64, 0xD6, 0x47, 0xD0,
+ 0xBF, 0xF1, 0x5C, 0x54, 0xF9, 0x06, 0x3F, 0x3D,
+ 0x86, 0xBA, 0xF2, 0x0F, 0x5E, 0x2C, 0x01, 0xCC,
+ 0xD9, 0xC7, 0xB1, 0x4A, 0xB3, 0xD7, 0x26, 0xCC,
+ 0xC3, 0x7A, 0x74, 0x2C, 0xE1, 0x22, 0x65, 0xA0,
+ 0x5B, 0xCA, 0xF4, 0xE1, 0x7D, 0xE1, 0x56, 0xFD,
+ 0x94, 0x10, 0xC6, 0xA1, 0x4A, 0xE8, 0x6B, 0x34,
+ 0x4E, 0x71, 0x60, 0x77, 0x0F, 0x03, 0xDD, 0xFF,
+ 0xC8, 0x59, 0x54, 0x6C, 0xD4, 0x4A, 0x55, 0x24,
+ 0x35, 0x21, 0x60, 0x73, 0xDF, 0x6F, 0xE7, 0x3C,
+ 0xC2, 0xF0, 0xDA, 0xA9, 0xE5, 0x8C, 0xAC, 0xB6,
+ 0xFD, 0x2E, 0xF7, 0xA0, 0x18, 0xA7, 0x55, 0x47,
+ 0xD1, 0xCB, 0x9E, 0xAA, 0x58, 0x54, 0x3B, 0x37,
+ 0x18, 0xB5, 0xC1, 0xBB, 0x41, 0x59, 0xE4, 0x28,
+ 0x4A, 0x13, 0x90, 0x6A, 0xF7, 0xD1, 0xB3, 0x71,
+ 0xB6, 0x6E, 0xF6, 0x5D, 0x2E, 0x0E, 0x6C, 0x4A,
+ 0x7B, 0xF7, 0xB6, 0x21, 0xD4, 0xFC, 0x47, 0x8C,
+ 0x9B, 0x0A, 0x90, 0xAC, 0x11, 0x52, 0x86, 0x07,
+ 0x24, 0xDA, 0xA9, 0x49, 0x50, 0xD9, 0xDC, 0xE2,
+ 0x19, 0x87, 0x73, 0x88, 0xC3, 0xE4, 0xED, 0xC9,
+ 0x1C, 0xA8, 0x7E, 0x39, 0x48, 0x91, 0x10, 0xAB,
+ 0xFC, 0x3C, 0x1E, 0xEE, 0x08, 0xA1, 0xB9, 0xB2,
+ 0x02, 0x57, 0xB1, 0xD1, 0x35, 0x5E, 0x3D, 0x94,
+ 0xFB, 0x36, 0x27, 0x1A, 0x0E, 0x75, 0xFC, 0xBC,
+ 0xDB, 0xF3, 0xF5, 0x7C, 0x08, 0x39, 0xAA, 0xF4,
+ 0x2E, 0xEE, 0xCF, 0xCD, 0x2D, 0x70, 0xB8, 0x84,
+ 0xE6, 0x22, 0x5C, 0xC0, 0xB9, 0x33, 0xCB, 0x97,
+ 0xA1, 0xA3, 0xEE, 0x93, 0x71, 0xCF, 0xC9, 0x21,
+ 0x31, 0x7A, 0xEC, 0xE7, 0x70, 0xF2, 0xAA, 0x91,
+ 0xAA, 0x48, 0xAD, 0xAC, 0x03, 0xB1, 0x26, 0x52,
+ 0xBC, 0x65, 0x22, 0xA1, 0x09, 0x3D, 0xAB, 0x16,
+ 0x08, 0xBF, 0xCF, 0x3F, 0x59, 0x08, 0x6F, 0x68,
+ 0xEB, 0x8A, 0xB3, 0xCF, 0x77, 0x82, 0xFB, 0x25,
+ 0x78, 0x16, 0x4C, 0xDB, 0x72, 0xF5, 0xCF, 0x79,
+ 0x71, 0xE4, 0x4E, 0x23, 0x15, 0x7F, 0x1E, 0xA8,
+ 0x3E, 0xC0, 0x59, 0x91, 0x20, 0xAE, 0x2C, 0x1D,
+ 0x90, 0xC8, 0x49, 0x42, 0x48, 0x29, 0x82, 0x66,
+ 0x68, 0x49, 0x73, 0xDA, 0xE4, 0x28, 0xCD, 0x7B,
+ 0x4D, 0xE4, 0x23, 0x34, 0xB9, 0xE1, 0xB4, 0x42,
+ 0x67, 0x22, 0x5B, 0xEE, 0xE6, 0x74, 0x32, 0x6F,
+ 0x21, 0x9F, 0x97, 0x46, 0x03, 0xE1, 0xC9, 0x7A,
+ 0x14, 0x27, 0x30, 0xE1, 0xB2, 0x34, 0xE6, 0xAF,
+ 0x7B, 0xAA, 0xDD, 0x89, 0x04, 0x30, 0xD6, 0x78,
+ 0x0B, 0x3D, 0xC3, 0x69, 0xB0, 0x67, 0x4F, 0x4E,
+ 0x12, 0x21, 0x93, 0x2D, 0x79, 0xDD, 0x8B, 0xDB,
+ 0xEA, 0x90, 0x66, 0x54, 0xA8, 0x05, 0xF2, 0xE4,
+ 0x59, 0x8A, 0x96, 0x52, 0x30, 0xF0, 0x4E, 0x9A,
+ 0xE5, 0xD8, 0x72, 0x1C, 0x3B, 0x63, 0x02, 0xB9,
+ 0xC7, 0xA1, 0xDA, 0xC8, 0x6C, 0x48, 0xE0, 0xDE,
+ 0x59, 0x64, 0x89, 0x2C, 0xF9, 0xC8, 0x3B, 0x00,
+ 0xEC, 0xF2, 0x68, 0x51, 0x67, 0x05, 0x85, 0xAF,
+ 0xB8, 0xD5, 0x65, 0xEE, 0x73, 0x26, 0x88, 0xFB,
+ 0xA9, 0xD6, 0x6C, 0x68, 0x9D, 0x9F, 0x23, 0x6A,
+ 0x10, 0x24, 0x82, 0xB2, 0xB7, 0x40, 0x19, 0x3E,
+ 0x6F, 0xA2, 0xD5, 0x2C, 0x6E, 0x8D, 0xE9, 0x33,
+ 0x6E, 0x24, 0x94, 0x05, 0xE9, 0x2D, 0xD9, 0x3A,
+ 0x8C, 0xE5, 0xCC, 0x1D, 0x3F, 0xB8, 0x71, 0xA8,
+ 0x98, 0x33, 0xBB, 0x1A, 0xAC, 0x41, 0x0A, 0x04,
+ 0xFE, 0x4D, 0x46, 0x17, 0x8A, 0xCB, 0xF3, 0x4B,
+ 0x97, 0x02, 0xCC, 0x9D, 0x11, 0xF1, 0xBC, 0xA9,
+ 0xC1, 0xD1, 0xB6, 0xD6, 0x7B, 0x5F, 0x9D, 0x22,
+ 0x86, 0x71, 0xEC, 0x42, 0x53, 0xB7, 0x85, 0x30,
+ 0xAF, 0x1D, 0x01, 0xA7, 0xBF, 0x72, 0xC2, 0xC6,
+ 0xC9, 0xB8, 0xD8, 0xC7, 0xE9, 0xC4, 0xBA, 0xC5,
+ 0xB1, 0x8A, 0xB8, 0x62, 0xBF, 0x75, 0x75, 0x69,
+ 0xF8, 0x8D, 0x7E, 0xD9, 0xD2, 0x28, 0xB5, 0x40,
+ 0xCE, 0xCB, 0xB8, 0x74, 0x31, 0x40, 0x7B, 0x0D,
+ 0x73, 0x98, 0x99, 0x12, 0xB7, 0x75, 0x3E, 0xBC,
+ 0xAE, 0x48, 0xCA, 0xA9, 0x1E, 0xA7, 0x95, 0x31,
+ 0x87, 0x0F, 0x14, 0x52, 0xB6, 0x8E, 0x42, 0x50,
+ 0xB2, 0x76, 0x75, 0xD8, 0x7E, 0x66, 0x23, 0x13,
+ 0x8B, 0x29, 0xAA, 0x13, 0xCA, 0x8A, 0xD8, 0x9B,
+ 0x7B, 0x38, 0xD2, 0xE8, 0x67, 0xD1, 0x89, 0x25,
+ 0x9C, 0x63, 0x2F, 0xC3, 0x26, 0xC7, 0x74, 0x83,
+ 0x05, 0xED, 0x67, 0x02, 0x85, 0xAD, 0x1D, 0x0E,
+ 0xA9, 0xD6, 0xE1, 0xC7, 0x39, 0xA0, 0x6E, 0x72,
+ 0xCE, 0x56, 0x6C, 0xB8, 0x4A, 0xDE, 0x11, 0xA2,
+ 0xBF, 0xC1, 0x84, 0x98, 0x8F, 0xCA, 0x79, 0x74,
+ 0xCA, 0x9F, 0x45, 0x16, 0xBC, 0xB1, 0xF4, 0x03,
+ 0x76, 0x6E, 0xD5, 0x46, 0x60, 0xD7, 0x1D, 0xF0,
+ 0x87, 0x29, 0x63, 0x07, 0x06, 0xB9, 0xC2, 0x69,
+ 0x6D, 0xF9, 0x4B, 0x30, 0x96, 0x83, 0xB8, 0xC5,
+ 0xBE, 0x3A, 0xBA, 0xD0, 0x3E, 0x2B, 0x04, 0x16,
+ 0x6A, 0x00, 0x3B, 0x1A, 0x8E, 0xF8, 0xF6, 0x21,
+ 0x01, 0xD6, 0x08, 0x41, 0x74, 0xA2, 0xFC, 0x36,
+ 0xED, 0x11, 0x51, 0x5A, 0x4A, 0x21, 0x1A, 0x03,
+ 0x11, 0x95, 0x11, 0xF6, 0x73, 0x38, 0x67, 0xFC,
+ 0xF1, 0x2B, 0x22, 0x54, 0x65, 0x40, 0x7D, 0x8C,
+ 0x13, 0xC4, 0x46, 0x87, 0x09, 0x2B, 0xB5, 0xA1,
+ 0x82, 0x49, 0x46, 0x56, 0xF5, 0x5F, 0xF1, 0x04,
+ 0xD8, 0x6F, 0xDB, 0x38, 0xAD, 0xF4, 0x1A, 0xA3,
+ 0xFF, 0x7C, 0xC7, 0xA6, 0xAF, 0x87, 0x5C, 0x8C,
+ 0xEA, 0x3C, 0x9D, 0x7A, 0x4A, 0xD8, 0xA8, 0x66,
+ 0xDB, 0xBF, 0x12, 0x58, 0x98, 0x8E, 0xBA, 0x6F,
+ 0xAF, 0x20, 0xDA, 0xEE, 0x82, 0x34, 0x2F, 0x33,
+ 0x88, 0x98, 0xBA, 0xB2, 0x54, 0x7F, 0x9E, 0x63,
+ 0x19, 0x6C, 0x7D, 0xCE, 0x85, 0xF8, 0xB6, 0x77,
+ 0xCB, 0x38, 0x1F, 0xB1, 0x79, 0xBD, 0xED, 0x32,
+ 0xE3, 0xB9, 0x40, 0xEF, 0x3E, 0x6C, 0x29, 0x88,
+ 0x70, 0x99, 0x47, 0xA6, 0x4A, 0x1C, 0xCC, 0x0B,
+ 0x9B, 0x72, 0xA9, 0x29, 0x83, 0x4C, 0xDE, 0x4F,
+ 0x65, 0x4E, 0xCE, 0xBD, 0xFA, 0x76, 0x8D, 0xA6,
+ 0x1A, 0xD8, 0x66, 0xFE, 0xA4, 0x2A, 0x61, 0x50,
+ 0xEE, 0x15, 0xF1, 0xF0, 0x9D, 0xFF, 0xEC, 0xEE,
+ 0x00, 0x03, 0xFE, 0xAC, 0x53, 0x02, 0xCC, 0x87,
+ 0xB1, 0xA2, 0xD8, 0x34, 0x2C, 0xEC, 0xA6, 0x4C,
+ 0x02, 0xC0, 0xC1, 0x72, 0xD6, 0x54, 0x35, 0x24,
+ 0x25, 0x8B, 0xEC, 0xDA, 0x47, 0x5F, 0x5D, 0x7E,
+ 0xD8, 0x01, 0x51, 0xDD, 0x8F, 0xB4, 0x48, 0xDD,
+ 0x94, 0x99, 0x95, 0x77, 0xB3, 0x42, 0x14, 0xEB,
+ 0x26, 0x61, 0xE9, 0x22, 0xE3, 0x07, 0x73, 0xFB,
+ 0xEF, 0x38, 0x55, 0x35, 0x8F, 0xCC, 0x30, 0x1E,
+ 0x38, 0xE0, 0x35, 0xF4, 0x9A, 0x7C, 0xCF, 0x38,
+ 0x0B, 0x9E, 0xF4, 0x88, 0x4A, 0xEA, 0xF2, 0x67,
+ 0x9F, 0x61, 0x40, 0x34, 0x09, 0xDC, 0xBF, 0xFB,
+ 0x22, 0x27, 0x04, 0x8B, 0x8D, 0x85, 0x7F, 0xB2,
+ 0x29, 0x62, 0x25, 0x73, 0x7F, 0x46, 0x2E, 0xA3,
+ 0x8E, 0xAF, 0xEC, 0x55, 0x98, 0x1A, 0xEE, 0x29,
+ 0xA0, 0x1A, 0x5F, 0xFE, 0x5D, 0xA5, 0x76, 0x93,
+ 0xAB, 0x57, 0x56, 0xEA, 0xDB, 0x39, 0xAC, 0x48,
+ 0xBE, 0x95, 0x92, 0x2B, 0xC6, 0xE1, 0x2F, 0x36,
+ 0x4B, 0x08, 0x01, 0x90, 0x50, 0xD8, 0xFA, 0xF9,
+ 0x94, 0x4E, 0x76, 0x9B, 0x72, 0x59, 0xC2, 0x2F,
+ 0x61, 0x04, 0x0A, 0x9E, 0x28, 0xE5, 0x24, 0x1E,
+ 0x79, 0xCF, 0x8D, 0xB6, 0x52, 0xA7, 0x79, 0x5F,
+ 0x44, 0x98, 0xD5, 0x0E, 0x6E, 0x4B, 0x64, 0x9B,
+ },
+ .len = 2048
+ },
+ .auth_tags[0] = {
+ .size = 64,
+ .data = { 0x4d, 0x5c, 0x2a, 0xf3, 0x27, 0xcd, 0x64, 0xa6,
+ 0x2c, 0xf3, 0x5a, 0xbd, 0x2b, 0xa6, 0xfa, 0xb4 },
+ .len = 16
+ },
+ .auth_tags[1] = {
+ .size = 128,
+ .data = { 0xE9, 0xA9, 0x75, 0xB6, 0xEF, 0x6F, 0x8C, 0xF1,
+ 0xB3, 0xA9, 0x19, 0xA4, 0xAE, 0x66, 0xBD, 0x9E },
+ .len = 16
+ },
+ .auth_tags[2] = {
+ .size = 256,
+ .data = { 0x29, 0xC3, 0x18, 0x96, 0x54, 0xCB, 0xF5, 0xAA,
+ 0x4E, 0x62, 0xB6, 0xFF, 0x45, 0xA6, 0x18, 0x0C },
+ .len = 16
+ },
+ .auth_tags[3] = {
+ .size = 512,
+ .data = { 0x3B, 0xD7, 0xC3, 0x5F, 0xE4, 0x1B, 0xC2, 0xBC,
+ 0xE9, 0xAC, 0xF2, 0xCE, 0xA7, 0x7B, 0x1D, 0x70 },
+ .len = 16
+ },
+ .auth_tags[4] = {
+ .size = 1024,
+ .data = { 0xCC, 0xBB, 0xBC, 0xCF, 0x86, 0x01, 0x4D, 0x93,
+ 0x4B, 0x68, 0x55, 0x19, 0xA1, 0x40, 0xCD, 0xEA },
+ .len = 16
+ },
+ .auth_tags[5] = {
+ .size = 1536,
+ .data = { 0x67, 0x31, 0x11, 0xA2, 0x58, 0xB5, 0x1C, 0x23,
+ 0xC0, 0x41, 0x05, 0x30, 0xC6, 0xBA, 0xFA, 0x88 },
+ .len = 16
+ },
+ .auth_tags[6] = {
+ .size = 2048,
+ .data = { 0x03, 0x9C, 0x6B, 0xB9, 0x57, 0xBF, 0x6E, 0x86,
+ 0x3A, 0x09, 0x5F, 0x08, 0xA9, 0xE4, 0xF2, 0x1F },
+ .len = 16
+ },
+ .auth_tag = {
+ .data = {
+ 0x03, 0x9C, 0x6B, 0xB9, 0x57, 0xBF, 0x6E, 0x86,
+ 0x3A, 0x09, 0x5F, 0x08, 0xA9, 0xE4, 0xF2, 0x1F
+ },
+ .len = 16
+ },
+};
+
+static const struct gmac_test_data gmac_test_case_4 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88
+ },
+ .len = 12
+ },
+ .aad = {
+ .data = gmac_plaintext,
+ .len = GMAC_LARGE_PLAINTEXT_LENGTH
+ },
+ .plaintext = {
+ .data = NULL,
+ .len = 0
+ },
+ .gmac_tag = {
+ .data = {
+ 0x3f, 0x07, 0xcb, 0xb9, 0x86, 0x3a, 0xea, 0xc2,
+ 0x2f, 0x3a, 0x2a, 0x93, 0xd8, 0x09, 0x6b, 0xda
+ },
+ .len = 16
+ }
+};
+
+static const struct gcm_test_data gcm_test_case_SGL_1 = {
+ .key = {
+ .data = {
+ 0xfe, 0xff, 0xe9, 0x92, 0x86, 0x65, 0x73, 0x1c,
+ 0x6d, 0x6a, 0x8f, 0x94, 0x67, 0x30, 0x83, 0x08 },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0xca, 0xfe, 0xba, 0xbe, 0xfa, 0xce, 0xdb, 0xad,
+ 0xde, 0xca, 0xf8, 0x88 },
+ .len = 12
+ },
+ .aad = {
+ .data = gcm_aad_zero_text,
+ .len = 0
+ },
+ .plaintext = {
+ .data = {
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0xd9, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9a,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x55,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0xba, 0x63, 0x7b, 0x39, 0x1a, 0xaf, 0xd2, 0x54,
+ 0xd7, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9c,
+ 0xd8, 0x31, 0x32, 0x25, 0xf8, 0x84, 0x06, 0xe5,
+ 0xa5, 0x59, 0x09, 0xc5, 0xaf, 0xf5, 0x26, 0x9b,
+ 0x86, 0xa7, 0xa9, 0x53, 0x15, 0x34, 0xf7, 0xda,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+ 0x2e, 0x4c, 0x30, 0x3d, 0x8a, 0x31, 0x8a, 0x72,
+ 0x1c, 0x3c, 0x0c, 0x95, 0x95, 0x68, 0x09, 0x53,
+ 0x2f, 0xcf, 0x0e, 0x24, 0x49, 0xa6, 0xb5, 0x25,
+ 0xb1, 0x6a, 0xed, 0xf5, 0xaa, 0x0d, 0xe6, 0x57,
+
+ },
+ .len = 3120
+ },
+ .ciphertext = {
+ .data = {
+ 0x42, 0x83, 0x1E, 0xC2, 0x21, 0x77, 0x74, 0x24,
+ 0x4B, 0x72, 0x21, 0xB7, 0x84, 0xD0, 0xD4, 0x9C,
+ 0xE3, 0xAA, 0x21, 0x2F, 0x2C, 0x02, 0xA4, 0xE0,
+ 0x35, 0xC1, 0x7E, 0x23, 0x29, 0xAC, 0xA1, 0x2E,
+ 0x21, 0xD5, 0x14, 0xB2, 0x54, 0x66, 0x93, 0x1C,
+ 0x7D, 0x8F, 0x6A, 0x5A, 0xAC, 0x84, 0xAA, 0x05,
+ 0x1B, 0xA3, 0x0B, 0x39, 0x6A, 0x0A, 0xAC, 0x97,
+ 0x3D, 0x58, 0xE0, 0x91, 0x47, 0x3F, 0x59, 0x85,
+ 0x05, 0x99, 0x55, 0xE1, 0x36, 0x76, 0xB7, 0x14,
+ 0x1D, 0xF0, 0xF6, 0x8C, 0x65, 0xD5, 0xAD, 0xFA,
+ 0x90, 0x7F, 0x5D, 0xA2, 0xD6, 0xFD, 0xD0, 0xE5,
+ 0x0D, 0x9B, 0x68, 0x21, 0x49, 0x42, 0x6E, 0x13,
+ 0xEC, 0x22, 0x50, 0x2A, 0x30, 0x47, 0x49, 0xA1,
+ 0x7F, 0xC3, 0x09, 0xE0, 0x56, 0x91, 0xC4, 0x54,
+ 0x70, 0xD7, 0x19, 0x40, 0xCA, 0x6B, 0x65, 0x27,
+ 0x3E, 0xE9, 0xD1, 0x0F, 0x1C, 0xB5, 0x45, 0x0C,
+ 0x29, 0xE7, 0xCF, 0x94, 0x10, 0xBF, 0xA2, 0xFA,
+ 0x86, 0x20, 0x3F, 0x6E, 0xE9, 0x95, 0x03, 0x5C,
+ 0x46, 0x11, 0x75, 0xD5, 0x37, 0x71, 0x7F, 0xE0,
+ 0xBC, 0x9F, 0xC8, 0xE9, 0xB1, 0x08, 0x2C, 0x59,
+ 0x6E, 0x51, 0x4A, 0x83, 0x38, 0xC1, 0xED, 0xE2,
+ 0x2E, 0x88, 0x90, 0xA5, 0x7D, 0xA4, 0x93, 0x9A,
+ 0x30, 0xD6, 0x96, 0x34, 0x0F, 0xC4, 0xD1, 0x7E,
+ 0xC9, 0x8F, 0xC5, 0xBB, 0x80, 0x50, 0x85, 0x73,
+ 0x8B, 0x7C, 0x0A, 0xDA, 0xD3, 0x37, 0x1C, 0x8B,
+ 0x1E, 0xAE, 0x29, 0x54, 0x05, 0x53, 0x48, 0xE5,
+ 0x94, 0xF1, 0xC5, 0x1A, 0x60, 0xDC, 0x61, 0x43,
+ 0xCD, 0x45, 0x4C, 0x6B, 0x95, 0xAD, 0x52, 0xE0,
+ 0x9E, 0xD1, 0x4E, 0xCC, 0x03, 0x27, 0x50, 0xD4,
+ 0xEB, 0xBD, 0x71, 0xA6, 0xD0, 0x2B, 0x23, 0xC0,
+ 0x9E, 0x5F, 0x34, 0xFD, 0xDE, 0xC1, 0x43, 0x35,
+ 0x77, 0xFB, 0xFD, 0xDF, 0xA0, 0x28, 0x42, 0x3B,
+ 0x0F, 0x2D, 0x31, 0xB4, 0x7A, 0xA8, 0x2F, 0xDF,
+ 0x58, 0xB5, 0x00, 0x19, 0x8D, 0xEB, 0x2C, 0xBB,
+ 0xAE, 0xAD, 0x74, 0x7F, 0x25, 0xAA, 0x24, 0x3E,
+ 0xCD, 0x89, 0x5E, 0x05, 0xD3, 0xBA, 0x0E, 0x9A,
+ 0x34, 0x7B, 0xE0, 0x11, 0xD2, 0xBA, 0x5A, 0x51,
+ 0xB4, 0x0D, 0xEE, 0x61, 0x73, 0xFC, 0xD2, 0x01,
+ 0x2D, 0x52, 0x3E, 0x37, 0x55, 0x3F, 0x58, 0xA8,
+ 0x1C, 0x8F, 0x1D, 0xD6, 0x3C, 0x39, 0x06, 0x18,
+ 0x65, 0x60, 0x55, 0x19, 0xAD, 0x1E, 0x78, 0xE9,
+ 0xF7, 0xF5, 0xFC, 0xCD, 0x5F, 0xF1, 0x34, 0x0C,
+ 0xA6, 0xFD, 0x1E, 0x9E, 0xB3, 0xCE, 0x2E, 0x10,
+ 0xFB, 0x98, 0xDD, 0x0E, 0x09, 0x5D, 0x4E, 0x58,
+ 0x75, 0x9A, 0x54, 0x74, 0xFB, 0x40, 0x76, 0x55,
+ 0x0E, 0x3E, 0xA4, 0xCE, 0x56, 0xA5, 0xE0, 0x53,
+ 0xB7, 0xAD, 0x36, 0x99, 0x6E, 0xCD, 0xC2, 0x90,
+ 0x6E, 0xEA, 0xBC, 0x21, 0xAC, 0x31, 0xFF, 0x2B,
+ 0x00, 0xA7, 0x5E, 0xC1, 0x7A, 0xF1, 0xAB, 0x24,
+ 0xA3, 0x40, 0x0B, 0xEB, 0x16, 0x62, 0x35, 0x1E,
+ 0xE9, 0xA5, 0xD3, 0x7E, 0xAA, 0x7E, 0x28, 0xA8,
+ 0x3F, 0xD8, 0x0A, 0x04, 0x12, 0x0F, 0xFF, 0x68,
+ 0x10, 0x85, 0x22, 0xD6, 0x05, 0x6A, 0x3A, 0xCB,
+ 0xC0, 0xCF, 0x8C, 0x20, 0xF0, 0x34, 0x32, 0xAA,
+ 0x76, 0x93, 0xE2, 0x23, 0x4F, 0xF2, 0xE6, 0x84,
+ 0x3B, 0xD4, 0xF3, 0x5D, 0xF3, 0x17, 0xEE, 0x27,
+ 0x67, 0xC3, 0x01, 0x6F, 0x32, 0xDE, 0xF6, 0xF6,
+ 0x87, 0xE9, 0x82, 0xEF, 0x1F, 0xA1, 0xE2, 0x68,
+ 0xF8, 0x5D, 0x49, 0x92, 0x47, 0x01, 0x75, 0x87,
+ 0x52, 0xD3, 0x54, 0xAE, 0x3B, 0xB7, 0xB2, 0x07,
+ 0x0F, 0x62, 0x7B, 0xF7, 0x50, 0x97, 0x9A, 0x4A,
+ 0x98, 0x65, 0x23, 0xA3, 0x5D, 0x76, 0x0A, 0x9C,
+ 0x6C, 0xE7, 0x89, 0xAD, 0x86, 0x70, 0xE7, 0x16,
+ 0x5F, 0x2F, 0x2E, 0x97, 0x29, 0x31, 0xF0, 0x60,
+ 0x33, 0x2C, 0xD7, 0xAA, 0xD6, 0xF0, 0x50, 0xB8,
+ 0xBD, 0x29, 0xA8, 0xA9, 0xAC, 0x5E, 0x0A, 0x3A,
+ 0x59, 0x34, 0x9A, 0x92, 0x25, 0x71, 0xB3, 0x16,
+ 0xC5, 0xD3, 0xA4, 0x15, 0x75, 0x9A, 0xB5, 0x78,
+ 0x6E, 0xCF, 0xAF, 0xC0, 0x39, 0x28, 0x44, 0x21,
+ 0xBB, 0xE8, 0x32, 0xAB, 0xCB, 0xF8, 0x4B, 0xE7,
+ 0x63, 0x9C, 0x56, 0xE7, 0xB2, 0xD6, 0x23, 0x17,
+ 0xDE, 0x92, 0xE9, 0x22, 0xC3, 0x36, 0xA5, 0xAC,
+ 0xA9, 0x98, 0x34, 0xAA, 0xFB, 0x03, 0x33, 0x33,
+ 0xBE, 0xD8, 0x22, 0x7F, 0xFA, 0x34, 0xA0, 0x35,
+ 0xC8, 0xA0, 0xDC, 0x35, 0x82, 0x06, 0x58, 0xE6,
+ 0xBF, 0x7C, 0x4F, 0x63, 0x5D, 0x62, 0x64, 0x67,
+ 0x0D, 0x07, 0x7F, 0x24, 0x4A, 0x23, 0xBC, 0x35,
+ 0xE0, 0x92, 0x6F, 0x51, 0xE7, 0x25, 0x97, 0xB9,
+ 0x14, 0x35, 0x2B, 0x48, 0xAC, 0x6F, 0x54, 0xDF,
+ 0xF2, 0xB4, 0xB0, 0xE0, 0xD3, 0x28, 0x0D, 0x67,
+ 0x48, 0x28, 0x0A, 0x16, 0x9C, 0x87, 0x73, 0xB7,
+ 0x9C, 0x2B, 0xB5, 0x43, 0xC9, 0x46, 0xB9, 0x19,
+ 0x01, 0xAA, 0xDE, 0x75, 0xA6, 0x0F, 0xB5, 0x72,
+ 0x6A, 0x51, 0xE3, 0xAC, 0xE0, 0xF6, 0x96, 0x13,
+ 0xBB, 0xC7, 0x08, 0x13, 0x9E, 0x47, 0xAA, 0xF5,
+ 0x9E, 0x69, 0xAC, 0x95, 0x29, 0xFE, 0xFF, 0x99,
+ 0xB2, 0x52, 0x72, 0x45, 0xF2, 0x07, 0xEB, 0x3C,
+ 0x0F, 0x75, 0x29, 0x73, 0x0D, 0x77, 0x58, 0x83,
+ 0xCB, 0xDD, 0xE7, 0x68, 0x1C, 0xE3, 0xD1, 0xA4,
+ 0x5D, 0xD1, 0xAB, 0xB4, 0x5A, 0x3F, 0x27, 0x66,
+ 0xDA, 0xB4, 0x81, 0x65, 0xCE, 0x1A, 0x9A, 0x7D,
+ 0xC7, 0xB6, 0x31, 0xDE, 0x83, 0xC2, 0x7C, 0xF8,
+ 0xD3, 0xC7, 0x97, 0x28, 0x50, 0xF2, 0x95, 0xFC,
+ 0xA7, 0xB2, 0xA6, 0x46, 0xEF, 0x10, 0xD2, 0x38,
+ 0x93, 0x14, 0x8D, 0xA7, 0x09, 0x17, 0x42, 0x7A,
+ 0x85, 0xB9, 0x42, 0x71, 0x2A, 0x51, 0x9B, 0x66,
+ 0x71, 0x12, 0x57, 0xB7, 0xBD, 0x26, 0xB7, 0x91,
+ 0xF8, 0x84, 0x44, 0x35, 0xAD, 0x6F, 0xCB, 0xD7,
+ 0xFC, 0xA1, 0x28, 0x77, 0x09, 0x5B, 0x6D, 0x52,
+ 0x43, 0xA1, 0xE2, 0x0A, 0x7E, 0x5A, 0x84, 0x45,
+ 0x20, 0xDE, 0xA5, 0x73, 0x1D, 0x37, 0x6E, 0xD8,
+ 0x7A, 0x0D, 0x91, 0xBE, 0xF4, 0xB3, 0x89, 0xE9,
+ 0x1F, 0x1E, 0xF6, 0xD5, 0x37, 0xB4, 0x3C, 0x1D,
+ 0xBE, 0x0D, 0x5B, 0x01, 0xB0, 0x8B, 0xCE, 0x3E,
+ 0x6D, 0x8B, 0x99, 0x9A, 0xC5, 0xAE, 0xFE, 0xA9,
+ 0x78, 0x34, 0x20, 0xA7, 0x6C, 0x7D, 0x46, 0x72,
+ 0x37, 0xAF, 0xFD, 0x17, 0x59, 0xED, 0x83, 0x5B,
+ 0xEB, 0x6E, 0x4A, 0xF1, 0xE6, 0x0D, 0x44, 0x92,
+ 0x65, 0x8E, 0x97, 0xD6, 0x83, 0x6E, 0x97, 0xCA,
+ 0x4C, 0x0A, 0xCE, 0x32, 0x2A, 0xAD, 0x22, 0x73,
+ 0xCB, 0xCB, 0xC3, 0x55, 0x08, 0x63, 0x23, 0xC2,
+ 0x31, 0x24, 0x90, 0x54, 0x99, 0xB2, 0x8C, 0xC7,
+ 0x8A, 0xB6, 0xFF, 0xC2, 0x75, 0xB1, 0xD9, 0x3D,
+ 0x95, 0xDC, 0xB6, 0xCF, 0x11, 0x74, 0x06, 0x54,
+ 0x03, 0xE3, 0x9B, 0x49, 0xE4, 0xF2, 0x73, 0x04,
+ 0xF7, 0xDC, 0x71, 0xD7, 0xFA, 0x3C, 0xD2, 0x61,
+ 0x77, 0x61, 0xB3, 0xDB, 0x6B, 0xCE, 0xCA, 0xFF,
+ 0xF0, 0xAD, 0xBC, 0x94, 0xC8, 0xF8, 0xD5, 0xF4,
+ 0x38, 0xA3, 0x61, 0xAA, 0x8C, 0x96, 0xEE, 0x56,
+ 0xAC, 0xB4, 0x42, 0xBA, 0x1A, 0xE1, 0x70, 0x98,
+ 0x1F, 0x9A, 0x6F, 0x98, 0xB9, 0x13, 0x46, 0xAB,
+ 0x0B, 0xCD, 0xA3, 0x7B, 0x0C, 0xCB, 0x8F, 0x72,
+ 0x23, 0xCF, 0x9E, 0xD8, 0xBB, 0x3F, 0x32, 0x27,
+ 0x54, 0xB8, 0x60, 0x64, 0x83, 0xAE, 0x22, 0xD1,
+ 0x6A, 0xC9, 0xF8, 0x13, 0xC4, 0xE4, 0xFF, 0x97,
+ 0xD8, 0x92, 0xA3, 0xD1, 0xD4, 0x86, 0xD7, 0xC3,
+ 0xBB, 0x40, 0xA2, 0x45, 0x78, 0xB1, 0xDB, 0x80,
+ 0xC6, 0x8D, 0x0A, 0xF0, 0xC3, 0xC2, 0xE3, 0x48,
+ 0xA1, 0x05, 0xC2, 0x32, 0xC8, 0x6C, 0x50, 0xA8,
+ 0x06, 0x58, 0xBE, 0x6C, 0x7D, 0x22, 0xD6, 0x0D,
+ 0x74, 0x40, 0xCE, 0xD6, 0x64, 0xD6, 0x47, 0xD0,
+ 0xBF, 0xF1, 0x5C, 0x54, 0xF9, 0x06, 0x3F, 0x3D,
+ 0x86, 0xBA, 0xF2, 0x0F, 0x5E, 0x2C, 0x01, 0xCC,
+ 0xD9, 0xC7, 0xB1, 0x4A, 0xB3, 0xD7, 0x26, 0xCC,
+ 0xC3, 0x7A, 0x74, 0x2C, 0xE1, 0x22, 0x65, 0xA0,
+ 0x5B, 0xCA, 0xF4, 0xE1, 0x7D, 0xE1, 0x56, 0xFD,
+ 0x95, 0x10, 0xC6, 0xA1, 0x4A, 0xE8, 0x6B, 0x34,
+ 0x4E, 0x71, 0x60, 0x77, 0x0F, 0x03, 0xDD, 0xFE,
+ 0xC8, 0x59, 0x54, 0x6C, 0xD4, 0x4A, 0x55, 0x24,
+ 0x35, 0x21, 0x60, 0x73, 0xDF, 0x6F, 0xE7, 0x3C,
+ 0xC2, 0xF0, 0xDA, 0xA9, 0xE5, 0x8C, 0xAC, 0xB6,
+ 0xFD, 0x2E, 0xF7, 0xA0, 0x18, 0xA7, 0x55, 0x47,
+ 0xD1, 0xCB, 0x9E, 0xAA, 0x58, 0x54, 0x3B, 0x37,
+ 0x18, 0xB5, 0xC1, 0xBB, 0x41, 0x59, 0xE4, 0x29,
+ 0x44, 0x13, 0x90, 0x6A, 0xF7, 0xD1, 0xB3, 0x71,
+ 0xB6, 0x6E, 0xF6, 0x5D, 0x2E, 0x0E, 0x6C, 0x4C,
+ 0x7B, 0xF7, 0xB6, 0x21, 0xD4, 0xFC, 0x47, 0x8C,
+ 0x9B, 0x0A, 0x90, 0xAC, 0x11, 0x52, 0x86, 0x07,
+ 0x24, 0xDA, 0xA9, 0x49, 0x50, 0xD9, 0xDC, 0xE2,
+ 0x19, 0x87, 0x73, 0x88, 0xC3, 0xE4, 0xED, 0xC9,
+ 0x1C, 0xA8, 0x7E, 0x39, 0x48, 0x91, 0x10, 0xAB,
+ 0xFC, 0x3C, 0x1E, 0xEE, 0x08, 0xA1, 0xB9, 0xB4,
+ 0xF4, 0xA9, 0x8D, 0xD0, 0x84, 0x7C, 0x8E, 0x54,
+ 0xEF, 0x05, 0xC3, 0x2A, 0x0B, 0x8D, 0x3C, 0x71,
+ 0xE7, 0x37, 0x27, 0x16, 0x07, 0xA2, 0x8F, 0x7A,
+ 0x86, 0x05, 0x56, 0xA3, 0xB2, 0x75, 0xC5, 0x2C,
+ 0xD4, 0x52, 0x60, 0x68, 0xA6, 0x6A, 0x48, 0xB6,
+ 0x92, 0x50, 0xEC, 0x22, 0xAD, 0x01, 0x75, 0x57,
+ 0xAF, 0xDF, 0x0F, 0x36, 0x93, 0x59, 0xF9, 0xE3,
+ 0xA1, 0x41, 0x3B, 0x60, 0xB3, 0x13, 0x12, 0x50,
+ 0x4B, 0x18, 0x20, 0xB9, 0x7B, 0x88, 0x27, 0x81,
+ 0xB1, 0xDA, 0xCA, 0x6F, 0x63, 0x95, 0x40, 0xA1,
+ 0x42, 0xE2, 0x14, 0xB8, 0x2B, 0x10, 0xB9, 0xDA,
+ 0xE7, 0x30, 0x91, 0x13, 0x52, 0xC9, 0xA3, 0x5C,
+ 0xD7, 0xBB, 0x39, 0x8F, 0x9A, 0xB8, 0xC5, 0xAF,
+ 0xC6, 0x3E, 0x65, 0x90, 0x91, 0x8C, 0x9F, 0xDD,
+ 0x84, 0xFB, 0xAD, 0x72, 0x4D, 0xD1, 0x42, 0xAD,
+ 0x0A, 0x1B, 0x3A, 0xC6, 0x06, 0x03, 0x19, 0xCB,
+ 0x31, 0x8C, 0x18, 0xD4, 0xEE, 0x90, 0x94, 0x3C,
+ 0x44, 0xDC, 0xFB, 0x78, 0x5C, 0xB5, 0xE3, 0x2F,
+ 0x89, 0x74, 0x0E, 0x28, 0x9C, 0xE4, 0xB4, 0xD2,
+ 0xE3, 0x5A, 0x32, 0xF9, 0xC0, 0x81, 0x6A, 0x38,
+ 0xC2, 0xCF, 0xD8, 0xD9, 0x3E, 0xAD, 0xF9, 0xB1,
+ 0xA2, 0x55, 0x64, 0x1E, 0xEC, 0xF5, 0x0D, 0xB1,
+ 0x8D, 0x07, 0x4E, 0xE5, 0x59, 0xE1, 0xE7, 0xFE,
+ 0x4C, 0xCF, 0x11, 0xF8, 0x27, 0xC2, 0x29, 0xE2,
+ 0xAF, 0x74, 0xAA, 0x53, 0x81, 0xD2, 0xFD, 0x5A,
+ 0xF1, 0xEB, 0x96, 0x2C, 0x3E, 0x9B, 0xC2, 0x74,
+ 0xFB, 0x65, 0x08, 0xA2, 0x63, 0xD3, 0xC5, 0x51,
+ 0xAF, 0x19, 0x8B, 0x34, 0x8B, 0x7D, 0xB7, 0x97,
+ 0x55, 0x97, 0x6D, 0x01, 0x5D, 0x98, 0xAA, 0x67,
+ 0x11, 0xBD, 0xC2, 0x99, 0x2F, 0xB4, 0xCA, 0x04,
+ 0x36, 0xF0, 0xB1, 0xA0, 0xBD, 0xA3, 0x4F, 0x4F,
+ 0xB6, 0x7B, 0xF5, 0x1E, 0x38, 0x87, 0xC2, 0x38,
+ 0x99, 0x5C, 0xE9, 0x2D, 0xDF, 0xAF, 0x5A, 0xF3,
+ 0x7A, 0x17, 0x70, 0x35, 0xEC, 0xD5, 0x19, 0xF7,
+ 0xB0, 0x21, 0x1E, 0x77, 0x30, 0x23, 0x54, 0x26,
+ 0x61, 0x4E, 0xB9, 0x02, 0xDE, 0xF4, 0x86, 0x93,
+ 0x47, 0x28, 0x43, 0x47, 0xB0, 0x56, 0xDC, 0x84,
+ 0x3E, 0x6A, 0x6B, 0xEA, 0x4D, 0x63, 0xFE, 0x56,
+ 0x5E, 0xF7, 0x6B, 0x1E, 0x5B, 0x63, 0xF1, 0x07,
+ 0x20, 0x2E, 0x9B, 0xEE, 0xDC, 0x70, 0x5E, 0x36,
+ 0x59, 0xE3, 0x3D, 0xA6, 0x0E, 0x50, 0x71, 0x06,
+ 0xDD, 0x8B, 0x3C, 0xF7, 0xEC, 0x3C, 0x7A, 0x08,
+ 0x8D, 0x4E, 0x6A, 0x08, 0xB0, 0xEE, 0x50, 0xE0,
+ 0xF9, 0x0E, 0x40, 0xC0, 0x11, 0xBF, 0x8A, 0x17,
+ 0x63, 0x9D, 0x59, 0x14, 0x0E, 0x25, 0x94, 0x09,
+ 0xE6, 0x34, 0xEC, 0x0F, 0xE4, 0x7C, 0x59, 0xCD,
+ 0x99, 0x85, 0x8E, 0x0F, 0xA1, 0x9E, 0x84, 0xBC,
+ 0x13, 0x20, 0x5F, 0x56, 0x26, 0x10, 0x1A, 0x77,
+ 0x77, 0x7B, 0x4B, 0x68, 0x13, 0x8A, 0x2C, 0xA5,
+ 0x01, 0xBF, 0xAD, 0xF2, 0x2C, 0xD9, 0x4B, 0x24,
+ 0x4C, 0xF5, 0x96, 0x4E, 0xD8, 0xE8, 0x98, 0xA8,
+ 0x9C, 0x63, 0x2F, 0xC3, 0x26, 0xC7, 0x74, 0x83,
+ 0x05, 0xED, 0x67, 0x02, 0x85, 0xAD, 0x1D, 0x0E,
+ 0xA9, 0xD6, 0xE1, 0xC7, 0x39, 0xA0, 0x6E, 0x72,
+ 0xCE, 0x56, 0x6C, 0xB8, 0x4A, 0xDE, 0x11, 0xA2,
+ 0xBF, 0xC1, 0x84, 0x98, 0x8F, 0xCA, 0x79, 0x75,
+ 0xC4, 0x9F, 0x45, 0x16, 0xBC, 0xB1, 0xF4, 0x03,
+ 0x76, 0x6E, 0xD5, 0x46, 0x60, 0xD7, 0x1D, 0xF6,
+ 0xD9, 0xBF, 0xF8, 0x71, 0xEB, 0x09, 0x33, 0x56,
+ 0xE6, 0xEC, 0x72, 0xC8, 0xB3, 0x47, 0x14, 0x2C,
+ 0x24, 0xA1, 0x1F, 0x16, 0xBE, 0x77, 0xFA, 0x9F,
+ 0x6B, 0x83, 0x05, 0x03, 0x4D, 0x6F, 0xC9, 0x76,
+ 0x69, 0x8D, 0xD7, 0x91, 0x26, 0x2B, 0x1C, 0x84,
+ 0xF2, 0x2B, 0x23, 0xA6, 0xFF, 0x7B, 0xEE, 0xCC,
+ 0x4E, 0x03, 0x8A, 0x80, 0x9E, 0x88, 0x96, 0xC3,
+ 0x7A, 0x3E, 0x1B, 0xAC, 0x40, 0x84, 0xD1, 0x64,
+ 0x89, 0x5F, 0xE3, 0x41, 0x89, 0x77, 0x4B, 0x28,
+ 0x83, 0xCA, 0x78, 0x4F, 0x36, 0xC8, 0xCE, 0x53,
+ 0x75, 0x39, 0x3A, 0x58, 0x92, 0x91, 0xF5, 0xA7,
+ 0x6A, 0xD0, 0xB2, 0xBB, 0xFC, 0x8E, 0x3B, 0xFC,
+ 0x83, 0x67, 0x42, 0xAA, 0x18, 0x51, 0x48, 0xD4,
+ 0xC4, 0x85, 0x60, 0xA4, 0x2D, 0xD4, 0x4E, 0xA1,
+ 0xF0, 0xB6, 0x41, 0x98, 0x6F, 0x84, 0xDE, 0x0C,
+ 0x03, 0x8D, 0x83, 0x4A, 0x71, 0xBB, 0x32, 0x8B,
+ 0x83, 0xF7, 0xD8, 0x08, 0x05, 0xA4, 0x48, 0xFE,
+ 0xCA, 0xBB, 0x21, 0xA8, 0xBA, 0x2A, 0xD2, 0x65,
+ 0x4E, 0xEF, 0xA1, 0x8F, 0x01, 0x09, 0xC6, 0x8C,
+ 0xE5, 0x35, 0x32, 0xBB, 0x19, 0x15, 0xAB, 0x7A,
+ 0xFD, 0x29, 0x76, 0xF9, 0xD1, 0xC5, 0x3E, 0xFD,
+ 0x7A, 0x74, 0xBC, 0x41, 0x4F, 0x2C, 0x79, 0x6F,
+ 0x45, 0x4E, 0xFD, 0x88, 0x49, 0x9A, 0x90, 0x6F,
+ 0x65, 0x00, 0xC8, 0x08, 0xB8, 0x3B, 0x40, 0x06,
+ 0x9A, 0x98, 0x5B, 0x6A, 0xD3, 0x5E, 0x32, 0x0E,
+ 0xB0, 0x21, 0xE6, 0x2D, 0xEF, 0x7B, 0x99, 0x1B,
+ 0xAF, 0x96, 0x20, 0x12, 0xE9, 0x31, 0xDA, 0x20,
+ 0xB0, 0x27, 0x99, 0xC7, 0x14, 0x56, 0x3A, 0x08,
+ 0x46, 0xA4, 0xB2, 0x0C, 0x6C, 0x1F, 0x1B, 0xAF,
+ 0x9F, 0x90, 0x03, 0xBB, 0x03, 0xE0, 0x20, 0xE9,
+ 0x45, 0x33, 0xA0, 0x3E, 0x01, 0x2C, 0xA7, 0x4A,
+ 0xCC, 0xC6, 0xF5, 0xA3, 0x35, 0x0D, 0xE1, 0x5E,
+ 0x90, 0x0B, 0xAC, 0x9A, 0x05, 0x79, 0xB2, 0x90,
+ 0x39, 0xEE, 0xC8, 0x20, 0x55, 0xB3, 0x71, 0x46,
+ 0xAC, 0x92, 0x42, 0x85, 0xD5, 0x12, 0x03, 0x8D,
+ 0xBC, 0x82, 0xE7, 0x5A, 0x6E, 0x2E, 0x2C, 0xC0,
+ 0xB6, 0x44, 0xF8, 0xBB, 0x5F, 0x7A, 0x42, 0x86,
+ 0x28, 0xF0, 0x9B, 0xF9, 0x17, 0xDD, 0x35, 0x2F,
+ 0x56, 0xE4, 0x63, 0xFF, 0xEC, 0x87, 0xC5, 0x53,
+ 0xBF, 0x64, 0xB2, 0xDA, 0xDE, 0xC1, 0x6C, 0x85,
+ 0x82, 0x51, 0x40, 0x41, 0xC9, 0x7A, 0x0A, 0xB8,
+ 0xB2, 0x75, 0x03, 0x88, 0x22, 0x6D, 0x76, 0x6E,
+ 0x2D, 0x2B, 0x73, 0xCB, 0x48, 0xC4, 0xED, 0xE0,
+ 0x96, 0xFA, 0x36, 0x9F, 0x99, 0xC7, 0x97, 0xDE,
+ 0x6D, 0xFC, 0x69, 0x86, 0x57, 0x5F, 0xB9, 0x93,
+ 0x78, 0x5C, 0x07, 0x64, 0x61, 0xD0, 0x41, 0x14,
+ 0x32, 0xED, 0xC0, 0xE4, 0xAC, 0xFC, 0x10, 0x0D,
+ 0xAF, 0xEE, 0xDA, 0xB3, 0x6D, 0xB8, 0x7C, 0x10,
+ 0xD5, 0x3B, 0x88, 0xE1, 0x15, 0xE1, 0xA4, 0x27,
+ 0xFE, 0xEE, 0x0A, 0xC8, 0x95, 0xCF, 0xCA, 0x99,
+ 0x98, 0x1D, 0xF3, 0x0E, 0xB8, 0x03, 0xD5, 0x51,
+ 0x4B, 0x56, 0xB9, 0x07, 0x85, 0x58, 0x17, 0x51,
+ 0x16, 0xC4, 0x86, 0xBB, 0xD3, 0x50, 0x01, 0x0E,
+ 0x7B, 0x9C, 0xEF, 0xF0, 0x28, 0x4A, 0xD7, 0x3D,
+ 0x1E, 0x3A, 0xBB, 0xCF, 0x2C, 0x90, 0x12, 0x2A,
+ 0xB3, 0x90, 0x72, 0xE3, 0x93, 0x81, 0xE8, 0xA4,
+ 0xEF, 0x8F, 0xD9, 0x45, 0x4F, 0xB1, 0xD0, 0x21,
+ 0xDA, 0x20, 0x5C, 0xE9, 0x41, 0x41, 0x4E, 0x48,
+ 0x95, 0x4D, 0x5A, 0xB3, 0xE5, 0x8B, 0xFC, 0xDE,
+ 0xB9, 0x7B, 0x93, 0xBE, 0xA2, 0x74, 0x1B, 0xFA,
+ 0xED, 0xCC, 0x0E, 0xDD, 0x96, 0x13, 0x2C, 0xAC,
+ 0xDE, 0x2B, 0x2D, 0x8A, 0x30, 0x5A, 0xB8, 0x4B,
+ 0x08, 0x2C, 0x74, 0xF7, 0xB4, 0x45, 0xD3, 0xA5,
+ 0x62, 0x87, 0xCA, 0x16, 0xEB, 0x49, 0x46, 0x0C,
+ 0x87, 0x7F, 0x11, 0x1D, 0x22, 0x66, 0x0A, 0x38,
+ 0x90, 0x3A, 0x31, 0x38, 0x73, 0xB2, 0xD5, 0x5E,
+ 0x06, 0xC4, 0x1E, 0x3D, 0xB7, 0x52, 0xB8, 0xE5,
+ 0xC0, 0xF9, 0x72, 0xBC, 0x7A, 0x8A, 0xD3, 0xB4,
+ 0x1D, 0xA9, 0x93, 0x3B, 0x7E, 0xFF, 0x8E, 0xA0,
+ 0x96, 0x52, 0xE9, 0x9E, 0x60, 0x4C, 0x02, 0x90,
+ 0xE5, 0x46, 0x92, 0xB3, 0xB8, 0x24, 0xE9, 0xD0,
+ 0xCE, 0xD3, 0x0B, 0xCD, 0x8B, 0xE8, 0x72, 0xEA,
+ 0x6E, 0xBF, 0x2B, 0x99, 0x6F, 0xC0, 0x65, 0xE8,
+ 0x92, 0x30, 0x03, 0x28, 0xA9, 0xB0, 0xA7, 0x03,
+ 0x92, 0x2C, 0xC8, 0x38, 0x8C, 0x38, 0x56, 0xEE,
+ 0xDB, 0x39, 0xBD, 0x7E, 0xE9, 0x8D, 0xDB, 0xC1,
+ 0xD5, 0x71, 0xC7, 0x84, 0xF3, 0xB2, 0x23, 0x22,
+ 0xB5, 0x98, 0xB3, 0x36, 0xF1, 0xC4, 0xB1, 0xA4,
+ 0xF2, 0x84, 0x24, 0xE5, 0x97, 0x48, 0x34, 0x43,
+ 0xEF, 0xD9, 0xF4, 0x10, 0xE4, 0x13, 0xEE, 0x6C,
+ 0xE7, 0x5D, 0x9B, 0xBA, 0x35, 0xF5, 0x7D, 0xE5,
+ 0xBF, 0x8A, 0xCC, 0x3D, 0x28, 0xCF, 0xE8, 0x90,
+ 0xE3, 0xCF, 0x01, 0x69, 0xD7, 0xC0, 0xD2, 0x2C,
+ 0xC2, 0x9B, 0x89, 0xF2, 0xA9, 0x83, 0xA2, 0xA9,
+ 0x12, 0xAA, 0x56, 0xD8, 0xCB, 0xA5, 0x8B, 0x0A,
+ 0x03, 0xC1, 0xE1, 0x8E, 0x02, 0x36, 0x3D, 0x8F,
+ 0x58, 0x4D, 0xEB, 0x93, 0x91, 0xC6, 0xE7, 0x22,
+ 0xCE, 0xA8, 0x02, 0xD2, 0x82, 0x0D, 0x43, 0x4D,
+ 0x4E, 0x11, 0xF8, 0x7B, 0x45, 0xD0, 0x23, 0xF7,
+ 0x14, 0x35, 0x16, 0xA4, 0x0B, 0xAD, 0xFE, 0xE2,
+ 0x2B, 0xFD, 0xF7, 0x17, 0xA9, 0x93, 0x77, 0x82,
+ 0x45, 0x6E, 0x51, 0x1F, 0x5C, 0x2C, 0x5F, 0xFF,
+ 0x1A, 0xA3, 0x0E, 0x29, 0xA5, 0x1D, 0xFD, 0x0E,
+ 0xDD, 0x14, 0xF6, 0x69, 0x20, 0x15, 0xFD, 0xBB,
+ 0xF8, 0xAF, 0x3D, 0xF3, 0xCC, 0xB8, 0x7E, 0x64,
+ 0xED, 0x99, 0xF3, 0x1D, 0xFC, 0x96, 0xA2, 0x0A,
+ 0x9C, 0xC2, 0x9B, 0xD7, 0x03, 0xA6, 0x79, 0x3B,
+ 0x16, 0x0C, 0x6C, 0x5C, 0x2B, 0x61, 0x0E, 0x48,
+ 0x96, 0x5C, 0x46, 0x7F, 0xC3, 0xCD, 0x3C, 0x10,
+ 0x30, 0x8F, 0xC4, 0xB5, 0x92, 0x46, 0x1C, 0xDF,
+ 0x10, 0xEE, 0x43, 0x27, 0x42, 0x70, 0xD2, 0xC4,
+ 0x5E, 0x77, 0x78, 0x0E, 0x0E, 0xC3, 0x8B, 0x72,
+ 0xA0, 0xFC, 0x4C, 0x0F, 0x5D, 0xBE, 0xBE, 0x07,
+ 0x5B, 0x53, 0x38, 0xC8, 0x96, 0x82, 0x2D, 0x2D,
+ 0x8E, 0xA8, 0x6C, 0x68, 0x34, 0x42, 0x31, 0x90,
+ 0xD6, 0x4D, 0x29, 0xA9, 0x90, 0x95, 0x19, 0xD6,
+ 0x8F, 0x2F, 0xF4, 0xD3, 0x71, 0x21, 0xB7, 0x7D,
+ 0x51, 0xA6, 0x15, 0xE5, 0xDA, 0x08, 0x6A, 0x23,
+ 0xDE, 0x6C, 0xBA, 0xCF, 0x84, 0xF1, 0x47, 0x25,
+ 0x4A, 0xF1, 0x2F, 0x24, 0xED, 0x3B, 0xED, 0xF0,
+ 0xA7, 0x48, 0xAE, 0x58, 0x7F, 0x0B, 0x3B, 0x78,
+ 0xCE, 0x94, 0x32, 0x82, 0x63, 0x22, 0x67, 0xAA,
+ 0x45, 0x37, 0xCC, 0x43, 0xD5, 0x10, 0x59, 0x5B,
+ 0x09, 0xC6, 0x1C, 0x32, 0xCD, 0x19, 0xA2, 0x3C,
+ 0x2B, 0x84, 0x03, 0xD5, 0x97, 0x20, 0xE7, 0xFB,
+ 0x2D, 0x0A, 0x3C, 0x5C, 0xFD, 0x39, 0x9C, 0xDE,
+ 0x02, 0x3D, 0xC7, 0xDD, 0x51, 0xDE, 0x99, 0xB3,
+ 0x65, 0x00, 0x60, 0xCF, 0xAE, 0xCD, 0xE2, 0x83,
+ 0xD5, 0x36, 0x2C, 0x89, 0x28, 0x6D, 0xC3, 0x6A,
+ 0x80, 0xCD, 0x1A, 0xC3, 0x75, 0x11, 0x7E, 0x65,
+ 0x2A, 0x44, 0x9D, 0xB5, 0x12, 0x2A, 0x78, 0xD0,
+ 0x4D, 0xF8, 0x5E, 0xBF, 0xEC, 0x6B, 0x60, 0xD2,
+ 0x89, 0x92, 0x5E, 0x17, 0xDA, 0x33, 0x83, 0xDB,
+ 0xED, 0xF4, 0x5E, 0x82, 0xE9, 0x04, 0xD7, 0xE0,
+ 0xA4, 0x1B, 0xFE, 0x32, 0x93, 0x05, 0x2C, 0xCF,
+ 0xA2, 0xAE, 0x83, 0xCA, 0x2F, 0x5E, 0x47, 0x1C,
+ 0x85, 0x0D, 0x01, 0xE5, 0x44, 0x3D, 0xE4, 0x58,
+ 0x8E, 0xC0, 0x46, 0x05, 0x95, 0xBE, 0x59, 0xED,
+ 0x0F, 0x7B, 0xA1, 0xF7, 0xDB, 0x2C, 0x79, 0x86,
+ 0xE9, 0x54, 0x98, 0xA6, 0x2A, 0xD0, 0xFE, 0xC9,
+ 0x59, 0x1D, 0x31, 0xC6, 0x27, 0x83, 0x2C, 0x12,
+ 0x9C, 0xE1, 0x43, 0x3C, 0xEC, 0x65, 0x3B, 0xEF,
+ 0xFD, 0x92, 0xBC, 0x0E, 0x38, 0xBA, 0x56, 0x1C,
+ 0xC0, 0x81, 0x9E, 0xBE, 0x76, 0x59, 0x88, 0xA4,
+ 0x0C, 0x6B, 0xD9, 0x7C, 0xD6, 0x8C, 0x32, 0xCD,
+ 0x3F, 0xB6, 0xEF, 0xBF, 0xA6, 0xC7, 0xC9, 0xD3,
+ 0x02, 0xB0, 0x3B, 0xFF, 0xFC, 0x4A, 0x97, 0x14,
+ 0xFF, 0xF2, 0x48, 0xFE, 0x1B, 0xCE, 0x7D, 0x24,
+ 0xA1, 0xD6, 0x03, 0xB0, 0x2F, 0xAA, 0xF7, 0x71,
+ 0xC9, 0x0E, 0xCB, 0x57, 0xBA, 0xEF, 0xB5, 0x65,
+ 0xE1, 0x44, 0xE4, 0x6A, 0xEB, 0xE8, 0x2B, 0x8F,
+ 0x06, 0x23, 0x7A, 0xA9, 0x70, 0xAE, 0x48, 0x65,
+ 0x94, 0xEE, 0xA5, 0x94, 0x78, 0x7D, 0x09, 0xF8,
+ 0xB5, 0x4D, 0x64, 0x67, 0x10, 0x16, 0xA2, 0xFC,
+ 0x49, 0x93, 0x76, 0x71, 0xED, 0x56, 0x25, 0xB5,
+ 0x87, 0xE8, 0x84, 0x16, 0x55, 0xE1, 0x1E, 0x34,
+ 0xE3, 0xB2, 0x49, 0x8F, 0xDC, 0xDA, 0xC3, 0x17,
+ 0x82, 0x0E, 0x19, 0xD7, 0xE0, 0x09, 0xD7, 0xD9,
+ 0x59, 0x6B, 0x55, 0x60, 0x1C, 0x1B, 0x02, 0xE8,
+ 0xD1, 0x90, 0xF6, 0x3E, 0x94, 0x4A, 0x12, 0x0C,
+ 0xBB, 0x69, 0xFD, 0x7C, 0xA0, 0xDD, 0x5F, 0x93,
+ 0x9F, 0xFE, 0x2E, 0x79, 0xDB, 0xBE, 0x6F, 0x85,
+ 0xAD, 0x9B, 0xDE, 0xAA, 0x10, 0xCA, 0xDB, 0xF2,
+ 0xF9, 0xD0, 0x54, 0x15, 0x00, 0xF0, 0x6F, 0x86,
+ 0x16, 0xF6, 0xA8, 0xA4, 0x08, 0x7B, 0x50, 0xF1,
+ 0x35, 0xAC, 0xB6, 0xBB, 0x8B, 0xA0, 0x86, 0x3B,
+ 0x3B, 0xDA, 0x9F, 0x89, 0xB5, 0x9C, 0x44, 0x41,
+ 0x6A, 0xFD, 0x8A, 0x79, 0xA0, 0xFB, 0x7D, 0x1B,
+ 0xE8, 0xC4, 0xA7, 0x3F, 0x66, 0x97, 0xA9, 0xF8,
+ 0xEA, 0x0C, 0x30, 0x81, 0x63, 0xE4, 0xE3, 0x84,
+ 0x62, 0xC5, 0x19, 0xFB, 0x00, 0xD6, 0x72, 0xE6,
+ 0xC9, 0x6C, 0xDB, 0xEB, 0xF3, 0x6F, 0xDB, 0xE7,
+ 0x00, 0x53, 0xCE, 0x1D, 0xE5, 0xF5, 0x53, 0x18,
+ 0xE5, 0xAA, 0xDA, 0x90, 0x7B, 0xCB, 0x2B, 0x74,
+ 0xED, 0x70, 0xFE, 0x90, 0xA8, 0xC8, 0x80, 0x2B,
+ 0x93, 0x08, 0xDB, 0x6A, 0x0F, 0x3D, 0xA1, 0xFA,
+ 0xB6, 0x63, 0x18, 0xF8, 0x43, 0x68, 0x00, 0xD0,
+ 0x7A, 0x97, 0xCD, 0x5B, 0xB2, 0x84, 0x90, 0x06,
+ 0xB9, 0x81, 0xC5, 0x81, 0x05, 0x55, 0x8C, 0xC4,
+ 0x03, 0x89, 0xF5, 0x63, 0x87, 0x39, 0xEC, 0xD6,
+ 0x89, 0x01, 0xE7, 0x1C, 0x4C, 0xDF, 0x5D, 0x65,
+ 0xFE, 0x4B, 0x91, 0x04, 0x5B, 0x0E, 0x03, 0x38,
+ 0x2F, 0x21, 0xA8, 0x36, 0x58, 0x93, 0xAD, 0x1F,
+ 0xEB, 0xC3, 0x91, 0x90, 0x9B, 0x95, 0xCD, 0x53,
+ 0x81, 0xAA, 0xA9, 0x48, 0x4D, 0x2B, 0x22, 0xC7,
+ 0xBE, 0x1B, 0x38, 0x21, 0xA1, 0xFE, 0x23, 0xB4,
+ 0xAC, 0x66, 0x92, 0x9E, 0xF2, 0x27, 0xDC, 0x23,
+ 0x70, 0x6E, 0xBA, 0xF9, 0xED, 0x3B, 0xCE, 0x63,
+ 0xAD, 0x68, 0xF2, 0x80, 0xFA, 0x1B, 0x14, 0xB5,
+ 0xB4, 0x07, 0xE3, 0x5A, 0x81, 0x74, 0xE1, 0xF2,
+ },
+ .len = 3120
+ },
+ .auth_tag = {
+ .data = {
+ 0xEA, 0xE9, 0x10, 0xB6, 0xB7, 0xAB, 0xEA, 0x90,
+ 0x8A, 0xD5, 0x63, 0x88, 0xDB, 0x2B, 0x8F, 0x23,
+ },
+ .len = 16
+ }
+};
+
+#endif /* TEST_CRYPTODEV_GCM_TEST_VECTORS_H_ */
diff --git a/app/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 9f095cf3..3214f9a6 100644
--- a/app/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -1,7 +1,7 @@
/*
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -97,7 +97,8 @@ hmac_md5_test_vector = {
0x50, 0xE8, 0xDE, 0xC5, 0xC1, 0x76, 0xAC, 0xAE,
0x15, 0x4A, 0xF1, 0x7F, 0x7E, 0x04, 0x42, 0x9B
},
- .len = 16
+ .len = 16,
+ .truncated_len = 12
}
};
@@ -139,7 +140,8 @@ hmac_sha1_test_vector = {
0x7E, 0x2E, 0x8F, 0xFC, 0x48, 0x39, 0x46, 0x17,
0x3F, 0x91, 0x64, 0x59
},
- .len = 20
+ .len = 20,
+ .truncated_len = 12
}
};
@@ -184,7 +186,8 @@ hmac_sha224_test_vector = {
0xF1, 0x8A, 0x63, 0xBB, 0x5D, 0x1D, 0xE3, 0x9F,
0x92, 0xF6, 0xAA, 0x19
},
- .len = 28
+ .len = 28,
+ .truncated_len = 14
}
};
@@ -229,7 +232,8 @@ hmac_sha256_test_vector = {
0x06, 0x4D, 0x64, 0x09, 0x0A, 0xCC, 0x02, 0x77,
0x71, 0x83, 0x48, 0x71, 0x07, 0x02, 0x25, 0x17
},
- .len = 32
+ .len = 32,
+ .truncated_len = 16
}
};
@@ -280,7 +284,8 @@ hmac_sha384_test_vector = {
0x10, 0x90, 0x0A, 0xE3, 0xF0, 0x59, 0xDD, 0xC0,
0x6F, 0xE6, 0x8C, 0x84, 0xD5, 0x03, 0xF8, 0x9E
},
- .len = 48
+ .len = 48,
+ .truncated_len = 24
}
};
@@ -337,7 +342,8 @@ hmac_sha512_test_vector = {
0x97, 0x37, 0x0F, 0xBE, 0xC2, 0x45, 0xA0, 0x87,
0xAF, 0x24, 0x27, 0x0C, 0x78, 0xBA, 0xBE, 0x20
},
- .len = 64
+ .len = 64,
+ .truncated_len = 32
}
};
@@ -358,13 +364,17 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "HMAC-MD5 Digest",
.test_data = &hmac_md5_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "HMAC-MD5 Digest Verify",
.test_data = &hmac_md5_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "SHA1 Digest",
@@ -382,13 +392,17 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "HMAC-SHA1 Digest",
.test_data = &hmac_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "HMAC-SHA1 Digest Verify",
.test_data = &hmac_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "SHA224 Digest",
@@ -406,13 +420,17 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "HMAC-SHA224 Digest",
.test_data = &hmac_sha224_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "HMAC-SHA224 Digest Verify",
.test_data = &hmac_sha224_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "SHA256 Digest",
@@ -430,13 +448,17 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "HMAC-SHA256 Digest",
.test_data = &hmac_sha256_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "HMAC-SHA256 Digest Verify",
.test_data = &hmac_sha256_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "SHA384 Digest",
@@ -454,13 +476,17 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "HMAC-SHA384 Digest",
.test_data = &hmac_sha384_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "HMAC-SHA384 Digest Verify",
.test_data = &hmac_sha384_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "SHA512 Digest",
@@ -478,13 +504,17 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "HMAC-SHA512 Digest",
.test_data = &hmac_sha512_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
{
.test_descr = "HMAC-SHA512 Digest Verify",
.test_data = &hmac_sha512_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER
},
};
diff --git a/app/test/test_cryptodev_hmac_test_vectors.h b/test/test/test_cryptodev_hmac_test_vectors.h
index d30215fd..d30215fd 100644
--- a/app/test/test_cryptodev_hmac_test_vectors.h
+++ b/test/test/test_cryptodev_hmac_test_vectors.h
diff --git a/app/test/test_cryptodev_kasumi_hash_test_vectors.h b/test/test/test_cryptodev_kasumi_hash_test_vectors.h
index 69742faa..69742faa 100644
--- a/app/test/test_cryptodev_kasumi_hash_test_vectors.h
+++ b/test/test/test_cryptodev_kasumi_hash_test_vectors.h
diff --git a/app/test/test_cryptodev_kasumi_test_vectors.h b/test/test/test_cryptodev_kasumi_test_vectors.h
index ef1dc6f3..ef1dc6f3 100644
--- a/app/test/test_cryptodev_kasumi_test_vectors.h
+++ b/test/test/test_cryptodev_kasumi_test_vectors.h
diff --git a/app/test/test_cryptodev_perf.c b/test/test/test_cryptodev_perf.c
index 89a67952..d60028db 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/test/test/test_cryptodev_perf.c
@@ -157,6 +157,12 @@ test_perf_create_openssl_session(uint8_t dev_id, enum chain_mode chain,
enum rte_crypto_cipher_algorithm cipher_algo,
unsigned int cipher_key_len,
enum rte_crypto_auth_algorithm auth_algo);
+static struct rte_cryptodev_sym_session *
+test_perf_create_armv8_session(uint8_t dev_id, enum chain_mode chain,
+ enum rte_crypto_cipher_algorithm cipher_algo,
+ unsigned int cipher_key_len,
+ enum rte_crypto_auth_algorithm auth_algo);
+
static struct rte_mbuf *
test_perf_create_pktmbuf(struct rte_mempool *mpool, unsigned buf_sz);
static inline struct rte_crypto_op *
@@ -166,15 +172,15 @@ test_perf_set_crypto_op_snow3g(struct rte_crypto_op *op, struct rte_mbuf *m,
static inline struct rte_crypto_op *
test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- unsigned int digest_len);
+ unsigned int digest_len, enum chain_mode chain);
static inline struct rte_crypto_op *
test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- unsigned int digest_len);
+ unsigned int digest_len, enum chain_mode chain __rte_unused);
static inline struct rte_crypto_op *
test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- unsigned int digest_len);
+ unsigned int digest_len, enum chain_mode chain __rte_unused);
static uint32_t get_auth_digest_length(enum rte_crypto_auth_algorithm algo);
@@ -201,6 +207,8 @@ static const char *pmd_name(enum rte_cryptodev_type pmd)
return RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
case RTE_CRYPTODEV_SNOW3G_PMD:
return RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD);
+ case RTE_CRYPTODEV_DPAA2_SEC_PMD:
+ return RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD);
default:
return "";
}
@@ -315,7 +323,7 @@ testsuite_setup(void)
return TEST_FAILED;
}
- /* Create 2 AESNI MB devices if required */
+ /* Create an AESNI MB device if required */
if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_AESNI_MB_PMD) {
#ifndef RTE_LIBRTE_PMD_AESNI_MB
RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_AESNI_MB must be"
@@ -323,19 +331,17 @@ testsuite_setup(void)
return TEST_FAILED;
#endif
nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_AESNI_MB_PMD);
- if (nb_devs < 2) {
- for (i = nb_devs; i < 2; i++) {
- ret = rte_eal_vdev_init(
- RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), NULL);
-
- TEST_ASSERT(ret == 0,
- "Failed to create instance %u of pmd : %s",
- i, RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
- }
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), NULL);
+
+ TEST_ASSERT(ret == 0,
+ "Failed to create instance of pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD));
}
}
- /* Create 2 AESNI GCM devices if required */
+ /* Create an AESNI GCM device if required */
if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_AESNI_GCM_PMD) {
#ifndef RTE_LIBRTE_PMD_AESNI_GCM
RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_AESNI_GCM must be"
@@ -343,19 +349,17 @@ testsuite_setup(void)
return TEST_FAILED;
#endif
nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_AESNI_GCM_PMD);
- if (nb_devs < 2) {
- for (i = nb_devs; i < 2; i++) {
- ret = rte_eal_vdev_init(
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), NULL);
-
- TEST_ASSERT(ret == 0,
- "Failed to create instance %u of pmd : %s",
- i, RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
- }
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), NULL);
+
+ TEST_ASSERT(ret == 0,
+ "Failed to create instance of pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD));
}
}
- /* Create 2 SNOW3G devices if required */
+ /* Create a SNOW3G device if required */
if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_SNOW3G_PMD) {
#ifndef RTE_LIBRTE_PMD_SNOW3G
RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_SNOW3G must be"
@@ -363,19 +367,17 @@ testsuite_setup(void)
return TEST_FAILED;
#endif
nb_devs = rte_cryptodev_count_devtype(RTE_CRYPTODEV_SNOW3G_PMD);
- if (nb_devs < 2) {
- for (i = nb_devs; i < 2; i++) {
- ret = rte_eal_vdev_init(
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), NULL);
-
- TEST_ASSERT(ret == 0,
- "Failed to create instance %u of pmd : %s",
- i, RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD));
- }
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), NULL);
+
+ TEST_ASSERT(ret == 0,
+ "Failed to create instance of pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD));
}
}
- /* Create 2 OPENSSL devices if required */
+ /* Create an OPENSSL device if required */
if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_OPENSSL_PMD) {
#ifndef RTE_LIBRTE_PMD_OPENSSL
RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_OPENSSL must be"
@@ -384,16 +386,34 @@ testsuite_setup(void)
#endif
nb_devs = rte_cryptodev_count_devtype(
RTE_CRYPTODEV_OPENSSL_PMD);
- if (nb_devs < 2) {
- for (i = nb_devs; i < 2; i++) {
- ret = rte_eal_vdev_init(
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD),
- NULL);
-
- TEST_ASSERT(ret == 0, "Failed to create "
- "instance %u of pmd : %s", i,
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
- }
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD),
+ NULL);
+
+ TEST_ASSERT(ret == 0, "Failed to create "
+ "instance of pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+ }
+ }
+
+ /* Create an ARMv8 device if required */
+ if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_ARMV8_PMD) {
+#ifndef RTE_LIBRTE_PMD_ARMV8_CRYPTO
+ RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO must be"
+ " enabled in config file to run this testsuite.\n");
+ return TEST_FAILED;
+#endif
+ nb_devs = rte_cryptodev_count_devtype(
+ RTE_CRYPTODEV_ARMV8_PMD);
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_ARMV8_PMD),
+ NULL);
+
+ TEST_ASSERT(ret == 0, "Failed to create "
+ "instance of pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_ARMV8_PMD));
}
}
@@ -2286,7 +2306,8 @@ test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
static struct rte_crypto_op *(*test_perf_set_crypto_op)
(struct rte_crypto_op *, struct rte_mbuf *,
struct rte_cryptodev_sym_session *,
- unsigned int, unsigned int);
+ unsigned int, unsigned int,
+ enum chain_mode);
unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
@@ -2324,14 +2345,14 @@ test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
break;
case RTE_CRYPTO_CIPHER_AES_GCM:
test_perf_set_crypto_op =
- test_perf_set_crypto_op_aes_gcm;
+ test_perf_set_crypto_op_aes_gcm;
break;
default:
return TEST_FAILED;
}
op = test_perf_set_crypto_op(op, m, sess, pparams->buf_size,
- digest_length);
+ digest_length, pparams->chain);
TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
c_ops[i] = op;
@@ -2424,6 +2445,139 @@ test_perf_openssl_optimise_cyclecount(struct perf_test_params *pparams)
return TEST_SUCCESS;
}
+static int
+test_perf_armv8_optimise_cyclecount(struct perf_test_params *pparams)
+{
+ uint32_t num_to_submit = pparams->total_operations;
+ struct rte_crypto_op *c_ops[num_to_submit];
+ struct rte_crypto_op *proc_ops[num_to_submit];
+ uint64_t failed_polls, retries, start_cycles, end_cycles,
+ total_cycles = 0;
+ uint32_t burst_sent = 0, burst_received = 0;
+ uint32_t i, burst_size, num_sent, num_ops_received;
+ uint32_t nb_ops;
+
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+ static struct rte_cryptodev_sym_session *sess;
+
+ unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
+
+ if (rte_cryptodev_count() == 0) {
+ printf("\nNo crypto devices found. Is PMD build configured?\n");
+ return TEST_FAILED;
+ }
+
+ /* Create Crypto session*/
+ sess = test_perf_create_armv8_session(ts_params->dev_id,
+ pparams->chain, pparams->cipher_algo,
+ pparams->cipher_key_length, pparams->auth_algo);
+ TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+ /* Generate Crypto op data structure(s)*/
+ for (i = 0; i < num_to_submit ; i++) {
+ struct rte_mbuf *m = test_perf_create_pktmbuf(
+ ts_params->mbuf_mp,
+ pparams->buf_size);
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate tx_buf");
+
+ struct rte_crypto_op *op =
+ rte_crypto_op_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(op, "Failed to allocate op");
+
+ op = test_perf_set_crypto_op_aes(op, m, sess, pparams->buf_size,
+ digest_length, pparams->chain);
+ TEST_ASSERT_NOT_NULL(op, "Failed to attach op to session");
+
+ c_ops[i] = op;
+ }
+
+ printf("\nOn %s dev%u qp%u, %s, cipher algo:%s, cipher key length:%u, "
+ "auth_algo:%s, Packet Size %u bytes",
+ pmd_name(gbl_cryptodev_perftest_devtype),
+ ts_params->dev_id, 0,
+ chain_mode_name(pparams->chain),
+ cipher_algo_name(pparams->cipher_algo),
+ pparams->cipher_key_length,
+ auth_algo_name(pparams->auth_algo),
+ pparams->buf_size);
+ printf("\nOps Tx\tOps Rx\tOps/burst ");
+ printf("Retries "
+ "EmptyPolls\tIACycles/CyOp\tIACycles/Burst\tIACycles/Byte");
+
+ for (i = 2; i <= 128 ; i *= 2) {
+ num_sent = 0;
+ num_ops_received = 0;
+ retries = 0;
+ failed_polls = 0;
+ burst_size = i;
+ total_cycles = 0;
+ while (num_sent < num_to_submit) {
+ if ((num_to_submit - num_sent) < burst_size)
+ nb_ops = num_to_submit - num_sent;
+ else
+ nb_ops = burst_size;
+
+ start_cycles = rte_rdtsc();
+ burst_sent = rte_cryptodev_enqueue_burst(
+ ts_params->dev_id,
+ 0, &c_ops[num_sent],
+ nb_ops);
+ end_cycles = rte_rdtsc();
+
+ if (burst_sent == 0)
+ retries++;
+ num_sent += burst_sent;
+ total_cycles += (end_cycles - start_cycles);
+
+ start_cycles = rte_rdtsc();
+ burst_received = rte_cryptodev_dequeue_burst(
+ ts_params->dev_id, 0, proc_ops,
+ burst_size);
+ end_cycles = rte_rdtsc();
+ if (burst_received < burst_sent)
+ failed_polls++;
+ num_ops_received += burst_received;
+
+ total_cycles += end_cycles - start_cycles;
+ }
+
+ while (num_ops_received != num_to_submit) {
+ /* Sending 0 length burst to flush sw crypto device */
+ rte_cryptodev_enqueue_burst(
+ ts_params->dev_id, 0, NULL, 0);
+
+ start_cycles = rte_rdtsc();
+ burst_received = rte_cryptodev_dequeue_burst(
+ ts_params->dev_id, 0, proc_ops, burst_size);
+ end_cycles = rte_rdtsc();
+
+ total_cycles += end_cycles - start_cycles;
+ if (burst_received == 0)
+ failed_polls++;
+ num_ops_received += burst_received;
+ }
+
+ printf("\n%u\t%u\t%u", num_sent, num_ops_received, burst_size);
+ printf("\t\t%"PRIu64, retries);
+ printf("\t%"PRIu64, failed_polls);
+ printf("\t\t%"PRIu64, total_cycles/num_ops_received);
+ printf("\t\t%"PRIu64,
+ (total_cycles/num_ops_received)*burst_size);
+ printf("\t\t%"PRIu64,
+ total_cycles/(num_ops_received*pparams->buf_size));
+ }
+ printf("\n");
+
+ for (i = 0; i < num_to_submit ; i++) {
+ rte_pktmbuf_free(c_ops[i]->sym->m_src);
+ rte_crypto_op_free(c_ops[i]);
+ }
+
+ return TEST_SUCCESS;
+}
+
static uint32_t get_auth_key_max_length(enum rte_crypto_auth_algorithm algo)
{
switch (algo) {
@@ -2541,16 +2695,16 @@ test_perf_create_aes_sha_session(uint8_t dev_id, enum chain_mode chain,
cipher_xform.cipher.key.data = aes_key;
cipher_xform.cipher.key.length = cipher_key_len;
-
- /* Setup HMAC Parameters */
- auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
- auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
- auth_xform.auth.algo = auth_algo;
-
- auth_xform.auth.key.data = hmac_sha_key;
- auth_xform.auth.key.length = get_auth_key_max_length(auth_algo);
- auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
-
+ if (chain != CIPHER_ONLY) {
+ /* Setup HMAC Parameters */
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ auth_xform.auth.algo = auth_algo;
+ auth_xform.auth.key.data = hmac_sha_key;
+ auth_xform.auth.key.length = get_auth_key_max_length(auth_algo);
+ auth_xform.auth.digest_length =
+ get_auth_digest_length(auth_algo);
+ }
switch (chain) {
case CIPHER_HASH:
cipher_xform.next = &auth_xform;
@@ -2562,6 +2716,10 @@ test_perf_create_aes_sha_session(uint8_t dev_id, enum chain_mode chain,
cipher_xform.next = NULL;
/* Create Crypto session*/
return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ case CIPHER_ONLY:
+ cipher_xform.next = NULL;
+ /* Create Crypto session*/
+ return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
default:
return NULL;
}
@@ -2685,6 +2843,56 @@ test_perf_create_openssl_session(uint8_t dev_id, enum chain_mode chain,
}
}
+static struct rte_cryptodev_sym_session *
+test_perf_create_armv8_session(uint8_t dev_id, enum chain_mode chain,
+ enum rte_crypto_cipher_algorithm cipher_algo,
+ unsigned int cipher_key_len,
+ enum rte_crypto_auth_algorithm auth_algo)
+{
+ struct rte_crypto_sym_xform cipher_xform = { 0 };
+ struct rte_crypto_sym_xform auth_xform = { 0 };
+
+ /* Setup Cipher Parameters */
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.cipher.algo = cipher_algo;
+
+ switch (cipher_algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipher_xform.cipher.key.data = aes_cbc_128_key;
+ break;
+ default:
+ return NULL;
+ }
+
+ cipher_xform.cipher.key.length = cipher_key_len;
+
+ /* Setup Auth Parameters */
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ auth_xform.auth.algo = auth_algo;
+
+ auth_xform.auth.digest_length = get_auth_digest_length(auth_algo);
+
+ switch (chain) {
+ case CIPHER_HASH:
+ cipher_xform.next = &auth_xform;
+ auth_xform.next = NULL;
+ /* Encrypt and hash the result */
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ /* Create Crypto session*/
+ return rte_cryptodev_sym_session_create(dev_id, &cipher_xform);
+ case HASH_CIPHER:
+ auth_xform.next = &cipher_xform;
+ cipher_xform.next = NULL;
+ /* Hash encrypted message and decrypt */
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ /* Create Crypto session*/
+ return rte_cryptodev_sym_session_create(dev_id, &auth_xform);
+ default:
+ return NULL;
+ }
+}
+
#define AES_BLOCK_SIZE 16
#define AES_CIPHER_IV_LENGTH 16
@@ -2708,8 +2916,8 @@ test_perf_create_pktmbuf(struct rte_mempool *mpool, unsigned buf_sz)
static inline struct rte_crypto_op *
test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
- struct rte_cryptodev_sym_session *sess, unsigned data_len,
- unsigned digest_len)
+ struct rte_cryptodev_sym_session *sess, unsigned int data_len,
+ unsigned int digest_len, enum chain_mode chain)
{
if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
rte_crypto_op_free(op);
@@ -2717,13 +2925,26 @@ test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
}
/* Authentication Parameters */
- op->sym->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
- AES_CIPHER_IV_LENGTH + data_len);
- op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
- AES_CIPHER_IV_LENGTH + data_len);
- op->sym->auth.digest.length = digest_len;
- op->sym->auth.aad.data = aes_iv;
- op->sym->auth.aad.length = AES_CIPHER_IV_LENGTH;
+ if (chain == CIPHER_ONLY) {
+ op->sym->auth.digest.data = NULL;
+ op->sym->auth.digest.phys_addr = 0;
+ op->sym->auth.digest.length = 0;
+ op->sym->auth.aad.data = NULL;
+ op->sym->auth.aad.length = 0;
+ op->sym->auth.data.offset = 0;
+ op->sym->auth.data.length = 0;
+ } else {
+ op->sym->auth.digest.data = rte_pktmbuf_mtod_offset(m,
+ uint8_t *, AES_CIPHER_IV_LENGTH + data_len);
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ AES_CIPHER_IV_LENGTH + data_len);
+ op->sym->auth.digest.length = digest_len;
+ op->sym->auth.aad.data = aes_iv;
+ op->sym->auth.aad.length = AES_CIPHER_IV_LENGTH;
+ op->sym->auth.data.offset = AES_CIPHER_IV_LENGTH;
+ op->sym->auth.data.length = data_len;
+ }
+
/* Cipher Parameters */
op->sym->cipher.iv.data = rte_pktmbuf_mtod(m, uint8_t *);
@@ -2732,10 +2953,6 @@ test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
rte_memcpy(op->sym->cipher.iv.data, aes_iv, AES_CIPHER_IV_LENGTH);
- /* Data lengths/offsets Parameters */
- op->sym->auth.data.offset = AES_CIPHER_IV_LENGTH;
- op->sym->auth.data.length = data_len;
-
op->sym->cipher.data.offset = AES_CIPHER_IV_LENGTH;
op->sym->cipher.data.length = data_len;
@@ -2747,7 +2964,7 @@ test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
static inline struct rte_crypto_op *
test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- unsigned int digest_len)
+ unsigned int digest_len, enum chain_mode chain __rte_unused)
{
if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
rte_crypto_op_free(op);
@@ -2880,7 +3097,7 @@ test_perf_set_crypto_op_snow3g_hash(struct rte_crypto_op *op,
static inline struct rte_crypto_op *
test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
struct rte_cryptodev_sym_session *sess, unsigned int data_len,
- unsigned int digest_len)
+ unsigned int digest_len, enum chain_mode chain __rte_unused)
{
if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
rte_crypto_op_free(op);
@@ -2963,8 +3180,10 @@ test_perf_aes_sha(uint8_t dev_id, uint16_t queue_id,
rte_pktmbuf_free(mbufs[k]);
return -1;
}
+
/* Make room for Digest and IV in mbuf */
- rte_pktmbuf_append(mbufs[i], digest_length);
+ if (pparams->chain != CIPHER_ONLY)
+ rte_pktmbuf_append(mbufs[i], digest_length);
rte_pktmbuf_prepend(mbufs[i], AES_CIPHER_IV_LENGTH);
}
@@ -2986,7 +3205,8 @@ test_perf_aes_sha(uint8_t dev_id, uint16_t queue_id,
ops[i] = test_perf_set_crypto_op_aes(ops[i],
mbufs[i + (pparams->burst_size *
(j % NUM_MBUF_SETS))],
- sess, pparams->buf_size, digest_length);
+ sess, pparams->buf_size, digest_length,
+ pparams->chain);
/* enqueue burst */
burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
@@ -3237,7 +3457,8 @@ test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
static struct rte_crypto_op *(*test_perf_set_crypto_op)
(struct rte_crypto_op *, struct rte_mbuf *,
struct rte_cryptodev_sym_session *,
- unsigned int, unsigned int);
+ unsigned int, unsigned int,
+ enum chain_mode);
switch (pparams->cipher_algo) {
case RTE_CRYPTO_CIPHER_3DES_CBC:
@@ -3298,7 +3519,8 @@ test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
ops[i] = test_perf_set_crypto_op(ops[i],
mbufs[i + (pparams->burst_size *
(j % NUM_MBUF_SETS))],
- sess, pparams->buf_size, digest_length);
+ sess, pparams->buf_size, digest_length,
+ pparams->chain);
/* enqueue burst */
burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
@@ -3361,6 +3583,139 @@ test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
return TEST_SUCCESS;
}
+static int
+test_perf_armv8(uint8_t dev_id, uint16_t queue_id,
+ struct perf_test_params *pparams)
+{
+ uint16_t i, k, l, m;
+ uint16_t j = 0;
+ uint16_t ops_unused = 0;
+ uint16_t burst_size;
+ uint16_t ops_needed;
+
+ uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
+ uint64_t processed = 0, failed_polls = 0, retries = 0;
+ uint64_t tsc_start = 0, tsc_end = 0;
+
+ unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
+
+ struct rte_crypto_op *ops[pparams->burst_size];
+ struct rte_crypto_op *proc_ops[pparams->burst_size];
+
+ struct rte_mbuf *mbufs[pparams->burst_size * NUM_MBUF_SETS];
+
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+ static struct rte_cryptodev_sym_session *sess;
+
+ if (rte_cryptodev_count() == 0) {
+ printf("\nNo crypto devices found. Is PMD build configured?\n");
+ return TEST_FAILED;
+ }
+
+ /* Create Crypto session*/
+ sess = test_perf_create_armv8_session(ts_params->dev_id,
+ pparams->chain, pparams->cipher_algo,
+ pparams->cipher_key_length, pparams->auth_algo);
+ TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+ /* Generate a burst of crypto operations */
+ for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
+ mbufs[i] = test_perf_create_pktmbuf(
+ ts_params->mbuf_mp,
+ pparams->buf_size);
+
+ if (mbufs[i] == NULL) {
+ printf("\nFailed to get mbuf - freeing the rest.\n");
+ for (k = 0; k < i; k++)
+ rte_pktmbuf_free(mbufs[k]);
+ return -1;
+ }
+ }
+
+ tsc_start = rte_rdtsc();
+
+ while (total_enqueued < pparams->total_operations) {
+ if ((total_enqueued + pparams->burst_size) <=
+ pparams->total_operations)
+ burst_size = pparams->burst_size;
+ else
+ burst_size = pparams->total_operations - total_enqueued;
+
+ ops_needed = burst_size - ops_unused;
+
+ if (ops_needed != rte_crypto_op_bulk_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)){
+ printf("\nFailed to alloc enough ops, finish dequeuing "
+ "and free ops below.");
+ } else {
+ for (i = 0; i < ops_needed; i++)
+ ops[i] = test_perf_set_crypto_op_aes(ops[i],
+ mbufs[i + (pparams->burst_size *
+ (j % NUM_MBUF_SETS))], sess,
+ pparams->buf_size, digest_length,
+ pparams->chain);
+
+ /* enqueue burst */
+ burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
+ queue_id, ops, burst_size);
+
+ if (burst_enqueued < burst_size)
+ retries++;
+
+ ops_unused = burst_size - burst_enqueued;
+ total_enqueued += burst_enqueued;
+ }
+
+ /* dequeue burst */
+ burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+ proc_ops, pparams->burst_size);
+ if (burst_dequeued == 0)
+ failed_polls++;
+ else {
+ processed += burst_dequeued;
+
+ for (l = 0; l < burst_dequeued; l++)
+ rte_crypto_op_free(proc_ops[l]);
+ }
+ j++;
+ }
+
+ /* Dequeue any operations still in the crypto device */
+ while (processed < pparams->total_operations) {
+ /* Sending 0 length burst to flush sw crypto device */
+ rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
+
+ /* dequeue burst */
+ burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+ proc_ops, pparams->burst_size);
+ if (burst_dequeued == 0)
+ failed_polls++;
+ else {
+ processed += burst_dequeued;
+
+ for (m = 0; m < burst_dequeued; m++)
+ rte_crypto_op_free(proc_ops[m]);
+ }
+ }
+
+ tsc_end = rte_rdtsc();
+
+ double ops_s = ((double)processed / (tsc_end - tsc_start))
+ * rte_get_tsc_hz();
+ double throughput = (ops_s * pparams->buf_size * NUM_MBUF_SETS)
+ / 1000000000;
+
+ printf("\t%u\t%6.2f\t%10.2f\t%8"PRIu64"\t%8"PRIu64, pparams->buf_size,
+ ops_s / 1000000, throughput, retries, failed_polls);
+
+ for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
+ rte_pktmbuf_free(mbufs[i]);
+
+ printf("\n");
+ return TEST_SUCCESS;
+}
+
/*
perf_test_aes_sha("avx2", HASH_CIPHER, 16, CBC, SHA1);
@@ -3385,8 +3740,13 @@ test_perf_aes_cbc_encrypt_digest_vary_pkt_size(void)
struct perf_test_params params_set[] = {
{
+ .chain = CIPHER_ONLY,
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_NULL
+ },
+ {
.chain = CIPHER_HASH,
-
.cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
.cipher_key_length = 16,
.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
@@ -3669,6 +4029,125 @@ test_perf_openssl_vary_burst_size(void)
}
static int
+test_perf_armv8_vary_pkt_size(void)
+{
+ unsigned int total_operations = 100000;
+ unsigned int burst_size = { 64 };
+ unsigned int buf_lengths[] = { 64, 128, 256, 512, 768, 1024, 1280, 1536,
+ 1792, 2048 };
+ uint8_t i, j;
+
+ struct perf_test_params params_set[] = {
+ {
+ .chain = CIPHER_HASH,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+ },
+ {
+ .chain = HASH_CIPHER,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+ },
+ {
+ .chain = CIPHER_HASH,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+ },
+ {
+ .chain = HASH_CIPHER,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+ },
+ };
+
+ for (i = 0; i < RTE_DIM(params_set); i++) {
+ params_set[i].total_operations = total_operations;
+ params_set[i].burst_size = burst_size;
+ printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
+ " burst_size: %d ops\n",
+ chain_mode_name(params_set[i].chain),
+ cipher_algo_name(params_set[i].cipher_algo),
+ auth_algo_name(params_set[i].auth_algo),
+ params_set[i].cipher_key_length,
+ burst_size);
+ printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\tRetries\t"
+ "EmptyPolls\n");
+ for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+ params_set[i].buf_size = buf_lengths[j];
+ test_perf_armv8(testsuite_params.dev_id, 0,
+ &params_set[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int
+test_perf_armv8_vary_burst_size(void)
+{
+ unsigned int total_operations = 4096;
+ uint16_t buf_lengths[] = { 64 };
+ uint8_t i, j;
+
+ struct perf_test_params params_set[] = {
+ {
+ .chain = CIPHER_HASH,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+ },
+ {
+ .chain = HASH_CIPHER,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+ },
+ {
+ .chain = CIPHER_HASH,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+ },
+ {
+ .chain = HASH_CIPHER,
+
+ .cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .cipher_key_length = 16,
+ .auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+ },
+ };
+
+ printf("\n\nStart %s.", __func__);
+ printf("\nThis Test measures the average IA cycle cost using a "
+ "constant request(packet) size. ");
+ printf("Cycle cost is only valid when indicators show device is "
+ "not busy, i.e. Retries and EmptyPolls = 0");
+
+ for (i = 0; i < RTE_DIM(params_set); i++) {
+ printf("\n");
+ params_set[i].total_operations = total_operations;
+
+ for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+ params_set[i].buf_size = buf_lengths[j];
+ test_perf_armv8_optimise_cyclecount(&params_set[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int
test_perf_aes_cbc_vary_burst_size(void)
{
return test_perf_crypto_qp_vary_burst_size(testsuite_params.dev_id);
@@ -4172,6 +4651,17 @@ static struct unit_test_suite cryptodev_testsuite = {
}
};
+static struct unit_test_suite cryptodev_dpaa2_sec_testsuite = {
+ .suite_name = "Crypto Device DPAA2_SEC Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_perf_aes_cbc_encrypt_digest_vary_pkt_size),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
static struct unit_test_suite cryptodev_gcm_testsuite = {
.suite_name = "Crypto Device AESNI GCM Unit Test Suite",
.setup = testsuite_setup,
@@ -4220,6 +4710,19 @@ static struct unit_test_suite cryptodev_openssl_testsuite = {
}
};
+static struct unit_test_suite cryptodev_armv8_testsuite = {
+ .suite_name = "Crypto Device ARMv8 Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_perf_armv8_vary_pkt_size),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_perf_armv8_vary_burst_size),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
static int
perftest_aesni_gcm_cryptodev(void)
{
@@ -4276,6 +4779,22 @@ perftest_qat_continual_cryptodev(void)
return unit_test_suite_runner(&cryptodev_qat_continual_testsuite);
}
+static int
+perftest_sw_armv8_cryptodev(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_ARMV8_PMD;
+
+ return unit_test_suite_runner(&cryptodev_armv8_testsuite);
+}
+
+static int
+perftest_dpaa2_sec_cryptodev(void)
+{
+ gbl_cryptodev_perftest_devtype = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+
+ return unit_test_suite_runner(&cryptodev_dpaa2_sec_testsuite);
+}
+
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_perftest, perftest_aesni_mb_cryptodev);
REGISTER_TEST_COMMAND(cryptodev_qat_perftest, perftest_qat_cryptodev);
REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_perftest, perftest_sw_snow3g_cryptodev);
@@ -4285,3 +4804,7 @@ REGISTER_TEST_COMMAND(cryptodev_openssl_perftest,
perftest_openssl_cryptodev);
REGISTER_TEST_COMMAND(cryptodev_qat_continual_perftest,
perftest_qat_continual_cryptodev);
+REGISTER_TEST_COMMAND(cryptodev_sw_armv8_perftest,
+ perftest_sw_armv8_cryptodev);
+REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_perftest,
+ perftest_dpaa2_sec_cryptodev);
diff --git a/app/test/test_cryptodev_snow3g_hash_test_vectors.h b/test/test/test_cryptodev_snow3g_hash_test_vectors.h
index a8a47db5..a8a47db5 100644
--- a/app/test/test_cryptodev_snow3g_hash_test_vectors.h
+++ b/test/test/test_cryptodev_snow3g_hash_test_vectors.h
diff --git a/app/test/test_cryptodev_snow3g_test_vectors.h b/test/test/test_cryptodev_snow3g_test_vectors.h
index 51917c14..51917c14 100644
--- a/app/test/test_cryptodev_snow3g_test_vectors.h
+++ b/test/test/test_cryptodev_snow3g_test_vectors.h
diff --git a/app/test/test_cryptodev_zuc_test_vectors.h b/test/test/test_cryptodev_zuc_test_vectors.h
index 03a3d1f4..a900e916 100644
--- a/app/test/test_cryptodev_zuc_test_vectors.h
+++ b/test/test/test_cryptodev_zuc_test_vectors.h
@@ -33,7 +33,7 @@
#ifndef TEST_CRYPTODEV_ZUC_TEST_VECTORS_H_
#define TEST_CRYPTODEV_ZUC_TEST_VECTORS_H_
-struct zuc_test_data {
+struct wireless_test_data {
struct {
uint8_t data[64];
unsigned len;
@@ -45,12 +45,12 @@ struct zuc_test_data {
} iv;
struct {
- uint8_t data[1024];
+ uint8_t data[2048];
unsigned len; /* length must be in Bits */
} plaintext;
struct {
- uint8_t data[1024];
+ uint8_t data[2048];
unsigned len; /* length must be in Bits */
} ciphertext;
@@ -84,7 +84,7 @@ struct zuc_test_data {
unsigned len;
} digest;
};
-struct zuc_test_data zuc_test_case_1 = {
+static struct wireless_test_data zuc_test_case_cipher_193b = {
.key = {
.data = {
0x17, 0x3D, 0x14, 0xBA, 0x50, 0x03, 0x73, 0x1D,
@@ -128,7 +128,7 @@ struct zuc_test_data zuc_test_case_1 = {
}
};
-struct zuc_test_data zuc_test_case_2 = {
+static struct wireless_test_data zuc_test_case_cipher_800b = {
.key = {
.data = {
0xE5, 0xBD, 0x3E, 0xA0, 0xEB, 0x55, 0xAD, 0xE8,
@@ -190,7 +190,7 @@ struct zuc_test_data zuc_test_case_2 = {
}
};
-struct zuc_test_data zuc_test_case_3 = {
+static struct wireless_test_data zuc_test_case_cipher_1570b = {
.key = {
.data = {
0xD4, 0x55, 0x2A, 0x8F, 0xD6, 0xE6, 0x1C, 0xC8,
@@ -274,26 +274,9 @@ struct zuc_test_data zuc_test_case_3 = {
.validCipherOffsetLenInBits = {
.len = 128
},
- .aad = {
- .data = {
- 0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00,
- 0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00
- },
- .len = 16
- },
- .digest = {
- .data = {0xE8, 0x60, 0x5A, 0x3E},
- .len = 4
- },
- .validAuthLenInBits = {
- .len = 120
- },
- .validAuthOffsetLenInBits = {
- .len = 128
- }
};
-struct zuc_test_data zuc_test_case_4 = {
+static struct wireless_test_data zuc_test_case_cipher_2798b = {
.key = {
.data = {
0xDB, 0x84, 0xB4, 0xFB, 0xCC, 0xDA, 0x56, 0x3B,
@@ -417,7 +400,7 @@ struct zuc_test_data zuc_test_case_4 = {
}
};
-struct zuc_test_data zuc_test_case_5 = {
+static struct wireless_test_data zuc_test_case_cipher_4019b = {
.key = {
.data = {
0xE1, 0x3F, 0xED, 0x21, 0xB4, 0x6E, 0x4E, 0x7E,
@@ -579,4 +562,569 @@ struct zuc_test_data zuc_test_case_5 = {
}
};
+static struct wireless_test_data zuc_test_case_cipher_200b_auth_200b = {
+ .key = {
+ .data = {
+ 0x17, 0x3D, 0x14, 0xBA, 0x50, 0x03, 0x73, 0x1D,
+ 0x7A, 0x60, 0x04, 0x94, 0x70, 0xF0, 0x0A, 0x29
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x66, 0x03, 0x54, 0x92, 0x78, 0x00, 0x00, 0x00,
+ 0x66, 0x03, 0x54, 0x92, 0x78, 0x00, 0x00, 0x00
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0x6C, 0xF6, 0x53, 0x40, 0x73, 0x55, 0x52, 0xAB,
+ 0x0C, 0x97, 0x52, 0xFA, 0x6F, 0x90, 0x25, 0xFE,
+ 0x0B, 0xD6, 0x75, 0xD9, 0x00, 0x58, 0x75, 0xB2,
+ 0x00
+ },
+ .len = 200
+ },
+ .ciphertext = {
+ .data = {
+ 0xA6, 0xC8, 0x5F, 0xC6, 0x6A, 0xFB, 0x85, 0x33,
+ 0xAA, 0xFC, 0x25, 0x18, 0xDF, 0xE7, 0x84, 0x94,
+ 0x0E, 0xE1, 0xE4, 0xB0, 0x30, 0x23, 0x8C, 0xC8,
+ 0x10
+ },
+ .len = 200
+ },
+ .validDataLenInBits = {
+ .len = 200
+ },
+ .validCipherLenInBits = {
+ .len = 200
+ },
+ .validCipherOffsetLenInBits = {
+ .len = 128
+ },
+ .aad = {
+ .data = {
+ 0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00,
+ 0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00
+ },
+ .len = 16
+ },
+ .digest = {
+ .data = {0x01, 0xFE, 0x5E, 0x38},
+ .len = 4
+ },
+ .validAuthLenInBits = {
+ .len = 200
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 128
+ }
+};
+
+static struct wireless_test_data zuc_test_case_cipher_800b_auth_120b = {
+ .key = {
+ .data = {
+ 0xE5, 0xBD, 0x3E, 0xA0, 0xEB, 0x55, 0xAD, 0xE8,
+ 0x66, 0xC6, 0xAC, 0x58, 0xBD, 0x54, 0x30, 0x2A
+ },
+ .len = 16
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x05, 0x68, 0x23, 0xC4, 0x00, 0x00, 0x00,
+ 0x00, 0x05, 0x68, 0x23, 0xC4, 0x00, 0x00, 0x00
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0x14, 0xA8, 0xEF, 0x69, 0x3D, 0x67, 0x85, 0x07,
+ 0xBB, 0xE7, 0x27, 0x0A, 0x7F, 0x67, 0xFF, 0x50,
+ 0x06, 0xC3, 0x52, 0x5B, 0x98, 0x07, 0xE4, 0x67,
+ 0xC4, 0xE5, 0x60, 0x00, 0xBA, 0x33, 0x8F, 0x5D,
+ 0x42, 0x95, 0x59, 0x03, 0x67, 0x51, 0x82, 0x22,
+ 0x46, 0xC8, 0x0D, 0x3B, 0x38, 0xF0, 0x7F, 0x4B,
+ 0xE2, 0xD8, 0xFF, 0x58, 0x05, 0xF5, 0x13, 0x22,
+ 0x29, 0xBD, 0xE9, 0x3B, 0xBB, 0xDC, 0xAF, 0x38,
+ 0x2B, 0xF1, 0xEE, 0x97, 0x2F, 0xBF, 0x99, 0x77,
+ 0xBA, 0xDA, 0x89, 0x45, 0x84, 0x7A, 0x2A, 0x6C,
+ 0x9A, 0xD3, 0x4A, 0x66, 0x75, 0x54, 0xE0, 0x4D,
+ 0x1F, 0x7F, 0xA2, 0xC3, 0x32, 0x41, 0xBD, 0x8F,
+ 0x01, 0xBA, 0x22, 0x0D
+ },
+ .len = 800
+ },
+ .ciphertext = {
+ .data = {
+ 0x13, 0x1D, 0x43, 0xE0, 0xDE, 0xA1, 0xBE, 0x5C,
+ 0x5A, 0x1B, 0xFD, 0x97, 0x1D, 0x85, 0x2C, 0xBF,
+ 0x71, 0x2D, 0x7B, 0x4F, 0x57, 0x96, 0x1F, 0xEA,
+ 0x32, 0x08, 0xAF, 0xA8, 0xBC, 0xA4, 0x33, 0xF4,
+ 0x56, 0xAD, 0x09, 0xC7, 0x41, 0x7E, 0x58, 0xBC,
+ 0x69, 0xCF, 0x88, 0x66, 0xD1, 0x35, 0x3F, 0x74,
+ 0x86, 0x5E, 0x80, 0x78, 0x1D, 0x20, 0x2D, 0xFB,
+ 0x3E, 0xCF, 0xF7, 0xFC, 0xBC, 0x3B, 0x19, 0x0F,
+ 0xE8, 0x2A, 0x20, 0x4E, 0xD0, 0xE3, 0x50, 0xFC,
+ 0x0F, 0x6F, 0x26, 0x13, 0xB2, 0xF2, 0xBC, 0xA6,
+ 0xDF, 0x5A, 0x47, 0x3A, 0x57, 0xA4, 0xA0, 0x0D,
+ 0x98, 0x5E, 0xBA, 0xD8, 0x80, 0xD6, 0xF2, 0x38,
+ 0x64, 0xA0, 0x7B, 0x01
+ },
+ .len = 800
+ },
+ .validDataLenInBits = {
+ .len = 800
+ },
+ .validCipherLenInBits = {
+ .len = 800
+ },
+ .validCipherOffsetLenInBits = {
+ .len = 128
+ },
+ .aad = {
+ .data = {
+ 0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00,
+ 0xFA, 0x55, 0x6B, 0x26, 0x1C, 0x00, 0x00, 0x00
+ },
+ .len = 16
+ },
+ .digest = {
+ .data = {0x9D, 0x42, 0x1C, 0xEA},
+ .len = 4
+ },
+ .validAuthLenInBits = {
+ .len = 120
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 128
+ }
+};
+
+struct wireless_test_data zuc_test_case_auth_1b = {
+ .key = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {0x00},
+ .len = 8
+ },
+ .validAuthLenInBits = {
+ .len = 1
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 128
+ },
+ .digest = {
+ .data = {0xC8, 0xA9, 0x59, 0x5E},
+ .len = 4
+ }
+};
+
+struct wireless_test_data zuc_test_case_auth_90b = {
+ .key = {
+ .data = {
+ 0x47, 0x05, 0x41, 0x25, 0x56, 0x1E, 0xB2, 0xDD,
+ 0xA9, 0x40, 0x59, 0xDA, 0x05, 0x09, 0x78, 0x50
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0x56, 0x1E, 0xB2, 0xDD, 0xA0, 0x00, 0x00, 0x00,
+ 0x56, 0x1E, 0xB2, 0xDD, 0xA0, 0x00, 0x00, 0x00
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 96
+ },
+ .validAuthLenInBits = {
+ .len = 90
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 128
+ },
+ .digest = {
+ .data = {0x67, 0x19, 0xA0, 0x88},
+ .len = 4
+ }
+};
+
+struct wireless_test_data zuc_test_case_auth_577b = {
+ .key = {
+ .data = {
+ 0xC9, 0xE6, 0xCE, 0xC4, 0x60, 0x7C, 0x72, 0xDB,
+ 0x00, 0x0A, 0xEF, 0xA8, 0x83, 0x85, 0xAB, 0x0A
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0xA9, 0x40, 0x59, 0xDA, 0x50, 0x00, 0x00, 0x00,
+ 0x29, 0x40, 0x59, 0xDA, 0x50, 0x00, 0x80, 0x00
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0x98, 0x3B, 0x41, 0xD4, 0x7D, 0x78, 0x0C, 0x9E,
+ 0x1A, 0xD1, 0x1D, 0x7E, 0xB7, 0x03, 0x91, 0xB1,
+ 0xDE, 0x0B, 0x35, 0xDA, 0x2D, 0xC6, 0x2F, 0x83,
+ 0xE7, 0xB7, 0x8D, 0x63, 0x06, 0xCA, 0x0E, 0xA0,
+ 0x7E, 0x94, 0x1B, 0x7B, 0xE9, 0x13, 0x48, 0xF9,
+ 0xFC, 0xB1, 0x70, 0xE2, 0x21, 0x7F, 0xEC, 0xD9,
+ 0x7F, 0x9F, 0x68, 0xAD, 0xB1, 0x6E, 0x5D, 0x7D,
+ 0x21, 0xE5, 0x69, 0xD2, 0x80, 0xED, 0x77, 0x5C,
+ 0xEB, 0xDE, 0x3F, 0x40, 0x93, 0xC5, 0x38, 0x81,
+ 0x00
+ },
+ .len = 584
+ },
+ .validAuthLenInBits = {
+ .len = 577
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 128
+ },
+ .digest = {
+ .data = {0xFA, 0xE8, 0xFF, 0x0B},
+ .len = 4
+ }
+};
+
+struct wireless_test_data zuc_test_case_auth_2079b = {
+ .key = {
+ .data = {
+ 0xC8, 0xA4, 0x82, 0x62, 0xD0, 0xC2, 0xE2, 0xBA,
+ 0xC4, 0xB9, 0x6E, 0xF7, 0x7E, 0x80, 0xCA, 0x59
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0x05, 0x09, 0x78, 0x50, 0x80, 0x00, 0x00, 0x00,
+ 0x85, 0x09, 0x78, 0x50, 0x80, 0x00, 0x80, 0x00
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0xB5, 0x46, 0x43, 0x0B, 0xF8, 0x7B, 0x4F, 0x1E,
+ 0xE8, 0x34, 0x70, 0x4C, 0xD6, 0x95, 0x1C, 0x36,
+ 0xE2, 0x6F, 0x10, 0x8C, 0xF7, 0x31, 0x78, 0x8F,
+ 0x48, 0xDC, 0x34, 0xF1, 0x67, 0x8C, 0x05, 0x22,
+ 0x1C, 0x8F, 0xA7, 0xFF, 0x2F, 0x39, 0xF4, 0x77,
+ 0xE7, 0xE4, 0x9E, 0xF6, 0x0A, 0x4E, 0xC2, 0xC3,
+ 0xDE, 0x24, 0x31, 0x2A, 0x96, 0xAA, 0x26, 0xE1,
+ 0xCF, 0xBA, 0x57, 0x56, 0x38, 0x38, 0xB2, 0x97,
+ 0xF4, 0x7E, 0x85, 0x10, 0xC7, 0x79, 0xFD, 0x66,
+ 0x54, 0xB1, 0x43, 0x38, 0x6F, 0xA6, 0x39, 0xD3,
+ 0x1E, 0xDB, 0xD6, 0xC0, 0x6E, 0x47, 0xD1, 0x59,
+ 0xD9, 0x43, 0x62, 0xF2, 0x6A, 0xEE, 0xED, 0xEE,
+ 0x0E, 0x4F, 0x49, 0xD9, 0xBF, 0x84, 0x12, 0x99,
+ 0x54, 0x15, 0xBF, 0xAD, 0x56, 0xEE, 0x82, 0xD1,
+ 0xCA, 0x74, 0x63, 0xAB, 0xF0, 0x85, 0xB0, 0x82,
+ 0xB0, 0x99, 0x04, 0xD6, 0xD9, 0x90, 0xD4, 0x3C,
+ 0xF2, 0xE0, 0x62, 0xF4, 0x08, 0x39, 0xD9, 0x32,
+ 0x48, 0xB1, 0xEB, 0x92, 0xCD, 0xFE, 0xD5, 0x30,
+ 0x0B, 0xC1, 0x48, 0x28, 0x04, 0x30, 0xB6, 0xD0,
+ 0xCA, 0xA0, 0x94, 0xB6, 0xEC, 0x89, 0x11, 0xAB,
+ 0x7D, 0xC3, 0x68, 0x24, 0xB8, 0x24, 0xDC, 0x0A,
+ 0xF6, 0x68, 0x2B, 0x09, 0x35, 0xFD, 0xE7, 0xB4,
+ 0x92, 0xA1, 0x4D, 0xC2, 0xF4, 0x36, 0x48, 0x03,
+ 0x8D, 0xA2, 0xCF, 0x79, 0x17, 0x0D, 0x2D, 0x50,
+ 0x13, 0x3F, 0xD4, 0x94, 0x16, 0xCB, 0x6E, 0x33,
+ 0xBE, 0xA9, 0x0B, 0x8B, 0xF4, 0x55, 0x9B, 0x03,
+ 0x73, 0x2A, 0x01, 0xEA, 0x29, 0x0E, 0x6D, 0x07,
+ 0x4F, 0x79, 0xBB, 0x83, 0xC1, 0x0E, 0x58, 0x00,
+ 0x15, 0xCC, 0x1A, 0x85, 0xB3, 0x6B, 0x55, 0x01,
+ 0x04, 0x6E, 0x9C, 0x4B, 0xDC, 0xAE, 0x51, 0x35,
+ 0x69, 0x0B, 0x86, 0x66, 0xBD, 0x54, 0xB7, 0xA7,
+ 0x03, 0xEA, 0x7B, 0x6F, 0x22, 0x0A, 0x54, 0x69,
+ 0xA5, 0x68, 0x02, 0x7E
+ },
+ .len = 2080
+ },
+ .validAuthLenInBits = {
+ .len = 2079
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 128
+ },
+ .digest = {
+ .data = {0x00, 0x4A, 0xC4, 0xD6},
+ .len = 4
+ }
+};
+
+struct wireless_test_data zuc_test_auth_5670b = {
+ .key = {
+ .data = {
+ 0x6B, 0x8B, 0x08, 0xEE, 0x79, 0xE0, 0xB5, 0x98,
+ 0x2D, 0x6D, 0x12, 0x8E, 0xA9, 0xF2, 0x20, 0xCB
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0x56, 0x1E, 0xB2, 0xDD, 0xE0, 0x00, 0x00, 0x00,
+ 0x56, 0x1E, 0xB2, 0xDD, 0xE0, 0x00, 0x00, 0x00
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0x5B, 0xAD, 0x72, 0x47, 0x10, 0xBA, 0x1C, 0x56,
+ 0xD5, 0xA3, 0x15, 0xF8, 0xD4, 0x0F, 0x6E, 0x09,
+ 0x37, 0x80, 0xBE, 0x8E, 0x8D, 0xE0, 0x7B, 0x69,
+ 0x92, 0x43, 0x20, 0x18, 0xE0, 0x8E, 0xD9, 0x6A,
+ 0x57, 0x34, 0xAF, 0x8B, 0xAD, 0x8A, 0x57, 0x5D,
+ 0x3A, 0x1F, 0x16, 0x2F, 0x85, 0x04, 0x5C, 0xC7,
+ 0x70, 0x92, 0x55, 0x71, 0xD9, 0xF5, 0xB9, 0x4E,
+ 0x45, 0x4A, 0x77, 0xC1, 0x6E, 0x72, 0x93, 0x6B,
+ 0xF0, 0x16, 0xAE, 0x15, 0x74, 0x99, 0xF0, 0x54,
+ 0x3B, 0x5D, 0x52, 0xCA, 0xA6, 0xDB, 0xEA, 0xB6,
+ 0x97, 0xD2, 0xBB, 0x73, 0xE4, 0x1B, 0x80, 0x75,
+ 0xDC, 0xE7, 0x9B, 0x4B, 0x86, 0x04, 0x4F, 0x66,
+ 0x1D, 0x44, 0x85, 0xA5, 0x43, 0xDD, 0x78, 0x60,
+ 0x6E, 0x04, 0x19, 0xE8, 0x05, 0x98, 0x59, 0xD3,
+ 0xCB, 0x2B, 0x67, 0xCE, 0x09, 0x77, 0x60, 0x3F,
+ 0x81, 0xFF, 0x83, 0x9E, 0x33, 0x18, 0x59, 0x54,
+ 0x4C, 0xFB, 0xC8, 0xD0, 0x0F, 0xEF, 0x1A, 0x4C,
+ 0x85, 0x10, 0xFB, 0x54, 0x7D, 0x6B, 0x06, 0xC6,
+ 0x11, 0xEF, 0x44, 0xF1, 0xBC, 0xE1, 0x07, 0xCF,
+ 0xA4, 0x5A, 0x06, 0xAA, 0xB3, 0x60, 0x15, 0x2B,
+ 0x28, 0xDC, 0x1E, 0xBE, 0x6F, 0x7F, 0xE0, 0x9B,
+ 0x05, 0x16, 0xF9, 0xA5, 0xB0, 0x2A, 0x1B, 0xD8,
+ 0x4B, 0xB0, 0x18, 0x1E, 0x2E, 0x89, 0xE1, 0x9B,
+ 0xD8, 0x12, 0x59, 0x30, 0xD1, 0x78, 0x68, 0x2F,
+ 0x38, 0x62, 0xDC, 0x51, 0xB6, 0x36, 0xF0, 0x4E,
+ 0x72, 0x0C, 0x47, 0xC3, 0xCE, 0x51, 0xAD, 0x70,
+ 0xD9, 0x4B, 0x9B, 0x22, 0x55, 0xFB, 0xAE, 0x90,
+ 0x65, 0x49, 0xF4, 0x99, 0xF8, 0xC6, 0xD3, 0x99,
+ 0x47, 0xED, 0x5E, 0x5D, 0xF8, 0xE2, 0xDE, 0xF1,
+ 0x13, 0x25, 0x3E, 0x7B, 0x08, 0xD0, 0xA7, 0x6B,
+ 0x6B, 0xFC, 0x68, 0xC8, 0x12, 0xF3, 0x75, 0xC7,
+ 0x9B, 0x8F, 0xE5, 0xFD, 0x85, 0x97, 0x6A, 0xA6,
+ 0xD4, 0x6B, 0x4A, 0x23, 0x39, 0xD8, 0xAE, 0x51,
+ 0x47, 0xF6, 0x80, 0xFB, 0xE7, 0x0F, 0x97, 0x8B,
+ 0x38, 0xEF, 0xFD, 0x7B, 0x2F, 0x78, 0x66, 0xA2,
+ 0x25, 0x54, 0xE1, 0x93, 0xA9, 0x4E, 0x98, 0xA6,
+ 0x8B, 0x74, 0xBD, 0x25, 0xBB, 0x2B, 0x3F, 0x5F,
+ 0xB0, 0xA5, 0xFD, 0x59, 0x88, 0x7F, 0x9A, 0xB6,
+ 0x81, 0x59, 0xB7, 0x17, 0x8D, 0x5B, 0x7B, 0x67,
+ 0x7C, 0xB5, 0x46, 0xBF, 0x41, 0xEA, 0xDC, 0xA2,
+ 0x16, 0xFC, 0x10, 0x85, 0x01, 0x28, 0xF8, 0xBD,
+ 0xEF, 0x5C, 0x8D, 0x89, 0xF9, 0x6A, 0xFA, 0x4F,
+ 0xA8, 0xB5, 0x48, 0x85, 0x56, 0x5E, 0xD8, 0x38,
+ 0xA9, 0x50, 0xFE, 0xE5, 0xF1, 0xC3, 0xB0, 0xA4,
+ 0xF6, 0xFB, 0x71, 0xE5, 0x4D, 0xFD, 0x16, 0x9E,
+ 0x82, 0xCE, 0xCC, 0x72, 0x66, 0xC8, 0x50, 0xE6,
+ 0x7C, 0x5E, 0xF0, 0xBA, 0x96, 0x0F, 0x52, 0x14,
+ 0x06, 0x0E, 0x71, 0xEB, 0x17, 0x2A, 0x75, 0xFC,
+ 0x14, 0x86, 0x83, 0x5C, 0xBE, 0xA6, 0x53, 0x44,
+ 0x65, 0xB0, 0x55, 0xC9, 0x6A, 0x72, 0xE4, 0x10,
+ 0x52, 0x24, 0x18, 0x23, 0x25, 0xD8, 0x30, 0x41,
+ 0x4B, 0x40, 0x21, 0x4D, 0xAA, 0x80, 0x91, 0xD2,
+ 0xE0, 0xFB, 0x01, 0x0A, 0xE1, 0x5C, 0x6D, 0xE9,
+ 0x08, 0x50, 0x97, 0x3B, 0xDF, 0x1E, 0x42, 0x3B,
+ 0xE1, 0x48, 0xA2, 0x37, 0xB8, 0x7A, 0x0C, 0x9F,
+ 0x34, 0xD4, 0xB4, 0x76, 0x05, 0xB8, 0x03, 0xD7,
+ 0x43, 0xA8, 0x6A, 0x90, 0x39, 0x9A, 0x4A, 0xF3,
+ 0x96, 0xD3, 0xA1, 0x20, 0x0A, 0x62, 0xF3, 0xD9,
+ 0x50, 0x79, 0x62, 0xE8, 0xE5, 0xBE, 0xE6, 0xD3,
+ 0xDA, 0x2B, 0xB3, 0xF7, 0x23, 0x76, 0x64, 0xAC,
+ 0x7A, 0x29, 0x28, 0x23, 0x90, 0x0B, 0xC6, 0x35,
+ 0x03, 0xB2, 0x9E, 0x80, 0xD6, 0x3F, 0x60, 0x67,
+ 0xBF, 0x8E, 0x17, 0x16, 0xAC, 0x25, 0xBE, 0xBA,
+ 0x35, 0x0D, 0xEB, 0x62, 0xA9, 0x9F, 0xE0, 0x31,
+ 0x85, 0xEB, 0x4F, 0x69, 0x93, 0x7E, 0xCD, 0x38,
+ 0x79, 0x41, 0xFD, 0xA5, 0x44, 0xBA, 0x67, 0xDB,
+ 0x09, 0x11, 0x77, 0x49, 0x38, 0xB0, 0x18, 0x27,
+ 0xBC, 0xC6, 0x9C, 0x92, 0xB3, 0xF7, 0x72, 0xA9,
+ 0xD2, 0x85, 0x9E, 0xF0, 0x03, 0x39, 0x8B, 0x1F,
+ 0x6B, 0xBA, 0xD7, 0xB5, 0x74, 0xF7, 0x98, 0x9A,
+ 0x1D, 0x10, 0xB2, 0xDF, 0x79, 0x8E, 0x0D, 0xBF,
+ 0x30, 0xD6, 0x58, 0x74, 0x64, 0xD2, 0x48, 0x78,
+ 0xCD, 0x00, 0xC0, 0xEA, 0xEE, 0x8A, 0x1A, 0x0C,
+ 0xC7, 0x53, 0xA2, 0x79, 0x79, 0xE1, 0x1B, 0x41,
+ 0xDB, 0x1D, 0xE3, 0xD5, 0x03, 0x8A, 0xFA, 0xF4,
+ 0x9F, 0x5C, 0x68, 0x2C, 0x37, 0x48, 0xD8, 0xA3,
+ 0xA9, 0xEC, 0x54, 0xE6, 0xA3, 0x71, 0x27, 0x5F,
+ 0x16, 0x83, 0x51, 0x0F, 0x8E, 0x4F, 0x90, 0x93,
+ 0x8F, 0x9A, 0xB6, 0xE1, 0x34, 0xC2, 0xCF, 0xDF,
+ 0x48, 0x41, 0xCB, 0xA8, 0x8E, 0x0C, 0xFF, 0x2B,
+ 0x0B, 0xCC, 0x8E, 0x6A, 0xDC, 0xB7, 0x11, 0x09,
+ 0xB5, 0x19, 0x8F, 0xEC, 0xF1, 0xBB, 0x7E, 0x5C,
+ 0x53, 0x1A, 0xCA, 0x50, 0xA5, 0x6A, 0x8A, 0x3B,
+ 0x6D, 0xE5, 0x98, 0x62, 0xD4, 0x1F, 0xA1, 0x13,
+ 0xD9, 0xCD, 0x95, 0x78, 0x08, 0xF0, 0x85, 0x71,
+ 0xD9, 0xA4, 0xBB, 0x79, 0x2A, 0xF2, 0x71, 0xF6,
+ 0xCC, 0x6D, 0xBB, 0x8D, 0xC7, 0xEC, 0x36, 0xE3,
+ 0x6B, 0xE1, 0xED, 0x30, 0x81, 0x64, 0xC3, 0x1C,
+ 0x7C, 0x0A, 0xFC, 0x54, 0x1C
+ },
+ .len = 5672
+ },
+ .validAuthLenInBits = {
+ .len = 5670
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 128
+ },
+ .digest = {
+ .data = {0x0C, 0xA1, 0x27, 0x92},
+ .len = 4
+ }
+};
+
+static struct wireless_test_data zuc_test_case_auth_128b = {
+ .key = {
+ .data = { 0x0 },
+ .len = 16
+ },
+ .aad = {
+ .data = { 0x0 },
+ .len = 16
+ },
+ .plaintext = {
+ .data = { 0x0 },
+ .len = 8
+ },
+ .validAuthLenInBits = {
+ .len = 8
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 128
+ },
+ .digest = {
+ .data = { 0x39, 0x0a, 0x91, 0xb7 },
+ .len = 4
+ }
+};
+
+static struct wireless_test_data zuc_test_case_auth_2080b = {
+ .key = {
+ .data = {
+ 0xC8, 0xA4, 0x82, 0x62, 0xD0, 0xC2, 0xE2, 0xBA,
+ 0xC4, 0xB9, 0x6E, 0xF7, 0x7E, 0x80, 0xCA, 0x59
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0x05, 0x09, 0x78, 0x50, 0x80, 0x00, 0x00, 0x00,
+ 0x85, 0x09, 0x78, 0x50, 0x80, 0x00, 0x80, 0x00
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0xB5, 0x46, 0x43, 0x0B, 0xF8, 0x7B, 0x4F, 0x1E,
+ 0xE8, 0x34, 0x70, 0x4C, 0xD6, 0x95, 0x1C, 0x36,
+ 0xE2, 0x6F, 0x10, 0x8C, 0xF7, 0x31, 0x78, 0x8F,
+ 0x48, 0xDC, 0x34, 0xF1, 0x67, 0x8C, 0x05, 0x22,
+ 0x1C, 0x8F, 0xA7, 0xFF, 0x2F, 0x39, 0xF4, 0x77,
+ 0xE7, 0xE4, 0x9E, 0xF6, 0x0A, 0x4E, 0xC2, 0xC3,
+ 0xDE, 0x24, 0x31, 0x2A, 0x96, 0xAA, 0x26, 0xE1,
+ 0xCF, 0xBA, 0x57, 0x56, 0x38, 0x38, 0xB2, 0x97,
+ 0xF4, 0x7E, 0x85, 0x10, 0xC7, 0x79, 0xFD, 0x66,
+ 0x54, 0xB1, 0x43, 0x38, 0x6F, 0xA6, 0x39, 0xD3,
+ 0x1E, 0xDB, 0xD6, 0xC0, 0x6E, 0x47, 0xD1, 0x59,
+ 0xD9, 0x43, 0x62, 0xF2, 0x6A, 0xEE, 0xED, 0xEE,
+ 0x0E, 0x4F, 0x49, 0xD9, 0xBF, 0x84, 0x12, 0x99,
+ 0x54, 0x15, 0xBF, 0xAD, 0x56, 0xEE, 0x82, 0xD1,
+ 0xCA, 0x74, 0x63, 0xAB, 0xF0, 0x85, 0xB0, 0x82,
+ 0xB0, 0x99, 0x04, 0xD6, 0xD9, 0x90, 0xD4, 0x3C,
+ 0xF2, 0xE0, 0x62, 0xF4, 0x08, 0x39, 0xD9, 0x32,
+ 0x48, 0xB1, 0xEB, 0x92, 0xCD, 0xFE, 0xD5, 0x30,
+ 0x0B, 0xC1, 0x48, 0x28, 0x04, 0x30, 0xB6, 0xD0,
+ 0xCA, 0xA0, 0x94, 0xB6, 0xEC, 0x89, 0x11, 0xAB,
+ 0x7D, 0xC3, 0x68, 0x24, 0xB8, 0x24, 0xDC, 0x0A,
+ 0xF6, 0x68, 0x2B, 0x09, 0x35, 0xFD, 0xE7, 0xB4,
+ 0x92, 0xA1, 0x4D, 0xC2, 0xF4, 0x36, 0x48, 0x03,
+ 0x8D, 0xA2, 0xCF, 0x79, 0x17, 0x0D, 0x2D, 0x50,
+ 0x13, 0x3F, 0xD4, 0x94, 0x16, 0xCB, 0x6E, 0x33,
+ 0xBE, 0xA9, 0x0B, 0x8B, 0xF4, 0x55, 0x9B, 0x03,
+ 0x73, 0x2A, 0x01, 0xEA, 0x29, 0x0E, 0x6D, 0x07,
+ 0x4F, 0x79, 0xBB, 0x83, 0xC1, 0x0E, 0x58, 0x00,
+ 0x15, 0xCC, 0x1A, 0x85, 0xB3, 0x6B, 0x55, 0x01,
+ 0x04, 0x6E, 0x9C, 0x4B, 0xDC, 0xAE, 0x51, 0x35,
+ 0x69, 0x0B, 0x86, 0x66, 0xBD, 0x54, 0xB7, 0xA7,
+ 0x03, 0xEA, 0x7B, 0x6F, 0x22, 0x0A, 0x54, 0x69,
+ 0xA5, 0x68, 0x02, 0x7E
+ },
+ .len = 2080
+ },
+ .validAuthLenInBits = {
+ .len = 2080
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 128
+ },
+ .digest = {
+ .data = {0x03, 0x95, 0x32, 0xe1},
+ .len = 4
+ }
+};
+
+static struct wireless_test_data zuc_test_case_auth_584b = {
+ .key = {
+ .data = {
+ 0xc9, 0xe6, 0xce, 0xc4, 0x60, 0x7c, 0x72, 0xdb,
+ 0x00, 0x0a, 0xef, 0xa8, 0x83, 0x85, 0xab, 0x0a
+ },
+ .len = 16
+ },
+ .aad = {
+ .data = {
+ 0xa9, 0x40, 0x59, 0xda, 0x50, 0x0, 0x0, 0x0,
+ 0x29, 0x40, 0x59, 0xda, 0x50, 0x0, 0x80, 0x0
+ },
+ .len = 16
+ },
+ .plaintext = {
+ .data = {
+ 0x98, 0x3b, 0x41, 0xd4, 0x7d, 0x78, 0x0c, 0x9e,
+ 0x1a, 0xd1, 0x1d, 0x7e, 0xb7, 0x03, 0x91, 0xb1,
+ 0xde, 0x0b, 0x35, 0xda, 0x2d, 0xc6, 0x2f, 0x83,
+ 0xe7, 0xb7, 0x8d, 0x63, 0x06, 0xca, 0x0e, 0xa0,
+ 0x7e, 0x94, 0x1b, 0x7b, 0xe9, 0x13, 0x48, 0xf9,
+ 0xfc, 0xb1, 0x70, 0xe2, 0x21, 0x7f, 0xec, 0xd9,
+ 0x7f, 0x9f, 0x68, 0xad, 0xb1, 0x6e, 0x5d, 0x7d,
+ 0x21, 0xe5, 0x69, 0xd2, 0x80, 0xed, 0x77, 0x5c,
+ 0xeb, 0xde, 0x3f, 0x40, 0x93, 0xc5, 0x38, 0x81,
+ 0x00, 0x00, 0x00, 0x00
+ },
+ .len = 584
+ },
+ .validAuthLenInBits = {
+ .len = 584
+ },
+ .validAuthOffsetLenInBits = {
+ .len = 128
+ },
+ .digest = {
+ .data = {0x24, 0xa8, 0x42, 0xb3},
+ .len = 4
+ }
+};
+
#endif /* TEST_CRYPTODEV_ZUC_TEST_VECTORS_H_ */
diff --git a/app/test/test_cycles.c b/test/test/test_cycles.c
index f1897979..f1897979 100644
--- a/app/test/test_cycles.c
+++ b/test/test/test_cycles.c
diff --git a/app/test/test_debug.c b/test/test/test_debug.c
index 0a3b2c46..0a3b2c46 100644
--- a/app/test/test_debug.c
+++ b/test/test/test_debug.c
diff --git a/app/test/test_devargs.c b/test/test/test_devargs.c
index 63242f1c..63242f1c 100644
--- a/app/test/test_devargs.c
+++ b/test/test/test_devargs.c
diff --git a/app/test/test_distributor.c b/test/test/test_distributor.c
index 85cb8f39..890a8526 100644
--- a/app/test/test_distributor.c
+++ b/test/test/test_distributor.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,13 @@
#define BURST 32
#define BIG_BATCH 1024
+struct worker_params {
+ char name[64];
+ struct rte_distributor *dist;
+};
+
+struct worker_params worker_params;
+
/* statics - all zero-initialized by default */
static volatile int quit; /**< general quit variable for all threads */
static volatile int zero_quit; /**< var for when we just want thr0 to quit*/
@@ -80,18 +87,25 @@ clear_packet_count(void)
static int
handle_work(void *arg)
{
- struct rte_mbuf *pkt = NULL;
- struct rte_distributor *d = arg;
- unsigned count = 0;
- unsigned id = __sync_fetch_and_add(&worker_idx, 1);
-
- pkt = rte_distributor_get_pkt(d, id, NULL);
+ struct rte_mbuf *buf[8] __rte_cache_aligned;
+ struct worker_params *wp = arg;
+ struct rte_distributor *db = wp->dist;
+ unsigned int count = 0, num = 0;
+ unsigned int id = __sync_fetch_and_add(&worker_idx, 1);
+ int i;
+
+ for (i = 0; i < 8; i++)
+ buf[i] = NULL;
+ num = rte_distributor_get_pkt(db, id, buf, buf, num);
while (!quit) {
- worker_stats[id].handled_packets++, count++;
- pkt = rte_distributor_get_pkt(d, id, pkt);
+ worker_stats[id].handled_packets += num;
+ count += num;
+ num = rte_distributor_get_pkt(db, id,
+ buf, buf, num);
}
- worker_stats[id].handled_packets++, count++;
- rte_distributor_return_pkt(d, id, pkt);
+ worker_stats[id].handled_packets += num;
+ count += num;
+ rte_distributor_return_pkt(db, id, buf, num);
return 0;
}
@@ -107,10 +121,13 @@ handle_work(void *arg)
* not necessarily in the same order (as different flows).
*/
static int
-sanity_test(struct rte_distributor *d, struct rte_mempool *p)
+sanity_test(struct worker_params *wp, struct rte_mempool *p)
{
+ struct rte_distributor *db = wp->dist;
struct rte_mbuf *bufs[BURST];
- unsigned i;
+ struct rte_mbuf *returns[BURST*2];
+ unsigned int i, count;
+ unsigned int retries;
printf("=== Basic distributor sanity tests ===\n");
clear_packet_count();
@@ -124,8 +141,15 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
for (i = 0; i < BURST; i++)
bufs[i]->hash.usr = 0;
- rte_distributor_process(d, bufs, BURST);
- rte_distributor_flush(d);
+ rte_distributor_process(db, bufs, BURST);
+ count = 0;
+ do {
+
+ rte_distributor_flush(db);
+ count += rte_distributor_returned_pkts(db,
+ returns, BURST*2);
+ } while (count < BURST);
+
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
"Expected %u, got %u\n",
@@ -137,8 +161,6 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
printf("Worker %u handled %u packets\n", i,
worker_stats[i].handled_packets);
printf("Sanity test with all zero hashes done.\n");
- if (worker_stats[0].handled_packets != BURST)
- return -1;
/* pick two flows and check they go correctly */
if (rte_lcore_count() >= 3) {
@@ -146,8 +168,13 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
for (i = 0; i < BURST; i++)
bufs[i]->hash.usr = (i & 1) << 8;
- rte_distributor_process(d, bufs, BURST);
- rte_distributor_flush(d);
+ rte_distributor_process(db, bufs, BURST);
+ count = 0;
+ do {
+ rte_distributor_flush(db);
+ count += rte_distributor_returned_pkts(db,
+ returns, BURST*2);
+ } while (count < BURST);
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
"Expected %u, got %u\n",
@@ -159,20 +186,21 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
printf("Worker %u handled %u packets\n", i,
worker_stats[i].handled_packets);
printf("Sanity test with two hash values done\n");
-
- if (worker_stats[0].handled_packets != 16 ||
- worker_stats[1].handled_packets != 16)
- return -1;
}
/* give a different hash value to each packet,
* so load gets distributed */
clear_packet_count();
for (i = 0; i < BURST; i++)
- bufs[i]->hash.usr = i;
-
- rte_distributor_process(d, bufs, BURST);
- rte_distributor_flush(d);
+ bufs[i]->hash.usr = i+1;
+
+ rte_distributor_process(db, bufs, BURST);
+ count = 0;
+ do {
+ rte_distributor_flush(db);
+ count += rte_distributor_returned_pkts(db,
+ returns, BURST*2);
+ } while (count < BURST);
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
"Expected %u, got %u\n",
@@ -194,8 +222,9 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
unsigned num_returned = 0;
/* flush out any remaining packets */
- rte_distributor_flush(d);
- rte_distributor_clear_returns(d);
+ rte_distributor_flush(db);
+ rte_distributor_clear_returns(db);
+
if (rte_mempool_get_bulk(p, (void *)many_bufs, BIG_BATCH) != 0) {
printf("line %d: Error getting mbufs from pool\n", __LINE__);
return -1;
@@ -203,28 +232,44 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
for (i = 0; i < BIG_BATCH; i++)
many_bufs[i]->hash.usr = i << 2;
+ printf("=== testing big burst (%s) ===\n", wp->name);
for (i = 0; i < BIG_BATCH/BURST; i++) {
- rte_distributor_process(d, &many_bufs[i*BURST], BURST);
- num_returned += rte_distributor_returned_pkts(d,
+ rte_distributor_process(db,
+ &many_bufs[i*BURST], BURST);
+ count = rte_distributor_returned_pkts(db,
&return_bufs[num_returned],
BIG_BATCH - num_returned);
+ num_returned += count;
}
- rte_distributor_flush(d);
- num_returned += rte_distributor_returned_pkts(d,
- &return_bufs[num_returned], BIG_BATCH - num_returned);
+ rte_distributor_flush(db);
+ count = rte_distributor_returned_pkts(db,
+ &return_bufs[num_returned],
+ BIG_BATCH - num_returned);
+ num_returned += count;
+ retries = 0;
+ do {
+ rte_distributor_flush(db);
+ count = rte_distributor_returned_pkts(db,
+ &return_bufs[num_returned],
+ BIG_BATCH - num_returned);
+ num_returned += count;
+ retries++;
+ } while ((num_returned < BIG_BATCH) && (retries < 100));
if (num_returned != BIG_BATCH) {
- printf("line %d: Number returned is not the same as "
- "number sent\n", __LINE__);
+ printf("line %d: Missing packets, expected %d\n",
+ __LINE__, num_returned);
return -1;
}
+
/* big check - make sure all packets made it back!! */
for (i = 0; i < BIG_BATCH; i++) {
unsigned j;
struct rte_mbuf *src = many_bufs[i];
- for (j = 0; j < BIG_BATCH; j++)
+ for (j = 0; j < BIG_BATCH; j++) {
if (return_bufs[j] == src)
break;
+ }
if (j == BIG_BATCH) {
printf("Error: could not find source packet #%u\n", i);
@@ -248,19 +293,28 @@ sanity_test(struct rte_distributor *d, struct rte_mempool *p)
static int
handle_work_with_free_mbufs(void *arg)
{
- struct rte_mbuf *pkt = NULL;
- struct rte_distributor *d = arg;
- unsigned count = 0;
- unsigned id = __sync_fetch_and_add(&worker_idx, 1);
-
- pkt = rte_distributor_get_pkt(d, id, NULL);
+ struct rte_mbuf *buf[8] __rte_cache_aligned;
+ struct worker_params *wp = arg;
+ struct rte_distributor *d = wp->dist;
+ unsigned int count = 0;
+ unsigned int i;
+ unsigned int num = 0;
+ unsigned int id = __sync_fetch_and_add(&worker_idx, 1);
+
+ for (i = 0; i < 8; i++)
+ buf[i] = NULL;
+ num = rte_distributor_get_pkt(d, id, buf, buf, num);
while (!quit) {
- worker_stats[id].handled_packets++, count++;
- rte_pktmbuf_free(pkt);
- pkt = rte_distributor_get_pkt(d, id, pkt);
+ worker_stats[id].handled_packets += num;
+ count += num;
+ for (i = 0; i < num; i++)
+ rte_pktmbuf_free(buf[i]);
+ num = rte_distributor_get_pkt(d,
+ id, buf, buf, num);
}
- worker_stats[id].handled_packets++, count++;
- rte_distributor_return_pkt(d, id, pkt);
+ worker_stats[id].handled_packets += num;
+ count += num;
+ rte_distributor_return_pkt(d, id, buf, num);
return 0;
}
@@ -270,12 +324,14 @@ handle_work_with_free_mbufs(void *arg)
* library.
*/
static int
-sanity_test_with_mbuf_alloc(struct rte_distributor *d, struct rte_mempool *p)
+sanity_test_with_mbuf_alloc(struct worker_params *wp, struct rte_mempool *p)
{
+ struct rte_distributor *d = wp->dist;
unsigned i;
struct rte_mbuf *bufs[BURST];
- printf("=== Sanity test with mbuf alloc/free ===\n");
+ printf("=== Sanity test with mbuf alloc/free (%s) ===\n", wp->name);
+
clear_packet_count();
for (i = 0; i < ((1<<ITER_POWER)); i += BURST) {
unsigned j;
@@ -290,6 +346,9 @@ sanity_test_with_mbuf_alloc(struct rte_distributor *d, struct rte_mempool *p)
}
rte_distributor_flush(d);
+
+ rte_delay_us(10000);
+
if (total_packet_count() < (1<<ITER_POWER)) {
printf("Line %u: Packet count is incorrect, %u, expected %u\n",
__LINE__, total_packet_count(),
@@ -305,20 +364,32 @@ static int
handle_work_for_shutdown_test(void *arg)
{
struct rte_mbuf *pkt = NULL;
- struct rte_distributor *d = arg;
- unsigned count = 0;
- const unsigned id = __sync_fetch_and_add(&worker_idx, 1);
+ struct rte_mbuf *buf[8] __rte_cache_aligned;
+ struct worker_params *wp = arg;
+ struct rte_distributor *d = wp->dist;
+ unsigned int count = 0;
+ unsigned int num = 0;
+ unsigned int total = 0;
+ unsigned int i;
+ unsigned int returned = 0;
+ const unsigned int id = __sync_fetch_and_add(&worker_idx, 1);
+
+ num = rte_distributor_get_pkt(d, id, buf, buf, num);
- pkt = rte_distributor_get_pkt(d, id, NULL);
/* wait for quit single globally, or for worker zero, wait
* for zero_quit */
while (!quit && !(id == 0 && zero_quit)) {
- worker_stats[id].handled_packets++, count++;
- rte_pktmbuf_free(pkt);
- pkt = rte_distributor_get_pkt(d, id, NULL);
+ worker_stats[id].handled_packets += num;
+ count += num;
+ for (i = 0; i < num; i++)
+ rte_pktmbuf_free(buf[i]);
+ num = rte_distributor_get_pkt(d,
+ id, buf, buf, num);
+ total += num;
}
- worker_stats[id].handled_packets++, count++;
- rte_distributor_return_pkt(d, id, pkt);
+ worker_stats[id].handled_packets += num;
+ count += num;
+ returned = rte_distributor_return_pkt(d, id, buf, num);
if (id == 0) {
/* for worker zero, allow it to restart to pick up last packet
@@ -326,13 +397,18 @@ handle_work_for_shutdown_test(void *arg)
*/
while (zero_quit)
usleep(100);
- pkt = rte_distributor_get_pkt(d, id, NULL);
+
+ num = rte_distributor_get_pkt(d,
+ id, buf, buf, num);
+
while (!quit) {
worker_stats[id].handled_packets++, count++;
rte_pktmbuf_free(pkt);
- pkt = rte_distributor_get_pkt(d, id, NULL);
+ num = rte_distributor_get_pkt(d, id, buf, buf, num);
}
- rte_distributor_return_pkt(d, id, pkt);
+ returned = rte_distributor_return_pkt(d,
+ id, buf, num);
+ printf("Num returned = %d\n", returned);
}
return 0;
}
@@ -344,26 +420,32 @@ handle_work_for_shutdown_test(void *arg)
* library.
*/
static int
-sanity_test_with_worker_shutdown(struct rte_distributor *d,
+sanity_test_with_worker_shutdown(struct worker_params *wp,
struct rte_mempool *p)
{
+ struct rte_distributor *d = wp->dist;
struct rte_mbuf *bufs[BURST];
unsigned i;
printf("=== Sanity test of worker shutdown ===\n");
clear_packet_count();
+
if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
printf("line %d: Error getting mbufs from pool\n", __LINE__);
return -1;
}
- /* now set all hash values in all buffers to zero, so all pkts go to the
- * one worker thread */
+ /*
+ * Now set all hash values in all buffers to same value so all
+ * pkts go to the one worker thread
+ */
for (i = 0; i < BURST; i++)
- bufs[i]->hash.usr = 0;
+ bufs[i]->hash.usr = 1;
rte_distributor_process(d, bufs, BURST);
+ rte_distributor_flush(d);
+
/* at this point, we will have processed some packets and have a full
* backlog for the other ones at worker 0.
*/
@@ -374,7 +456,7 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,
return -1;
}
for (i = 0; i < BURST; i++)
- bufs[i]->hash.usr = 0;
+ bufs[i]->hash.usr = 1;
/* get worker zero to quit */
zero_quit = 1;
@@ -382,6 +464,12 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,
/* flush the distributor */
rte_distributor_flush(d);
+ rte_delay_us(10000);
+
+ for (i = 0; i < rte_lcore_count() - 1; i++)
+ printf("Worker %u handled %u packets\n", i,
+ worker_stats[i].handled_packets);
+
if (total_packet_count() != BURST * 2) {
printf("Line %d: Error, not all packets flushed. "
"Expected %u, got %u\n",
@@ -389,10 +477,6 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,
return -1;
}
- for (i = 0; i < rte_lcore_count() - 1; i++)
- printf("Worker %u handled %u packets\n", i,
- worker_stats[i].handled_packets);
-
printf("Sanity test with worker shutdown passed\n\n");
return 0;
}
@@ -401,13 +485,14 @@ sanity_test_with_worker_shutdown(struct rte_distributor *d,
* one worker shuts down..
*/
static int
-test_flush_with_worker_shutdown(struct rte_distributor *d,
+test_flush_with_worker_shutdown(struct worker_params *wp,
struct rte_mempool *p)
{
+ struct rte_distributor *d = wp->dist;
struct rte_mbuf *bufs[BURST];
unsigned i;
- printf("=== Test flush fn with worker shutdown ===\n");
+ printf("=== Test flush fn with worker shutdown (%s) ===\n", wp->name);
clear_packet_count();
if (rte_mempool_get_bulk(p, (void *)bufs, BURST) != 0) {
@@ -431,7 +516,13 @@ test_flush_with_worker_shutdown(struct rte_distributor *d,
/* flush the distributor */
rte_distributor_flush(d);
+ rte_delay_us(10000);
+
zero_quit = 0;
+ for (i = 0; i < rte_lcore_count() - 1; i++)
+ printf("Worker %u handled %u packets\n", i,
+ worker_stats[i].handled_packets);
+
if (total_packet_count() != BURST) {
printf("Line %d: Error, not all packets flushed. "
"Expected %u, got %u\n",
@@ -439,10 +530,6 @@ test_flush_with_worker_shutdown(struct rte_distributor *d,
return -1;
}
- for (i = 0; i < rte_lcore_count() - 1; i++)
- printf("Worker %u handled %u packets\n", i,
- worker_stats[i].handled_packets);
-
printf("Flush test with worker shutdown passed\n\n");
return 0;
}
@@ -451,15 +538,25 @@ static
int test_error_distributor_create_name(void)
{
struct rte_distributor *d = NULL;
+ struct rte_distributor *db = NULL;
char *name = NULL;
d = rte_distributor_create(name, rte_socket_id(),
- rte_lcore_count() - 1);
+ rte_lcore_count() - 1,
+ RTE_DIST_ALG_SINGLE);
if (d != NULL || rte_errno != EINVAL) {
printf("ERROR: No error on create() with NULL name param\n");
return -1;
}
+ db = rte_distributor_create(name, rte_socket_id(),
+ rte_lcore_count() - 1,
+ RTE_DIST_ALG_BURST);
+ if (db != NULL || rte_errno != EINVAL) {
+ printf("ERROR: No error on create() with NULL param\n");
+ return -1;
+ }
+
return 0;
}
@@ -467,21 +564,34 @@ int test_error_distributor_create_name(void)
static
int test_error_distributor_create_numworkers(void)
{
- struct rte_distributor *d = NULL;
- d = rte_distributor_create("test_numworkers", rte_socket_id(),
- RTE_MAX_LCORE + 10);
- if (d != NULL || rte_errno != EINVAL) {
+ struct rte_distributor *ds = NULL;
+ struct rte_distributor *db = NULL;
+
+ ds = rte_distributor_create("test_numworkers", rte_socket_id(),
+ RTE_MAX_LCORE + 10,
+ RTE_DIST_ALG_SINGLE);
+ if (ds != NULL || rte_errno != EINVAL) {
printf("ERROR: No error on create() with num_workers > MAX\n");
return -1;
}
+
+ db = rte_distributor_create("test_numworkers", rte_socket_id(),
+ RTE_MAX_LCORE + 10,
+ RTE_DIST_ALG_BURST);
+ if (db != NULL || rte_errno != EINVAL) {
+ printf("ERROR: No error on create() num_workers > MAX\n");
+ return -1;
+ }
+
return 0;
}
/* Useful function which ensures that all worker functions terminate */
static void
-quit_workers(struct rte_distributor *d, struct rte_mempool *p)
+quit_workers(struct worker_params *wp, struct rte_mempool *p)
{
+ struct rte_distributor *d = wp->dist;
const unsigned num_workers = rte_lcore_count() - 1;
unsigned i;
struct rte_mbuf *bufs[RTE_MAX_LCORE];
@@ -505,24 +615,42 @@ quit_workers(struct rte_distributor *d, struct rte_mempool *p)
static int
test_distributor(void)
{
- static struct rte_distributor *d;
+ static struct rte_distributor *ds;
+ static struct rte_distributor *db;
+ static struct rte_distributor *dist[2];
static struct rte_mempool *p;
+ int i;
if (rte_lcore_count() < 2) {
printf("ERROR: not enough cores to test distributor\n");
return -1;
}
- if (d == NULL) {
- d = rte_distributor_create("Test_distributor", rte_socket_id(),
- rte_lcore_count() - 1);
- if (d == NULL) {
- printf("Error creating distributor\n");
+ if (db == NULL) {
+ db = rte_distributor_create("Test_dist_burst", rte_socket_id(),
+ rte_lcore_count() - 1,
+ RTE_DIST_ALG_BURST);
+ if (db == NULL) {
+ printf("Error creating burst distributor\n");
return -1;
}
} else {
- rte_distributor_flush(d);
- rte_distributor_clear_returns(d);
+ rte_distributor_flush(db);
+ rte_distributor_clear_returns(db);
+ }
+
+ if (ds == NULL) {
+ ds = rte_distributor_create("Test_dist_single",
+ rte_socket_id(),
+ rte_lcore_count() - 1,
+ RTE_DIST_ALG_SINGLE);
+ if (ds == NULL) {
+ printf("Error creating single distributor\n");
+ return -1;
+ }
+ } else {
+ rte_distributor_flush(ds);
+ rte_distributor_clear_returns(ds);
}
const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
@@ -536,31 +664,50 @@ test_distributor(void)
}
}
- rte_eal_mp_remote_launch(handle_work, d, SKIP_MASTER);
- if (sanity_test(d, p) < 0)
- goto err;
- quit_workers(d, p);
+ dist[0] = ds;
+ dist[1] = db;
+
+ for (i = 0; i < 2; i++) {
- rte_eal_mp_remote_launch(handle_work_with_free_mbufs, d, SKIP_MASTER);
- if (sanity_test_with_mbuf_alloc(d, p) < 0)
- goto err;
- quit_workers(d, p);
+ worker_params.dist = dist[i];
+ if (i)
+ sprintf(worker_params.name, "burst");
+ else
+ sprintf(worker_params.name, "single");
- if (rte_lcore_count() > 2) {
- rte_eal_mp_remote_launch(handle_work_for_shutdown_test, d,
- SKIP_MASTER);
- if (sanity_test_with_worker_shutdown(d, p) < 0)
+ rte_eal_mp_remote_launch(handle_work,
+ &worker_params, SKIP_MASTER);
+ if (sanity_test(&worker_params, p) < 0)
goto err;
- quit_workers(d, p);
+ quit_workers(&worker_params, p);
- rte_eal_mp_remote_launch(handle_work_for_shutdown_test, d,
- SKIP_MASTER);
- if (test_flush_with_worker_shutdown(d, p) < 0)
+ rte_eal_mp_remote_launch(handle_work_with_free_mbufs,
+ &worker_params, SKIP_MASTER);
+ if (sanity_test_with_mbuf_alloc(&worker_params, p) < 0)
goto err;
- quit_workers(d, p);
+ quit_workers(&worker_params, p);
+
+ if (rte_lcore_count() > 2) {
+ rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
+ &worker_params,
+ SKIP_MASTER);
+ if (sanity_test_with_worker_shutdown(&worker_params,
+ p) < 0)
+ goto err;
+ quit_workers(&worker_params, p);
+
+ rte_eal_mp_remote_launch(handle_work_for_shutdown_test,
+ &worker_params,
+ SKIP_MASTER);
+ if (test_flush_with_worker_shutdown(&worker_params,
+ p) < 0)
+ goto err;
+ quit_workers(&worker_params, p);
+
+ } else {
+ printf("Too few cores to run worker shutdown test\n");
+ }
- } else {
- printf("Not enough cores to run tests for worker shutdown\n");
}
if (test_error_distributor_create_numworkers() == -1 ||
@@ -572,7 +719,7 @@ test_distributor(void)
return 0;
err:
- quit_workers(d, p);
+ quit_workers(&worker_params, p);
return -1;
}
diff --git a/app/test/test_distributor_perf.c b/test/test/test_distributor_perf.c
index 7947fe9b..732d86d0 100644
--- a/app/test/test_distributor_perf.c
+++ b/test/test/test_distributor_perf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -41,8 +41,9 @@
#include <rte_mbuf.h>
#include <rte_distributor.h>
-#define ITER_POWER 20 /* log 2 of how many iterations we do when timing. */
-#define BURST 32
+#define ITER_POWER_CL 25 /* log 2 of how many iterations for Cache Line test */
+#define ITER_POWER 21 /* log 2 of how many iterations we do when timing. */
+#define BURST 64
#define BIG_BATCH 1024
/* static vars - zero initialized by default */
@@ -54,7 +55,8 @@ struct worker_stats {
} __rte_cache_aligned;
struct worker_stats worker_stats[RTE_MAX_LCORE];
-/* worker thread used for testing the time to do a round-trip of a cache
+/*
+ * worker thread used for testing the time to do a round-trip of a cache
* line between two cores and back again
*/
static void
@@ -69,7 +71,8 @@ flip_bit(volatile uint64_t *arg)
}
}
-/* test case to time the number of cycles to round-trip a cache line between
+/*
+ * test case to time the number of cycles to round-trip a cache line between
* two cores and back again.
*/
static void
@@ -86,7 +89,7 @@ time_cache_line_switch(void)
rte_pause();
const uint64_t start_time = rte_rdtsc();
- for (i = 0; i < (1 << ITER_POWER); i++) {
+ for (i = 0; i < (1 << ITER_POWER_CL); i++) {
while (*pdata)
rte_pause();
*pdata = 1;
@@ -98,13 +101,14 @@ time_cache_line_switch(void)
*pdata = 2;
rte_eal_wait_lcore(slaveid);
printf("==== Cache line switch test ===\n");
- printf("Time for %u iterations = %"PRIu64" ticks\n", (1<<ITER_POWER),
+ printf("Time for %u iterations = %"PRIu64" ticks\n", (1<<ITER_POWER_CL),
end_time-start_time);
printf("Ticks per iteration = %"PRIu64"\n\n",
- (end_time-start_time) >> ITER_POWER);
+ (end_time-start_time) >> ITER_POWER_CL);
}
-/* returns the total count of the number of packets handled by the worker
+/*
+ * returns the total count of the number of packets handled by the worker
* functions given below.
*/
static unsigned
@@ -123,35 +127,44 @@ clear_packet_count(void)
memset(&worker_stats, 0, sizeof(worker_stats));
}
-/* this is the basic worker function for performance tests.
+/*
+ * This is the basic worker function for performance tests.
* it does nothing but return packets and count them.
*/
static int
handle_work(void *arg)
{
- struct rte_mbuf *pkt = NULL;
struct rte_distributor *d = arg;
- unsigned count = 0;
- unsigned id = __sync_fetch_and_add(&worker_idx, 1);
+ unsigned int count = 0;
+ unsigned int num = 0;
+ int i;
+ unsigned int id = __sync_fetch_and_add(&worker_idx, 1);
+ struct rte_mbuf *buf[8] __rte_cache_aligned;
- pkt = rte_distributor_get_pkt(d, id, NULL);
+ for (i = 0; i < 8; i++)
+ buf[i] = NULL;
+
+ num = rte_distributor_get_pkt(d, id, buf, buf, num);
while (!quit) {
- worker_stats[id].handled_packets++, count++;
- pkt = rte_distributor_get_pkt(d, id, pkt);
+ worker_stats[id].handled_packets += num;
+ count += num;
+ num = rte_distributor_get_pkt(d, id, buf, buf, num);
}
- worker_stats[id].handled_packets++, count++;
- rte_distributor_return_pkt(d, id, pkt);
+ worker_stats[id].handled_packets += num;
+ count += num;
+ rte_distributor_return_pkt(d, id, buf, num);
return 0;
}
-/* this basic performance test just repeatedly sends in 32 packets at a time
+/*
+ * This basic performance test just repeatedly sends in 32 packets at a time
* to the distributor and verifies at the end that we got them all in the worker
* threads and finally how long per packet the processing took.
*/
static inline int
perf_test(struct rte_distributor *d, struct rte_mempool *p)
{
- unsigned i;
+ unsigned int i;
uint64_t start, end;
struct rte_mbuf *bufs[BURST];
@@ -174,7 +187,8 @@ perf_test(struct rte_distributor *d, struct rte_mempool *p)
rte_distributor_process(d, NULL, 0);
} while (total_packet_count() < (BURST << ITER_POWER));
- printf("=== Performance test of distributor ===\n");
+ rte_distributor_clear_returns(d);
+
printf("Time per burst: %"PRIu64"\n", (end - start) >> ITER_POWER);
printf("Time per packet: %"PRIu64"\n\n",
((end - start) >> ITER_POWER)/BURST);
@@ -194,9 +208,10 @@ perf_test(struct rte_distributor *d, struct rte_mempool *p)
static void
quit_workers(struct rte_distributor *d, struct rte_mempool *p)
{
- const unsigned num_workers = rte_lcore_count() - 1;
- unsigned i;
+ const unsigned int num_workers = rte_lcore_count() - 1;
+ unsigned int i;
struct rte_mbuf *bufs[RTE_MAX_LCORE];
+
rte_mempool_get_bulk(p, (void *)bufs, num_workers);
quit = 1;
@@ -215,7 +230,8 @@ quit_workers(struct rte_distributor *d, struct rte_mempool *p)
static int
test_distributor_perf(void)
{
- static struct rte_distributor *d;
+ static struct rte_distributor *ds;
+ static struct rte_distributor *db;
static struct rte_mempool *p;
if (rte_lcore_count() < 2) {
@@ -226,16 +242,28 @@ test_distributor_perf(void)
/* first time how long it takes to round-trip a cache line */
time_cache_line_switch();
- if (d == NULL) {
- d = rte_distributor_create("Test_perf", rte_socket_id(),
- rte_lcore_count() - 1);
- if (d == NULL) {
+ if (ds == NULL) {
+ ds = rte_distributor_create("Test_perf", rte_socket_id(),
+ rte_lcore_count() - 1,
+ RTE_DIST_ALG_SINGLE);
+ if (ds == NULL) {
printf("Error creating distributor\n");
return -1;
}
} else {
- rte_distributor_flush(d);
- rte_distributor_clear_returns(d);
+ rte_distributor_clear_returns(ds);
+ }
+
+ if (db == NULL) {
+ db = rte_distributor_create("Test_burst", rte_socket_id(),
+ rte_lcore_count() - 1,
+ RTE_DIST_ALG_BURST);
+ if (db == NULL) {
+ printf("Error creating burst distributor\n");
+ return -1;
+ }
+ } else {
+ rte_distributor_clear_returns(db);
}
const unsigned nb_bufs = (511 * rte_lcore_count()) < BIG_BATCH ?
@@ -249,10 +277,17 @@ test_distributor_perf(void)
}
}
- rte_eal_mp_remote_launch(handle_work, d, SKIP_MASTER);
- if (perf_test(d, p) < 0)
+ printf("=== Performance test of distributor (single mode) ===\n");
+ rte_eal_mp_remote_launch(handle_work, ds, SKIP_MASTER);
+ if (perf_test(ds, p) < 0)
+ return -1;
+ quit_workers(ds, p);
+
+ printf("=== Performance test of distributor (burst mode) ===\n");
+ rte_eal_mp_remote_launch(handle_work, db, SKIP_MASTER);
+ if (perf_test(db, p) < 0)
return -1;
- quit_workers(d, p);
+ quit_workers(db, p);
return 0;
}
diff --git a/app/test/test_eal_flags.c b/test/test/test_eal_flags.c
index 91b40664..91b40664 100644
--- a/app/test/test_eal_flags.c
+++ b/test/test/test_eal_flags.c
diff --git a/app/test/test_eal_fs.c b/test/test/test_eal_fs.c
index 78978120..78978120 100644
--- a/app/test/test_eal_fs.c
+++ b/test/test/test_eal_fs.c
diff --git a/test/test/test_efd.c b/test/test/test_efd.c
new file mode 100644
index 00000000..54430616
--- /dev/null
+++ b/test/test/test_efd.c
@@ -0,0 +1,500 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_memcpy.h>
+#include <rte_malloc.h>
+#include <rte_efd.h>
+#include <rte_byteorder.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ip.h>
+
+#include "test.h"
+
+#define EFD_TEST_KEY_LEN 8
+#define TABLE_SIZE (1 << 21)
+#define ITERATIONS 3
+
+#if RTE_EFD_VALUE_NUM_BITS == 32
+#define VALUE_BITMASK 0xffffffff
+#else
+#define VALUE_BITMASK ((1 << RTE_EFD_VALUE_NUM_BITS) - 1)
+#endif
+static unsigned int test_socket_id;
+
+/* 5-tuple key type */
+struct flow_key {
+ uint32_t ip_src;
+ uint32_t ip_dst;
+ uint16_t port_src;
+ uint16_t port_dst;
+ uint8_t proto;
+} __attribute__((packed));
+/*
+ * Print out result of unit test efd operation.
+ */
+#if defined(UNIT_TEST_EFD_VERBOSE)
+
+static void print_key_info(const char *msg, const struct flow_key *key,
+ efd_value_t val)
+{
+ const uint8_t *p = (const uint8_t *) key;
+ unsigned int i;
+
+ printf("%s key:0x", msg);
+ for (i = 0; i < sizeof(struct flow_key); i++)
+ printf("%02X", p[i]);
+
+ printf(" @ val %d\n", val);
+}
+#else
+
+static void print_key_info(__attribute__((unused)) const char *msg,
+ __attribute__((unused)) const struct flow_key *key,
+ __attribute__((unused)) efd_value_t val)
+{
+}
+#endif
+
+/* Keys used by unit test functions */
+static struct flow_key keys[5] = {
+ {
+ .ip_src = IPv4(0x03, 0x02, 0x01, 0x00),
+ .ip_dst = IPv4(0x07, 0x06, 0x05, 0x04),
+ .port_src = 0x0908,
+ .port_dst = 0x0b0a,
+ .proto = 0x0c,
+ },
+ {
+ .ip_src = IPv4(0x13, 0x12, 0x11, 0x10),
+ .ip_dst = IPv4(0x17, 0x16, 0x15, 0x14),
+ .port_src = 0x1918,
+ .port_dst = 0x1b1a,
+ .proto = 0x1c,
+ },
+ {
+ .ip_src = IPv4(0x23, 0x22, 0x21, 0x20),
+ .ip_dst = IPv4(0x27, 0x26, 0x25, 0x24),
+ .port_src = 0x2928,
+ .port_dst = 0x2b2a,
+ .proto = 0x2c,
+ },
+ {
+ .ip_src = IPv4(0x33, 0x32, 0x31, 0x30),
+ .ip_dst = IPv4(0x37, 0x36, 0x35, 0x34),
+ .port_src = 0x3938,
+ .port_dst = 0x3b3a,
+ .proto = 0x3c,
+ },
+ {
+ .ip_src = IPv4(0x43, 0x42, 0x41, 0x40),
+ .ip_dst = IPv4(0x47, 0x46, 0x45, 0x44),
+ .port_src = 0x4948,
+ .port_dst = 0x4b4a,
+ .proto = 0x4c,
+ }
+};
+/* Array to store the data */
+efd_value_t data[5];
+
+static inline uint8_t efd_get_all_sockets_bitmask(void)
+{
+ uint8_t all_cpu_sockets_bitmask = 0;
+ unsigned int i;
+ unsigned int next_lcore = rte_get_master_lcore();
+ const int val_true = 1, val_false = 0;
+ for (i = 0; i < rte_lcore_count(); i++) {
+ all_cpu_sockets_bitmask |= 1 << rte_lcore_to_socket_id(next_lcore);
+ next_lcore = rte_get_next_lcore(next_lcore, val_false, val_true);
+ }
+
+ return all_cpu_sockets_bitmask;
+}
+
+/*
+ * Basic sequence of operations for a single key:
+ * - add
+ * - lookup (hit)
+ * - delete
+ * Note: lookup (miss) is not applicable since this is a filter
+ */
+static int test_add_delete(void)
+{
+ struct rte_efd_table *handle;
+ /* test with standard add/lookup/delete functions */
+ efd_value_t prev_value;
+ printf("Entering %s\n", __func__);
+
+ handle = rte_efd_create("test_add_delete",
+ TABLE_SIZE, sizeof(struct flow_key),
+ efd_get_all_sockets_bitmask(), test_socket_id);
+ TEST_ASSERT_NOT_NULL(handle, "Error creating the EFD table\n");
+
+ data[0] = mrand48() & VALUE_BITMASK;
+ TEST_ASSERT_SUCCESS(rte_efd_update(handle, test_socket_id, &keys[0],
+ data[0]),
+ "Error inserting the key");
+ print_key_info("Add", &keys[0], data[0]);
+
+ TEST_ASSERT_EQUAL(rte_efd_lookup(handle, test_socket_id, &keys[0]),
+ data[0],
+ "failed to find key");
+
+ TEST_ASSERT_SUCCESS(rte_efd_delete(handle, test_socket_id, &keys[0],
+ &prev_value),
+ "failed to delete key");
+ TEST_ASSERT_EQUAL(prev_value, data[0],
+ "failed to delete the expected value, got %d, "
+ "expected %d", prev_value, data[0]);
+ print_key_info("Del", &keys[0], data[0]);
+
+ rte_efd_free(handle);
+
+ return 0;
+}
+
+/*
+ * Sequence of operations for a single key:
+ * - add
+ * - lookup: hit
+ * - add: update
+ * - lookup: hit (updated data)
+ * - delete: hit
+ */
+static int test_add_update_delete(void)
+{
+ struct rte_efd_table *handle;
+ printf("Entering %s\n", __func__);
+ /* test with standard add/lookup/delete functions */
+ efd_value_t prev_value;
+ data[1] = mrand48() & VALUE_BITMASK;
+
+ handle = rte_efd_create("test_add_update_delete", TABLE_SIZE,
+ sizeof(struct flow_key),
+ efd_get_all_sockets_bitmask(), test_socket_id);
+ TEST_ASSERT_NOT_NULL(handle, "Error creating the efd table\n");
+
+ TEST_ASSERT_SUCCESS(rte_efd_update(handle, test_socket_id, &keys[1],
+ data[1]), "Error inserting the key");
+ print_key_info("Add", &keys[1], data[1]);
+
+ TEST_ASSERT_EQUAL(rte_efd_lookup(handle, test_socket_id, &keys[1]),
+ data[1], "failed to find key");
+ print_key_info("Lkp", &keys[1], data[1]);
+
+ data[1] = data[1] + 1;
+ TEST_ASSERT_SUCCESS(rte_efd_update(handle, test_socket_id, &keys[1],
+ data[1]), "Error re-inserting the key");
+ print_key_info("Add", &keys[1], data[1]);
+
+ TEST_ASSERT_EQUAL(rte_efd_lookup(handle, test_socket_id, &keys[1]),
+ data[1], "failed to find key");
+ print_key_info("Lkp", &keys[1], data[1]);
+
+ TEST_ASSERT_SUCCESS(rte_efd_delete(handle, test_socket_id, &keys[1],
+ &prev_value), "failed to delete key");
+ TEST_ASSERT_EQUAL(prev_value, data[1],
+ "failed to delete the expected value, got %d, "
+ "expected %d", prev_value, data[1]);
+ print_key_info("Del", &keys[1], data[1]);
+
+
+ rte_efd_free(handle);
+ return 0;
+}
+
+/*
+ * Sequence of operations for find existing EFD table
+ *
+ * - create table
+ * - find existing table: hit
+ * - find non-existing table: miss
+ *
+ */
+static int test_efd_find_existing(void)
+{
+ struct rte_efd_table *handle = NULL, *result = NULL;
+
+ printf("Entering %s\n", __func__);
+
+ /* Create EFD table. */
+ handle = rte_efd_create("efd_find_existing", TABLE_SIZE,
+ sizeof(struct flow_key),
+ efd_get_all_sockets_bitmask(), test_socket_id);
+ TEST_ASSERT_NOT_NULL(handle, "Error creating the efd table\n");
+
+ /* Try to find existing EFD table */
+ result = rte_efd_find_existing("efd_find_existing");
+ TEST_ASSERT_EQUAL(result, handle, "could not find existing efd table");
+
+ /* Try to find non-existing EFD table */
+ result = rte_efd_find_existing("efd_find_non_existing");
+ TEST_ASSERT_NULL(result, "found table that shouldn't exist");
+
+ /* Cleanup. */
+ rte_efd_free(handle);
+
+ return 0;
+}
+
+/*
+ * Sequence of operations for 5 keys
+ * - add keys
+ * - lookup keys: hit (bulk)
+ * - add keys (update)
+ * - lookup keys: hit (updated data)
+ * - delete keys : hit
+ */
+static int test_five_keys(void)
+{
+ struct rte_efd_table *handle;
+ const void *key_array[5] = {0};
+ efd_value_t result[5] = {0};
+ efd_value_t prev_value;
+ unsigned int i;
+ printf("Entering %s\n", __func__);
+
+ handle = rte_efd_create("test_five_keys", TABLE_SIZE,
+ sizeof(struct flow_key),
+ efd_get_all_sockets_bitmask(), test_socket_id);
+ TEST_ASSERT_NOT_NULL(handle, "Error creating the efd table\n");
+
+ /* Setup data */
+ for (i = 0; i < 5; i++)
+ data[i] = mrand48() & VALUE_BITMASK;
+
+ /* Add */
+ for (i = 0; i < 5; i++) {
+ TEST_ASSERT_SUCCESS(rte_efd_update(handle, test_socket_id,
+ &keys[i], data[i]),
+ "Error inserting the key");
+ print_key_info("Add", &keys[i], data[i]);
+ }
+
+ /* Lookup */
+ for (i = 0; i < 5; i++)
+ key_array[i] = &keys[i];
+
+ rte_efd_lookup_bulk(handle, test_socket_id, 5,
+ (void *) &key_array, result);
+
+ for (i = 0; i < 5; i++) {
+ TEST_ASSERT_EQUAL(result[i], data[i],
+ "bulk: failed to find key. Expected %d, got %d",
+ data[i], result[i]);
+ print_key_info("Lkp", &keys[i], data[i]);
+ }
+
+ /* Modify data (bulk) */
+ for (i = 0; i < 5; i++)
+ data[i] = data[i] + 1;
+
+ /* Add - update */
+ for (i = 0; i < 5; i++) {
+ TEST_ASSERT_SUCCESS(rte_efd_update(handle, test_socket_id,
+ &keys[i], data[i]),
+ "Error inserting the key");
+ print_key_info("Add", &keys[i], data[i]);
+ }
+
+ /* Lookup */
+ for (i = 0; i < 5; i++) {
+ TEST_ASSERT_EQUAL(rte_efd_lookup(handle, test_socket_id,
+ &keys[i]), data[i],
+ "failed to find key");
+ print_key_info("Lkp", &keys[i], data[i]);
+ }
+
+ /* Delete */
+ for (i = 0; i < 5; i++) {
+ TEST_ASSERT_SUCCESS(rte_efd_delete(handle, test_socket_id,
+ &keys[i], &prev_value),
+ "failed to delete key");
+ TEST_ASSERT_EQUAL(prev_value, data[i],
+ "failed to delete the expected value, got %d, "
+ "expected %d", prev_value, data[i]);
+ print_key_info("Del", &keys[i], data[i]);
+ }
+
+
+ rte_efd_free(handle);
+
+ return 0;
+}
+
+/*
+ * Test to see the average table utilization (entries added/max entries)
+ * before hitting a random entry that cannot be added
+ */
+static int test_average_table_utilization(void)
+{
+ struct rte_efd_table *handle = NULL;
+ uint32_t num_rules_in = TABLE_SIZE;
+ uint8_t simple_key[EFD_TEST_KEY_LEN];
+ unsigned int i, j;
+ unsigned int added_keys, average_keys_added = 0;
+
+ printf("Evaluating table utilization and correctness, please wait\n");
+ fflush(stdout);
+
+ for (j = 0; j < ITERATIONS; j++) {
+ handle = rte_efd_create("test_efd", num_rules_in,
+ EFD_TEST_KEY_LEN, efd_get_all_sockets_bitmask(),
+ test_socket_id);
+ if (handle == NULL) {
+ printf("efd table creation failed\n");
+ return -1;
+ }
+
+ unsigned int succeeded = 0;
+ unsigned int lost_keys = 0;
+
+ /* Add random entries until key cannot be added */
+ for (added_keys = 0; added_keys < num_rules_in; added_keys++) {
+
+ for (i = 0; i < EFD_TEST_KEY_LEN; i++)
+ simple_key[i] = rte_rand() & 0xFF;
+
+ efd_value_t val = simple_key[0];
+
+ if (rte_efd_update(handle, test_socket_id, simple_key,
+ val))
+ break; /* continue;*/
+ if (rte_efd_lookup(handle, test_socket_id, simple_key)
+ != val)
+ lost_keys++;
+ else
+ succeeded++;
+ }
+
+ average_keys_added += succeeded;
+
+ /* Reset the table */
+ rte_efd_free(handle);
+
+ /* Print progress on operations */
+ printf("Added %10u Succeeded %10u Lost %10u\n",
+ added_keys, succeeded, lost_keys);
+ fflush(stdout);
+ }
+
+ average_keys_added /= ITERATIONS;
+
+ printf("\nAverage table utilization = %.2f%% (%u/%u)\n",
+ ((double) average_keys_added / num_rules_in * 100),
+ average_keys_added, num_rules_in);
+
+ return 0;
+}
+
+/*
+ * Do tests for EFD creation with bad parameters.
+ */
+static int test_efd_creation_with_bad_parameters(void)
+{
+ struct rte_efd_table *handle, *tmp;
+ printf("Entering %s, **Errors are expected **\n", __func__);
+
+ handle = rte_efd_create("creation_with_bad_parameters_0", TABLE_SIZE, 0,
+ efd_get_all_sockets_bitmask(), test_socket_id);
+ if (handle != NULL) {
+ rte_efd_free(handle);
+ printf("Impossible creating EFD table successfully "
+ "if key_len in parameter is zero\n");
+ return -1;
+ }
+
+ handle = rte_efd_create("creation_with_bad_parameters_1", TABLE_SIZE,
+ sizeof(struct flow_key), 0, test_socket_id);
+ if (handle != NULL) {
+ rte_efd_free(handle);
+ printf("Impossible creating EFD table successfully "
+ "with invalid socket bitmask\n");
+ return -1;
+ }
+
+ handle = rte_efd_create("creation_with_bad_parameters_2", TABLE_SIZE,
+ sizeof(struct flow_key), efd_get_all_sockets_bitmask(),
+ 255);
+ if (handle != NULL) {
+ rte_efd_free(handle);
+ printf("Impossible creating EFD table successfully "
+ "with invalid socket\n");
+ return -1;
+ }
+
+ /* test with same name should fail */
+ handle = rte_efd_create("same_name", TABLE_SIZE,
+ sizeof(struct flow_key),
+ efd_get_all_sockets_bitmask(), 0);
+ if (handle == NULL) {
+ printf("Cannot create first EFD table with 'same_name'\n");
+ return -1;
+ }
+ tmp = rte_efd_create("same_name", TABLE_SIZE, sizeof(struct flow_key),
+ efd_get_all_sockets_bitmask(), 0);
+ if (tmp != NULL) {
+ printf("Creation of EFD table with same name should fail\n");
+ rte_efd_free(handle);
+ rte_efd_free(tmp);
+ return -1;
+ }
+ rte_efd_free(handle);
+
+ printf("# Test successful. No more errors expected\n");
+
+ return 0;
+}
+
+static int
+test_efd(void)
+{
+
+ /* Unit tests */
+ if (test_add_delete() < 0)
+ return -1;
+ if (test_efd_find_existing() < 0)
+ return -1;
+ if (test_add_update_delete() < 0)
+ return -1;
+ if (test_five_keys() < 0)
+ return -1;
+ if (test_efd_creation_with_bad_parameters() < 0)
+ return -1;
+ if (test_average_table_utilization() < 0)
+ return -1;
+
+ return 0;
+}
+
+REGISTER_TEST_COMMAND(efd_autotest, test_efd);
diff --git a/test/test/test_efd_perf.c b/test/test/test_efd_perf.c
new file mode 100644
index 00000000..2b8a8eac
--- /dev/null
+++ b/test/test/test_efd_perf.c
@@ -0,0 +1,414 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+
+#include <rte_lcore.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_efd.h>
+#include <rte_memcpy.h>
+#include <rte_thash.h>
+
+#include "test.h"
+
+#define NUM_KEYSIZES 10
+#define NUM_SHUFFLES 10
+#define MAX_KEYSIZE 64
+#define MAX_ENTRIES (1 << 19)
+#define KEYS_TO_ADD (MAX_ENTRIES * 3 / 4) /* 75% table utilization */
+#define NUM_LOOKUPS (KEYS_TO_ADD * 5) /* Loop among keys added, several times */
+
+#if RTE_EFD_VALUE_NUM_BITS == 32
+#define VALUE_BITMASK 0xffffffff
+#else
+#define VALUE_BITMASK ((1 << RTE_EFD_VALUE_NUM_BITS) - 1)
+#endif
+static unsigned int test_socket_id;
+
+static inline uint8_t efd_get_all_sockets_bitmask(void)
+{
+ uint8_t all_cpu_sockets_bitmask = 0;
+ unsigned int i;
+ unsigned int next_lcore = rte_get_master_lcore();
+ const int val_true = 1, val_false = 0;
+ for (i = 0; i < rte_lcore_count(); i++) {
+ all_cpu_sockets_bitmask |= 1 << rte_lcore_to_socket_id(next_lcore);
+ next_lcore = rte_get_next_lcore(next_lcore, val_false, val_true);
+ }
+
+ return all_cpu_sockets_bitmask;
+}
+
+enum operations {
+ ADD = 0,
+ LOOKUP,
+ LOOKUP_MULTI,
+ DELETE,
+ NUM_OPERATIONS
+};
+
+struct efd_perf_params {
+ struct rte_efd_table *efd_table;
+ uint32_t key_size;
+ unsigned int cycle;
+};
+
+static uint32_t hashtest_key_lens[] = {
+ /* standard key sizes */
+ 4, 8, 16, 32, 48, 64,
+ /* IPv4 SRC + DST + protocol, unpadded */
+ 9,
+ /* IPv4 5-tuple, unpadded */
+ 13,
+ /* IPv6 5-tuple, unpadded */
+ 37,
+ /* IPv6 5-tuple, padded to 8-byte boundary */
+ 40
+};
+
+/* Array to store number of cycles per operation */
+uint64_t cycles[NUM_KEYSIZES][NUM_OPERATIONS];
+
+/* Array to store the data */
+efd_value_t data[KEYS_TO_ADD];
+
+/* Array to store all input keys */
+uint8_t keys[KEYS_TO_ADD][MAX_KEYSIZE];
+
+/* Shuffle the keys that have been added, so lookups will be totally random */
+static void
+shuffle_input_keys(struct efd_perf_params *params)
+{
+ efd_value_t temp_data;
+ unsigned int i;
+ uint32_t swap_idx;
+ uint8_t temp_key[MAX_KEYSIZE];
+
+ for (i = KEYS_TO_ADD - 1; i > 0; i--) {
+ swap_idx = rte_rand() % i;
+
+ memcpy(temp_key, keys[i], hashtest_key_lens[params->cycle]);
+ temp_data = data[i];
+
+ memcpy(keys[i], keys[swap_idx], hashtest_key_lens[params->cycle]);
+ data[i] = data[swap_idx];
+
+ memcpy(keys[swap_idx], temp_key, hashtest_key_lens[params->cycle]);
+ data[swap_idx] = temp_data;
+ }
+}
+
+static int key_compare(const void *key1, const void *key2)
+{
+ return memcmp(key1, key2, MAX_KEYSIZE);
+}
+
+/*
+ * TODO: we could "error proof" these as done in test_hash_perf.c ln 165:
+ *
+ * The current setup may give errors if too full in some cases which we check
+ * for. However, since EFD allows for ~99% capacity, these errors are rare for
+ * #"KEYS_TO_ADD" which is 75% capacity.
+ */
+static int
+setup_keys_and_data(struct efd_perf_params *params, unsigned int cycle)
+{
+ unsigned int i, j;
+ int num_duplicates;
+
+ params->key_size = hashtest_key_lens[cycle];
+ params->cycle = cycle;
+
+ /* Reset all arrays */
+ for (i = 0; i < params->key_size; i++)
+ keys[0][i] = 0;
+
+ /* Generate a list of keys, some of which may be duplicates */
+ for (i = 0; i < KEYS_TO_ADD; i++) {
+ for (j = 0; j < params->key_size; j++)
+ keys[i][j] = rte_rand() & 0xFF;
+
+ data[i] = rte_rand() & VALUE_BITMASK;
+ }
+
+ /* Remove duplicates from the keys array */
+ do {
+ num_duplicates = 0;
+
+ /* Sort the list of keys to make it easier to find duplicates */
+ qsort(keys, KEYS_TO_ADD, MAX_KEYSIZE, key_compare);
+
+ /* Sift through the list of keys and look for duplicates */
+ int num_duplicates = 0;
+ for (i = 0; i < KEYS_TO_ADD - 1; i++) {
+ if (memcmp(keys[i], keys[i + 1], params->key_size) == 0) {
+ /* This key already exists, try again */
+ num_duplicates++;
+ for (j = 0; j < params->key_size; j++)
+ keys[i][j] = rte_rand() & 0xFF;
+ }
+ }
+ } while (num_duplicates != 0);
+
+ /* Shuffle the random values again */
+ shuffle_input_keys(params);
+
+ params->efd_table = rte_efd_create("test_efd_perf",
+ MAX_ENTRIES, params->key_size,
+ efd_get_all_sockets_bitmask(), test_socket_id);
+ TEST_ASSERT_NOT_NULL(params->efd_table, "Error creating the efd table\n");
+
+ return 0;
+}
+
+static int
+timed_adds(struct efd_perf_params *params)
+{
+ const uint64_t start_tsc = rte_rdtsc();
+ unsigned int i, a;
+ int32_t ret;
+
+ for (i = 0; i < KEYS_TO_ADD; i++) {
+ ret = rte_efd_update(params->efd_table, test_socket_id, keys[i],
+ data[i]);
+ if (ret != 0) {
+ printf("Error %d in rte_efd_update - key=0x", ret);
+ for (a = 0; a < params->key_size; a++)
+ printf("%02x", keys[i][a]);
+ printf(" value=%d\n", data[i]);
+
+ return -1;
+ }
+ }
+
+ const uint64_t end_tsc = rte_rdtsc();
+ const uint64_t time_taken = end_tsc - start_tsc;
+
+ cycles[params->cycle][ADD] = time_taken / KEYS_TO_ADD;
+ return 0;
+}
+
+static int
+timed_lookups(struct efd_perf_params *params)
+{
+ unsigned int i, j, a;
+ const uint64_t start_tsc = rte_rdtsc();
+ efd_value_t ret_data;
+
+ for (i = 0; i < NUM_LOOKUPS / KEYS_TO_ADD; i++) {
+ for (j = 0; j < KEYS_TO_ADD; j++) {
+ ret_data = rte_efd_lookup(params->efd_table,
+ test_socket_id, keys[j]);
+ if (ret_data != data[j]) {
+ printf("Value mismatch using rte_efd_lookup: "
+ "key #%d (0x", i);
+ for (a = 0; a < params->key_size; a++)
+ printf("%02x", keys[i][a]);
+ printf(")\n");
+ printf(" Expected %d, got %d\n", data[i],
+ ret_data);
+
+ return -1;
+ }
+
+ }
+ }
+
+ const uint64_t end_tsc = rte_rdtsc();
+ const uint64_t time_taken = end_tsc - start_tsc;
+
+ cycles[params->cycle][LOOKUP] = time_taken / NUM_LOOKUPS;
+
+ return 0;
+}
+
+static int
+timed_lookups_multi(struct efd_perf_params *params)
+{
+ unsigned int i, j, k, a;
+ efd_value_t result[RTE_EFD_BURST_MAX] = {0};
+ const void *keys_burst[RTE_EFD_BURST_MAX];
+ const uint64_t start_tsc = rte_rdtsc();
+
+ for (i = 0; i < NUM_LOOKUPS / KEYS_TO_ADD; i++) {
+ for (j = 0; j < KEYS_TO_ADD / RTE_EFD_BURST_MAX; j++) {
+ for (k = 0; k < RTE_EFD_BURST_MAX; k++)
+ keys_burst[k] = keys[j * RTE_EFD_BURST_MAX + k];
+
+ rte_efd_lookup_bulk(params->efd_table, test_socket_id,
+ RTE_EFD_BURST_MAX,
+ keys_burst, result);
+
+ for (k = 0; k < RTE_EFD_BURST_MAX; k++) {
+ uint32_t data_idx = j * RTE_EFD_BURST_MAX + k;
+ if (result[k] != data[data_idx]) {
+ printf("Value mismatch using "
+ "rte_efd_lookup_bulk: key #%d "
+ "(0x", i);
+ for (a = 0; a < params->key_size; a++)
+ printf("%02x",
+ keys[data_idx][a]);
+ printf(")\n");
+ printf(" Expected %d, got %d\n",
+ data[data_idx], result[k]);
+
+ return -1;
+ }
+ }
+ }
+ }
+
+ const uint64_t end_tsc = rte_rdtsc();
+ const uint64_t time_taken = end_tsc - start_tsc;
+
+ cycles[params->cycle][LOOKUP_MULTI] = time_taken / NUM_LOOKUPS;
+
+ return 0;
+}
+
+static int
+timed_deletes(struct efd_perf_params *params)
+{
+ unsigned int i, a;
+ const uint64_t start_tsc = rte_rdtsc();
+ int32_t ret;
+
+ for (i = 0; i < KEYS_TO_ADD; i++) {
+ ret = rte_efd_delete(params->efd_table, test_socket_id, keys[i],
+ NULL);
+
+ if (ret != 0) {
+ printf("Error %d in rte_efd_delete - key=0x", ret);
+ for (a = 0; a < params->key_size; a++)
+ printf("%02x", keys[i][a]);
+ printf("\n");
+
+ return -1;
+ }
+ }
+
+ const uint64_t end_tsc = rte_rdtsc();
+ const uint64_t time_taken = end_tsc - start_tsc;
+
+ cycles[params->cycle][DELETE] = time_taken / KEYS_TO_ADD;
+
+ return 0;
+}
+
+static void
+perform_frees(struct efd_perf_params *params)
+{
+ if (params->efd_table != NULL) {
+ rte_efd_free(params->efd_table);
+ params->efd_table = NULL;
+ }
+}
+
+static int
+exit_with_fail(const char *testname, struct efd_perf_params *params,
+ unsigned int i)
+{
+
+ printf("<<<<<Test %s failed at keysize %d iteration %d >>>>>\n",
+ testname, hashtest_key_lens[params->cycle], i);
+ perform_frees(params);
+ return -1;
+}
+
+static int
+run_all_tbl_perf_tests(void)
+{
+ unsigned int i, j;
+ struct efd_perf_params params;
+
+ printf("Measuring performance, please wait\n");
+ fflush(stdout);
+
+ test_socket_id = rte_socket_id();
+
+ for (i = 0; i < NUM_KEYSIZES; i++) {
+
+ if (setup_keys_and_data(&params, i) < 0) {
+ printf("Could not create keys/data/table\n");
+ return -1;
+ }
+
+ if (timed_adds(&params) < 0)
+ return exit_with_fail("timed_adds", &params, i);
+
+ for (j = 0; j < NUM_SHUFFLES; j++)
+ shuffle_input_keys(&params);
+
+ if (timed_lookups(&params) < 0)
+ return exit_with_fail("timed_lookups", &params, i);
+
+ if (timed_lookups_multi(&params) < 0)
+ return exit_with_fail("timed_lookups_multi", &params, i);
+
+ if (timed_deletes(&params) < 0)
+ return exit_with_fail("timed_deletes", &params, i);
+
+ /* Print a dot to show progress on operations */
+ printf(".");
+ fflush(stdout);
+
+ perform_frees(&params);
+ }
+
+ printf("\nResults (in CPU cycles/operation)\n");
+ printf("-----------------------------------\n");
+ printf("\n%-18s%-18s%-18s%-18s%-18s\n",
+ "Keysize", "Add", "Lookup", "Lookup_bulk", "Delete");
+ for (i = 0; i < NUM_KEYSIZES; i++) {
+ printf("%-18d", hashtest_key_lens[i]);
+ for (j = 0; j < NUM_OPERATIONS; j++)
+ printf("%-18"PRIu64, cycles[i][j]);
+ printf("\n");
+ }
+ return 0;
+}
+
+static int
+test_efd_perf(void)
+{
+
+ if (run_all_tbl_perf_tests() < 0)
+ return -1;
+
+ return 0;
+}
+
+REGISTER_TEST_COMMAND(efd_perf_autotest, test_efd_perf);
diff --git a/app/test/test_errno.c b/test/test/test_errno.c
index 388decbb..388decbb 100644
--- a/app/test/test_errno.c
+++ b/test/test/test_errno.c
diff --git a/test/test/test_eventdev.c b/test/test/test_eventdev.c
new file mode 100644
index 00000000..88b80a92
--- /dev/null
+++ b/test/test/test_eventdev.c
@@ -0,0 +1,787 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Cavium networks. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_eventdev.h>
+#include <rte_dev.h>
+
+#include "test.h"
+
+#define TEST_DEV_ID 0
+
+static int
+testsuite_setup(void)
+{
+ RTE_BUILD_BUG_ON(sizeof(struct rte_event) != 16);
+ uint8_t count;
+ count = rte_event_dev_count();
+ if (!count) {
+ printf("Failed to find a valid event device,"
+ " testing with event_skeleton device\n");
+ return rte_vdev_init("event_skeleton", NULL);
+ }
+ return TEST_SUCCESS;
+}
+
+static void
+testsuite_teardown(void)
+{
+}
+
+static int
+test_eventdev_count(void)
+{
+ uint8_t count;
+ count = rte_event_dev_count();
+ TEST_ASSERT(count > 0, "Invalid eventdev count %" PRIu8, count);
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_get_dev_id(void)
+{
+ int ret;
+ ret = rte_event_dev_get_dev_id("not_a_valid_eventdev_driver");
+ TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d", ret);
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_socket_id(void)
+{
+ int socket_id;
+ socket_id = rte_event_dev_socket_id(TEST_DEV_ID);
+ TEST_ASSERT(socket_id != -EINVAL, "Failed to get socket_id %d",
+ socket_id);
+ socket_id = rte_event_dev_socket_id(RTE_EVENT_MAX_DEVS);
+ TEST_ASSERT(socket_id == -EINVAL, "Expected -EINVAL %d", socket_id);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_info_get(void)
+{
+ int ret;
+ struct rte_event_dev_info info;
+ ret = rte_event_dev_info_get(TEST_DEV_ID, NULL);
+ TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+ TEST_ASSERT(info.max_event_ports > 0,
+ "Not enough event ports %d", info.max_event_ports);
+ TEST_ASSERT(info.max_event_queues > 0,
+ "Not enough event queues %d", info.max_event_queues);
+ return TEST_SUCCESS;
+}
+
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+ dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+ dev_conf->nb_event_ports = info->max_event_ports;
+ dev_conf->nb_event_queues = info->max_event_queues;
+ dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+ dev_conf->nb_event_port_dequeue_depth =
+ info->max_event_port_dequeue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_events_limit =
+ info->max_num_events;
+}
+
+static int
+test_ethdev_config_run(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info,
+ void (*fn)(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info))
+{
+ devconf_set_default_sane_values(dev_conf, info);
+ fn(dev_conf, info);
+ return rte_event_dev_configure(TEST_DEV_ID, dev_conf);
+}
+
+static void
+min_dequeue_limit(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns - 1;
+}
+
+static void
+max_dequeue_limit(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ dev_conf->dequeue_timeout_ns = info->max_dequeue_timeout_ns + 1;
+}
+
+static void
+max_events_limit(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ dev_conf->nb_events_limit = info->max_num_events + 1;
+}
+
+static void
+max_event_ports(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ dev_conf->nb_event_ports = info->max_event_ports + 1;
+}
+
+static void
+max_event_queues(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ dev_conf->nb_event_queues = info->max_event_queues + 1;
+}
+
+static void
+max_event_queue_flows(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ dev_conf->nb_event_queue_flows = info->max_event_queue_flows + 1;
+}
+
+static void
+max_event_port_dequeue_depth(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ dev_conf->nb_event_port_dequeue_depth =
+ info->max_event_port_dequeue_depth + 1;
+}
+
+static void
+max_event_port_enqueue_depth(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth + 1;
+}
+
+
+static int
+test_eventdev_configure(void)
+{
+ int ret;
+ struct rte_event_dev_config dev_conf;
+ struct rte_event_dev_info info;
+ ret = rte_event_dev_configure(TEST_DEV_ID, NULL);
+ TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ /* Check limits */
+ TEST_ASSERT_EQUAL(-EINVAL,
+ test_ethdev_config_run(&dev_conf, &info, min_dequeue_limit),
+ "Config negative test failed");
+ TEST_ASSERT_EQUAL(-EINVAL,
+ test_ethdev_config_run(&dev_conf, &info, max_dequeue_limit),
+ "Config negative test failed");
+ TEST_ASSERT_EQUAL(-EINVAL,
+ test_ethdev_config_run(&dev_conf, &info, max_events_limit),
+ "Config negative test failed");
+ TEST_ASSERT_EQUAL(-EINVAL,
+ test_ethdev_config_run(&dev_conf, &info, max_event_ports),
+ "Config negative test failed");
+ TEST_ASSERT_EQUAL(-EINVAL,
+ test_ethdev_config_run(&dev_conf, &info, max_event_queues),
+ "Config negative test failed");
+ TEST_ASSERT_EQUAL(-EINVAL,
+ test_ethdev_config_run(&dev_conf, &info, max_event_queue_flows),
+ "Config negative test failed");
+ TEST_ASSERT_EQUAL(-EINVAL,
+ test_ethdev_config_run(&dev_conf, &info,
+ max_event_port_dequeue_depth),
+ "Config negative test failed");
+ TEST_ASSERT_EQUAL(-EINVAL,
+ test_ethdev_config_run(&dev_conf, &info,
+ max_event_port_enqueue_depth),
+ "Config negative test failed");
+
+ /* Positive case */
+ devconf_set_default_sane_values(&dev_conf, &info);
+ ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+ /* re-configure */
+ devconf_set_default_sane_values(&dev_conf, &info);
+ dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports/2, 1);
+ dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues/2, 1);
+ ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to re configure eventdev");
+
+ /* re-configure back to max_event_queues and max_event_ports */
+ devconf_set_default_sane_values(&dev_conf, &info);
+ ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");
+
+ return TEST_SUCCESS;
+
+}
+
+static int
+eventdev_configure_setup(void)
+{
+ int ret;
+ struct rte_event_dev_config dev_conf;
+ struct rte_event_dev_info info;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+ devconf_set_default_sane_values(&dev_conf, &info);
+ ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_queue_default_conf_get(void)
+{
+ int i, ret;
+ struct rte_event_queue_conf qconf;
+
+ ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
+ TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
+ &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_queue_setup(void)
+{
+ int i, ret;
+ struct rte_event_dev_info info;
+ struct rte_event_queue_conf qconf;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ /* Negative cases */
+ ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
+ qconf.event_queue_cfg = (RTE_EVENT_QUEUE_CFG_ALL_TYPES &
+ RTE_EVENT_QUEUE_CFG_TYPE_MASK);
+ qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
+ ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
+ TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ qconf.nb_atomic_flows = info.max_event_queue_flows;
+ qconf.event_queue_cfg = (RTE_EVENT_QUEUE_CFG_ORDERED_ONLY &
+ RTE_EVENT_QUEUE_CFG_TYPE_MASK);
+ qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
+ ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
+ TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ ret = rte_event_queue_setup(TEST_DEV_ID, info.max_event_queues,
+ &qconf);
+ TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ /* Positive case */
+ ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
+ ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");
+
+
+ for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_queue_count(void)
+{
+ int ret;
+ struct rte_event_dev_info info;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ TEST_ASSERT_EQUAL(rte_event_queue_count(TEST_DEV_ID),
+ info.max_event_queues, "Wrong queue count");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_queue_priority(void)
+{
+ int i, ret;
+ struct rte_event_dev_info info;
+ struct rte_event_queue_conf qconf;
+ uint8_t priority;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
+ &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
+ qconf.priority = i % RTE_EVENT_DEV_PRIORITY_LOWEST;
+ ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+ }
+
+ for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ priority = rte_event_queue_priority(TEST_DEV_ID, i);
+ if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
+ TEST_ASSERT_EQUAL(priority,
+ i % RTE_EVENT_DEV_PRIORITY_LOWEST,
+ "Wrong priority value for queue%d", i);
+ else
+ TEST_ASSERT_EQUAL(priority,
+ RTE_EVENT_DEV_PRIORITY_NORMAL,
+ "Wrong priority value for queue%d", i);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_port_default_conf_get(void)
+{
+ int i, ret;
+ struct rte_event_port_conf pconf;
+
+ ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
+ TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ ret = rte_event_port_default_conf_get(TEST_DEV_ID,
+ rte_event_port_count(TEST_DEV_ID) + 1, NULL);
+ TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
+ ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
+ &pconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_port_setup(void)
+{
+ int i, ret;
+ struct rte_event_dev_info info;
+ struct rte_event_port_conf pconf;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ /* Negative cases */
+ ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
+ pconf.new_event_threshold = info.max_num_events + 1;
+ ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
+ TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ pconf.new_event_threshold = info.max_num_events;
+ pconf.dequeue_depth = info.max_event_port_dequeue_depth + 1;
+ ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
+ TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ pconf.dequeue_depth = info.max_event_port_dequeue_depth;
+ pconf.enqueue_depth = info.max_event_port_enqueue_depth + 1;
+ ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
+ TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
+ &pconf);
+ TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+ /* Positive case */
+ ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
+ ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
+
+
+ for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
+ ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_dequeue_depth(void)
+{
+ int ret;
+ struct rte_event_dev_info info;
+ struct rte_event_port_conf pconf;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
+ ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
+
+ TEST_ASSERT_EQUAL(rte_event_port_dequeue_depth(TEST_DEV_ID, 0),
+ pconf.dequeue_depth, "Wrong port dequeue depth");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_enqueue_depth(void)
+{
+ int ret;
+ struct rte_event_dev_info info;
+ struct rte_event_port_conf pconf;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
+ ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
+
+ TEST_ASSERT_EQUAL(rte_event_port_enqueue_depth(TEST_DEV_ID, 0),
+ pconf.enqueue_depth, "Wrong port enqueue depth");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_port_count(void)
+{
+ int ret;
+ struct rte_event_dev_info info;
+
+ ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+
+ TEST_ASSERT_EQUAL(rte_event_port_count(TEST_DEV_ID),
+ info.max_event_ports, "Wrong port count");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_timeout_ticks(void)
+{
+ int ret;
+ uint64_t timeout_ticks;
+
+ ret = rte_event_dequeue_timeout_ticks(TEST_DEV_ID, 100, &timeout_ticks);
+ if (ret != -ENOTSUP)
+ TEST_ASSERT_SUCCESS(ret, "Fail to get timeout_ticks");
+
+ return ret;
+}
+
+
+static int
+test_eventdev_start_stop(void)
+{
+ int i, ret;
+
+ ret = eventdev_configure_setup();
+ TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+ for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+ }
+
+ for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
+ ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
+ }
+
+ ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
+ TEST_ASSERT(ret == rte_event_queue_count(TEST_DEV_ID),
+ "Failed to link port, device %d", TEST_DEV_ID);
+
+ ret = rte_event_dev_start(TEST_DEV_ID);
+ TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
+
+ rte_event_dev_stop(TEST_DEV_ID);
+ return TEST_SUCCESS;
+}
+
+
+static int
+eventdev_setup_device(void)
+{
+ int i, ret;
+
+ ret = eventdev_configure_setup();
+ TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+ for (i = 0; i < rte_event_queue_count(TEST_DEV_ID); i++) {
+ ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
+ }
+
+ for (i = 0; i < rte_event_port_count(TEST_DEV_ID); i++) {
+ ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
+ }
+
+ ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
+ TEST_ASSERT(ret == rte_event_queue_count(TEST_DEV_ID),
+ "Failed to link port, device %d", TEST_DEV_ID);
+
+ ret = rte_event_dev_start(TEST_DEV_ID);
+ TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
+
+ return TEST_SUCCESS;
+}
+
+static void
+eventdev_stop_device(void)
+{
+ rte_event_dev_stop(TEST_DEV_ID);
+}
+
+static int
+test_eventdev_link(void)
+{
+ int ret, nb_queues, i;
+ uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
+
+ ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
+ TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
+ TEST_DEV_ID);
+
+ nb_queues = rte_event_queue_count(TEST_DEV_ID);
+ for (i = 0; i < nb_queues; i++) {
+ queues[i] = i;
+ priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ }
+
+ ret = rte_event_port_link(TEST_DEV_ID, 0, queues,
+ priorities, nb_queues);
+ TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
+ TEST_DEV_ID, ret);
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_unlink(void)
+{
+ int ret, nb_queues, i;
+ uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+
+ ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
+ TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
+ TEST_DEV_ID);
+
+ nb_queues = rte_event_queue_count(TEST_DEV_ID);
+ for (i = 0; i < nb_queues; i++)
+ queues[i] = i;
+
+
+ ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
+ TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
+ TEST_DEV_ID, ret);
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_link_get(void)
+{
+ int ret, nb_queues, i;
+ uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
+
+ /* link all queues */
+ ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
+ TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
+ TEST_DEV_ID);
+
+ nb_queues = rte_event_queue_count(TEST_DEV_ID);
+ for (i = 0; i < nb_queues; i++)
+ queues[i] = i;
+
+ ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
+ TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
+ TEST_DEV_ID, ret);
+
+ ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
+ TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);
+
+ /* link all queues and get the links */
+ nb_queues = rte_event_queue_count(TEST_DEV_ID);
+ for (i = 0; i < nb_queues; i++) {
+ queues[i] = i;
+ priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ }
+ ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
+ nb_queues);
+ TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
+ TEST_DEV_ID, ret);
+ ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
+ TEST_ASSERT(ret == nb_queues, "(%d)Wrong link get ret=%d expected=%d",
+ TEST_DEV_ID, ret, nb_queues);
+ /* unlink all*/
+ ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
+ TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
+ TEST_DEV_ID, ret);
+ /* link just one queue */
+ queues[0] = 0;
+ priorities[0] = RTE_EVENT_DEV_PRIORITY_NORMAL;
+
+ ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities, 1);
+ TEST_ASSERT(ret == 1, "Failed to link(device%d) ret=%d",
+ TEST_DEV_ID, ret);
+ ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
+ TEST_ASSERT(ret == 1, "(%d)Wrong link get ret=%d expected=%d",
+ TEST_DEV_ID, ret, 1);
+ /* unlink all*/
+ ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
+ TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
+ TEST_DEV_ID, ret);
+ /* 4links and 2 unlinks */
+ nb_queues = rte_event_queue_count(TEST_DEV_ID);
+ if (nb_queues >= 4) {
+ for (i = 0; i < 4; i++) {
+ queues[i] = i;
+ priorities[i] = 0x40;
+ }
+ ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
+ 4);
+ TEST_ASSERT(ret == 4, "Failed to link(device%d) ret=%d",
+ TEST_DEV_ID, ret);
+
+ for (i = 0; i < 2; i++)
+ queues[i] = i;
+
+ ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, 2);
+ TEST_ASSERT(ret == 2, "Failed to unlink(device%d) ret=%d",
+ TEST_DEV_ID, ret);
+ ret = rte_event_port_links_get(TEST_DEV_ID, 0,
+ queues, priorities);
+ TEST_ASSERT(ret == 2, "(%d)Wrong link get ret=%d expected=%d",
+ TEST_DEV_ID, ret, 2);
+ TEST_ASSERT(queues[0] == 2, "ret=%d expected=%d", ret, 2);
+ TEST_ASSERT(priorities[0] == 0x40, "ret=%d expected=%d",
+ ret, 0x40);
+ TEST_ASSERT(queues[1] == 3, "ret=%d expected=%d", ret, 3);
+ TEST_ASSERT(priorities[1] == 0x40, "ret=%d expected=%d",
+ ret, 0x40);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_eventdev_close(void)
+{
+ rte_event_dev_stop(TEST_DEV_ID);
+ return rte_event_dev_close(TEST_DEV_ID);
+}
+
+static struct unit_test_suite eventdev_common_testsuite = {
+ .suite_name = "eventdev common code unit test suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(NULL, NULL,
+ test_eventdev_count),
+ TEST_CASE_ST(NULL, NULL,
+ test_eventdev_get_dev_id),
+ TEST_CASE_ST(NULL, NULL,
+ test_eventdev_socket_id),
+ TEST_CASE_ST(NULL, NULL,
+ test_eventdev_info_get),
+ TEST_CASE_ST(NULL, NULL,
+ test_eventdev_configure),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_queue_default_conf_get),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_queue_setup),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_queue_count),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_queue_priority),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_port_default_conf_get),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_port_setup),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_dequeue_depth),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_enqueue_depth),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_port_count),
+ TEST_CASE_ST(eventdev_configure_setup, NULL,
+ test_eventdev_timeout_ticks),
+ TEST_CASE_ST(NULL, NULL,
+ test_eventdev_start_stop),
+ TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
+ test_eventdev_link),
+ TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
+ test_eventdev_unlink),
+ TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
+ test_eventdev_link_get),
+ TEST_CASE_ST(eventdev_setup_device, NULL,
+ test_eventdev_close),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_eventdev_common(void)
+{
+ return unit_test_suite_runner(&eventdev_common_testsuite);
+}
+
+REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);
diff --git a/test/test/test_eventdev_octeontx.c b/test/test/test_eventdev_octeontx.c
new file mode 100644
index 00000000..9e95722b
--- /dev/null
+++ b/test/test/test_eventdev_octeontx.c
@@ -0,0 +1,1399 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium networks. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_random.h>
+
+#include "test.h"
+
+#define NUM_PACKETS (1 << 18)
+#define MAX_EVENTS (16 * 1024)
+
+static int evdev;
+static struct rte_mempool *eventdev_test_mempool;
+
+struct event_attr {
+ uint32_t flow_id;
+ uint8_t event_type;
+ uint8_t sub_event_type;
+ uint8_t sched_type;
+ uint8_t queue;
+ uint8_t port;
+};
+
+static uint32_t seqn_list_index;
+static int seqn_list[NUM_PACKETS];
+
+static inline void
+seqn_list_init(void)
+{
+ RTE_BUILD_BUG_ON(NUM_PACKETS < MAX_EVENTS);
+ memset(seqn_list, 0, sizeof(seqn_list));
+ seqn_list_index = 0;
+}
+
+static inline int
+seqn_list_update(int val)
+{
+ if (seqn_list_index >= NUM_PACKETS)
+ return TEST_FAILED;
+
+ seqn_list[seqn_list_index++] = val;
+ rte_smp_wmb();
+ return TEST_SUCCESS;
+}
+
+static inline int
+seqn_list_check(int limit)
+{
+ int i;
+
+ for (i = 0; i < limit; i++) {
+ if (seqn_list[i] != i) {
+ printf("Seqn mismatch %d %d\n", seqn_list[i], i);
+ return TEST_FAILED;
+ }
+ }
+ return TEST_SUCCESS;
+}
+
+struct test_core_param {
+ rte_atomic32_t *total_events;
+ uint64_t dequeue_tmo_ticks;
+ uint8_t port;
+ uint8_t sched_type;
+};
+
+static int
+testsuite_setup(void)
+{
+ const char *eventdev_name = "event_octeontx";
+
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ printf("%d: Eventdev %s not found - creating.\n",
+ __LINE__, eventdev_name);
+ if (rte_vdev_init(eventdev_name, NULL) < 0) {
+ printf("Error creating eventdev %s\n", eventdev_name);
+ return TEST_FAILED;
+ }
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ printf("Error finding newly created eventdev\n");
+ return TEST_FAILED;
+ }
+ }
+
+ return TEST_SUCCESS;
+}
+
+static void
+testsuite_teardown(void)
+{
+ rte_event_dev_close(evdev);
+}
+
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+ dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+ dev_conf->nb_event_ports = info->max_event_ports;
+ dev_conf->nb_event_queues = info->max_event_queues;
+ dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+ dev_conf->nb_event_port_dequeue_depth =
+ info->max_event_port_dequeue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_events_limit =
+ info->max_num_events;
+}
+
+enum {
+ TEST_EVENTDEV_SETUP_DEFAULT,
+ TEST_EVENTDEV_SETUP_PRIORITY,
+ TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT,
+};
+
+static inline int
+_eventdev_setup(int mode)
+{
+ int i, ret;
+ struct rte_event_dev_config dev_conf;
+ struct rte_event_dev_info info;
+ const char *pool_name = "evdev_octeontx_test_pool";
+
+ /* Create and destrory pool for each test case to make it standalone */
+ eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
+ MAX_EVENTS,
+ 0 /*MBUF_CACHE_SIZE*/,
+ 0,
+ 512, /* Use very small mbufs */
+ rte_socket_id());
+ if (!eventdev_test_mempool) {
+ printf("ERROR creating mempool\n");
+ return TEST_FAILED;
+ }
+
+ ret = rte_event_dev_info_get(evdev, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+ TEST_ASSERT(info.max_num_events >= (int32_t)MAX_EVENTS,
+ "max_num_events=%d < max_events=%d",
+ info.max_num_events, MAX_EVENTS);
+
+ devconf_set_default_sane_values(&dev_conf, &info);
+ if (mode == TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT)
+ dev_conf.event_dev_cfg |= RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT;
+
+ ret = rte_event_dev_configure(evdev, &dev_conf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+ if (mode == TEST_EVENTDEV_SETUP_PRIORITY) {
+ /* Configure event queues(0 to n) with
+ * RTE_EVENT_DEV_PRIORITY_HIGHEST to
+ * RTE_EVENT_DEV_PRIORITY_LOWEST
+ */
+ uint8_t step = (RTE_EVENT_DEV_PRIORITY_LOWEST + 1) /
+ rte_event_queue_count(evdev);
+ for (i = 0; i < rte_event_queue_count(evdev); i++) {
+ struct rte_event_queue_conf queue_conf;
+
+ ret = rte_event_queue_default_conf_get(evdev, i,
+ &queue_conf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get def_conf%d", i);
+ queue_conf.priority = i * step;
+ ret = rte_event_queue_setup(evdev, i, &queue_conf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
+ }
+
+ } else {
+ /* Configure event queues with default priority */
+ for (i = 0; i < rte_event_queue_count(evdev); i++) {
+ ret = rte_event_queue_setup(evdev, i, NULL);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", i);
+ }
+ }
+ /* Configure event ports */
+ for (i = 0; i < rte_event_port_count(evdev); i++) {
+ ret = rte_event_port_setup(evdev, i, NULL);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", i);
+ ret = rte_event_port_link(evdev, i, NULL, NULL, 0);
+ TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", i);
+ }
+
+ ret = rte_event_dev_start(evdev);
+ TEST_ASSERT_SUCCESS(ret, "Failed to start device");
+
+ return TEST_SUCCESS;
+}
+
+static inline int
+eventdev_setup(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_DEFAULT);
+}
+
+static inline int
+eventdev_setup_priority(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_PRIORITY);
+}
+
+static inline int
+eventdev_setup_dequeue_timeout(void)
+{
+ return _eventdev_setup(TEST_EVENTDEV_SETUP_DEQUEUE_TIMEOUT);
+}
+
+static inline void
+eventdev_teardown(void)
+{
+ rte_event_dev_stop(evdev);
+ rte_mempool_free(eventdev_test_mempool);
+}
+
+static inline void
+update_event_and_validation_attr(struct rte_mbuf *m, struct rte_event *ev,
+ uint32_t flow_id, uint8_t event_type,
+ uint8_t sub_event_type, uint8_t sched_type,
+ uint8_t queue, uint8_t port)
+{
+ struct event_attr *attr;
+
+ /* Store the event attributes in mbuf for future reference */
+ attr = rte_pktmbuf_mtod(m, struct event_attr *);
+ attr->flow_id = flow_id;
+ attr->event_type = event_type;
+ attr->sub_event_type = sub_event_type;
+ attr->sched_type = sched_type;
+ attr->queue = queue;
+ attr->port = port;
+
+ ev->flow_id = flow_id;
+ ev->sub_event_type = sub_event_type;
+ ev->event_type = event_type;
+ /* Inject the new event */
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = sched_type;
+ ev->queue_id = queue;
+ ev->mbuf = m;
+}
+
+static inline int
+inject_events(uint32_t flow_id, uint8_t event_type, uint8_t sub_event_type,
+ uint8_t sched_type, uint8_t queue, uint8_t port,
+ unsigned int events)
+{
+ struct rte_mbuf *m;
+ unsigned int i;
+
+ for (i = 0; i < events; i++) {
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+ m->seqn = i;
+ update_event_and_validation_attr(m, &ev, flow_id, event_type,
+ sub_event_type, sched_type, queue, port);
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ return 0;
+}
+
+static inline int
+check_excess_events(uint8_t port)
+{
+ int i;
+ uint16_t valid_event;
+ struct rte_event ev;
+
+ /* Check for excess events, try for a few times and exit */
+ for (i = 0; i < 32; i++) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+
+ TEST_ASSERT_SUCCESS(valid_event, "Unexpected valid event=%d",
+ ev.mbuf->seqn);
+ }
+ return 0;
+}
+
+static inline int
+generate_random_events(const unsigned int total_events)
+{
+ struct rte_event_dev_info info;
+ unsigned int i;
+ int ret;
+
+ ret = rte_event_dev_info_get(evdev, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+ for (i = 0; i < total_events; i++) {
+ ret = inject_events(
+ rte_rand() % info.max_event_queue_flows /*flow_id */,
+ rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ rte_rand() % rte_event_queue_count(evdev) /* queue */,
+ 0 /* port */,
+ 1 /* events */);
+ if (ret)
+ return TEST_FAILED;
+ }
+ return ret;
+}
+
+
+static inline int
+validate_event(struct rte_event *ev)
+{
+ struct event_attr *attr;
+
+ attr = rte_pktmbuf_mtod(ev->mbuf, struct event_attr *);
+ TEST_ASSERT_EQUAL(attr->flow_id, ev->flow_id,
+ "flow_id mismatch enq=%d deq =%d",
+ attr->flow_id, ev->flow_id);
+ TEST_ASSERT_EQUAL(attr->event_type, ev->event_type,
+ "event_type mismatch enq=%d deq =%d",
+ attr->event_type, ev->event_type);
+ TEST_ASSERT_EQUAL(attr->sub_event_type, ev->sub_event_type,
+ "sub_event_type mismatch enq=%d deq =%d",
+ attr->sub_event_type, ev->sub_event_type);
+ TEST_ASSERT_EQUAL(attr->sched_type, ev->sched_type,
+ "sched_type mismatch enq=%d deq =%d",
+ attr->sched_type, ev->sched_type);
+ TEST_ASSERT_EQUAL(attr->queue, ev->queue_id,
+ "queue mismatch enq=%d deq =%d",
+ attr->queue, ev->queue_id);
+ return 0;
+}
+
+typedef int (*validate_event_cb)(uint32_t index, uint8_t port,
+ struct rte_event *ev);
+
+static inline int
+consume_events(uint8_t port, const uint32_t total_events, validate_event_cb fn)
+{
+ int ret;
+ uint16_t valid_event;
+ uint32_t events = 0, forward_progress_cnt = 0, index = 0;
+ struct rte_event ev;
+
+ while (1) {
+ if (++forward_progress_cnt > UINT16_MAX) {
+ printf("Detected deadlock\n");
+ return TEST_FAILED;
+ }
+
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ forward_progress_cnt = 0;
+ ret = validate_event(&ev);
+ if (ret)
+ return TEST_FAILED;
+
+ if (fn != NULL) {
+ ret = fn(index, port, &ev);
+ TEST_ASSERT_SUCCESS(ret,
+ "Failed to validate test specific event");
+ }
+
+ ++index;
+
+ rte_pktmbuf_free(ev.mbuf);
+ if (++events >= total_events)
+ break;
+ }
+
+ return check_excess_events(port);
+}
+
+static int
+validate_simple_enqdeq(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+ RTE_SET_USED(port);
+ TEST_ASSERT_EQUAL(index, ev->mbuf->seqn, "index=%d != seqn=%d", index,
+ ev->mbuf->seqn);
+ return 0;
+}
+
+static inline int
+test_simple_enqdeq(uint8_t sched_type)
+{
+ int ret;
+
+ ret = inject_events(0 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type */,
+ sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ MAX_EVENTS);
+ if (ret)
+ return TEST_FAILED;
+
+ return consume_events(0 /* port */, MAX_EVENTS, validate_simple_enqdeq);
+}
+
+static int
+test_simple_enqdeq_ordered(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_simple_enqdeq_atomic(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_simple_enqdeq_parallel(void)
+{
+ return test_simple_enqdeq(RTE_SCHED_TYPE_PARALLEL);
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. On dequeue, using single event port(port 0) verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_single_port_deq(void)
+{
+ int ret;
+
+ ret = generate_random_events(MAX_EVENTS);
+ if (ret)
+ return TEST_FAILED;
+
+ return consume_events(0 /* port */, MAX_EVENTS, NULL);
+}
+
+/*
+ * Inject 0..MAX_EVENTS events over 0..rte_event_queue_count() with modulus
+ * operation
+ *
+ * For example, Inject 32 events over 0..7 queues
+ * enqueue events 0, 8, 16, 24 in queue 0
+ * enqueue events 1, 9, 17, 25 in queue 1
+ * ..
+ * ..
+ * enqueue events 7, 15, 23, 31 in queue 7
+ *
+ * On dequeue, Validate the events comes in 0,8,16,24,1,9,17,25..,7,15,23,31
+ * order from queue0(highest priority) to queue7(lowest_priority)
+ */
+static int
+validate_queue_priority(uint32_t index, uint8_t port, struct rte_event *ev)
+{
+ uint32_t range = MAX_EVENTS / rte_event_queue_count(evdev);
+ uint32_t expected_val = (index % range) * rte_event_queue_count(evdev);
+
+ expected_val += ev->queue_id;
+ RTE_SET_USED(port);
+ TEST_ASSERT_EQUAL(ev->mbuf->seqn, expected_val,
+ "seqn=%d index=%d expected=%d range=%d nb_queues=%d max_event=%d",
+ ev->mbuf->seqn, index, expected_val, range,
+ rte_event_queue_count(evdev), MAX_EVENTS);
+ return 0;
+}
+
+static int
+test_multi_queue_priority(void)
+{
+ uint8_t queue;
+ struct rte_mbuf *m;
+ int i, max_evts_roundoff;
+
+ /* See validate_queue_priority() comments for priority validate logic */
+ max_evts_roundoff = MAX_EVENTS / rte_event_queue_count(evdev);
+ max_evts_roundoff *= rte_event_queue_count(evdev);
+
+ for (i = 0; i < max_evts_roundoff; i++) {
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ TEST_ASSERT_NOT_NULL(m, "mempool alloc failed");
+
+ m->seqn = i;
+ queue = i % rte_event_queue_count(evdev);
+ update_event_and_validation_attr(m, &ev, 0, RTE_EVENT_TYPE_CPU,
+ 0, RTE_SCHED_TYPE_PARALLEL, queue, 0);
+ rte_event_enqueue_burst(evdev, 0, &ev, 1);
+ }
+
+ return consume_events(0, max_evts_roundoff, validate_queue_priority);
+}
+
+static int
+worker_multi_port_fn(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ rte_atomic32_t *total_events = param->total_events;
+ int ret;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ ret = validate_event(&ev);
+ TEST_ASSERT_SUCCESS(ret, "Failed to validate event");
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ }
+ return 0;
+}
+
+static inline int
+wait_workers_to_join(int lcore, const rte_atomic32_t *count)
+{
+ uint64_t cycles, print_cycles;
+
+ print_cycles = cycles = rte_get_timer_cycles();
+ while (rte_eal_get_lcore_state(lcore) != FINISHED) {
+ uint64_t new_cycles = rte_get_timer_cycles();
+
+ if (new_cycles - print_cycles > rte_get_timer_hz()) {
+ printf("\r%s: events %d\n", __func__,
+ rte_atomic32_read(count));
+ print_cycles = new_cycles;
+ }
+ if (new_cycles - cycles > rte_get_timer_hz() * 10) {
+ printf("%s: No schedules for seconds, deadlock (%d)\n",
+ __func__,
+ rte_atomic32_read(count));
+ rte_event_dev_dump(evdev, stdout);
+ cycles = new_cycles;
+ return TEST_FAILED;
+ }
+ }
+ rte_eal_mp_wait_lcore();
+ return TEST_SUCCESS;
+}
+
+
+static inline int
+launch_workers_and_wait(int (*master_worker)(void *),
+ int (*slave_workers)(void *), uint32_t total_events,
+ uint8_t nb_workers, uint8_t sched_type)
+{
+ uint8_t port = 0;
+ int w_lcore;
+ int ret;
+ struct test_core_param *param;
+ rte_atomic32_t atomic_total_events;
+ uint64_t dequeue_tmo_ticks;
+
+ if (!nb_workers)
+ return 0;
+
+ rte_atomic32_set(&atomic_total_events, total_events);
+ seqn_list_init();
+
+ param = malloc(sizeof(struct test_core_param) * nb_workers);
+ if (!param)
+ return TEST_FAILED;
+
+ ret = rte_event_dequeue_timeout_ticks(evdev,
+ rte_rand() % 10000000/* 10ms */, &dequeue_tmo_ticks);
+ if (ret)
+ return TEST_FAILED;
+
+ param[0].total_events = &atomic_total_events;
+ param[0].sched_type = sched_type;
+ param[0].port = 0;
+ param[0].dequeue_tmo_ticks = dequeue_tmo_ticks;
+ rte_smp_wmb();
+
+ w_lcore = rte_get_next_lcore(
+ /* start core */ -1,
+ /* skip master */ 1,
+ /* wrap */ 0);
+ rte_eal_remote_launch(master_worker, &param[0], w_lcore);
+
+ for (port = 1; port < nb_workers; port++) {
+ param[port].total_events = &atomic_total_events;
+ param[port].sched_type = sched_type;
+ param[port].port = port;
+ param[port].dequeue_tmo_ticks = dequeue_tmo_ticks;
+ rte_smp_wmb();
+ w_lcore = rte_get_next_lcore(w_lcore, 1, 0);
+ rte_eal_remote_launch(slave_workers, &param[port], w_lcore);
+ }
+
+ ret = wait_workers_to_join(w_lcore, &atomic_total_events);
+ free(param);
+ return ret;
+}
+
+/*
+ * Generate a prescribed number of events and spread them across available
+ * queues. Dequeue the events through multiple ports and verify the enqueued
+ * event attributes
+ */
+static int
+test_multi_queue_enq_multi_port_deq(void)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint8_t nr_ports;
+ int ret;
+
+ ret = generate_random_events(total_events);
+ if (ret)
+ return TEST_FAILED;
+
+ nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ printf("%s: Not enough ports=%d or workers=%d\n", __func__,
+ rte_event_port_count(evdev), rte_lcore_count() - 1);
+ return TEST_SUCCESS;
+ }
+
+ return launch_workers_and_wait(worker_multi_port_fn,
+ worker_multi_port_fn, total_events,
+ nr_ports, 0xff /* invalid */);
+}
+
+static int
+validate_queue_to_port_single_link(uint32_t index, uint8_t port,
+ struct rte_event *ev)
+{
+ RTE_SET_USED(index);
+ TEST_ASSERT_EQUAL(port, ev->queue_id,
+ "queue mismatch enq=%d deq =%d",
+ port, ev->queue_id);
+ return 0;
+}
+
+/*
+ * Link queue x to port x and check correctness of link by checking
+ * queue_id == x on dequeue on the specific port x
+ */
+static int
+test_queue_to_port_single_link(void)
+{
+ int i, nr_links, ret;
+
+ /* Unlink all connections that created in eventdev_setup */
+ for (i = 0; i < rte_event_port_count(evdev); i++) {
+ ret = rte_event_port_unlink(evdev, i, NULL, 0);
+ TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d", i);
+ }
+
+ nr_links = RTE_MIN(rte_event_port_count(evdev),
+ rte_event_queue_count(evdev));
+ const unsigned int total_events = MAX_EVENTS / nr_links;
+
+ /* Link queue x to port x and inject events to queue x through port x */
+ for (i = 0; i < nr_links; i++) {
+ uint8_t queue = (uint8_t)i;
+
+ ret = rte_event_port_link(evdev, i, &queue, NULL, 1);
+ TEST_ASSERT(ret == 1, "Failed to link queue to port %d", i);
+
+ ret = inject_events(
+ 0x100 /*flow_id */,
+ rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ queue /* queue */,
+ i /* port */,
+ total_events /* events */);
+ if (ret)
+ return TEST_FAILED;
+ }
+
+ /* Verify the events generated from correct queue */
+ for (i = 0; i < nr_links; i++) {
+ ret = consume_events(i /* port */, total_events,
+ validate_queue_to_port_single_link);
+ if (ret)
+ return TEST_FAILED;
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+validate_queue_to_port_multi_link(uint32_t index, uint8_t port,
+ struct rte_event *ev)
+{
+ RTE_SET_USED(index);
+ TEST_ASSERT_EQUAL(port, (ev->queue_id & 0x1),
+ "queue mismatch enq=%d deq =%d",
+ port, ev->queue_id);
+ return 0;
+}
+
+/*
+ * Link all even number of queues to port 0 and all odd number of queues to
+ * port 1 and verify the link connection on dequeue
+ */
+static int
+test_queue_to_port_multi_link(void)
+{
+ int ret, port0_events = 0, port1_events = 0;
+ uint8_t nr_queues, nr_ports, queue, port;
+
+ nr_queues = rte_event_queue_count(evdev);
+ nr_ports = rte_event_port_count(evdev);
+
+ if (nr_ports < 2) {
+ printf("%s: Not enough ports to test ports=%d\n",
+ __func__, nr_ports);
+ return TEST_SUCCESS;
+ }
+
+ /* Unlink all connections that created in eventdev_setup */
+ for (port = 0; port < nr_ports; port++) {
+ ret = rte_event_port_unlink(evdev, port, NULL, 0);
+ TEST_ASSERT(ret >= 0, "Failed to unlink all queues port=%d",
+ port);
+ }
+
+ const unsigned int total_events = MAX_EVENTS / nr_queues;
+
+ /* Link all even number of queues to port0 and odd numbers to port 1*/
+ for (queue = 0; queue < nr_queues; queue++) {
+ port = queue & 0x1;
+ ret = rte_event_port_link(evdev, port, &queue, NULL, 1);
+ TEST_ASSERT(ret == 1, "Failed to link queue=%d to port=%d",
+ queue, port);
+
+ ret = inject_events(
+ 0x100 /*flow_id */,
+ rte_rand() % (RTE_EVENT_TYPE_CPU + 1) /* event_type */,
+ rte_rand() % 256 /* sub_event_type */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1),
+ queue /* queue */,
+ port /* port */,
+ total_events /* events */);
+ if (ret)
+ return TEST_FAILED;
+
+ if (port == 0)
+ port0_events += total_events;
+ else
+ port1_events += total_events;
+ }
+
+ ret = consume_events(0 /* port */, port0_events,
+ validate_queue_to_port_multi_link);
+ if (ret)
+ return TEST_FAILED;
+ ret = consume_events(1 /* port */, port1_events,
+ validate_queue_to_port_multi_link);
+ if (ret)
+ return TEST_FAILED;
+
+ return TEST_SUCCESS;
+}
+
+static int
+worker_flow_based_pipeline(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint8_t new_sched_type = param->sched_type;
+ rte_atomic32_t *total_events = param->total_events;
+ uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+ dequeue_tmo_ticks);
+ if (!valid_event)
+ continue;
+
+ /* Events from stage 0 */
+ if (ev.sub_event_type == 0) {
+ /* Move to atomic flow to maintain the ordering */
+ ev.flow_id = 0x2;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type = 1; /* stage 1 */
+ ev.sched_type = new_sched_type;
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ } else if (ev.sub_event_type == 1) { /* Events from stage 1*/
+ if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ printf("Failed to update seqn_list\n");
+ return TEST_FAILED;
+ }
+ } else {
+ printf("Invalid ev.sub_event_type = %d\n",
+ ev.sub_event_type);
+ return TEST_FAILED;
+ }
+ }
+ return 0;
+}
+
+static int
+test_multiport_flow_sched_type_test(uint8_t in_sched_type,
+ uint8_t out_sched_type)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint8_t nr_ports;
+ int ret;
+
+ nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ printf("%s: Not enough ports=%d or workers=%d\n", __func__,
+ rte_event_port_count(evdev), rte_lcore_count() - 1);
+ return TEST_SUCCESS;
+ }
+
+ /* Injects events with m->seqn=0 to total_events */
+ ret = inject_events(
+ 0x1 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ in_sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ total_events /* events */);
+ if (ret)
+ return TEST_FAILED;
+
+ ret = launch_workers_and_wait(worker_flow_based_pipeline,
+ worker_flow_based_pipeline,
+ total_events, nr_ports, out_sched_type);
+ if (ret)
+ return TEST_FAILED;
+
+ if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+ out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ /* Check the events order maintained or not */
+ return seqn_list_check(total_events);
+ }
+ return TEST_SUCCESS;
+}
+
+
+/* Multi port ordered to atomic transaction */
+static int
+test_multi_port_flow_ordered_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_ordered_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_ordered_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_atomic_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_atomic_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_atomic_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_flow_parallel_to_atomic(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_flow_parallel_to_ordered(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_flow_parallel_to_parallel(void)
+{
+ return test_multiport_flow_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+worker_group_based_pipeline(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint8_t new_sched_type = param->sched_type;
+ rte_atomic32_t *total_events = param->total_events;
+ uint64_t dequeue_tmo_ticks = param->dequeue_tmo_ticks;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1,
+ dequeue_tmo_ticks);
+ if (!valid_event)
+ continue;
+
+ /* Events from stage 0(group 0) */
+ if (ev.queue_id == 0) {
+ /* Move to atomic flow to maintain the ordering */
+ ev.flow_id = 0x2;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sched_type = new_sched_type;
+ ev.queue_id = 1; /* Stage 1*/
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ } else if (ev.queue_id == 1) { /* Events from stage 1(group 1)*/
+ if (seqn_list_update(ev.mbuf->seqn) == TEST_SUCCESS) {
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ printf("Failed to update seqn_list\n");
+ return TEST_FAILED;
+ }
+ } else {
+ printf("Invalid ev.queue_id = %d\n", ev.queue_id);
+ return TEST_FAILED;
+ }
+ }
+
+
+ return 0;
+}
+
+static int
+test_multiport_queue_sched_type_test(uint8_t in_sched_type,
+ uint8_t out_sched_type)
+{
+ const unsigned int total_events = MAX_EVENTS;
+ uint8_t nr_ports;
+ int ret;
+
+ nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+
+ if (rte_event_queue_count(evdev) < 2 || !nr_ports) {
+ printf("%s: Not enough queues=%d ports=%d or workers=%d\n",
+ __func__, rte_event_queue_count(evdev),
+ rte_event_port_count(evdev), rte_lcore_count() - 1);
+ return TEST_SUCCESS;
+ }
+
+ /* Injects events with m->seqn=0 to total_events */
+ ret = inject_events(
+ 0x1 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ in_sched_type,
+ 0 /* queue */,
+ 0 /* port */,
+ total_events /* events */);
+ if (ret)
+ return TEST_FAILED;
+
+ ret = launch_workers_and_wait(worker_group_based_pipeline,
+ worker_group_based_pipeline,
+ total_events, nr_ports, out_sched_type);
+ if (ret)
+ return TEST_FAILED;
+
+ if (in_sched_type != RTE_SCHED_TYPE_PARALLEL &&
+ out_sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ /* Check the events order maintained or not */
+ return seqn_list_check(total_events);
+ }
+ return TEST_SUCCESS;
+}
+
+static int
+test_multi_port_queue_ordered_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_ordered_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_ordered_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ORDERED,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_atomic_to_atomic(void)
+{
+ /* Ingress event order test */
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_atomic_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_atomic_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_ATOMIC,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+test_multi_port_queue_parallel_to_atomic(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ATOMIC);
+}
+
+static int
+test_multi_port_queue_parallel_to_ordered(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_ORDERED);
+}
+
+static int
+test_multi_port_queue_parallel_to_parallel(void)
+{
+ return test_multiport_queue_sched_type_test(RTE_SCHED_TYPE_PARALLEL,
+ RTE_SCHED_TYPE_PARALLEL);
+}
+
+static int
+worker_flow_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.sub_event_type == 255) { /* last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type++;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+ return 0;
+}
+
+static int
+launch_multi_port_max_stages_random_sched_type(int (*fn)(void *))
+{
+ uint8_t nr_ports;
+ int ret;
+
+ nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+
+ if (!nr_ports) {
+ printf("%s: Not enough ports=%d or workers=%d\n", __func__,
+ rte_event_port_count(evdev), rte_lcore_count() - 1);
+ return TEST_SUCCESS;
+ }
+
+ /* Injects events with m->seqn=0 to total_events */
+ ret = inject_events(
+ 0x1 /*flow_id */,
+ RTE_EVENT_TYPE_CPU /* event_type */,
+ 0 /* sub_event_type (stage 0) */,
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1) /* sched_type */,
+ 0 /* queue */,
+ 0 /* port */,
+ MAX_EVENTS /* events */);
+ if (ret)
+ return TEST_FAILED;
+
+ return launch_workers_and_wait(fn, fn, MAX_EVENTS, nr_ports,
+ 0xff /* invalid */);
+}
+
+/* Flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_flow_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_flow_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_queue_based_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint8_t nr_queues = rte_event_queue_count(evdev);
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.queue_id == nr_queues - 1) { /* last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.queue_id++;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+ return 0;
+}
+
+/* Queue based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_queue_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_queue_based_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_mixed_pipeline_max_stages_rand_sched_type(void *arg)
+{
+ struct test_core_param *param = arg;
+ struct rte_event ev;
+ uint16_t valid_event;
+ uint8_t port = param->port;
+ uint8_t nr_queues = rte_event_queue_count(evdev);
+ rte_atomic32_t *total_events = param->total_events;
+
+ while (rte_atomic32_read(total_events) > 0) {
+ valid_event = rte_event_dequeue_burst(evdev, port, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ if (ev.queue_id == nr_queues - 1) { /* Last stage */
+ rte_pktmbuf_free(ev.mbuf);
+ rte_atomic32_sub(total_events, 1);
+ } else {
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.queue_id++;
+ ev.sub_event_type = rte_rand() % 256;
+ ev.sched_type =
+ rte_rand() % (RTE_SCHED_TYPE_PARALLEL + 1);
+ ev.op = RTE_EVENT_OP_FORWARD;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+ }
+ return 0;
+}
+
+/* Queue and flow based pipeline with maximum stages with random sched type */
+static int
+test_multi_port_mixed_max_stages_random_sched_type(void)
+{
+ return launch_multi_port_max_stages_random_sched_type(
+ worker_mixed_pipeline_max_stages_rand_sched_type);
+}
+
+static int
+worker_ordered_flow_producer(void *arg)
+{
+ struct test_core_param *param = arg;
+ uint8_t port = param->port;
+ struct rte_mbuf *m;
+ int counter = 0;
+
+ while (counter < NUM_PACKETS) {
+ m = rte_pktmbuf_alloc(eventdev_test_mempool);
+ if (m == NULL)
+ continue;
+
+ m->seqn = counter++;
+
+ struct rte_event ev = {.event = 0, .u64 = 0};
+
+ ev.flow_id = 0x1; /* Generate a fat flow */
+ ev.sub_event_type = 0;
+ /* Inject the new event */
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+ ev.queue_id = 0;
+ ev.mbuf = m;
+ rte_event_enqueue_burst(evdev, port, &ev, 1);
+ }
+
+ return 0;
+}
+
+static inline int
+test_producer_consumer_ingress_order_test(int (*fn)(void *))
+{
+ uint8_t nr_ports;
+
+ nr_ports = RTE_MIN(rte_event_port_count(evdev), rte_lcore_count() - 1);
+
+ if (rte_lcore_count() < 3 || nr_ports < 2) {
+ printf("### Not enough cores for %s test.\n", __func__);
+ return TEST_SUCCESS;
+ }
+
+ launch_workers_and_wait(worker_ordered_flow_producer, fn,
+ NUM_PACKETS, nr_ports, RTE_SCHED_TYPE_ATOMIC);
+ /* Check the events order maintained or not */
+ return seqn_list_check(NUM_PACKETS);
+}
+
+/* Flow based producer consumer ingress order test */
+static int
+test_flow_producer_consumer_ingress_order_test(void)
+{
+ return test_producer_consumer_ingress_order_test(
+ worker_flow_based_pipeline);
+}
+
+/* Queue based producer consumer ingress order test */
+static int
+test_queue_producer_consumer_ingress_order_test(void)
+{
+ return test_producer_consumer_ingress_order_test(
+ worker_group_based_pipeline);
+}
+
+static struct unit_test_suite eventdev_octeontx_testsuite = {
+ .suite_name = "eventdev octeontx unit test suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_ordered),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_atomic),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_simple_enqdeq_parallel),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_queue_enq_single_port_deq),
+ TEST_CASE_ST(eventdev_setup_priority, eventdev_teardown,
+ test_multi_queue_priority),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_queue_enq_multi_port_deq),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_queue_to_port_single_link),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_queue_to_port_multi_link),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_atomic),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_ordered),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_ordered_to_parallel),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_atomic),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_ordered),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_atomic_to_parallel),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_atomic),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_ordered),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_parallel_to_parallel),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_atomic),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_ordered),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_ordered_to_parallel),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_atomic),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_ordered),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_atomic_to_parallel),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_atomic),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_ordered),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_parallel_to_parallel),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_flow_max_stages_random_sched_type),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_queue_max_stages_random_sched_type),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_multi_port_mixed_max_stages_random_sched_type),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_flow_producer_consumer_ingress_order_test),
+ TEST_CASE_ST(eventdev_setup, eventdev_teardown,
+ test_queue_producer_consumer_ingress_order_test),
+ /* Tests with dequeue timeout */
+ TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
+ test_multi_port_flow_ordered_to_atomic),
+ TEST_CASE_ST(eventdev_setup_dequeue_timeout, eventdev_teardown,
+ test_multi_port_queue_ordered_to_atomic),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_eventdev_octeontx(void)
+{
+ return unit_test_suite_runner(&eventdev_octeontx_testsuite);
+}
+
+REGISTER_TEST_COMMAND(eventdev_octeontx_autotest, test_eventdev_octeontx);
diff --git a/test/test/test_eventdev_sw.c b/test/test/test_eventdev_sw.c
new file mode 100644
index 00000000..b187d029
--- /dev/null
+++ b/test/test/test_eventdev_sw.c
@@ -0,0 +1,3188 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+
+#include <rte_eventdev.h>
+#include "test.h"
+
+#define MAX_PORTS 16
+#define MAX_QIDS 16
+#define NUM_PACKETS (1<<18)
+
+static int evdev;
+
+struct test {
+ struct rte_mempool *mbuf_pool;
+ uint8_t port[MAX_PORTS];
+ uint8_t qid[MAX_QIDS];
+ int nb_qids;
+};
+
+static struct rte_event release_ev;
+
+static inline struct rte_mbuf *
+rte_gen_arp(int portid, struct rte_mempool *mp)
+{
+ /*
+ * len = 14 + 46
+ * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
+ */
+ static const uint8_t arp_request[] = {
+ /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
+ 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
+ /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
+ 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
+ /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
+ 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+ };
+ struct rte_mbuf *m;
+ int pkt_len = sizeof(arp_request) - 1;
+
+ m = rte_pktmbuf_alloc(mp);
+ if (!m)
+ return 0;
+
+ memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
+ arp_request, pkt_len);
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+ rte_pktmbuf_data_len(m) = pkt_len;
+
+ RTE_SET_USED(portid);
+
+ return m;
+}
+
+static void
+xstats_print(void)
+{
+ const uint32_t XSTATS_MAX = 1024;
+ uint32_t i;
+ uint32_t ids[XSTATS_MAX];
+ uint64_t values[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+ for (i = 0; i < XSTATS_MAX; i++)
+ ids[i] = i;
+
+ /* Device names / values */
+ int ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE, 0,
+ xstats_names, ids, XSTATS_MAX);
+ if (ret < 0) {
+ printf("%d: xstats names get() returned error\n",
+ __LINE__);
+ return;
+ }
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, ret);
+ if (ret > (signed int)XSTATS_MAX)
+ printf("%s %d: more xstats available than space\n",
+ __func__, __LINE__);
+ for (i = 0; (signed int)i < ret; i++) {
+ printf("%d : %s : %"PRIu64"\n",
+ i, xstats_names[i].name, values[i]);
+ }
+
+ /* Port names / values */
+ ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 0,
+ xstats_names, ids, XSTATS_MAX);
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 1,
+ ids, values, ret);
+ if (ret > (signed int)XSTATS_MAX)
+ printf("%s %d: more xstats available than space\n",
+ __func__, __LINE__);
+ for (i = 0; (signed int)i < ret; i++) {
+ printf("%d : %s : %"PRIu64"\n",
+ i, xstats_names[i].name, values[i]);
+ }
+
+ /* Queue names / values */
+ ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+ xstats_names, ids, XSTATS_MAX);
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ 1, ids, values, ret);
+ if (ret > (signed int)XSTATS_MAX)
+ printf("%s %d: more xstats available than space\n",
+ __func__, __LINE__);
+ for (i = 0; (signed int)i < ret; i++) {
+ printf("%d : %s : %"PRIu64"\n",
+ i, xstats_names[i].name, values[i]);
+ }
+}
+
+/* initialization and config */
+static inline int
+init(struct test *t, int nb_queues, int nb_ports)
+{
+ struct rte_event_dev_config config = {
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
+ .nb_event_queue_flows = 1024,
+ .nb_events_limit = 4096,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128,
+ };
+ int ret;
+
+ void *temp = t->mbuf_pool; /* save and restore mbuf pool */
+
+ memset(t, 0, sizeof(*t));
+ t->mbuf_pool = temp;
+
+ ret = rte_event_dev_configure(evdev, &config);
+ if (ret < 0)
+ printf("%d: Error configuring device\n", __LINE__);
+ return ret;
+};
+
+static inline int
+create_ports(struct test *t, int num_ports)
+{
+ int i;
+ static const struct rte_event_port_conf conf = {
+ .new_event_threshold = 1024,
+ .dequeue_depth = 32,
+ .enqueue_depth = 64,
+ };
+ if (num_ports > MAX_PORTS)
+ return -1;
+
+ for (i = 0; i < num_ports; i++) {
+ if (rte_event_port_setup(evdev, i, &conf) < 0) {
+ printf("Error setting up port %d\n", i);
+ return -1;
+ }
+ t->port[i] = i;
+ }
+
+ return 0;
+}
+
+static inline int
+create_lb_qids(struct test *t, int num_qids, uint32_t flags)
+{
+ int i;
+
+ /* Q creation */
+ const struct rte_event_queue_conf conf = {
+ .event_queue_cfg = flags,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
+ if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ t->qid[i] = i;
+ }
+ t->nb_qids += num_qids;
+ if (t->nb_qids > MAX_QIDS)
+ return -1;
+
+ return 0;
+}
+
+static inline int
+create_atomic_qids(struct test *t, int num_qids)
+{
+ return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY);
+}
+
+static inline int
+create_ordered_qids(struct test *t, int num_qids)
+{
+ return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_ORDERED_ONLY);
+}
+
+
+static inline int
+create_unordered_qids(struct test *t, int num_qids)
+{
+ return create_lb_qids(t, num_qids, RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY);
+}
+
+static inline int
+create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
+{
+ int i;
+
+ /* Q creation */
+ static const struct rte_event_queue_conf conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
+ if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ t->qid[i] = i;
+
+ if (rte_event_port_link(evdev, ports[i - t->nb_qids],
+ &t->qid[i], NULL, 1) != 1) {
+ printf("%d: error creating link for qid %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ }
+ t->nb_qids += num_qids;
+ if (t->nb_qids > MAX_QIDS)
+ return -1;
+
+ return 0;
+}
+
+/* destruction */
+static inline int
+cleanup(struct test *t __rte_unused)
+{
+ rte_event_dev_stop(evdev);
+ rte_event_dev_close(evdev);
+ return 0;
+};
+
+struct test_event_dev_stats {
+ uint64_t rx_pkts; /**< Total packets received */
+ uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */
+ uint64_t tx_pkts; /**< Total packets transmitted */
+
+ /** Packets received on this port */
+ uint64_t port_rx_pkts[MAX_PORTS];
+ /** Packets dropped on this port */
+ uint64_t port_rx_dropped[MAX_PORTS];
+ /** Packets inflight on this port */
+ uint64_t port_inflight[MAX_PORTS];
+ /** Packets transmitted on this port */
+ uint64_t port_tx_pkts[MAX_PORTS];
+ /** Packets received on this qid */
+ uint64_t qid_rx_pkts[MAX_QIDS];
+ /** Packets dropped on this qid */
+ uint64_t qid_rx_dropped[MAX_QIDS];
+ /** Packets transmitted on this qid */
+ uint64_t qid_tx_pkts[MAX_QIDS];
+};
+
+static inline int
+test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats)
+{
+ static uint32_t i;
+ static uint32_t total_ids[3]; /* rx, tx and drop */
+ static uint32_t port_rx_pkts_ids[MAX_PORTS];
+ static uint32_t port_rx_dropped_ids[MAX_PORTS];
+ static uint32_t port_inflight_ids[MAX_PORTS];
+ static uint32_t port_tx_pkts_ids[MAX_PORTS];
+ static uint32_t qid_rx_pkts_ids[MAX_QIDS];
+ static uint32_t qid_rx_dropped_ids[MAX_QIDS];
+ static uint32_t qid_tx_pkts_ids[MAX_QIDS];
+
+
+ stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
+ "dev_rx", &total_ids[0]);
+ stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id,
+ "dev_drop", &total_ids[1]);
+ stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id,
+ "dev_tx", &total_ids[2]);
+ for (i = 0; i < MAX_PORTS; i++) {
+ char name[32];
+ snprintf(name, sizeof(name), "port_%u_rx", i);
+ stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &port_rx_pkts_ids[i]);
+ snprintf(name, sizeof(name), "port_%u_drop", i);
+ stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &port_rx_dropped_ids[i]);
+ snprintf(name, sizeof(name), "port_%u_inflight", i);
+ stats->port_inflight[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &port_inflight_ids[i]);
+ snprintf(name, sizeof(name), "port_%u_tx", i);
+ stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &port_tx_pkts_ids[i]);
+ }
+ for (i = 0; i < MAX_QIDS; i++) {
+ char name[32];
+ snprintf(name, sizeof(name), "qid_%u_rx", i);
+ stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &qid_rx_pkts_ids[i]);
+ snprintf(name, sizeof(name), "qid_%u_drop", i);
+ stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &qid_rx_dropped_ids[i]);
+ snprintf(name, sizeof(name), "qid_%u_tx", i);
+ stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get(
+ dev_id, name, &qid_tx_pkts_ids[i]);
+ }
+
+ return 0;
+}
+
+/* run_prio_packet_test
+ * This performs a basic packet priority check on the test instance passed in.
+ * It is factored out of the main priority tests as the same tests must be
+ * performed to ensure prioritization of each type of QID.
+ *
+ * Requirements:
+ * - An initialized test structure, including mempool
+ * - t->port[0] is initialized for both Enq / Deq of packets to the QID
+ * - t->qid[0] is the QID to be tested
+ * - if LB QID, the CQ must be mapped to the QID.
+ */
+static int
+run_prio_packet_test(struct test *t)
+{
+ int err;
+ const uint32_t MAGIC_SEQN[] = {4711, 1234};
+ const uint32_t PRIORITY[] = {
+ RTE_EVENT_DEV_PRIORITY_NORMAL,
+ RTE_EVENT_DEV_PRIORITY_HIGHEST
+ };
+ unsigned int i;
+ for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) {
+ /* generate pkt and enqueue */
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ arp->seqn = MAGIC_SEQN[i];
+
+ ev = (struct rte_event){
+ .priority = PRIORITY[i],
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .mbuf = arp
+ };
+ err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+ if (err < 0) {
+ printf("%d: error failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ rte_event_schedule(evdev);
+
+ struct test_event_dev_stats stats;
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: error failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.port_rx_pkts[t->port[0]] != 2) {
+ printf("%d: error stats incorrect for directed port\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ struct rte_event ev, ev2;
+ uint32_t deq_pkts;
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: error failed to deq\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ if (ev.mbuf->seqn != MAGIC_SEQN[1]) {
+ printf("%d: first packet out not highest priority\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ rte_pktmbuf_free(ev.mbuf);
+
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: error failed to deq\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ if (ev2.mbuf->seqn != MAGIC_SEQN[0]) {
+ printf("%d: second packet out not lower priority\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ rte_pktmbuf_free(ev2.mbuf);
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+test_single_directed_packet(struct test *t)
+{
+ const int rx_enq = 0;
+ const int wrk_enq = 2;
+ int err;
+
+ /* Create instance with 3 directed QIDs going to 3 ports */
+ if (init(t, 3, 3) < 0 ||
+ create_ports(t, 3) < 0 ||
+ create_directed_qids(t, 3, t->port) < 0)
+ return -1;
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** FORWARD ****************/
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = wrk_enq,
+ .mbuf = arp,
+ };
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ const uint32_t MAGIC_SEQN = 4711;
+ arp->seqn = MAGIC_SEQN;
+
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1);
+ if (err < 0) {
+ printf("%d: error failed to enqueue\n", __LINE__);
+ return -1;
+ }
+
+ /* Run schedule() as dir packets may need to be re-ordered */
+ rte_event_schedule(evdev);
+
+ struct test_event_dev_stats stats;
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: error failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.port_rx_pkts[rx_enq] != 1) {
+ printf("%d: error stats incorrect for directed port\n",
+ __LINE__);
+ return -1;
+ }
+
+ uint32_t deq_pkts;
+ deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: error failed to deq\n", __LINE__);
+ return -1;
+ }
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_rx_pkts[wrk_enq] != 0 &&
+ stats.port_rx_pkts[wrk_enq] != 1) {
+ printf("%d: error directed stats post-dequeue\n", __LINE__);
+ return -1;
+ }
+
+ if (ev.mbuf->seqn != MAGIC_SEQN) {
+ printf("%d: error magic sequence number not dequeued\n",
+ __LINE__);
+ return -1;
+ }
+
+ rte_pktmbuf_free(ev.mbuf);
+ cleanup(t);
+ return 0;
+}
+
+
+static int
+test_priority_directed(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_directed_qids(t, 1, t->port) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ return run_prio_packet_test(t);
+}
+
+static int
+test_priority_atomic(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* map the QID */
+ if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping qid to port\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ return run_prio_packet_test(t);
+}
+
+static int
+test_priority_ordered(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_ordered_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* map the QID */
+ if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping qid to port\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ return run_prio_packet_test(t);
+}
+
+static int
+test_priority_unordered(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_unordered_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* map the QID */
+ if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping qid to port\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ return run_prio_packet_test(t);
+}
+
+static int
+burst_packets(struct test *t)
+{
+ /************** CONFIG ****************/
+ uint32_t i;
+ int err;
+ int ret;
+
+ /* Create instance with 2 ports and 2 queues */
+ if (init(t, 2, 2) < 0 ||
+ create_ports(t, 2) < 0 ||
+ create_atomic_qids(t, 2) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1);
+ if (ret != 1) {
+ printf("%d: error mapping lb qid0\n", __LINE__);
+ return -1;
+ }
+ ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1);
+ if (ret != 1) {
+ printf("%d: error mapping lb qid1\n", __LINE__);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** FORWARD ****************/
+ const uint32_t rx_port = 0;
+ const uint32_t NUM_PKTS = 2;
+
+ for (i = 0; i < NUM_PKTS; i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: error generating pkt\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = i % 2,
+ .flow_id = i % 3,
+ .mbuf = arp,
+ };
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+ rte_event_schedule(evdev);
+
+ /* Check stats for all NUM_PKTS arrived to sched core */
+ struct test_event_dev_stats stats;
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+ if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) {
+ printf("%d: Sched core didn't receive all %d pkts\n",
+ __LINE__, NUM_PKTS);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ uint32_t deq_pkts;
+ int p;
+
+ deq_pkts = 0;
+ /******** DEQ QID 1 *******/
+ do {
+ struct rte_event ev;
+ p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0);
+ deq_pkts += p;
+ rte_pktmbuf_free(ev.mbuf);
+ } while (p);
+
+ if (deq_pkts != NUM_PKTS/2) {
+ printf("%d: Half of NUM_PKTS didn't arrive at port 1\n",
+ __LINE__);
+ return -1;
+ }
+
+ /******** DEQ QID 2 *******/
+ deq_pkts = 0;
+ do {
+ struct rte_event ev;
+ p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0);
+ deq_pkts += p;
+ rte_pktmbuf_free(ev.mbuf);
+ } while (p);
+ if (deq_pkts != NUM_PKTS/2) {
+ printf("%d: Half of NUM_PKTS didn't arrive at port 2\n",
+ __LINE__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+abuse_inflights(struct test *t)
+{
+ const int rx_enq = 0;
+ const int wrk_enq = 2;
+ int err;
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /* Enqueue op only */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+
+ /* schedule */
+ rte_event_schedule(evdev);
+
+ struct test_event_dev_stats stats;
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.rx_pkts != 0 ||
+ stats.tx_pkts != 0 ||
+ stats.port_inflight[wrk_enq] != 0) {
+ printf("%d: Sched core didn't handle pkt as expected\n",
+ __LINE__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+xstats_tests(struct test *t)
+{
+ const int wrk_enq = 2;
+ int err;
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ const uint32_t XSTATS_MAX = 1024;
+
+ uint32_t i;
+ uint32_t ids[XSTATS_MAX];
+ uint64_t values[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+ for (i = 0; i < XSTATS_MAX; i++)
+ ids[i] = i;
+
+ /* Device names / values */
+ int ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, xstats_names, ids, XSTATS_MAX);
+ if (ret != 6) {
+ printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, ret);
+ if (ret != 6) {
+ printf("%d: expected 6 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ /* Port names / values */
+ ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 0,
+ xstats_names, ids, XSTATS_MAX);
+ if (ret != 21) {
+ printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 0,
+ ids, values, ret);
+ if (ret != 21) {
+ printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ /* Queue names / values */
+ ret = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ 0, xstats_names, ids, XSTATS_MAX);
+ if (ret != 13) {
+ printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ /* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ 1, ids, values, ret);
+ if (ret != -EINVAL) {
+ printf("%d: expected 0 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ 0, ids, values, ret);
+ if (ret != 13) {
+ printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
+ return -1;
+ }
+
+ /* enqueue packets to check values */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ ev.queue_id = t->qid[i];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ ev.flow_id = 7;
+ arp->seqn = i;
+
+ int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ rte_event_schedule(evdev);
+
+ /* Device names / values */
+ int num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE, 0,
+ xstats_names, ids, XSTATS_MAX);
+ if (num_stats < 0)
+ goto fail;
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, num_stats);
+ static const uint64_t expected[] = {3, 3, 0, 1, 0, 0};
+ for (i = 0; (signed int)i < ret; i++) {
+ if (expected[i] != values[i]) {
+ printf(
+ "%d Error xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], expected[i]);
+ goto fail;
+ }
+ }
+
+ ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, NULL, 0);
+
+ /* ensure reset statistics are zero-ed */
+ static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0};
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, num_stats);
+ for (i = 0; (signed int)i < ret; i++) {
+ if (expected_zero[i] != values[i]) {
+ printf(
+ "%d Error, xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], expected_zero[i]);
+ goto fail;
+ }
+ }
+
+ /* port reset checks */
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, 0,
+ xstats_names, ids, XSTATS_MAX);
+ if (num_stats < 0)
+ goto fail;
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT,
+ 0, ids, values, num_stats);
+
+ static const uint64_t port_expected[] = {
+ 3 /* rx */,
+ 0 /* tx */,
+ 0 /* drop */,
+ 0 /* inflights */,
+ 0 /* avg pkt cycles */,
+ 29 /* credits */,
+ 0 /* rx ring used */,
+ 4096 /* rx ring free */,
+ 0 /* cq ring used */,
+ 32 /* cq ring free */,
+ 0 /* dequeue calls */,
+ /* 10 dequeue burst buckets */
+ 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ };
+ if (ret != RTE_DIM(port_expected)) {
+ printf(
+ "%s %d: wrong number of port stats (%d), expected %zu\n",
+ __func__, __LINE__, ret, RTE_DIM(port_expected));
+ }
+
+ for (i = 0; (signed int)i < ret; i++) {
+ if (port_expected[i] != values[i]) {
+ printf(
+ "%s : %d: Error stat %s is %"PRIu64
+ ", expected %"PRIu64"\n",
+ __func__, __LINE__, xstats_names[i].name,
+ values[i], port_expected[i]);
+ goto fail;
+ }
+ }
+
+ ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT,
+ 0, NULL, 0);
+
+ /* ensure reset statistics are zero-ed */
+ static const uint64_t port_expected_zero[] = {
+ 0 /* rx */,
+ 0 /* tx */,
+ 0 /* drop */,
+ 0 /* inflights */,
+ 0 /* avg pkt cycles */,
+ 29 /* credits */,
+ 0 /* rx ring used */,
+ 4096 /* rx ring free */,
+ 0 /* cq ring used */,
+ 32 /* cq ring free */,
+ 0 /* dequeue calls */,
+ /* 10 dequeue burst buckets */
+ 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ };
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ 0, ids, values, num_stats);
+ for (i = 0; (signed int)i < ret; i++) {
+ if (port_expected_zero[i] != values[i]) {
+ printf(
+ "%d, Error, xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], port_expected_zero[i]);
+ goto fail;
+ }
+ }
+
+ /* QUEUE STATS TESTS */
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+ xstats_names, ids, XSTATS_MAX);
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
+ 0, ids, values, num_stats);
+ if (ret < 0) {
+ printf("xstats get returned %d\n", ret);
+ goto fail;
+ }
+ if ((unsigned int)ret > XSTATS_MAX)
+ printf("%s %d: more xstats available than space\n",
+ __func__, __LINE__);
+
+ static const uint64_t queue_expected[] = {
+ 3 /* rx */,
+ 3 /* tx */,
+ 0 /* drop */,
+ 3 /* inflights */,
+ 512 /* iq size */,
+ 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
+ 0, 0, 1, 0, /* qid_0_port_X_pinned_flows */
+ };
+ for (i = 0; (signed int)i < ret; i++) {
+ if (queue_expected[i] != values[i]) {
+ printf(
+ "%d, Error, xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], queue_expected[i]);
+ goto fail;
+ }
+ }
+
+ /* Reset the queue stats here */
+ ret = rte_event_dev_xstats_reset(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+ NULL,
+ 0);
+
+ /* Verify that the resetable stats are reset, and others are not */
+ static const uint64_t queue_expected_zero[] = {
+ 0 /* rx */,
+ 0 /* tx */,
+ 0 /* drop */,
+ 3 /* inflight */,
+ 512 /* iq size */,
+ 0, 0, 0, 0, /* 4 iq used */
+ 0, 0, 1, 0, /* qid to port pinned flows */
+ };
+
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
+ ids, values, num_stats);
+ int fails = 0;
+ for (i = 0; (signed int)i < ret; i++) {
+ if (queue_expected_zero[i] != values[i]) {
+ printf(
+ "%d, Error, xstat %d (id %d) %s : %"PRIu64
+ ", expect %"PRIu64"\n",
+ __LINE__, i, ids[i], xstats_names[i].name,
+ values[i], queue_expected_zero[i]);
+ fails++;
+ }
+ }
+ if (fails) {
+ printf("%d : %d of values were not as expected above\n",
+ __LINE__, fails);
+ goto fail;
+ }
+
+ cleanup(t);
+ return 0;
+
+fail:
+ rte_event_dev_dump(0, stdout);
+ cleanup(t);
+ return -1;
+}
+
+
+static int
+xstats_id_abuse_tests(struct test *t)
+{
+ int err;
+ const uint32_t XSTATS_MAX = 1024;
+ const uint32_t link_port = 2;
+
+ uint32_t ids[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ goto fail;
+ }
+
+ err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+ /* no test for device, as it ignores the port/q number */
+ int num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ UINT8_MAX-1, xstats_names, ids,
+ XSTATS_MAX);
+ if (num_stats != 0) {
+ printf("%d: expected %d stats, got return %d\n", __LINE__,
+ 0, num_stats);
+ goto fail;
+ }
+
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ UINT8_MAX-1, xstats_names, ids,
+ XSTATS_MAX);
+ if (num_stats != 0) {
+ printf("%d: expected %d stats, got return %d\n", __LINE__,
+ 0, num_stats);
+ goto fail;
+ }
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+port_reconfig_credits(struct test *t)
+{
+ if (init(t, 1, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ uint32_t i;
+ const uint32_t NUM_ITERS = 32;
+ for (i = 0; i < NUM_ITERS; i++) {
+ const struct rte_event_queue_conf conf = {
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+ if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
+ printf("%d: error creating qid\n", __LINE__);
+ return -1;
+ }
+ t->qid[0] = 0;
+
+ static const struct rte_event_port_conf port_conf = {
+ .new_event_threshold = 128,
+ .dequeue_depth = 32,
+ .enqueue_depth = 64,
+ };
+ if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
+ printf("%d Error setting up port\n", __LINE__);
+ return -1;
+ }
+
+ int links = rte_event_port_link(evdev, 0, NULL, NULL, 0);
+ if (links != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+ const uint32_t NPKTS = 1;
+ uint32_t j;
+ for (j = 0; j < NPKTS; j++) {
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ goto fail;
+ }
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ int err = rte_event_enqueue_burst(evdev, 0, &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ rte_event_dev_dump(0, stdout);
+ goto fail;
+ }
+ }
+
+ rte_event_schedule(evdev);
+
+ struct rte_event ev[NPKTS];
+ int deq = rte_event_dequeue_burst(evdev, t->port[0], ev,
+ NPKTS, 0);
+ if (deq != 1)
+ printf("%d error; no packet dequeued\n", __LINE__);
+
+ /* let cleanup below stop the device on last iter */
+ if (i != NUM_ITERS-1)
+ rte_event_dev_stop(evdev);
+ }
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+port_single_lb_reconfig(struct test *t)
+{
+ if (init(t, 2, 2) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ goto fail;
+ }
+
+ static const struct rte_event_queue_conf conf_lb_atomic = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+ if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) {
+ printf("%d: error creating qid\n", __LINE__);
+ goto fail;
+ }
+
+ static const struct rte_event_queue_conf conf_single_link = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+ if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
+ printf("%d: error creating qid\n", __LINE__);
+ goto fail;
+ }
+
+ struct rte_event_port_conf port_conf = {
+ .new_event_threshold = 128,
+ .dequeue_depth = 32,
+ .enqueue_depth = 64,
+ };
+ if (rte_event_port_setup(evdev, 0, &port_conf) < 0) {
+ printf("%d Error setting up port\n", __LINE__);
+ goto fail;
+ }
+ if (rte_event_port_setup(evdev, 1, &port_conf) < 0) {
+ printf("%d Error setting up port\n", __LINE__);
+ goto fail;
+ }
+
+ /* link port to lb queue */
+ uint8_t queue_id = 0;
+ if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
+ printf("%d: error creating link for qid\n", __LINE__);
+ goto fail;
+ }
+
+ int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1);
+ if (ret != 1) {
+ printf("%d: Error unlinking lb port\n", __LINE__);
+ goto fail;
+ }
+
+ queue_id = 1;
+ if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) {
+ printf("%d: error creating link for qid\n", __LINE__);
+ goto fail;
+ }
+
+ queue_id = 0;
+ int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+xstats_brute_force(struct test *t)
+{
+ uint32_t i;
+ const uint32_t XSTATS_MAX = 1024;
+ uint32_t ids[XSTATS_MAX];
+ uint64_t values[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+ for (i = 0; i < XSTATS_MAX; i++)
+ ids[i] = i;
+
+ for (i = 0; i < 3; i++) {
+ uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i;
+ uint32_t j;
+ for (j = 0; j < UINT8_MAX; j++) {
+ rte_event_dev_xstats_names_get(evdev, mode,
+ j, xstats_names, ids, XSTATS_MAX);
+
+ rte_event_dev_xstats_get(evdev, mode, j, ids,
+ values, XSTATS_MAX);
+ }
+ }
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+xstats_id_reset_tests(struct test *t)
+{
+ const int wrk_enq = 2;
+ int err;
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ goto fail;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto fail;
+ }
+
+#define XSTATS_MAX 1024
+ int ret;
+ uint32_t i;
+ uint32_t ids[XSTATS_MAX];
+ uint64_t values[XSTATS_MAX];
+ struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX];
+
+ for (i = 0; i < XSTATS_MAX; i++)
+ ids[i] = i;
+
+#define NUM_DEV_STATS 6
+ /* Device names / values */
+ int num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, xstats_names, ids, XSTATS_MAX);
+ if (num_stats != NUM_DEV_STATS) {
+ printf("%d: expected %d stats, got return %d\n", __LINE__,
+ NUM_DEV_STATS, num_stats);
+ goto fail;
+ }
+ ret = rte_event_dev_xstats_get(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ 0, ids, values, num_stats);
+ if (ret != NUM_DEV_STATS) {
+ printf("%d: expected %d stats, got return %d\n", __LINE__,
+ NUM_DEV_STATS, ret);
+ goto fail;
+ }
+
+#define NPKTS 7
+ for (i = 0; i < NPKTS; i++) {
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ goto fail;
+ }
+ ev.queue_id = t->qid[i];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ arp->seqn = i;
+
+ int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ goto fail;
+ }
+ }
+
+ rte_event_schedule(evdev);
+
+ static const char * const dev_names[] = {
+ "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls",
+ "dev_sched_no_iq_enq", "dev_sched_no_cq_enq",
+ };
+ uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0};
+ for (i = 0; (int)i < ret; i++) {
+ unsigned int id;
+ uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
+ dev_names[i],
+ &id);
+ if (id != i) {
+ printf("%d: %s id incorrect, expected %d got %d\n",
+ __LINE__, dev_names[i], i, id);
+ goto fail;
+ }
+ if (val != dev_expected[i]) {
+ printf("%d: %s value incorrect, expected %"
+ PRIu64" got %d\n", __LINE__, dev_names[i],
+ dev_expected[i], id);
+ goto fail;
+ }
+ /* reset to zero */
+ int reset_ret = rte_event_dev_xstats_reset(evdev,
+ RTE_EVENT_DEV_XSTATS_DEVICE, 0,
+ &id,
+ 1);
+ if (reset_ret) {
+ printf("%d: failed to reset successfully\n", __LINE__);
+ goto fail;
+ }
+ dev_expected[i] = 0;
+ /* check value again */
+ val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0);
+ if (val != dev_expected[i]) {
+ printf("%d: %s value incorrect, expected %"PRIu64
+ " got %"PRIu64"\n", __LINE__, dev_names[i],
+ dev_expected[i], val);
+ goto fail;
+ }
+ };
+
+/* 48 is stat offset from start of the devices whole xstats.
+ * This WILL break every time we add a statistic to a port
+ * or the device, but there is no other way to test
+ */
+#define PORT_OFF 48
+/* num stats for the tested port. CQ size adds more stats to a port */
+#define NUM_PORT_STATS 21
+/* the port to test. */
+#define PORT 2
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, PORT,
+ xstats_names, ids, XSTATS_MAX);
+ if (num_stats != NUM_PORT_STATS) {
+ printf("%d: expected %d stats, got return %d\n",
+ __LINE__, NUM_PORT_STATS, num_stats);
+ goto fail;
+ }
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT,
+ ids, values, num_stats);
+
+ if (ret != NUM_PORT_STATS) {
+ printf("%d: expected %d stats, got return %d\n",
+ __LINE__, NUM_PORT_STATS, ret);
+ goto fail;
+ }
+ static const char * const port_names[] = {
+ "port_2_rx",
+ "port_2_tx",
+ "port_2_drop",
+ "port_2_inflight",
+ "port_2_avg_pkt_cycles",
+ "port_2_credits",
+ "port_2_rx_ring_used",
+ "port_2_rx_ring_free",
+ "port_2_cq_ring_used",
+ "port_2_cq_ring_free",
+ "port_2_dequeue_calls",
+ "port_2_dequeues_returning_0",
+ "port_2_dequeues_returning_1-4",
+ "port_2_dequeues_returning_5-8",
+ "port_2_dequeues_returning_9-12",
+ "port_2_dequeues_returning_13-16",
+ "port_2_dequeues_returning_17-20",
+ "port_2_dequeues_returning_21-24",
+ "port_2_dequeues_returning_25-28",
+ "port_2_dequeues_returning_29-32",
+ "port_2_dequeues_returning_33-36",
+ };
+ uint64_t port_expected[] = {
+ 0, /* rx */
+ NPKTS, /* tx */
+ 0, /* drop */
+ NPKTS, /* inflight */
+ 0, /* avg pkt cycles */
+ 0, /* credits */
+ 0, /* rx ring used */
+ 4096, /* rx ring free */
+ NPKTS, /* cq ring used */
+ 25, /* cq ring free */
+ 0, /* dequeue zero calls */
+ 0, 0, 0, 0, 0, /* 10 dequeue buckets */
+ 0, 0, 0, 0, 0,
+ };
+ uint64_t port_expected_zero[] = {
+ 0, /* rx */
+ 0, /* tx */
+ 0, /* drop */
+ NPKTS, /* inflight */
+ 0, /* avg pkt cycles */
+ 0, /* credits */
+ 0, /* rx ring used */
+ 4096, /* rx ring free */
+ NPKTS, /* cq ring used */
+ 25, /* cq ring free */
+ 0, /* dequeue zero calls */
+ 0, 0, 0, 0, 0, /* 10 dequeue buckets */
+ 0, 0, 0, 0, 0,
+ };
+ if (RTE_DIM(port_expected) != NUM_PORT_STATS ||
+ RTE_DIM(port_names) != NUM_PORT_STATS) {
+ printf("%d: port array of wrong size\n", __LINE__);
+ goto fail;
+ }
+
+ int failed = 0;
+ for (i = 0; (int)i < ret; i++) {
+ unsigned int id;
+ uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
+ port_names[i],
+ &id);
+ if (id != i + PORT_OFF) {
+ printf("%d: %s id incorrect, expected %d got %d\n",
+ __LINE__, port_names[i], i+PORT_OFF,
+ id);
+ failed = 1;
+ }
+ if (val != port_expected[i]) {
+ printf("%d: %s value incorrect, expected %"PRIu64
+ " got %d\n", __LINE__, port_names[i],
+ port_expected[i], id);
+ failed = 1;
+ }
+ /* reset to zero */
+ int reset_ret = rte_event_dev_xstats_reset(evdev,
+ RTE_EVENT_DEV_XSTATS_PORT, PORT,
+ &id,
+ 1);
+ if (reset_ret) {
+ printf("%d: failed to reset successfully\n", __LINE__);
+ failed = 1;
+ }
+ /* check value again */
+ val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0);
+ if (val != port_expected_zero[i]) {
+ printf("%d: %s value incorrect, expected %"PRIu64
+ " got %"PRIu64"\n", __LINE__, port_names[i],
+ port_expected_zero[i], val);
+ failed = 1;
+ }
+ };
+ if (failed)
+ goto fail;
+
+/* num queue stats */
+#define NUM_Q_STATS 13
+/* queue offset from start of the devices whole xstats.
+ * This will break every time we add a statistic to a device/port/queue
+ */
+#define QUEUE_OFF 90
+ const uint32_t queue = 0;
+ num_stats = rte_event_dev_xstats_names_get(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE, queue,
+ xstats_names, ids, XSTATS_MAX);
+ if (num_stats != NUM_Q_STATS) {
+ printf("%d: expected %d stats, got return %d\n",
+ __LINE__, NUM_Q_STATS, num_stats);
+ goto fail;
+ }
+ ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE,
+ queue, ids, values, num_stats);
+ if (ret != NUM_Q_STATS) {
+ printf("%d: expected 21 stats, got return %d\n", __LINE__, ret);
+ goto fail;
+ }
+ static const char * const queue_names[] = {
+ "qid_0_rx",
+ "qid_0_tx",
+ "qid_0_drop",
+ "qid_0_inflight",
+ "qid_0_iq_size",
+ "qid_0_iq_0_used",
+ "qid_0_iq_1_used",
+ "qid_0_iq_2_used",
+ "qid_0_iq_3_used",
+ "qid_0_port_0_pinned_flows",
+ "qid_0_port_1_pinned_flows",
+ "qid_0_port_2_pinned_flows",
+ "qid_0_port_3_pinned_flows",
+ };
+ uint64_t queue_expected[] = {
+ 7, /* rx */
+ 7, /* tx */
+ 0, /* drop */
+ 7, /* inflight */
+ 512, /* iq size */
+ 0, /* iq 0 used */
+ 0, /* iq 1 used */
+ 0, /* iq 2 used */
+ 0, /* iq 3 used */
+ 0, /* qid 0 port 0 pinned flows */
+ 0, /* qid 0 port 1 pinned flows */
+ 1, /* qid 0 port 2 pinned flows */
+ 0, /* qid 0 port 4 pinned flows */
+ };
+ uint64_t queue_expected_zero[] = {
+ 0, /* rx */
+ 0, /* tx */
+ 0, /* drop */
+ 7, /* inflight */
+ 512, /* iq size */
+ 0, /* iq 0 used */
+ 0, /* iq 1 used */
+ 0, /* iq 2 used */
+ 0, /* iq 3 used */
+ 0, /* qid 0 port 0 pinned flows */
+ 0, /* qid 0 port 1 pinned flows */
+ 1, /* qid 0 port 2 pinned flows */
+ 0, /* qid 0 port 4 pinned flows */
+ };
+ if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
+ RTE_DIM(queue_names) != NUM_Q_STATS) {
+ printf("%d : queue array of wrong size\n", __LINE__);
+ goto fail;
+ }
+
+ failed = 0;
+ for (i = 0; (int)i < ret; i++) {
+ unsigned int id;
+ uint64_t val = rte_event_dev_xstats_by_name_get(evdev,
+ queue_names[i],
+ &id);
+ if (id != i + QUEUE_OFF) {
+ printf("%d: %s id incorrect, expected %d got %d\n",
+ __LINE__, queue_names[i], i+QUEUE_OFF,
+ id);
+ failed = 1;
+ }
+ if (val != queue_expected[i]) {
+ printf("%d: %s value incorrect, expected %"PRIu64
+ " got %d\n", __LINE__, queue_names[i],
+ queue_expected[i], id);
+ failed = 1;
+ }
+ /* reset to zero */
+ int reset_ret = rte_event_dev_xstats_reset(evdev,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ queue, &id, 1);
+ if (reset_ret) {
+ printf("%d: failed to reset successfully\n", __LINE__);
+ failed = 1;
+ }
+ /* check value again */
+ val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i],
+ 0);
+ if (val != queue_expected_zero[i]) {
+ printf("%d: %s value incorrect, expected %"PRIu64
+ " got %"PRIu64"\n", __LINE__, queue_names[i],
+ queue_expected_zero[i], val);
+ failed = 1;
+ }
+ };
+
+ if (failed)
+ goto fail;
+
+ cleanup(t);
+ return 0;
+fail:
+ cleanup(t);
+ return -1;
+}
+
+static int
+ordered_reconfigure(struct test *t)
+{
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ const struct rte_event_queue_conf conf = {
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
+ printf("%d: error creating qid\n", __LINE__);
+ goto failed;
+ }
+
+ if (rte_event_queue_setup(evdev, 0, &conf) < 0) {
+ printf("%d: error creating qid, for 2nd time\n", __LINE__);
+ goto failed;
+ }
+
+ rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+failed:
+ cleanup(t);
+ return -1;
+}
+
+static int
+qid_priorities(struct test *t)
+{
+ /* Test works by having a CQ with enough empty space for all packets,
+ * and enqueueing 3 packets to 3 QIDs. They must return based on the
+ * priority of the QID, not the ingress order, to pass the test
+ */
+ unsigned int i;
+ /* Create instance with 1 ports, and 3 qids */
+ if (init(t, 3, 1) < 0 ||
+ create_ports(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ for (i = 0; i < 3; i++) {
+ /* Create QID */
+ const struct rte_event_queue_conf conf = {
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ /* increase priority (0 == highest), as we go */
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i,
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ };
+
+ if (rte_event_queue_setup(evdev, i, &conf) < 0) {
+ printf("%d: error creating qid %d\n", __LINE__, i);
+ return -1;
+ }
+ t->qid[i] = i;
+ }
+ t->nb_qids = i;
+ /* map all QIDs to port */
+ rte_event_port_link(evdev, t->port[0], NULL, NULL, 0);
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /* enqueue 3 packets, setting seqn and QID to check priority */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ ev.queue_id = t->qid[i];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ arp->seqn = i;
+
+ int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ rte_event_schedule(evdev);
+
+ /* dequeue packets, verify priority was upheld */
+ struct rte_event ev[32];
+ uint32_t deq_pkts =
+ rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0);
+ if (deq_pkts != 3) {
+ printf("%d: failed to deq packets\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ for (i = 0; i < 3; i++) {
+ if (ev[i].mbuf->seqn != 2-i) {
+ printf(
+ "%d: qid priority test: seqn %d incorrectly prioritized\n",
+ __LINE__, i);
+ }
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+load_balancing(struct test *t)
+{
+ const int rx_enq = 0;
+ int err;
+ uint32_t i;
+
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ for (i = 0; i < 3; i++) {
+ /* map port 1 - 3 inclusive */
+ if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0],
+ NULL, 1) != 1) {
+ printf("%d: error mapping qid to port %d\n",
+ __LINE__, i);
+ return -1;
+ }
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** FORWARD ****************/
+ /*
+ * Create a set of flows that test the load-balancing operation of the
+ * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test
+ * with a new flow, which should be sent to the 3rd mapped CQ
+ */
+ static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2};
+
+ for (i = 0; i < RTE_DIM(flows); i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .flow_id = flows[i],
+ .mbuf = arp,
+ };
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ rte_event_schedule(evdev);
+
+ struct test_event_dev_stats stats;
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.port_inflight[1] != 4) {
+ printf("%d:%s: port 1 inflight not correct\n", __LINE__,
+ __func__);
+ return -1;
+ }
+ if (stats.port_inflight[2] != 2) {
+ printf("%d:%s: port 2 inflight not correct\n", __LINE__,
+ __func__);
+ return -1;
+ }
+ if (stats.port_inflight[3] != 3) {
+ printf("%d:%s: port 3 inflight not correct\n", __LINE__,
+ __func__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+load_balancing_history(struct test *t)
+{
+ struct test_event_dev_stats stats = {0};
+ const int rx_enq = 0;
+ int err;
+ uint32_t i;
+
+ /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0)
+ return -1;
+
+ /* CQ mapping to QID */
+ if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping port 1 qid\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping port 2 qid\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) {
+ printf("%d: error mapping port 3 qid\n", __LINE__);
+ return -1;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * Create a set of flows that test the load-balancing operation of the
+ * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop
+ * the packet from CQ 0, send in a new set of flows. Ensure that:
+ * 1. The new flow 3 gets into the empty CQ0
+ * 2. packets for existing flow gets added into CQ1
+ * 3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain
+ * more outstanding pkts
+ *
+ * This test makes sure that when a flow ends (i.e. all packets
+ * have been completed for that flow), that the flow can be moved
+ * to a different CQ when new packets come in for that flow.
+ */
+ static uint32_t flows1[] = {0, 1, 1, 2};
+
+ for (i = 0; i < RTE_DIM(flows1); i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ struct rte_event ev = {
+ .flow_id = flows1[i],
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .event_type = RTE_EVENT_TYPE_CPU,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .mbuf = arp
+ };
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ arp->hash.rss = flows1[i];
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ /* call the scheduler */
+ rte_event_schedule(evdev);
+
+ /* Dequeue the flow 0 packet from port 1, so that we can then drop */
+ struct rte_event ev;
+ if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) {
+ printf("%d: failed to dequeue\n", __LINE__);
+ return -1;
+ }
+ if (ev.mbuf->hash.rss != flows1[0]) {
+ printf("%d: unexpected flow received\n", __LINE__);
+ return -1;
+ }
+
+ /* drop the flow 0 packet from port 1 */
+ rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1);
+
+ /* call the scheduler */
+ rte_event_schedule(evdev);
+
+ /*
+ * Set up the next set of flows, first a new flow to fill up
+ * CQ 0, so that the next flow 0 packet should go to CQ2
+ */
+ static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 };
+
+ for (i = 0; i < RTE_DIM(flows2); i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ struct rte_event ev = {
+ .flow_id = flows2[i],
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .event_type = RTE_EVENT_TYPE_CPU,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .mbuf = arp
+ };
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+ arp->hash.rss = flows2[i];
+
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ /* schedule */
+ rte_event_schedule(evdev);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d:failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * Now check the resulting inflights on each port.
+ */
+ if (stats.port_inflight[1] != 3) {
+ printf("%d:%s: port 1 inflight not correct\n", __LINE__,
+ __func__);
+ printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
+ (unsigned int)stats.port_inflight[1],
+ (unsigned int)stats.port_inflight[2],
+ (unsigned int)stats.port_inflight[3]);
+ return -1;
+ }
+ if (stats.port_inflight[2] != 4) {
+ printf("%d:%s: port 2 inflight not correct\n", __LINE__,
+ __func__);
+ printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
+ (unsigned int)stats.port_inflight[1],
+ (unsigned int)stats.port_inflight[2],
+ (unsigned int)stats.port_inflight[3]);
+ return -1;
+ }
+ if (stats.port_inflight[3] != 2) {
+ printf("%d:%s: port 3 inflight not correct\n", __LINE__,
+ __func__);
+ printf("Inflights, ports 1, 2, 3: %u, %u, %u\n",
+ (unsigned int)stats.port_inflight[1],
+ (unsigned int)stats.port_inflight[2],
+ (unsigned int)stats.port_inflight[3]);
+ return -1;
+ }
+
+ for (i = 1; i <= 3; i++) {
+ struct rte_event ev;
+ while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0))
+ rte_event_enqueue_burst(evdev, i, &release_ev, 1);
+ }
+ rte_event_schedule(evdev);
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+invalid_qid(struct test *t)
+{
+ struct test_event_dev_stats stats;
+ const int rx_enq = 0;
+ int err;
+ uint32_t i;
+
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ for (i = 0; i < 4; i++) {
+ err = rte_event_port_link(evdev, t->port[i], &t->qid[0],
+ NULL, 1);
+ if (err != 1) {
+ printf("%d: error mapping port 1 qid\n", __LINE__);
+ return -1;
+ }
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * Send in a packet with an invalid qid to the scheduler.
+ * We should see the packed enqueued OK, but the inflights for
+ * that packet should not be incremented, and the rx_dropped
+ * should be incremented.
+ */
+ static uint32_t flows1[] = {20};
+
+ for (i = 0; i < RTE_DIM(flows1); i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0] + flows1[i],
+ .flow_id = i,
+ .mbuf = arp,
+ };
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+
+ /* call the scheduler */
+ rte_event_schedule(evdev);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * Now check the resulting inflights on the port, and the rx_dropped.
+ */
+ if (stats.port_inflight[0] != 0) {
+ printf("%d:%s: port 1 inflight count not correct\n", __LINE__,
+ __func__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ if (stats.port_rx_dropped[0] != 1) {
+ printf("%d:%s: port 1 drops\n", __LINE__, __func__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ /* each packet drop should only be counted in one place - port or dev */
+ if (stats.rx_dropped != 0) {
+ printf("%d:%s: port 1 dropped count not correct\n", __LINE__,
+ __func__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+single_packet(struct test *t)
+{
+ const uint32_t MAGIC_SEQN = 7321;
+ struct rte_event ev;
+ struct test_event_dev_stats stats;
+ const int rx_enq = 0;
+ const int wrk_enq = 2;
+ int err;
+
+ /* Create instance with 4 ports */
+ if (init(t, 1, 4) < 0 ||
+ create_ports(t, 4) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** Gen pkt and enqueue ****************/
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev.mbuf = arp;
+ ev.queue_id = 0;
+ ev.flow_id = 3;
+ arp->seqn = MAGIC_SEQN;
+
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+
+ rte_event_schedule(evdev);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ if (stats.rx_pkts != 1 ||
+ stats.tx_pkts != 1 ||
+ stats.port_inflight[wrk_enq] != 1) {
+ printf("%d: Sched core didn't handle pkt as expected\n",
+ __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+
+ uint32_t deq_pkts;
+
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0);
+ if (deq_pkts < 1) {
+ printf("%d: Failed to deq\n", __LINE__);
+ return -1;
+ }
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ return -1;
+ }
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (ev.mbuf->seqn != MAGIC_SEQN) {
+ printf("%d: magic sequence number not dequeued\n", __LINE__);
+ return -1;
+ }
+
+ rte_pktmbuf_free(ev.mbuf);
+ err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1);
+ if (err < 0) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ rte_event_schedule(evdev);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[wrk_enq] != 0) {
+ printf("%d: port inflight not correct\n", __LINE__);
+ return -1;
+ }
+
+ cleanup(t);
+ return 0;
+}
+
+static int
+inflight_counts(struct test *t)
+{
+ struct rte_event ev;
+ struct test_event_dev_stats stats;
+ const int rx_enq = 0;
+ const int p1 = 1;
+ const int p2 = 2;
+ int err;
+ int i;
+
+ /* Create instance with 4 ports */
+ if (init(t, 2, 3) < 0 ||
+ create_ports(t, 3) < 0 ||
+ create_atomic_qids(t, 2) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+ err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /************** FORWARD ****************/
+#define QID1_NUM 5
+ for (i = 0; i < QID1_NUM; i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ goto err;
+ }
+
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ goto err;
+ }
+ }
+#define QID2_NUM 3
+ for (i = 0; i < QID2_NUM; i++) {
+ struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool);
+
+ if (!arp) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ goto err;
+ }
+ ev.queue_id = t->qid[1];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = arp;
+ err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ goto err;
+ }
+ }
+
+ /* schedule */
+ rte_event_schedule(evdev);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (err) {
+ printf("%d: failed to get stats\n", __LINE__);
+ goto err;
+ }
+
+ if (stats.rx_pkts != QID1_NUM + QID2_NUM ||
+ stats.tx_pkts != QID1_NUM + QID2_NUM) {
+ printf("%d: Sched core didn't handle pkt as expected\n",
+ __LINE__);
+ goto err;
+ }
+
+ if (stats.port_inflight[p1] != QID1_NUM) {
+ printf("%d: %s port 1 inflight not correct\n", __LINE__,
+ __func__);
+ goto err;
+ }
+ if (stats.port_inflight[p2] != QID2_NUM) {
+ printf("%d: %s port 2 inflight not correct\n", __LINE__,
+ __func__);
+ goto err;
+ }
+
+ /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/
+ /* port 1 */
+ struct rte_event events[QID1_NUM + QID2_NUM];
+ uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events,
+ RTE_DIM(events), 0);
+
+ if (deq_pkts != QID1_NUM) {
+ printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__);
+ goto err;
+ }
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[p1] != QID1_NUM) {
+ printf("%d: port 1 inflight decrement after DEQ != 0\n",
+ __LINE__);
+ goto err;
+ }
+ for (i = 0; i < QID1_NUM; i++) {
+ err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev,
+ 1);
+ if (err != 1) {
+ printf("%d: %s rte enqueue of inf release failed\n",
+ __LINE__, __func__);
+ goto err;
+ }
+ }
+
+ /*
+ * As the scheduler core decrements inflights, it needs to run to
+ * process packets to act on the drop messages
+ */
+ rte_event_schedule(evdev);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[p1] != 0) {
+ printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__);
+ goto err;
+ }
+
+ /* port2 */
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events,
+ RTE_DIM(events), 0);
+ if (deq_pkts != QID2_NUM) {
+ printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__);
+ goto err;
+ }
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[p2] != QID2_NUM) {
+ printf("%d: port 1 inflight decrement after DEQ != 0\n",
+ __LINE__);
+ goto err;
+ }
+ for (i = 0; i < QID2_NUM; i++) {
+ err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev,
+ 1);
+ if (err != 1) {
+ printf("%d: %s rte enqueue of inf release failed\n",
+ __LINE__, __func__);
+ goto err;
+ }
+ }
+
+ /*
+ * As the scheduler core decrements inflights, it needs to run to
+ * process packets to act on the drop messages
+ */
+ rte_event_schedule(evdev);
+
+ err = test_event_dev_stats_get(evdev, &stats);
+ if (stats.port_inflight[p2] != 0) {
+ printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__);
+ goto err;
+ }
+ cleanup(t);
+ return 0;
+
+err:
+ rte_event_dev_dump(evdev, stdout);
+ cleanup(t);
+ return -1;
+}
+
+static int
+parallel_basic(struct test *t, int check_order)
+{
+ const uint8_t rx_port = 0;
+ const uint8_t w1_port = 1;
+ const uint8_t w3_port = 3;
+ const uint8_t tx_port = 4;
+ int err;
+ int i;
+ uint32_t deq_pkts, j;
+ struct rte_mbuf *mbufs[3];
+ struct rte_mbuf *mbufs_out[3] = { 0 };
+ const uint32_t MAGIC_SEQN = 1234;
+
+ /* Create instance with 4 ports */
+ if (init(t, 2, tx_port + 1) < 0 ||
+ create_ports(t, tx_port + 1) < 0 ||
+ (check_order ? create_ordered_qids(t, 1) :
+ create_unordered_qids(t, 1)) < 0 ||
+ create_directed_qids(t, 1, &tx_port)) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /*
+ * CQ mapping to QID
+ * We need three ports, all mapped to the same ordered qid0. Then we'll
+ * take a packet out to each port, re-enqueue in reverse order,
+ * then make sure the reordering has taken place properly when we
+ * dequeue from the tx_port.
+ *
+ * Simplified test setup diagram:
+ *
+ * rx_port w1_port
+ * \ / \
+ * qid0 - w2_port - qid1
+ * \ / \
+ * w3_port tx_port
+ */
+ /* CQ mapping to QID for LB ports (directed mapped on create) */
+ for (i = w1_port; i <= w3_port; i++) {
+ err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
+ 1);
+ if (err != 1) {
+ printf("%d: error mapping lb qid\n", __LINE__);
+ cleanup(t);
+ return -1;
+ }
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ /* Enqueue 3 packets to the rx port */
+ for (i = 0; i < 3; i++) {
+ struct rte_event ev;
+ mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
+ if (!mbufs[i]) {
+ printf("%d: gen of pkt failed\n", __LINE__);
+ return -1;
+ }
+
+ ev.queue_id = t->qid[0];
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.mbuf = mbufs[i];
+ mbufs[i]->seqn = MAGIC_SEQN + i;
+
+ /* generate pkt and enqueue */
+ err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue pkt %u, retval = %u\n",
+ __LINE__, i, err);
+ return -1;
+ }
+ }
+
+ rte_event_schedule(evdev);
+
+ /* use extra slot to make logic in loops easier */
+ struct rte_event deq_ev[w3_port + 1];
+
+ /* Dequeue the 3 packets, one from each worker port */
+ for (i = w1_port; i <= w3_port; i++) {
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
+ &deq_ev[i], 1, 0);
+ if (deq_pkts != 1) {
+ printf("%d: Failed to deq\n", __LINE__);
+ rte_event_dev_dump(evdev, stdout);
+ return -1;
+ }
+ }
+
+ /* Enqueue each packet in reverse order, flushing after each one */
+ for (i = w3_port; i >= w1_port; i--) {
+
+ deq_ev[i].op = RTE_EVENT_OP_FORWARD;
+ deq_ev[i].queue_id = t->qid[1];
+ err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
+ if (err != 1) {
+ printf("%d: Failed to enqueue\n", __LINE__);
+ return -1;
+ }
+ }
+ rte_event_schedule(evdev);
+
+ /* dequeue from the tx ports, we should get 3 packets */
+ deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
+ 3, 0);
+
+ /* Check to see if we've got all 3 packets */
+ if (deq_pkts != 3) {
+ printf("%d: expected 3 pkts at tx port got %d from port %d\n",
+ __LINE__, deq_pkts, tx_port);
+ rte_event_dev_dump(evdev, stdout);
+ return 1;
+ }
+
+ /* Check to see if the sequence numbers are in expected order */
+ if (check_order) {
+ for (j = 0 ; j < deq_pkts ; j++) {
+ if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) {
+ printf(
+ "%d: Incorrect sequence number(%d) from port %d\n",
+ __LINE__, mbufs_out[j]->seqn, tx_port);
+ return -1;
+ }
+ }
+ }
+
+ /* Destroy the instance */
+ cleanup(t);
+ return 0;
+}
+
+static int
+ordered_basic(struct test *t)
+{
+ return parallel_basic(t, 1);
+}
+
+static int
+unordered_basic(struct test *t)
+{
+ return parallel_basic(t, 0);
+}
+
+static int
+holb(struct test *t) /* test to check we avoid basic head-of-line blocking */
+{
+ const struct rte_event new_ev = {
+ .op = RTE_EVENT_OP_NEW
+ /* all other fields zero */
+ };
+ struct rte_event ev = new_ev;
+ unsigned int rx_port = 0; /* port we get the first flow on */
+ char rx_port_used_stat[64];
+ char rx_port_free_stat[64];
+ char other_port_used_stat[64];
+
+ if (init(t, 1, 2) < 0 ||
+ create_ports(t, 2) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+ int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
+ if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 ||
+ nb_links != 1) {
+ printf("%d: Error links queue to ports\n", __LINE__);
+ goto err;
+ }
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto err;
+ }
+
+ /* send one packet and see where it goes, port 0 or 1 */
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error doing first enqueue\n", __LINE__);
+ goto err;
+ }
+ rte_event_schedule(evdev);
+
+ if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL)
+ != 1)
+ rx_port = 1;
+
+ snprintf(rx_port_used_stat, sizeof(rx_port_used_stat),
+ "port_%u_cq_ring_used", rx_port);
+ snprintf(rx_port_free_stat, sizeof(rx_port_free_stat),
+ "port_%u_cq_ring_free", rx_port);
+ snprintf(other_port_used_stat, sizeof(other_port_used_stat),
+ "port_%u_cq_ring_used", rx_port ^ 1);
+ if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL)
+ != 1) {
+ printf("%d: Error, first event not scheduled\n", __LINE__);
+ goto err;
+ }
+
+ /* now fill up the rx port's queue with one flow to cause HOLB */
+ do {
+ ev = new_ev;
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error with enqueue\n", __LINE__);
+ goto err;
+ }
+ rte_event_schedule(evdev);
+ } while (rte_event_dev_xstats_by_name_get(evdev,
+ rx_port_free_stat, NULL) != 0);
+
+ /* one more packet, which needs to stay in IQ - i.e. HOLB */
+ ev = new_ev;
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error with enqueue\n", __LINE__);
+ goto err;
+ }
+ rte_event_schedule(evdev);
+
+ /* check that the other port still has an empty CQ */
+ if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
+ != 0) {
+ printf("%d: Error, second port CQ is not empty\n", __LINE__);
+ goto err;
+ }
+ /* check IQ now has one packet */
+ if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
+ != 1) {
+ printf("%d: Error, QID does not have exactly 1 packet\n",
+ __LINE__);
+ goto err;
+ }
+
+ /* send another flow, which should pass the other IQ entry */
+ ev = new_ev;
+ ev.flow_id = 1;
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error with enqueue\n", __LINE__);
+ goto err;
+ }
+ rte_event_schedule(evdev);
+
+ if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL)
+ != 1) {
+ printf("%d: Error, second flow did not pass out first\n",
+ __LINE__);
+ goto err;
+ }
+
+ if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL)
+ != 1) {
+ printf("%d: Error, QID does not have exactly 1 packet\n",
+ __LINE__);
+ goto err;
+ }
+ cleanup(t);
+ return 0;
+err:
+ rte_event_dev_dump(evdev, stdout);
+ cleanup(t);
+ return -1;
+}
+
+static int
+worker_loopback_worker_fn(void *arg)
+{
+ struct test *t = arg;
+ uint8_t port = t->port[1];
+ int count = 0;
+ int enqd;
+
+ /*
+ * Takes packets from the input port and then loops them back through
+ * the Eventdev. Each packet gets looped through QIDs 0-8, 16 times
+ * so each packet goes through 8*16 = 128 times.
+ */
+ printf("%d: \tWorker function started\n", __LINE__);
+ while (count < NUM_PACKETS) {
+#define BURST_SIZE 32
+ struct rte_event ev[BURST_SIZE];
+ uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev,
+ BURST_SIZE, 0);
+ if (nb_rx == 0) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ ev[i].queue_id++;
+ if (ev[i].queue_id != 8) {
+ ev[i].op = RTE_EVENT_OP_FORWARD;
+ enqd = rte_event_enqueue_burst(evdev, port,
+ &ev[i], 1);
+ if (enqd != 1) {
+ printf("%d: Can't enqueue FWD!!\n",
+ __LINE__);
+ return -1;
+ }
+ continue;
+ }
+
+ ev[i].queue_id = 0;
+ ev[i].mbuf->udata64++;
+ if (ev[i].mbuf->udata64 != 16) {
+ ev[i].op = RTE_EVENT_OP_FORWARD;
+ enqd = rte_event_enqueue_burst(evdev, port,
+ &ev[i], 1);
+ if (enqd != 1) {
+ printf("%d: Can't enqueue FWD!!\n",
+ __LINE__);
+ return -1;
+ }
+ continue;
+ }
+ /* we have hit 16 iterations through system - drop */
+ rte_pktmbuf_free(ev[i].mbuf);
+ count++;
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1);
+ if (enqd != 1) {
+ printf("%d drop enqueue failed\n", __LINE__);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+worker_loopback_producer_fn(void *arg)
+{
+ struct test *t = arg;
+ uint8_t port = t->port[0];
+ uint64_t count = 0;
+
+ printf("%d: \tProducer function started\n", __LINE__);
+ while (count < NUM_PACKETS) {
+ struct rte_mbuf *m = 0;
+ do {
+ m = rte_pktmbuf_alloc(t->mbuf_pool);
+ } while (m == NULL);
+
+ m->udata64 = 0;
+
+ struct rte_event ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .queue_id = t->qid[0],
+ .flow_id = (uintptr_t)m & 0xFFFF,
+ .mbuf = m,
+ };
+
+ if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) {
+ while (rte_event_enqueue_burst(evdev, port, &ev, 1) !=
+ 1)
+ rte_pause();
+ }
+
+ count++;
+ }
+
+ return 0;
+}
+
+static int
+worker_loopback(struct test *t)
+{
+ /* use a single producer core, and a worker core to see what happens
+ * if the worker loops packets back multiple times
+ */
+ struct test_event_dev_stats stats;
+ uint64_t print_cycles = 0, cycles = 0;
+ uint64_t tx_pkts = 0;
+ int err;
+ int w_lcore, p_lcore;
+
+ if (init(t, 8, 2) < 0 ||
+ create_atomic_qids(t, 8) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* RX with low max events */
+ static struct rte_event_port_conf conf = {
+ .dequeue_depth = 32,
+ .enqueue_depth = 64,
+ };
+ /* beware: this cannot be initialized in the static above as it would
+ * only be initialized once - and this needs to be set for multiple runs
+ */
+ conf.new_event_threshold = 512;
+
+ if (rte_event_port_setup(evdev, 0, &conf) < 0) {
+ printf("Error setting up RX port\n");
+ return -1;
+ }
+ t->port[0] = 0;
+ /* TX with higher max events */
+ conf.new_event_threshold = 4096;
+ if (rte_event_port_setup(evdev, 1, &conf) < 0) {
+ printf("Error setting up TX port\n");
+ return -1;
+ }
+ t->port[1] = 1;
+
+ /* CQ mapping to QID */
+ err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0);
+ if (err != 8) { /* should have mapped all queues*/
+ printf("%d: error mapping port 2 to all qids\n", __LINE__);
+ return -1;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ return -1;
+ }
+
+ p_lcore = rte_get_next_lcore(
+ /* start core */ -1,
+ /* skip master */ 1,
+ /* wrap */ 0);
+ w_lcore = rte_get_next_lcore(p_lcore, 1, 0);
+
+ rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore);
+ rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore);
+
+ print_cycles = cycles = rte_get_timer_cycles();
+ while (rte_eal_get_lcore_state(p_lcore) != FINISHED ||
+ rte_eal_get_lcore_state(w_lcore) != FINISHED) {
+
+ rte_event_schedule(evdev);
+
+ uint64_t new_cycles = rte_get_timer_cycles();
+
+ if (new_cycles - print_cycles > rte_get_timer_hz()) {
+ test_event_dev_stats_get(evdev, &stats);
+ printf(
+ "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n",
+ __LINE__, stats.rx_pkts, stats.tx_pkts);
+
+ print_cycles = new_cycles;
+ }
+ if (new_cycles - cycles > rte_get_timer_hz() * 3) {
+ test_event_dev_stats_get(evdev, &stats);
+ if (stats.tx_pkts == tx_pkts) {
+ rte_event_dev_dump(evdev, stdout);
+ printf("Dumping xstats:\n");
+ xstats_print();
+ printf(
+ "%d: No schedules for seconds, deadlock\n",
+ __LINE__);
+ return -1;
+ }
+ tx_pkts = stats.tx_pkts;
+ cycles = new_cycles;
+ }
+ }
+ rte_event_schedule(evdev); /* ensure all completions are flushed */
+
+ rte_eal_mp_wait_lcore();
+
+ cleanup(t);
+ return 0;
+}
+
+static struct rte_mempool *eventdev_func_mempool;
+
+static int
+test_sw_eventdev(void)
+{
+ struct test *t = malloc(sizeof(struct test));
+ int ret;
+
+ /* manually initialize the op, older gcc's complain on static
+ * initialization of struct elements that are a bitfield.
+ */
+ release_ev.op = RTE_EVENT_OP_RELEASE;
+
+ const char *eventdev_name = "event_sw0";
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ printf("%d: Eventdev %s not found - creating.\n",
+ __LINE__, eventdev_name);
+ if (rte_vdev_init(eventdev_name, NULL) < 0) {
+ printf("Error creating eventdev\n");
+ return -1;
+ }
+ evdev = rte_event_dev_get_dev_id(eventdev_name);
+ if (evdev < 0) {
+ printf("Error finding newly created eventdev\n");
+ return -1;
+ }
+ }
+
+ /* Only create mbuf pool once, reuse for each test run */
+ if (!eventdev_func_mempool) {
+ eventdev_func_mempool = rte_pktmbuf_pool_create(
+ "EVENTDEV_SW_SA_MBUF_POOL",
+ (1<<12), /* 4k buffers */
+ 32 /*MBUF_CACHE_SIZE*/,
+ 0,
+ 512, /* use very small mbufs */
+ rte_socket_id());
+ if (!eventdev_func_mempool) {
+ printf("ERROR creating mempool\n");
+ return -1;
+ }
+ }
+ t->mbuf_pool = eventdev_func_mempool;
+
+ printf("*** Running Single Directed Packet test...\n");
+ ret = test_single_directed_packet(t);
+ if (ret != 0) {
+ printf("ERROR - Single Directed Packet test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Single Load Balanced Packet test...\n");
+ ret = single_packet(t);
+ if (ret != 0) {
+ printf("ERROR - Single Packet test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Unordered Basic test...\n");
+ ret = unordered_basic(t);
+ if (ret != 0) {
+ printf("ERROR - Unordered Basic test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Ordered Basic test...\n");
+ ret = ordered_basic(t);
+ if (ret != 0) {
+ printf("ERROR - Ordered Basic test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Burst Packets test...\n");
+ ret = burst_packets(t);
+ if (ret != 0) {
+ printf("ERROR - Burst Packets test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Load Balancing test...\n");
+ ret = load_balancing(t);
+ if (ret != 0) {
+ printf("ERROR - Load Balancing test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Prioritized Directed test...\n");
+ ret = test_priority_directed(t);
+ if (ret != 0) {
+ printf("ERROR - Prioritized Directed test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Prioritized Atomic test...\n");
+ ret = test_priority_atomic(t);
+ if (ret != 0) {
+ printf("ERROR - Prioritized Atomic test FAILED.\n");
+ return ret;
+ }
+
+ printf("*** Running Prioritized Ordered test...\n");
+ ret = test_priority_ordered(t);
+ if (ret != 0) {
+ printf("ERROR - Prioritized Ordered test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Prioritized Unordered test...\n");
+ ret = test_priority_unordered(t);
+ if (ret != 0) {
+ printf("ERROR - Prioritized Unordered test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Invalid QID test...\n");
+ ret = invalid_qid(t);
+ if (ret != 0) {
+ printf("ERROR - Invalid QID test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Load Balancing History test...\n");
+ ret = load_balancing_history(t);
+ if (ret != 0) {
+ printf("ERROR - Load Balancing History test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Inflight Count test...\n");
+ ret = inflight_counts(t);
+ if (ret != 0) {
+ printf("ERROR - Inflight Count test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Abuse Inflights test...\n");
+ ret = abuse_inflights(t);
+ if (ret != 0) {
+ printf("ERROR - Abuse Inflights test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running XStats test...\n");
+ ret = xstats_tests(t);
+ if (ret != 0) {
+ printf("ERROR - XStats test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running XStats ID Reset test...\n");
+ ret = xstats_id_reset_tests(t);
+ if (ret != 0) {
+ printf("ERROR - XStats ID Reset test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running XStats Brute Force test...\n");
+ ret = xstats_brute_force(t);
+ if (ret != 0) {
+ printf("ERROR - XStats Brute Force test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running XStats ID Abuse test...\n");
+ ret = xstats_id_abuse_tests(t);
+ if (ret != 0) {
+ printf("ERROR - XStats ID Abuse test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running QID Priority test...\n");
+ ret = qid_priorities(t);
+ if (ret != 0) {
+ printf("ERROR - QID Priority test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Ordered Reconfigure test...\n");
+ ret = ordered_reconfigure(t);
+ if (ret != 0) {
+ printf("ERROR - Ordered Reconfigure test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Port LB Single Reconfig test...\n");
+ ret = port_single_lb_reconfig(t);
+ if (ret != 0) {
+ printf("ERROR - Port LB Single Reconfig test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Port Reconfig Credits test...\n");
+ ret = port_reconfig_credits(t);
+ if (ret != 0) {
+ printf("ERROR - Port Reconfig Credits Reset test FAILED.\n");
+ return ret;
+ }
+ printf("*** Running Head-of-line-blocking test...\n");
+ ret = holb(t);
+ if (ret != 0) {
+ printf("ERROR - Head-of-line-blocking test FAILED.\n");
+ return ret;
+ }
+ if (rte_lcore_count() >= 3) {
+ printf("*** Running Worker loopback test...\n");
+ ret = worker_loopback(t);
+ if (ret != 0) {
+ printf("ERROR - Worker loopback test FAILED.\n");
+ return ret;
+ }
+ } else {
+ printf("### Not enough cores for worker loopback test.\n");
+ printf("### Need at least 3 cores for test.\n");
+ }
+ /*
+ * Free test instance, leaving mempool initialized, and a pointer to it
+ * in static eventdev_func_mempool, as it is re-used on re-runs
+ */
+ free(t);
+
+ return 0;
+}
+
+REGISTER_TEST_COMMAND(eventdev_sw_autotest, test_sw_eventdev);
diff --git a/app/test/test_func_reentrancy.c b/test/test/test_func_reentrancy.c
index baa01ffc..baa01ffc 100644
--- a/app/test/test_func_reentrancy.c
+++ b/test/test/test_func_reentrancy.c
diff --git a/app/test/test_hash.c b/test/test/test_hash.c
index 2c87efe6..2c87efe6 100644
--- a/app/test/test_hash.c
+++ b/test/test/test_hash.c
diff --git a/app/test/test_hash_functions.c b/test/test/test_hash_functions.c
index 9652b04d..9652b04d 100644
--- a/app/test/test_hash_functions.c
+++ b/test/test/test_hash_functions.c
diff --git a/app/test/test_hash_multiwriter.c b/test/test/test_hash_multiwriter.c
index 4dcbd9d5..4dcbd9d5 100644
--- a/app/test/test_hash_multiwriter.c
+++ b/test/test/test_hash_multiwriter.c
diff --git a/app/test/test_hash_perf.c b/test/test/test_hash_perf.c
index c0051b20..c0051b20 100644
--- a/app/test/test_hash_perf.c
+++ b/test/test/test_hash_perf.c
diff --git a/app/test/test_hash_scaling.c b/test/test/test_hash_scaling.c
index 46c48e54..46c48e54 100644
--- a/app/test/test_hash_scaling.c
+++ b/test/test/test_hash_scaling.c
diff --git a/app/test/test_interrupts.c b/test/test/test_interrupts.c
index 371101f0..e0229e5e 100644
--- a/app/test/test_interrupts.c
+++ b/test/test/test_interrupts.c
@@ -199,8 +199,9 @@ test_interrupt_handle_compare(struct rte_intr_handle *intr_handle_l,
* Callback for the test interrupt.
*/
static void
-test_interrupt_callback(struct rte_intr_handle *intr_handle, void *arg)
+test_interrupt_callback(void *arg)
{
+ struct rte_intr_handle *intr_handle = arg;
if (test_intr_type >= TEST_INTERRUPT_HANDLE_MAX) {
printf("invalid interrupt type\n");
flag = -1;
@@ -230,9 +231,9 @@ test_interrupt_callback(struct rte_intr_handle *intr_handle, void *arg)
* Callback for the test interrupt.
*/
static void
-test_interrupt_callback_1(struct rte_intr_handle *intr_handle,
- __attribute__((unused)) void *arg)
+test_interrupt_callback_1(void *arg)
{
+ struct rte_intr_handle *intr_handle = arg;
if (test_interrupt_handle_sanity_check(intr_handle) < 0) {
printf("null or invalid intr_handle for %s\n", __func__);
flag = -1;
@@ -364,7 +365,7 @@ test_interrupt_full_path_check(enum test_interrupt_handle_type intr_type)
test_intr_handle = intr_handles[intr_type];
test_intr_type = intr_type;
if (rte_intr_callback_register(&test_intr_handle,
- test_interrupt_callback, NULL) < 0) {
+ test_interrupt_callback, &test_intr_handle) < 0) {
printf("fail to register callback\n");
return -1;
}
@@ -378,7 +379,7 @@ test_interrupt_full_path_check(enum test_interrupt_handle_type intr_type)
rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
if (rte_intr_callback_unregister(&test_intr_handle,
- test_interrupt_callback, NULL) < 0)
+ test_interrupt_callback, &test_intr_handle) < 0)
return -1;
if (flag == 0) {
@@ -441,7 +442,7 @@ test_interrupt(void)
/* check if it will fail to register cb with invalid intr_handle */
test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_INVALID];
if (rte_intr_callback_register(&test_intr_handle,
- test_interrupt_callback, NULL) == 0) {
+ test_interrupt_callback, &test_intr_handle) == 0) {
printf("unexpectedly register successfully with invalid "
"intr_handle\n");
goto out;
@@ -449,7 +450,7 @@ test_interrupt(void)
/* check if it will fail to register without callback */
test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID];
- if (rte_intr_callback_register(&test_intr_handle, NULL, NULL) == 0) {
+ if (rte_intr_callback_register(&test_intr_handle, NULL, &test_intr_handle) == 0) {
printf("unexpectedly register successfully with "
"null callback\n");
goto out;
@@ -466,7 +467,7 @@ test_interrupt(void)
/* check if it will fail to unregister cb with invalid intr_handle */
test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_INVALID];
if (rte_intr_callback_unregister(&test_intr_handle,
- test_interrupt_callback, NULL) > 0) {
+ test_interrupt_callback, &test_intr_handle) > 0) {
printf("unexpectedly unregister successfully with "
"invalid intr_handle\n");
goto out;
@@ -475,12 +476,12 @@ test_interrupt(void)
/* check if it is ok to register the same intr_handle twice */
test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID];
if (rte_intr_callback_register(&test_intr_handle,
- test_interrupt_callback, NULL) < 0) {
+ test_interrupt_callback, &test_intr_handle) < 0) {
printf("it fails to register test_interrupt_callback\n");
goto out;
}
if (rte_intr_callback_register(&test_intr_handle,
- test_interrupt_callback_1, NULL) < 0) {
+ test_interrupt_callback_1, &test_intr_handle) < 0) {
printf("it fails to register test_interrupt_callback_1\n");
goto out;
}
@@ -492,7 +493,7 @@ test_interrupt(void)
goto out;
}
if (rte_intr_callback_unregister(&test_intr_handle,
- test_interrupt_callback, NULL) <= 0) {
+ test_interrupt_callback, &test_intr_handle) <= 0) {
printf("it fails to unregister test_interrupt_callback\n");
goto out;
}
diff --git a/app/test/test_kni.c b/test/test/test_kni.c
index 309741cb..db17fdf3 100644
--- a/app/test/test_kni.c
+++ b/test/test/test_kni.c
@@ -92,7 +92,7 @@ static const struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0,
.hw_vlan_filter = 0,
.jumbo_frame = 0,
- .hw_strip_crc = 0,
+ .hw_strip_crc = 1,
},
.txmode = {
.mq_mode = ETH_DCB_NONE,
diff --git a/app/test/test_kvargs.c b/test/test/test_kvargs.c
index 4d9e805b..4d9e805b 100644
--- a/app/test/test_kvargs.c
+++ b/test/test/test_kvargs.c
diff --git a/app/test/test_link_bonding.c b/test/test/test_link_bonding.c
index 32296604..52d2d052 100644
--- a/app/test/test_link_bonding.c
+++ b/test/test/test_link_bonding.c
@@ -172,7 +172,7 @@ struct rte_eth_rxmode rx_mode = {
.hw_vlan_strip = 1, /**< VLAN strip enabled. */
.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
- .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
+ .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
};
struct rte_fdir_conf fdir_conf = {
@@ -191,7 +191,7 @@ static struct rte_eth_conf default_pmd_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
diff --git a/app/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c
index 53caa3e9..106ec624 100644
--- a/app/test/test_link_bonding_mode4.c
+++ b/test/test/test_link_bonding_mode4.c
@@ -143,7 +143,7 @@ static struct rte_eth_conf default_pmd_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -193,7 +193,8 @@ static uint8_t lacpdu_rx_count[RTE_MAX_ETHPORTS] = {0, };
static int
slave_get_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
{
- return rte_ring_dequeue_burst(slave->tx_queue, (void **)buf, size);
+ return rte_ring_dequeue_burst(slave->tx_queue, (void **)buf,
+ size, NULL);
}
/*
@@ -206,7 +207,8 @@ slave_get_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
static int
slave_put_pkts(struct slave_conf *slave, struct rte_mbuf **buf, uint16_t size)
{
- return rte_ring_enqueue_burst(slave->rx_queue, (void **)buf, size);
+ return rte_ring_enqueue_burst(slave->rx_queue, (void **)buf,
+ size, NULL);
}
static uint16_t
diff --git a/app/test/test_link_bonding_rssconf.c b/test/test/test_link_bonding_rssconf.c
index 34f1c166..d28db7d7 100644
--- a/app/test/test_link_bonding_rssconf.c
+++ b/test/test/test_link_bonding_rssconf.c
@@ -52,7 +52,6 @@
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_eth_bond.h>
-#include <rte_eth_null.h>
#include "test.h"
@@ -67,7 +66,7 @@
#define SLAVE_RXTX_QUEUE_FMT ("rssconf_slave%d_q%d")
#define NUM_MBUFS 8191
-#define MBUF_SIZE (1600 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_SIZE (1600 + RTE_PKTMBUF_HEADROOM)
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32
@@ -116,7 +115,7 @@ static struct rte_eth_conf default_pmd_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -133,7 +132,7 @@ static struct rte_eth_conf rss_pmd_conf = {
.hw_ip_checksum = 0, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -536,13 +535,12 @@ test_setup(void)
if (test_params.mbuf_pool == NULL) {
- test_params.mbuf_pool = rte_mempool_create("RSS_MBUF_POOL", NUM_MBUFS *
- SLAVE_COUNT, MBUF_SIZE, MBUF_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init,
- NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0);
+ test_params.mbuf_pool = rte_pktmbuf_pool_create(
+ "RSS_MBUF_POOL", NUM_MBUFS * SLAVE_COUNT,
+ MBUF_CACHE_SIZE, 0, MBUF_SIZE, rte_socket_id());
TEST_ASSERT(test_params.mbuf_pool != NULL,
- "rte_mempool_create failed\n");
+ "rte_pktmbuf_pool_create failed\n");
}
/* Create / initialize ring eth devs. */
@@ -552,7 +550,8 @@ test_setup(void)
port_id = rte_eth_dev_count();
snprintf(name, sizeof(name), SLAVE_DEV_NAME_FMT, port_id);
- retval = eth_dev_null_create(name, 0, 64, 0);
+ retval = rte_vdev_init(name,
+ "driver=net_null,size=64,copy=0");
TEST_ASSERT_SUCCESS(retval, "Failed to create null device '%s'\n",
name);
diff --git a/app/test/test_logs.c b/test/test/test_logs.c
index 6985ddde..730a86bd 100644
--- a/app/test/test_logs.c
+++ b/test/test/test_logs.c
@@ -62,24 +62,24 @@ static int
test_logs(void)
{
/* enable these logs type */
- rte_set_log_type(RTE_LOGTYPE_TESTAPP1, 1);
- rte_set_log_type(RTE_LOGTYPE_TESTAPP2, 1);
+ rte_log_set_level(RTE_LOGTYPE_TESTAPP1, RTE_LOG_EMERG);
+ rte_log_set_level(RTE_LOGTYPE_TESTAPP2, RTE_LOG_EMERG);
/* log in error level */
- rte_set_log_level(RTE_LOG_ERR);
+ rte_log_set_global_level(RTE_LOG_ERR);
RTE_LOG(ERR, TESTAPP1, "error message\n");
RTE_LOG(CRIT, TESTAPP1, "critical message\n");
/* log in critical level */
- rte_set_log_level(RTE_LOG_CRIT);
+ rte_log_set_global_level(RTE_LOG_CRIT);
RTE_LOG(ERR, TESTAPP2, "error message (not displayed)\n");
RTE_LOG(CRIT, TESTAPP2, "critical message\n");
/* disable one log type */
- rte_set_log_type(RTE_LOGTYPE_TESTAPP2, 0);
+ rte_log_set_level(RTE_LOGTYPE_TESTAPP2, RTE_LOG_DEBUG);
/* log in error level */
- rte_set_log_level(RTE_LOG_ERR);
+ rte_log_set_global_level(RTE_LOG_ERR);
RTE_LOG(ERR, TESTAPP1, "error message\n");
RTE_LOG(ERR, TESTAPP2, "error message (not displayed)\n");
diff --git a/app/test/test_lpm.c b/test/test/test_lpm.c
index 41ae80fe..41ae80fe 100644
--- a/app/test/test_lpm.c
+++ b/test/test/test_lpm.c
diff --git a/app/test/test_lpm6.c b/test/test/test_lpm6.c
index 61134f70..e0e7bf02 100644
--- a/app/test/test_lpm6.c
+++ b/test/test/test_lpm6.c
@@ -79,6 +79,7 @@ static int32_t test24(void);
static int32_t test25(void);
static int32_t test26(void);
static int32_t test27(void);
+static int32_t test28(void);
rte_lpm6_test tests6[] = {
/* Test Cases */
@@ -110,6 +111,7 @@ rte_lpm6_test tests6[] = {
test25,
test26,
test27,
+ test28,
};
#define NUM_LPM6_TESTS (sizeof(tests6)/sizeof(tests6[0]))
@@ -354,7 +356,7 @@ test6(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
- uint8_t next_hop_return = 0;
+ uint32_t next_hop_return = 0;
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -392,7 +394,7 @@ test7(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[10][16];
- int16_t next_hop_return[10];
+ int32_t next_hop_return[10];
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -469,7 +471,8 @@ test9(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
- uint8_t depth = 16, next_hop_add = 100, next_hop_return = 0;
+ uint8_t depth = 16;
+ uint32_t next_hop_add = 100, next_hop_return = 0;
int32_t status = 0;
uint8_t i;
@@ -513,7 +516,8 @@ test10(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
- uint8_t depth, next_hop_add = 100;
+ uint8_t depth;
+ uint32_t next_hop_add = 100;
int32_t status = 0;
int i;
@@ -557,7 +561,8 @@ test11(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
- uint8_t depth, next_hop_add = 100;
+ uint8_t depth;
+ uint32_t next_hop_add = 100;
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -617,7 +622,8 @@ test12(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
- uint8_t depth, next_hop_add = 100;
+ uint8_t depth;
+ uint32_t next_hop_add = 100;
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -655,7 +661,8 @@ test13(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
- uint8_t depth, next_hop_add = 100;
+ uint8_t depth;
+ uint32_t next_hop_add = 100;
int32_t status = 0;
config.max_rules = 2;
@@ -702,7 +709,8 @@ test14(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
- uint8_t depth = 25, next_hop_add = 100;
+ uint8_t depth = 25;
+ uint32_t next_hop_add = 100;
int32_t status = 0;
int i;
@@ -748,7 +756,8 @@ test15(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
- uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
+ uint8_t depth = 24;
+ uint32_t next_hop_add = 100, next_hop_return = 0;
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -784,7 +793,8 @@ test16(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[] = {12,12,1,0,0,0,0,0,0,0,0,0,0,0,0,0};
- uint8_t depth = 128, next_hop_add = 100, next_hop_return = 0;
+ uint8_t depth = 128;
+ uint32_t next_hop_add = 100, next_hop_return = 0;
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -828,7 +838,8 @@ test17(void)
uint8_t ip1[] = {127,255,255,255,255,255,255,255,255,
255,255,255,255,255,255,255};
uint8_t ip2[] = {128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
- uint8_t depth, next_hop_add, next_hop_return;
+ uint8_t depth;
+ uint32_t next_hop_add, next_hop_return;
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -857,7 +868,7 @@ test17(void)
/* Loop with rte_lpm6_delete. */
for (depth = 16; depth >= 1; depth--) {
- next_hop_add = (uint8_t) (depth - 1);
+ next_hop_add = (depth - 1);
status = rte_lpm6_delete(lpm, ip2, depth);
TEST_LPM_ASSERT(status == 0);
@@ -893,8 +904,9 @@ test18(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[16], ip_1[16], ip_2[16];
- uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
- next_hop_add_2, next_hop_return;
+ uint8_t depth, depth_1, depth_2;
+ uint32_t next_hop_add, next_hop_add_1,
+ next_hop_add_2, next_hop_return;
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -1055,7 +1067,8 @@ test19(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[16];
- uint8_t depth, next_hop_add, next_hop_return;
+ uint8_t depth;
+ uint32_t next_hop_add, next_hop_return;
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -1253,7 +1266,8 @@ test20(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[16];
- uint8_t depth, next_hop_add, next_hop_return;
+ uint8_t depth;
+ uint32_t next_hop_add, next_hop_return;
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -1320,8 +1334,9 @@ test21(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip_batch[4][16];
- uint8_t depth, next_hop_add;
- int16_t next_hop_return[4];
+ uint8_t depth;
+ uint32_t next_hop_add;
+ int32_t next_hop_return[4];
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -1378,8 +1393,9 @@ test22(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip_batch[5][16];
- uint8_t depth[5], next_hop_add;
- int16_t next_hop_return[5];
+ uint8_t depth[5];
+ uint32_t next_hop_add;
+ int32_t next_hop_return[5];
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -1495,7 +1511,8 @@ test23(void)
struct rte_lpm6_config config;
uint32_t i;
uint8_t ip[16];
- uint8_t depth, next_hop_add, next_hop_return;
+ uint8_t depth;
+ uint32_t next_hop_add, next_hop_return;
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -1579,7 +1596,8 @@ test25(void)
struct rte_lpm6_config config;
uint8_t ip[16];
uint32_t i;
- uint8_t depth, next_hop_add, next_hop_return, next_hop_expected;
+ uint8_t depth;
+ uint32_t next_hop_add, next_hop_return, next_hop_expected;
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -1632,10 +1650,10 @@ test26(void)
uint8_t d_ip_10_32 = 32;
uint8_t d_ip_10_24 = 24;
uint8_t d_ip_20_25 = 25;
- uint8_t next_hop_ip_10_32 = 100;
- uint8_t next_hop_ip_10_24 = 105;
- uint8_t next_hop_ip_20_25 = 111;
- uint8_t next_hop_return = 0;
+ uint32_t next_hop_ip_10_32 = 100;
+ uint32_t next_hop_ip_10_24 = 105;
+ uint32_t next_hop_ip_20_25 = 111;
+ uint32_t next_hop_return = 0;
int32_t status = 0;
config.max_rules = MAX_RULES;
@@ -1650,7 +1668,7 @@ test26(void)
return -1;
status = rte_lpm6_lookup(lpm, ip_10_32, &next_hop_return);
- uint8_t test_hop_10_32 = next_hop_return;
+ uint32_t test_hop_10_32 = next_hop_return;
TEST_LPM_ASSERT(status == 0);
TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
@@ -1659,7 +1677,7 @@ test26(void)
return -1;
status = rte_lpm6_lookup(lpm, ip_10_24, &next_hop_return);
- uint8_t test_hop_10_24 = next_hop_return;
+ uint32_t test_hop_10_24 = next_hop_return;
TEST_LPM_ASSERT(status == 0);
TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
@@ -1668,7 +1686,7 @@ test26(void)
return -1;
status = rte_lpm6_lookup(lpm, ip_20_25, &next_hop_return);
- uint8_t test_hop_20_25 = next_hop_return;
+ uint32_t test_hop_20_25 = next_hop_return;
TEST_LPM_ASSERT(status == 0);
TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
@@ -1707,7 +1725,8 @@ test27(void)
struct rte_lpm6 *lpm = NULL;
struct rte_lpm6_config config;
uint8_t ip[] = {128,128,128,128,128,128,128,128,128,128,128,128,128,128,0,0};
- uint8_t depth = 128, next_hop_add = 100, next_hop_return;
+ uint8_t depth = 128;
+ uint32_t next_hop_add = 100, next_hop_return;
int32_t status = 0;
int i, j;
@@ -1746,6 +1765,42 @@ test27(void)
}
/*
+ * Call add, lookup and delete for a single rule with maximum 21bit next_hop
+ * size.
+ * Check that next_hop returned from lookup is equal to provisioned value.
+ * Delete the rule and check that the same test returs a miss.
+ */
+int32_t
+test28(void)
+{
+ struct rte_lpm6 *lpm = NULL;
+ struct rte_lpm6_config config;
+ uint8_t ip[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ uint8_t depth = 16;
+ uint32_t next_hop_add = 0x001FFFFF, next_hop_return = 0;
+ int32_t status = 0;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
+
+ lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ status = rte_lpm6_add(lpm, ip, depth, next_hop_add);
+ TEST_LPM_ASSERT(status == 0);
+
+ status = rte_lpm6_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+ status = rte_lpm6_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+ rte_lpm6_free(lpm);
+
+ return PASS;
+}
+
+/*
* Do all unit tests.
*/
static int
diff --git a/app/test/test_lpm6_data.h b/test/test/test_lpm6_data.h
index c3573b2b..c3573b2b 100644
--- a/app/test/test_lpm6_data.h
+++ b/test/test/test_lpm6_data.h
diff --git a/app/test/test_lpm6_perf.c b/test/test/test_lpm6_perf.c
index 0723081b..30be430f 100644
--- a/app/test/test_lpm6_perf.c
+++ b/test/test/test_lpm6_perf.c
@@ -86,7 +86,7 @@ test_lpm6_perf(void)
struct rte_lpm6_config config;
uint64_t begin, total_time;
unsigned i, j;
- uint8_t next_hop_add = 0xAA, next_hop_return = 0;
+ uint32_t next_hop_add = 0xAA, next_hop_return = 0;
int status = 0;
int64_t count = 0;
@@ -148,7 +148,7 @@ test_lpm6_perf(void)
count = 0;
uint8_t ip_batch[NUM_IPS_ENTRIES][16];
- int16_t next_hops[NUM_IPS_ENTRIES];
+ int32_t next_hops[NUM_IPS_ENTRIES];
for (i = 0; i < NUM_IPS_ENTRIES; i++)
memcpy(ip_batch[i], large_ips_table[i].ip, 16);
diff --git a/app/test/test_lpm_perf.c b/test/test/test_lpm_perf.c
index e7e1281b..e7e1281b 100644
--- a/app/test/test_lpm_perf.c
+++ b/test/test/test_lpm_perf.c
diff --git a/app/test/test_malloc.c b/test/test/test_malloc.c
index 0673d85b..0673d85b 100644
--- a/app/test/test_malloc.c
+++ b/test/test/test_malloc.c
diff --git a/app/test/test_mbuf.c b/test/test/test_mbuf.c
index c0823ea5..d3ea812e 100644
--- a/app/test/test_mbuf.c
+++ b/test/test/test_mbuf.c
@@ -692,7 +692,7 @@ test_refcnt_slave(__attribute__((unused)) void *arg)
while (refcnt_stop_slaves == 0) {
if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
free++;
- rte_pktmbuf_free((struct rte_mbuf *)mp);
+ rte_pktmbuf_free(mp);
}
}
@@ -930,6 +930,124 @@ test_failing_mbuf_sanity_check(void)
return 0;
}
+static int
+test_mbuf_linearize(int pkt_len, int nb_segs) {
+
+ struct rte_mbuf *m = NULL, *mbuf = NULL;
+ uint8_t *data;
+ int data_len = 0;
+ int remain;
+ int seg, seg_len;
+ int i;
+
+ if (pkt_len < 1) {
+ printf("Packet size must be 1 or more (is %d)\n", pkt_len);
+ return -1;
+ }
+
+ if (nb_segs < 1) {
+ printf("Number of segments must be 1 or more (is %d)\n",
+ nb_segs);
+ return -1;
+ }
+
+ seg_len = pkt_len / nb_segs;
+ if (seg_len == 0)
+ seg_len = 1;
+
+ remain = pkt_len;
+
+ /* Create chained mbuf_src and fill it generated data */
+ for (seg = 0; remain > 0; seg++) {
+
+ m = rte_pktmbuf_alloc(pktmbuf_pool);
+ if (m == NULL) {
+ printf("Cannot create segment for source mbuf");
+ goto fail;
+ }
+
+ /* Make sure if tailroom is zeroed */
+ memset(rte_pktmbuf_mtod(m, uint8_t *), 0,
+ rte_pktmbuf_tailroom(m));
+
+ data_len = remain;
+ if (data_len > seg_len)
+ data_len = seg_len;
+
+ data = (uint8_t *)rte_pktmbuf_append(m, data_len);
+ if (data == NULL) {
+ printf("Cannot append %d bytes to the mbuf\n",
+ data_len);
+ goto fail;
+ }
+
+ for (i = 0; i < data_len; i++)
+ data[i] = (seg * seg_len + i) % 0x0ff;
+
+ if (seg == 0)
+ mbuf = m;
+ else
+ rte_pktmbuf_chain(mbuf, m);
+
+ remain -= data_len;
+ }
+
+ /* Create destination buffer to store coalesced data */
+ if (rte_pktmbuf_linearize(mbuf)) {
+ printf("Mbuf linearization failed\n");
+ goto fail;
+ }
+
+ if (!rte_pktmbuf_is_contiguous(mbuf)) {
+ printf("Source buffer should be contiguous after "
+ "linearization\n");
+ goto fail;
+ }
+
+ data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+
+ for (i = 0; i < pkt_len; i++)
+ if (data[i] != (i % 0x0ff)) {
+ printf("Incorrect data in linearized mbuf\n");
+ goto fail;
+ }
+
+ rte_pktmbuf_free(mbuf);
+ return 0;
+
+fail:
+ if (mbuf)
+ rte_pktmbuf_free(mbuf);
+ return -1;
+}
+
+static int
+test_mbuf_linearize_check(void)
+{
+ struct test_mbuf_array {
+ int size;
+ int nb_segs;
+ } mbuf_array[] = {
+ { 128, 1 },
+ { 64, 64 },
+ { 512, 10 },
+ { 250, 11 },
+ { 123, 8 },
+ };
+ unsigned int i;
+
+ printf("Test mbuf linearize API\n");
+
+ for (i = 0; i < RTE_DIM(mbuf_array); i++)
+ if (test_mbuf_linearize(mbuf_array[i].size,
+ mbuf_array[i].nb_segs)) {
+ printf("Test failed for %d, %d\n", mbuf_array[i].size,
+ mbuf_array[i].nb_segs);
+ return -1;
+ }
+
+ return 0;
+}
static int
test_mbuf(void)
@@ -1023,6 +1141,11 @@ test_mbuf(void)
printf("test_failing_mbuf_sanity_check() failed\n");
return -1;
}
+
+ if (test_mbuf_linearize_check() < 0) {
+ printf("test_mbuf_linearize_check() failed\n");
+ return -1;
+ }
return 0;
}
diff --git a/app/test/test_memcpy.c b/test/test/test_memcpy.c
index 1d93dd53..1d93dd53 100644
--- a/app/test/test_memcpy.c
+++ b/test/test/test_memcpy.c
diff --git a/app/test/test_memcpy_perf.c b/test/test/test_memcpy_perf.c
index ff3aaaac..ff3aaaac 100644
--- a/app/test/test_memcpy_perf.c
+++ b/test/test/test_memcpy_perf.c
diff --git a/app/test/test_memory.c b/test/test/test_memory.c
index 921bdc88..921bdc88 100644
--- a/app/test/test_memory.c
+++ b/test/test/test_memory.c
diff --git a/app/test/test_mempool.c b/test/test/test_mempool.c
index b9880b32..0a442395 100644
--- a/app/test/test_mempool.c
+++ b/test/test/test_mempool.c
@@ -509,9 +509,11 @@ walk_cb(struct rte_mempool *mp, void *userdata __rte_unused)
static int
test_mempool(void)
{
+ int ret = -1;
struct rte_mempool *mp_cache = NULL;
struct rte_mempool *mp_nocache = NULL;
struct rte_mempool *mp_stack = NULL;
+ struct rte_mempool *default_pool = NULL;
rte_atomic32_init(&synchro);
@@ -561,6 +563,32 @@ test_mempool(void)
}
rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);
+ /* Create a mempool based on Default handler */
+ printf("Testing %s mempool handler\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ default_pool = rte_mempool_create_empty("default_pool",
+ MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+ SOCKET_ID_ANY, 0);
+
+ if (default_pool == NULL) {
+ printf("cannot allocate default mempool\n");
+ goto err;
+ }
+ if (rte_mempool_set_ops_byname(default_pool,
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL) < 0) {
+ printf("cannot set %s handler\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+ if (rte_mempool_populate_default(default_pool) < 0) {
+ printf("cannot populate %s mempool\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+ rte_mempool_obj_iter(default_pool, my_obj_init, NULL);
+
/* retrieve the mempool from its name */
if (rte_mempool_lookup("test_nocache") != mp_nocache) {
printf("Cannot lookup mempool from its name\n");
@@ -605,15 +633,20 @@ test_mempool(void)
if (test_mempool_basic(mp_stack, 1) < 0)
goto err;
+ if (test_mempool_basic(default_pool, 1) < 0)
+ goto err;
+
rte_mempool_list_dump(stdout);
- return 0;
+ ret = 0;
err:
rte_mempool_free(mp_nocache);
rte_mempool_free(mp_cache);
rte_mempool_free(mp_stack);
- return -1;
+ rte_mempool_free(default_pool);
+
+ return ret;
}
REGISTER_TEST_COMMAND(mempool_autotest, test_mempool);
diff --git a/app/test/test_mempool_perf.c b/test/test/test_mempool_perf.c
index ebf1721a..07b28c06 100644
--- a/app/test/test_mempool_perf.c
+++ b/test/test/test_mempool_perf.c
@@ -109,8 +109,6 @@
goto label; \
} while (0)
-static struct rte_mempool *mp;
-static struct rte_mempool *mp_cache, *mp_nocache;
static int use_external_cache;
static unsigned external_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
@@ -144,10 +142,11 @@ my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
}
static int
-per_lcore_mempool_test(__attribute__((unused)) void *arg)
+per_lcore_mempool_test(void *arg)
{
void *obj_table[MAX_KEEP];
unsigned i, idx;
+ struct rte_mempool *mp = arg;
unsigned lcore_id = rte_lcore_id();
int ret = 0;
uint64_t start_cycles, end_cycles;
@@ -221,7 +220,7 @@ out:
/* launch all the per-lcore test, and display the result */
static int
-launch_cores(unsigned cores)
+launch_cores(struct rte_mempool *mp, unsigned int cores)
{
unsigned lcore_id;
uint64_t rate;
@@ -249,13 +248,13 @@ launch_cores(unsigned cores)
break;
cores--;
rte_eal_remote_launch(per_lcore_mempool_test,
- NULL, lcore_id);
+ mp, lcore_id);
}
/* start synchro and launch test on master */
rte_atomic32_set(&synchro, 1);
- ret = per_lcore_mempool_test(NULL);
+ ret = per_lcore_mempool_test(mp);
cores = cores_save;
RTE_LCORE_FOREACH_SLAVE(lcore_id) {
@@ -282,7 +281,7 @@ launch_cores(unsigned cores)
/* for a given number of core, launch all test cases */
static int
-do_one_mempool_test(unsigned cores)
+do_one_mempool_test(struct rte_mempool *mp, unsigned int cores)
{
unsigned bulk_tab_get[] = { 1, 4, 32, 0 };
unsigned bulk_tab_put[] = { 1, 4, 32, 0 };
@@ -299,7 +298,7 @@ do_one_mempool_test(unsigned cores)
n_get_bulk = *get_bulk_ptr;
n_put_bulk = *put_bulk_ptr;
n_keep = *keep_ptr;
- ret = launch_cores(cores);
+ ret = launch_cores(mp, cores);
if (ret < 0)
return -1;
@@ -312,72 +311,119 @@ do_one_mempool_test(unsigned cores)
static int
test_mempool_perf(void)
{
+ struct rte_mempool *mp_cache = NULL;
+ struct rte_mempool *mp_nocache = NULL;
+ struct rte_mempool *default_pool = NULL;
+ int ret = -1;
+
rte_atomic32_init(&synchro);
/* create a mempool (without cache) */
+ mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE, 0, 0,
+ NULL, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY, 0);
if (mp_nocache == NULL)
- mp_nocache = rte_mempool_create("perf_test_nocache", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE, 0, 0,
- NULL, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, 0);
- if (mp_nocache == NULL)
- return -1;
+ goto err;
/* create a mempool (with cache) */
+ mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+ NULL, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY, 0);
if (mp_cache == NULL)
- mp_cache = rte_mempool_create("perf_test_cache", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE,
- RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
- NULL, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, 0);
- if (mp_cache == NULL)
- return -1;
+ goto err;
+
+ /* Create a mempool based on Default handler */
+ default_pool = rte_mempool_create_empty("default_pool",
+ MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ 0, 0,
+ SOCKET_ID_ANY, 0);
+
+ if (default_pool == NULL) {
+ printf("cannot allocate %s mempool\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+
+ if (rte_mempool_set_ops_byname(default_pool,
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL)
+ < 0) {
+ printf("cannot set %s handler\n", RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+
+ if (rte_mempool_populate_default(default_pool) < 0) {
+ printf("cannot populate %s mempool\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+ goto err;
+ }
+
+ rte_mempool_obj_iter(default_pool, my_obj_init, NULL);
/* performance test with 1, 2 and max cores */
printf("start performance test (without cache)\n");
- mp = mp_nocache;
- if (do_one_mempool_test(1) < 0)
- return -1;
+ if (do_one_mempool_test(mp_nocache, 1) < 0)
+ goto err;
- if (do_one_mempool_test(2) < 0)
- return -1;
+ if (do_one_mempool_test(mp_nocache, 2) < 0)
+ goto err;
- if (do_one_mempool_test(rte_lcore_count()) < 0)
- return -1;
+ if (do_one_mempool_test(mp_nocache, rte_lcore_count()) < 0)
+ goto err;
+
+ /* performance test with 1, 2 and max cores */
+ printf("start performance test for %s (without cache)\n",
+ RTE_MBUF_DEFAULT_MEMPOOL_OPS);
+
+ if (do_one_mempool_test(default_pool, 1) < 0)
+ goto err;
+
+ if (do_one_mempool_test(default_pool, 2) < 0)
+ goto err;
+
+ if (do_one_mempool_test(default_pool, rte_lcore_count()) < 0)
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (with cache)\n");
- mp = mp_cache;
- if (do_one_mempool_test(1) < 0)
- return -1;
+ if (do_one_mempool_test(mp_cache, 1) < 0)
+ goto err;
- if (do_one_mempool_test(2) < 0)
- return -1;
+ if (do_one_mempool_test(mp_cache, 2) < 0)
+ goto err;
- if (do_one_mempool_test(rte_lcore_count()) < 0)
- return -1;
+ if (do_one_mempool_test(mp_cache, rte_lcore_count()) < 0)
+ goto err;
/* performance test with 1, 2 and max cores */
printf("start performance test (with user-owned cache)\n");
- mp = mp_nocache;
use_external_cache = 1;
- if (do_one_mempool_test(1) < 0)
- return -1;
+ if (do_one_mempool_test(mp_nocache, 1) < 0)
+ goto err;
- if (do_one_mempool_test(2) < 0)
- return -1;
+ if (do_one_mempool_test(mp_nocache, 2) < 0)
+ goto err;
- if (do_one_mempool_test(rte_lcore_count()) < 0)
- return -1;
+ if (do_one_mempool_test(mp_nocache, rte_lcore_count()) < 0)
+ goto err;
rte_mempool_list_dump(stdout);
- return 0;
+ ret = 0;
+
+err:
+ rte_mempool_free(mp_cache);
+ rte_mempool_free(mp_nocache);
+ rte_mempool_free(default_pool);
+ return ret;
}
REGISTER_TEST_COMMAND(mempool_perf_autotest, test_mempool_perf);
diff --git a/app/test/test_memzone.c b/test/test/test_memzone.c
index 7ae31cf7..7ae31cf7 100644
--- a/app/test/test_memzone.c
+++ b/test/test/test_memzone.c
diff --git a/app/test/test_meter.c b/test/test/test_meter.c
index 26b05657..26b05657 100644
--- a/app/test/test_meter.c
+++ b/test/test/test_meter.c
diff --git a/app/test/test_mp_secondary.c b/test/test/test_mp_secondary.c
index 26c4afd6..94042e57 100644
--- a/app/test/test_mp_secondary.c
+++ b/test/test/test_mp_secondary.c
@@ -245,16 +245,6 @@ run_object_creation_tests(void)
printf("# Checked rte_lpm_create() OK\n");
#endif
-#ifdef RTE_APP_TEST_RESOURCE_TAR
- /* Run a test_pci call */
- if (test_pci() != 0) {
- printf("PCI scan failed in secondary\n");
- if (getuid() == 0) /* pci scans can fail as non-root */
- return -1;
- } else
- printf("PCI scan succeeded in secondary\n");
-#endif
-
return 0;
}
@@ -267,13 +257,6 @@ int
test_mp_secondary(void)
{
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- if (!test_pci_run) {
-#ifdef RTE_APP_TEST_RESOURCE_TAR
- printf("=== Running pre-requisite test of test_pci\n");
- test_pci();
- printf("=== Requisite test done\n");
-#endif
- }
return run_secondary_instances();
}
diff --git a/app/test/test_per_lcore.c b/test/test/test_per_lcore.c
index 747513d4..747513d4 100644
--- a/app/test/test_per_lcore.c
+++ b/test/test/test_per_lcore.c
diff --git a/app/test/test_pmd_perf.c b/test/test/test_pmd_perf.c
index e055aa07..1ffd65a5 100644
--- a/app/test/test_pmd_perf.c
+++ b/test/test/test_pmd_perf.c
@@ -100,7 +100,7 @@ static struct rte_eth_conf port_conf = {
.hw_vlan_strip = 0, /**< VLAN strip enabled. */
.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .hw_strip_crc = 1, /**< CRC stripped by hardware */
.enable_scatter = 0, /**< scatter rx disabled */
},
.txmode = {
diff --git a/app/test/test_pmd_ring.c b/test/test/test_pmd_ring.c
index 2cdf60d1..2cdf60d1 100644
--- a/app/test/test_pmd_ring.c
+++ b/test/test/test_pmd_ring.c
diff --git a/app/test/test_pmd_ring_perf.c b/test/test/test_pmd_ring_perf.c
index af011f7d..004882af 100644
--- a/app/test/test_pmd_ring_perf.c
+++ b/test/test/test_pmd_ring_perf.c
@@ -67,7 +67,7 @@ test_empty_dequeue(void)
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t sc_end = rte_rdtsc();
const uint64_t eth_start = rte_rdtsc();
@@ -98,8 +98,8 @@ test_single_enqueue_dequeue(void)
const uint64_t sc_start = rte_rdtsc_precise();
rte_compiler_barrier();
for (i = 0; i < iterations; i++) {
- rte_ring_enqueue_bulk(r, &burst, 1);
- rte_ring_dequeue_bulk(r, &burst, 1);
+ rte_ring_enqueue_bulk(r, &burst, 1, NULL);
+ rte_ring_dequeue_bulk(r, &burst, 1, NULL);
}
const uint64_t sc_end = rte_rdtsc_precise();
rte_compiler_barrier();
@@ -131,8 +131,10 @@ test_bulk_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_bulk(r, (void *)burst, bulk_sizes[sz]);
- rte_ring_sc_dequeue_bulk(r, (void *)burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_bulk(r, (void *)burst,
+ bulk_sizes[sz], NULL);
+ rte_ring_sc_dequeue_bulk(r, (void *)burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();
diff --git a/app/test/test_power.c b/test/test/test_power.c
index b2e1344c..b2e1344c 100644
--- a/app/test/test_power.c
+++ b/test/test/test_power.c
diff --git a/app/test/test_power_acpi_cpufreq.c b/test/test/test_power_acpi_cpufreq.c
index 64f5dd56..64f5dd56 100644
--- a/app/test/test_power_acpi_cpufreq.c
+++ b/test/test/test_power_acpi_cpufreq.c
diff --git a/app/test/test_power_kvm_vm.c b/test/test/test_power_kvm_vm.c
index 253a5f8b..253a5f8b 100644
--- a/app/test/test_power_kvm_vm.c
+++ b/test/test/test_power_kvm_vm.c
diff --git a/app/test/test_prefetch.c b/test/test/test_prefetch.c
index 80afaaf3..80afaaf3 100644
--- a/app/test/test_prefetch.c
+++ b/test/test/test_prefetch.c
diff --git a/app/test/test_red.c b/test/test/test_red.c
index 348075dc..348075dc 100644
--- a/app/test/test_red.c
+++ b/test/test/test_red.c
diff --git a/app/test/test_reorder.c b/test/test/test_reorder.c
index e8a0a2f2..e8a0a2f2 100644
--- a/app/test/test_reorder.c
+++ b/test/test/test_reorder.c
diff --git a/app/test/test_resource.c b/test/test/test_resource.c
index a3a82f13..a3a82f13 100644
--- a/app/test/test_resource.c
+++ b/test/test/test_resource.c
diff --git a/test/test/test_ring.c b/test/test/test_ring.c
new file mode 100644
index 00000000..858ebc1d
--- /dev/null
+++ b/test/test/test_ring.c
@@ -0,0 +1,829 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_cycles.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_random.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_hexdump.h>
+
+#include "test.h"
+
+/*
+ * Ring
+ * ====
+ *
+ * #. Basic tests: done on one core:
+ *
+ * - Using single producer/single consumer functions:
+ *
+ * - Enqueue one object, two objects, MAX_BULK objects
+ * - Dequeue one object, two objects, MAX_BULK objects
+ * - Check that dequeued pointers are correct
+ *
+ * - Using multi producers/multi consumers functions:
+ *
+ * - Enqueue one object, two objects, MAX_BULK objects
+ * - Dequeue one object, two objects, MAX_BULK objects
+ * - Check that dequeued pointers are correct
+ *
+ * #. Performance tests.
+ *
+ * Tests done in test_ring_perf.c
+ */
+
+#define RING_SIZE 4096
+#define MAX_BULK 32
+
+static rte_atomic32_t synchro;
+
+static struct rte_ring *r;
+
+#define TEST_RING_VERIFY(exp) \
+ if (!(exp)) { \
+ printf("error at %s:%d\tcondition " #exp " failed\n", \
+ __func__, __LINE__); \
+ rte_ring_dump(stdout, r); \
+ return -1; \
+ }
+
+#define TEST_RING_FULL_EMTPY_ITER 8
+
+/*
+ * helper routine for test_ring_basic
+ */
+static int
+test_ring_basic_full_empty(void * const src[], void *dst[])
+{
+ unsigned i, rand;
+ const unsigned rsz = RING_SIZE - 1;
+
+ printf("Basic full/empty test\n");
+
+ for (i = 0; TEST_RING_FULL_EMTPY_ITER != i; i++) {
+
+ /* random shift in the ring */
+ rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
+ printf("%s: iteration %u, random shift: %u;\n",
+ __func__, i, rand);
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rand,
+ NULL) != 0);
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rand,
+ NULL) == rand);
+
+ /* fill the ring */
+ TEST_RING_VERIFY(rte_ring_enqueue_bulk(r, src, rsz, NULL) != 0);
+ TEST_RING_VERIFY(0 == rte_ring_free_count(r));
+ TEST_RING_VERIFY(rsz == rte_ring_count(r));
+ TEST_RING_VERIFY(rte_ring_full(r));
+ TEST_RING_VERIFY(0 == rte_ring_empty(r));
+
+ /* empty the ring */
+ TEST_RING_VERIFY(rte_ring_dequeue_bulk(r, dst, rsz,
+ NULL) == rsz);
+ TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
+ TEST_RING_VERIFY(0 == rte_ring_count(r));
+ TEST_RING_VERIFY(0 == rte_ring_full(r));
+ TEST_RING_VERIFY(rte_ring_empty(r));
+
+ /* check data */
+ TEST_RING_VERIFY(0 == memcmp(src, dst, rsz));
+ rte_ring_dump(stdout, r);
+ }
+ return 0;
+}
+
+static int
+test_ring_basic(void)
+{
+ void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
+ int ret;
+ unsigned i, num_elems;
+
+ /* alloc dummy object pointers */
+ src = malloc(RING_SIZE*2*sizeof(void *));
+ if (src == NULL)
+ goto fail;
+
+ for (i = 0; i < RING_SIZE*2 ; i++) {
+ src[i] = (void *)(unsigned long)i;
+ }
+ cur_src = src;
+
+ /* alloc some room for copied objects */
+ dst = malloc(RING_SIZE*2*sizeof(void *));
+ if (dst == NULL)
+ goto fail;
+
+ memset(dst, 0, RING_SIZE*2*sizeof(void *));
+ cur_dst = dst;
+
+ printf("enqueue 1 obj\n");
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1, NULL);
+ cur_src += 1;
+ if (ret == 0)
+ goto fail;
+
+ printf("enqueue 2 objs\n");
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2, NULL);
+ cur_src += 2;
+ if (ret == 0)
+ goto fail;
+
+ printf("enqueue MAX_BULK objs\n");
+ ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
+ cur_src += MAX_BULK;
+ if (ret == 0)
+ goto fail;
+
+ printf("dequeue 1 obj\n");
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1, NULL);
+ cur_dst += 1;
+ if (ret == 0)
+ goto fail;
+
+ printf("dequeue 2 objs\n");
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2, NULL);
+ cur_dst += 2;
+ if (ret == 0)
+ goto fail;
+
+ printf("dequeue MAX_BULK objs\n");
+ ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
+ cur_dst += MAX_BULK;
+ if (ret == 0)
+ goto fail;
+
+ /* check data */
+ if (memcmp(src, dst, cur_dst - dst)) {
+ rte_hexdump(stdout, "src", src, cur_src - src);
+ rte_hexdump(stdout, "dst", dst, cur_dst - dst);
+ printf("data after dequeue is not the same\n");
+ goto fail;
+ }
+ cur_src = src;
+ cur_dst = dst;
+
+ printf("enqueue 1 obj\n");
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1, NULL);
+ cur_src += 1;
+ if (ret == 0)
+ goto fail;
+
+ printf("enqueue 2 objs\n");
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2, NULL);
+ cur_src += 2;
+ if (ret == 0)
+ goto fail;
+
+ printf("enqueue MAX_BULK objs\n");
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
+ cur_src += MAX_BULK;
+ if (ret == 0)
+ goto fail;
+
+ printf("dequeue 1 obj\n");
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1, NULL);
+ cur_dst += 1;
+ if (ret == 0)
+ goto fail;
+
+ printf("dequeue 2 objs\n");
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2, NULL);
+ cur_dst += 2;
+ if (ret == 0)
+ goto fail;
+
+ printf("dequeue MAX_BULK objs\n");
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
+ cur_dst += MAX_BULK;
+ if (ret == 0)
+ goto fail;
+
+ /* check data */
+ if (memcmp(src, dst, cur_dst - dst)) {
+ rte_hexdump(stdout, "src", src, cur_src - src);
+ rte_hexdump(stdout, "dst", dst, cur_dst - dst);
+ printf("data after dequeue is not the same\n");
+ goto fail;
+ }
+ cur_src = src;
+ cur_dst = dst;
+
+ printf("fill and empty the ring\n");
+ for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
+ ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK, NULL);
+ cur_src += MAX_BULK;
+ if (ret == 0)
+ goto fail;
+ ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK, NULL);
+ cur_dst += MAX_BULK;
+ if (ret == 0)
+ goto fail;
+ }
+
+ /* check data */
+ if (memcmp(src, dst, cur_dst - dst)) {
+ rte_hexdump(stdout, "src", src, cur_src - src);
+ rte_hexdump(stdout, "dst", dst, cur_dst - dst);
+ printf("data after dequeue is not the same\n");
+ goto fail;
+ }
+
+ if (test_ring_basic_full_empty(src, dst) != 0)
+ goto fail;
+
+ cur_src = src;
+ cur_dst = dst;
+
+ printf("test default bulk enqueue / dequeue\n");
+ num_elems = 16;
+
+ cur_src = src;
+ cur_dst = dst;
+
+ ret = rte_ring_enqueue_bulk(r, cur_src, num_elems, NULL);
+ cur_src += num_elems;
+ if (ret == 0) {
+ printf("Cannot enqueue\n");
+ goto fail;
+ }
+ ret = rte_ring_enqueue_bulk(r, cur_src, num_elems, NULL);
+ cur_src += num_elems;
+ if (ret == 0) {
+ printf("Cannot enqueue\n");
+ goto fail;
+ }
+ ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems, NULL);
+ cur_dst += num_elems;
+ if (ret == 0) {
+ printf("Cannot dequeue\n");
+ goto fail;
+ }
+ ret = rte_ring_dequeue_bulk(r, cur_dst, num_elems, NULL);
+ cur_dst += num_elems;
+ if (ret == 0) {
+ printf("Cannot dequeue2\n");
+ goto fail;
+ }
+
+ /* check data */
+ if (memcmp(src, dst, cur_dst - dst)) {
+ rte_hexdump(stdout, "src", src, cur_src - src);
+ rte_hexdump(stdout, "dst", dst, cur_dst - dst);
+ printf("data after dequeue is not the same\n");
+ goto fail;
+ }
+
+ cur_src = src;
+ cur_dst = dst;
+
+ ret = rte_ring_mp_enqueue(r, cur_src);
+ if (ret != 0)
+ goto fail;
+
+ ret = rte_ring_mc_dequeue(r, cur_dst);
+ if (ret != 0)
+ goto fail;
+
+ free(src);
+ free(dst);
+ return 0;
+
+ fail:
+ free(src);
+ free(dst);
+ return -1;
+}
+
+static int
+test_ring_burst_basic(void)
+{
+ void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
+ int ret;
+ unsigned i;
+
+ /* alloc dummy object pointers */
+ src = malloc(RING_SIZE*2*sizeof(void *));
+ if (src == NULL)
+ goto fail;
+
+ for (i = 0; i < RING_SIZE*2 ; i++) {
+ src[i] = (void *)(unsigned long)i;
+ }
+ cur_src = src;
+
+ /* alloc some room for copied objects */
+ dst = malloc(RING_SIZE*2*sizeof(void *));
+ if (dst == NULL)
+ goto fail;
+
+ memset(dst, 0, RING_SIZE*2*sizeof(void *));
+ cur_dst = dst;
+
+ printf("Test SP & SC basic functions \n");
+ printf("enqueue 1 obj\n");
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 1, NULL);
+ cur_src += 1;
+ if ((ret & RTE_RING_SZ_MASK) != 1)
+ goto fail;
+
+ printf("enqueue 2 objs\n");
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
+ cur_src += 2;
+ if ((ret & RTE_RING_SZ_MASK) != 2)
+ goto fail;
+
+ printf("enqueue MAX_BULK objs\n");
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
+ cur_src += MAX_BULK;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ goto fail;
+
+ printf("dequeue 1 obj\n");
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 1, NULL);
+ cur_dst += 1;
+ if ((ret & RTE_RING_SZ_MASK) != 1)
+ goto fail;
+
+ printf("dequeue 2 objs\n");
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
+ cur_dst += 2;
+ if ((ret & RTE_RING_SZ_MASK) != 2)
+ goto fail;
+
+ printf("dequeue MAX_BULK objs\n");
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
+ cur_dst += MAX_BULK;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ goto fail;
+
+ /* check data */
+ if (memcmp(src, dst, cur_dst - dst)) {
+ rte_hexdump(stdout, "src", src, cur_src - src);
+ rte_hexdump(stdout, "dst", dst, cur_dst - dst);
+ printf("data after dequeue is not the same\n");
+ goto fail;
+ }
+
+ cur_src = src;
+ cur_dst = dst;
+
+ printf("Test enqueue without enough memory space \n");
+ for (i = 0; i< (RING_SIZE/MAX_BULK - 1); i++) {
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
+ cur_src += MAX_BULK;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK) {
+ goto fail;
+ }
+ }
+
+ printf("Enqueue 2 objects, free entries = MAX_BULK - 2 \n");
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, 2, NULL);
+ cur_src += 2;
+ if ((ret & RTE_RING_SZ_MASK) != 2)
+ goto fail;
+
+ printf("Enqueue the remaining entries = MAX_BULK - 2 \n");
+ /* Always one free entry left */
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
+ cur_src += MAX_BULK - 3;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
+ goto fail;
+
+ printf("Test if ring is full \n");
+ if (rte_ring_full(r) != 1)
+ goto fail;
+
+ printf("Test enqueue for a full entry \n");
+ ret = rte_ring_sp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
+ if ((ret & RTE_RING_SZ_MASK) != 0)
+ goto fail;
+
+ printf("Test dequeue without enough objects \n");
+ for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
+ cur_dst += MAX_BULK;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ goto fail;
+ }
+
+ /* Available memory space for the exact MAX_BULK entries */
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, 2, NULL);
+ cur_dst += 2;
+ if ((ret & RTE_RING_SZ_MASK) != 2)
+ goto fail;
+
+ ret = rte_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
+ cur_dst += MAX_BULK - 3;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
+ goto fail;
+
+ printf("Test if ring is empty \n");
+ /* Check if ring is empty */
+ if (1 != rte_ring_empty(r))
+ goto fail;
+
+ /* check data */
+ if (memcmp(src, dst, cur_dst - dst)) {
+ rte_hexdump(stdout, "src", src, cur_src - src);
+ rte_hexdump(stdout, "dst", dst, cur_dst - dst);
+ printf("data after dequeue is not the same\n");
+ goto fail;
+ }
+
+ cur_src = src;
+ cur_dst = dst;
+
+ printf("Test MP & MC basic functions \n");
+
+ printf("enqueue 1 obj\n");
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 1, NULL);
+ cur_src += 1;
+ if ((ret & RTE_RING_SZ_MASK) != 1)
+ goto fail;
+
+ printf("enqueue 2 objs\n");
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
+ cur_src += 2;
+ if ((ret & RTE_RING_SZ_MASK) != 2)
+ goto fail;
+
+ printf("enqueue MAX_BULK objs\n");
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
+ cur_src += MAX_BULK;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ goto fail;
+
+ printf("dequeue 1 obj\n");
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 1, NULL);
+ cur_dst += 1;
+ if ((ret & RTE_RING_SZ_MASK) != 1)
+ goto fail;
+
+ printf("dequeue 2 objs\n");
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
+ cur_dst += 2;
+ if ((ret & RTE_RING_SZ_MASK) != 2)
+ goto fail;
+
+ printf("dequeue MAX_BULK objs\n");
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
+ cur_dst += MAX_BULK;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ goto fail;
+
+ /* check data */
+ if (memcmp(src, dst, cur_dst - dst)) {
+ rte_hexdump(stdout, "src", src, cur_src - src);
+ rte_hexdump(stdout, "dst", dst, cur_dst - dst);
+ printf("data after dequeue is not the same\n");
+ goto fail;
+ }
+
+ cur_src = src;
+ cur_dst = dst;
+
+ printf("fill and empty the ring\n");
+ for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
+ cur_src += MAX_BULK;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ goto fail;
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
+ cur_dst += MAX_BULK;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ goto fail;
+ }
+
+ /* check data */
+ if (memcmp(src, dst, cur_dst - dst)) {
+ rte_hexdump(stdout, "src", src, cur_src - src);
+ rte_hexdump(stdout, "dst", dst, cur_dst - dst);
+ printf("data after dequeue is not the same\n");
+ goto fail;
+ }
+
+ cur_src = src;
+ cur_dst = dst;
+
+ printf("Test enqueue without enough memory space \n");
+ for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
+ cur_src += MAX_BULK;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ goto fail;
+ }
+
+ /* Available memory space for the exact MAX_BULK objects */
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, 2, NULL);
+ cur_src += 2;
+ if ((ret & RTE_RING_SZ_MASK) != 2)
+ goto fail;
+
+ ret = rte_ring_mp_enqueue_burst(r, cur_src, MAX_BULK, NULL);
+ cur_src += MAX_BULK - 3;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
+ goto fail;
+
+
+ printf("Test dequeue without enough objects \n");
+ for (i = 0; i<RING_SIZE/MAX_BULK - 1; i++) {
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
+ cur_dst += MAX_BULK;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK)
+ goto fail;
+ }
+
+ /* Available objects - the exact MAX_BULK */
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, 2, NULL);
+ cur_dst += 2;
+ if ((ret & RTE_RING_SZ_MASK) != 2)
+ goto fail;
+
+ ret = rte_ring_mc_dequeue_burst(r, cur_dst, MAX_BULK, NULL);
+ cur_dst += MAX_BULK - 3;
+ if ((ret & RTE_RING_SZ_MASK) != MAX_BULK - 3)
+ goto fail;
+
+ /* check data */
+ if (memcmp(src, dst, cur_dst - dst)) {
+ rte_hexdump(stdout, "src", src, cur_src - src);
+ rte_hexdump(stdout, "dst", dst, cur_dst - dst);
+ printf("data after dequeue is not the same\n");
+ goto fail;
+ }
+
+ cur_src = src;
+ cur_dst = dst;
+
+ printf("Covering rte_ring_enqueue_burst functions \n");
+
+ ret = rte_ring_enqueue_burst(r, cur_src, 2, NULL);
+ cur_src += 2;
+ if ((ret & RTE_RING_SZ_MASK) != 2)
+ goto fail;
+
+ ret = rte_ring_dequeue_burst(r, cur_dst, 2, NULL);
+ cur_dst += 2;
+ if (ret != 2)
+ goto fail;
+
+ /* Free memory before test completed */
+ free(src);
+ free(dst);
+ return 0;
+
+ fail:
+ free(src);
+ free(dst);
+ return -1;
+}
+
+/*
+ * it will always fail to create ring with a wrong ring size number in this function
+ */
+static int
+test_ring_creation_with_wrong_size(void)
+{
+ struct rte_ring * rp = NULL;
+
+ /* Test if ring size is not power of 2 */
+ rp = rte_ring_create("test_bad_ring_size", RING_SIZE + 1, SOCKET_ID_ANY, 0);
+ if (NULL != rp) {
+ return -1;
+ }
+
+ /* Test if ring size is exceeding the limit */
+ rp = rte_ring_create("test_bad_ring_size", (RTE_RING_SZ_MASK + 1), SOCKET_ID_ANY, 0);
+ if (NULL != rp) {
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * it tests if it would always fail to create ring with an used ring name
+ */
+static int
+test_ring_creation_with_an_used_name(void)
+{
+ struct rte_ring * rp;
+
+ rp = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
+ if (NULL != rp)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Test to if a non-power of 2 count causes the create
+ * function to fail correctly
+ */
+static int
+test_create_count_odd(void)
+{
+ struct rte_ring *r = rte_ring_create("test_ring_count",
+ 4097, SOCKET_ID_ANY, 0 );
+ if(r != NULL){
+ return -1;
+ }
+ return 0;
+}
+
+static int
+test_lookup_null(void)
+{
+ struct rte_ring *rlp = rte_ring_lookup("ring_not_found");
+ if (rlp ==NULL)
+ if (rte_errno != ENOENT){
+ printf( "test failed to returnn error on null pointer\n");
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * it tests some more basic ring operations
+ */
+static int
+test_ring_basic_ex(void)
+{
+ int ret = -1;
+ unsigned i;
+ struct rte_ring * rp;
+ void **obj = NULL;
+
+ obj = rte_calloc("test_ring_basic_ex_malloc", RING_SIZE, sizeof(void *), 0);
+ if (obj == NULL) {
+ printf("test_ring_basic_ex fail to rte_malloc\n");
+ goto fail_test;
+ }
+
+ rp = rte_ring_create("test_ring_basic_ex", RING_SIZE, SOCKET_ID_ANY,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (rp == NULL) {
+ printf("test_ring_basic_ex fail to create ring\n");
+ goto fail_test;
+ }
+
+ if (rte_ring_lookup("test_ring_basic_ex") != rp) {
+ goto fail_test;
+ }
+
+ if (rte_ring_empty(rp) != 1) {
+ printf("test_ring_basic_ex ring is not empty but it should be\n");
+ goto fail_test;
+ }
+
+ printf("%u ring entries are now free\n", rte_ring_free_count(rp));
+
+ for (i = 0; i < RING_SIZE; i ++) {
+ rte_ring_enqueue(rp, obj[i]);
+ }
+
+ if (rte_ring_full(rp) != 1) {
+ printf("test_ring_basic_ex ring is not full but it should be\n");
+ goto fail_test;
+ }
+
+ for (i = 0; i < RING_SIZE; i ++) {
+ rte_ring_dequeue(rp, &obj[i]);
+ }
+
+ if (rte_ring_empty(rp) != 1) {
+ printf("test_ring_basic_ex ring is not empty but it should be\n");
+ goto fail_test;
+ }
+
+ /* Covering the ring burst operation */
+ ret = rte_ring_enqueue_burst(rp, obj, 2, NULL);
+ if ((ret & RTE_RING_SZ_MASK) != 2) {
+ printf("test_ring_basic_ex: rte_ring_enqueue_burst fails \n");
+ goto fail_test;
+ }
+
+ ret = rte_ring_dequeue_burst(rp, obj, 2, NULL);
+ if (ret != 2) {
+ printf("test_ring_basic_ex: rte_ring_dequeue_burst fails \n");
+ goto fail_test;
+ }
+
+ ret = 0;
+fail_test:
+ if (obj != NULL)
+ rte_free(obj);
+
+ return ret;
+}
+
+static int
+test_ring(void)
+{
+ /* some more basic operations */
+ if (test_ring_basic_ex() < 0)
+ return -1;
+
+ rte_atomic32_init(&synchro);
+
+ if (r == NULL)
+ r = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
+ if (r == NULL)
+ return -1;
+
+ /* retrieve the ring from its name */
+ if (rte_ring_lookup("test") != r) {
+ printf("Cannot lookup ring from its name\n");
+ return -1;
+ }
+
+ /* burst operations */
+ if (test_ring_burst_basic() < 0)
+ return -1;
+
+ /* basic operations */
+ if (test_ring_basic() < 0)
+ return -1;
+
+ /* basic operations */
+ if ( test_create_count_odd() < 0){
+ printf ("Test failed to detect odd count\n");
+ return -1;
+ }
+ else
+ printf ( "Test detected odd count\n");
+
+ if ( test_lookup_null() < 0){
+ printf ("Test failed to detect NULL ring lookup\n");
+ return -1;
+ }
+ else
+ printf ( "Test detected NULL ring lookup \n");
+
+ /* test of creating ring with wrong size */
+ if (test_ring_creation_with_wrong_size() < 0)
+ return -1;
+
+ /* test of creation ring with an used name */
+ if (test_ring_creation_with_an_used_name() < 0)
+ return -1;
+
+ /* dump the ring status */
+ rte_ring_list_dump(stdout);
+
+ return 0;
+}
+
+REGISTER_TEST_COMMAND(ring_autotest, test_ring);
diff --git a/app/test/test_ring_perf.c b/test/test/test_ring_perf.c
index 320c20cd..ed89896b 100644
--- a/app/test/test_ring_perf.c
+++ b/test/test/test_ring_perf.c
@@ -152,12 +152,12 @@ test_empty_dequeue(void)
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t sc_end = rte_rdtsc();
const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0]);
+ rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[0], NULL);
const uint64_t mc_end = rte_rdtsc();
printf("SC empty dequeue: %.2F\n",
@@ -195,13 +195,13 @@ enqueue_bulk(void *p)
const uint64_t sp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_sp_enqueue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t sp_end = rte_rdtsc();
const uint64_t mp_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mp_enqueue_bulk(r, burst, size) != 0)
+ while (rte_ring_mp_enqueue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t mp_end = rte_rdtsc();
@@ -230,13 +230,13 @@ dequeue_bulk(void *p)
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_sc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_sc_dequeue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t sc_end = rte_rdtsc();
const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++)
- while (rte_ring_mc_dequeue_bulk(r, burst, size) != 0)
+ while (rte_ring_mc_dequeue_bulk(r, burst, size, NULL) == 0)
rte_pause();
const uint64_t mc_end = rte_rdtsc();
@@ -323,15 +323,19 @@ test_burst_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_burst(r, burst, bulk_sizes[sz]);
- rte_ring_sc_dequeue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_burst(r, burst,
+ bulk_sizes[sz], NULL);
+ rte_ring_sc_dequeue_burst(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();
const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_mp_enqueue_burst(r, burst, bulk_sizes[sz]);
- rte_ring_mc_dequeue_burst(r, burst, bulk_sizes[sz]);
+ rte_ring_mp_enqueue_burst(r, burst,
+ bulk_sizes[sz], NULL);
+ rte_ring_mc_dequeue_burst(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t mc_end = rte_rdtsc();
@@ -357,15 +361,19 @@ test_bulk_enqueue_dequeue(void)
for (sz = 0; sz < sizeof(bulk_sizes)/sizeof(bulk_sizes[0]); sz++) {
const uint64_t sc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_sp_enqueue_bulk(r, burst, bulk_sizes[sz]);
- rte_ring_sc_dequeue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_sp_enqueue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
+ rte_ring_sc_dequeue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t sc_end = rte_rdtsc();
const uint64_t mc_start = rte_rdtsc();
for (i = 0; i < iterations; i++) {
- rte_ring_mp_enqueue_bulk(r, burst, bulk_sizes[sz]);
- rte_ring_mc_dequeue_bulk(r, burst, bulk_sizes[sz]);
+ rte_ring_mp_enqueue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
+ rte_ring_mc_dequeue_bulk(r, burst,
+ bulk_sizes[sz], NULL);
}
const uint64_t mc_end = rte_rdtsc();
diff --git a/app/test/test_rwlock.c b/test/test/test_rwlock.c
index 50f58ade..50f58ade 100644
--- a/app/test/test_rwlock.c
+++ b/test/test/test_rwlock.c
diff --git a/app/test/test_sched.c b/test/test/test_sched.c
index bd2776d3..bd2776d3 100644
--- a/app/test/test_sched.c
+++ b/test/test/test_sched.c
diff --git a/app/test/test_spinlock.c b/test/test/test_spinlock.c
index 2d94eecc..2d94eecc 100644
--- a/app/test/test_spinlock.c
+++ b/test/test/test_spinlock.c
diff --git a/app/test/test_string_fns.c b/test/test/test_string_fns.c
index 8b4359aa..8b4359aa 100644
--- a/app/test/test_string_fns.c
+++ b/test/test/test_string_fns.c
diff --git a/app/test/test_table.c b/test/test/test_table.c
index 1faa0a6d..9e9eed88 100644
--- a/app/test/test_table.c
+++ b/test/test/test_table.c
@@ -75,7 +75,7 @@ uint64_t pipeline_test_hash(void *key,
__attribute__((unused)) uint32_t key_size,
__attribute__((unused)) uint64_t seed)
{
- uint32_t *k32 = (uint32_t *) key;
+ uint32_t *k32 = key;
uint32_t ip_dst = rte_be_to_cpu_32(k32[0]);
uint64_t signature = ip_dst;
diff --git a/app/test/test_table.h b/test/test/test_table.h
index 84d1845a..84d1845a 100644
--- a/app/test/test_table.h
+++ b/test/test/test_table.h
diff --git a/app/test/test_table_acl.c b/test/test/test_table_acl.c
index b3bfda4c..08c100ff 100644
--- a/app/test/test_table_acl.c
+++ b/test/test/test_table_acl.c
@@ -713,14 +713,14 @@ test_pipeline_single_filter(int expected_count)
void *objs[RING_TX_SIZE];
struct rte_mbuf *mbuf;
- ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10);
+ ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10, NULL);
if (ret <= 0) {
printf("Got no objects from ring %d - error code %d\n",
i, ret);
} else {
printf("Got %d object(s) from ring %d!\n", ret, i);
for (j = 0; j < ret; j++) {
- mbuf = (struct rte_mbuf *)objs[j];
+ mbuf = objs[j];
rte_hexdump(stdout, "mbuf",
rte_pktmbuf_mtod(mbuf, char *), 64);
rte_pktmbuf_free(mbuf);
diff --git a/app/test/test_table_acl.h b/test/test/test_table_acl.h
index a64c3e6c..a64c3e6c 100644
--- a/app/test/test_table_acl.h
+++ b/test/test/test_table_acl.h
diff --git a/app/test/test_table_combined.c b/test/test/test_table_combined.c
index a2d19a1a..a2d19a1a 100644
--- a/app/test/test_table_combined.c
+++ b/test/test/test_table_combined.c
diff --git a/app/test/test_table_combined.h b/test/test/test_table_combined.h
index e1619f92..e1619f92 100644
--- a/app/test/test_table_combined.h
+++ b/test/test/test_table_combined.h
diff --git a/app/test/test_table_pipeline.c b/test/test/test_table_pipeline.c
index 36bfeda3..a6fef721 100644
--- a/app/test/test_table_pipeline.c
+++ b/test/test/test_table_pipeline.c
@@ -494,14 +494,14 @@ test_pipeline_single_filter(int test_type, int expected_count)
void *objs[RING_TX_SIZE];
struct rte_mbuf *mbuf;
- ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10);
+ ret = rte_ring_sc_dequeue_burst(rings_tx[i], objs, 10, NULL);
if (ret <= 0)
printf("Got no objects from ring %d - error code %d\n",
i, ret);
else {
printf("Got %d object(s) from ring %d!\n", ret, i);
for (j = 0; j < ret; j++) {
- mbuf = (struct rte_mbuf *)objs[j];
+ mbuf = objs[j];
rte_hexdump(stdout, "Object:",
rte_pktmbuf_mtod(mbuf, char *),
mbuf->data_len);
diff --git a/app/test/test_table_pipeline.h b/test/test/test_table_pipeline.h
index b3f20ba3..b3f20ba3 100644
--- a/app/test/test_table_pipeline.h
+++ b/test/test/test_table_pipeline.h
diff --git a/app/test/test_table_ports.c b/test/test/test_table_ports.c
index 25323677..39592ce1 100644
--- a/app/test/test_table_ports.c
+++ b/test/test/test_table_ports.c
@@ -80,7 +80,7 @@ test_port_ring_reader(void)
mbuf[0] = (void *)rte_pktmbuf_alloc(pool);
expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
- mbuf, 1);
+ mbuf, 1, NULL);
received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf, 1);
if (received_pkts < expected_pkts)
@@ -93,7 +93,7 @@ test_port_ring_reader(void)
mbuf[i] = rte_pktmbuf_alloc(pool);
expected_pkts = rte_ring_sp_enqueue_burst(port_ring_reader_params.ring,
- (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX);
+ (void * const *) mbuf, RTE_PORT_IN_BURST_SIZE_MAX, NULL);
received_pkts = rte_port_ring_reader_ops.f_rx(port, res_mbuf,
RTE_PORT_IN_BURST_SIZE_MAX);
@@ -163,7 +163,7 @@ test_port_ring_writer(void)
rte_port_ring_writer_ops.f_flush(port);
expected_pkts = 1;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);
if (received_pkts < expected_pkts)
return -7;
@@ -178,7 +178,7 @@ test_port_ring_writer(void)
expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);
if (received_pkts < expected_pkts)
return -8;
@@ -193,7 +193,7 @@ test_port_ring_writer(void)
expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);
if (received_pkts < expected_pkts)
return -8;
@@ -208,7 +208,7 @@ test_port_ring_writer(void)
expected_pkts = RTE_PORT_IN_BURST_SIZE_MAX;
received_pkts = rte_ring_sc_dequeue_burst(port_ring_writer_params.ring,
- (void **)res_mbuf, port_ring_writer_params.tx_burst_sz);
+ (void **)res_mbuf, port_ring_writer_params.tx_burst_sz, NULL);
if (received_pkts < expected_pkts)
return -9;
diff --git a/app/test/test_table_ports.h b/test/test/test_table_ports.h
index 512b77fe..512b77fe 100644
--- a/app/test/test_table_ports.h
+++ b/test/test/test_table_ports.h
diff --git a/app/test/test_table_tables.c b/test/test/test_table_tables.c
index d835eb9f..d835eb9f 100644
--- a/app/test/test_table_tables.c
+++ b/test/test/test_table_tables.c
diff --git a/app/test/test_table_tables.h b/test/test/test_table_tables.h
index 35311362..35311362 100644
--- a/app/test/test_table_tables.h
+++ b/test/test/test_table_tables.h
diff --git a/app/test/test_tailq.c b/test/test/test_tailq.c
index 33a3e8a9..33a3e8a9 100644
--- a/app/test/test_tailq.c
+++ b/test/test/test_tailq.c
diff --git a/app/test/test_thash.c b/test/test/test_thash.c
index 61754a94..61754a94 100644
--- a/app/test/test_thash.c
+++ b/test/test/test_thash.c
diff --git a/app/test/test_timer.c b/test/test/test_timer.c
index 2f6525a5..2f6525a5 100644
--- a/app/test/test_timer.c
+++ b/test/test/test_timer.c
diff --git a/app/test/test_timer_perf.c b/test/test/test_timer_perf.c
index fa77efbd..fa77efbd 100644
--- a/app/test/test_timer_perf.c
+++ b/test/test/test_timer_perf.c
diff --git a/app/test/test_timer_racecond.c b/test/test/test_timer_racecond.c
index 7824ec4b..7824ec4b 100644
--- a/app/test/test_timer_racecond.c
+++ b/test/test/test_timer_racecond.c
diff --git a/app/test/test_version.c b/test/test/test_version.c
index afc0d0b8..afc0d0b8 100644
--- a/app/test/test_version.c
+++ b/test/test/test_version.c
diff --git a/app/test/test_xmmt_ops.h b/test/test/test_xmmt_ops.h
index 42174d2c..42174d2c 100644
--- a/app/test/test_xmmt_ops.h
+++ b/test/test/test_xmmt_ops.h
diff --git a/app/test/virtual_pmd.c b/test/test/virtual_pmd.c
index 65b44c64..e9dd8ac3 100644
--- a/app/test/virtual_pmd.c
+++ b/test/test/virtual_pmd.c
@@ -117,7 +117,6 @@ virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
dev_info->max_tx_queues = (uint16_t)512;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
}
static int
@@ -343,7 +342,7 @@ virtual_ethdev_rx_burst_success(void *queue __rte_unused,
dev_private = vrtl_eth_dev->data->dev_private;
rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **) bufs,
- nb_pkts);
+ nb_pkts, NULL);
/* increments ipackets count */
dev_private->eth_stats.ipackets += rx_count;
@@ -367,7 +366,7 @@ static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts)
{
- struct virtual_ethdev_queue *tx_q = (struct virtual_ethdev_queue *)queue;
+ struct virtual_ethdev_queue *tx_q = queue;
struct rte_eth_dev *vrtl_eth_dev;
struct virtual_ethdev_private *dev_private;
@@ -381,7 +380,7 @@ virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
nb_pkts = 0;
else
nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue, (void **)bufs,
- nb_pkts);
+ nb_pkts, NULL);
/* increment opacket count */
dev_private->eth_stats.opackets += nb_pkts;
@@ -403,7 +402,7 @@ virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
int i;
- tx_q = (struct virtual_ethdev_queue *)queue;
+ tx_q = queue;
vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
dev_private = vrtl_eth_dev->data->dev_private;
@@ -497,7 +496,7 @@ virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
vrtl_eth_dev->data->dev_private;
return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
- burst_length);
+ burst_length, NULL);
}
int
@@ -509,7 +508,7 @@ virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,
dev_private = vrtl_eth_dev->data->dev_private;
return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
- burst_length);
+ burst_length, NULL);
}
static uint8_t
@@ -533,7 +532,6 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
{
struct rte_pci_device *pci_dev = NULL;
struct rte_eth_dev *eth_dev = NULL;
- struct eth_driver *eth_drv = NULL;
struct rte_pci_driver *pci_drv = NULL;
struct rte_pci_id *id_table = NULL;
struct virtual_ethdev_private *dev_private = NULL;
@@ -551,10 +549,6 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
if (pci_dev == NULL)
goto err;
- eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, socket_id);
- if (eth_drv == NULL)
- goto err;
-
pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
if (pci_drv == NULL)
goto err;
@@ -595,14 +589,12 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
pci_drv->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;
- eth_drv->pci_drv = (struct rte_pci_driver)(*pci_drv);
- eth_dev->driver = eth_drv;
+ eth_dev->device = &pci_dev->device;
+ eth_dev->device->driver = &pci_drv->driver;
eth_dev->data->nb_rx_queues = (uint16_t)1;
eth_dev->data->nb_tx_queues = (uint16_t)1;
- TAILQ_INIT(&(eth_dev->link_intr_cbs));
-
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -625,8 +617,8 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
dev_private->dev_ops = virtual_ethdev_default_dev_ops;
eth_dev->dev_ops = &dev_private->dev_ops;
- eth_dev->pci_dev = pci_dev;
- eth_dev->pci_dev->device.driver = &eth_drv->pci_drv.driver;
+ pci_dev->device.driver = &pci_drv->driver;
+ eth_dev->device = &pci_dev->device;
eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
@@ -636,7 +628,6 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
err:
rte_free(pci_dev);
rte_free(pci_drv);
- rte_free(eth_drv);
rte_free(id_table);
rte_free(dev_private);
diff --git a/app/test/virtual_pmd.h b/test/test/virtual_pmd.h
index de001884..de001884 100644
--- a/app/test/virtual_pmd.h
+++ b/test/test/virtual_pmd.h
diff --git a/tools/cpu_layout.py b/usertools/cpu_layout.py
index d38d0b5a..93949449 100755
--- a/tools/cpu_layout.py
+++ b/usertools/cpu_layout.py
@@ -1,8 +1,10 @@
-#! /usr/bin/python
+#!/usr/bin/env python
+
#
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# Copyright(c) 2017 Cavium Networks Ltd. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -31,65 +33,71 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
-
+from __future__ import print_function
import sys
+try:
+ xrange # Python 2
+except NameError:
+ xrange = range # Python 3
sockets = []
cores = []
core_map = {}
-
-fd=open("/proc/cpuinfo")
-lines = fd.readlines()
+base_path = "/sys/devices/system/cpu"
+fd = open("{}/kernel_max".format(base_path))
+max_cpus = int(fd.read())
fd.close()
+for cpu in xrange(max_cpus + 1):
+ try:
+ fd = open("{}/cpu{}/topology/core_id".format(base_path, cpu))
+ except IOError:
+ continue
+ except:
+ break
+ core = int(fd.read())
+ fd.close()
+ fd = open("{}/cpu{}/topology/physical_package_id".format(base_path, cpu))
+ socket = int(fd.read())
+ fd.close()
+ if core not in cores:
+ cores.append(core)
+ if socket not in sockets:
+ sockets.append(socket)
+ key = (socket, core)
+ if key not in core_map:
+ core_map[key] = []
+ core_map[key].append(cpu)
-core_details = []
-core_lines = {}
-for line in lines:
- if len(line.strip()) != 0:
- name, value = line.split(":", 1)
- core_lines[name.strip()] = value.strip()
- else:
- core_details.append(core_lines)
- core_lines = {}
-
-for core in core_details:
- for field in ["processor", "core id", "physical id"]:
- if field not in core:
- print "Error getting '%s' value from /proc/cpuinfo" % field
- sys.exit(1)
- core[field] = int(core[field])
-
- if core["core id"] not in cores:
- cores.append(core["core id"])
- if core["physical id"] not in sockets:
- sockets.append(core["physical id"])
- key = (core["physical id"], core["core id"])
- if key not in core_map:
- core_map[key] = []
- core_map[key].append(core["processor"])
-
-print "============================================================"
-print "Core and Socket Information (as reported by '/proc/cpuinfo')"
-print "============================================================\n"
-print "cores = ",cores
-print "sockets = ", sockets
-print ""
+print(format("=" * (47 + len(base_path))))
+print("Core and Socket Information (as reported by '{}')".format(base_path))
+print("{}\n".format("=" * (47 + len(base_path))))
+print("cores = ", cores)
+print("sockets = ", sockets)
+print("")
max_processor_len = len(str(len(cores) * len(sockets) * 2 - 1))
-max_core_map_len = max_processor_len * 2 + len('[, ]') + len('Socket ')
+max_thread_count = len(list(core_map.values())[0])
+max_core_map_len = (max_processor_len * max_thread_count) \
+ + len(", ") * (max_thread_count - 1) \
+ + len('[]') + len('Socket ')
max_core_id_len = len(str(max(cores)))
-print " ".ljust(max_core_id_len + len('Core ')),
+output = " ".ljust(max_core_id_len + len('Core '))
for s in sockets:
- print "Socket %s" % str(s).ljust(max_core_map_len - len('Socket ')),
-print ""
-print " ".ljust(max_core_id_len + len('Core ')),
+ output += " Socket %s" % str(s).ljust(max_core_map_len - len('Socket '))
+print(output)
+
+output = " ".ljust(max_core_id_len + len('Core '))
for s in sockets:
- print "--------".ljust(max_core_map_len),
-print ""
+ output += " --------".ljust(max_core_map_len)
+ output += " "
+print(output)
for c in cores:
- print "Core %s" % str(c).ljust(max_core_id_len),
- for s in sockets:
- print str(core_map[(s,c)]).ljust(max_core_map_len),
- print ""
+ output = "Core %s" % str(c).ljust(max_core_id_len)
+ for s in sockets:
+ if (s,c) in core_map:
+ output += " " + str(core_map[(s, c)]).ljust(max_core_map_len)
+ else:
+ output += " " * (max_core_map_len + 1)
+ print(output)
diff --git a/tools/dpdk-devbind.py b/usertools/dpdk-devbind.py
index fef59c41..f0225b6c 100755
--- a/tools/dpdk-devbind.py
+++ b/usertools/dpdk-devbind.py
@@ -1,4 +1,4 @@
-#! /usr/bin/python
+#! /usr/bin/env python
#
# BSD LICENSE
#
@@ -38,9 +38,24 @@ import getopt
import subprocess
from os.path import exists, abspath, dirname, basename
-# The PCI base class for NETWORK devices
-NETWORK_BASE_CLASS = "02"
-CRYPTO_BASE_CLASS = "0b"
+# The PCI base class for all devices
+network_class = {'Class': '02', 'Vendor': None, 'Device': None,
+ 'SVendor': None, 'SDevice': None}
+encryption_class = {'Class': '10', 'Vendor': None, 'Device': None,
+ 'SVendor': None, 'SDevice': None}
+intel_processor_class = {'Class': '0b', 'Vendor': '8086', 'Device': None,
+ 'SVendor': None, 'SDevice': None}
+cavium_sso = {'Class': '08', 'Vendor': '177d', 'Device': 'a04b,a04d',
+ 'SVendor': None, 'SDevice': None}
+cavium_fpa = {'Class': '08', 'Vendor': '177d', 'Device': 'a053',
+ 'SVendor': None, 'SDevice': None}
+cavium_pkx = {'Class': '08', 'Vendor': '177d', 'Device': 'a0dd,a049',
+ 'SVendor': None, 'SDevice': None}
+
+network_devices = [network_class, cavium_pkx]
+crypto_devices = [encryption_class, intel_processor_class]
+eventdev_devices = [cavium_sso]
+mempool_devices = [cavium_fpa]
# global dict ethernet devices present. Dictionary indexed by PCI address.
# Each device within this is itself a dictionary of device properties
@@ -73,7 +88,8 @@ Options:
Display usage information and quit
-s, --status:
- Print the current status of all known network and crypto devices.
+ Print the current status of all known network, crypto, event
+ and mempool devices.
For each device, it displays the PCI domain, bus, slot and function,
along with a text description of the device. Depending upon whether the
device is being used by a kernel driver, the igb_uio driver, or no
@@ -86,6 +102,10 @@ Options:
status display will always occur after the other operations have taken
place.
+ --status-dev:
+ Print the status of given device group. Supported device groups are:
+ "net", "crypto", "event" and "mempool"
+
-b driver, --bind=driver:
Select the driver to use or \"none\" to unbind the device
@@ -93,10 +113,10 @@ Options:
Unbind a device (Equivalent to \"-b none\")
--force:
- By default, network devices which are used by Linux - as indicated by having
- routes in the routing table - cannot be modified. Using the --force
- flag overrides this behavior, allowing active links to be forcibly
- unbound.
+ By default, network devices which are used by Linux - as indicated by
+ having routes in the routing table - cannot be modified. Using the
+ --force flag overrides this behavior, allowing active links to be
+ forcibly unbound.
WARNING: This can lead to loss of network connection and should be used
with caution.
@@ -106,6 +126,9 @@ Examples:
To display current device status:
%(argv0)s --status
+To display current network device status:
+ %(argv0)s --status-dev net
+
To bind eth1 from the current driver and move to use igb_uio
%(argv0)s --bind=igb_uio eth1
@@ -140,18 +163,17 @@ def find_module(mod):
# check using depmod
try:
- depmod_out = check_output(["modinfo", "-n", mod],
- stderr=subprocess.STDOUT).lower()
- if "error" not in depmod_out:
- path = depmod_out.strip()
- if exists(path):
- return path
+ with open(os.devnull, "w") as fnull:
+ path = check_output(["modinfo", "-n", mod], stderr=fnull).strip()
+
+ if path and exists(path):
+ return path
except: # if modinfo can't find module, it fails, so continue
pass
# check for a copy based off current path
tools_dir = dirname(abspath(sys.argv[0]))
- if (tools_dir.endswith("tools")):
+ if tools_dir.endswith("tools"):
base_dir = dirname(tools_dir)
find_out = check_output(["find", base_dir, "-name", mod + ".ko"])
if len(find_out) > 0: # something matched
@@ -208,19 +230,20 @@ def has_driver(dev_id):
return "Driver_str" in devices[dev_id]
-def get_pci_device_details(dev_id):
+def get_pci_device_details(dev_id, probe_lspci):
'''This function gets additional details for a PCI device'''
device = {}
- extra_info = check_output(["lspci", "-vmmks", dev_id]).splitlines()
+ if probe_lspci:
+ extra_info = check_output(["lspci", "-vmmks", dev_id]).splitlines()
- # parse lspci details
- for line in extra_info:
- if len(line) == 0:
- continue
- name, value = line.decode().split("\t", 1)
- name = name.strip(":") + "_str"
- device[name] = value
+ # parse lspci details
+ for line in extra_info:
+ if len(line) == 0:
+ continue
+ name, value = line.decode().split("\t", 1)
+ name = name.strip(":") + "_str"
+ device[name] = value
# check for a unix interface name
device["Interface"] = ""
for base, dirs, _ in os.walk("/sys/bus/pci/devices/%s/" % dev_id):
@@ -234,106 +257,71 @@ def get_pci_device_details(dev_id):
return device
-
-def get_nic_details():
- '''This function populates the "devices" dictionary. The keys used are
- the pci addresses (domain:bus:slot.func). The values are themselves
- dictionaries - one for each NIC.'''
- global devices
- global dpdk_drivers
-
- # clear any old data
+def clear_data():
+ '''This function clears any old data'''
devices = {}
- # first loop through and read details for all devices
- # request machine readable format, with numeric IDs
- dev = {}
- dev_lines = check_output(["lspci", "-Dvmmn"]).splitlines()
- for dev_line in dev_lines:
- if (len(dev_line) == 0):
- if dev["Class"][0:2] == NETWORK_BASE_CLASS:
- # convert device and vendor ids to numbers, then add to global
- dev["Vendor"] = int(dev["Vendor"], 16)
- dev["Device"] = int(dev["Device"], 16)
- # use dict to make copy of dev
- devices[dev["Slot"]] = dict(dev)
- else:
- name, value = dev_line.decode().split("\t", 1)
- dev[name.rstrip(":")] = value
-
- # check what is the interface if any for an ssh connection if
- # any to this host, so we can mark it later.
- ssh_if = []
- route = check_output(["ip", "-o", "route"])
- # filter out all lines for 169.254 routes
- route = "\n".join(filter(lambda ln: not ln.startswith("169.254"),
- route.decode().splitlines()))
- rt_info = route.split()
- for i in range(len(rt_info) - 1):
- if rt_info[i] == "dev":
- ssh_if.append(rt_info[i+1])
- # based on the basic info, get extended text details
- for d in devices.keys():
- # get additional info and add it to existing data
- devices[d] = devices[d].copy()
- devices[d].update(get_pci_device_details(d).items())
-
- for _if in ssh_if:
- if _if in devices[d]["Interface"].split(","):
- devices[d]["Ssh_if"] = True
- devices[d]["Active"] = "*Active*"
- break
-
- # add igb_uio to list of supporting modules if needed
- if "Module_str" in devices[d]:
- for driver in dpdk_drivers:
- if driver not in devices[d]["Module_str"]:
- devices[d]["Module_str"] = \
- devices[d]["Module_str"] + ",%s" % driver
- else:
- devices[d]["Module_str"] = ",".join(dpdk_drivers)
-
- # make sure the driver and module strings do not have any duplicates
- if has_driver(d):
- modules = devices[d]["Module_str"].split(",")
- if devices[d]["Driver_str"] in modules:
- modules.remove(devices[d]["Driver_str"])
- devices[d]["Module_str"] = ",".join(modules)
-
-
-def get_crypto_details():
+def get_device_details(devices_type):
'''This function populates the "devices" dictionary. The keys used are
the pci addresses (domain:bus:slot.func). The values are themselves
dictionaries - one for each NIC.'''
global devices
global dpdk_drivers
- # clear any old data
- # devices = {}
# first loop through and read details for all devices
- # request machine readable format, with numeric IDs
+ # request machine readable format, with numeric IDs and String
dev = {}
- dev_lines = check_output(["lspci", "-Dvmmn"]).splitlines()
+ dev_lines = check_output(["lspci", "-Dvmmnnk"]).splitlines()
for dev_line in dev_lines:
- if (len(dev_line) == 0):
- if (dev["Class"][0:2] == CRYPTO_BASE_CLASS):
- # convert device and vendor ids to numbers, then add to global
- dev["Vendor"] = int(dev["Vendor"], 16)
- dev["Device"] = int(dev["Device"], 16)
+ if len(dev_line) == 0:
+ if device_type_match(dev, devices_type):
+ # Replace "Driver" with "Driver_str" to have consistency of
+ # of dictionary key names
+ if "Driver" in dev.keys():
+ dev["Driver_str"] = dev.pop("Driver")
# use dict to make copy of dev
devices[dev["Slot"]] = dict(dev)
+ # Clear previous device's data
+ dev = {}
else:
name, value = dev_line.decode().split("\t", 1)
- dev[name.rstrip(":")] = value
+ value_list = value.rsplit(' ', 1)
+ if len(value_list) > 1:
+ # String stored in <name>_str
+ dev[name.rstrip(":") + '_str'] = value_list[0]
+ # Numeric IDs
+ dev[name.rstrip(":")] = value_list[len(value_list) - 1] \
+ .rstrip("]").lstrip("[")
+
+ if devices_type == network_devices:
+ # check what is the interface if any for an ssh connection if
+ # any to this host, so we can mark it later.
+ ssh_if = []
+ route = check_output(["ip", "-o", "route"])
+ # filter out all lines for 169.254 routes
+ route = "\n".join(filter(lambda ln: not ln.startswith("169.254"),
+ route.decode().splitlines()))
+ rt_info = route.split()
+ for i in range(len(rt_info) - 1):
+ if rt_info[i] == "dev":
+ ssh_if.append(rt_info[i+1])
# based on the basic info, get extended text details
for d in devices.keys():
- if devices[d]["Class"][0:2] != CRYPTO_BASE_CLASS:
+ if not device_type_match(devices[d], devices_type):
continue
# get additional info and add it to existing data
devices[d] = devices[d].copy()
- devices[d].update(get_pci_device_details(d).items())
+ # No need to probe lspci
+ devices[d].update(get_pci_device_details(d, False).items())
+
+ if devices_type == network_devices:
+ for _if in ssh_if:
+ if _if in devices[d]["Interface"].split(","):
+ devices[d]["Ssh_if"] = True
+ devices[d]["Active"] = "*Active*"
+ break
# add igb_uio to list of supporting modules if needed
if "Module_str" in devices[d]:
@@ -352,6 +340,24 @@ def get_crypto_details():
devices[d]["Module_str"] = ",".join(modules)
+def device_type_match(dev, devices_type):
+ for i in range(len(devices_type)):
+ param_count = len(
+ [x for x in devices_type[i].values() if x is not None])
+ match_count = 0
+ if dev["Class"][0:2] == devices_type[i]["Class"]:
+ match_count = match_count + 1
+ for key in devices_type[i].keys():
+ if key != 'Class' and devices_type[i][key]:
+ value_list = devices_type[i][key].split(',')
+ for value in value_list:
+ if value.strip(' ') == dev[key]:
+ match_count = match_count + 1
+ # count must be the number of non None parameters to match
+ if match_count == param_count:
+ return True
+ return False
+
def dev_id_from_dev_name(dev_name):
'''Take a device "name" - a string passed in by user to identify a NIC
device, and determine the device id - i.e. the domain:bus:slot.func - for
@@ -423,22 +429,46 @@ def bind_one(dev_id, driver, force):
unbind_one(dev_id, force)
dev["Driver_str"] = "" # clear driver string
- # if we are binding to one of DPDK drivers, add PCI id's to that driver
+ # For kernels >= 3.15 driver_override can be used to specify the driver
+ # for a device rather than relying on the driver to provide a positive
+ # match of the device. The existing process of looking up
+ # the vendor and device ID, adding them to the driver new_id,
+ # will erroneously bind other devices too which has the additional burden
+ # of unbinding those devices
if driver in dpdk_drivers:
- filename = "/sys/bus/pci/drivers/%s/new_id" % driver
- try:
- f = open(filename, "w")
- except:
- print("Error: bind failed for %s - Cannot open %s"
- % (dev_id, filename))
- return
- try:
- f.write("%04x %04x" % (dev["Vendor"], dev["Device"]))
- f.close()
- except:
- print("Error: bind failed for %s - Cannot write new PCI ID to "
- "driver %s" % (dev_id, driver))
- return
+ filename = "/sys/bus/pci/devices/%s/driver_override" % dev_id
+ if os.path.exists(filename):
+ try:
+ f = open(filename, "w")
+ except:
+ print("Error: bind failed for %s - Cannot open %s"
+ % (dev_id, filename))
+ return
+ try:
+ f.write("%s" % driver)
+ f.close()
+ except:
+ print("Error: bind failed for %s - Cannot write driver %s to "
+ "PCI ID " % (dev_id, driver))
+ return
+ # For kernels < 3.15 use new_id to add PCI id's to the driver
+ else:
+ filename = "/sys/bus/pci/drivers/%s/new_id" % driver
+ try:
+ f = open(filename, "w")
+ except:
+ print("Error: bind failed for %s - Cannot open %s"
+ % (dev_id, filename))
+ return
+ try:
+ # Convert Device and Vendor Id to int to write to new_id
+ f.write("%04x %04x" % (int(dev["Vendor"],16),
+ int(dev["Device"], 16)))
+ f.close()
+ except:
+ print("Error: bind failed for %s - Cannot write new PCI ID to "
+ "driver %s" % (dev_id, driver))
+ return
# do the bind by writing to /sys
filename = "/sys/bus/pci/drivers/%s/bind" % driver
@@ -457,7 +487,7 @@ def bind_one(dev_id, driver, force):
# for some reason, closing dev_id after adding a new PCI ID to new_id
# results in IOError. however, if the device was successfully bound,
# we don't care for any errors and can safely ignore IOError
- tmp = get_pci_device_details(dev_id)
+ tmp = get_pci_device_details(dev_id, True)
if "Driver_str" in tmp and tmp["Driver_str"] == driver:
return
print("Error: bind failed for %s - Cannot bind to driver %s"
@@ -466,6 +496,25 @@ def bind_one(dev_id, driver, force):
bind_one(dev_id, saved_driver, force)
return
+ # For kernels > 3.15 driver_override is used to bind a device to a driver.
+ # Before unbinding it, overwrite driver_override with empty string so that
+ # the device can be bound to any other driver
+ filename = "/sys/bus/pci/devices/%s/driver_override" % dev_id
+ if os.path.exists(filename):
+ try:
+ f = open(filename, "w")
+ except:
+ print("Error: unbind failed for %s - Cannot open %s"
+ % (dev_id, filename))
+ sys.exit(1)
+ try:
+ f.write("\00")
+ f.close()
+ except:
+ print("Error: unbind failed for %s - Cannot open %s"
+ % (dev_id, filename))
+ sys.exit(1)
+
def unbind_all(dev_list, force=False):
"""Unbind method, takes a list of device locations"""
@@ -483,23 +532,24 @@ def bind_all(dev_list, driver, force=False):
for d in dev_list:
bind_one(d, driver, force)
- # when binding devices to a generic driver (i.e. one that doesn't have a
- # PCI ID table), some devices that are not bound to any other driver could
- # be bound even if no one has asked them to. hence, we check the list of
- # drivers again, and see if some of the previously-unbound devices were
- # erroneously bound.
- for d in devices.keys():
- # skip devices that were already bound or that we know should be bound
- if "Driver_str" in devices[d] or d in dev_list:
- continue
+ # For kenels < 3.15 when binding devices to a generic driver
+ # (i.e. one that doesn't have a PCI ID table) using new_id, some devices
+ # that are not bound to any other driver could be bound even if no one has
+ # asked them to. hence, we check the list of drivers again, and see if
+ # some of the previously-unbound devices were erroneously bound.
+ if not os.path.exists("/sys/bus/pci/devices/%s/driver_override" % d):
+ for d in devices.keys():
+ # skip devices that were already bound or that we know should be bound
+ if "Driver_str" in devices[d] or d in dev_list:
+ continue
- # update information about this device
- devices[d] = dict(devices[d].items() +
- get_pci_device_details(d).items())
+ # update information about this device
+ devices[d] = dict(devices[d].items() +
+ get_pci_device_details(d, True).items())
- # check if updated information indicates that the device was bound
- if "Driver_str" in devices[d]:
- unbind_one(d, force)
+ # check if updated information indicates that the device was bound
+ if "Driver_str" in devices[d]:
+ unbind_one(d, force)
def display_devices(title, dev_list, extra_params=None):
@@ -515,19 +565,17 @@ def display_devices(title, dev_list, extra_params=None):
else:
for dev in dev_list:
if extra_params is not None:
- strings.append("%s '%s' %s" % (dev["Slot"],
- dev["Device_str"], extra_params % dev))
+ strings.append("%s '%s %s' %s" % (dev["Slot"],
+ dev["Device_str"],
+ dev["Device"],
+ extra_params % dev))
else:
strings.append("%s '%s'" % (dev["Slot"], dev["Device_str"]))
# sort before printing, so that the entries appear in PCI order
strings.sort()
print("\n".join(strings)) # print one per line
-
-def show_status():
- '''Function called when the script is passed the "--status" option.
- Displays to the user what devices are bound to the igb_uio driver, the
- kernel driver or to no driver'''
+def show_device_status(devices_type, device_name):
global dpdk_drivers
kernel_drv = []
dpdk_drv = []
@@ -535,7 +583,7 @@ def show_status():
# split our list of network devices into the three categories above
for d in devices.keys():
- if (NETWORK_BASE_CLASS in devices[d]["Class"]):
+ if device_type_match(devices[d], devices_type):
if not has_driver(d):
no_drv.append(devices[d])
continue
@@ -545,41 +593,37 @@ def show_status():
kernel_drv.append(devices[d])
# print each category separately, so we can clearly see what's used by DPDK
- display_devices("Network devices using DPDK-compatible driver", dpdk_drv,
- "drv=%(Driver_str)s unused=%(Module_str)s")
- display_devices("Network devices using kernel driver", kernel_drv,
+ display_devices("%s devices using DPDK-compatible driver" % device_name,
+ dpdk_drv, "drv=%(Driver_str)s unused=%(Module_str)s")
+ display_devices("%s devices using kernel driver" % device_name, kernel_drv,
"if=%(Interface)s drv=%(Driver_str)s "
"unused=%(Module_str)s %(Active)s")
- display_devices("Other network devices", no_drv, "unused=%(Module_str)s")
+ display_devices("Other %s devices" % device_name, no_drv,
+ "unused=%(Module_str)s")
- # split our list of crypto devices into the three categories above
- kernel_drv = []
- dpdk_drv = []
- no_drv = []
+def show_status():
+ '''Function called when the script is passed the "--status" option.
+ Displays to the user what devices are bound to the igb_uio driver, the
+ kernel driver or to no driver'''
- for d in devices.keys():
- if (CRYPTO_BASE_CLASS in devices[d]["Class"]):
- if not has_driver(d):
- no_drv.append(devices[d])
- continue
- if devices[d]["Driver_str"] in dpdk_drivers:
- dpdk_drv.append(devices[d])
- else:
- kernel_drv.append(devices[d])
+ if status_dev == "net" or status_dev == "all":
+ show_device_status(network_devices, "Network")
- display_devices("Crypto devices using DPDK-compatible driver", dpdk_drv,
- "drv=%(Driver_str)s unused=%(Module_str)s")
- display_devices("Crypto devices using kernel driver", kernel_drv,
- "drv=%(Driver_str)s "
- "unused=%(Module_str)s")
- display_devices("Other crypto devices", no_drv, "unused=%(Module_str)s")
+ if status_dev == "crypto" or status_dev == "all":
+ show_device_status(crypto_devices, "Crypto")
+
+ if status_dev == "event" or status_dev == "all":
+ show_device_status(eventdev_devices, "Eventdev")
+ if status_dev == "mempool" or status_dev == "all":
+ show_device_status(mempool_devices, "Mempool")
def parse_args():
'''Parses the command-line arguments given by the user and takes the
appropriate action for each'''
global b_flag
global status_flag
+ global status_dev
global force_flag
global args
if len(sys.argv) <= 1:
@@ -588,8 +632,8 @@ def parse_args():
try:
opts, args = getopt.getopt(sys.argv[1:], "b:us",
- ["help", "usage", "status", "force",
- "bind=", "unbind"])
+ ["help", "usage", "status", "status-dev=",
+ "force", "bind=", "unbind", ])
except getopt.GetoptError as error:
print(str(error))
print("Run '%s --usage' for further information" % sys.argv[0])
@@ -599,8 +643,12 @@ def parse_args():
if opt == "--help" or opt == "--usage":
usage()
sys.exit(0)
+ if opt == "--status-dev":
+ status_flag = True
+ status_dev = arg
if opt == "--status" or opt == "-s":
status_flag = True
+ status_dev = "all"
if opt == "--force":
force_flag = True
if opt == "-b" or opt == "-u" or opt == "--bind" or opt == "--unbind":
@@ -637,8 +685,12 @@ def do_arg_actions():
bind_all(args, b_flag, force_flag)
if status_flag:
if b_flag is not None:
- get_nic_details() # refresh if we have changed anything
- get_crypto_details() # refresh if we have changed anything
+ clear_data()
+ # refresh if we have changed anything
+ get_device_details(network_devices)
+ get_device_details(crypto_devices)
+ get_device_details(eventdev_devices)
+ get_device_details(mempool_devices)
show_status()
@@ -646,8 +698,11 @@ def main():
'''program main function'''
parse_args()
check_modules()
- get_nic_details()
- get_crypto_details()
+ clear_data()
+ get_device_details(network_devices)
+ get_device_details(crypto_devices)
+ get_device_details(eventdev_devices)
+ get_device_details(mempool_devices)
do_arg_actions()
if __name__ == "__main__":
diff --git a/tools/dpdk-pmdinfo.py b/usertools/dpdk-pmdinfo.py
index 3db9819c..46c1be08 100755
--- a/tools/dpdk-pmdinfo.py
+++ b/usertools/dpdk-pmdinfo.py
@@ -1,55 +1,25 @@
#!/usr/bin/env python
+
# -------------------------------------------------------------------------
#
# Utility to dump PMD_INFO_STRING support from an object file
#
# -------------------------------------------------------------------------
+from __future__ import print_function
+import json
import os
+import platform
+import string
import sys
+from elftools.common.exceptions import ELFError
+from elftools.common.py3compat import (byte2int, bytes2str, str2bytes)
+from elftools.elf.elffile import ELFFile
from optparse import OptionParser
-import string
-import json
-import platform
# For running from development directory. It should take precedence over the
# installed pyelftools.
sys.path.insert(0, '.')
-
-from elftools import __version__
-from elftools.common.exceptions import ELFError
-from elftools.common.py3compat import (
- ifilter, byte2int, bytes2str, itervalues, str2bytes)
-from elftools.elf.elffile import ELFFile
-from elftools.elf.dynamic import DynamicSection, DynamicSegment
-from elftools.elf.enums import ENUM_D_TAG
-from elftools.elf.segments import InterpSegment
-from elftools.elf.sections import SymbolTableSection
-from elftools.elf.gnuversions import (
- GNUVerSymSection, GNUVerDefSection,
- GNUVerNeedSection,
-)
-from elftools.elf.relocation import RelocationSection
-from elftools.elf.descriptions import (
- describe_ei_class, describe_ei_data, describe_ei_version,
- describe_ei_osabi, describe_e_type, describe_e_machine,
- describe_e_version_numeric, describe_p_type, describe_p_flags,
- describe_sh_type, describe_sh_flags,
- describe_symbol_type, describe_symbol_bind, describe_symbol_visibility,
- describe_symbol_shndx, describe_reloc_type, describe_dyn_tag,
- describe_ver_flags,
-)
-from elftools.elf.constants import E_FLAGS
-from elftools.dwarf.dwarfinfo import DWARFInfo
-from elftools.dwarf.descriptions import (
- describe_reg_name, describe_attr_value, set_global_machine_arch,
- describe_CFI_instructions, describe_CFI_register_rule,
- describe_CFI_CFA_rule,
-)
-from elftools.dwarf.constants import (
- DW_LNS_copy, DW_LNS_set_file, DW_LNE_define_file)
-from elftools.dwarf.callframe import CIE, FDE
-
raw_output = False
pcidb = None
@@ -86,7 +56,7 @@ class Vendor:
self.devices[devID] = Device(deviceStr)
def report(self):
- print self.ID, self.name
+ print(self.ID, self.name)
for id, dev in self.devices.items():
dev.report()
@@ -112,7 +82,7 @@ class Device:
self.subdevices = {}
def report(self):
- print "\t%s\t%s" % (self.ID, self.name)
+ print("\t%s\t%s" % (self.ID, self.name))
for subID, subdev in self.subdevices.items():
subdev.report()
@@ -158,7 +128,7 @@ class SubDevice:
self.name = name
def report(self):
- print "\t\t%s\t%s\t%s" % (self.vendorID, self.deviceID, self.name)
+ print("\t\t%s\t%s\t%s" % (self.vendorID, self.deviceID, self.name))
class PCIIds:
@@ -186,7 +156,7 @@ class PCIIds:
"""Reports the vendors
"""
for vid, v in self.vendors.items():
- print v.ID, v.name
+ print(v.ID, v.name)
def report(self, vendor=None):
"""
@@ -217,7 +187,7 @@ class PCIIds:
def parse(self):
if len(self.contents) < 1:
- print "data/%s-pci.ids not found" % self.date
+ print("data/%s-pci.ids not found" % self.date)
else:
vendorID = ""
deviceID = ""
@@ -312,7 +282,10 @@ class ReadElf(object):
global raw_output
global pcidb
- optional_pmd_info = [{'id': 'params', 'tag': 'PMD PARAMETERS'}]
+ optional_pmd_info = [
+ {'id': 'params', 'tag': 'PMD PARAMETERS'},
+ {'id': 'kmod', 'tag': 'PMD KMOD DEPENDENCIES'}
+ ]
i = mystring.index("=")
mystring = mystring[i + 2:]
@@ -326,7 +299,7 @@ class ReadElf(object):
for i in optional_pmd_info:
try:
print("%s: %s" % (i['tag'], pmdinfo[i['id']]))
- except KeyError as e:
+ except KeyError:
continue
if (len(pmdinfo["pci_ids"]) != 0):
@@ -464,7 +437,7 @@ class ReadElf(object):
for tag in dynsec.iter_tags():
if tag.entry.d_tag == 'DT_NEEDED':
- rc = tag.needed.find("librte_pmd")
+ rc = tag.needed.find(b"librte_pmd")
if (rc != -1):
library = search_file(tag.needed,
runpath + ":" + ldlibpath +
@@ -475,7 +448,7 @@ class ReadElf(object):
with open(library, 'rb') as file:
try:
libelf = ReadElf(file, sys.stdout)
- except ELFError as e:
+ except ELFError:
print("%s is no an ELF file" % library)
continue
libelf.process_dt_needed_entries()
@@ -491,7 +464,7 @@ def scan_autoload_path(autoload_path):
try:
dirs = os.listdir(autoload_path)
- except OSError as e:
+ except OSError:
# Couldn't read the directory, give up
return
@@ -503,10 +476,10 @@ def scan_autoload_path(autoload_path):
try:
file = open(dpath, 'rb')
readelf = ReadElf(file, sys.stdout)
- except ELFError as e:
+ except ELFError:
# this is likely not an elf file, skip it
continue
- except IOError as e:
+ except IOError:
# No permission to read the file, skip it
continue
@@ -531,7 +504,7 @@ def scan_for_autoload_pmds(dpdk_path):
file = open(dpdk_path, 'rb')
try:
readelf = ReadElf(file, sys.stdout)
- except ElfError as e:
+ except ElfError:
if raw_output is False:
print("Unable to parse %s" % file)
return
@@ -557,7 +530,7 @@ def main(stream=None):
global raw_output
global pcidb
- pcifile_default = "./pci.ids" # for unknown OS's assume local file
+ pcifile_default = "./pci.ids" # For unknown OS's assume local file
if platform.system() == 'Linux':
pcifile_default = "/usr/share/hwdata/pci.ids"
elif platform.system() == 'FreeBSD':
@@ -577,7 +550,8 @@ def main(stream=None):
"to get vendor names from",
default=pcifile_default, metavar="FILE")
optparser.add_option("-t", "--table", dest="tblout",
- help="output information on hw support as a hex table",
+ help="output information on hw support as a "
+ "hex table",
action='store_true')
optparser.add_option("-p", "--plugindir", dest="pdir",
help="scan dpdk for autoload plugins",
diff --git a/tools/dpdk-setup.sh b/usertools/dpdk-setup.sh
index 14ed5907..c4fec5a6 100755
--- a/tools/dpdk-setup.sh
+++ b/usertools/dpdk-setup.sh
@@ -428,7 +428,7 @@ grep_meminfo()
show_devices()
{
if [ -d /sys/module/vfio_pci -o -d /sys/module/igb_uio ]; then
- ${RTE_SDK}/tools/dpdk-devbind.py --status
+ ${RTE_SDK}/usertools/dpdk-devbind.py --status
else
echo "# Please load the 'igb_uio' or 'vfio-pci' kernel module before "
echo "# querying or adjusting device bindings"
@@ -441,11 +441,11 @@ show_devices()
bind_devices_to_vfio()
{
if [ -d /sys/module/vfio_pci ]; then
- ${RTE_SDK}/tools/dpdk-devbind.py --status
+ ${RTE_SDK}/usertools/dpdk-devbind.py --status
echo ""
echo -n "Enter PCI address of device to bind to VFIO driver: "
read PCI_PATH
- sudo ${RTE_SDK}/tools/dpdk-devbind.py -b vfio-pci $PCI_PATH &&
+ sudo ${RTE_SDK}/usertools/dpdk-devbind.py -b vfio-pci $PCI_PATH &&
echo "OK"
else
echo "# Please load the 'vfio-pci' kernel module before querying or "
@@ -459,11 +459,11 @@ bind_devices_to_vfio()
bind_devices_to_igb_uio()
{
if [ -d /sys/module/igb_uio ]; then
- ${RTE_SDK}/tools/dpdk-devbind.py --status
+ ${RTE_SDK}/usertools/dpdk-devbind.py --status
echo ""
echo -n "Enter PCI address of device to bind to IGB UIO driver: "
read PCI_PATH
- sudo ${RTE_SDK}/tools/dpdk-devbind.py -b igb_uio $PCI_PATH && echo "OK"
+ sudo ${RTE_SDK}/usertools/dpdk-devbind.py -b igb_uio $PCI_PATH && echo "OK"
else
echo "# Please load the 'igb_uio' kernel module before querying or "
echo "# adjusting device bindings"
@@ -475,14 +475,14 @@ bind_devices_to_igb_uio()
#
unbind_devices()
{
- ${RTE_SDK}/tools/dpdk-devbind.py --status
+ ${RTE_SDK}/usertools/dpdk-devbind.py --status
echo ""
echo -n "Enter PCI address of device to unbind: "
read PCI_PATH
echo ""
echo -n "Enter name of kernel driver to bind the device to: "
read DRV
- sudo ${RTE_SDK}/tools/dpdk-devbind.py -b $DRV $PCI_PATH && echo "OK"
+ sudo ${RTE_SDK}/usertools/dpdk-devbind.py -b $DRV $PCI_PATH && echo "OK"
}
#